author     Ben Skeggs <bskeggs@redhat.com>	2011-02-24 15:17:07 -0500
committer  Ben Skeggs <bskeggs@redhat.com>	2011-02-24 15:17:07 -0500
commit     e11d57ca0b6dada29007ce3ad3db6c84034a768f (patch)
tree       39ad0c208e2cb3da79f958bd0e9d0d12b8e5f8fe /drivers/gpu
parent     317495b25ec1f0beb0dbac8ee0dfec59a1addf03 (diff)
parent     a2c06ee2fe5b48a71e697bae00c6e7195fc016b6 (diff)

Merge remote-tracking branch 'airlied/drm-core-next' into drm-nouveau-next

Diffstat (limited to 'drivers/gpu'): 155 files changed, 11923 insertions(+), 8826 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7af443672626..a6feb78c404c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,7 +23,7 @@ config DRM_KMS_HELPER
 	tristate
 	depends on DRM
 	select FB
-	select FRAMEBUFFER_CONSOLE if !EMBEDDED
+	select FRAMEBUFFER_CONSOLE if !EXPERT
 	help
 	  FB and CRTC helpers for KMS drivers.
 
@@ -73,52 +73,47 @@ source "drivers/gpu/drm/radeon/Kconfig"
 
 config DRM_I810
 	tristate "Intel I810"
-	# BKL usage in order to avoid AB-BA deadlocks, may become BROKEN_ON_SMP
-	depends on DRM && AGP && AGP_INTEL && BKL
+	# !PREEMPT because of missing ioctl locking
+	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
 	help
 	  Choose this option if you have an Intel I810 graphics card.  If M is
 	  selected, the module will be called i810.  AGP support is required
 	  for this driver to work.
 
-choice
-	prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
-	depends on DRM && AGP && AGP_INTEL
-	optional
-
-config DRM_I830
-	tristate "i830 driver"
-	# BKL usage in order to avoid AB-BA deadlocks, i830 may get removed
-	depends on BKL
-	help
-	  Choose this option if you have a system that has Intel 830M, 845G,
-	  852GM, 855GM or 865G integrated graphics.  If M is selected, the
-	  module will be called i830.  AGP support is required for this driver
-	  to work. This driver is used by the older X releases X.org 6.7 and
-	  XFree86 4.3. If unsure, build this and i915 as modules and the X server
-	  will load the correct one.
-
 config DRM_I915
-	tristate "i915 driver"
+	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+	depends on DRM
+	depends on AGP
 	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
 	select SHMEM
+	select TMPFS
 	select DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	# i915 depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	select ACPI_BUTTON if ACPI
 	help
-	  Choose this option if you have a system that has Intel 830M, 845G,
-	  852GM, 855GM 865G or 915G integrated graphics.  If M is selected, the
-	  module will be called i915.  AGP support is required for this driver
-	  to work. This driver is used by the Intel driver in X.org 6.8 and
-	  XFree86 4.4 and above. If unsure, build this and i830 as modules and
-	  the X server will load the correct one.
+	  Choose this option if you have a system that has "Intel Graphics
+	  Media Accelerator" or "HD Graphics" integrated graphics,
+	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+	  If M is selected, the module will be called i915.  AGP support
+	  is required for this driver to work.  This driver is used by
+	  the Intel driver in X.org 6.8 and XFree86 4.4 and above.  It
+	  replaces the older i830 module that supported a subset of the
+	  hardware in older X.org releases.
+
+	  Note that the older i810/i815 chipsets require the use of the
+	  i810 driver instead, and the Atom z5xx series has an entirely
+	  different implementation.
 
 config DRM_I915_KMS
 	bool "Enable modesetting on intel by default"
@@ -130,8 +125,6 @@ config DRM_I915_KMS
 	  the driver to bind to PCI devices, which precludes loading things
 	  like intelfb.
 
-endchoice
-
 config DRM_MGA
 	tristate "Matrox g200/g400"
 	depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 997c43d04909..89cf05a72d1c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o
+		drm_trace_points.o drm_global.o drm_usb.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
@@ -29,7 +29,6 @@ obj-$(CONFIG_DRM_R128) += r128/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_MGA) += mga/
 obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I830) += i830/
 obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_SIS) += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb98b73a..0cb2ba50af53 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_agp_bind_pages);
 
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
-	agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
 #endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6985cb1da72c..4c95b5fd9df3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -156,12 +156,12 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
 	{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
 	{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
-	{ DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
-	{ DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
-	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
-	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+	{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
 	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
-	{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
+	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
 };
 
 static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -2674,3 +2674,56 @@ out:
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->funcs->reset)
+			crtc->funcs->reset(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->funcs->reset)
+			encoder->funcs->reset(encoder);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->funcs->reset)
+			connector->funcs->reset(connector);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_create_dumb *args = data;
+
+	if (!dev->driver->dumb_create)
+		return -ENOSYS;
+	return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_map_dumb *args = data;
+
+	/* call driver ioctl to get mmap offset */
+	if (!dev->driver->dumb_map_offset)
+		return -ENOSYS;
+
+	return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+				void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_destroy_dumb *args = data;
+
+	if (!dev->driver->dumb_destroy)
+		return -ENOSYS;
+
+	return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
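
The three dumb-buffer ioctls added above give KMS userspace a driver-agnostic way to obtain a CPU-mappable buffer: create a handle, ask for an mmap offset, map it, and destroy it when done. A minimal sketch of that call sequence, assuming a KMS device node at /dev/dri/card0 and a driver that implements the new dumb_create/dumb_map_offset/dumb_destroy hooks (error handling trimmed):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <drm/drm.h>
	#include <drm/drm_mode.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		struct drm_mode_create_dumb create = {
			.width = 1024, .height = 768, .bpp = 32,
		};

		if (fd < 0)
			return 1;
		/* fails with ENOSYS if the driver lacks ->dumb_create */
		if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
			return 1;

		struct drm_mode_map_dumb map = { .handle = create.handle };
		if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) == 0) {
			void *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
					MAP_SHARED, fd, map.offset);
			if (fb != MAP_FAILED) {
				memset(fb, 0, create.size);	/* clear to black */
				munmap(fb, create.size);
			}
		}

		struct drm_mode_destroy_dumb destroy = { .handle = create.handle };
		ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
		close(fd);
		return 0;
	}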
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4c200931a6bc..92369655dca3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -343,13 +343,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	struct drm_encoder *encoder;
 	bool ret = true;
 
-	adjusted_mode = drm_mode_duplicate(dev, mode);
-
 	crtc->enabled = drm_helper_crtc_in_use(crtc);
-
 	if (!crtc->enabled)
 		return true;
 
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+
 	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
@@ -437,10 +436,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	 */
 	drm_calc_timestamping_constants(crtc);
 
-	/* XXX free adjustedmode */
-	drm_mode_destroy(dev, adjusted_mode);
 	/* FIXME: add subpixel order */
 done:
+	drm_mode_destroy(dev, adjusted_mode);
 	if (!ret) {
 		crtc->hwmode = saved_hwmode;
 		crtc->mode = saved_mode;
@@ -482,6 +480,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	int count = 0, ro, fail = 0;
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	int ret = 0;
+	int i;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -496,14 +495,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 
 	crtc_funcs = set->crtc->helper_private;
 
+	if (!set->mode)
+		set->fb = NULL;
+
 	if (set->fb) {
 		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
 				set->crtc->base.id, set->fb->base.id,
 				(int)set->num_connectors, set->x, set->y);
 	} else {
-		DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
-				set->crtc->base.id, (int)set->num_connectors,
-				set->x, set->y);
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+		set->mode = NULL;
+		set->num_connectors = 0;
 	}
 
 	dev = set->crtc->dev;
@@ -648,8 +650,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		mode_changed = true;
 
 	if (mode_changed) {
-		set->crtc->enabled = (set->mode != NULL);
-		if (set->mode != NULL) {
+		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+		if (set->crtc->enabled) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
@@ -660,9 +662,16 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 						      old_fb)) {
 				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
 					  set->crtc->base.id);
+				set->crtc->fb = old_fb;
 				ret = -EINVAL;
 				goto fail;
 			}
+			DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+			for (i = 0; i < set->num_connectors; i++) {
+				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+					      drm_get_connector_name(set->connectors[i]));
+				set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+			}
 		}
 		drm_helper_disable_unused_functions(dev);
 	} else if (fb_changed) {
@@ -674,8 +683,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		set->crtc->fb = set->fb;
 		ret = crtc_funcs->mode_set_base(set->crtc,
 						set->x, set->y, old_fb);
-		if (ret != 0)
+		if (ret != 0) {
+			set->crtc->fb = old_fb;
 			goto fail;
+		}
 	}
 
 	kfree(save_connectors);
@@ -852,7 +863,7 @@ static void output_poll_execute(struct work_struct *work)
 	struct delayed_work *delayed_work = to_delayed_work(work);
 	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
 	struct drm_connector *connector;
-	enum drm_connector_status old_status, status;
+	enum drm_connector_status old_status;
 	bool repoll = false, changed = false;
 
 	if (!drm_kms_helper_poll)
@@ -877,8 +888,12 @@ static void output_poll_execute(struct work_struct *work)
 		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
 			continue;
 
-		status = connector->funcs->detect(connector, false);
-		if (old_status != status)
+		connector->status = connector->funcs->detect(connector, false);
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+			      connector->base.id,
+			      drm_get_connector_name(connector),
+			      old_status, connector->status);
+		if (old_status != connector->status)
 			changed = true;
 	}
 
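
For context, drm_crtc_helper_set_config() above is what a KMS driver plugs into its CRTC function table, and the ->reset() hook consumed by the new drm_mode_config_reset() lives in the same table. A sketch of the wiring, in kernel context, with foo_crtc_reset as a hypothetical driver callback:

	static void foo_crtc_reset(struct drm_crtc *crtc)
	{
		/* resync software state with hardware, e.g. after resume;
		 * invoked via crtc->funcs->reset() from drm_mode_config_reset() */
	}

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		.reset		= foo_crtc_reset,
		.set_config	= drm_crtc_helper_set_config,
		.destroy	= drm_crtc_cleanup,
	};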
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 271835a71570..0d04914eb058 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -150,7 +150,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
@@ -234,49 +237,6 @@ int drm_lastclose(struct drm_device * dev)
 	return 0;
 }
 
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
-	DRM_DEBUG("\n");
-	INIT_LIST_HEAD(&driver->device_list);
-
-	if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
-		return drm_platform_init(driver);
-	else
-		return drm_pci_init(driver);
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
-	struct drm_device *dev, *tmp;
-	DRM_DEBUG("\n");
-
-	if (driver->driver_features & DRIVER_MODESET) {
-		pci_unregister_driver(&driver->pci_driver);
-	} else {
-		list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
-			drm_put_dev(dev);
-	}
-
-	DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
-
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
 	.owner = THIS_MODULE,
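
With drm_init()/drm_exit() removed, drivers register through bus-specific entry points instead. A sketch of the replacement pattern for a PCI driver, assuming the drm_pci_init()/drm_pci_exit() signatures introduced by this series (the foo_* names, including foo_drm_driver, are hypothetical):

	static struct pci_driver foo_pci_driver = {
		.name		= "foo",
		.id_table	= foo_pciidlist,	/* hypothetical PCI ID table */
		.probe		= foo_pci_probe,
		.remove		= foo_pci_remove,
	};

	static int __init foo_init(void)
	{
		return drm_pci_init(&foo_drm_driver, &foo_pci_driver);
	}

	static void __exit foo_exit(void)
	{
		drm_pci_exit(&foo_drm_driver, &foo_pci_driver);
	}

	module_init(foo_init);
	module_exit(foo_exit);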
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a245d17165ae..af60d9be9632 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -449,12 +449,11 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
 					   int hsize, int vsize, int fresh)
 {
+	struct drm_display_mode *mode = NULL;
 	int i;
-	struct drm_display_mode *ptr, *mode;
 
-	mode = NULL;
 	for (i = 0; i < drm_num_dmt_modes; i++) {
-		ptr = &drm_dmt_modes[i];
+		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
 		if (hsize == ptr->hdisplay &&
 			vsize == ptr->vdisplay &&
 			fresh == drm_mode_vrefresh(ptr)) {
@@ -885,7 +884,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 }
 
 static bool
-mode_is_rb(struct drm_display_mode *mode)
+mode_is_rb(const struct drm_display_mode *mode)
 {
 	return (mode->htotal - mode->hdisplay == 160) &&
 	       (mode->hsync_end - mode->hdisplay == 80) &&
@@ -894,7 +893,8 @@ mode_is_rb(struct drm_display_mode *mode)
 }
 
 static bool
-mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_hsync_range(const struct drm_display_mode *mode,
+		    struct edid *edid, u8 *t)
 {
 	int hsync, hmin, hmax;
 
@@ -910,7 +910,8 @@ mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
 }
 
 static bool
-mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_vsync_range(const struct drm_display_mode *mode,
+		    struct edid *edid, u8 *t)
 {
 	int vsync, vmin, vmax;
 
@@ -941,7 +942,7 @@ range_pixel_clock(struct edid *edid, u8 *t)
 }
 
 static bool
-mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
 	      struct detailed_timing *timing)
 {
 	u32 max_clock;
@@ -1472,7 +1473,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 			int hdisplay, int vdisplay)
 {
 	int i, count, num_modes = 0;
-	struct drm_display_mode *mode, *ptr;
+	struct drm_display_mode *mode;
 	struct drm_device *dev = connector->dev;
 
 	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -1482,7 +1483,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 		vdisplay = 0;
 
 	for (i = 0; i < count; i++) {
-		ptr = &drm_dmt_modes[i];
+		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
 		if (hdisplay && vdisplay) {
 			/*
 			 * Only when two are valid, they will be used to check
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 6eb7592e152f..5f2064489fd5 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -32,7 +32,7 @@
  * This table is copied from xfree86/modes/xf86EdidModes.c.
  * But the mode with Reduced blank feature is deleted.
  */
-static struct drm_display_mode drm_dmt_modes[] = {
+static const struct drm_display_mode drm_dmt_modes[] = {
 	/* 640x350@85Hz */
 	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
 		   736, 832, 0, 350, 382, 385, 445, 0,
@@ -266,7 +266,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
 static const int drm_num_dmt_modes =
 	sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 
-static struct drm_display_mode edid_est_modes[] = {
+static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d2849e4ea4d0..d421f9d58d46 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -627,6 +627,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		value = (red << info->var.red.offset) |
 			(green << info->var.green.offset) |
 			(blue << info->var.blue.offset);
+		if (info->var.transp.length > 0) {
+			u32 mask = (1 << info->var.transp.length) - 1;
+			mask <<= info->var.transp.offset;
+			value |= mask;
+		}
 		palette[regno] = value;
 		return 0;
 	}
@@ -985,6 +990,8 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 	info->fix.type = FB_TYPE_PACKED_PIXELS;
 	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
 		FB_VISUAL_TRUECOLOR;
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
 	info->fix.type_aux = 0;
 	info->fix.xpanstep = 1; /* doing it in hw */
 	info->fix.ypanstep = 1; /* doing it in hw */
@@ -1005,6 +1012,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
 	info->var.xres_virtual = fb->width;
 	info->var.yres_virtual = fb->height;
 	info->var.bits_per_pixel = fb->bits_per_pixel;
+	info->var.accel_flags = FB_ACCELF_TEXT;
 	info->var.xoffset = 0;
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1538,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
+static int __init drm_fb_helper_modinit(void)
+{
+	const char *name = "fbcon";
+	struct module *fbcon;
+
+	mutex_lock(&module_mutex);
+	fbcon = find_module(name);
+	mutex_unlock(&module_mutex);
+
+	if (!fbcon)
+		request_module_nowait(name);
+	return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index a39794bac04b..2ec7d48fc4a8 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 		return -EBUSY;	/* No exclusive opens */
 	if (!drm_cpu_valid())
 		return -EINVAL;
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+		return -EINVAL;
 
 	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ea1c4b019ebf..57ce27c9a747 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -101,7 +101,7 @@ drm_gem_init(struct drm_device *dev)
 
 	dev->mm_private = mm;
 
-	if (drm_ht_create(&mm->offset_hash, 19)) {
+	if (drm_ht_create(&mm->offset_hash, 12)) {
 		kfree(mm);
 		return -ENOMEM;
 	}
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
 /**
  * Removes the mapping from handle to filp for this object.
  */
-static int
+int
 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 {
 	struct drm_device *dev;
@@ -214,6 +214,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_gem_handle_delete);
 
 /**
  * Create a handle for this object. This adds a handle reference
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index a93d7b4ddaa6..e3a75688f3cd 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -39,27 +39,18 @@
 
 int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
 {
-	unsigned int i;
+	unsigned int size = 1 << order;
 
-	ht->size = 1 << order;
 	ht->order = order;
-	ht->fill = 0;
 	ht->table = NULL;
-	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
-	if (!ht->use_vmalloc) {
-		ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
-	}
-	if (!ht->table) {
-		ht->use_vmalloc = 1;
-		ht->table = vmalloc(ht->size*sizeof(*ht->table));
-	}
+	if (size <= PAGE_SIZE / sizeof(*ht->table))
+		ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+	else
+		ht->table = vzalloc(size*sizeof(*ht->table));
 	if (!ht->table) {
 		DRM_ERROR("Out of memory for hash table\n");
 		return -ENOMEM;
 	}
-	for (i=0; i< ht->size; ++i) {
-		INIT_HLIST_HEAD(&ht->table[i]);
-	}
 	return 0;
 }
 EXPORT_SYMBOL(drm_ht_create);
@@ -180,7 +171,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 	list = drm_ht_find_key(ht, key);
 	if (list) {
 		hlist_del_init(list);
-		ht->fill--;
 		return 0;
 	}
 	return -EINVAL;
@@ -189,7 +179,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
 	hlist_del_init(&item->head);
-	ht->fill--;
 	return 0;
 }
 EXPORT_SYMBOL(drm_ht_remove_item);
@@ -197,10 +186,10 @@ EXPORT_SYMBOL(drm_ht_remove_item);
 void drm_ht_remove(struct drm_open_hash *ht)
 {
 	if (ht->table) {
-		if (ht->use_vmalloc)
-			vfree(ht->table);
-		else
+		if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
 			kfree(ht->table);
+		else
+			vfree(ht->table);
 		ht->table = NULL;
 	}
 }
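
The hash table being simplified here stores items embedded in caller objects, keyed by an unsigned long. A minimal sketch of how in-kernel users drive the drm_open_hash API (the foo_* names are hypothetical; drm_ht_insert_item() returns -EINVAL on a duplicate key):

	struct foo_entry {
		struct drm_hash_item hash;	/* caller fills hash.key */
		int payload;
	};

	static int foo_track(struct drm_open_hash *ht, struct foo_entry *e,
			     unsigned long key)
	{
		e->hash.key = key;
		return drm_ht_insert_item(ht, &e->hash);
	}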
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 3cdbaf379bb5..812aaac4438a 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -47,30 +47,19 @@ int drm_name_info(struct seq_file *m, void *data)
 	struct drm_minor *minor = node->minor;
 	struct drm_device *dev = minor->dev;
 	struct drm_master *master = minor->master;
-
+	const char *bus_name;
 	if (!master)
 		return 0;
 
-	if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
-		if (master->unique) {
-			seq_printf(m, "%s %s %s\n",
-				   dev->driver->platform_device->name,
-				   dev_name(dev->dev), master->unique);
-		} else {
-			seq_printf(m, "%s\n",
-				   dev->driver->platform_device->name);
-		}
+	bus_name = dev->driver->bus->get_name(dev);
+	if (master->unique) {
+		seq_printf(m, "%s %s %s\n",
+			   bus_name,
+			   dev_name(dev->dev), master->unique);
 	} else {
-		if (master->unique) {
-			seq_printf(m, "%s %s %s\n",
-				   dev->driver->pci_driver.name,
-				   dev_name(dev->dev), master->unique);
-		} else {
-			seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
-				   dev_name(dev->dev));
-		}
+		seq_printf(m, "%s %s\n",
+			   bus_name, dev_name(dev->dev));
 	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 47db4df37a69..117490590f56 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -96,7 +96,7 @@ int drm_setunique(struct drm_device *dev, void *data,
 {
 	struct drm_unique *u = data;
 	struct drm_master *master = file_priv->master;
-	int domain, bus, slot, func, ret;
+	int ret;
 
 	if (master->unique_len || master->unique)
 		return -EBUSY;
@@ -104,50 +104,12 @@ int drm_setunique(struct drm_device *dev, void *data,
 	if (!u->unique_len || u->unique_len > 1024)
 		return -EINVAL;
 
-	master->unique_len = u->unique_len;
-	master->unique_size = u->unique_len + 1;
-	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
-	if (!master->unique) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
-		ret = -EFAULT;
-		goto err;
-	}
-
-	master->unique[master->unique_len] = '\0';
-
-	dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
-			       strlen(master->unique) + 2, GFP_KERNEL);
-	if (!dev->devname) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
-		master->unique);
-
-	/* Return error if the busid submitted doesn't match the device's actual
-	 * busid.
-	 */
-	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
-	if (ret != 3) {
-		ret = -EINVAL;
-		goto err;
-	}
-
-	domain = bus >> 8;
-	bus &= 0xff;
+	if (!dev->driver->bus->set_unique)
+		return -EINVAL;
 
-	if ((domain != drm_get_pci_domain(dev)) ||
-	    (bus != dev->pdev->bus->number) ||
-	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
-	    (func != PCI_FUNC(dev->pdev->devfn))) {
-		ret = -EINVAL;
+	ret = dev->driver->bus->set_unique(dev, master, u);
+	if (ret)
 		goto err;
-	}
 
 	return 0;
 
@@ -159,74 +121,15 @@ err:
 static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_master *master = file_priv->master;
-	int len, ret;
+	int ret;
 
 	if (master->unique != NULL)
 		drm_unset_busid(dev, master);
 
-	if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
-		master->unique_len = 10 + strlen(dev->platformdev->name);
-		master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-
-		if (master->unique == NULL)
-			return -ENOMEM;
-
-		len = snprintf(master->unique, master->unique_len,
-			"platform:%s", dev->platformdev->name);
-
-		if (len > master->unique_len) {
-			DRM_ERROR("Unique buffer overflowed\n");
-			ret = -EINVAL;
-			goto err;
-		}
-
-		dev->devname =
-			kmalloc(strlen(dev->platformdev->name) +
-				master->unique_len + 2, GFP_KERNEL);
-
-		if (dev->devname == NULL) {
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		sprintf(dev->devname, "%s@%s", dev->platformdev->name,
-			master->unique);
-
-	} else {
-		master->unique_len = 40;
-		master->unique_size = master->unique_len;
-		master->unique = kmalloc(master->unique_size, GFP_KERNEL);
-		if (master->unique == NULL)
-			return -ENOMEM;
-
-		len = snprintf(master->unique, master->unique_len,
-			"pci:%04x:%02x:%02x.%d",
-			drm_get_pci_domain(dev),
-			dev->pdev->bus->number,
-			PCI_SLOT(dev->pdev->devfn),
-			PCI_FUNC(dev->pdev->devfn));
-		if (len >= master->unique_len) {
-			DRM_ERROR("buffer overflow");
-			ret = -EINVAL;
-			goto err;
-		} else
-			master->unique_len = len;
-
-		dev->devname =
-			kmalloc(strlen(dev->driver->pci_driver.name) +
-				master->unique_len + 2, GFP_KERNEL);
-
-		if (dev->devname == NULL) {
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
-			master->unique);
-	}
-
+	ret = dev->driver->bus->set_busid(dev, master);
+	if (ret)
+		goto err;
 	return 0;
-
 err:
 	drm_unset_busid(dev, master);
 	return ret;
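
drm_setunique() and drm_set_busid() now dispatch through per-bus callbacks instead of open-coding PCI and platform cases. A sketch of the ops table they assume, inferred from the call sites in this diff (the real struct drm_bus in this series carries additional fields, so treat this as illustrative only):

	struct drm_bus_sketch {
		const char *(*get_name)(struct drm_device *dev);
		int (*set_busid)(struct drm_device *dev, struct drm_master *master);
		int (*set_unique)(struct drm_device *dev, struct drm_master *master,
				  struct drm_unique *u);
		int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
	};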
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 4e82d0d3c378..cb49685bde01 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -74,23 +74,13 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 {
 	struct drm_irq_busid *p = data;
 
-	if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+	if (!dev->driver->bus->irq_by_busid)
 		return -EINVAL;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
-	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
-	    (p->busnum & 0xff) != dev->pdev->bus->number ||
-	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
-		return -EINVAL;
-
-	p->irq = dev->pdev->irq;
-
-	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
-		  p->irq);
-
-	return 0;
+	return dev->driver->bus->irq_by_busid(dev, p);
 }
 
 /*
@@ -1047,10 +1037,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 	struct timeval now;
 	unsigned long flags;
 	unsigned int seq;
+	int ret;
 
 	e = kzalloc(sizeof *e, GFP_KERNEL);
-	if (e == NULL)
-		return -ENOMEM;
+	if (e == NULL) {
+		ret = -ENOMEM;
+		goto err_put;
+	}
 
 	e->pipe = pipe;
 	e->base.pid = current->pid;
@@ -1064,9 +1057,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 	spin_lock_irqsave(&dev->event_lock, flags);
 
 	if (file_priv->event_space < sizeof e->event) {
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		kfree(e);
-		return -ENOMEM;
+		ret = -EBUSY;
+		goto err_unlock;
 	}
 
 	file_priv->event_space -= sizeof e->event;
@@ -1086,20 +1078,30 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 
 	e->event.sequence = vblwait->request.sequence;
 	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+		e->event.sequence = seq;
 		e->event.tv_sec = now.tv_sec;
 		e->event.tv_usec = now.tv_usec;
-		drm_vblank_put(dev, e->pipe);
+		drm_vblank_put(dev, pipe);
 		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
+		vblwait->reply.sequence = seq;
 		trace_drm_vblank_event_delivered(current->pid, pipe,
 						 vblwait->request.sequence);
 	} else {
 		list_add_tail(&e->base.link, &dev->vblank_event_list);
+		vblwait->reply.sequence = vblwait->request.sequence;
 	}
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	return 0;
+
+err_unlock:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	kfree(e);
+err_put:
+	drm_vblank_put(dev, pipe);
+	return ret;
 }
 
 /**
@@ -1238,7 +1240,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
  */
-void drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
 	u32 vblcount;
 	s64 diff_ns;
@@ -1246,7 +1248,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 	unsigned long irqflags;
 
 	if (!dev->num_crtcs)
-		return;
+		return false;
 
 	/* Need timestamp lock to prevent concurrent execution with
 	 * vblank enable/disable, as this would cause inconsistent
@@ -1257,7 +1259,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 	/* Vblank irq handling disabled. Nothing to do. */
 	if (!dev->vblank_enabled[crtc]) {
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-		return;
+		return false;
 	}
 
 	/* Fetch corresponding timestamp for this vblank interval from
@@ -1299,5 +1301,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
 	drm_handle_vblank_events(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	return true;
 }
 EXPORT_SYMBOL(drm_handle_vblank);
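
drm_handle_vblank() now reports whether it actually handled the interrupt, which lets drivers return a more honest value from their IRQ handlers. A sketch of a consumer (the foo_* names, including the hardware status check, are hypothetical):

	static irqreturn_t foo_irq_handler(int irq, void *arg)
	{
		struct drm_device *dev = arg;
		bool handled = false;

		if (foo_hw_vblank_pending(dev))		/* hypothetical status read */
			handled = drm_handle_vblank(dev, 0);	/* false if vblank irqs are off */

		return handled ? IRQ_HANDLED : IRQ_NONE;
	}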
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc302ed90..add1737dae0d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 	else {
 		child =
 		    list_entry(mm->unused_nodes.next,
-			       struct drm_mm_node, free_stack);
-		list_del(&child->free_stack);
+			       struct drm_mm_node, node_list);
+		list_del(&child->node_list);
 		--mm->num_unused;
 	}
 	spin_unlock(&mm->unused_lock);
@@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm)
 			return ret;
 		}
 		++mm->num_unused;
-		list_add_tail(&node->free_stack, &mm->unused_nodes);
+		list_add_tail(&node->node_list, &mm->unused_nodes);
 	}
 	spin_unlock(&mm->unused_lock);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
 
-static int drm_mm_create_tail_node(struct drm_mm *mm,
-				   unsigned long start,
-				   unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
-	struct drm_mm_node *child;
-
-	child = drm_mm_kmalloc(mm, atomic);
-	if (unlikely(child == NULL))
-		return -ENOMEM;
-
-	child->free = 1;
-	child->size = size;
-	child->start = start;
-	child->mm = mm;
+	return hole_node->start + hole_node->size;
+}
 
-	list_add_tail(&child->node_list, &mm->node_list);
-	list_add_tail(&child->free_stack, &mm->free_stack);
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	struct drm_mm_node *next_node =
+		list_entry(hole_node->node_list.next, struct drm_mm_node,
+			   node_list);
 
-	return 0;
+	return next_node->start;
 }
 
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-						 unsigned long size,
-						 int atomic)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+				 struct drm_mm_node *node,
+				 unsigned long size, unsigned alignment)
 {
-	struct drm_mm_node *child;
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
-	child = drm_mm_kmalloc(parent->mm, atomic);
-	if (unlikely(child == NULL))
-		return NULL;
+	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	INIT_LIST_HEAD(&child->free_stack);
+	if (alignment)
+		tmp = hole_start % alignment;
 
-	child->size = size;
-	child->start = parent->start;
-	child->mm = parent->mm;
+	if (!tmp) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
+	} else
+		wasted = alignment - tmp;
 
-	list_add_tail(&child->node_list, &parent->node_list);
-	INIT_LIST_HEAD(&child->free_stack);
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+	node->allocated = 1;
 
-	parent->size -= size;
-	parent->start += size;
-	return child;
-}
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
 
+	BUG_ON(node->start + node->size > hole_end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	} else {
+		node->hole_follows = 0;
+	}
+}
 
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
 					     unsigned alignment,
 					     int atomic)
 {
+	struct drm_mm_node *node;
+
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
 
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
+	drm_mm_insert_helper(hole_node, node, size, alignment);
 
-	if (alignment)
-		tmp = node->start % alignment;
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
 
-	if (tmp) {
-		align_splitoff =
-		    drm_mm_split_at_start(node, alignment - tmp, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
-	}
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	struct drm_mm_node *hole_node;
 
-	if (node->size == size) {
-		list_del_init(&node->free_stack);
-		node->free = 0;
-	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
-	}
+	hole_node = drm_mm_search_free(mm, size, alignment, 0);
+	if (!hole_node)
+		return -ENOSPC;
 
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
+	drm_mm_insert_helper(hole_node, node, size, alignment);
 
-	return node;
+	return 0;
 }
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_insert_node);
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long start,
-						unsigned long end,
-						int atomic)
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+				       struct drm_mm_node *node,
+				       unsigned long size, unsigned alignment,
+				       unsigned long start, unsigned long end)
 {
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
-	unsigned wasted = 0;
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
-	if (node->start < start)
-		wasted += start - node->start;
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
+	if (hole_start < start)
+		wasted += start - hole_start;
 	if (alignment)
-		tmp = ((node->start + wasted) % alignment);
+		tmp = (hole_start + wasted) % alignment;
 
 	if (tmp)
 		wasted += alignment - tmp;
-	if (wasted) {
-		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
+
+	if (!wasted) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
 	}
 
-	if (node->size == size) {
-		list_del_init(&node->free_stack);
-		node->free = 0;
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+	node->allocated = 1;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > hole_end);
+	BUG_ON(node->start + node->size > end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
 	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
+		node->hole_follows = 0;
 	}
+}
+
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+						   unsigned long size,
+						   unsigned alignment,
+						   unsigned long start,
+						   unsigned long end,
+						   int atomic)
+{
+	struct drm_mm_node *node;
 
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
+
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
 
 	return node;
 }
 EXPORT_SYMBOL(drm_mm_get_block_range_generic);
 
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
  */
-
-void drm_mm_put_block(struct drm_mm_node *cur)
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
 {
+	struct drm_mm_node *hole_node;
 
-	struct drm_mm *mm = cur->mm;
+	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
230 | struct list_head *cur_head = &cur->node_list; | 269 | start, end, 0); |
231 | struct list_head *root_head = &mm->node_list; | 270 | if (!hole_node) |
232 | struct drm_mm_node *prev_node = NULL; | 271 | return -ENOSPC; |
233 | struct drm_mm_node *next_node; | ||
234 | 272 | ||
235 | int merged = 0; | 273 | drm_mm_insert_helper_range(hole_node, node, size, alignment, |
274 | start, end); | ||
236 | 275 | ||
237 | BUG_ON(cur->scanned_block || cur->scanned_prev_free | 276 | return 0; |
238 | || cur->scanned_next_free); | 277 | } |
278 | EXPORT_SYMBOL(drm_mm_insert_node_in_range); | ||
239 | 279 | ||
240 | if (cur_head->prev != root_head) { | 280 | /** |
241 | prev_node = | 281 | * Remove a memory node from the allocator. |
242 | list_entry(cur_head->prev, struct drm_mm_node, node_list); | 282 | */ |
243 | if (prev_node->free) { | 283 | void drm_mm_remove_node(struct drm_mm_node *node) |
244 | prev_node->size += cur->size; | 284 | { |
245 | merged = 1; | 285 | struct drm_mm *mm = node->mm; |
246 | } | 286 | struct drm_mm_node *prev_node; |
247 | } | 287 | |
248 | if (cur_head->next != root_head) { | 288 | BUG_ON(node->scanned_block || node->scanned_prev_free |
249 | next_node = | 289 | || node->scanned_next_free); |
250 | list_entry(cur_head->next, struct drm_mm_node, node_list); | 290 | |
251 | if (next_node->free) { | 291 | prev_node = |
252 | if (merged) { | 292 | list_entry(node->node_list.prev, struct drm_mm_node, node_list); |
253 | prev_node->size += next_node->size; | 293 | |
254 | list_del(&next_node->node_list); | 294 | if (node->hole_follows) { |
255 | list_del(&next_node->free_stack); | 295 | BUG_ON(drm_mm_hole_node_start(node) |
256 | spin_lock(&mm->unused_lock); | 296 | == drm_mm_hole_node_end(node)); |
257 | if (mm->num_unused < MM_UNUSED_TARGET) { | 297 | list_del(&node->hole_stack); |
258 | list_add(&next_node->free_stack, | 298 | } else |
259 | &mm->unused_nodes); | 299 | BUG_ON(drm_mm_hole_node_start(node) |
260 | ++mm->num_unused; | 300 | != drm_mm_hole_node_end(node)); |
261 | } else | 301 | |
262 | kfree(next_node); | 302 | if (!prev_node->hole_follows) { |
263 | spin_unlock(&mm->unused_lock); | 303 | prev_node->hole_follows = 1; |
264 | } else { | 304 | list_add(&prev_node->hole_stack, &mm->hole_stack); |
265 | next_node->size += cur->size; | 305 | } else |
266 | next_node->start = cur->start; | 306 | list_move(&prev_node->hole_stack, &mm->hole_stack); |
267 | merged = 1; | 307 | |
268 | } | 308 | list_del(&node->node_list); |
269 | } | 309 | node->allocated = 0; |
270 | } | ||
271 | if (!merged) { | ||
272 | cur->free = 1; | ||
273 | list_add(&cur->free_stack, &mm->free_stack); | ||
274 | } else { | ||
275 | list_del(&cur->node_list); | ||
276 | spin_lock(&mm->unused_lock); | ||
277 | if (mm->num_unused < MM_UNUSED_TARGET) { | ||
278 | list_add(&cur->free_stack, &mm->unused_nodes); | ||
279 | ++mm->num_unused; | ||
280 | } else | ||
281 | kfree(cur); | ||
282 | spin_unlock(&mm->unused_lock); | ||
283 | } | ||
284 | } | 310 | } |
311 | EXPORT_SYMBOL(drm_mm_remove_node); | ||
312 | |||
313 | /* | ||
314 | * Remove a memory node from the allocator and free the allocated struct | ||
315 | * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the | ||
316 | * drm_mm_get_block functions. | ||
317 | */ | ||
318 | void drm_mm_put_block(struct drm_mm_node *node) | ||
319 | { | ||
320 | |||
321 | struct drm_mm *mm = node->mm; | ||
322 | |||
323 | drm_mm_remove_node(node); | ||
285 | 324 | ||
325 | spin_lock(&mm->unused_lock); | ||
326 | if (mm->num_unused < MM_UNUSED_TARGET) { | ||
327 | list_add(&node->node_list, &mm->unused_nodes); | ||
328 | ++mm->num_unused; | ||
329 | } else | ||
330 | kfree(node); | ||
331 | spin_unlock(&mm->unused_lock); | ||
332 | } | ||
286 | EXPORT_SYMBOL(drm_mm_put_block); | 333 | EXPORT_SYMBOL(drm_mm_put_block); |
287 | 334 | ||
288 | static int check_free_hole(unsigned long start, unsigned long end, | 335 | static int check_free_hole(unsigned long start, unsigned long end, |
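[Editor's note] The rewrite above drops the old free_stack of explicit free blocks in favour of implicit hole tracking: a node with hole_follows set owns the gap between its own end and the start of the next node on node_list. A minimal sketch of the two helpers the new code leans on; the real inline definitions live in include/drm/drm_mm.h and may differ in detail:

    /* Sketch of the hole helpers assumed throughout drm_mm.c above. */
    static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
    {
            /* The hole owned by a node begins where its allocation ends. */
            return hole_node->start + hole_node->size;
    }

    static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
    {
            /* ...and runs up to the start of the next node in address order. */
            return list_entry(hole_node->node_list.next,
                              struct drm_mm_node, node_list)->start;
    }

Under this invariant an empty hole means hole_start == hole_end, which is exactly what the BUG_ON pair in drm_mm_remove_node asserts.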
@@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, | |||
319 | best = NULL; | 366 | best = NULL; |
320 | best_size = ~0UL; | 367 | best_size = ~0UL; |
321 | 368 | ||
322 | list_for_each_entry(entry, &mm->free_stack, free_stack) { | 369 | list_for_each_entry(entry, &mm->hole_stack, hole_stack) { |
323 | if (!check_free_hole(entry->start, entry->start + entry->size, | 370 | BUG_ON(!entry->hole_follows); |
371 | if (!check_free_hole(drm_mm_hole_node_start(entry), | ||
372 | drm_mm_hole_node_end(entry), | ||
324 | size, alignment)) | 373 | size, alignment)) |
325 | continue; | 374 | continue; |
326 | 375 | ||
@@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, | |||
353 | best = NULL; | 402 | best = NULL; |
354 | best_size = ~0UL; | 403 | best_size = ~0UL; |
355 | 404 | ||
356 | list_for_each_entry(entry, &mm->free_stack, free_stack) { | 405 | list_for_each_entry(entry, &mm->hole_stack, hole_stack) { |
357 | unsigned long adj_start = entry->start < start ? | 406 | unsigned long adj_start = drm_mm_hole_node_start(entry) < start ? |
358 | start : entry->start; | 407 | start : drm_mm_hole_node_start(entry); |
359 | unsigned long adj_end = entry->start + entry->size > end ? | 408 | unsigned long adj_end = drm_mm_hole_node_end(entry) > end ? |
360 | end : entry->start + entry->size; | 409 | end : drm_mm_hole_node_end(entry); |
361 | 410 | ||
411 | BUG_ON(!entry->hole_follows); | ||
362 | if (!check_free_hole(adj_start, adj_end, size, alignment)) | 412 | if (!check_free_hole(adj_start, adj_end, size, alignment)) |
363 | continue; | 413 | continue; |
364 | 414 | ||
@@ -376,6 +426,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, | |||
376 | EXPORT_SYMBOL(drm_mm_search_free_in_range); | 426 | EXPORT_SYMBOL(drm_mm_search_free_in_range); |
377 | 427 | ||
378 | /** | 428 | /** |
429 | * Moves an allocation. To be used with embedded struct drm_mm_node. | ||
430 | */ | ||
431 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) | ||
432 | { | ||
433 | list_replace(&old->node_list, &new->node_list); | ||
434 | list_replace(&old->hole_stack, &new->hole_stack); | ||
435 | new->hole_follows = old->hole_follows; | ||
436 | new->mm = old->mm; | ||
437 | new->start = old->start; | ||
438 | new->size = old->size; | ||
439 | |||
440 | old->allocated = 0; | ||
441 | new->allocated = 1; | ||
442 | } | ||
443 | EXPORT_SYMBOL(drm_mm_replace_node); | ||
444 | |||
445 | /** | ||
379 | * Initialize lru scanning. | 446 | * Initialize lru scanning. |
380 | * | 447 | * |
381 | * This simply sets up the scanning routines with the parameters for the desired | 448 | * This simply sets up the scanning routines with the parameters for the desired |
@@ -392,10 +459,38 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, | |||
392 | mm->scanned_blocks = 0; | 459 | mm->scanned_blocks = 0; |
393 | mm->scan_hit_start = 0; | 460 | mm->scan_hit_start = 0; |
394 | mm->scan_hit_size = 0; | 461 | mm->scan_hit_size = 0; |
462 | mm->scan_check_range = 0; | ||
463 | mm->prev_scanned_node = NULL; | ||
395 | } | 464 | } |
396 | EXPORT_SYMBOL(drm_mm_init_scan); | 465 | EXPORT_SYMBOL(drm_mm_init_scan); |
397 | 466 | ||
398 | /** | 467 | /** |
468 | * Initialize lru scanning. | ||
469 | * | ||
470 | * This simply sets up the scanning routines with the parameters for the desired | ||
471 | * hole. This version is for range-restricted scans. | ||
472 | * | ||
473 | * Warning: As long as the scan list is non-empty, no other operations than | ||
474 | * adding/removing nodes to/from the scan list are allowed. | ||
475 | */ | ||
476 | void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size, | ||
477 | unsigned alignment, | ||
478 | unsigned long start, | ||
479 | unsigned long end) | ||
480 | { | ||
481 | mm->scan_alignment = alignment; | ||
482 | mm->scan_size = size; | ||
483 | mm->scanned_blocks = 0; | ||
484 | mm->scan_hit_start = 0; | ||
485 | mm->scan_hit_size = 0; | ||
486 | mm->scan_start = start; | ||
487 | mm->scan_end = end; | ||
488 | mm->scan_check_range = 1; | ||
489 | mm->prev_scanned_node = NULL; | ||
490 | } | ||
491 | EXPORT_SYMBOL(drm_mm_init_scan_with_range); | ||
492 | |||
493 | /** | ||
399 | * Add a node to the scan list that might be freed to make space for the desired | 494 | * Add a node to the scan list that might be freed to make space for the desired |
400 | * hole. | 495 | * hole. |
401 | * | 496 | * |
@@ -404,58 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan); | |||
404 | int drm_mm_scan_add_block(struct drm_mm_node *node) | 499 | int drm_mm_scan_add_block(struct drm_mm_node *node) |
405 | { | 500 | { |
406 | struct drm_mm *mm = node->mm; | 501 | struct drm_mm *mm = node->mm; |
407 | struct list_head *prev_free, *next_free; | 502 | struct drm_mm_node *prev_node; |
408 | struct drm_mm_node *prev_node, *next_node; | 503 | unsigned long hole_start, hole_end; |
504 | unsigned long adj_start; | ||
505 | unsigned long adj_end; | ||
409 | 506 | ||
410 | mm->scanned_blocks++; | 507 | mm->scanned_blocks++; |
411 | 508 | ||
412 | prev_free = next_free = NULL; | 509 | BUG_ON(node->scanned_block); |
413 | |||
414 | BUG_ON(node->free); | ||
415 | node->scanned_block = 1; | 510 | node->scanned_block = 1; |
416 | node->free = 1; | ||
417 | |||
418 | if (node->node_list.prev != &mm->node_list) { | ||
419 | prev_node = list_entry(node->node_list.prev, struct drm_mm_node, | ||
420 | node_list); | ||
421 | |||
422 | if (prev_node->free) { | ||
423 | list_del(&prev_node->node_list); | ||
424 | |||
425 | node->start = prev_node->start; | ||
426 | node->size += prev_node->size; | ||
427 | |||
428 | prev_node->scanned_prev_free = 1; | ||
429 | |||
430 | prev_free = &prev_node->free_stack; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | if (node->node_list.next != &mm->node_list) { | ||
435 | next_node = list_entry(node->node_list.next, struct drm_mm_node, | ||
436 | node_list); | ||
437 | 511 | ||
438 | if (next_node->free) { | 512 | prev_node = list_entry(node->node_list.prev, struct drm_mm_node, |
439 | list_del(&next_node->node_list); | 513 | node_list); |
440 | 514 | ||
441 | node->size += next_node->size; | 515 | node->scanned_preceeds_hole = prev_node->hole_follows; |
442 | 516 | prev_node->hole_follows = 1; | |
443 | next_node->scanned_next_free = 1; | 517 | list_del(&node->node_list); |
444 | 518 | node->node_list.prev = &prev_node->node_list; | |
445 | next_free = &next_node->free_stack; | 519 | node->node_list.next = &mm->prev_scanned_node->node_list; |
446 | } | 520 | mm->prev_scanned_node = node; |
521 | |||
522 | hole_start = drm_mm_hole_node_start(prev_node); | ||
523 | hole_end = drm_mm_hole_node_end(prev_node); | ||
524 | if (mm->scan_check_range) { | ||
525 | adj_start = hole_start < mm->scan_start ? | ||
526 | mm->scan_start : hole_start; | ||
527 | adj_end = hole_end > mm->scan_end ? | ||
528 | mm->scan_end : hole_end; | ||
529 | } else { | ||
530 | adj_start = hole_start; | ||
531 | adj_end = hole_end; | ||
447 | } | 532 | } |
448 | 533 | ||
449 | /* The free_stack list is not used for allocated objects, so these two | 534 | if (check_free_hole(adj_start, adj_end, |
450 | * pointers can be abused (as long as no allocations in this memory | ||
451 | * manager happens). */ | ||
452 | node->free_stack.prev = prev_free; | ||
453 | node->free_stack.next = next_free; | ||
454 | |||
455 | if (check_free_hole(node->start, node->start + node->size, | ||
456 | mm->scan_size, mm->scan_alignment)) { | 535 | mm->scan_size, mm->scan_alignment)) { |
457 | mm->scan_hit_start = node->start; | 536 | mm->scan_hit_start = hole_start; |
458 | mm->scan_hit_size = node->size; | 537 | mm->scan_hit_size = hole_end; |
459 | 538 | ||
460 | return 1; | 539 | return 1; |
461 | } | 540 | } |
@@ -481,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block); | |||
481 | int drm_mm_scan_remove_block(struct drm_mm_node *node) | 560 | int drm_mm_scan_remove_block(struct drm_mm_node *node) |
482 | { | 561 | { |
483 | struct drm_mm *mm = node->mm; | 562 | struct drm_mm *mm = node->mm; |
484 | struct drm_mm_node *prev_node, *next_node; | 563 | struct drm_mm_node *prev_node; |
485 | 564 | ||
486 | mm->scanned_blocks--; | 565 | mm->scanned_blocks--; |
487 | 566 | ||
488 | BUG_ON(!node->scanned_block); | 567 | BUG_ON(!node->scanned_block); |
489 | node->scanned_block = 0; | 568 | node->scanned_block = 0; |
490 | node->free = 0; | ||
491 | 569 | ||
492 | prev_node = list_entry(node->free_stack.prev, struct drm_mm_node, | 570 | prev_node = list_entry(node->node_list.prev, struct drm_mm_node, |
493 | free_stack); | 571 | node_list); |
494 | next_node = list_entry(node->free_stack.next, struct drm_mm_node, | ||
495 | free_stack); | ||
496 | 572 | ||
497 | if (prev_node) { | 573 | prev_node->hole_follows = node->scanned_preceeds_hole; |
498 | BUG_ON(!prev_node->scanned_prev_free); | 574 | INIT_LIST_HEAD(&node->node_list); |
499 | prev_node->scanned_prev_free = 0; | 575 | list_add(&node->node_list, &prev_node->node_list); |
500 | |||
501 | list_add_tail(&prev_node->node_list, &node->node_list); | ||
502 | |||
503 | node->start = prev_node->start + prev_node->size; | ||
504 | node->size -= prev_node->size; | ||
505 | } | ||
506 | |||
507 | if (next_node) { | ||
508 | BUG_ON(!next_node->scanned_next_free); | ||
509 | next_node->scanned_next_free = 0; | ||
510 | |||
511 | list_add(&next_node->node_list, &node->node_list); | ||
512 | |||
513 | node->size -= next_node->size; | ||
514 | } | ||
515 | |||
516 | INIT_LIST_HEAD(&node->free_stack); | ||
517 | 576 | ||
518 | /* Only need to check for containment because start&size for the | 577 | /* Only need to check for containment because start&size for the |
519 | * complete resulting free block (not just the desired part) is | 578 | * complete resulting free block (not just the desired part) is |
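[Editor's note] The scan helpers are meant to be driven in a strict two-phase pattern by eviction code. A hedged sketch of the expected call sequence; struct gem_obj and its lru_link/scan_link/mm_node/evict_me fields are hypothetical driver bookkeeping, not part of this API:

    /* Phase 1 feeds LRU candidates until a big-enough hole appears;
     * phase 2 must remove every added block again, last-added first,
     * which the LIFO unwind list below guarantees. */
    static bool find_evictable_hole(struct drm_mm *mm, struct list_head *lru,
                                    unsigned long size, unsigned alignment)
    {
            struct gem_obj *obj, *tmp;
            LIST_HEAD(unwind);
            bool found = false;

            drm_mm_init_scan(mm, size, alignment);

            list_for_each_entry(obj, lru, lru_link) {
                    list_add(&obj->scan_link, &unwind);
                    if (drm_mm_scan_add_block(&obj->mm_node)) {
                            found = true;
                            break;
                    }
            }

            list_for_each_entry_safe(obj, tmp, &unwind, scan_link) {
                    /* Nonzero return marks a block inside the winning hole. */
                    if (drm_mm_scan_remove_block(&obj->mm_node) && found)
                            obj->evict_me = true;   /* evict after the unwind */
                    list_del(&obj->scan_link);
            }

            return found;
    }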
@@ -530,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block); | |||
530 | 589 | ||
531 | int drm_mm_clean(struct drm_mm * mm) | 590 | int drm_mm_clean(struct drm_mm * mm) |
532 | { | 591 | { |
533 | struct list_head *head = &mm->node_list; | 592 | struct list_head *head = &mm->head_node.node_list; |
534 | 593 | ||
535 | return (head->next->next == head); | 594 | return (head->next->next == head); |
536 | } | 595 | } |
@@ -538,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean); | |||
538 | 597 | ||
539 | int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | 598 | int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) |
540 | { | 599 | { |
541 | INIT_LIST_HEAD(&mm->node_list); | 600 | INIT_LIST_HEAD(&mm->hole_stack); |
542 | INIT_LIST_HEAD(&mm->free_stack); | ||
543 | INIT_LIST_HEAD(&mm->unused_nodes); | 601 | INIT_LIST_HEAD(&mm->unused_nodes); |
544 | mm->num_unused = 0; | 602 | mm->num_unused = 0; |
545 | mm->scanned_blocks = 0; | 603 | mm->scanned_blocks = 0; |
546 | spin_lock_init(&mm->unused_lock); | 604 | spin_lock_init(&mm->unused_lock); |
547 | 605 | ||
548 | return drm_mm_create_tail_node(mm, start, size, 0); | 606 | /* Clever trick to avoid a special case in the free hole tracking. */ |
607 | INIT_LIST_HEAD(&mm->head_node.node_list); | ||
608 | INIT_LIST_HEAD(&mm->head_node.hole_stack); | ||
609 | mm->head_node.hole_follows = 1; | ||
610 | mm->head_node.scanned_block = 0; | ||
611 | mm->head_node.scanned_prev_free = 0; | ||
612 | mm->head_node.scanned_next_free = 0; | ||
613 | mm->head_node.mm = mm; | ||
614 | mm->head_node.start = start + size; | ||
615 | mm->head_node.size = start - mm->head_node.start; | ||
616 | list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); | ||
617 | |||
618 | return 0; | ||
549 | } | 619 | } |
550 | EXPORT_SYMBOL(drm_mm_init); | 620 | EXPORT_SYMBOL(drm_mm_init); |
551 | 621 | ||
552 | void drm_mm_takedown(struct drm_mm * mm) | 622 | void drm_mm_takedown(struct drm_mm * mm) |
553 | { | 623 | { |
554 | struct list_head *bnode = mm->free_stack.next; | 624 | struct drm_mm_node *entry, *next; |
555 | struct drm_mm_node *entry; | ||
556 | struct drm_mm_node *next; | ||
557 | |||
558 | entry = list_entry(bnode, struct drm_mm_node, free_stack); | ||
559 | 625 | ||
560 | if (entry->node_list.next != &mm->node_list || | 626 | if (!list_empty(&mm->head_node.node_list)) { |
561 | entry->free_stack.next != &mm->free_stack) { | ||
562 | DRM_ERROR("Memory manager not clean. Delaying takedown\n"); | 627 | DRM_ERROR("Memory manager not clean. Delaying takedown\n"); |
563 | return; | 628 | return; |
564 | } | 629 | } |
565 | 630 | ||
566 | list_del(&entry->free_stack); | ||
567 | list_del(&entry->node_list); | ||
568 | kfree(entry); | ||
569 | |||
570 | spin_lock(&mm->unused_lock); | 631 | spin_lock(&mm->unused_lock); |
571 | list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) { | 632 | list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) { |
572 | list_del(&entry->free_stack); | 633 | list_del(&entry->node_list); |
573 | kfree(entry); | 634 | kfree(entry); |
574 | --mm->num_unused; | 635 | --mm->num_unused; |
575 | } | 636 | } |
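[Editor's note] The "clever trick" comment in drm_mm_init deserves a worked example: head_node is a sentinel whose hole spans the whole managed range, so an empty manager needs no special casing anywhere. For drm_mm_init(mm, start, size), with the hole helpers sketched earlier:

    /*
     * head_node.start = start + size
     * head_node.size  = start - (start + size)   (unsigned, wraps to -size)
     *
     * hole_start = head_node.start + head_node.size
     *            = (start + size) - size          = start
     * hole_end   = next node's start, which on an empty list is
     *              head_node.start itself          = start + size
     *
     * The sentinel's hole is therefore exactly [start, start + size),
     * the full managed range, and every allocation path can assume at
     * least one hole node sits on hole_stack.
     */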
@@ -582,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown); | |||
582 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) | 643 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) |
583 | { | 644 | { |
584 | struct drm_mm_node *entry; | 645 | struct drm_mm_node *entry; |
585 | int total_used = 0, total_free = 0, total = 0; | 646 | unsigned long total_used = 0, total_free = 0, total = 0; |
586 | 647 | unsigned long hole_start, hole_end, hole_size; | |
587 | list_for_each_entry(entry, &mm->node_list, node_list) { | 648 | |
588 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n", | 649 | hole_start = drm_mm_hole_node_start(&mm->head_node); |
650 | hole_end = drm_mm_hole_node_end(&mm->head_node); | ||
651 | hole_size = hole_end - hole_start; | ||
652 | if (hole_size) | ||
653 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", | ||
654 | prefix, hole_start, hole_end, | ||
655 | hole_size); | ||
656 | total_free += hole_size; | ||
657 | |||
658 | drm_mm_for_each_node(entry, mm) { | ||
659 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", | ||
589 | prefix, entry->start, entry->start + entry->size, | 660 | prefix, entry->start, entry->start + entry->size, |
590 | entry->size, entry->free ? "free" : "used"); | 661 | entry->size); |
591 | total += entry->size; | 662 | total_used += entry->size; |
592 | if (entry->free) | 663 | |
593 | total_free += entry->size; | 664 | if (entry->hole_follows) { |
594 | else | 665 | hole_start = drm_mm_hole_node_start(entry); |
595 | total_used += entry->size; | 666 | hole_end = drm_mm_hole_node_end(entry); |
667 | hole_size = hole_end - hole_start; | ||
668 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", | ||
669 | prefix, hole_start, hole_end, | ||
670 | hole_size); | ||
671 | total_free += hole_size; | ||
672 | } | ||
596 | } | 673 | } |
597 | printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total, | 674 | total = total_free + total_used; |
675 | |||
676 | printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total, | ||
598 | total_used, total_free); | 677 | total_used, total_free); |
599 | } | 678 | } |
600 | EXPORT_SYMBOL(drm_mm_debug_table); | 679 | EXPORT_SYMBOL(drm_mm_debug_table); |
@@ -603,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table); | |||
603 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | 682 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) |
604 | { | 683 | { |
605 | struct drm_mm_node *entry; | 684 | struct drm_mm_node *entry; |
606 | int total_used = 0, total_free = 0, total = 0; | 685 | unsigned long total_used = 0, total_free = 0, total = 0; |
607 | 686 | unsigned long hole_start, hole_end, hole_size; | |
608 | list_for_each_entry(entry, &mm->node_list, node_list) { | 687 | |
609 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used"); | 688 | hole_start = drm_mm_hole_node_start(&mm->head_node); |
610 | total += entry->size; | 689 | hole_end = drm_mm_hole_node_end(&mm->head_node); |
611 | if (entry->free) | 690 | hole_size = hole_end - hole_start; |
612 | total_free += entry->size; | 691 | if (hole_size) |
613 | else | 692 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", |
614 | total_used += entry->size; | 693 | hole_start, hole_end, hole_size); |
694 | total_free += hole_size; | ||
695 | |||
696 | drm_mm_for_each_node(entry, mm) { | ||
697 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", | ||
698 | entry->start, entry->start + entry->size, | ||
699 | entry->size); | ||
700 | total_used += entry->size; | ||
701 | if (entry->hole_follows) { | ||
702 | hole_start = drm_mm_hole_node_start(entry); | ||
703 | hole_end = drm_mm_hole_node_end(entry); | ||
704 | hole_size = hole_end - hole_start; | ||
705 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", | ||
706 | hole_start, hole_end, hole_size); | ||
707 | total_free += hole_size; | ||
708 | } | ||
615 | } | 709 | } |
616 | seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free); | 710 | total = total_free + total_used; |
711 | |||
712 | seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free); | ||
617 | return 0; | 713 | return 0; |
618 | } | 714 | } |
619 | EXPORT_SYMBOL(drm_mm_dump_table); | 715 | EXPORT_SYMBOL(drm_mm_dump_table); |
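[Editor's note] Taken together, drm_mm now offers two allocation styles: the legacy drm_mm_get_block()/drm_mm_put_block() pair, which kmallocs nodes internally, and the new drm_mm_insert_node()/drm_mm_remove_node() pair for nodes embedded in a driver object. A minimal sketch of the embedded style; struct my_buffer and the helper names are illustrative:

    struct my_buffer {
            struct drm_mm_node node;        /* embedded, no separate kmalloc */
            /* ... driver payload ... */
    };

    static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
                              unsigned long size, unsigned alignment)
    {
            /* The node must be cleared before first use, per the kerneldoc. */
            memset(&buf->node, 0, sizeof(buf->node));
            return drm_mm_insert_node(mm, &buf->node, size, alignment);
    }

    static void my_buffer_unbind(struct my_buffer *buf)
    {
            drm_mm_remove_node(&buf->node); /* no kfree: storage is embedded */
    }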
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 58e65f92c232..25bf87390f53 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -593,7 +593,7 @@ EXPORT_SYMBOL(drm_mode_height); | |||
593 | * | 593 | * |
594 | * Return @mode's hsync rate in kHz, rounded to the nearest int. | 594 | * Return @mode's hsync rate in kHz, rounded to the nearest int. |
595 | */ | 595 | */ |
596 | int drm_mode_hsync(struct drm_display_mode *mode) | 596 | int drm_mode_hsync(const struct drm_display_mode *mode) |
597 | { | 597 | { |
598 | unsigned int calc_val; | 598 | unsigned int calc_val; |
599 | 599 | ||
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(drm_mode_hsync); | |||
627 | * If it is 70.288, it will return 70Hz. | 627 | * If it is 70.288, it will return 70Hz. |
628 | * If it is 59.6, it will return 60Hz. | 628 | * If it is 59.6, it will return 60Hz. |
629 | */ | 629 | */ |
630 | int drm_mode_vrefresh(struct drm_display_mode *mode) | 630 | int drm_mode_vrefresh(const struct drm_display_mode *mode) |
631 | { | 631 | { |
632 | int refresh = 0; | 632 | int refresh = 0; |
633 | unsigned int calc_val; | 633 | unsigned int calc_val; |
@@ -725,7 +725,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo); | |||
725 | * a pointer to it. Used to create new instances of established modes. | 725 | * a pointer to it. Used to create new instances of established modes. |
726 | */ | 726 | */ |
727 | struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, | 727 | struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, |
728 | struct drm_display_mode *mode) | 728 | const struct drm_display_mode *mode) |
729 | { | 729 | { |
730 | struct drm_display_mode *nmode; | 730 | struct drm_display_mode *nmode; |
731 | int new_id; | 731 | int new_id; |
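[Editor's note] The drm_modes.c hunks are pure constification: computing hsync/vrefresh rates and duplicating a mode are read-only operations, so callers holding only a const pointer can now use them. A trivial, hypothetical caller:

    /* Filtering a read-only mode list by refresh rate is now legal. */
    static bool mode_is_fast_enough(const struct drm_display_mode *mode)
    {
            return drm_mode_vrefresh(mode) >= 60;
    }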
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index f5bd9e590c80..e1aee4f6a7c6 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
@@ -125,6 +125,176 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) | |||
125 | EXPORT_SYMBOL(drm_pci_free); | 125 | EXPORT_SYMBOL(drm_pci_free); |
126 | 126 | ||
127 | #ifdef CONFIG_PCI | 127 | #ifdef CONFIG_PCI |
128 | |||
129 | static int drm_get_pci_domain(struct drm_device *dev) | ||
130 | { | ||
131 | #ifndef __alpha__ | ||
132 | /* For historical reasons, drm_get_pci_domain() is busticated | ||
133 | * on most archs and has to remain so for userspace interface | ||
134 | * < 1.4, except on alpha which was right from the beginning | ||
135 | */ | ||
136 | if (dev->if_version < 0x10004) | ||
137 | return 0; | ||
138 | #endif /* __alpha__ */ | ||
139 | |||
140 | return pci_domain_nr(dev->pdev->bus); | ||
141 | } | ||
142 | |||
143 | static int drm_pci_get_irq(struct drm_device *dev) | ||
144 | { | ||
145 | return dev->pdev->irq; | ||
146 | } | ||
147 | |||
148 | static const char *drm_pci_get_name(struct drm_device *dev) | ||
149 | { | ||
150 | struct pci_driver *pdriver = dev->driver->kdriver.pci; | ||
151 | return pdriver->name; | ||
152 | } | ||
153 | |||
154 | int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) | ||
155 | { | ||
156 | int len, ret; | ||
157 | struct pci_driver *pdriver = dev->driver->kdriver.pci; | ||
158 | master->unique_len = 40; | ||
159 | master->unique_size = master->unique_len; | ||
160 | master->unique = kmalloc(master->unique_size, GFP_KERNEL); | ||
161 | if (master->unique == NULL) | ||
162 | return -ENOMEM; | ||
163 | |||
164 | |||
165 | len = snprintf(master->unique, master->unique_len, | ||
166 | "pci:%04x:%02x:%02x.%d", | ||
167 | drm_get_pci_domain(dev), | ||
168 | dev->pdev->bus->number, | ||
169 | PCI_SLOT(dev->pdev->devfn), | ||
170 | PCI_FUNC(dev->pdev->devfn)); | ||
171 | |||
172 | if (len >= master->unique_len) { | ||
173 | DRM_ERROR("buffer overflow"); | ||
174 | ret = -EINVAL; | ||
175 | goto err; | ||
176 | } else | ||
177 | master->unique_len = len; | ||
178 | |||
179 | dev->devname = | ||
180 | kmalloc(strlen(pdriver->name) + | ||
181 | master->unique_len + 2, GFP_KERNEL); | ||
182 | |||
183 | if (dev->devname == NULL) { | ||
184 | ret = -ENOMEM; | ||
185 | goto err; | ||
186 | } | ||
187 | |||
188 | sprintf(dev->devname, "%s@%s", pdriver->name, | ||
189 | master->unique); | ||
190 | |||
191 | return 0; | ||
192 | err: | ||
193 | return ret; | ||
194 | } | ||
195 | |||
196 | int drm_pci_set_unique(struct drm_device *dev, | ||
197 | struct drm_master *master, | ||
198 | struct drm_unique *u) | ||
199 | { | ||
200 | int domain, bus, slot, func, ret; | ||
201 | const char *bus_name; | ||
202 | |||
203 | master->unique_len = u->unique_len; | ||
204 | master->unique_size = u->unique_len + 1; | ||
205 | master->unique = kmalloc(master->unique_size, GFP_KERNEL); | ||
206 | if (!master->unique) { | ||
207 | ret = -ENOMEM; | ||
208 | goto err; | ||
209 | } | ||
210 | |||
211 | if (copy_from_user(master->unique, u->unique, master->unique_len)) { | ||
212 | ret = -EFAULT; | ||
213 | goto err; | ||
214 | } | ||
215 | |||
216 | master->unique[master->unique_len] = '\0'; | ||
217 | |||
218 | bus_name = dev->driver->bus->get_name(dev); | ||
219 | dev->devname = kmalloc(strlen(bus_name) + | ||
220 | strlen(master->unique) + 2, GFP_KERNEL); | ||
221 | if (!dev->devname) { | ||
222 | ret = -ENOMEM; | ||
223 | goto err; | ||
224 | } | ||
225 | |||
226 | sprintf(dev->devname, "%s@%s", bus_name, | ||
227 | master->unique); | ||
228 | |||
229 | /* Return error if the busid submitted doesn't match the device's actual | ||
230 | * busid. | ||
231 | */ | ||
232 | ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func); | ||
233 | if (ret != 3) { | ||
234 | ret = -EINVAL; | ||
235 | goto err; | ||
236 | } | ||
237 | |||
238 | domain = bus >> 8; | ||
239 | bus &= 0xff; | ||
240 | |||
241 | if ((domain != drm_get_pci_domain(dev)) || | ||
242 | (bus != dev->pdev->bus->number) || | ||
243 | (slot != PCI_SLOT(dev->pdev->devfn)) || | ||
244 | (func != PCI_FUNC(dev->pdev->devfn))) { | ||
245 | ret = -EINVAL; | ||
246 | goto err; | ||
247 | } | ||
248 | return 0; | ||
249 | err: | ||
250 | return ret; | ||
251 | } | ||
252 | |||
253 | |||
254 | int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) | ||
255 | { | ||
256 | if ((p->busnum >> 8) != drm_get_pci_domain(dev) || | ||
257 | (p->busnum & 0xff) != dev->pdev->bus->number || | ||
258 | p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) | ||
259 | return -EINVAL; | ||
260 | |||
261 | p->irq = dev->pdev->irq; | ||
262 | |||
263 | DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, | ||
264 | p->irq); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | int drm_pci_agp_init(struct drm_device *dev) | ||
269 | { | ||
270 | if (drm_core_has_AGP(dev)) { | ||
271 | if (drm_pci_device_is_agp(dev)) | ||
272 | dev->agp = drm_agp_init(dev); | ||
273 | if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) | ||
274 | && (dev->agp == NULL)) { | ||
275 | DRM_ERROR("Cannot initialize the agpgart module.\n"); | ||
276 | return -EINVAL; | ||
277 | } | ||
278 | if (drm_core_has_MTRR(dev)) { | ||
279 | if (dev->agp) | ||
280 | dev->agp->agp_mtrr = | ||
281 | mtrr_add(dev->agp->agp_info.aper_base, | ||
282 | dev->agp->agp_info.aper_size * | ||
283 | 1024 * 1024, MTRR_TYPE_WRCOMB, 1); | ||
284 | } | ||
285 | } | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | static struct drm_bus drm_pci_bus = { | ||
290 | .bus_type = DRIVER_BUS_PCI, | ||
291 | .get_irq = drm_pci_get_irq, | ||
292 | .get_name = drm_pci_get_name, | ||
293 | .set_busid = drm_pci_set_busid, | ||
294 | .set_unique = drm_pci_set_unique, | ||
295 | .agp_init = drm_pci_agp_init, | ||
296 | }; | ||
297 | |||
128 | /** | 298 | /** |
129 | * Register. | 299 | * Register. |
130 | * | 300 | * |
@@ -219,7 +389,7 @@ err_g1: | |||
219 | EXPORT_SYMBOL(drm_get_pci_dev); | 389 | EXPORT_SYMBOL(drm_get_pci_dev); |
220 | 390 | ||
221 | /** | 391 | /** |
222 | * PCI device initialization. Called via drm_init at module load time, | 392 | * PCI device initialization. Called directly from modules at load time. |
223 | * | 393 | * |
224 | * \return zero on success or a negative number on failure. | 394 | * \return zero on success or a negative number on failure. |
225 | * | 395 | * |
@@ -229,18 +399,24 @@ EXPORT_SYMBOL(drm_get_pci_dev); | |||
229 | * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and | 399 | * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and |
230 | * after the initialization for driver customization. | 400 | * after the initialization for driver customization. |
231 | */ | 401 | */ |
232 | int drm_pci_init(struct drm_driver *driver) | 402 | int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) |
233 | { | 403 | { |
234 | struct pci_dev *pdev = NULL; | 404 | struct pci_dev *pdev = NULL; |
235 | const struct pci_device_id *pid; | 405 | const struct pci_device_id *pid; |
236 | int i; | 406 | int i; |
237 | 407 | ||
408 | DRM_DEBUG("\n"); | ||
409 | |||
410 | INIT_LIST_HEAD(&driver->device_list); | ||
411 | driver->kdriver.pci = pdriver; | ||
412 | driver->bus = &drm_pci_bus; | ||
413 | |||
238 | if (driver->driver_features & DRIVER_MODESET) | 414 | if (driver->driver_features & DRIVER_MODESET) |
239 | return pci_register_driver(&driver->pci_driver); | 415 | return pci_register_driver(pdriver); |
240 | 416 | ||
241 | /* If not using KMS, fall back to stealth mode manual scanning. */ | 417 | /* If not using KMS, fall back to stealth mode manual scanning. */ |
242 | for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { | 418 | for (i = 0; pdriver->id_table[i].vendor != 0; i++) { |
243 | pid = &driver->pci_driver.id_table[i]; | 419 | pid = &pdriver->id_table[i]; |
244 | 420 | ||
245 | /* Loop around setting up a DRM device for each PCI device | 421 | /* Loop around setting up a DRM device for each PCI device |
246 | * matching our ID and device class. If we had the internal | 422 | * matching our ID and device class. If we had the internal |
@@ -265,10 +441,27 @@ int drm_pci_init(struct drm_driver *driver) | |||
265 | 441 | ||
266 | #else | 442 | #else |
267 | 443 | ||
268 | int drm_pci_init(struct drm_driver *driver) | 444 | int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) |
269 | { | 445 | { |
270 | return -1; | 446 | return -1; |
271 | } | 447 | } |
272 | 448 | ||
273 | #endif | 449 | #endif |
450 | |||
451 | EXPORT_SYMBOL(drm_pci_init); | ||
452 | |||
274 | /*@}*/ | 453 | /*@}*/ |
454 | void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) | ||
455 | { | ||
456 | struct drm_device *dev, *tmp; | ||
457 | DRM_DEBUG("\n"); | ||
458 | |||
459 | if (driver->driver_features & DRIVER_MODESET) { | ||
460 | pci_unregister_driver(pdriver); | ||
461 | } else { | ||
462 | list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) | ||
463 | drm_put_dev(dev); | ||
464 | } | ||
465 | DRM_INFO("Module unloaded\n"); | ||
466 | } | ||
467 | EXPORT_SYMBOL(drm_pci_exit); | ||
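[Editor's note] With drm_init() gone, each PCI driver now owns its struct pci_driver and hands it to the core explicitly; the i810 hunks further down do exactly this. The resulting module boilerplate, sketched with illustrative mydrv_* names:

    static struct pci_driver mydrv_pci_driver = {
            .name           = "mydrv",
            .id_table       = mydrv_pciidlist,      /* hypothetical ID table */
    };

    static int __init mydrv_init(void)
    {
            /* Stashes pdriver in driver->kdriver.pci and points driver->bus
             * at drm_pci_bus before registering with the PCI core. */
            return drm_pci_init(&mydrv_driver, &mydrv_pci_driver);
    }

    static void __exit mydrv_exit(void)
    {
            drm_pci_exit(&mydrv_driver, &mydrv_pci_driver);
    }

    module_init(mydrv_init);
    module_exit(mydrv_exit);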
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index 92d1d0fb7b75..7223f06d8e58 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c | |||
@@ -109,8 +109,60 @@ err_g1: | |||
109 | } | 109 | } |
110 | EXPORT_SYMBOL(drm_get_platform_dev); | 110 | EXPORT_SYMBOL(drm_get_platform_dev); |
111 | 111 | ||
112 | static int drm_platform_get_irq(struct drm_device *dev) | ||
113 | { | ||
114 | return platform_get_irq(dev->platformdev, 0); | ||
115 | } | ||
116 | |||
117 | static const char *drm_platform_get_name(struct drm_device *dev) | ||
118 | { | ||
119 | return dev->platformdev->name; | ||
120 | } | ||
121 | |||
122 | static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master) | ||
123 | { | ||
124 | int len, ret; | ||
125 | |||
126 | master->unique_len = 10 + strlen(dev->platformdev->name); | ||
127 | master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL); | ||
128 | |||
129 | if (master->unique == NULL) | ||
130 | return -ENOMEM; | ||
131 | |||
132 | len = snprintf(master->unique, master->unique_len, | ||
133 | "platform:%s", dev->platformdev->name); | ||
134 | |||
135 | if (len > master->unique_len) { | ||
136 | DRM_ERROR("Unique buffer overflowed\n"); | ||
137 | ret = -EINVAL; | ||
138 | goto err; | ||
139 | } | ||
140 | |||
141 | dev->devname = | ||
142 | kmalloc(strlen(dev->platformdev->name) + | ||
143 | master->unique_len + 2, GFP_KERNEL); | ||
144 | |||
145 | if (dev->devname == NULL) { | ||
146 | ret = -ENOMEM; | ||
147 | goto err; | ||
148 | } | ||
149 | |||
150 | sprintf(dev->devname, "%s@%s", dev->platformdev->name, | ||
151 | master->unique); | ||
152 | return 0; | ||
153 | err: | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | static struct drm_bus drm_platform_bus = { | ||
158 | .bus_type = DRIVER_BUS_PLATFORM, | ||
159 | .get_irq = drm_platform_get_irq, | ||
160 | .get_name = drm_platform_get_name, | ||
161 | .set_busid = drm_platform_set_busid, | ||
162 | }; | ||
163 | |||
112 | /** | 164 | /** |
113 | * Platform device initialization. Called via drm_init at module load time, | 165 | * Platform device initialization. Called directly from modules. |
114 | * | 166 | * |
115 | * \return zero on success or a negative number on failure. | 167 | * \return zero on success or a negative number on failure. |
116 | * | 168 | * |
@@ -121,7 +173,24 @@ EXPORT_SYMBOL(drm_get_platform_dev); | |||
121 | * after the initialization for driver customization. | 173 | * after the initialization for driver customization. |
122 | */ | 174 | */ |
123 | 175 | ||
124 | int drm_platform_init(struct drm_driver *driver) | 176 | int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device) |
125 | { | 177 | { |
126 | return drm_get_platform_dev(driver->platform_device, driver); | 178 | DRM_DEBUG("\n"); |
179 | |||
180 | driver->kdriver.platform_device = platform_device; | ||
181 | driver->bus = &drm_platform_bus; | ||
182 | INIT_LIST_HEAD(&driver->device_list); | ||
183 | return drm_get_platform_dev(platform_device, driver); | ||
184 | } | ||
185 | EXPORT_SYMBOL(drm_platform_init); | ||
186 | |||
187 | void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device) | ||
188 | { | ||
189 | struct drm_device *dev, *tmp; | ||
190 | DRM_DEBUG("\n"); | ||
191 | |||
192 | list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item) | ||
193 | drm_put_dev(dev); | ||
194 | DRM_INFO("Module unloaded\n"); | ||
127 | } | 195 | } |
196 | EXPORT_SYMBOL(drm_platform_exit); | ||
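[Editor's note] The platform bus gets the same split between struct drm_driver and the kernel bus driver. A sketch of the new entry points; the mydrm_* names are illustrative, and the platform_device is assumed to come from board code or a probe path:

    static int __init mydrm_init(void)
    {
            return drm_platform_init(&mydrm_driver, mydrm_platform_device);
    }

    static void __exit mydrm_exit(void)
    {
            drm_platform_exit(&mydrm_driver, mydrm_platform_device);
    }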
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index d59edc18301f..001273d57f2d 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -269,25 +269,14 @@ int drm_fill_in_dev(struct drm_device *dev, | |||
269 | 269 | ||
270 | dev->driver = driver; | 270 | dev->driver = driver; |
271 | 271 | ||
272 | if (drm_core_has_AGP(dev)) { | 272 | if (dev->driver->bus->agp_init) { |
273 | if (drm_device_is_agp(dev)) | 273 | retcode = dev->driver->bus->agp_init(dev); |
274 | dev->agp = drm_agp_init(dev); | 274 | if (retcode) |
275 | if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) | ||
276 | && (dev->agp == NULL)) { | ||
277 | DRM_ERROR("Cannot initialize the agpgart module.\n"); | ||
278 | retcode = -EINVAL; | ||
279 | goto error_out_unreg; | 275 | goto error_out_unreg; |
280 | } | ||
281 | if (drm_core_has_MTRR(dev)) { | ||
282 | if (dev->agp) | ||
283 | dev->agp->agp_mtrr = | ||
284 | mtrr_add(dev->agp->agp_info.aper_base, | ||
285 | dev->agp->agp_info.aper_size * | ||
286 | 1024 * 1024, MTRR_TYPE_WRCOMB, 1); | ||
287 | } | ||
288 | } | 276 | } |
289 | 277 | ||
290 | 278 | ||
279 | |||
291 | retcode = drm_ctxbitmap_init(dev); | 280 | retcode = drm_ctxbitmap_init(dev); |
292 | if (retcode) { | 281 | if (retcode) { |
293 | DRM_ERROR("Cannot allocate memory for context bitmap.\n"); | 282 | DRM_ERROR("Cannot allocate memory for context bitmap.\n"); |
@@ -425,7 +414,6 @@ int drm_put_minor(struct drm_minor **minor_p) | |||
425 | * | 414 | * |
426 | * Cleans up all DRM device, calling drm_lastclose(). | 415 | * Cleans up all DRM device, calling drm_lastclose(). |
427 | * | 416 | * |
428 | * \sa drm_init | ||
429 | */ | 417 | */ |
430 | void drm_put_dev(struct drm_device *dev) | 418 | void drm_put_dev(struct drm_device *dev) |
431 | { | 419 | { |
@@ -475,6 +463,7 @@ void drm_put_dev(struct drm_device *dev) | |||
475 | 463 | ||
476 | drm_put_minor(&dev->primary); | 464 | drm_put_minor(&dev->primary); |
477 | 465 | ||
466 | list_del(&dev->driver_item); | ||
478 | if (dev->devname) { | 467 | if (dev->devname) { |
479 | kfree(dev->devname); | 468 | kfree(dev->devname); |
480 | dev->devname = NULL; | 469 | dev->devname = NULL; |
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c new file mode 100644 index 000000000000..206d2300d873 --- /dev/null +++ b/drivers/gpu/drm/drm_usb.c | |||
@@ -0,0 +1,117 @@ | |||
1 | #include "drmP.h" | ||
2 | #include <linux/usb.h> | ||
3 | |||
4 | #ifdef CONFIG_USB | ||
5 | int drm_get_usb_dev(struct usb_interface *interface, | ||
6 | const struct usb_device_id *id, | ||
7 | struct drm_driver *driver) | ||
8 | { | ||
9 | struct drm_device *dev; | ||
10 | struct usb_device *usbdev; | ||
11 | int ret; | ||
12 | |||
13 | DRM_DEBUG("\n"); | ||
14 | |||
15 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
16 | if (!dev) | ||
17 | return -ENOMEM; | ||
18 | |||
19 | usbdev = interface_to_usbdev(interface); | ||
20 | dev->usbdev = usbdev; | ||
21 | dev->dev = &usbdev->dev; | ||
22 | |||
23 | mutex_lock(&drm_global_mutex); | ||
24 | |||
25 | ret = drm_fill_in_dev(dev, NULL, driver); | ||
26 | if (ret) { | ||
27 | printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); | ||
28 | goto err_g1; | ||
29 | } | ||
30 | |||
31 | usb_set_intfdata(interface, dev); | ||
32 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); | ||
33 | if (ret) | ||
34 | goto err_g1; | ||
35 | |||
36 | ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); | ||
37 | if (ret) | ||
38 | goto err_g2; | ||
39 | |||
40 | if (dev->driver->load) { | ||
41 | ret = dev->driver->load(dev, 0); | ||
42 | if (ret) | ||
43 | goto err_g3; | ||
44 | } | ||
45 | |||
46 | /* setup the grouping for the legacy output */ | ||
47 | ret = drm_mode_group_init_legacy_group(dev, | ||
48 | &dev->primary->mode_group); | ||
49 | if (ret) | ||
50 | goto err_g3; | ||
51 | |||
52 | list_add_tail(&dev->driver_item, &driver->device_list); | ||
53 | |||
54 | mutex_unlock(&drm_global_mutex); | ||
55 | |||
56 | DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", | ||
57 | driver->name, driver->major, driver->minor, driver->patchlevel, | ||
58 | driver->date, dev->primary->index); | ||
59 | |||
60 | return 0; | ||
61 | |||
62 | err_g3: | ||
63 | drm_put_minor(&dev->primary); | ||
64 | err_g2: | ||
65 | drm_put_minor(&dev->control); | ||
66 | err_g1: | ||
67 | kfree(dev); | ||
68 | mutex_unlock(&drm_global_mutex); | ||
69 | return ret; | ||
70 | |||
71 | } | ||
72 | EXPORT_SYMBOL(drm_get_usb_dev); | ||
73 | |||
74 | static int drm_usb_get_irq(struct drm_device *dev) | ||
75 | { | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static const char *drm_usb_get_name(struct drm_device *dev) | ||
80 | { | ||
81 | return "USB"; | ||
82 | } | ||
83 | |||
84 | static int drm_usb_set_busid(struct drm_device *dev, | ||
85 | struct drm_master *master) | ||
86 | { | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static struct drm_bus drm_usb_bus = { | ||
91 | .bus_type = DRIVER_BUS_USB, | ||
92 | .get_irq = drm_usb_get_irq, | ||
93 | .get_name = drm_usb_get_name, | ||
94 | .set_busid = drm_usb_set_busid, | ||
95 | }; | ||
96 | |||
97 | int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver) | ||
98 | { | ||
99 | int res; | ||
100 | DRM_DEBUG("\n"); | ||
101 | |||
102 | INIT_LIST_HEAD(&driver->device_list); | ||
103 | driver->kdriver.usb = udriver; | ||
104 | driver->bus = &drm_usb_bus; | ||
105 | |||
106 | res = usb_register(udriver); | ||
107 | return res; | ||
108 | } | ||
109 | EXPORT_SYMBOL(drm_usb_init); | ||
110 | |||
111 | void drm_usb_exit(struct drm_driver *driver, | ||
112 | struct usb_driver *udriver) | ||
113 | { | ||
114 | usb_deregister(udriver); | ||
115 | } | ||
116 | EXPORT_SYMBOL(drm_usb_exit); | ||
117 | #endif | ||
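[Editor's note] drm_usb.c is entirely new: USB-attached DRM devices register a usb_driver whose probe hands off to drm_get_usb_dev(). A hedged sketch of the expected glue; the myusb_* names and ID table are illustrative:

    static int myusb_probe(struct usb_interface *interface,
                           const struct usb_device_id *id)
    {
            return drm_get_usb_dev(interface, id, &myusb_drm_driver);
    }

    static struct usb_driver myusb_usb_driver = {
            .name           = "myusb_drm",
            .probe          = myusb_probe,
            .id_table       = myusb_id_table,
            /* .disconnect would fetch the device via usb_get_intfdata()
             * and call drm_put_dev() on it */
    };

    static int __init myusb_init(void)
    {
            return drm_usb_init(&myusb_drm_driver, &myusb_usb_driver);
    }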
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index ff33e53bbbf8..8f371e8d630f 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/interrupt.h> /* For task queue support */ | 37 | #include <linux/interrupt.h> /* For task queue support */ |
38 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/smp_lock.h> | ||
41 | #include <linux/pagemap.h> | 40 | #include <linux/pagemap.h> |
42 | 41 | ||
43 | #define I810_BUF_FREE 2 | 42 | #define I810_BUF_FREE 2 |
@@ -94,7 +93,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
94 | struct drm_buf *buf; | 93 | struct drm_buf *buf; |
95 | drm_i810_buf_priv_t *buf_priv; | 94 | drm_i810_buf_priv_t *buf_priv; |
96 | 95 | ||
97 | lock_kernel(); | ||
98 | dev = priv->minor->dev; | 96 | dev = priv->minor->dev; |
99 | dev_priv = dev->dev_private; | 97 | dev_priv = dev->dev_private; |
100 | buf = dev_priv->mmap_buffer; | 98 | buf = dev_priv->mmap_buffer; |
@@ -104,7 +102,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
104 | vma->vm_file = filp; | 102 | vma->vm_file = filp; |
105 | 103 | ||
106 | buf_priv->currently_mapped = I810_BUF_MAPPED; | 104 | buf_priv->currently_mapped = I810_BUF_MAPPED; |
107 | unlock_kernel(); | ||
108 | 105 | ||
109 | if (io_remap_pfn_range(vma, vma->vm_start, | 106 | if (io_remap_pfn_range(vma, vma->vm_start, |
110 | vma->vm_pgoff, | 107 | vma->vm_pgoff, |
@@ -116,7 +113,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
116 | static const struct file_operations i810_buffer_fops = { | 113 | static const struct file_operations i810_buffer_fops = { |
117 | .open = drm_open, | 114 | .open = drm_open, |
118 | .release = drm_release, | 115 | .release = drm_release, |
119 | .unlocked_ioctl = i810_ioctl, | 116 | .unlocked_ioctl = drm_ioctl, |
120 | .mmap = i810_mmap_buffers, | 117 | .mmap = i810_mmap_buffers, |
121 | .fasync = drm_fasync, | 118 | .fasync = drm_fasync, |
122 | .llseek = noop_llseek, | 119 | .llseek = noop_llseek, |
@@ -1242,19 +1239,6 @@ int i810_driver_dma_quiescent(struct drm_device *dev) | |||
1242 | return 0; | 1239 | return 0; |
1243 | } | 1240 | } |
1244 | 1241 | ||
1245 | /* | ||
1246 | * call the drm_ioctl under the big kernel lock because | ||
1247 | * to lock against the i810_mmap_buffers function. | ||
1248 | */ | ||
1249 | long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
1250 | { | ||
1251 | int ret; | ||
1252 | lock_kernel(); | ||
1253 | ret = drm_ioctl(file, cmd, arg); | ||
1254 | unlock_kernel(); | ||
1255 | return ret; | ||
1256 | } | ||
1257 | |||
1258 | struct drm_ioctl_desc i810_ioctls[] = { | 1242 | struct drm_ioctl_desc i810_ioctls[] = { |
1259 | DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1243 | DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1260 | DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), | 1244 | DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), |
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 88bcd331e7c5..6f98d059f68a 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c | |||
@@ -57,18 +57,13 @@ static struct drm_driver driver = { | |||
57 | .owner = THIS_MODULE, | 57 | .owner = THIS_MODULE, |
58 | .open = drm_open, | 58 | .open = drm_open, |
59 | .release = drm_release, | 59 | .release = drm_release, |
60 | .unlocked_ioctl = i810_ioctl, | 60 | .unlocked_ioctl = drm_ioctl, |
61 | .mmap = drm_mmap, | 61 | .mmap = drm_mmap, |
62 | .poll = drm_poll, | 62 | .poll = drm_poll, |
63 | .fasync = drm_fasync, | 63 | .fasync = drm_fasync, |
64 | .llseek = noop_llseek, | 64 | .llseek = noop_llseek, |
65 | }, | 65 | }, |
66 | 66 | ||
67 | .pci_driver = { | ||
68 | .name = DRIVER_NAME, | ||
69 | .id_table = pciidlist, | ||
70 | }, | ||
71 | |||
72 | .name = DRIVER_NAME, | 67 | .name = DRIVER_NAME, |
73 | .desc = DRIVER_DESC, | 68 | .desc = DRIVER_DESC, |
74 | .date = DRIVER_DATE, | 69 | .date = DRIVER_DATE, |
@@ -77,15 +72,24 @@ static struct drm_driver driver = { | |||
77 | .patchlevel = DRIVER_PATCHLEVEL, | 72 | .patchlevel = DRIVER_PATCHLEVEL, |
78 | }; | 73 | }; |
79 | 74 | ||
75 | static struct pci_driver i810_pci_driver = { | ||
76 | .name = DRIVER_NAME, | ||
77 | .id_table = pciidlist, | ||
78 | }; | ||
79 | |||
80 | static int __init i810_init(void) | 80 | static int __init i810_init(void) |
81 | { | 81 | { |
82 | if (num_possible_cpus() > 1) { | ||
83 | pr_err("drm/i810 does not support SMP\n"); | ||
84 | return -EINVAL; | ||
85 | } | ||
82 | driver.num_ioctls = i810_max_ioctl; | 86 | driver.num_ioctls = i810_max_ioctl; |
83 | return drm_init(&driver); | 87 | return drm_pci_init(&driver, &i810_pci_driver); |
84 | } | 88 | } |
85 | 89 | ||
86 | static void __exit i810_exit(void) | 90 | static void __exit i810_exit(void) |
87 | { | 91 | { |
88 | drm_exit(&driver); | 92 | drm_pci_exit(&driver, &i810_pci_driver); |
89 | } | 93 | } |
90 | 94 | ||
91 | module_init(i810_init); | 95 | module_init(i810_init); |
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile deleted file mode 100644 index c642ee0b238c..000000000000 --- a/drivers/gpu/drm/i830/Makefile +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the drm device driver. This driver provides support for the | ||
3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. | ||
4 | |||
5 | ccflags-y := -Iinclude/drm | ||
6 | i830-y := i830_drv.o i830_dma.o i830_irq.o | ||
7 | |||
8 | obj-$(CONFIG_DRM_I830) += i830.o | ||
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c deleted file mode 100644 index ca6f31ff0eec..000000000000 --- a/drivers/gpu/drm/i830/i830_dma.c +++ /dev/null | |||
@@ -1,1560 +0,0 @@ | |||
1 | /* i830_dma.c -- DMA support for the I830 -*- linux-c -*- | ||
2 | * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: Rickard E. (Rik) Faith <faith@valinux.com> | ||
28 | * Jeff Hartmann <jhartmann@valinux.com> | ||
29 | * Keith Whitwell <keith@tungstengraphics.com> | ||
30 | * Abraham vd Merwe <abraham@2d3d.co.za> | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include "drmP.h" | ||
35 | #include "drm.h" | ||
36 | #include "i830_drm.h" | ||
37 | #include "i830_drv.h" | ||
38 | #include <linux/interrupt.h> /* For task queue support */ | ||
39 | #include <linux/smp_lock.h> | ||
40 | #include <linux/pagemap.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/slab.h> | ||
43 | #include <asm/uaccess.h> | ||
44 | |||
45 | #define I830_BUF_FREE 2 | ||
46 | #define I830_BUF_CLIENT 1 | ||
47 | #define I830_BUF_HARDWARE 0 | ||
48 | |||
49 | #define I830_BUF_UNMAPPED 0 | ||
50 | #define I830_BUF_MAPPED 1 | ||
51 | |||
52 | static struct drm_buf *i830_freelist_get(struct drm_device * dev) | ||
53 | { | ||
54 | struct drm_device_dma *dma = dev->dma; | ||
55 | int i; | ||
56 | int used; | ||
57 | |||
58 | /* Linear search might not be the best solution */ | ||
59 | |||
60 | for (i = 0; i < dma->buf_count; i++) { | ||
61 | struct drm_buf *buf = dma->buflist[i]; | ||
62 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
63 | /* In use is already a pointer */ | ||
64 | used = cmpxchg(buf_priv->in_use, I830_BUF_FREE, | ||
65 | I830_BUF_CLIENT); | ||
66 | if (used == I830_BUF_FREE) | ||
67 | return buf; | ||
68 | } | ||
69 | return NULL; | ||
70 | } | ||
71 | |||
72 | /* This should only be called if the buffer is not sent to the hardware | ||
73 | * yet, the hardware updates in use for us once its on the ring buffer. | ||
74 | */ | ||
75 | |||
76 | static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf) | ||
77 | { | ||
78 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
79 | int used; | ||
80 | |||
81 | /* In use is already a pointer */ | ||
82 | used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE); | ||
83 | if (used != I830_BUF_CLIENT) { | ||
84 | DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | ||
92 | { | ||
93 | struct drm_file *priv = filp->private_data; | ||
94 | struct drm_device *dev; | ||
95 | drm_i830_private_t *dev_priv; | ||
96 | struct drm_buf *buf; | ||
97 | drm_i830_buf_priv_t *buf_priv; | ||
98 | |||
99 | lock_kernel(); | ||
100 | dev = priv->minor->dev; | ||
101 | dev_priv = dev->dev_private; | ||
102 | buf = dev_priv->mmap_buffer; | ||
103 | buf_priv = buf->dev_private; | ||
104 | |||
105 | vma->vm_flags |= (VM_IO | VM_DONTCOPY); | ||
106 | vma->vm_file = filp; | ||
107 | |||
108 | buf_priv->currently_mapped = I830_BUF_MAPPED; | ||
109 | unlock_kernel(); | ||
110 | |||
111 | if (io_remap_pfn_range(vma, vma->vm_start, | ||
112 | vma->vm_pgoff, | ||
113 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) | ||
114 | return -EAGAIN; | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static const struct file_operations i830_buffer_fops = { | ||
119 | .open = drm_open, | ||
120 | .release = drm_release, | ||
121 | .unlocked_ioctl = i830_ioctl, | ||
122 | .mmap = i830_mmap_buffers, | ||
123 | .fasync = drm_fasync, | ||
124 | .llseek = noop_llseek, | ||
125 | }; | ||
126 | |||
127 | static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) | ||
128 | { | ||
129 | struct drm_device *dev = file_priv->minor->dev; | ||
130 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
131 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
132 | const struct file_operations *old_fops; | ||
133 | unsigned long virtual; | ||
134 | int retcode = 0; | ||
135 | |||
136 | if (buf_priv->currently_mapped == I830_BUF_MAPPED) | ||
137 | return -EINVAL; | ||
138 | |||
139 | down_write(¤t->mm->mmap_sem); | ||
140 | old_fops = file_priv->filp->f_op; | ||
141 | file_priv->filp->f_op = &i830_buffer_fops; | ||
142 | dev_priv->mmap_buffer = buf; | ||
143 | virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE, | ||
144 | MAP_SHARED, buf->bus_address); | ||
145 | dev_priv->mmap_buffer = NULL; | ||
146 | file_priv->filp->f_op = old_fops; | ||
147 | if (IS_ERR((void *)virtual)) { /* ugh */ | ||
148 | /* Real error */ | ||
149 | DRM_ERROR("mmap error\n"); | ||
150 | retcode = PTR_ERR((void *)virtual); | ||
151 | buf_priv->virtual = NULL; | ||
152 | } else { | ||
153 | buf_priv->virtual = (void __user *)virtual; | ||
154 | } | ||
155 | up_write(¤t->mm->mmap_sem); | ||
156 | |||
157 | return retcode; | ||
158 | } | ||
159 | |||
160 | static int i830_unmap_buffer(struct drm_buf *buf) | ||
161 | { | ||
162 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
163 | int retcode = 0; | ||
164 | |||
165 | if (buf_priv->currently_mapped != I830_BUF_MAPPED) | ||
166 | return -EINVAL; | ||
167 | |||
168 | down_write(¤t->mm->mmap_sem); | ||
169 | retcode = do_munmap(current->mm, | ||
170 | (unsigned long)buf_priv->virtual, | ||
171 | (size_t) buf->total); | ||
172 | up_write(¤t->mm->mmap_sem); | ||
173 | |||
174 | buf_priv->currently_mapped = I830_BUF_UNMAPPED; | ||
175 | buf_priv->virtual = NULL; | ||
176 | |||
177 | return retcode; | ||
178 | } | ||
179 | |||
180 | static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d, | ||
181 | struct drm_file *file_priv) | ||
182 | { | ||
183 | struct drm_buf *buf; | ||
184 | drm_i830_buf_priv_t *buf_priv; | ||
185 | int retcode = 0; | ||
186 | |||
187 | buf = i830_freelist_get(dev); | ||
188 | if (!buf) { | ||
189 | retcode = -ENOMEM; | ||
190 | DRM_DEBUG("retcode=%d\n", retcode); | ||
191 | return retcode; | ||
192 | } | ||
193 | |||
194 | retcode = i830_map_buffer(buf, file_priv); | ||
195 | if (retcode) { | ||
196 | i830_freelist_put(dev, buf); | ||
197 | DRM_ERROR("mapbuf failed, retcode %d\n", retcode); | ||
198 | return retcode; | ||
199 | } | ||
200 | buf->file_priv = file_priv; | ||
201 | buf_priv = buf->dev_private; | ||
202 | d->granted = 1; | ||
203 | d->request_idx = buf->idx; | ||
204 | d->request_size = buf->total; | ||
205 | d->virtual = buf_priv->virtual; | ||
206 | |||
207 | return retcode; | ||
208 | } | ||
209 | |||
210 | static int i830_dma_cleanup(struct drm_device *dev) | ||
211 | { | ||
212 | struct drm_device_dma *dma = dev->dma; | ||
213 | |||
214 | /* Make sure interrupts are disabled here, because the uninstall ioctl | ||
215 | * may not have been called from userspace, and once dev_private | ||
216 | * is freed it's too late. | ||
217 | */ | ||
218 | if (dev->irq_enabled) | ||
219 | drm_irq_uninstall(dev); | ||
220 | |||
221 | if (dev->dev_private) { | ||
222 | int i; | ||
223 | drm_i830_private_t *dev_priv = | ||
224 | (drm_i830_private_t *) dev->dev_private; | ||
225 | |||
226 | if (dev_priv->ring.virtual_start) | ||
227 | drm_core_ioremapfree(&dev_priv->ring.map, dev); | ||
228 | if (dev_priv->hw_status_page) { | ||
229 | pci_free_consistent(dev->pdev, PAGE_SIZE, | ||
230 | dev_priv->hw_status_page, | ||
231 | dev_priv->dma_status_page); | ||
232 | /* Need to rewrite hardware status page */ | ||
233 | I830_WRITE(0x02080, 0x1ffff000); | ||
234 | } | ||
235 | |||
236 | kfree(dev->dev_private); | ||
237 | dev->dev_private = NULL; | ||
238 | |||
239 | for (i = 0; i < dma->buf_count; i++) { | ||
240 | struct drm_buf *buf = dma->buflist[i]; | ||
241 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
242 | if (buf_priv->kernel_virtual && buf->total) | ||
243 | drm_core_ioremapfree(&buf_priv->map, dev); | ||
244 | } | ||
245 | } | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | int i830_wait_ring(struct drm_device *dev, int n, const char *caller) | ||
250 | { | ||
251 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
252 | drm_i830_ring_buffer_t *ring = &(dev_priv->ring); | ||
253 | int iters = 0; | ||
254 | unsigned long end; | ||
255 | unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; | ||
256 | |||
257 | end = jiffies + (HZ * 3); | ||
258 | while (ring->space < n) { | ||
259 | ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; | ||
260 | ring->space = ring->head - (ring->tail + 8); | ||
261 | if (ring->space < 0) | ||
262 | ring->space += ring->Size; | ||
263 | |||
264 | if (ring->head != last_head) { | ||
265 | end = jiffies + (HZ * 3); | ||
266 | last_head = ring->head; | ||
267 | } | ||
268 | |||
269 | iters++; | ||
270 | if (time_before(end, jiffies)) { | ||
271 | DRM_ERROR("space: %d wanted %d\n", ring->space, n); | ||
272 | DRM_ERROR("lockup\n"); | ||
273 | goto out_wait_ring; | ||
274 | } | ||
275 | udelay(1); | ||
276 | dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; | ||
277 | } | ||
278 | |||
279 | out_wait_ring: | ||
280 | return iters; | ||
281 | } | ||
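For reference, the free-space computation that i830_wait_ring() repeats (and i830_kernel_lost_context() below performs once) boils down to this sketch, with hypothetical names; the 8 reserved bytes keep a completely full ring distinguishable from an empty one, so head == tail always means empty.

/* Free bytes in a circular ring of `size` bytes. */
static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)		/* head is behind tail: wrapped */
		space += size;
	return space;
}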
282 | |||
283 | static void i830_kernel_lost_context(struct drm_device *dev) | ||
284 | { | ||
285 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
286 | drm_i830_ring_buffer_t *ring = &(dev_priv->ring); | ||
287 | |||
288 | ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; | ||
289 | ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; | ||
290 | ring->space = ring->head - (ring->tail + 8); | ||
291 | if (ring->space < 0) | ||
292 | ring->space += ring->Size; | ||
293 | |||
294 | if (ring->head == ring->tail) | ||
295 | dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY; | ||
296 | } | ||
297 | |||
298 | static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv) | ||
299 | { | ||
300 | struct drm_device_dma *dma = dev->dma; | ||
301 | int my_idx = 36; | ||
302 | u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); | ||
303 | int i; | ||
304 | |||
305 | if (dma->buf_count > 1019) { | ||
306 | /* Not enough space in the status page for the freelist */ | ||
307 | return -EINVAL; | ||
308 | } | ||
309 | |||
310 | for (i = 0; i < dma->buf_count; i++) { | ||
311 | struct drm_buf *buf = dma->buflist[i]; | ||
312 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
313 | |||
314 | buf_priv->in_use = hw_status++; | ||
315 | buf_priv->my_use_idx = my_idx; | ||
316 | my_idx += 4; | ||
317 | |||
318 | *buf_priv->in_use = I830_BUF_FREE; | ||
319 | |||
320 | buf_priv->map.offset = buf->bus_address; | ||
321 | buf_priv->map.size = buf->total; | ||
322 | buf_priv->map.type = _DRM_AGP; | ||
323 | buf_priv->map.flags = 0; | ||
324 | buf_priv->map.mtrr = 0; | ||
325 | |||
326 | drm_core_ioremap(&buf_priv->map, dev); | ||
327 | buf_priv->kernel_virtual = buf_priv->map.handle; | ||
328 | } | ||
329 | return 0; | ||
330 | } | ||
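A quick sanity check on the 1019 limit, assuming 4 KiB pages (the usual PAGE_SIZE on the hardware this drove): freelist entries start at byte 36 of the status page and occupy one u32 each, so the page actually holds

	(4096 - 36) / 4 = 1015 entries.

If that arithmetic is right, the 1019 bound is slightly generous — the last few entries would land past the end of the page — though it is far above any realistic buffer count. Bytes below 36 are reserved for hardware writes such as the dispatch counter stored at byte 20 (the hw_status[5] read elsewhere in this file).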
331 | |||
332 | static int i830_dma_initialize(struct drm_device *dev, | ||
333 | drm_i830_private_t *dev_priv, | ||
334 | drm_i830_init_t *init) | ||
335 | { | ||
336 | struct drm_map_list *r_list; | ||
337 | |||
338 | memset(dev_priv, 0, sizeof(drm_i830_private_t)); | ||
339 | |||
340 | list_for_each_entry(r_list, &dev->maplist, head) { | ||
341 | if (r_list->map && | ||
342 | r_list->map->type == _DRM_SHM && | ||
343 | r_list->map->flags & _DRM_CONTAINS_LOCK) { | ||
344 | dev_priv->sarea_map = r_list->map; | ||
345 | break; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | if (!dev_priv->sarea_map) { | ||
350 | dev->dev_private = (void *)dev_priv; | ||
351 | i830_dma_cleanup(dev); | ||
352 | DRM_ERROR("cannot find sarea!\n"); | ||
353 | return -EINVAL; | ||
354 | } | ||
355 | dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); | ||
356 | if (!dev_priv->mmio_map) { | ||
357 | dev->dev_private = (void *)dev_priv; | ||
358 | i830_dma_cleanup(dev); | ||
359 | DRM_ERROR("cannot find mmio map!\n"); | ||
360 | return -EINVAL; | ||
361 | } | ||
362 | dev->agp_buffer_token = init->buffers_offset; | ||
363 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | ||
364 | if (!dev->agp_buffer_map) { | ||
365 | dev->dev_private = (void *)dev_priv; | ||
366 | i830_dma_cleanup(dev); | ||
367 | DRM_ERROR("cannot find dma buffer map!\n"); | ||
368 | return -EINVAL; | ||
369 | } | ||
370 | |||
371 | dev_priv->sarea_priv = (drm_i830_sarea_t *) | ||
372 | ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset); | ||
373 | |||
374 | dev_priv->ring.Start = init->ring_start; | ||
375 | dev_priv->ring.End = init->ring_end; | ||
376 | dev_priv->ring.Size = init->ring_size; | ||
377 | |||
378 | dev_priv->ring.map.offset = dev->agp->base + init->ring_start; | ||
379 | dev_priv->ring.map.size = init->ring_size; | ||
380 | dev_priv->ring.map.type = _DRM_AGP; | ||
381 | dev_priv->ring.map.flags = 0; | ||
382 | dev_priv->ring.map.mtrr = 0; | ||
383 | |||
384 | drm_core_ioremap(&dev_priv->ring.map, dev); | ||
385 | |||
386 | if (dev_priv->ring.map.handle == NULL) { | ||
387 | dev->dev_private = (void *)dev_priv; | ||
388 | i830_dma_cleanup(dev); | ||
389 | DRM_ERROR("cannot ioremap virtual address for" | ||
390 | " ring buffer\n"); | ||
391 | return -ENOMEM; | ||
392 | } | ||
393 | |||
394 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | ||
395 | |||
396 | dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; | ||
397 | |||
398 | dev_priv->w = init->w; | ||
399 | dev_priv->h = init->h; | ||
400 | dev_priv->pitch = init->pitch; | ||
401 | dev_priv->back_offset = init->back_offset; | ||
402 | dev_priv->depth_offset = init->depth_offset; | ||
403 | dev_priv->front_offset = init->front_offset; | ||
404 | |||
405 | dev_priv->front_di1 = init->front_offset | init->pitch_bits; | ||
406 | dev_priv->back_di1 = init->back_offset | init->pitch_bits; | ||
407 | dev_priv->zi1 = init->depth_offset | init->pitch_bits; | ||
408 | |||
409 | DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1); | ||
410 | DRM_DEBUG("back_offset %x\n", dev_priv->back_offset); | ||
411 | DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1); | ||
412 | DRM_DEBUG("pitch_bits %x\n", init->pitch_bits); | ||
413 | |||
414 | dev_priv->cpp = init->cpp; | ||
415 | /* We use separate values as placeholders for future mechanisms | ||
416 | * allowing private backbuffer/depthbuffer usage. | ||
417 | */ | ||
418 | |||
419 | dev_priv->back_pitch = init->back_pitch; | ||
420 | dev_priv->depth_pitch = init->depth_pitch; | ||
421 | dev_priv->do_boxes = 0; | ||
422 | dev_priv->use_mi_batchbuffer_start = 0; | ||
423 | |||
424 | /* Program Hardware Status Page */ | ||
425 | dev_priv->hw_status_page = | ||
426 | pci_alloc_consistent(dev->pdev, PAGE_SIZE, | ||
427 | &dev_priv->dma_status_page); | ||
428 | if (!dev_priv->hw_status_page) { | ||
429 | dev->dev_private = (void *)dev_priv; | ||
430 | i830_dma_cleanup(dev); | ||
431 | DRM_ERROR("Cannot allocate hardware status page\n"); | ||
432 | return -ENOMEM; | ||
433 | } | ||
434 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | ||
435 | DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); | ||
436 | |||
437 | I830_WRITE(0x02080, dev_priv->dma_status_page); | ||
438 | DRM_DEBUG("Enabled hardware status page\n"); | ||
439 | |||
440 | /* Now we need to init our freelist */ | ||
441 | if (i830_freelist_init(dev, dev_priv) != 0) { | ||
442 | dev->dev_private = (void *)dev_priv; | ||
443 | i830_dma_cleanup(dev); | ||
444 | DRM_ERROR("Not enough space in the status page for" | ||
445 | " the freelist\n"); | ||
446 | return -ENOMEM; | ||
447 | } | ||
448 | dev->dev_private = (void *)dev_priv; | ||
449 | |||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | static int i830_dma_init(struct drm_device *dev, void *data, | ||
454 | struct drm_file *file_priv) | ||
455 | { | ||
456 | drm_i830_private_t *dev_priv; | ||
457 | drm_i830_init_t *init = data; | ||
458 | int retcode = 0; | ||
459 | |||
460 | switch (init->func) { | ||
461 | case I830_INIT_DMA: | ||
462 | dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL); | ||
463 | if (dev_priv == NULL) | ||
464 | return -ENOMEM; | ||
465 | retcode = i830_dma_initialize(dev, dev_priv, init); | ||
466 | break; | ||
467 | case I830_CLEANUP_DMA: | ||
468 | retcode = i830_dma_cleanup(dev); | ||
469 | break; | ||
470 | default: | ||
471 | retcode = -EINVAL; | ||
472 | break; | ||
473 | } | ||
474 | |||
475 | return retcode; | ||
476 | } | ||
477 | |||
478 | #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) | ||
479 | #define ST1_ENABLE (1<<16) | ||
480 | #define ST1_MASK (0xffff) | ||
481 | |||
482 | /* The most efficient way to verify state for the i830 is to check it | ||
483 | * as it is emitted. Non-conformant state is dropped and logged. | ||
484 | */ | ||
485 | static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code) | ||
486 | { | ||
487 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
488 | int i, j = 0; | ||
489 | unsigned int tmp; | ||
490 | RING_LOCALS; | ||
491 | |||
492 | BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4); | ||
493 | |||
494 | for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) { | ||
495 | tmp = code[i]; | ||
496 | if ((tmp & (7 << 29)) == CMD_3D && | ||
497 | (tmp & (0x1f << 24)) < (0x1d << 24)) { | ||
498 | OUT_RING(tmp); | ||
499 | j++; | ||
500 | } else { | ||
501 | DRM_ERROR("Skipping %d\n", i); | ||
502 | } | ||
503 | } | ||
504 | |||
505 | OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD); | ||
506 | OUT_RING(code[I830_CTXREG_BLENDCOLR]); | ||
507 | j += 2; | ||
508 | |||
509 | for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) { | ||
510 | tmp = code[i]; | ||
511 | if ((tmp & (7 << 29)) == CMD_3D && | ||
512 | (tmp & (0x1f << 24)) < (0x1d << 24)) { | ||
513 | OUT_RING(tmp); | ||
514 | j++; | ||
515 | } else { | ||
516 | DRM_ERROR("Skipping %d\n", i); | ||
517 | } | ||
518 | } | ||
519 | |||
520 | OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD); | ||
521 | OUT_RING(code[I830_CTXREG_MCSB1]); | ||
522 | j += 2; | ||
523 | |||
524 | if (j & 1) | ||
525 | OUT_RING(0); | ||
526 | |||
527 | ADVANCE_LP_RING(); | ||
528 | } | ||
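The acceptance test applied to each state word above could be named with a small predicate — a sketch only; this helper does not exist in the driver:

/* Hypothetical helper naming the per-dword check used above:
 * accept only 3D-client commands whose opcode field stays below
 * 0x1d, i.e. plain state packets rather than buffer, primitive or
 * load-indirect commands.
 */
static int i830_state_cmd_ok(unsigned int cmd)
{
	return (cmd & (7 << 29)) == CMD_3D &&
	       (cmd & (0x1f << 24)) < (0x1d << 24);
}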
529 | |||
530 | static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code) | ||
531 | { | ||
532 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
533 | int i, j = 0; | ||
534 | unsigned int tmp; | ||
535 | RING_LOCALS; | ||
536 | |||
537 | if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO || | ||
538 | (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) == | ||
539 | (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) { | ||
540 | |||
541 | BEGIN_LP_RING(I830_TEX_SETUP_SIZE); | ||
542 | |||
543 | OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */ | ||
544 | OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */ | ||
545 | OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */ | ||
546 | OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */ | ||
547 | OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */ | ||
548 | OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */ | ||
549 | |||
550 | for (i = 6; i < I830_TEX_SETUP_SIZE; i++) { | ||
551 | tmp = code[i]; | ||
552 | OUT_RING(tmp); | ||
553 | j++; | ||
554 | } | ||
555 | |||
556 | if (j & 1) | ||
557 | OUT_RING(0); | ||
558 | |||
559 | ADVANCE_LP_RING(); | ||
560 | } else | ||
561 | printk(KERN_ERR "rejected packet %x\n", code[0]); | ||
562 | } | ||
563 | |||
564 | static void i830EmitTexBlendVerified(struct drm_device *dev, | ||
565 | unsigned int *code, unsigned int num) | ||
566 | { | ||
567 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
568 | int i, j = 0; | ||
569 | unsigned int tmp; | ||
570 | RING_LOCALS; | ||
571 | |||
572 | if (!num) | ||
573 | return; | ||
574 | |||
575 | BEGIN_LP_RING(num + 1); | ||
576 | |||
577 | for (i = 0; i < num; i++) { | ||
578 | tmp = code[i]; | ||
579 | OUT_RING(tmp); | ||
580 | j++; | ||
581 | } | ||
582 | |||
583 | if (j & 1) | ||
584 | OUT_RING(0); | ||
585 | |||
586 | ADVANCE_LP_RING(); | ||
587 | } | ||
588 | |||
589 | static void i830EmitTexPalette(struct drm_device *dev, | ||
590 | unsigned int *palette, int number, int is_shared) | ||
591 | { | ||
592 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
593 | int i; | ||
594 | RING_LOCALS; | ||
595 | |||
596 | return;	/* palette upload disabled; everything below is unreachable */ | ||
597 | |||
598 | BEGIN_LP_RING(258); | ||
599 | |||
600 | if (is_shared == 1) { | ||
601 | OUT_RING(CMD_OP_MAP_PALETTE_LOAD | | ||
602 | MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH); | ||
603 | } else { | ||
604 | OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number)); | ||
605 | } | ||
606 | for (i = 0; i < 256; i++) | ||
607 | OUT_RING(palette[i]); | ||
608 | OUT_RING(0); | ||
609 | /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop! | ||
610 | */ | ||
611 | } | ||
612 | |||
613 | /* Need to do some additional checking when setting the dest buffer. | ||
614 | */ | ||
615 | static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code) | ||
616 | { | ||
617 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
618 | unsigned int tmp; | ||
619 | RING_LOCALS; | ||
620 | |||
621 | BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10); | ||
622 | |||
623 | tmp = code[I830_DESTREG_CBUFADDR]; | ||
624 | if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { | ||
625 | if (((int)outring) & 8) { | ||
626 | OUT_RING(0); | ||
627 | OUT_RING(0); | ||
628 | } | ||
629 | |||
630 | OUT_RING(CMD_OP_DESTBUFFER_INFO); | ||
631 | OUT_RING(BUF_3D_ID_COLOR_BACK | | ||
632 | BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) | | ||
633 | BUF_3D_USE_FENCE); | ||
634 | OUT_RING(tmp); | ||
635 | OUT_RING(0); | ||
636 | |||
637 | OUT_RING(CMD_OP_DESTBUFFER_INFO); | ||
638 | OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE | | ||
639 | BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); | ||
640 | OUT_RING(dev_priv->zi1); | ||
641 | OUT_RING(0); | ||
642 | } else { | ||
643 | DRM_ERROR("bad di1 %x (allow %x or %x)\n", | ||
644 | tmp, dev_priv->front_di1, dev_priv->back_di1); | ||
645 | } | ||
646 | |||
647 | /* invariant: | ||
648 | */ | ||
649 | |||
650 | OUT_RING(GFX_OP_DESTBUFFER_VARS); | ||
651 | OUT_RING(code[I830_DESTREG_DV1]); | ||
652 | |||
653 | OUT_RING(GFX_OP_DRAWRECT_INFO); | ||
654 | OUT_RING(code[I830_DESTREG_DR1]); | ||
655 | OUT_RING(code[I830_DESTREG_DR2]); | ||
656 | OUT_RING(code[I830_DESTREG_DR3]); | ||
657 | OUT_RING(code[I830_DESTREG_DR4]); | ||
658 | |||
659 | /* Need to verify this */ | ||
660 | tmp = code[I830_DESTREG_SENABLE]; | ||
661 | if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) { | ||
662 | OUT_RING(tmp); | ||
663 | } else { | ||
664 | DRM_ERROR("bad scissor enable\n"); | ||
665 | OUT_RING(0); | ||
666 | } | ||
667 | |||
668 | OUT_RING(GFX_OP_SCISSOR_RECT); | ||
669 | OUT_RING(code[I830_DESTREG_SR1]); | ||
670 | OUT_RING(code[I830_DESTREG_SR2]); | ||
671 | OUT_RING(0); | ||
672 | |||
673 | ADVANCE_LP_RING(); | ||
674 | } | ||
675 | |||
676 | static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code) | ||
677 | { | ||
678 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
679 | RING_LOCALS; | ||
680 | |||
681 | BEGIN_LP_RING(2); | ||
682 | OUT_RING(GFX_OP_STIPPLE); | ||
683 | OUT_RING(code[1]); | ||
684 | ADVANCE_LP_RING(); | ||
685 | } | ||
686 | |||
687 | static void i830EmitState(struct drm_device *dev) | ||
688 | { | ||
689 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
690 | drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; | ||
691 | unsigned int dirty = sarea_priv->dirty; | ||
692 | |||
693 | DRM_DEBUG("%s %x\n", __func__, dirty); | ||
694 | |||
695 | if (dirty & I830_UPLOAD_BUFFERS) { | ||
696 | i830EmitDestVerified(dev, sarea_priv->BufferState); | ||
697 | sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS; | ||
698 | } | ||
699 | |||
700 | if (dirty & I830_UPLOAD_CTX) { | ||
701 | i830EmitContextVerified(dev, sarea_priv->ContextState); | ||
702 | sarea_priv->dirty &= ~I830_UPLOAD_CTX; | ||
703 | } | ||
704 | |||
705 | if (dirty & I830_UPLOAD_TEX0) { | ||
706 | i830EmitTexVerified(dev, sarea_priv->TexState[0]); | ||
707 | sarea_priv->dirty &= ~I830_UPLOAD_TEX0; | ||
708 | } | ||
709 | |||
710 | if (dirty & I830_UPLOAD_TEX1) { | ||
711 | i830EmitTexVerified(dev, sarea_priv->TexState[1]); | ||
712 | sarea_priv->dirty &= ~I830_UPLOAD_TEX1; | ||
713 | } | ||
714 | |||
715 | if (dirty & I830_UPLOAD_TEXBLEND0) { | ||
716 | i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0], | ||
717 | sarea_priv->TexBlendStateWordsUsed[0]); | ||
718 | sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0; | ||
719 | } | ||
720 | |||
721 | if (dirty & I830_UPLOAD_TEXBLEND1) { | ||
722 | i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1], | ||
723 | sarea_priv->TexBlendStateWordsUsed[1]); | ||
724 | sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1; | ||
725 | } | ||
726 | |||
727 | if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) { | ||
728 | i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); | ||
729 | } else { | ||
730 | if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { | ||
731 | i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); | ||
732 | sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); | ||
733 | } | ||
734 | if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { | ||
735 | i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); | ||
736 | sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); | ||
737 | } | ||
738 | |||
739 | /* 1.3: | ||
740 | */ | ||
741 | #if 0 | ||
742 | if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) { | ||
743 | i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0); | ||
744 | sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); | ||
745 | } | ||
746 | if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) { | ||
747 | i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0); | ||
748 | sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); | ||
749 | } | ||
750 | #endif | ||
751 | } | ||
752 | |||
753 | /* 1.3: | ||
754 | */ | ||
755 | if (dirty & I830_UPLOAD_STIPPLE) { | ||
756 | i830EmitStippleVerified(dev, sarea_priv->StippleState); | ||
757 | sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE; | ||
758 | } | ||
759 | |||
760 | if (dirty & I830_UPLOAD_TEX2) { | ||
761 | i830EmitTexVerified(dev, sarea_priv->TexState2); | ||
762 | sarea_priv->dirty &= ~I830_UPLOAD_TEX2; | ||
763 | } | ||
764 | |||
765 | if (dirty & I830_UPLOAD_TEX3) { | ||
766 | i830EmitTexVerified(dev, sarea_priv->TexState3); | ||
767 | sarea_priv->dirty &= ~I830_UPLOAD_TEX3; | ||
768 | } | ||
769 | |||
770 | if (dirty & I830_UPLOAD_TEXBLEND2) { | ||
771 | i830EmitTexBlendVerified(dev, | ||
772 | sarea_priv->TexBlendState2, | ||
773 | sarea_priv->TexBlendStateWordsUsed2); | ||
774 | |||
775 | sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2; | ||
776 | } | ||
777 | |||
778 | if (dirty & I830_UPLOAD_TEXBLEND3) { | ||
779 | i830EmitTexBlendVerified(dev, | ||
780 | sarea_priv->TexBlendState3, | ||
781 | sarea_priv->TexBlendStateWordsUsed3); | ||
782 | sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3; | ||
783 | } | ||
784 | } | ||
785 | |||
786 | /* ================================================================ | ||
787 | * Performance monitoring functions | ||
788 | */ | ||
789 | |||
790 | static void i830_fill_box(struct drm_device *dev, | ||
791 | int x, int y, int w, int h, int r, int g, int b) | ||
792 | { | ||
793 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
794 | u32 color; | ||
795 | unsigned int BR13, CMD; | ||
796 | RING_LOCALS; | ||
797 | |||
798 | BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24); | ||
799 | CMD = XY_COLOR_BLT_CMD; | ||
800 | x += dev_priv->sarea_priv->boxes[0].x1; | ||
801 | y += dev_priv->sarea_priv->boxes[0].y1; | ||
802 | |||
803 | if (dev_priv->cpp == 4) { | ||
804 | BR13 |= (1 << 25); | ||
805 | CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB); | ||
806 | color = (((0xff) << 24) | (r << 16) | (g << 8) | b); | ||
807 | } else { | ||
808 | color = (((r & 0xf8) << 8) | | ||
809 | ((g & 0xfc) << 3) | ((b & 0xf8) >> 3)); | ||
810 | } | ||
811 | |||
812 | BEGIN_LP_RING(6); | ||
813 | OUT_RING(CMD); | ||
814 | OUT_RING(BR13); | ||
815 | OUT_RING((y << 16) | x); | ||
816 | OUT_RING(((y + h) << 16) | (x + w)); | ||
817 | |||
818 | if (dev_priv->current_page == 1) | ||
819 | OUT_RING(dev_priv->front_offset); | ||
820 | else | ||
821 | OUT_RING(dev_priv->back_offset); | ||
822 | |||
823 | OUT_RING(color); | ||
824 | ADVANCE_LP_RING(); | ||
825 | } | ||
826 | |||
827 | static void i830_cp_performance_boxes(struct drm_device *dev) | ||
828 | { | ||
829 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
830 | |||
831 | /* Purple box for page flipping | ||
832 | */ | ||
833 | if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP) | ||
834 | i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255); | ||
835 | |||
836 | /* Red box if we have to wait for idle at any point | ||
837 | */ | ||
838 | if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT) | ||
839 | i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0); | ||
840 | |||
841 | /* Blue box: lost context? | ||
842 | */ | ||
843 | if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT) | ||
844 | i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255); | ||
845 | |||
846 | /* Yellow box for texture swaps | ||
847 | */ | ||
848 | if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD) | ||
849 | i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0); | ||
850 | |||
851 | /* Green box if hardware never idles (as far as we can tell) | ||
852 | */ | ||
853 | if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY)) | ||
854 | i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0); | ||
855 | |||
856 | /* Draw bars indicating number of buffers allocated | ||
857 | * (not a great measure, easily confused) | ||
858 | */ | ||
859 | if (dev_priv->dma_used) { | ||
860 | int bar = dev_priv->dma_used / 10240; | ||
861 | if (bar > 100) | ||
862 | bar = 100; | ||
863 | if (bar < 1) | ||
864 | bar = 1; | ||
865 | i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128); | ||
866 | dev_priv->dma_used = 0; | ||
867 | } | ||
868 | |||
869 | dev_priv->sarea_priv->perf_boxes = 0; | ||
870 | } | ||
871 | |||
872 | static void i830_dma_dispatch_clear(struct drm_device *dev, int flags, | ||
873 | unsigned int clear_color, | ||
874 | unsigned int clear_zval, | ||
875 | unsigned int clear_depthmask) | ||
876 | { | ||
877 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
878 | drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; | ||
879 | int nbox = sarea_priv->nbox; | ||
880 | struct drm_clip_rect *pbox = sarea_priv->boxes; | ||
881 | int pitch = dev_priv->pitch; | ||
882 | int cpp = dev_priv->cpp; | ||
883 | int i; | ||
884 | unsigned int BR13, CMD, D_CMD; | ||
885 | RING_LOCALS; | ||
886 | |||
887 | if (dev_priv->current_page == 1) { | ||
888 | unsigned int tmp = flags; | ||
889 | |||
890 | flags &= ~(I830_FRONT | I830_BACK); | ||
891 | if (tmp & I830_FRONT) | ||
892 | flags |= I830_BACK; | ||
893 | if (tmp & I830_BACK) | ||
894 | flags |= I830_FRONT; | ||
895 | } | ||
896 | |||
897 | i830_kernel_lost_context(dev); | ||
898 | |||
899 | switch (cpp) { | ||
900 | case 2: | ||
901 | BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24); | ||
902 | D_CMD = CMD = XY_COLOR_BLT_CMD; | ||
903 | break; | ||
904 | case 4: | ||
905 | BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25); | ||
906 | CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA | | ||
907 | XY_COLOR_BLT_WRITE_RGB); | ||
908 | D_CMD = XY_COLOR_BLT_CMD; | ||
909 | if (clear_depthmask & 0x00ffffff) | ||
910 | D_CMD |= XY_COLOR_BLT_WRITE_RGB; | ||
911 | if (clear_depthmask & 0xff000000) | ||
912 | D_CMD |= XY_COLOR_BLT_WRITE_ALPHA; | ||
913 | break; | ||
914 | default: | ||
915 | BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24); | ||
916 | D_CMD = CMD = XY_COLOR_BLT_CMD; | ||
917 | break; | ||
918 | } | ||
919 | |||
920 | if (nbox > I830_NR_SAREA_CLIPRECTS) | ||
921 | nbox = I830_NR_SAREA_CLIPRECTS; | ||
922 | |||
923 | for (i = 0; i < nbox; i++, pbox++) { | ||
924 | if (pbox->x1 > pbox->x2 || | ||
925 | pbox->y1 > pbox->y2 || | ||
926 | pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) | ||
927 | continue; | ||
928 | |||
929 | if (flags & I830_FRONT) { | ||
930 | DRM_DEBUG("clear front\n"); | ||
931 | BEGIN_LP_RING(6); | ||
932 | OUT_RING(CMD); | ||
933 | OUT_RING(BR13); | ||
934 | OUT_RING((pbox->y1 << 16) | pbox->x1); | ||
935 | OUT_RING((pbox->y2 << 16) | pbox->x2); | ||
936 | OUT_RING(dev_priv->front_offset); | ||
937 | OUT_RING(clear_color); | ||
938 | ADVANCE_LP_RING(); | ||
939 | } | ||
940 | |||
941 | if (flags & I830_BACK) { | ||
942 | DRM_DEBUG("clear back\n"); | ||
943 | BEGIN_LP_RING(6); | ||
944 | OUT_RING(CMD); | ||
945 | OUT_RING(BR13); | ||
946 | OUT_RING((pbox->y1 << 16) | pbox->x1); | ||
947 | OUT_RING((pbox->y2 << 16) | pbox->x2); | ||
948 | OUT_RING(dev_priv->back_offset); | ||
949 | OUT_RING(clear_color); | ||
950 | ADVANCE_LP_RING(); | ||
951 | } | ||
952 | |||
953 | if (flags & I830_DEPTH) { | ||
954 | DRM_DEBUG("clear depth\n"); | ||
955 | BEGIN_LP_RING(6); | ||
956 | OUT_RING(D_CMD); | ||
957 | OUT_RING(BR13); | ||
958 | OUT_RING((pbox->y1 << 16) | pbox->x1); | ||
959 | OUT_RING((pbox->y2 << 16) | pbox->x2); | ||
960 | OUT_RING(dev_priv->depth_offset); | ||
961 | OUT_RING(clear_zval); | ||
962 | ADVANCE_LP_RING(); | ||
963 | } | ||
964 | } | ||
965 | } | ||
966 | |||
967 | static void i830_dma_dispatch_swap(struct drm_device *dev) | ||
968 | { | ||
969 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
970 | drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; | ||
971 | int nbox = sarea_priv->nbox; | ||
972 | struct drm_clip_rect *pbox = sarea_priv->boxes; | ||
973 | int pitch = dev_priv->pitch; | ||
974 | int cpp = dev_priv->cpp; | ||
975 | int i; | ||
976 | unsigned int CMD, BR13; | ||
977 | RING_LOCALS; | ||
978 | |||
979 | DRM_DEBUG("swapbuffers\n"); | ||
980 | |||
981 | i830_kernel_lost_context(dev); | ||
982 | |||
983 | if (dev_priv->do_boxes) | ||
984 | i830_cp_performance_boxes(dev); | ||
985 | |||
986 | switch (cpp) { | ||
987 | case 2: | ||
988 | BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24); | ||
989 | CMD = XY_SRC_COPY_BLT_CMD; | ||
990 | break; | ||
991 | case 4: | ||
992 | BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25); | ||
993 | CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | | ||
994 | XY_SRC_COPY_BLT_WRITE_RGB); | ||
995 | break; | ||
996 | default: | ||
997 | BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24); | ||
998 | CMD = XY_SRC_COPY_BLT_CMD; | ||
999 | break; | ||
1000 | } | ||
1001 | |||
1002 | if (nbox > I830_NR_SAREA_CLIPRECTS) | ||
1003 | nbox = I830_NR_SAREA_CLIPRECTS; | ||
1004 | |||
1005 | for (i = 0; i < nbox; i++, pbox++) { | ||
1006 | if (pbox->x1 > pbox->x2 || | ||
1007 | pbox->y1 > pbox->y2 || | ||
1008 | pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) | ||
1009 | continue; | ||
1010 | |||
1011 | DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n", | ||
1012 | pbox->x1, pbox->y1, pbox->x2, pbox->y2); | ||
1013 | |||
1014 | BEGIN_LP_RING(8); | ||
1015 | OUT_RING(CMD); | ||
1016 | OUT_RING(BR13); | ||
1017 | OUT_RING((pbox->y1 << 16) | pbox->x1); | ||
1018 | OUT_RING((pbox->y2 << 16) | pbox->x2); | ||
1019 | |||
1020 | if (dev_priv->current_page == 0) | ||
1021 | OUT_RING(dev_priv->front_offset); | ||
1022 | else | ||
1023 | OUT_RING(dev_priv->back_offset); | ||
1024 | |||
1025 | OUT_RING((pbox->y1 << 16) | pbox->x1); | ||
1026 | OUT_RING(BR13 & 0xffff); | ||
1027 | |||
1028 | if (dev_priv->current_page == 0) | ||
1029 | OUT_RING(dev_priv->back_offset); | ||
1030 | else | ||
1031 | OUT_RING(dev_priv->front_offset); | ||
1032 | |||
1033 | ADVANCE_LP_RING(); | ||
1034 | } | ||
1035 | } | ||
1036 | |||
1037 | static void i830_dma_dispatch_flip(struct drm_device *dev) | ||
1038 | { | ||
1039 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1040 | RING_LOCALS; | ||
1041 | |||
1042 | DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", | ||
1043 | __func__, | ||
1044 | dev_priv->current_page, | ||
1045 | dev_priv->sarea_priv->pf_current_page); | ||
1046 | |||
1047 | i830_kernel_lost_context(dev); | ||
1048 | |||
1049 | if (dev_priv->do_boxes) { | ||
1050 | dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP; | ||
1051 | i830_cp_performance_boxes(dev); | ||
1052 | } | ||
1053 | |||
1054 | BEGIN_LP_RING(2); | ||
1055 | OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); | ||
1056 | OUT_RING(0); | ||
1057 | ADVANCE_LP_RING(); | ||
1058 | |||
1059 | BEGIN_LP_RING(6); | ||
1060 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); | ||
1061 | OUT_RING(0); | ||
1062 | if (dev_priv->current_page == 0) { | ||
1063 | OUT_RING(dev_priv->back_offset); | ||
1064 | dev_priv->current_page = 1; | ||
1065 | } else { | ||
1066 | OUT_RING(dev_priv->front_offset); | ||
1067 | dev_priv->current_page = 0; | ||
1068 | } | ||
1069 | OUT_RING(0); | ||
1070 | ADVANCE_LP_RING(); | ||
1071 | |||
1072 | BEGIN_LP_RING(2); | ||
1073 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); | ||
1074 | OUT_RING(0); | ||
1075 | ADVANCE_LP_RING(); | ||
1076 | |||
1077 | dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; | ||
1078 | } | ||
1079 | |||
1080 | static void i830_dma_dispatch_vertex(struct drm_device *dev, | ||
1081 | struct drm_buf *buf, int discard, int used) | ||
1082 | { | ||
1083 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1084 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
1085 | drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; | ||
1086 | struct drm_clip_rect *box = sarea_priv->boxes; | ||
1087 | int nbox = sarea_priv->nbox; | ||
1088 | unsigned long address = (unsigned long)buf->bus_address; | ||
1089 | unsigned long start = address - dev->agp->base; | ||
1090 | int i = 0, u; | ||
1091 | RING_LOCALS; | ||
1092 | |||
1093 | i830_kernel_lost_context(dev); | ||
1094 | |||
1095 | if (nbox > I830_NR_SAREA_CLIPRECTS) | ||
1096 | nbox = I830_NR_SAREA_CLIPRECTS; | ||
1097 | |||
1098 | if (discard) { | ||
1099 | u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, | ||
1100 | I830_BUF_HARDWARE); | ||
1101 | if (u != I830_BUF_CLIENT) | ||
1102 | DRM_DEBUG("buffer not owned by client\n"); | ||
1103 | } | ||
1104 | |||
1105 | if (used > 4 * 1023) | ||
1106 | used = 0; | ||
1107 | |||
1108 | if (sarea_priv->dirty) | ||
1109 | i830EmitState(dev); | ||
1110 | |||
1111 | DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", | ||
1112 | address, used, nbox); | ||
1113 | |||
1114 | dev_priv->counter++; | ||
1115 | DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter); | ||
1116 | DRM_DEBUG("i830_dma_dispatch\n"); | ||
1117 | DRM_DEBUG("start : %lx\n", start); | ||
1118 | DRM_DEBUG("used : %d\n", used); | ||
1119 | DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); | ||
1120 | |||
1121 | if (buf_priv->currently_mapped == I830_BUF_MAPPED) { | ||
1122 | u32 *vp = buf_priv->kernel_virtual; | ||
1123 | |||
1124 | vp[0] = (GFX_OP_PRIMITIVE | | ||
1125 | sarea_priv->vertex_prim | ((used / 4) - 2)); | ||
1126 | |||
1127 | if (dev_priv->use_mi_batchbuffer_start) { | ||
1128 | vp[used / 4] = MI_BATCH_BUFFER_END; | ||
1129 | used += 4; | ||
1130 | } | ||
1131 | |||
1132 | if (used & 4) { | ||
1133 | vp[used / 4] = 0; | ||
1134 | used += 4; | ||
1135 | } | ||
1136 | |||
1137 | i830_unmap_buffer(buf); | ||
1138 | } | ||
1139 | |||
1140 | if (used) { | ||
1141 | do { | ||
1142 | if (i < nbox) { | ||
1143 | BEGIN_LP_RING(6); | ||
1144 | OUT_RING(GFX_OP_DRAWRECT_INFO); | ||
1145 | OUT_RING(sarea_priv-> | ||
1146 | BufferState[I830_DESTREG_DR1]); | ||
1147 | OUT_RING(box[i].x1 | (box[i].y1 << 16)); | ||
1148 | OUT_RING(box[i].x2 | (box[i].y2 << 16)); | ||
1149 | OUT_RING(sarea_priv-> | ||
1150 | BufferState[I830_DESTREG_DR4]); | ||
1151 | OUT_RING(0); | ||
1152 | ADVANCE_LP_RING(); | ||
1153 | } | ||
1154 | |||
1155 | if (dev_priv->use_mi_batchbuffer_start) { | ||
1156 | BEGIN_LP_RING(2); | ||
1157 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); | ||
1158 | OUT_RING(start | MI_BATCH_NON_SECURE); | ||
1159 | ADVANCE_LP_RING(); | ||
1160 | } else { | ||
1161 | BEGIN_LP_RING(4); | ||
1162 | OUT_RING(MI_BATCH_BUFFER); | ||
1163 | OUT_RING(start | MI_BATCH_NON_SECURE); | ||
1164 | OUT_RING(start + used - 4); | ||
1165 | OUT_RING(0); | ||
1166 | ADVANCE_LP_RING(); | ||
1167 | } | ||
1168 | |||
1169 | } while (++i < nbox); | ||
1170 | } | ||
1171 | |||
1172 | if (discard) { | ||
1173 | dev_priv->counter++; | ||
1174 | |||
1175 | (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, | ||
1176 | I830_BUF_HARDWARE); | ||
1177 | |||
1178 | BEGIN_LP_RING(8); | ||
1179 | OUT_RING(CMD_STORE_DWORD_IDX); | ||
1180 | OUT_RING(20); | ||
1181 | OUT_RING(dev_priv->counter); | ||
1182 | OUT_RING(CMD_STORE_DWORD_IDX); | ||
1183 | OUT_RING(buf_priv->my_use_idx); | ||
1184 | OUT_RING(I830_BUF_FREE); | ||
1185 | OUT_RING(CMD_REPORT_HEAD); | ||
1186 | OUT_RING(0); | ||
1187 | ADVANCE_LP_RING(); | ||
1188 | } | ||
1189 | } | ||
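The discard path just above is the hardware half of the freelist protocol: the CMD_STORE_DWORD_IDX pair makes the GPU itself write I830_BUF_FREE into the buffer's slot in the status page (my_use_idx) once the ring reaches that point, and store the dispatch counter at byte 20 — the hw_status[5] value reported as last_dispatch by the ioctls below. A hypothetical check built on that:

/* Hypothetical helper: has the hardware retired this buffer?
 * True once the GPU has executed the CMD_STORE_DWORD_IDX emitted
 * in the discard path and written I830_BUF_FREE to the slot.
 */
static int i830_buf_retired(drm_i830_buf_priv_t *buf_priv)
{
	return ACCESS_ONCE(*buf_priv->in_use) == I830_BUF_FREE;
}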
1190 | |||
1191 | static void i830_dma_quiescent(struct drm_device *dev) | ||
1192 | { | ||
1193 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1194 | RING_LOCALS; | ||
1195 | |||
1196 | i830_kernel_lost_context(dev); | ||
1197 | |||
1198 | BEGIN_LP_RING(4); | ||
1199 | OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); | ||
1200 | OUT_RING(CMD_REPORT_HEAD); | ||
1201 | OUT_RING(0); | ||
1202 | OUT_RING(0); | ||
1203 | ADVANCE_LP_RING(); | ||
1204 | |||
1205 | i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__); | ||
1206 | } | ||
1207 | |||
1208 | static int i830_flush_queue(struct drm_device *dev) | ||
1209 | { | ||
1210 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1211 | struct drm_device_dma *dma = dev->dma; | ||
1212 | int i, ret = 0; | ||
1213 | RING_LOCALS; | ||
1214 | |||
1215 | i830_kernel_lost_context(dev); | ||
1216 | |||
1217 | BEGIN_LP_RING(2); | ||
1218 | OUT_RING(CMD_REPORT_HEAD); | ||
1219 | OUT_RING(0); | ||
1220 | ADVANCE_LP_RING(); | ||
1221 | |||
1222 | i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__); | ||
1223 | |||
1224 | for (i = 0; i < dma->buf_count; i++) { | ||
1225 | struct drm_buf *buf = dma->buflist[i]; | ||
1226 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
1227 | |||
1228 | int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE, | ||
1229 | I830_BUF_FREE); | ||
1230 | |||
1231 | if (used == I830_BUF_HARDWARE) | ||
1232 | DRM_DEBUG("reclaimed from HARDWARE\n"); | ||
1233 | if (used == I830_BUF_CLIENT) | ||
1234 | DRM_DEBUG("still on client\n"); | ||
1235 | } | ||
1236 | |||
1237 | return ret; | ||
1238 | } | ||
1239 | |||
1240 | /* Must be called with the lock held */ | ||
1241 | static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) | ||
1242 | { | ||
1243 | struct drm_device_dma *dma = dev->dma; | ||
1244 | int i; | ||
1245 | |||
1246 | if (!dma) | ||
1247 | return; | ||
1248 | if (!dev->dev_private) | ||
1249 | return; | ||
1250 | if (!dma->buflist) | ||
1251 | return; | ||
1252 | |||
1253 | i830_flush_queue(dev); | ||
1254 | |||
1255 | for (i = 0; i < dma->buf_count; i++) { | ||
1256 | struct drm_buf *buf = dma->buflist[i]; | ||
1257 | drm_i830_buf_priv_t *buf_priv = buf->dev_private; | ||
1258 | |||
1259 | if (buf->file_priv == file_priv && buf_priv) { | ||
1260 | int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, | ||
1261 | I830_BUF_FREE); | ||
1262 | |||
1263 | if (used == I830_BUF_CLIENT) | ||
1264 | DRM_DEBUG("reclaimed from client\n"); | ||
1265 | if (buf_priv->currently_mapped == I830_BUF_MAPPED) | ||
1266 | buf_priv->currently_mapped = I830_BUF_UNMAPPED; | ||
1267 | } | ||
1268 | } | ||
1269 | } | ||
1270 | |||
1271 | static int i830_flush_ioctl(struct drm_device *dev, void *data, | ||
1272 | struct drm_file *file_priv) | ||
1273 | { | ||
1274 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1275 | |||
1276 | i830_flush_queue(dev); | ||
1277 | return 0; | ||
1278 | } | ||
1279 | |||
1280 | static int i830_dma_vertex(struct drm_device *dev, void *data, | ||
1281 | struct drm_file *file_priv) | ||
1282 | { | ||
1283 | struct drm_device_dma *dma = dev->dma; | ||
1284 | drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; | ||
1285 | u32 *hw_status = dev_priv->hw_status_page; | ||
1286 | drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) | ||
1287 | dev_priv->sarea_priv; | ||
1288 | drm_i830_vertex_t *vertex = data; | ||
1289 | |||
1290 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1291 | |||
1292 | DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n", | ||
1293 | vertex->idx, vertex->used, vertex->discard); | ||
1294 | |||
1295 | if (vertex->idx < 0 || vertex->idx >= dma->buf_count) | ||
1296 | return -EINVAL; | ||
1297 | |||
1298 | i830_dma_dispatch_vertex(dev, | ||
1299 | dma->buflist[vertex->idx], | ||
1300 | vertex->discard, vertex->used); | ||
1301 | |||
1302 | sarea_priv->last_enqueue = dev_priv->counter - 1; | ||
1303 | sarea_priv->last_dispatch = (int)hw_status[5]; | ||
1304 | |||
1305 | return 0; | ||
1306 | } | ||
1307 | |||
1308 | static int i830_clear_bufs(struct drm_device *dev, void *data, | ||
1309 | struct drm_file *file_priv) | ||
1310 | { | ||
1311 | drm_i830_clear_t *clear = data; | ||
1312 | |||
1313 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1314 | |||
1315 | /* GH: Someone's doing nasty things... */ | ||
1316 | if (!dev->dev_private) | ||
1317 | return -EINVAL; | ||
1318 | |||
1319 | i830_dma_dispatch_clear(dev, clear->flags, | ||
1320 | clear->clear_color, | ||
1321 | clear->clear_depth, clear->clear_depthmask); | ||
1322 | return 0; | ||
1323 | } | ||
1324 | |||
1325 | static int i830_swap_bufs(struct drm_device *dev, void *data, | ||
1326 | struct drm_file *file_priv) | ||
1327 | { | ||
1328 | DRM_DEBUG("i830_swap_bufs\n"); | ||
1329 | |||
1330 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1331 | |||
1332 | i830_dma_dispatch_swap(dev); | ||
1333 | return 0; | ||
1334 | } | ||
1335 | |||
1336 | /* Not sure why this isn't set all the time: | ||
1337 | */ | ||
1338 | static void i830_do_init_pageflip(struct drm_device *dev) | ||
1339 | { | ||
1340 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1341 | |||
1342 | DRM_DEBUG("%s\n", __func__); | ||
1343 | dev_priv->page_flipping = 1; | ||
1344 | dev_priv->current_page = 0; | ||
1345 | dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; | ||
1346 | } | ||
1347 | |||
1348 | static int i830_do_cleanup_pageflip(struct drm_device *dev) | ||
1349 | { | ||
1350 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1351 | |||
1352 | DRM_DEBUG("%s\n", __func__); | ||
1353 | if (dev_priv->current_page != 0) | ||
1354 | i830_dma_dispatch_flip(dev); | ||
1355 | |||
1356 | dev_priv->page_flipping = 0; | ||
1357 | return 0; | ||
1358 | } | ||
1359 | |||
1360 | static int i830_flip_bufs(struct drm_device *dev, void *data, | ||
1361 | struct drm_file *file_priv) | ||
1362 | { | ||
1363 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1364 | |||
1365 | DRM_DEBUG("%s\n", __func__); | ||
1366 | |||
1367 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1368 | |||
1369 | if (!dev_priv->page_flipping) | ||
1370 | i830_do_init_pageflip(dev); | ||
1371 | |||
1372 | i830_dma_dispatch_flip(dev); | ||
1373 | return 0; | ||
1374 | } | ||
1375 | |||
1376 | static int i830_getage(struct drm_device *dev, void *data, | ||
1377 | struct drm_file *file_priv) | ||
1378 | { | ||
1379 | drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; | ||
1380 | u32 *hw_status = dev_priv->hw_status_page; | ||
1381 | drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) | ||
1382 | dev_priv->sarea_priv; | ||
1383 | |||
1384 | sarea_priv->last_dispatch = (int)hw_status[5]; | ||
1385 | return 0; | ||
1386 | } | ||
1387 | |||
1388 | static int i830_getbuf(struct drm_device *dev, void *data, | ||
1389 | struct drm_file *file_priv) | ||
1390 | { | ||
1391 | int retcode = 0; | ||
1392 | drm_i830_dma_t *d = data; | ||
1393 | drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; | ||
1394 | u32 *hw_status = dev_priv->hw_status_page; | ||
1395 | drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) | ||
1396 | dev_priv->sarea_priv; | ||
1397 | |||
1398 | DRM_DEBUG("getbuf\n"); | ||
1399 | |||
1400 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1401 | |||
1402 | d->granted = 0; | ||
1403 | |||
1404 | retcode = i830_dma_get_buffer(dev, d, file_priv); | ||
1405 | |||
1406 | DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n", | ||
1407 | task_pid_nr(current), retcode, d->granted); | ||
1408 | |||
1409 | sarea_priv->last_dispatch = (int)hw_status[5]; | ||
1410 | |||
1411 | return retcode; | ||
1412 | } | ||
1413 | |||
1414 | static int i830_copybuf(struct drm_device *dev, void *data, | ||
1415 | struct drm_file *file_priv) | ||
1416 | { | ||
1417 | /* Never copy - 2.4.x doesn't need it */ | ||
1418 | return 0; | ||
1419 | } | ||
1420 | |||
1421 | static int i830_docopy(struct drm_device *dev, void *data, | ||
1422 | struct drm_file *file_priv) | ||
1423 | { | ||
1424 | return 0; | ||
1425 | } | ||
1426 | |||
1427 | static int i830_getparam(struct drm_device *dev, void *data, | ||
1428 | struct drm_file *file_priv) | ||
1429 | { | ||
1430 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1431 | drm_i830_getparam_t *param = data; | ||
1432 | int value; | ||
1433 | |||
1434 | if (!dev_priv) { | ||
1435 | DRM_ERROR("%s called with no initialization\n", __func__); | ||
1436 | return -EINVAL; | ||
1437 | } | ||
1438 | |||
1439 | switch (param->param) { | ||
1440 | case I830_PARAM_IRQ_ACTIVE: | ||
1441 | value = dev->irq_enabled; | ||
1442 | break; | ||
1443 | default: | ||
1444 | return -EINVAL; | ||
1445 | } | ||
1446 | |||
1447 | if (copy_to_user(param->value, &value, sizeof(int))) { | ||
1448 | DRM_ERROR("copy_to_user\n"); | ||
1449 | return -EFAULT; | ||
1450 | } | ||
1451 | |||
1452 | return 0; | ||
1453 | } | ||
1454 | |||
1455 | static int i830_setparam(struct drm_device *dev, void *data, | ||
1456 | struct drm_file *file_priv) | ||
1457 | { | ||
1458 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1459 | drm_i830_setparam_t *param = data; | ||
1460 | |||
1461 | if (!dev_priv) { | ||
1462 | DRM_ERROR("%s called with no initialization\n", __func__); | ||
1463 | return -EINVAL; | ||
1464 | } | ||
1465 | |||
1466 | switch (param->param) { | ||
1467 | case I830_SETPARAM_USE_MI_BATCHBUFFER_START: | ||
1468 | dev_priv->use_mi_batchbuffer_start = param->value; | ||
1469 | break; | ||
1470 | default: | ||
1471 | return -EINVAL; | ||
1472 | } | ||
1473 | |||
1474 | return 0; | ||
1475 | } | ||
1476 | |||
1477 | int i830_driver_load(struct drm_device *dev, unsigned long flags) | ||
1478 | { | ||
1479 | /* i830 has 4 more counters */ | ||
1480 | dev->counters += 4; | ||
1481 | dev->types[6] = _DRM_STAT_IRQ; | ||
1482 | dev->types[7] = _DRM_STAT_PRIMARY; | ||
1483 | dev->types[8] = _DRM_STAT_SECONDARY; | ||
1484 | dev->types[9] = _DRM_STAT_DMA; | ||
1485 | |||
1486 | return 0; | ||
1487 | } | ||
1488 | |||
1489 | void i830_driver_lastclose(struct drm_device *dev) | ||
1490 | { | ||
1491 | i830_dma_cleanup(dev); | ||
1492 | } | ||
1493 | |||
1494 | void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) | ||
1495 | { | ||
1496 | if (dev->dev_private) { | ||
1497 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
1498 | if (dev_priv->page_flipping) | ||
1499 | i830_do_cleanup_pageflip(dev); | ||
1500 | } | ||
1501 | } | ||
1502 | |||
1503 | void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv) | ||
1504 | { | ||
1505 | i830_reclaim_buffers(dev, file_priv); | ||
1506 | } | ||
1507 | |||
1508 | int i830_driver_dma_quiescent(struct drm_device *dev) | ||
1509 | { | ||
1510 | i830_dma_quiescent(dev); | ||
1511 | return 0; | ||
1512 | } | ||
1513 | |||
1514 | /* | ||
1515 | * Call drm_ioctl under the big kernel lock in order to | ||
1516 | * lock against the i830_mmap_buffers() function. | ||
1517 | */ | ||
1518 | long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
1519 | { | ||
1520 | int ret; | ||
1521 | lock_kernel(); | ||
1522 | ret = drm_ioctl(file, cmd, arg); | ||
1523 | unlock_kernel(); | ||
1524 | return ret; | ||
1525 | } | ||
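The BKL dependency is part of why this driver is being removed, so it is worth noting that the serialization above needs nothing BKL-specific — a minimal sketch of a mutex-based equivalent (hypothetical, assuming <linux/mutex.h>; the driver never did this):

/* Hypothetical BKL replacement: a driver-private mutex taken by
 * both this wrapper and i830_mmap_buffers() would provide the same
 * mutual exclusion without the big kernel lock.
 */
static DEFINE_MUTEX(i830_ioctl_mutex);

static long i830_ioctl_nobkl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	long ret;

	mutex_lock(&i830_ioctl_mutex);
	ret = drm_ioctl(file, cmd, arg);
	mutex_unlock(&i830_ioctl_mutex);
	return ret;
}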
1526 | |||
1527 | struct drm_ioctl_desc i830_ioctls[] = { | ||
1528 | DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | ||
1529 | DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), | ||
1530 | DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), | ||
1531 | DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), | ||
1532 | DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), | ||
1533 | DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), | ||
1534 | DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), | ||
1535 | DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), | ||
1536 | DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), | ||
1537 | DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), | ||
1538 | DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), | ||
1539 | DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), | ||
1540 | DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), | ||
1541 | DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), | ||
1542 | }; | ||
1543 | |||
1544 | int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); | ||
1545 | |||
1546 | /** | ||
1547 | * Determine if the device really is AGP or not. | ||
1548 | * | ||
1549 | * All Intel graphics chipsets are treated as AGP, even if they are really | ||
1550 | * PCI-e. | ||
1551 | * | ||
1552 | * \param dev The device to be tested. | ||
1553 | * | ||
1554 | * \returns | ||
1555 | * A value of 1 is always returned to indicate every i8xx is AGP. | ||
1556 | */ | ||
1557 | int i830_driver_device_is_agp(struct drm_device *dev) | ||
1558 | { | ||
1559 | return 1; | ||
1560 | } | ||
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c deleted file mode 100644 index f655ab7977da..000000000000 --- a/drivers/gpu/drm/i830/i830_drv.c +++ /dev/null | |||
@@ -1,107 +0,0 @@ | |||
1 | /* i830_drv.c -- I830 driver -*- linux-c -*- | ||
2 | * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Jeff Hartmann <jhartmann@valinux.com> | ||
30 | * Gareth Hughes <gareth@valinux.com> | ||
31 | * Abraham vd Merwe <abraham@2d3d.co.za> | ||
32 | * Keith Whitwell <keith@tungstengraphics.com> | ||
33 | */ | ||
34 | |||
35 | #include "drmP.h" | ||
36 | #include "drm.h" | ||
37 | #include "i830_drm.h" | ||
38 | #include "i830_drv.h" | ||
39 | |||
40 | #include "drm_pciids.h" | ||
41 | |||
42 | static struct pci_device_id pciidlist[] = { | ||
43 | i830_PCI_IDS | ||
44 | }; | ||
45 | |||
46 | static struct drm_driver driver = { | ||
47 | .driver_features = | ||
48 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | | ||
49 | DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, | ||
50 | #if USE_IRQS | ||
51 | .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ, | ||
52 | #endif | ||
53 | .dev_priv_size = sizeof(drm_i830_buf_priv_t), | ||
54 | .load = i830_driver_load, | ||
55 | .lastclose = i830_driver_lastclose, | ||
56 | .preclose = i830_driver_preclose, | ||
57 | .device_is_agp = i830_driver_device_is_agp, | ||
58 | .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked, | ||
59 | .dma_quiescent = i830_driver_dma_quiescent, | ||
60 | #if USE_IRQS | ||
61 | .irq_preinstall = i830_driver_irq_preinstall, | ||
62 | .irq_postinstall = i830_driver_irq_postinstall, | ||
63 | .irq_uninstall = i830_driver_irq_uninstall, | ||
64 | .irq_handler = i830_driver_irq_handler, | ||
65 | #endif | ||
66 | .ioctls = i830_ioctls, | ||
67 | .fops = { | ||
68 | .owner = THIS_MODULE, | ||
69 | .open = drm_open, | ||
70 | .release = drm_release, | ||
71 | .unlocked_ioctl = i830_ioctl, | ||
72 | .mmap = drm_mmap, | ||
73 | .poll = drm_poll, | ||
74 | .fasync = drm_fasync, | ||
75 | .llseek = noop_llseek, | ||
76 | }, | ||
77 | |||
78 | .pci_driver = { | ||
79 | .name = DRIVER_NAME, | ||
80 | .id_table = pciidlist, | ||
81 | }, | ||
82 | |||
83 | .name = DRIVER_NAME, | ||
84 | .desc = DRIVER_DESC, | ||
85 | .date = DRIVER_DATE, | ||
86 | .major = DRIVER_MAJOR, | ||
87 | .minor = DRIVER_MINOR, | ||
88 | .patchlevel = DRIVER_PATCHLEVEL, | ||
89 | }; | ||
90 | |||
91 | static int __init i830_init(void) | ||
92 | { | ||
93 | driver.num_ioctls = i830_max_ioctl; | ||
94 | return drm_init(&driver); | ||
95 | } | ||
96 | |||
97 | static void __exit i830_exit(void) | ||
98 | { | ||
99 | drm_exit(&driver); | ||
100 | } | ||
101 | |||
102 | module_init(i830_init); | ||
103 | module_exit(i830_exit); | ||
104 | |||
105 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
106 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
107 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h deleted file mode 100644 index 0df1c720560b..000000000000 --- a/drivers/gpu/drm/i830/i830_drv.h +++ /dev/null | |||
@@ -1,295 +0,0 @@ | |||
1 | /* i830_drv.h -- Private header for the I830 driver -*- linux-c -*- | ||
2 | * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: Rickard E. (Rik) Faith <faith@valinux.com> | ||
28 | * Jeff Hartmann <jhartmann@valinux.com> | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #ifndef _I830_DRV_H_ | ||
33 | #define _I830_DRV_H_ | ||
34 | |||
35 | /* General customization: | ||
36 | */ | ||
37 | |||
38 | #define DRIVER_AUTHOR "VA Linux Systems Inc." | ||
39 | |||
40 | #define DRIVER_NAME "i830" | ||
41 | #define DRIVER_DESC "Intel 830M" | ||
42 | #define DRIVER_DATE "20021108" | ||
43 | |||
44 | /* Interface history: | ||
45 | * | ||
46 | * 1.1: Original. | ||
47 | * 1.2: ? | ||
48 | * 1.3: New irq emit/wait ioctls. | ||
49 | * New pageflip ioctl. | ||
50 | * New getparam ioctl. | ||
51 | * State for texunits 3&4 in sarea. | ||
52 | * New (alternative) layout for texture state. | ||
53 | */ | ||
54 | #define DRIVER_MAJOR 1 | ||
55 | #define DRIVER_MINOR 3 | ||
56 | #define DRIVER_PATCHLEVEL 2 | ||
57 | |||
58 | /* The driver will work either way: IRQs save CPU time when waiting for | ||
59 | * the card, but are subject to subtle interactions between the BIOS, | ||
60 | * the hardware and the driver. | ||
61 | */ | ||
62 | /* XXX: Add vblank support? */ | ||
63 | #define USE_IRQS 0 | ||
64 | |||
65 | typedef struct drm_i830_buf_priv { | ||
66 | u32 *in_use; | ||
67 | int my_use_idx; | ||
68 | int currently_mapped; | ||
69 | void __user *virtual; | ||
70 | void *kernel_virtual; | ||
71 | drm_local_map_t map; | ||
72 | } drm_i830_buf_priv_t; | ||
73 | |||
74 | typedef struct _drm_i830_ring_buffer { | ||
75 | int tail_mask; | ||
76 | unsigned long Start; | ||
77 | unsigned long End; | ||
78 | unsigned long Size; | ||
79 | u8 *virtual_start; | ||
80 | int head; | ||
81 | int tail; | ||
82 | int space; | ||
83 | drm_local_map_t map; | ||
84 | } drm_i830_ring_buffer_t; | ||
85 | |||
86 | typedef struct drm_i830_private { | ||
87 | struct drm_local_map *sarea_map; | ||
88 | struct drm_local_map *mmio_map; | ||
89 | |||
90 | drm_i830_sarea_t *sarea_priv; | ||
91 | drm_i830_ring_buffer_t ring; | ||
92 | |||
93 | void *hw_status_page; | ||
94 | unsigned long counter; | ||
95 | |||
96 | dma_addr_t dma_status_page; | ||
97 | |||
98 | struct drm_buf *mmap_buffer; | ||
99 | |||
100 | u32 front_di1, back_di1, zi1; | ||
101 | |||
102 | int back_offset; | ||
103 | int depth_offset; | ||
104 | int front_offset; | ||
105 | int w, h; | ||
106 | int pitch; | ||
107 | int back_pitch; | ||
108 | int depth_pitch; | ||
109 | unsigned int cpp; | ||
110 | |||
111 | int do_boxes; | ||
112 | int dma_used; | ||
113 | |||
114 | int current_page; | ||
115 | int page_flipping; | ||
116 | |||
117 | wait_queue_head_t irq_queue; | ||
118 | atomic_t irq_received; | ||
119 | atomic_t irq_emitted; | ||
120 | |||
121 | int use_mi_batchbuffer_start; | ||
122 | |||
123 | } drm_i830_private_t; | ||
124 | |||
125 | long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | ||
126 | extern struct drm_ioctl_desc i830_ioctls[]; | ||
127 | extern int i830_max_ioctl; | ||
128 | |||
129 | /* i830_irq.c */ | ||
130 | extern int i830_irq_emit(struct drm_device *dev, void *data, | ||
131 | struct drm_file *file_priv); | ||
132 | extern int i830_irq_wait(struct drm_device *dev, void *data, | ||
133 | struct drm_file *file_priv); | ||
134 | |||
135 | extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS); | ||
136 | extern void i830_driver_irq_preinstall(struct drm_device *dev); | ||
137 | extern void i830_driver_irq_postinstall(struct drm_device *dev); | ||
138 | extern void i830_driver_irq_uninstall(struct drm_device *dev); | ||
139 | extern int i830_driver_load(struct drm_device *, unsigned long flags); | ||
140 | extern void i830_driver_preclose(struct drm_device *dev, | ||
141 | struct drm_file *file_priv); | ||
142 | extern void i830_driver_lastclose(struct drm_device *dev); | ||
143 | extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev, | ||
144 | struct drm_file *file_priv); | ||
145 | extern int i830_driver_dma_quiescent(struct drm_device *dev); | ||
146 | extern int i830_driver_device_is_agp(struct drm_device *dev); | ||
147 | |||
148 | #define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg) | ||
149 | #define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val) | ||
150 | #define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg) | ||
151 | #define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val) | ||
152 | |||
153 | #define I830_VERBOSE 0 | ||
154 | |||
155 | #define RING_LOCALS unsigned int outring, ringmask, outcount; \ | ||
156 | volatile char *virt; | ||
157 | |||
158 | #define BEGIN_LP_RING(n) do { \ | ||
159 | if (I830_VERBOSE) \ | ||
160 | printk("BEGIN_LP_RING(%d)\n", (n)); \ | ||
161 | if (dev_priv->ring.space < n*4) \ | ||
162 | i830_wait_ring(dev, n*4, __func__); \ | ||
163 | outcount = 0; \ | ||
164 | outring = dev_priv->ring.tail; \ | ||
165 | ringmask = dev_priv->ring.tail_mask; \ | ||
166 | virt = dev_priv->ring.virtual_start; \ | ||
167 | } while (0) | ||
168 | |||
169 | #define OUT_RING(n) do { \ | ||
170 | if (I830_VERBOSE) \ | ||
171 | printk(" OUT_RING %x\n", (int)(n)); \ | ||
172 | *(volatile unsigned int *)(virt + outring) = n; \ | ||
173 | outcount++; \ | ||
174 | outring += 4; \ | ||
175 | outring &= ringmask; \ | ||
176 | } while (0) | ||
177 | |||
178 | #define ADVANCE_LP_RING() do { \ | ||
179 | if (I830_VERBOSE) \ | ||
180 | printk("ADVANCE_LP_RING %x\n", outring); \ | ||
181 | dev_priv->ring.tail = outring; \ | ||
182 | dev_priv->ring.space -= outcount * 4; \ | ||
183 | I830_WRITE(LP_RING + RING_TAIL, outring); \ | ||
184 | } while (0) | ||
185 | |||
186 | extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller); | ||
187 | |||
188 | #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) | ||
189 | #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) | ||
190 | #define CMD_REPORT_HEAD (7<<23) | ||
191 | #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) | ||
192 | #define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) | ||
193 | |||
194 | #define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16)) | ||
195 | #define LOAD_TEXTURE_MAP0 (1<<11) | ||
196 | |||
197 | #define INST_PARSER_CLIENT 0x00000000 | ||
198 | #define INST_OP_FLUSH 0x02000000 | ||
199 | #define INST_FLUSH_MAP_CACHE 0x00000001 | ||
200 | |||
201 | #define BB1_START_ADDR_MASK (~0x7) | ||
202 | #define BB1_PROTECTED (1<<0) | ||
203 | #define BB1_UNPROTECTED (0<<0) | ||
204 | #define BB2_END_ADDR_MASK (~0x7) | ||
205 | |||
206 | #define I830REG_HWSTAM 0x02098 | ||
207 | #define I830REG_INT_IDENTITY_R 0x020a4 | ||
208 | #define I830REG_INT_MASK_R 0x020a8 | ||
209 | #define I830REG_INT_ENABLE_R 0x020a0 | ||
210 | |||
211 | #define I830_IRQ_RESERVED ((1<<13)|(3<<2)) | ||
212 | |||
213 | #define LP_RING 0x2030 | ||
214 | #define HP_RING 0x2040 | ||
215 | #define RING_TAIL 0x00 | ||
216 | #define TAIL_ADDR 0x001FFFF8 | ||
217 | #define RING_HEAD 0x04 | ||
218 | #define HEAD_WRAP_COUNT 0xFFE00000 | ||
219 | #define HEAD_WRAP_ONE 0x00200000 | ||
220 | #define HEAD_ADDR 0x001FFFFC | ||
221 | #define RING_START 0x08 | ||
222 | #define START_ADDR 0xFFFFF000 | ||
223 | #define RING_LEN 0x0C | ||
224 | #define RING_NR_PAGES 0x001FF000 | ||
225 | #define RING_REPORT_MASK 0x00000006 | ||
226 | #define RING_REPORT_64K 0x00000002 | ||
227 | #define RING_REPORT_128K 0x00000004 | ||
228 | #define RING_NO_REPORT 0x00000000 | ||
229 | #define RING_VALID_MASK 0x00000001 | ||
230 | #define RING_VALID 0x00000001 | ||
231 | #define RING_INVALID 0x00000000 | ||
232 | |||
233 | #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) | ||
234 | #define SC_UPDATE_SCISSOR (0x1<<1) | ||
235 | #define SC_ENABLE_MASK (0x1<<0) | ||
236 | #define SC_ENABLE (0x1<<0) | ||
237 | |||
238 | #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) | ||
239 | #define SCI_YMIN_MASK (0xffff<<16) | ||
240 | #define SCI_XMIN_MASK (0xffff<<0) | ||
241 | #define SCI_YMAX_MASK (0xffff<<16) | ||
242 | #define SCI_XMAX_MASK (0xffff<<0) | ||
243 | |||
244 | #define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) | ||
245 | #define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) | ||
246 | #define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) | ||
247 | #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) | ||
248 | #define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) | ||
249 | #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) | ||
250 | #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) | ||
251 | #define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24)) | ||
252 | |||
253 | #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) | ||
254 | |||
255 | #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) | ||
256 | #define ASYNC_FLIP (1<<22) | ||
257 | |||
258 | #define CMD_3D (0x3<<29) | ||
259 | #define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16)) | ||
260 | #define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16)) | ||
261 | |||
262 | #define BR00_BITBLT_CLIENT 0x40000000 | ||
263 | #define BR00_OP_COLOR_BLT 0x10000000 | ||
264 | #define BR00_OP_SRC_COPY_BLT 0x10C00000 | ||
265 | #define BR13_SOLID_PATTERN 0x80000000 | ||
266 | |||
267 | #define BUF_3D_ID_COLOR_BACK (0x3<<24) | ||
268 | #define BUF_3D_ID_DEPTH (0x7<<24) | ||
269 | #define BUF_3D_USE_FENCE (1<<23) | ||
270 | #define BUF_3D_PITCH(x) (((x)/4)<<2) | ||
271 | |||
272 | #define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255) | ||
273 | #define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8)) | ||
274 | #define MAP_PALETTE_BOTH (1<<11) | ||
275 | |||
276 | #define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4) | ||
277 | #define XY_COLOR_BLT_WRITE_ALPHA (1<<21) | ||
278 | #define XY_COLOR_BLT_WRITE_RGB (1<<20) | ||
279 | |||
280 | #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) | ||
281 | #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) | ||
282 | #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) | ||
283 | |||
284 | #define MI_BATCH_BUFFER ((0x30<<23)|1) | ||
285 | #define MI_BATCH_BUFFER_START (0x31<<23) | ||
286 | #define MI_BATCH_BUFFER_END (0xA<<23) | ||
287 | #define MI_BATCH_NON_SECURE (1) | ||
288 | |||
289 | #define MI_WAIT_FOR_EVENT ((0x3<<23)) | ||
290 | #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) | ||
291 | #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) | ||
292 | |||
293 | #define MI_LOAD_SCAN_LINES_INCL ((0x12<<23)) | ||
294 | |||
295 | #endif | ||
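
Taken together, the ring macros removed above implement a small single-producer protocol: BEGIN_LP_RING(n) reserves n dwords (spinning in i830_wait_ring() when the ring is full), OUT_RING() stores one dword at the local tail and advances it under the ring mask, and ADVANCE_LP_RING() publishes the new tail to the LP_RING + RING_TAIL register so the hardware can consume the commands. A minimal sketch of an emit path built on them, assuming the i830 types and macros above are in scope — example_emit_flush is an illustrative name, not a driver symbol:

    static void example_emit_flush(struct drm_device *dev)
    {
    	drm_i830_private_t *dev_priv = dev->dev_private;
    	RING_LOCALS;		/* declares outring, ringmask, outcount, virt */

    	BEGIN_LP_RING(2);	/* wait until at least 8 bytes are free */
    	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
    	OUT_RING(0);		/* padding dword, as the driver's own emit paths use */
    	ADVANCE_LP_RING();	/* commit: update ring.tail and the tail register */
    }

The same shape appears in i830_emit_irq() in the next file, which queues GFX_OP_USER_INTERRUPT instead of a flush.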
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c deleted file mode 100644 index d1a6b95d631d..000000000000 --- a/drivers/gpu/drm/i830/i830_irq.c +++ /dev/null | |||
@@ -1,186 +0,0 @@ | |||
1 | /* i830_irq.c -- IRQ support for the I830 -*- linux-c -*- | ||
2 | * | ||
3 | * Copyright 2002 Tungsten Graphics, Inc. | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the next | ||
14 | * paragraph) shall be included in all copies or substantial portions of the | ||
15 | * Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
20 | * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
23 | * DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | * Authors: Keith Whitwell <keith@tungstengraphics.com> | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "i830_drm.h" | ||
32 | #include "i830_drv.h" | ||
33 | #include <linux/interrupt.h> /* For task queue support */ | ||
34 | #include <linux/delay.h> | ||
35 | |||
36 | irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS) | ||
37 | { | ||
38 | struct drm_device *dev = (struct drm_device *) arg; | ||
39 | drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; | ||
40 | u16 temp; | ||
41 | |||
42 | temp = I830_READ16(I830REG_INT_IDENTITY_R); | ||
43 | DRM_DEBUG("%x\n", temp); | ||
44 | |||
45 | if (!(temp & 2)) | ||
46 | return IRQ_NONE; | ||
47 | |||
48 | I830_WRITE16(I830REG_INT_IDENTITY_R, temp); | ||
49 | |||
50 | atomic_inc(&dev_priv->irq_received); | ||
51 | wake_up_interruptible(&dev_priv->irq_queue); | ||
52 | |||
53 | return IRQ_HANDLED; | ||
54 | } | ||
55 | |||
56 | static int i830_emit_irq(struct drm_device *dev) | ||
57 | { | ||
58 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
59 | RING_LOCALS; | ||
60 | |||
61 | DRM_DEBUG("%s\n", __func__); | ||
62 | |||
63 | atomic_inc(&dev_priv->irq_emitted); | ||
64 | |||
65 | BEGIN_LP_RING(2); | ||
66 | OUT_RING(0); | ||
67 | OUT_RING(GFX_OP_USER_INTERRUPT); | ||
68 | ADVANCE_LP_RING(); | ||
69 | |||
70 | return atomic_read(&dev_priv->irq_emitted); | ||
71 | } | ||
72 | |||
73 | static int i830_wait_irq(struct drm_device *dev, int irq_nr) | ||
74 | { | ||
75 | drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; | ||
76 | DECLARE_WAITQUEUE(entry, current); | ||
77 | unsigned long end = jiffies + HZ * 3; | ||
78 | int ret = 0; | ||
79 | |||
80 | DRM_DEBUG("%s\n", __func__); | ||
81 | |||
82 | if (atomic_read(&dev_priv->irq_received) >= irq_nr) | ||
83 | return 0; | ||
84 | |||
85 | dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; | ||
86 | |||
87 | add_wait_queue(&dev_priv->irq_queue, &entry); | ||
88 | |||
89 | for (;;) { | ||
90 | __set_current_state(TASK_INTERRUPTIBLE); | ||
91 | if (atomic_read(&dev_priv->irq_received) >= irq_nr) | ||
92 | break; | ||
93 | if ((signed)(end - jiffies) <= 0) { | ||
94 | DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n", | ||
95 | I830_READ16(I830REG_INT_IDENTITY_R), | ||
96 | I830_READ16(I830REG_INT_MASK_R), | ||
97 | I830_READ16(I830REG_INT_ENABLE_R), | ||
98 | I830_READ16(I830REG_HWSTAM)); | ||
99 | |||
100 | ret = -EBUSY; /* Lockup? Missed irq? */ | ||
101 | break; | ||
102 | } | ||
103 | schedule_timeout(HZ * 3); | ||
104 | if (signal_pending(current)) { | ||
105 | ret = -EINTR; | ||
106 | break; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | __set_current_state(TASK_RUNNING); | ||
111 | remove_wait_queue(&dev_priv->irq_queue, &entry); | ||
112 | return ret; | ||
113 | } | ||
114 | |||
115 | /* Needs the lock as it touches the ring. | ||
116 | */ | ||
117 | int i830_irq_emit(struct drm_device *dev, void *data, | ||
118 | struct drm_file *file_priv) | ||
119 | { | ||
120 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
121 | drm_i830_irq_emit_t *emit = data; | ||
122 | int result; | ||
123 | |||
124 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
125 | |||
126 | if (!dev_priv) { | ||
127 | DRM_ERROR("%s called with no initialization\n", __func__); | ||
128 | return -EINVAL; | ||
129 | } | ||
130 | |||
131 | result = i830_emit_irq(dev); | ||
132 | |||
133 | if (copy_to_user(emit->irq_seq, &result, sizeof(int))) { | ||
134 | DRM_ERROR("copy_to_user\n"); | ||
135 | return -EFAULT; | ||
136 | } | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | /* Doesn't need the hardware lock. | ||
142 | */ | ||
143 | int i830_irq_wait(struct drm_device *dev, void *data, | ||
144 | struct drm_file *file_priv) | ||
145 | { | ||
146 | drm_i830_private_t *dev_priv = dev->dev_private; | ||
147 | drm_i830_irq_wait_t *irqwait = data; | ||
148 | |||
149 | if (!dev_priv) { | ||
150 | DRM_ERROR("%s called with no initialization\n", __func__); | ||
151 | return -EINVAL; | ||
152 | } | ||
153 | |||
154 | return i830_wait_irq(dev, irqwait->irq_seq); | ||
155 | } | ||
156 | |||
157 | /* drm_dma.h hooks | ||
158 | */ | ||
159 | void i830_driver_irq_preinstall(struct drm_device *dev) | ||
160 | { | ||
161 | drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; | ||
162 | |||
163 | I830_WRITE16(I830REG_HWSTAM, 0xffff); | ||
164 | I830_WRITE16(I830REG_INT_MASK_R, 0x0); | ||
165 | I830_WRITE16(I830REG_INT_ENABLE_R, 0x0); | ||
166 | atomic_set(&dev_priv->irq_received, 0); | ||
167 | atomic_set(&dev_priv->irq_emitted, 0); | ||
168 | init_waitqueue_head(&dev_priv->irq_queue); | ||
169 | } | ||
170 | |||
171 | void i830_driver_irq_postinstall(struct drm_device *dev) | ||
172 | { | ||
173 | drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; | ||
174 | |||
175 | I830_WRITE16(I830REG_INT_ENABLE_R, 0x2); | ||
176 | } | ||
177 | |||
178 | void i830_driver_irq_uninstall(struct drm_device *dev) | ||
179 | { | ||
180 | drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; | ||
181 | if (!dev_priv) | ||
182 | return; | ||
183 | |||
184 | I830_WRITE16(I830REG_INT_MASK_R, 0xffff); | ||
185 | I830_WRITE16(I830REG_INT_ENABLE_R, 0x0); | ||
186 | } | ||
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index fdc833d5cc7b..0ae6a7c5020f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
9 | i915_gem.o \ | 9 | i915_gem.o \ |
10 | i915_gem_debug.o \ | 10 | i915_gem_debug.o \ |
11 | i915_gem_evict.o \ | 11 | i915_gem_evict.o \ |
12 | i915_gem_execbuffer.o \ | ||
13 | i915_gem_gtt.o \ | ||
12 | i915_gem_tiling.o \ | 14 | i915_gem_tiling.o \ |
13 | i915_trace_points.o \ | 15 | i915_trace_points.o \ |
14 | intel_display.o \ | 16 | intel_display.o \ |
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index af70337567ce..d3e8c540f778 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c | |||
@@ -242,7 +242,7 @@ fail: | |||
242 | 242 | ||
243 | static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo) | 243 | static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo) |
244 | { | 244 | { |
245 | return connector_status_unknown; | 245 | return connector_status_connected; |
246 | } | 246 | } |
247 | 247 | ||
248 | static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo, | 248 | static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo, |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1f4f3ceb63c7..3601466c5502 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | #include "drm.h" | 33 | #include "drm.h" |
34 | #include "intel_drv.h" | 34 | #include "intel_drv.h" |
35 | #include "intel_ringbuffer.h" | ||
35 | #include "i915_drm.h" | 36 | #include "i915_drm.h" |
36 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
37 | 38 | ||
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data) | |||
72 | B(is_broadwater); | 73 | B(is_broadwater); |
73 | B(is_crestline); | 74 | B(is_crestline); |
74 | B(has_fbc); | 75 | B(has_fbc); |
75 | B(has_rc6); | ||
76 | B(has_pipe_cxsr); | 76 | B(has_pipe_cxsr); |
77 | B(has_hotplug); | 77 | B(has_hotplug); |
78 | B(cursor_needs_physical); | 78 | B(cursor_needs_physical); |
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data) | |||
86 | return 0; | 86 | return 0; |
87 | } | 87 | } |
88 | 88 | ||
89 | static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) | 89 | static const char *get_pin_flag(struct drm_i915_gem_object *obj) |
90 | { | 90 | { |
91 | if (obj_priv->user_pin_count > 0) | 91 | if (obj->user_pin_count > 0) |
92 | return "P"; | 92 | return "P"; |
93 | else if (obj_priv->pin_count > 0) | 93 | else if (obj->pin_count > 0) |
94 | return "p"; | 94 | return "p"; |
95 | else | 95 | else |
96 | return " "; | 96 | return " "; |
97 | } | 97 | } |
98 | 98 | ||
99 | static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) | 99 | static const char *get_tiling_flag(struct drm_i915_gem_object *obj) |
100 | { | 100 | { |
101 | switch (obj_priv->tiling_mode) { | 101 | switch (obj->tiling_mode) { |
102 | default: | 102 | default: |
103 | case I915_TILING_NONE: return " "; | 103 | case I915_TILING_NONE: return " "; |
104 | case I915_TILING_X: return "X"; | 104 | case I915_TILING_X: return "X"; |
@@ -106,10 +106,19 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) | |||
106 | } | 106 | } |
107 | } | 107 | } |
108 | 108 | ||
109 | static const char *agp_type_str(int type) | ||
110 | { | ||
111 | switch (type) { | ||
112 | case 0: return " uncached"; | ||
113 | case 1: return " snooped"; | ||
114 | default: return ""; | ||
115 | } | ||
116 | } | ||
117 | |||
109 | static void | 118 | static void |
110 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | 119 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) |
111 | { | 120 | { |
112 | seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s", | 121 | seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", |
113 | &obj->base, | 122 | &obj->base, |
114 | get_pin_flag(obj), | 123 | get_pin_flag(obj), |
115 | get_tiling_flag(obj), | 124 | get_tiling_flag(obj), |
@@ -117,6 +126,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
117 | obj->base.read_domains, | 126 | obj->base.read_domains, |
118 | obj->base.write_domain, | 127 | obj->base.write_domain, |
119 | obj->last_rendering_seqno, | 128 | obj->last_rendering_seqno, |
129 | obj->last_fenced_seqno, | ||
130 | agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY), | ||
120 | obj->dirty ? " dirty" : "", | 131 | obj->dirty ? " dirty" : "", |
121 | obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); | 132 | obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); |
122 | if (obj->base.name) | 133 | if (obj->base.name) |
@@ -124,7 +135,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
124 | if (obj->fence_reg != I915_FENCE_REG_NONE) | 135 | if (obj->fence_reg != I915_FENCE_REG_NONE) |
125 | seq_printf(m, " (fence: %d)", obj->fence_reg); | 136 | seq_printf(m, " (fence: %d)", obj->fence_reg); |
126 | if (obj->gtt_space != NULL) | 137 | if (obj->gtt_space != NULL) |
127 | seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); | 138 | seq_printf(m, " (gtt offset: %08x, size: %08x)", |
139 | obj->gtt_offset, (unsigned int)obj->gtt_space->size); | ||
140 | if (obj->pin_mappable || obj->fault_mappable) { | ||
141 | char s[3], *t = s; | ||
142 | if (obj->pin_mappable) | ||
143 | *t++ = 'p'; | ||
144 | if (obj->fault_mappable) | ||
145 | *t++ = 'f'; | ||
146 | *t = '\0'; | ||
147 | seq_printf(m, " (%s mappable)", s); | ||
148 | } | ||
128 | if (obj->ring != NULL) | 149 | if (obj->ring != NULL) |
129 | seq_printf(m, " (%s)", obj->ring->name); | 150 | seq_printf(m, " (%s)", obj->ring->name); |
130 | } | 151 | } |
@@ -136,7 +157,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
136 | struct list_head *head; | 157 | struct list_head *head; |
137 | struct drm_device *dev = node->minor->dev; | 158 | struct drm_device *dev = node->minor->dev; |
138 | drm_i915_private_t *dev_priv = dev->dev_private; | 159 | drm_i915_private_t *dev_priv = dev->dev_private; |
139 | struct drm_i915_gem_object *obj_priv; | 160 | struct drm_i915_gem_object *obj; |
140 | size_t total_obj_size, total_gtt_size; | 161 | size_t total_obj_size, total_gtt_size; |
141 | int count, ret; | 162 | int count, ret; |
142 | 163 | ||
@@ -171,12 +192,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
171 | } | 192 | } |
172 | 193 | ||
173 | total_obj_size = total_gtt_size = count = 0; | 194 | total_obj_size = total_gtt_size = count = 0; |
174 | list_for_each_entry(obj_priv, head, mm_list) { | 195 | list_for_each_entry(obj, head, mm_list) { |
175 | seq_printf(m, " "); | 196 | seq_printf(m, " "); |
176 | describe_obj(m, obj_priv); | 197 | describe_obj(m, obj); |
177 | seq_printf(m, "\n"); | 198 | seq_printf(m, "\n"); |
178 | total_obj_size += obj_priv->base.size; | 199 | total_obj_size += obj->base.size; |
179 | total_gtt_size += obj_priv->gtt_space->size; | 200 | total_gtt_size += obj->gtt_space->size; |
180 | count++; | 201 | count++; |
181 | } | 202 | } |
182 | mutex_unlock(&dev->struct_mutex); | 203 | mutex_unlock(&dev->struct_mutex); |
@@ -186,30 +207,116 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
186 | return 0; | 207 | return 0; |
187 | } | 208 | } |
188 | 209 | ||
210 | #define count_objects(list, member) do { \ | ||
211 | list_for_each_entry(obj, list, member) { \ | ||
212 | size += obj->gtt_space->size; \ | ||
213 | ++count; \ | ||
214 | if (obj->map_and_fenceable) { \ | ||
215 | mappable_size += obj->gtt_space->size; \ | ||
216 | ++mappable_count; \ | ||
217 | } \ | ||
218 | } \ | ||
219 | } while(0) | ||
220 | |||
189 | static int i915_gem_object_info(struct seq_file *m, void* data) | 221 | static int i915_gem_object_info(struct seq_file *m, void* data) |
190 | { | 222 | { |
191 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 223 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
192 | struct drm_device *dev = node->minor->dev; | 224 | struct drm_device *dev = node->minor->dev; |
193 | struct drm_i915_private *dev_priv = dev->dev_private; | 225 | struct drm_i915_private *dev_priv = dev->dev_private; |
226 | u32 count, mappable_count; | ||
227 | size_t size, mappable_size; | ||
228 | struct drm_i915_gem_object *obj; | ||
194 | int ret; | 229 | int ret; |
195 | 230 | ||
196 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 231 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
197 | if (ret) | 232 | if (ret) |
198 | return ret; | 233 | return ret; |
199 | 234 | ||
200 | seq_printf(m, "%u objects\n", dev_priv->mm.object_count); | 235 | seq_printf(m, "%u objects, %zu bytes\n", |
201 | seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); | 236 | dev_priv->mm.object_count, |
202 | seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); | 237 | dev_priv->mm.object_memory); |
203 | seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); | 238 | |
204 | seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); | 239 | size = count = mappable_size = mappable_count = 0; |
205 | seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); | 240 | count_objects(&dev_priv->mm.gtt_list, gtt_list); |
206 | seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); | 241 | seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", |
242 | count, mappable_count, size, mappable_size); | ||
243 | |||
244 | size = count = mappable_size = mappable_count = 0; | ||
245 | count_objects(&dev_priv->mm.active_list, mm_list); | ||
246 | count_objects(&dev_priv->mm.flushing_list, mm_list); | ||
247 | seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", | ||
248 | count, mappable_count, size, mappable_size); | ||
249 | |||
250 | size = count = mappable_size = mappable_count = 0; | ||
251 | count_objects(&dev_priv->mm.pinned_list, mm_list); | ||
252 | seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", | ||
253 | count, mappable_count, size, mappable_size); | ||
254 | |||
255 | size = count = mappable_size = mappable_count = 0; | ||
256 | count_objects(&dev_priv->mm.inactive_list, mm_list); | ||
257 | seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", | ||
258 | count, mappable_count, size, mappable_size); | ||
259 | |||
260 | size = count = mappable_size = mappable_count = 0; | ||
261 | count_objects(&dev_priv->mm.deferred_free_list, mm_list); | ||
262 | seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", | ||
263 | count, mappable_count, size, mappable_size); | ||
264 | |||
265 | size = count = mappable_size = mappable_count = 0; | ||
266 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | ||
267 | if (obj->fault_mappable) { | ||
268 | size += obj->gtt_space->size; | ||
269 | ++count; | ||
270 | } | ||
271 | if (obj->pin_mappable) { | ||
272 | mappable_size += obj->gtt_space->size; | ||
273 | ++mappable_count; | ||
274 | } | ||
275 | } | ||
276 | seq_printf(m, "%u pinned mappable objects, %zu bytes\n", | ||
277 | mappable_count, mappable_size); | ||
278 | seq_printf(m, "%u fault mappable objects, %zu bytes\n", | ||
279 | count, size); | ||
280 | |||
281 | seq_printf(m, "%zu [%zu] gtt total\n", | ||
282 | dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); | ||
207 | 283 | ||
208 | mutex_unlock(&dev->struct_mutex); | 284 | mutex_unlock(&dev->struct_mutex); |
209 | 285 | ||
210 | return 0; | 286 | return 0; |
211 | } | 287 | } |
212 | 288 | ||
289 | static int i915_gem_gtt_info(struct seq_file *m, void* data) | ||
290 | { | ||
291 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
292 | struct drm_device *dev = node->minor->dev; | ||
293 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
294 | struct drm_i915_gem_object *obj; | ||
295 | size_t total_obj_size, total_gtt_size; | ||
296 | int count, ret; | ||
297 | |||
298 | ret = mutex_lock_interruptible(&dev->struct_mutex); | ||
299 | if (ret) | ||
300 | return ret; | ||
301 | |||
302 | total_obj_size = total_gtt_size = count = 0; | ||
303 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | ||
304 | seq_printf(m, " "); | ||
305 | describe_obj(m, obj); | ||
306 | seq_printf(m, "\n"); | ||
307 | total_obj_size += obj->base.size; | ||
308 | total_gtt_size += obj->gtt_space->size; | ||
309 | count++; | ||
310 | } | ||
311 | |||
312 | mutex_unlock(&dev->struct_mutex); | ||
313 | |||
314 | seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", | ||
315 | count, total_obj_size, total_gtt_size); | ||
316 | |||
317 | return 0; | ||
318 | } | ||
319 | |||
213 | 320 | ||
214 | static int i915_gem_pageflip_info(struct seq_file *m, void *data) | 321 | static int i915_gem_pageflip_info(struct seq_file *m, void *data) |
215 | { | 322 | { |
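
Every handler in this file follows the same seq_file shape seen in the functions above: recover the drm_device from the drm_info_node stashed in m->private, take struct_mutex interruptibly (so a blocked reader can still be killed), print with seq_printf(), and drop the lock. A minimal skeleton of that pattern — the handler name and printed field are illustrative only:

    static int example_debugfs_info(struct seq_file *m, void *data)
    {
    	struct drm_info_node *node = (struct drm_info_node *) m->private;
    	struct drm_device *dev = node->minor->dev;
    	drm_i915_private_t *dev_priv = dev->dev_private;
    	int ret;

    	ret = mutex_lock_interruptible(&dev->struct_mutex);
    	if (ret)
    		return ret;

    	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);

    	mutex_unlock(&dev->struct_mutex);
    	return 0;
    }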
@@ -243,14 +350,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) | |||
243 | seq_printf(m, "%d prepares\n", work->pending); | 350 | seq_printf(m, "%d prepares\n", work->pending); |
244 | 351 | ||
245 | if (work->old_fb_obj) { | 352 | if (work->old_fb_obj) { |
246 | struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); | 353 | struct drm_i915_gem_object *obj = work->old_fb_obj; |
247 | if(obj_priv) | 354 | if (obj) |
248 | seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); | 355 | seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); |
249 | } | 356 | } |
250 | if (work->pending_flip_obj) { | 357 | if (work->pending_flip_obj) { |
251 | struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); | 358 | struct drm_i915_gem_object *obj = work->pending_flip_obj; |
252 | if(obj_priv) | 359 | if (obj) |
253 | seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); | 360 | seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); |
254 | } | 361 | } |
255 | } | 362 | } |
256 | spin_unlock_irqrestore(&dev->event_lock, flags); | 363 | spin_unlock_irqrestore(&dev->event_lock, flags); |
@@ -265,44 +372,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data) | |||
265 | struct drm_device *dev = node->minor->dev; | 372 | struct drm_device *dev = node->minor->dev; |
266 | drm_i915_private_t *dev_priv = dev->dev_private; | 373 | drm_i915_private_t *dev_priv = dev->dev_private; |
267 | struct drm_i915_gem_request *gem_request; | 374 | struct drm_i915_gem_request *gem_request; |
268 | int ret; | 375 | int ret, count; |
269 | 376 | ||
270 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 377 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
271 | if (ret) | 378 | if (ret) |
272 | return ret; | 379 | return ret; |
273 | 380 | ||
274 | seq_printf(m, "Request:\n"); | 381 | count = 0; |
275 | list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, | 382 | if (!list_empty(&dev_priv->ring[RCS].request_list)) { |
276 | list) { | 383 | seq_printf(m, "Render requests:\n"); |
277 | seq_printf(m, " %d @ %d\n", | 384 | list_for_each_entry(gem_request, |
278 | gem_request->seqno, | 385 | &dev_priv->ring[RCS].request_list, |
279 | (int) (jiffies - gem_request->emitted_jiffies)); | 386 | list) { |
387 | seq_printf(m, " %d @ %d\n", | ||
388 | gem_request->seqno, | ||
389 | (int) (jiffies - gem_request->emitted_jiffies)); | ||
390 | } | ||
391 | count++; | ||
392 | } | ||
393 | if (!list_empty(&dev_priv->ring[VCS].request_list)) { | ||
394 | seq_printf(m, "BSD requests:\n"); | ||
395 | list_for_each_entry(gem_request, | ||
396 | &dev_priv->ring[VCS].request_list, | ||
397 | list) { | ||
398 | seq_printf(m, " %d @ %d\n", | ||
399 | gem_request->seqno, | ||
400 | (int) (jiffies - gem_request->emitted_jiffies)); | ||
401 | } | ||
402 | count++; | ||
403 | } | ||
404 | if (!list_empty(&dev_priv->ring[BCS].request_list)) { | ||
405 | seq_printf(m, "BLT requests:\n"); | ||
406 | list_for_each_entry(gem_request, | ||
407 | &dev_priv->ring[BCS].request_list, | ||
408 | list) { | ||
409 | seq_printf(m, " %d @ %d\n", | ||
410 | gem_request->seqno, | ||
411 | (int) (jiffies - gem_request->emitted_jiffies)); | ||
412 | } | ||
413 | count++; | ||
280 | } | 414 | } |
281 | mutex_unlock(&dev->struct_mutex); | 415 | mutex_unlock(&dev->struct_mutex); |
282 | 416 | ||
417 | if (count == 0) | ||
418 | seq_printf(m, "No requests\n"); | ||
419 | |||
283 | return 0; | 420 | return 0; |
284 | } | 421 | } |
285 | 422 | ||
423 | static void i915_ring_seqno_info(struct seq_file *m, | ||
424 | struct intel_ring_buffer *ring) | ||
425 | { | ||
426 | if (ring->get_seqno) { | ||
427 | seq_printf(m, "Current sequence (%s): %d\n", | ||
428 | ring->name, ring->get_seqno(ring)); | ||
429 | seq_printf(m, "Waiter sequence (%s): %d\n", | ||
430 | ring->name, ring->waiting_seqno); | ||
431 | seq_printf(m, "IRQ sequence (%s): %d\n", | ||
432 | ring->name, ring->irq_seqno); | ||
433 | } | ||
434 | } | ||
435 | |||
286 | static int i915_gem_seqno_info(struct seq_file *m, void *data) | 436 | static int i915_gem_seqno_info(struct seq_file *m, void *data) |
287 | { | 437 | { |
288 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 438 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
289 | struct drm_device *dev = node->minor->dev; | 439 | struct drm_device *dev = node->minor->dev; |
290 | drm_i915_private_t *dev_priv = dev->dev_private; | 440 | drm_i915_private_t *dev_priv = dev->dev_private; |
291 | int ret; | 441 | int ret, i; |
292 | 442 | ||
293 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 443 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
294 | if (ret) | 444 | if (ret) |
295 | return ret; | 445 | return ret; |
296 | 446 | ||
297 | if (dev_priv->render_ring.status_page.page_addr != NULL) { | 447 | for (i = 0; i < I915_NUM_RINGS; i++) |
298 | seq_printf(m, "Current sequence: %d\n", | 448 | i915_ring_seqno_info(m, &dev_priv->ring[i]); |
299 | dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); | ||
300 | } else { | ||
301 | seq_printf(m, "Current sequence: hws uninitialized\n"); | ||
302 | } | ||
303 | seq_printf(m, "Waiter sequence: %d\n", | ||
304 | dev_priv->mm.waiting_gem_seqno); | ||
305 | seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); | ||
306 | 449 | ||
307 | mutex_unlock(&dev->struct_mutex); | 450 | mutex_unlock(&dev->struct_mutex); |
308 | 451 | ||
@@ -315,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
315 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 458 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
316 | struct drm_device *dev = node->minor->dev; | 459 | struct drm_device *dev = node->minor->dev; |
317 | drm_i915_private_t *dev_priv = dev->dev_private; | 460 | drm_i915_private_t *dev_priv = dev->dev_private; |
318 | int ret; | 461 | int ret, i; |
319 | 462 | ||
320 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 463 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
321 | if (ret) | 464 | if (ret) |
@@ -354,16 +497,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data) | |||
354 | } | 497 | } |
355 | seq_printf(m, "Interrupts received: %d\n", | 498 | seq_printf(m, "Interrupts received: %d\n", |
356 | atomic_read(&dev_priv->irq_received)); | 499 | atomic_read(&dev_priv->irq_received)); |
357 | if (dev_priv->render_ring.status_page.page_addr != NULL) { | 500 | for (i = 0; i < I915_NUM_RINGS; i++) { |
358 | seq_printf(m, "Current sequence: %d\n", | 501 | if (IS_GEN6(dev)) { |
359 | dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); | 502 | seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", |
360 | } else { | 503 | dev_priv->ring[i].name, |
361 | seq_printf(m, "Current sequence: hws uninitialized\n"); | 504 | I915_READ_IMR(&dev_priv->ring[i])); |
505 | } | ||
506 | i915_ring_seqno_info(m, &dev_priv->ring[i]); | ||
362 | } | 507 | } |
363 | seq_printf(m, "Waiter sequence: %d\n", | ||
364 | dev_priv->mm.waiting_gem_seqno); | ||
365 | seq_printf(m, "IRQ sequence: %d\n", | ||
366 | dev_priv->mm.irq_gem_seqno); | ||
367 | mutex_unlock(&dev->struct_mutex); | 508 | mutex_unlock(&dev->struct_mutex); |
368 | 509 | ||
369 | return 0; | 510 | return 0; |
@@ -383,29 +524,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) | |||
383 | seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); | 524 | seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); |
384 | seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); | 525 | seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); |
385 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 526 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
386 | struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; | 527 | struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; |
387 | 528 | ||
388 | if (obj == NULL) { | 529 | seq_printf(m, "Fenced object[%2d] = ", i); |
389 | seq_printf(m, "Fenced object[%2d] = unused\n", i); | 530 | if (obj == NULL) |
390 | } else { | 531 | seq_printf(m, "unused"); |
391 | struct drm_i915_gem_object *obj_priv; | 532 | else |
392 | 533 | describe_obj(m, obj); | |
393 | obj_priv = to_intel_bo(obj); | 534 | seq_printf(m, "\n"); |
394 | seq_printf(m, "Fenced object[%2d] = %p: %s " | ||
395 | "%08x %08zx %08x %s %08x %08x %d", | ||
396 | i, obj, get_pin_flag(obj_priv), | ||
397 | obj_priv->gtt_offset, | ||
398 | obj->size, obj_priv->stride, | ||
399 | get_tiling_flag(obj_priv), | ||
400 | obj->read_domains, obj->write_domain, | ||
401 | obj_priv->last_rendering_seqno); | ||
402 | if (obj->name) | ||
403 | seq_printf(m, " (name: %d)", obj->name); | ||
404 | seq_printf(m, "\n"); | ||
405 | } | ||
406 | } | 535 | } |
407 | mutex_unlock(&dev->struct_mutex); | ||
408 | 536 | ||
537 | mutex_unlock(&dev->struct_mutex); | ||
409 | return 0; | 538 | return 0; |
410 | } | 539 | } |
411 | 540 | ||
@@ -414,10 +543,12 @@ static int i915_hws_info(struct seq_file *m, void *data) | |||
414 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 543 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
415 | struct drm_device *dev = node->minor->dev; | 544 | struct drm_device *dev = node->minor->dev; |
416 | drm_i915_private_t *dev_priv = dev->dev_private; | 545 | drm_i915_private_t *dev_priv = dev->dev_private; |
417 | int i; | 546 | struct intel_ring_buffer *ring; |
418 | volatile u32 *hws; | 547 | volatile u32 *hws; |
548 | int i; | ||
419 | 549 | ||
420 | hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr; | 550 | ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; |
551 | hws = (volatile u32 *)ring->status_page.page_addr; | ||
421 | if (hws == NULL) | 552 | if (hws == NULL) |
422 | return 0; | 553 | return 0; |
423 | 554 | ||
@@ -431,14 +562,14 @@ static int i915_hws_info(struct seq_file *m, void *data) | |||
431 | 562 | ||
432 | static void i915_dump_object(struct seq_file *m, | 563 | static void i915_dump_object(struct seq_file *m, |
433 | struct io_mapping *mapping, | 564 | struct io_mapping *mapping, |
434 | struct drm_i915_gem_object *obj_priv) | 565 | struct drm_i915_gem_object *obj) |
435 | { | 566 | { |
436 | int page, page_count, i; | 567 | int page, page_count, i; |
437 | 568 | ||
438 | page_count = obj_priv->base.size / PAGE_SIZE; | 569 | page_count = obj->base.size / PAGE_SIZE; |
439 | for (page = 0; page < page_count; page++) { | 570 | for (page = 0; page < page_count; page++) { |
440 | u32 *mem = io_mapping_map_wc(mapping, | 571 | u32 *mem = io_mapping_map_wc(mapping, |
441 | obj_priv->gtt_offset + page * PAGE_SIZE); | 572 | obj->gtt_offset + page * PAGE_SIZE); |
442 | for (i = 0; i < PAGE_SIZE; i += 4) | 573 | for (i = 0; i < PAGE_SIZE; i += 4) |
443 | seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); | 574 | seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); |
444 | io_mapping_unmap(mem); | 575 | io_mapping_unmap(mem); |
@@ -450,25 +581,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) | |||
450 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 581 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
451 | struct drm_device *dev = node->minor->dev; | 582 | struct drm_device *dev = node->minor->dev; |
452 | drm_i915_private_t *dev_priv = dev->dev_private; | 583 | drm_i915_private_t *dev_priv = dev->dev_private; |
453 | struct drm_gem_object *obj; | 584 | struct drm_i915_gem_object *obj; |
454 | struct drm_i915_gem_object *obj_priv; | ||
455 | int ret; | 585 | int ret; |
456 | 586 | ||
457 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 587 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
458 | if (ret) | 588 | if (ret) |
459 | return ret; | 589 | return ret; |
460 | 590 | ||
461 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | 591 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
462 | obj = &obj_priv->base; | 592 | if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { |
463 | if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { | 593 | seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); |
464 | seq_printf(m, "--- gtt_offset = 0x%08x\n", | 594 | i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); |
465 | obj_priv->gtt_offset); | ||
466 | i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv); | ||
467 | } | 595 | } |
468 | } | 596 | } |
469 | 597 | ||
470 | mutex_unlock(&dev->struct_mutex); | 598 | mutex_unlock(&dev->struct_mutex); |
471 | |||
472 | return 0; | 599 | return 0; |
473 | } | 600 | } |
474 | 601 | ||
@@ -477,19 +604,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) | |||
477 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 604 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
478 | struct drm_device *dev = node->minor->dev; | 605 | struct drm_device *dev = node->minor->dev; |
479 | drm_i915_private_t *dev_priv = dev->dev_private; | 606 | drm_i915_private_t *dev_priv = dev->dev_private; |
607 | struct intel_ring_buffer *ring; | ||
480 | int ret; | 608 | int ret; |
481 | 609 | ||
482 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 610 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
483 | if (ret) | 611 | if (ret) |
484 | return ret; | 612 | return ret; |
485 | 613 | ||
486 | if (!dev_priv->render_ring.gem_object) { | 614 | ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; |
615 | if (!ring->obj) { | ||
487 | seq_printf(m, "No ringbuffer setup\n"); | 616 | seq_printf(m, "No ringbuffer setup\n"); |
488 | } else { | 617 | } else { |
489 | u8 *virt = dev_priv->render_ring.virtual_start; | 618 | u8 *virt = ring->virtual_start; |
490 | uint32_t off; | 619 | uint32_t off; |
491 | 620 | ||
492 | for (off = 0; off < dev_priv->render_ring.size; off += 4) { | 621 | for (off = 0; off < ring->size; off += 4) { |
493 | uint32_t *ptr = (uint32_t *)(virt + off); | 622 | uint32_t *ptr = (uint32_t *)(virt + off); |
494 | seq_printf(m, "%08x : %08x\n", off, *ptr); | 623 | seq_printf(m, "%08x : %08x\n", off, *ptr); |
495 | } | 624 | } |
@@ -504,19 +633,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) | |||
504 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 633 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
505 | struct drm_device *dev = node->minor->dev; | 634 | struct drm_device *dev = node->minor->dev; |
506 | drm_i915_private_t *dev_priv = dev->dev_private; | 635 | drm_i915_private_t *dev_priv = dev->dev_private; |
507 | unsigned int head, tail; | 636 | struct intel_ring_buffer *ring; |
508 | 637 | ||
509 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 638 | ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; |
510 | tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | 639 | if (ring->size == 0) |
640 | return 0; | ||
511 | 641 | ||
512 | seq_printf(m, "RingHead : %08x\n", head); | 642 | seq_printf(m, "Ring %s:\n", ring->name); |
513 | seq_printf(m, "RingTail : %08x\n", tail); | 643 | seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); |
514 | seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); | 644 | seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); |
515 | seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD)); | 645 | seq_printf(m, " Size : %08x\n", ring->size); |
646 | seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); | ||
647 | seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); | ||
648 | if (IS_GEN6(dev)) { | ||
649 | seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); | ||
650 | seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); | ||
651 | } | ||
652 | seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); | ||
653 | seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); | ||
516 | 654 | ||
517 | return 0; | 655 | return 0; |
518 | } | 656 | } |
519 | 657 | ||
658 | static const char *ring_str(int ring) | ||
659 | { | ||
660 | switch (ring) { | ||
661 | case RING_RENDER: return " render"; | ||
662 | case RING_BSD: return " bsd"; | ||
663 | case RING_BLT: return " blt"; | ||
664 | default: return ""; | ||
665 | } | ||
666 | } | ||
667 | |||
520 | static const char *pin_flag(int pinned) | 668 | static const char *pin_flag(int pinned) |
521 | { | 669 | { |
522 | if (pinned > 0) | 670 | if (pinned > 0) |
@@ -547,6 +695,37 @@ static const char *purgeable_flag(int purgeable) | |||
547 | return purgeable ? " purgeable" : ""; | 695 | return purgeable ? " purgeable" : ""; |
548 | } | 696 | } |
549 | 697 | ||
698 | static void print_error_buffers(struct seq_file *m, | ||
699 | const char *name, | ||
700 | struct drm_i915_error_buffer *err, | ||
701 | int count) | ||
702 | { | ||
703 | seq_printf(m, "%s [%d]:\n", name, count); | ||
704 | |||
705 | while (count--) { | ||
706 | seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s", | ||
707 | err->gtt_offset, | ||
708 | err->size, | ||
709 | err->read_domains, | ||
710 | err->write_domain, | ||
711 | err->seqno, | ||
712 | pin_flag(err->pinned), | ||
713 | tiling_flag(err->tiling), | ||
714 | dirty_flag(err->dirty), | ||
715 | purgeable_flag(err->purgeable), | ||
716 | ring_str(err->ring), | ||
717 | agp_type_str(err->agp_type)); | ||
718 | |||
719 | if (err->name) | ||
720 | seq_printf(m, " (name: %d)", err->name); | ||
721 | if (err->fence_reg != I915_FENCE_REG_NONE) | ||
722 | seq_printf(m, " (fence: %d)", err->fence_reg); | ||
723 | |||
724 | seq_printf(m, "\n"); | ||
725 | err++; | ||
726 | } | ||
727 | } | ||
728 | |||
550 | static int i915_error_state(struct seq_file *m, void *unused) | 729 | static int i915_error_state(struct seq_file *m, void *unused) |
551 | { | 730 | { |
552 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 731 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
@@ -568,47 +747,54 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
568 | error->time.tv_usec); | 747 | error->time.tv_usec); |
569 | seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); | 748 | seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); |
570 | seq_printf(m, "EIR: 0x%08x\n", error->eir); | 749 | seq_printf(m, "EIR: 0x%08x\n", error->eir); |
571 | seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); | 750 | seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); |
572 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); | 751 | if (INTEL_INFO(dev)->gen >= 6) { |
752 | seq_printf(m, "ERROR: 0x%08x\n", error->error); | ||
753 | seq_printf(m, "Blitter command stream:\n"); | ||
754 | seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); | ||
755 | seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); | ||
756 | seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); | ||
757 | seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); | ||
758 | seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); | ||
759 | seq_printf(m, "Video (BSD) command stream:\n"); | ||
760 | seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); | ||
761 | seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); | ||
762 | seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); | ||
763 | seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); | ||
764 | seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); | ||
765 | } | ||
766 | seq_printf(m, "Render command stream:\n"); | ||
767 | seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); | ||
573 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); | 768 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); |
574 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); | 769 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); |
575 | seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); | 770 | seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); |
576 | seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); | ||
577 | if (INTEL_INFO(dev)->gen >= 4) { | 771 | if (INTEL_INFO(dev)->gen >= 4) { |
578 | seq_printf(m, " INSTPS: 0x%08x\n", error->instps); | ||
579 | seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); | 772 | seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); |
773 | seq_printf(m, " INSTPS: 0x%08x\n", error->instps); | ||
580 | } | 774 | } |
581 | seq_printf(m, "seqno: 0x%08x\n", error->seqno); | 775 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); |
582 | 776 | seq_printf(m, " seqno: 0x%08x\n", error->seqno); | |
583 | if (error->active_bo_count) { | 777 | |
584 | seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); | 778 | for (i = 0; i < 16; i++) |
585 | 779 | seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); | |
586 | for (i = 0; i < error->active_bo_count; i++) { | 780 | |
587 | seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s", | 781 | if (error->active_bo) |
588 | error->active_bo[i].gtt_offset, | 782 | print_error_buffers(m, "Active", |
589 | error->active_bo[i].size, | 783 | error->active_bo, |
590 | error->active_bo[i].read_domains, | 784 | error->active_bo_count); |
591 | error->active_bo[i].write_domain, | 785 | |
592 | error->active_bo[i].seqno, | 786 | if (error->pinned_bo) |
593 | pin_flag(error->active_bo[i].pinned), | 787 | print_error_buffers(m, "Pinned", |
594 | tiling_flag(error->active_bo[i].tiling), | 788 | error->pinned_bo, |
595 | dirty_flag(error->active_bo[i].dirty), | 789 | error->pinned_bo_count); |
596 | purgeable_flag(error->active_bo[i].purgeable)); | ||
597 | |||
598 | if (error->active_bo[i].name) | ||
599 | seq_printf(m, " (name: %d)", error->active_bo[i].name); | ||
600 | if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE) | ||
601 | seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg); | ||
602 | |||
603 | seq_printf(m, "\n"); | ||
604 | } | ||
605 | } | ||
606 | 790 | ||
607 | for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { | 791 | for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { |
608 | if (error->batchbuffer[i]) { | 792 | if (error->batchbuffer[i]) { |
609 | struct drm_i915_error_object *obj = error->batchbuffer[i]; | 793 | struct drm_i915_error_object *obj = error->batchbuffer[i]; |
610 | 794 | ||
611 | seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); | 795 | seq_printf(m, "%s --- gtt_offset = 0x%08x\n", |
796 | dev_priv->ring[i].name, | ||
797 | obj->gtt_offset); | ||
612 | offset = 0; | 798 | offset = 0; |
613 | for (page = 0; page < obj->page_count; page++) { | 799 | for (page = 0; page < obj->page_count; page++) { |
614 | for (elt = 0; elt < PAGE_SIZE/4; elt++) { | 800 | for (elt = 0; elt < PAGE_SIZE/4; elt++) { |
@@ -635,6 +821,9 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
635 | if (error->overlay) | 821 | if (error->overlay) |
636 | intel_overlay_print_error_state(m, error->overlay); | 822 | intel_overlay_print_error_state(m, error->overlay); |
637 | 823 | ||
824 | if (error->display) | ||
825 | intel_display_print_error_state(m, dev, error->display); | ||
826 | |||
638 | out: | 827 | out: |
639 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 828 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
640 | 829 | ||
@@ -658,15 +847,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
658 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 847 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
659 | struct drm_device *dev = node->minor->dev; | 848 | struct drm_device *dev = node->minor->dev; |
660 | drm_i915_private_t *dev_priv = dev->dev_private; | 849 | drm_i915_private_t *dev_priv = dev->dev_private; |
661 | u16 rgvswctl = I915_READ16(MEMSWCTL); | ||
662 | u16 rgvstat = I915_READ16(MEMSTAT_ILK); | ||
663 | 850 | ||
664 | seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); | 851 | if (IS_GEN5(dev)) { |
665 | seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); | 852 | u16 rgvswctl = I915_READ16(MEMSWCTL); |
666 | seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> | 853 | u16 rgvstat = I915_READ16(MEMSTAT_ILK); |
667 | MEMSTAT_VID_SHIFT); | 854 | |
668 | seq_printf(m, "Current P-state: %d\n", | 855 | seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); |
669 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); | 856 | seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); |
857 | seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> | ||
858 | MEMSTAT_VID_SHIFT); | ||
859 | seq_printf(m, "Current P-state: %d\n", | ||
860 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); | ||
861 | } else if (IS_GEN6(dev)) { | ||
862 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | ||
863 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); | ||
864 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
865 | int max_freq; | ||
866 | |||
867 | /* RPSTAT1 is in the GT power well */ | ||
868 | __gen6_force_wake_get(dev_priv); | ||
869 | |||
870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | ||
871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); | ||
872 | seq_printf(m, "Render p-state ratio: %d\n", | ||
873 | (gt_perf_status & 0xff00) >> 8); | ||
874 | seq_printf(m, "Render p-state VID: %d\n", | ||
875 | gt_perf_status & 0xff); | ||
876 | seq_printf(m, "Render p-state limit: %d\n", | ||
877 | rp_state_limits & 0xff); | ||
878 | |||
879 | max_freq = (rp_state_cap & 0xff0000) >> 16; | ||
880 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", | ||
881 | max_freq * 100); | ||
882 | |||
883 | max_freq = (rp_state_cap & 0xff00) >> 8; | ||
884 | seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", | ||
885 | max_freq * 100); | ||
886 | |||
887 | max_freq = rp_state_cap & 0xff; | ||
888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | ||
889 | max_freq * 100); | ||
890 | |||
891 | __gen6_force_wake_put(dev_priv); | ||
892 | } else { | ||
893 | seq_printf(m, "no P-state info available\n"); | ||
894 | } | ||
670 | 895 | ||
671 | return 0; | 896 | return 0; |
672 | } | 897 | } |
@@ -715,7 +940,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
715 | struct drm_device *dev = node->minor->dev; | 940 | struct drm_device *dev = node->minor->dev; |
716 | drm_i915_private_t *dev_priv = dev->dev_private; | 941 | drm_i915_private_t *dev_priv = dev->dev_private; |
717 | u32 rgvmodectl = I915_READ(MEMMODECTL); | 942 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
718 | u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY); | 943 | u32 rstdbyctl = I915_READ(RSTDBYCTL); |
719 | u16 crstandvid = I915_READ16(CRSTANDVID); | 944 | u16 crstandvid = I915_READ16(CRSTANDVID); |
720 | 945 | ||
721 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? | 946 | seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? |
@@ -738,6 +963,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
738 | seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); | 963 | seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); |
739 | seq_printf(m, "Render standby enabled: %s\n", | 964 | seq_printf(m, "Render standby enabled: %s\n", |
740 | (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); | 965 | (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); |
966 | seq_printf(m, "Current RS state: "); | ||
967 | switch (rstdbyctl & RSX_STATUS_MASK) { | ||
968 | case RSX_STATUS_ON: | ||
969 | seq_printf(m, "on\n"); | ||
970 | break; | ||
971 | case RSX_STATUS_RC1: | ||
972 | seq_printf(m, "RC1\n"); | ||
973 | break; | ||
974 | case RSX_STATUS_RC1E: | ||
975 | seq_printf(m, "RC1E\n"); | ||
976 | break; | ||
977 | case RSX_STATUS_RS1: | ||
978 | seq_printf(m, "RS1\n"); | ||
979 | break; | ||
980 | case RSX_STATUS_RS2: | ||
981 | seq_printf(m, "RS2 (RC6)\n"); | ||
982 | break; | ||
983 | case RSX_STATUS_RS3: | ||
984 | seq_printf(m, "RC3 (RC6+)\n"); | ||
985 | break; | ||
986 | default: | ||
987 | seq_printf(m, "unknown\n"); | ||
988 | break; | ||
989 | } | ||
741 | 990 | ||
742 | return 0; | 991 | return 0; |
743 | } | 992 | } |
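The switch added above turns the RSX_STATUS field of RSTDBYCTL into a human-readable standby state. The same decode reduced to a table lookup; the enum values here are stand-ins for illustration, the driver uses the RSX_STATUS_* field encodings from i915_reg.h:

#include <stdio.h>

enum rsx_status { RSX_ON, RSX_RC1, RSX_RC1E, RSX_RS1, RSX_RS2, RSX_RS3 };

static const char *rsx_state_name(int status)
{
	/* labels mirror the seq_printf strings in the hunk above */
	static const char *const names[] = {
		"on", "RC1", "RC1E", "RS1", "RS2 (RC6)", "RC3 (RC6+)",
	};

	if (status < 0 || status >= (int)(sizeof(names) / sizeof(names[0])))
		return "unknown";
	return names[status];
}

int main(void)
{
	printf("Current RS state: %s\n", rsx_state_name(RSX_RS2));
	return 0;
}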
@@ -794,7 +1043,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) | |||
794 | drm_i915_private_t *dev_priv = dev->dev_private; | 1043 | drm_i915_private_t *dev_priv = dev->dev_private; |
795 | bool sr_enabled = false; | 1044 | bool sr_enabled = false; |
796 | 1045 | ||
797 | if (IS_GEN5(dev)) | 1046 | if (HAS_PCH_SPLIT(dev)) |
798 | sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; | 1047 | sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; |
799 | else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) | 1048 | else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) |
800 | sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; | 1049 | sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; |
@@ -886,7 +1135,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) | |||
886 | fb->base.height, | 1135 | fb->base.height, |
887 | fb->base.depth, | 1136 | fb->base.depth, |
888 | fb->base.bits_per_pixel); | 1137 | fb->base.bits_per_pixel); |
889 | describe_obj(m, to_intel_bo(fb->obj)); | 1138 | describe_obj(m, fb->obj); |
890 | seq_printf(m, "\n"); | 1139 | seq_printf(m, "\n"); |
891 | 1140 | ||
892 | list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { | 1141 | list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { |
@@ -898,7 +1147,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) | |||
898 | fb->base.height, | 1147 | fb->base.height, |
899 | fb->base.depth, | 1148 | fb->base.depth, |
900 | fb->base.bits_per_pixel); | 1149 | fb->base.bits_per_pixel); |
901 | describe_obj(m, to_intel_bo(fb->obj)); | 1150 | describe_obj(m, fb->obj); |
902 | seq_printf(m, "\n"); | 1151 | seq_printf(m, "\n"); |
903 | } | 1152 | } |
904 | 1153 | ||
@@ -943,7 +1192,6 @@ i915_wedged_write(struct file *filp, | |||
943 | loff_t *ppos) | 1192 | loff_t *ppos) |
944 | { | 1193 | { |
945 | struct drm_device *dev = filp->private_data; | 1194 | struct drm_device *dev = filp->private_data; |
946 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
947 | char buf[20]; | 1195 | char buf[20]; |
948 | int val = 1; | 1196 | int val = 1; |
949 | 1197 | ||
@@ -959,12 +1207,7 @@ i915_wedged_write(struct file *filp, | |||
959 | } | 1207 | } |
960 | 1208 | ||
961 | DRM_INFO("Manually setting wedged to %d\n", val); | 1209 | DRM_INFO("Manually setting wedged to %d\n", val); |
962 | 1210 | i915_handle_error(dev, val); | |
963 | atomic_set(&dev_priv->mm.wedged, val); | ||
964 | if (val) { | ||
965 | wake_up_all(&dev_priv->irq_queue); | ||
966 | queue_work(dev_priv->wq, &dev_priv->error_work); | ||
967 | } | ||
968 | 1211 | ||
969 | return cnt; | 1212 | return cnt; |
970 | } | 1213 | } |
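With the change above, a write to the debugfs file no longer pokes dev_priv->mm.wedged and the wait queues directly but routes through the normal error handler, so manual injection and real GPU hangs share one code path. A userspace sketch of triggering it; the debugfs mount point and DRM minor number are assumptions for the usual single-card setup:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path assumes debugfs mounted at /sys/kernel/debug, DRM minor 0 */
	int fd = open("/sys/kernel/debug/dri/0/i915_wedged", O_WRONLY);

	if (fd < 0) {
		perror("open i915_wedged");
		return 1;
	}
	/* the handler parses the integer and calls i915_handle_error(dev, val) */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}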
@@ -1018,6 +1261,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) | |||
1018 | static struct drm_info_list i915_debugfs_list[] = { | 1261 | static struct drm_info_list i915_debugfs_list[] = { |
1019 | {"i915_capabilities", i915_capabilities, 0, 0}, | 1262 | {"i915_capabilities", i915_capabilities, 0, 0}, |
1020 | {"i915_gem_objects", i915_gem_object_info, 0}, | 1263 | {"i915_gem_objects", i915_gem_object_info, 0}, |
1264 | {"i915_gem_gtt", i915_gem_gtt_info, 0}, | ||
1021 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 1265 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
1022 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | 1266 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, |
1023 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | 1267 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, |
@@ -1028,9 +1272,15 @@ static struct drm_info_list i915_debugfs_list[] = { | |||
1028 | {"i915_gem_seqno", i915_gem_seqno_info, 0}, | 1272 | {"i915_gem_seqno", i915_gem_seqno_info, 0}, |
1029 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, | 1273 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, |
1030 | {"i915_gem_interrupt", i915_interrupt_info, 0}, | 1274 | {"i915_gem_interrupt", i915_interrupt_info, 0}, |
1031 | {"i915_gem_hws", i915_hws_info, 0}, | 1275 | {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, |
1032 | {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, | 1276 | {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, |
1033 | {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, | 1277 | {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, |
1278 | {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, | ||
1279 | {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, | ||
1280 | {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, | ||
1281 | {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, | ||
1282 | {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, | ||
1283 | {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, | ||
1034 | {"i915_batchbuffers", i915_batchbuffer_info, 0}, | 1284 | {"i915_batchbuffers", i915_batchbuffer_info, 0}, |
1035 | {"i915_error_state", i915_error_state, 0}, | 1285 | {"i915_error_state", i915_error_state, 0}, |
1036 | {"i915_rstdby_delays", i915_rstdby_delays, 0}, | 1286 | {"i915_rstdby_delays", i915_rstdby_delays, 0}, |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7a26f4dd21ae..17bd766f2081 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include "i915_drm.h" | 34 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
36 | #include "i915_trace.h" | 36 | #include "i915_trace.h" |
37 | #include "../../../platform/x86/intel_ips.h" | ||
37 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
38 | #include <linux/vgaarb.h> | 39 | #include <linux/vgaarb.h> |
39 | #include <linux/acpi.h> | 40 | #include <linux/acpi.h> |
@@ -49,6 +50,8 @@ | |||
49 | static int i915_init_phys_hws(struct drm_device *dev) | 50 | static int i915_init_phys_hws(struct drm_device *dev) |
50 | { | 51 | { |
51 | drm_i915_private_t *dev_priv = dev->dev_private; | 52 | drm_i915_private_t *dev_priv = dev->dev_private; |
53 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | ||
54 | |||
52 | /* Program Hardware Status Page */ | 55 | /* Program Hardware Status Page */ |
53 | dev_priv->status_page_dmah = | 56 | dev_priv->status_page_dmah = |
54 | drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); | 57 | drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); |
@@ -57,11 +60,10 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
57 | DRM_ERROR("Can not allocate hardware status page\n"); | 60 | DRM_ERROR("Can not allocate hardware status page\n"); |
58 | return -ENOMEM; | 61 | return -ENOMEM; |
59 | } | 62 | } |
60 | dev_priv->render_ring.status_page.page_addr | 63 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; |
61 | = dev_priv->status_page_dmah->vaddr; | ||
62 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; | 64 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; |
63 | 65 | ||
64 | memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); | 66 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
65 | 67 | ||
66 | if (INTEL_INFO(dev)->gen >= 4) | 68 | if (INTEL_INFO(dev)->gen >= 4) |
67 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & | 69 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & |
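The statement above (its 0xf0; continuation falls past this hunk boundary) folds the upper bits of a wide bus address into the low byte of the 32-bit value later written to HWS_PGA: (addr >> 28) & 0xf0 lands address bits 35:32 in register bits 7:4, where gen4+ parts appear to expect them since the page-aligned low bits are otherwise zero. Checking the arithmetic standalone, with a made-up address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x3abcde000ull;		/* hypothetical 36-bit bus address */
	uint32_t hws = (uint32_t)dma;

	hws |= (uint32_t)((dma >> 28) & 0xf0);	/* bits 35:32 -> bits 7:4 */
	printf("HWS_PGA = 0x%08x\n", hws);	/* prints 0xabcde030 */
	return 0;
}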
@@ -79,13 +81,15 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
79 | static void i915_free_hws(struct drm_device *dev) | 81 | static void i915_free_hws(struct drm_device *dev) |
80 | { | 82 | { |
81 | drm_i915_private_t *dev_priv = dev->dev_private; | 83 | drm_i915_private_t *dev_priv = dev->dev_private; |
84 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | ||
85 | |||
82 | if (dev_priv->status_page_dmah) { | 86 | if (dev_priv->status_page_dmah) { |
83 | drm_pci_free(dev, dev_priv->status_page_dmah); | 87 | drm_pci_free(dev, dev_priv->status_page_dmah); |
84 | dev_priv->status_page_dmah = NULL; | 88 | dev_priv->status_page_dmah = NULL; |
85 | } | 89 | } |
86 | 90 | ||
87 | if (dev_priv->render_ring.status_page.gfx_addr) { | 91 | if (ring->status_page.gfx_addr) { |
88 | dev_priv->render_ring.status_page.gfx_addr = 0; | 92 | ring->status_page.gfx_addr = 0; |
89 | drm_core_ioremapfree(&dev_priv->hws_map, dev); | 93 | drm_core_ioremapfree(&dev_priv->hws_map, dev); |
90 | } | 94 | } |
91 | 95 | ||
@@ -97,7 +101,7 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
97 | { | 101 | { |
98 | drm_i915_private_t *dev_priv = dev->dev_private; | 102 | drm_i915_private_t *dev_priv = dev->dev_private; |
99 | struct drm_i915_master_private *master_priv; | 103 | struct drm_i915_master_private *master_priv; |
100 | struct intel_ring_buffer *ring = &dev_priv->render_ring; | 104 | struct intel_ring_buffer *ring = LP_RING(dev_priv); |
101 | 105 | ||
102 | /* | 106 | /* |
103 | * We should never lose context on the ring with modesetting | 107 | * We should never lose context on the ring with modesetting |
@@ -106,8 +110,8 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
106 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 110 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
107 | return; | 111 | return; |
108 | 112 | ||
109 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | 113 | ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; |
110 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | 114 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
111 | ring->space = ring->head - (ring->tail + 8); | 115 | ring->space = ring->head - (ring->tail + 8); |
112 | if (ring->space < 0) | 116 | if (ring->space < 0) |
113 | ring->space += ring->size; | 117 | ring->space += ring->size; |
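The space computation above is the standard circular-buffer one: free space is head minus tail, wrapped back into the ring when negative, less a small gap (8 bytes here) so the tail can never fully catch up to the head. As a standalone check:

#include <assert.h>
#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	assert(ring_space(0x100, 0x080, 0x1000) == 0x078);	/* head ahead */
	assert(ring_space(0x080, 0x100, 0x1000) == 0xf78);	/* wrapped */
	printf("ok\n");
	return 0;
}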
@@ -123,6 +127,8 @@ void i915_kernel_lost_context(struct drm_device * dev) | |||
123 | static int i915_dma_cleanup(struct drm_device * dev) | 127 | static int i915_dma_cleanup(struct drm_device * dev) |
124 | { | 128 | { |
125 | drm_i915_private_t *dev_priv = dev->dev_private; | 129 | drm_i915_private_t *dev_priv = dev->dev_private; |
130 | int i; | ||
131 | |||
126 | /* Make sure interrupts are disabled here because the uninstall ioctl | 132 | /* Make sure interrupts are disabled here because the uninstall ioctl |
127 | * may not have been called from userspace and after dev_private | 133 | * may not have been called from userspace and after dev_private |
128 | * is freed, it's too late. | 134 | * is freed, it's too late. |
@@ -131,9 +137,8 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
131 | drm_irq_uninstall(dev); | 137 | drm_irq_uninstall(dev); |
132 | 138 | ||
133 | mutex_lock(&dev->struct_mutex); | 139 | mutex_lock(&dev->struct_mutex); |
134 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 140 | for (i = 0; i < I915_NUM_RINGS; i++) |
135 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | 141 | intel_cleanup_ring_buffer(&dev_priv->ring[i]); |
136 | intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); | ||
137 | mutex_unlock(&dev->struct_mutex); | 142 | mutex_unlock(&dev->struct_mutex); |
138 | 143 | ||
139 | /* Clear the HWS virtual address at teardown */ | 144 | /* Clear the HWS virtual address at teardown */ |
@@ -147,6 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
147 | { | 152 | { |
148 | drm_i915_private_t *dev_priv = dev->dev_private; | 153 | drm_i915_private_t *dev_priv = dev->dev_private; |
149 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 154 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
155 | int ret; | ||
150 | 156 | ||
151 | master_priv->sarea = drm_getsarea(dev); | 157 | master_priv->sarea = drm_getsarea(dev); |
152 | if (master_priv->sarea) { | 158 | if (master_priv->sarea) { |
@@ -157,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
157 | } | 163 | } |
158 | 164 | ||
159 | if (init->ring_size != 0) { | 165 | if (init->ring_size != 0) { |
160 | if (dev_priv->render_ring.gem_object != NULL) { | 166 | if (LP_RING(dev_priv)->obj != NULL) { |
161 | i915_dma_cleanup(dev); | 167 | i915_dma_cleanup(dev); |
162 | DRM_ERROR("Client tried to initialize ringbuffer in " | 168 | DRM_ERROR("Client tried to initialize ringbuffer in " |
163 | "GEM mode\n"); | 169 | "GEM mode\n"); |
164 | return -EINVAL; | 170 | return -EINVAL; |
165 | } | 171 | } |
166 | 172 | ||
167 | dev_priv->render_ring.size = init->ring_size; | 173 | ret = intel_render_ring_init_dri(dev, |
168 | 174 | init->ring_start, | |
169 | dev_priv->render_ring.map.offset = init->ring_start; | 175 | init->ring_size); |
170 | dev_priv->render_ring.map.size = init->ring_size; | 176 | if (ret) { |
171 | dev_priv->render_ring.map.type = 0; | ||
172 | dev_priv->render_ring.map.flags = 0; | ||
173 | dev_priv->render_ring.map.mtrr = 0; | ||
174 | |||
175 | drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); | ||
176 | |||
177 | if (dev_priv->render_ring.map.handle == NULL) { | ||
178 | i915_dma_cleanup(dev); | 177 | i915_dma_cleanup(dev); |
179 | DRM_ERROR("can not ioremap virtual address for" | 178 | return ret; |
180 | " ring buffer\n"); | ||
181 | return -ENOMEM; | ||
182 | } | 179 | } |
183 | } | 180 | } |
184 | 181 | ||
185 | dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; | ||
186 | |||
187 | dev_priv->cpp = init->cpp; | 182 | dev_priv->cpp = init->cpp; |
188 | dev_priv->back_offset = init->back_offset; | 183 | dev_priv->back_offset = init->back_offset; |
189 | dev_priv->front_offset = init->front_offset; | 184 | dev_priv->front_offset = init->front_offset; |
@@ -201,12 +196,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |||
201 | static int i915_dma_resume(struct drm_device * dev) | 196 | static int i915_dma_resume(struct drm_device * dev) |
202 | { | 197 | { |
203 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 198 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
199 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | ||
204 | 200 | ||
205 | struct intel_ring_buffer *ring; | ||
206 | DRM_DEBUG_DRIVER("%s\n", __func__); | 201 | DRM_DEBUG_DRIVER("%s\n", __func__); |
207 | 202 | ||
208 | ring = &dev_priv->render_ring; | ||
209 | |||
210 | if (ring->map.handle == NULL) { | 203 | if (ring->map.handle == NULL) { |
211 | DRM_ERROR("can not ioremap virtual address for" | 204 | DRM_ERROR("can not ioremap virtual address for" |
212 | " ring buffer\n"); | 205 | " ring buffer\n"); |
@@ -221,7 +214,7 @@ static int i915_dma_resume(struct drm_device * dev) | |||
221 | DRM_DEBUG_DRIVER("hw status page @ %p\n", | 214 | DRM_DEBUG_DRIVER("hw status page @ %p\n", |
222 | ring->status_page.page_addr); | 215 | ring->status_page.page_addr); |
223 | if (ring->status_page.gfx_addr != 0) | 216 | if (ring->status_page.gfx_addr != 0) |
224 | intel_ring_setup_status_page(dev, ring); | 217 | intel_ring_setup_status_page(ring); |
225 | else | 218 | else |
226 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | 219 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); |
227 | 220 | ||
@@ -263,7 +256,7 @@ static int i915_dma_init(struct drm_device *dev, void *data, | |||
263 | * instruction detected will be given a size of zero, which is a | 256 | * instruction detected will be given a size of zero, which is a |
264 | * signal to abort the rest of the buffer. | 257 | * signal to abort the rest of the buffer. |
265 | */ | 258 | */ |
266 | static int do_validate_cmd(int cmd) | 259 | static int validate_cmd(int cmd) |
267 | { | 260 | { |
268 | switch (((cmd >> 29) & 0x7)) { | 261 | switch (((cmd >> 29) & 0x7)) { |
269 | case 0x0: | 262 | case 0x0: |
@@ -321,40 +314,27 @@ static int do_validate_cmd(int cmd) | |||
321 | return 0; | 314 | return 0; |
322 | } | 315 | } |
323 | 316 | ||
324 | static int validate_cmd(int cmd) | ||
325 | { | ||
326 | int ret = do_validate_cmd(cmd); | ||
327 | |||
328 | /* printk("validate_cmd( %x ): %d\n", cmd, ret); */ | ||
329 | |||
330 | return ret; | ||
331 | } | ||
332 | |||
333 | static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | 317 | static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) |
334 | { | 318 | { |
335 | drm_i915_private_t *dev_priv = dev->dev_private; | 319 | drm_i915_private_t *dev_priv = dev->dev_private; |
336 | int i; | 320 | int i, ret; |
337 | 321 | ||
338 | if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) | 322 | if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) |
339 | return -EINVAL; | 323 | return -EINVAL; |
340 | 324 | ||
341 | BEGIN_LP_RING((dwords+1)&~1); | ||
342 | |||
343 | for (i = 0; i < dwords;) { | 325 | for (i = 0; i < dwords;) { |
344 | int cmd, sz; | 326 | int sz = validate_cmd(buffer[i]); |
345 | 327 | if (sz == 0 || i + sz > dwords) | |
346 | cmd = buffer[i]; | ||
347 | |||
348 | if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) | ||
349 | return -EINVAL; | 328 | return -EINVAL; |
350 | 329 | i += sz; | |
351 | OUT_RING(cmd); | ||
352 | |||
353 | while (++i, --sz) { | ||
354 | OUT_RING(buffer[i]); | ||
355 | } | ||
356 | } | 330 | } |
357 | 331 | ||
332 | ret = BEGIN_LP_RING((dwords+1)&~1); | ||
333 | if (ret) | ||
334 | return ret; | ||
335 | |||
336 | for (i = 0; i < dwords; i++) | ||
337 | OUT_RING(buffer[i]); | ||
358 | if (dwords & 1) | 338 | if (dwords & 1) |
359 | OUT_RING(0); | 339 | OUT_RING(0); |
360 | 340 | ||
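The rewrite above changes the shape of i915_emit_cmds: the old version reserved ring space first and could bail out mid-emission when a command failed validation, while the new one validates the whole buffer up front and only then reserves space, which also gives BEGIN_LP_RING's new error return somewhere to go. A sketch of that control flow, with toy stand-ins for the ring macros:

#include <stdio.h>

/* Toy stand-ins: validate() plays validate_cmd() (length in dwords, 0 on
 * error), reserve() plays BEGIN_LP_RING(), emit() plays OUT_RING(). */
static int validate(int cmd) { return cmd != 0; }
static int reserve(int dwords) { printf("reserve %d dwords\n", dwords); return 0; }
static void emit(int dword) { printf("emit 0x%08x\n", dword); }

static int emit_cmds(const int *buffer, int dwords)
{
	int i, ret;

	/* pass 1: reject the whole buffer before touching the ring */
	for (i = 0; i < dwords;) {
		int sz = validate(buffer[i]);

		if (sz == 0 || i + sz > dwords)
			return -1;		/* -EINVAL in the driver */
		i += sz;
	}

	/* pass 2: reserve once, rounded up to an even dword count */
	ret = reserve((dwords + 1) & ~1);
	if (ret)
		return ret;
	for (i = 0; i < dwords; i++)
		emit(buffer[i]);
	if (dwords & 1)
		emit(0);			/* pad to a qword boundary */
	return 0;
}

int main(void)
{
	int buf[] = { 0x1000, 0x2000, 0x3000 };

	return emit_cmds(buf, 3) ? 1 : 0;
}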
@@ -365,34 +345,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | |||
365 | 345 | ||
366 | int | 346 | int |
367 | i915_emit_box(struct drm_device *dev, | 347 | i915_emit_box(struct drm_device *dev, |
368 | struct drm_clip_rect *boxes, | 348 | struct drm_clip_rect *box, |
369 | int i, int DR1, int DR4) | 349 | int DR1, int DR4) |
370 | { | 350 | { |
371 | struct drm_clip_rect box = boxes[i]; | 351 | struct drm_i915_private *dev_priv = dev->dev_private; |
352 | int ret; | ||
372 | 353 | ||
373 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { | 354 | if (box->y2 <= box->y1 || box->x2 <= box->x1 || |
355 | box->y2 <= 0 || box->x2 <= 0) { | ||
374 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | 356 | DRM_ERROR("Bad box %d,%d..%d,%d\n", |
375 | box.x1, box.y1, box.x2, box.y2); | 357 | box->x1, box->y1, box->x2, box->y2); |
376 | return -EINVAL; | 358 | return -EINVAL; |
377 | } | 359 | } |
378 | 360 | ||
379 | if (INTEL_INFO(dev)->gen >= 4) { | 361 | if (INTEL_INFO(dev)->gen >= 4) { |
380 | BEGIN_LP_RING(4); | 362 | ret = BEGIN_LP_RING(4); |
363 | if (ret) | ||
364 | return ret; | ||
365 | |||
381 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); | 366 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); |
382 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | 367 | OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); |
383 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); | 368 | OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); |
384 | OUT_RING(DR4); | 369 | OUT_RING(DR4); |
385 | ADVANCE_LP_RING(); | ||
386 | } else { | 370 | } else { |
387 | BEGIN_LP_RING(6); | 371 | ret = BEGIN_LP_RING(6); |
372 | if (ret) | ||
373 | return ret; | ||
374 | |||
388 | OUT_RING(GFX_OP_DRAWRECT_INFO); | 375 | OUT_RING(GFX_OP_DRAWRECT_INFO); |
389 | OUT_RING(DR1); | 376 | OUT_RING(DR1); |
390 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | 377 | OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); |
391 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); | 378 | OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); |
392 | OUT_RING(DR4); | 379 | OUT_RING(DR4); |
393 | OUT_RING(0); | 380 | OUT_RING(0); |
394 | ADVANCE_LP_RING(); | ||
395 | } | 381 | } |
382 | ADVANCE_LP_RING(); | ||
396 | 383 | ||
397 | return 0; | 384 | return 0; |
398 | } | 385 | } |
@@ -412,12 +399,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev) | |||
412 | if (master_priv->sarea_priv) | 399 | if (master_priv->sarea_priv) |
413 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; | 400 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
414 | 401 | ||
415 | BEGIN_LP_RING(4); | 402 | if (BEGIN_LP_RING(4) == 0) { |
416 | OUT_RING(MI_STORE_DWORD_INDEX); | 403 | OUT_RING(MI_STORE_DWORD_INDEX); |
417 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 404 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
418 | OUT_RING(dev_priv->counter); | 405 | OUT_RING(dev_priv->counter); |
419 | OUT_RING(0); | 406 | OUT_RING(0); |
420 | ADVANCE_LP_RING(); | 407 | ADVANCE_LP_RING(); |
408 | } | ||
421 | } | 409 | } |
422 | 410 | ||
423 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, | 411 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, |
@@ -439,7 +427,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, | |||
439 | 427 | ||
440 | for (i = 0; i < count; i++) { | 428 | for (i = 0; i < count; i++) { |
441 | if (i < nbox) { | 429 | if (i < nbox) { |
442 | ret = i915_emit_box(dev, cliprects, i, | 430 | ret = i915_emit_box(dev, &cliprects[i], |
443 | cmd->DR1, cmd->DR4); | 431 | cmd->DR1, cmd->DR4); |
444 | if (ret) | 432 | if (ret) |
445 | return ret; | 433 | return ret; |
@@ -458,8 +446,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
458 | drm_i915_batchbuffer_t * batch, | 446 | drm_i915_batchbuffer_t * batch, |
459 | struct drm_clip_rect *cliprects) | 447 | struct drm_clip_rect *cliprects) |
460 | { | 448 | { |
449 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
461 | int nbox = batch->num_cliprects; | 450 | int nbox = batch->num_cliprects; |
462 | int i = 0, count; | 451 | int i, count, ret; |
463 | 452 | ||
464 | if ((batch->start | batch->used) & 0x7) { | 453 | if ((batch->start | batch->used) & 0x7) { |
465 | DRM_ERROR("alignment"); | 454 | DRM_ERROR("alignment"); |
@@ -469,17 +458,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
469 | i915_kernel_lost_context(dev); | 458 | i915_kernel_lost_context(dev); |
470 | 459 | ||
471 | count = nbox ? nbox : 1; | 460 | count = nbox ? nbox : 1; |
472 | |||
473 | for (i = 0; i < count; i++) { | 461 | for (i = 0; i < count; i++) { |
474 | if (i < nbox) { | 462 | if (i < nbox) { |
475 | int ret = i915_emit_box(dev, cliprects, i, | 463 | ret = i915_emit_box(dev, &cliprects[i], |
476 | batch->DR1, batch->DR4); | 464 | batch->DR1, batch->DR4); |
477 | if (ret) | 465 | if (ret) |
478 | return ret; | 466 | return ret; |
479 | } | 467 | } |
480 | 468 | ||
481 | if (!IS_I830(dev) && !IS_845G(dev)) { | 469 | if (!IS_I830(dev) && !IS_845G(dev)) { |
482 | BEGIN_LP_RING(2); | 470 | ret = BEGIN_LP_RING(2); |
471 | if (ret) | ||
472 | return ret; | ||
473 | |||
483 | if (INTEL_INFO(dev)->gen >= 4) { | 474 | if (INTEL_INFO(dev)->gen >= 4) { |
484 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); | 475 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); |
485 | OUT_RING(batch->start); | 476 | OUT_RING(batch->start); |
@@ -487,26 +478,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
487 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); | 478 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); |
488 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); | 479 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); |
489 | } | 480 | } |
490 | ADVANCE_LP_RING(); | ||
491 | } else { | 481 | } else { |
492 | BEGIN_LP_RING(4); | 482 | ret = BEGIN_LP_RING(4); |
483 | if (ret) | ||
484 | return ret; | ||
485 | |||
493 | OUT_RING(MI_BATCH_BUFFER); | 486 | OUT_RING(MI_BATCH_BUFFER); |
494 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); | 487 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); |
495 | OUT_RING(batch->start + batch->used - 4); | 488 | OUT_RING(batch->start + batch->used - 4); |
496 | OUT_RING(0); | 489 | OUT_RING(0); |
497 | ADVANCE_LP_RING(); | ||
498 | } | 490 | } |
491 | ADVANCE_LP_RING(); | ||
499 | } | 492 | } |
500 | 493 | ||
501 | 494 | ||
502 | if (IS_G4X(dev) || IS_GEN5(dev)) { | 495 | if (IS_G4X(dev) || IS_GEN5(dev)) { |
503 | BEGIN_LP_RING(2); | 496 | if (BEGIN_LP_RING(2) == 0) { |
504 | OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); | 497 | OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); |
505 | OUT_RING(MI_NOOP); | 498 | OUT_RING(MI_NOOP); |
506 | ADVANCE_LP_RING(); | 499 | ADVANCE_LP_RING(); |
500 | } | ||
507 | } | 501 | } |
508 | i915_emit_breadcrumb(dev); | ||
509 | 502 | ||
503 | i915_emit_breadcrumb(dev); | ||
510 | return 0; | 504 | return 0; |
511 | } | 505 | } |
512 | 506 | ||
@@ -515,6 +509,7 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
515 | drm_i915_private_t *dev_priv = dev->dev_private; | 509 | drm_i915_private_t *dev_priv = dev->dev_private; |
516 | struct drm_i915_master_private *master_priv = | 510 | struct drm_i915_master_private *master_priv = |
517 | dev->primary->master->driver_priv; | 511 | dev->primary->master->driver_priv; |
512 | int ret; | ||
518 | 513 | ||
519 | if (!master_priv->sarea_priv) | 514 | if (!master_priv->sarea_priv) |
520 | return -EINVAL; | 515 | return -EINVAL; |
@@ -526,12 +521,13 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
526 | 521 | ||
527 | i915_kernel_lost_context(dev); | 522 | i915_kernel_lost_context(dev); |
528 | 523 | ||
529 | BEGIN_LP_RING(2); | 524 | ret = BEGIN_LP_RING(10); |
525 | if (ret) | ||
526 | return ret; | ||
527 | |||
530 | OUT_RING(MI_FLUSH | MI_READ_FLUSH); | 528 | OUT_RING(MI_FLUSH | MI_READ_FLUSH); |
531 | OUT_RING(0); | 529 | OUT_RING(0); |
532 | ADVANCE_LP_RING(); | ||
533 | 530 | ||
534 | BEGIN_LP_RING(6); | ||
535 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); | 531 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); |
536 | OUT_RING(0); | 532 | OUT_RING(0); |
537 | if (dev_priv->current_page == 0) { | 533 | if (dev_priv->current_page == 0) { |
@@ -542,33 +538,32 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
542 | dev_priv->current_page = 0; | 538 | dev_priv->current_page = 0; |
543 | } | 539 | } |
544 | OUT_RING(0); | 540 | OUT_RING(0); |
545 | ADVANCE_LP_RING(); | ||
546 | 541 | ||
547 | BEGIN_LP_RING(2); | ||
548 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); | 542 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); |
549 | OUT_RING(0); | 543 | OUT_RING(0); |
544 | |||
550 | ADVANCE_LP_RING(); | 545 | ADVANCE_LP_RING(); |
551 | 546 | ||
552 | master_priv->sarea_priv->last_enqueue = dev_priv->counter++; | 547 | master_priv->sarea_priv->last_enqueue = dev_priv->counter++; |
553 | 548 | ||
554 | BEGIN_LP_RING(4); | 549 | if (BEGIN_LP_RING(4) == 0) { |
555 | OUT_RING(MI_STORE_DWORD_INDEX); | 550 | OUT_RING(MI_STORE_DWORD_INDEX); |
556 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 551 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
557 | OUT_RING(dev_priv->counter); | 552 | OUT_RING(dev_priv->counter); |
558 | OUT_RING(0); | 553 | OUT_RING(0); |
559 | ADVANCE_LP_RING(); | 554 | ADVANCE_LP_RING(); |
555 | } | ||
560 | 556 | ||
561 | master_priv->sarea_priv->pf_current_page = dev_priv->current_page; | 557 | master_priv->sarea_priv->pf_current_page = dev_priv->current_page; |
562 | return 0; | 558 | return 0; |
563 | } | 559 | } |
564 | 560 | ||
565 | static int i915_quiescent(struct drm_device * dev) | 561 | static int i915_quiescent(struct drm_device *dev) |
566 | { | 562 | { |
567 | drm_i915_private_t *dev_priv = dev->dev_private; | 563 | struct intel_ring_buffer *ring = LP_RING(dev->dev_private); |
568 | 564 | ||
569 | i915_kernel_lost_context(dev); | 565 | i915_kernel_lost_context(dev); |
570 | return intel_wait_ring_buffer(dev, &dev_priv->render_ring, | 566 | return intel_wait_ring_buffer(ring, ring->size - 8); |
571 | dev_priv->render_ring.size - 8); | ||
572 | } | 567 | } |
573 | 568 | ||
574 | static int i915_flush_ioctl(struct drm_device *dev, void *data, | 569 | static int i915_flush_ioctl(struct drm_device *dev, void *data, |
@@ -767,6 +762,15 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
767 | case I915_PARAM_HAS_BLT: | 762 | case I915_PARAM_HAS_BLT: |
768 | value = HAS_BLT(dev); | 763 | value = HAS_BLT(dev); |
769 | break; | 764 | break; |
765 | case I915_PARAM_HAS_RELAXED_FENCING: | ||
766 | value = 1; | ||
767 | break; | ||
768 | case I915_PARAM_HAS_COHERENT_RINGS: | ||
769 | value = 1; | ||
770 | break; | ||
771 | case I915_PARAM_HAS_EXEC_CONSTANTS: | ||
772 | value = INTEL_INFO(dev)->gen >= 4; | ||
773 | break; | ||
770 | default: | 774 | default: |
771 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 775 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
772 | param->param); | 776 | param->param); |
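The three parameters added above let userspace feature-test the running kernel before relying on relaxed fencing, coherent rings, or the exec-constants plumbing. A libdrm-based probe as a sketch; the card node is the usual default and the build assumes libdrm's headers (pkg-config --cflags libdrm):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int value = 0;
	struct drm_i915_getparam gp;

	if (fd < 0) {
		perror("open /dev/dri/card0");
		return 1;
	}
	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	gp.value = &value;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("relaxed fencing: %d\n", value);
	else
		printf("kernel predates this parameter\n");
	close(fd);
	return 0;
}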
@@ -822,7 +826,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, | |||
822 | { | 826 | { |
823 | drm_i915_private_t *dev_priv = dev->dev_private; | 827 | drm_i915_private_t *dev_priv = dev->dev_private; |
824 | drm_i915_hws_addr_t *hws = data; | 828 | drm_i915_hws_addr_t *hws = data; |
825 | struct intel_ring_buffer *ring = &dev_priv->render_ring; | 829 | struct intel_ring_buffer *ring = LP_RING(dev_priv); |
826 | 830 | ||
827 | if (!I915_NEED_GFX_HWS(dev)) | 831 | if (!I915_NEED_GFX_HWS(dev)) |
828 | return -EINVAL; | 832 | return -EINVAL; |
@@ -1001,73 +1005,47 @@ intel_teardown_mchbar(struct drm_device *dev) | |||
1001 | #define PTE_VALID (1 << 0) | 1005 | #define PTE_VALID (1 << 0) |
1002 | 1006 | ||
1003 | /** | 1007 | /** |
1004 | * i915_gtt_to_phys - take a GTT address and turn it into a physical one | 1008 | * i915_stolen_to_phys - take an offset into stolen memory and turn it into |
1009 | * a physical one | ||
1005 | * @dev: drm device | 1010 | * @dev: drm device |
1006 | * @gtt_addr: address to translate | 1011 | * @offset: address to translate |
1007 | * | 1012 | * |
1008 | * Some chip functions require allocations from stolen space but need the | 1013 | * Some chip functions require allocations from stolen space and need the |
1009 | * physical address of the memory in question. We use this routine | 1014 | * physical address of the memory in question. |
1010 | * to get a physical address suitable for register programming from a given | ||
1011 | * GTT address. | ||
1012 | */ | 1015 | */ |
1013 | static unsigned long i915_gtt_to_phys(struct drm_device *dev, | 1016 | static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) |
1014 | unsigned long gtt_addr) | ||
1015 | { | 1017 | { |
1016 | unsigned long *gtt; | 1018 | struct drm_i915_private *dev_priv = dev->dev_private; |
1017 | unsigned long entry, phys; | 1019 | struct pci_dev *pdev = dev_priv->bridge_dev; |
1018 | int gtt_bar = IS_GEN2(dev) ? 1 : 0; | 1020 | u32 base; |
1019 | int gtt_offset, gtt_size; | 1021 | |
1020 | 1022 | #if 0 | |
1021 | if (INTEL_INFO(dev)->gen >= 4) { | 1023 | /* On the machines I have tested the Graphics Base of Stolen Memory |
1022 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) { | 1024 | * is unreliable, so compute the base by subtracting the stolen memory |
1023 | gtt_offset = 2*1024*1024; | 1025 | * from the Top of Low Usable DRAM which is where the BIOS places |
1024 | gtt_size = 2*1024*1024; | 1026 | * the graphics stolen memory. |
1025 | } else { | 1027 | */ |
1026 | gtt_offset = 512*1024; | 1028 | if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { |
1027 | gtt_size = 512*1024; | 1029 | /* top 32bits are reserved = 0 */ |
1028 | } | 1030 | pci_read_config_dword(pdev, 0xA4, &base); |
1029 | } else { | 1031 | } else { |
1030 | gtt_bar = 3; | 1032 | /* XXX presume 8xx is the same as i915 */ |
1031 | gtt_offset = 0; | 1033 | pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); |
1032 | gtt_size = pci_resource_len(dev->pdev, gtt_bar); | 1034 | } |
1033 | } | 1035 | #else |
1034 | 1036 | if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { | |
1035 | gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, | 1037 | u16 val; |
1036 | gtt_size); | 1038 | pci_read_config_word(pdev, 0xb0, &val); |
1037 | if (!gtt) { | 1039 | base = val >> 4 << 20; |
1038 | DRM_ERROR("ioremap of GTT failed\n"); | 1040 | } else { |
1039 | return 0; | 1041 | u8 val; |
1040 | } | 1042 | pci_read_config_byte(pdev, 0x9c, &val); |
1041 | 1043 | base = val >> 3 << 27; | |
1042 | entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); | ||
1043 | |||
1044 | DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); | ||
1045 | |||
1046 | /* Mask out these reserved bits on this hardware. */ | ||
1047 | if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev)) | ||
1048 | entry &= ~PTE_ADDRESS_MASK_HIGH; | ||
1049 | |||
1050 | /* If it's not a mapping type we know, then bail. */ | ||
1051 | if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && | ||
1052 | (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { | ||
1053 | iounmap(gtt); | ||
1054 | return 0; | ||
1055 | } | ||
1056 | |||
1057 | if (!(entry & PTE_VALID)) { | ||
1058 | DRM_ERROR("bad GTT entry in stolen space\n"); | ||
1059 | iounmap(gtt); | ||
1060 | return 0; | ||
1061 | } | 1044 | } |
1045 | base -= dev_priv->mm.gtt->stolen_size; | ||
1046 | #endif | ||
1062 | 1047 | ||
1063 | iounmap(gtt); | 1048 | return base + offset; |
1064 | |||
1065 | phys =(entry & PTE_ADDRESS_MASK) | | ||
1066 | ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); | ||
1067 | |||
1068 | DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); | ||
1069 | |||
1070 | return phys; | ||
1071 | } | 1049 | } |
1072 | 1050 | ||
1073 | static void i915_warn_stolen(struct drm_device *dev) | 1051 | static void i915_warn_stolen(struct drm_device *dev) |
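The #else branch that the new i915_stolen_to_phys actually compiles reads the stolen-memory base straight from bridge PCI config space: a 16-bit word at 0xb0 holding the base in 1 MiB granularity from bit 4 on G33 and newer, and a byte at 0x9c in 128 MiB granularity from bit 3 on older parts, then backs off by the stolen size. The shift arithmetic, checked standalone with made-up config values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t word_0xb0 = 0x7f00;	/* hypothetical G33+ config read */
	uint8_t  byte_0x9c = 0x38;	/* hypothetical 8xx/i915 config read */

	/* bits 15:4 of the word become address bits 31:20 (1 MiB units) */
	uint32_t base_new = (uint32_t)(word_0xb0 >> 4) << 20;
	/* bits 7:3 of the byte become address bits 31:27 (128 MiB units) */
	uint32_t base_old = (uint32_t)(byte_0x9c >> 3) << 27;

	printf("G33+ base: 0x%08x\n", base_new);	/* 0x7f000000 */
	printf("8xx  base: 0x%08x\n", base_old);	/* 0x38000000 */
	return 0;
}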
@@ -1083,54 +1061,35 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
1083 | unsigned long cfb_base; | 1061 | unsigned long cfb_base; |
1084 | unsigned long ll_base = 0; | 1062 | unsigned long ll_base = 0; |
1085 | 1063 | ||
1086 | /* Leave 1M for line length buffer & misc. */ | 1064 | compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); |
1087 | compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0); | 1065 | if (compressed_fb) |
1088 | if (!compressed_fb) { | 1066 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); |
1089 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | 1067 | if (!compressed_fb) |
1090 | i915_warn_stolen(dev); | 1068 | goto err; |
1091 | return; | ||
1092 | } | ||
1093 | |||
1094 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); | ||
1095 | if (!compressed_fb) { | ||
1096 | i915_warn_stolen(dev); | ||
1097 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | ||
1098 | return; | ||
1099 | } | ||
1100 | |||
1101 | cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); | ||
1102 | if (!cfb_base) { | ||
1103 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | ||
1104 | drm_mm_put_block(compressed_fb); | ||
1105 | } | ||
1106 | 1069 | ||
1107 | if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { | 1070 | cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); |
1108 | compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096, | 1071 | if (!cfb_base) |
1109 | 4096, 0); | 1072 | goto err_fb; |
1110 | if (!compressed_llb) { | ||
1111 | i915_warn_stolen(dev); | ||
1112 | return; | ||
1113 | } | ||
1114 | 1073 | ||
1115 | compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); | 1074 | if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { |
1116 | if (!compressed_llb) { | 1075 | compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, |
1117 | i915_warn_stolen(dev); | 1076 | 4096, 4096, 0); |
1118 | return; | 1077 | if (compressed_llb) |
1119 | } | 1078 | compressed_llb = drm_mm_get_block(compressed_llb, |
1079 | 4096, 4096); | ||
1080 | if (!compressed_llb) | ||
1081 | goto err_fb; | ||
1120 | 1082 | ||
1121 | ll_base = i915_gtt_to_phys(dev, compressed_llb->start); | 1083 | ll_base = i915_stolen_to_phys(dev, compressed_llb->start); |
1122 | if (!ll_base) { | 1084 | if (!ll_base) |
1123 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | 1085 | goto err_llb; |
1124 | drm_mm_put_block(compressed_fb); | ||
1125 | drm_mm_put_block(compressed_llb); | ||
1126 | } | ||
1127 | } | 1086 | } |
1128 | 1087 | ||
1129 | dev_priv->cfb_size = size; | 1088 | dev_priv->cfb_size = size; |
1130 | 1089 | ||
1131 | intel_disable_fbc(dev); | 1090 | intel_disable_fbc(dev); |
1132 | dev_priv->compressed_fb = compressed_fb; | 1091 | dev_priv->compressed_fb = compressed_fb; |
1133 | if (IS_IRONLAKE_M(dev)) | 1092 | if (HAS_PCH_SPLIT(dev)) |
1134 | I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); | 1093 | I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); |
1135 | else if (IS_GM45(dev)) { | 1094 | else if (IS_GM45(dev)) { |
1136 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); | 1095 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); |
@@ -1140,8 +1099,17 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
1140 | dev_priv->compressed_llb = compressed_llb; | 1099 | dev_priv->compressed_llb = compressed_llb; |
1141 | } | 1100 | } |
1142 | 1101 | ||
1143 | DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, | 1102 | DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", |
1144 | ll_base, size >> 20); | 1103 | cfb_base, ll_base, size >> 20); |
1104 | return; | ||
1105 | |||
1106 | err_llb: | ||
1107 | drm_mm_put_block(compressed_llb); | ||
1108 | err_fb: | ||
1109 | drm_mm_put_block(compressed_fb); | ||
1110 | err: | ||
1111 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | ||
1112 | i915_warn_stolen(dev); | ||
1145 | } | 1113 | } |
1146 | 1114 | ||
1147 | static void i915_cleanup_compression(struct drm_device *dev) | 1115 | static void i915_cleanup_compression(struct drm_device *dev) |
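The restructured i915_setup_compression above replaces three copies of "warn and return" with a single goto-unwind tail, the usual kernel idiom: each failure jumps to the label that releases exactly what was already allocated and falls through to the common fallback. The same shape with malloc/free standing in for drm_mm_get_block/drm_mm_put_block:

#include <stdio.h>
#include <stdlib.h>

static int setup_compression(size_t fb_size)
{
	void *compressed_fb, *compressed_llb;

	compressed_fb = malloc(fb_size);
	if (!compressed_fb)
		goto err;
	compressed_llb = malloc(4096);
	if (!compressed_llb)
		goto err_fb;

	printf("FBC buffers allocated\n");
	/* the driver keeps these; freed here only so the sketch is leak-free */
	free(compressed_llb);
	free(compressed_fb);
	return 0;

err_fb:
	free(compressed_fb);
err:
	fprintf(stderr, "stolen space too small, FBC disabled\n");
	return -1;
}

int main(void)
{
	return setup_compression(8 << 20) ? 1 : 0;
}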
@@ -1172,12 +1140,16 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ | |||
1172 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 1140 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
1173 | if (state == VGA_SWITCHEROO_ON) { | 1141 | if (state == VGA_SWITCHEROO_ON) { |
1174 | printk(KERN_INFO "i915: switched on\n"); | 1142 | printk(KERN_INFO "i915: switched on\n"); |
1143 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
1175 | /* i915 resume handler doesn't set to D0 */ | 1144 | /* i915 resume handler doesn't set to D0 */ |
1176 | pci_set_power_state(dev->pdev, PCI_D0); | 1145 | pci_set_power_state(dev->pdev, PCI_D0); |
1177 | i915_resume(dev); | 1146 | i915_resume(dev); |
1147 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | ||
1178 | } else { | 1148 | } else { |
1179 | printk(KERN_ERR "i915: switched off\n"); | 1149 | printk(KERN_ERR "i915: switched off\n"); |
1150 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
1180 | i915_suspend(dev, pmm); | 1151 | i915_suspend(dev, pmm); |
1152 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | ||
1181 | } | 1153 | } |
1182 | } | 1154 | } |
1183 | 1155 | ||
@@ -1192,17 +1164,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | |||
1192 | return can_switch; | 1164 | return can_switch; |
1193 | } | 1165 | } |
1194 | 1166 | ||
1195 | static int i915_load_modeset_init(struct drm_device *dev, | 1167 | static int i915_load_modeset_init(struct drm_device *dev) |
1196 | unsigned long prealloc_size, | ||
1197 | unsigned long agp_size) | ||
1198 | { | 1168 | { |
1199 | struct drm_i915_private *dev_priv = dev->dev_private; | 1169 | struct drm_i915_private *dev_priv = dev->dev_private; |
1170 | unsigned long prealloc_size, gtt_size, mappable_size; | ||
1200 | int ret = 0; | 1171 | int ret = 0; |
1201 | 1172 | ||
1202 | /* Basic memrange allocator for stolen space (aka mm.vram) */ | 1173 | prealloc_size = dev_priv->mm.gtt->stolen_size; |
1203 | drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); | 1174 | gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; |
1175 | mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | ||
1176 | |||
1177 | /* Basic memrange allocator for stolen space */ | ||
1178 | drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); | ||
1204 | 1179 | ||
1205 | /* Let GEM Manage from end of prealloc space to end of aperture. | 1180 | /* Let GEM Manage all of the aperture. |
1206 | * | 1181 | * |
1207 | * However, leave one page at the end still bound to the scratch page. | 1182 | * However, leave one page at the end still bound to the scratch page. |
1208 | * There are a number of places where the hardware apparently | 1183 | * There are a number of places where the hardware apparently |
@@ -1211,7 +1186,7 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1211 | * at the last page of the aperture. One page should be enough to | 1186 | * at the last page of the aperture. One page should be enough to |
1212 | * keep any prefetching inside of the aperture. | 1187 | * keep any prefetching inside of the aperture. |
1213 | */ | 1188 | */ |
1214 | i915_gem_do_init(dev, prealloc_size, agp_size - 4096); | 1189 | i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); |
1215 | 1190 | ||
1216 | mutex_lock(&dev->struct_mutex); | 1191 | mutex_lock(&dev->struct_mutex); |
1217 | ret = i915_gem_init_ringbuffer(dev); | 1192 | ret = i915_gem_init_ringbuffer(dev); |
@@ -1223,31 +1198,39 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1223 | if (I915_HAS_FBC(dev) && i915_powersave) { | 1198 | if (I915_HAS_FBC(dev) && i915_powersave) { |
1224 | int cfb_size; | 1199 | int cfb_size; |
1225 | 1200 | ||
1226 | /* Try to get an 8M buffer... */ | 1201 | /* Leave 1M for line length buffer & misc. */ |
1227 | if (prealloc_size > (9*1024*1024)) | 1202 | |
1228 | cfb_size = 8*1024*1024; | 1203 | /* Try to get a 32M buffer... */ |
1204 | if (prealloc_size > (36*1024*1024)) | ||
1205 | cfb_size = 32*1024*1024; | ||
1229 | else /* fall back to 7/8 of the stolen space */ | 1206 | else /* fall back to 7/8 of the stolen space */ |
1230 | cfb_size = prealloc_size * 7 / 8; | 1207 | cfb_size = prealloc_size * 7 / 8; |
1231 | i915_setup_compression(dev, cfb_size); | 1208 | i915_setup_compression(dev, cfb_size); |
1232 | } | 1209 | } |
1233 | 1210 | ||
1234 | /* Allow hardware batchbuffers unless told otherwise. | 1211 | /* Allow hardware batchbuffers unless told otherwise. */ |
1235 | */ | ||
1236 | dev_priv->allow_batchbuffer = 1; | 1212 | dev_priv->allow_batchbuffer = 1; |
1237 | 1213 | ||
1238 | ret = intel_parse_bios(dev); | 1214 | ret = intel_parse_bios(dev); |
1239 | if (ret) | 1215 | if (ret) |
1240 | DRM_INFO("failed to find VBIOS tables\n"); | 1216 | DRM_INFO("failed to find VBIOS tables\n"); |
1241 | 1217 | ||
1242 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ | 1218 | /* If we have > 1 VGA cards, then we need to arbitrate access |
1219 | * to the common VGA resources. | ||
1220 | * | ||
1221 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), | ||
1222 | * then we do not take part in VGA arbitration and the | ||
1223 | * vga_client_register() fails with -ENODEV. | ||
1224 | */ | ||
1243 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | 1225 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); |
1244 | if (ret) | 1226 | if (ret && ret != -ENODEV) |
1245 | goto cleanup_ringbuffer; | 1227 | goto cleanup_ringbuffer; |
1246 | 1228 | ||
1247 | intel_register_dsm_handler(); | 1229 | intel_register_dsm_handler(); |
1248 | 1230 | ||
1249 | ret = vga_switcheroo_register_client(dev->pdev, | 1231 | ret = vga_switcheroo_register_client(dev->pdev, |
1250 | i915_switcheroo_set_state, | 1232 | i915_switcheroo_set_state, |
1233 | NULL, | ||
1251 | i915_switcheroo_can_switch); | 1234 | i915_switcheroo_can_switch); |
1252 | if (ret) | 1235 | if (ret) |
1253 | goto cleanup_vga_client; | 1236 | goto cleanup_vga_client; |
@@ -1422,152 +1405,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev) | |||
1422 | } | 1405 | } |
1423 | } | 1406 | } |
1424 | 1407 | ||
1425 | struct v_table { | 1408 | static const struct cparams { |
1426 | u8 vid; | 1409 | u16 i; |
1427 | unsigned long vd; /* in .1 mil */ | 1410 | u16 t; |
1428 | unsigned long vm; /* in .1 mil */ | 1411 | u16 m; |
1429 | u8 pvid; | 1412 | u16 c; |
1430 | }; | 1413 | } cparams[] = { |
1431 | |||
1432 | static struct v_table v_table[] = { | ||
1433 | { 0, 16125, 15000, 0x7f, }, | ||
1434 | { 1, 16000, 14875, 0x7e, }, | ||
1435 | { 2, 15875, 14750, 0x7d, }, | ||
1436 | { 3, 15750, 14625, 0x7c, }, | ||
1437 | { 4, 15625, 14500, 0x7b, }, | ||
1438 | { 5, 15500, 14375, 0x7a, }, | ||
1439 | { 6, 15375, 14250, 0x79, }, | ||
1440 | { 7, 15250, 14125, 0x78, }, | ||
1441 | { 8, 15125, 14000, 0x77, }, | ||
1442 | { 9, 15000, 13875, 0x76, }, | ||
1443 | { 10, 14875, 13750, 0x75, }, | ||
1444 | { 11, 14750, 13625, 0x74, }, | ||
1445 | { 12, 14625, 13500, 0x73, }, | ||
1446 | { 13, 14500, 13375, 0x72, }, | ||
1447 | { 14, 14375, 13250, 0x71, }, | ||
1448 | { 15, 14250, 13125, 0x70, }, | ||
1449 | { 16, 14125, 13000, 0x6f, }, | ||
1450 | { 17, 14000, 12875, 0x6e, }, | ||
1451 | { 18, 13875, 12750, 0x6d, }, | ||
1452 | { 19, 13750, 12625, 0x6c, }, | ||
1453 | { 20, 13625, 12500, 0x6b, }, | ||
1454 | { 21, 13500, 12375, 0x6a, }, | ||
1455 | { 22, 13375, 12250, 0x69, }, | ||
1456 | { 23, 13250, 12125, 0x68, }, | ||
1457 | { 24, 13125, 12000, 0x67, }, | ||
1458 | { 25, 13000, 11875, 0x66, }, | ||
1459 | { 26, 12875, 11750, 0x65, }, | ||
1460 | { 27, 12750, 11625, 0x64, }, | ||
1461 | { 28, 12625, 11500, 0x63, }, | ||
1462 | { 29, 12500, 11375, 0x62, }, | ||
1463 | { 30, 12375, 11250, 0x61, }, | ||
1464 | { 31, 12250, 11125, 0x60, }, | ||
1465 | { 32, 12125, 11000, 0x5f, }, | ||
1466 | { 33, 12000, 10875, 0x5e, }, | ||
1467 | { 34, 11875, 10750, 0x5d, }, | ||
1468 | { 35, 11750, 10625, 0x5c, }, | ||
1469 | { 36, 11625, 10500, 0x5b, }, | ||
1470 | { 37, 11500, 10375, 0x5a, }, | ||
1471 | { 38, 11375, 10250, 0x59, }, | ||
1472 | { 39, 11250, 10125, 0x58, }, | ||
1473 | { 40, 11125, 10000, 0x57, }, | ||
1474 | { 41, 11000, 9875, 0x56, }, | ||
1475 | { 42, 10875, 9750, 0x55, }, | ||
1476 | { 43, 10750, 9625, 0x54, }, | ||
1477 | { 44, 10625, 9500, 0x53, }, | ||
1478 | { 45, 10500, 9375, 0x52, }, | ||
1479 | { 46, 10375, 9250, 0x51, }, | ||
1480 | { 47, 10250, 9125, 0x50, }, | ||
1481 | { 48, 10125, 9000, 0x4f, }, | ||
1482 | { 49, 10000, 8875, 0x4e, }, | ||
1483 | { 50, 9875, 8750, 0x4d, }, | ||
1484 | { 51, 9750, 8625, 0x4c, }, | ||
1485 | { 52, 9625, 8500, 0x4b, }, | ||
1486 | { 53, 9500, 8375, 0x4a, }, | ||
1487 | { 54, 9375, 8250, 0x49, }, | ||
1488 | { 55, 9250, 8125, 0x48, }, | ||
1489 | { 56, 9125, 8000, 0x47, }, | ||
1490 | { 57, 9000, 7875, 0x46, }, | ||
1491 | { 58, 8875, 7750, 0x45, }, | ||
1492 | { 59, 8750, 7625, 0x44, }, | ||
1493 | { 60, 8625, 7500, 0x43, }, | ||
1494 | { 61, 8500, 7375, 0x42, }, | ||
1495 | { 62, 8375, 7250, 0x41, }, | ||
1496 | { 63, 8250, 7125, 0x40, }, | ||
1497 | { 64, 8125, 7000, 0x3f, }, | ||
1498 | { 65, 8000, 6875, 0x3e, }, | ||
1499 | { 66, 7875, 6750, 0x3d, }, | ||
1500 | { 67, 7750, 6625, 0x3c, }, | ||
1501 | { 68, 7625, 6500, 0x3b, }, | ||
1502 | { 69, 7500, 6375, 0x3a, }, | ||
1503 | { 70, 7375, 6250, 0x39, }, | ||
1504 | { 71, 7250, 6125, 0x38, }, | ||
1505 | { 72, 7125, 6000, 0x37, }, | ||
1506 | { 73, 7000, 5875, 0x36, }, | ||
1507 | { 74, 6875, 5750, 0x35, }, | ||
1508 | { 75, 6750, 5625, 0x34, }, | ||
1509 | { 76, 6625, 5500, 0x33, }, | ||
1510 | { 77, 6500, 5375, 0x32, }, | ||
1511 | { 78, 6375, 5250, 0x31, }, | ||
1512 | { 79, 6250, 5125, 0x30, }, | ||
1513 | { 80, 6125, 5000, 0x2f, }, | ||
1514 | { 81, 6000, 4875, 0x2e, }, | ||
1515 | { 82, 5875, 4750, 0x2d, }, | ||
1516 | { 83, 5750, 4625, 0x2c, }, | ||
1517 | { 84, 5625, 4500, 0x2b, }, | ||
1518 | { 85, 5500, 4375, 0x2a, }, | ||
1519 | { 86, 5375, 4250, 0x29, }, | ||
1520 | { 87, 5250, 4125, 0x28, }, | ||
1521 | { 88, 5125, 4000, 0x27, }, | ||
1522 | { 89, 5000, 3875, 0x26, }, | ||
1523 | { 90, 4875, 3750, 0x25, }, | ||
1524 | { 91, 4750, 3625, 0x24, }, | ||
1525 | { 92, 4625, 3500, 0x23, }, | ||
1526 | { 93, 4500, 3375, 0x22, }, | ||
1527 | { 94, 4375, 3250, 0x21, }, | ||
1528 | { 95, 4250, 3125, 0x20, }, | ||
1529 | { 96, 4125, 3000, 0x1f, }, | ||
1530 | { 97, 4125, 3000, 0x1e, }, | ||
1531 | { 98, 4125, 3000, 0x1d, }, | ||
1532 | { 99, 4125, 3000, 0x1c, }, | ||
1533 | { 100, 4125, 3000, 0x1b, }, | ||
1534 | { 101, 4125, 3000, 0x1a, }, | ||
1535 | { 102, 4125, 3000, 0x19, }, | ||
1536 | { 103, 4125, 3000, 0x18, }, | ||
1537 | { 104, 4125, 3000, 0x17, }, | ||
1538 | { 105, 4125, 3000, 0x16, }, | ||
1539 | { 106, 4125, 3000, 0x15, }, | ||
1540 | { 107, 4125, 3000, 0x14, }, | ||
1541 | { 108, 4125, 3000, 0x13, }, | ||
1542 | { 109, 4125, 3000, 0x12, }, | ||
1543 | { 110, 4125, 3000, 0x11, }, | ||
1544 | { 111, 4125, 3000, 0x10, }, | ||
1545 | { 112, 4125, 3000, 0x0f, }, | ||
1546 | { 113, 4125, 3000, 0x0e, }, | ||
1547 | { 114, 4125, 3000, 0x0d, }, | ||
1548 | { 115, 4125, 3000, 0x0c, }, | ||
1549 | { 116, 4125, 3000, 0x0b, }, | ||
1550 | { 117, 4125, 3000, 0x0a, }, | ||
1551 | { 118, 4125, 3000, 0x09, }, | ||
1552 | { 119, 4125, 3000, 0x08, }, | ||
1553 | { 120, 1125, 0, 0x07, }, | ||
1554 | { 121, 1000, 0, 0x06, }, | ||
1555 | { 122, 875, 0, 0x05, }, | ||
1556 | { 123, 750, 0, 0x04, }, | ||
1557 | { 124, 625, 0, 0x03, }, | ||
1558 | { 125, 500, 0, 0x02, }, | ||
1559 | { 126, 375, 0, 0x01, }, | ||
1560 | { 127, 0, 0, 0x00, }, | ||
1561 | }; | ||
1562 | |||
1563 | struct cparams { | ||
1564 | int i; | ||
1565 | int t; | ||
1566 | int m; | ||
1567 | int c; | ||
1568 | }; | ||
1569 | |||
1570 | static struct cparams cparams[] = { | ||
1571 | { 1, 1333, 301, 28664 }, | 1414 | { 1, 1333, 301, 28664 }, |
1572 | { 1, 1066, 294, 24460 }, | 1415 | { 1, 1066, 294, 24460 }, |
1573 | { 1, 800, 294, 25192 }, | 1416 | { 1, 800, 294, 25192 }, |
@@ -1633,21 +1476,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv) | |||
1633 | return ((m * x) / 127) - b; | 1476 | return ((m * x) / 127) - b; |
1634 | } | 1477 | } |
1635 | 1478 | ||
1636 | static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | 1479 | static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) |
1637 | { | 1480 | { |
1638 | unsigned long val = 0; | 1481 | static const struct v_table { |
1639 | int i; | 1482 | u16 vd; /* in .1 mil */ |
1640 | 1483 | u16 vm; /* in .1 mil */ | |
1641 | for (i = 0; i < ARRAY_SIZE(v_table); i++) { | 1484 | } v_table[] = { |
1642 | if (v_table[i].pvid == pxvid) { | 1485 | { 0, 0, }, |
1643 | if (IS_MOBILE(dev_priv->dev)) | 1486 | { 375, 0, }, |
1644 | val = v_table[i].vm; | 1487 | { 500, 0, }, |
1645 | else | 1488 | { 625, 0, }, |
1646 | val = v_table[i].vd; | 1489 | { 750, 0, }, |
1647 | } | 1490 | { 875, 0, }, |
1648 | } | 1491 | { 1000, 0, }, |
1649 | 1492 | { 1125, 0, }, | |
1650 | return val; | 1493 | { 4125, 3000, }, |
1494 | { 4125, 3000, }, | ||
1495 | { 4125, 3000, }, | ||
1496 | { 4125, 3000, }, | ||
1497 | { 4125, 3000, }, | ||
1498 | { 4125, 3000, }, | ||
1499 | { 4125, 3000, }, | ||
1500 | { 4125, 3000, }, | ||
1501 | { 4125, 3000, }, | ||
1502 | { 4125, 3000, }, | ||
1503 | { 4125, 3000, }, | ||
1504 | { 4125, 3000, }, | ||
1505 | { 4125, 3000, }, | ||
1506 | { 4125, 3000, }, | ||
1507 | { 4125, 3000, }, | ||
1508 | { 4125, 3000, }, | ||
1509 | { 4125, 3000, }, | ||
1510 | { 4125, 3000, }, | ||
1511 | { 4125, 3000, }, | ||
1512 | { 4125, 3000, }, | ||
1513 | { 4125, 3000, }, | ||
1514 | { 4125, 3000, }, | ||
1515 | { 4125, 3000, }, | ||
1516 | { 4125, 3000, }, | ||
1517 | { 4250, 3125, }, | ||
1518 | { 4375, 3250, }, | ||
1519 | { 4500, 3375, }, | ||
1520 | { 4625, 3500, }, | ||
1521 | { 4750, 3625, }, | ||
1522 | { 4875, 3750, }, | ||
1523 | { 5000, 3875, }, | ||
1524 | { 5125, 4000, }, | ||
1525 | { 5250, 4125, }, | ||
1526 | { 5375, 4250, }, | ||
1527 | { 5500, 4375, }, | ||
1528 | { 5625, 4500, }, | ||
1529 | { 5750, 4625, }, | ||
1530 | { 5875, 4750, }, | ||
1531 | { 6000, 4875, }, | ||
1532 | { 6125, 5000, }, | ||
1533 | { 6250, 5125, }, | ||
1534 | { 6375, 5250, }, | ||
1535 | { 6500, 5375, }, | ||
1536 | { 6625, 5500, }, | ||
1537 | { 6750, 5625, }, | ||
1538 | { 6875, 5750, }, | ||
1539 | { 7000, 5875, }, | ||
1540 | { 7125, 6000, }, | ||
1541 | { 7250, 6125, }, | ||
1542 | { 7375, 6250, }, | ||
1543 | { 7500, 6375, }, | ||
1544 | { 7625, 6500, }, | ||
1545 | { 7750, 6625, }, | ||
1546 | { 7875, 6750, }, | ||
1547 | { 8000, 6875, }, | ||
1548 | { 8125, 7000, }, | ||
1549 | { 8250, 7125, }, | ||
1550 | { 8375, 7250, }, | ||
1551 | { 8500, 7375, }, | ||
1552 | { 8625, 7500, }, | ||
1553 | { 8750, 7625, }, | ||
1554 | { 8875, 7750, }, | ||
1555 | { 9000, 7875, }, | ||
1556 | { 9125, 8000, }, | ||
1557 | { 9250, 8125, }, | ||
1558 | { 9375, 8250, }, | ||
1559 | { 9500, 8375, }, | ||
1560 | { 9625, 8500, }, | ||
1561 | { 9750, 8625, }, | ||
1562 | { 9875, 8750, }, | ||
1563 | { 10000, 8875, }, | ||
1564 | { 10125, 9000, }, | ||
1565 | { 10250, 9125, }, | ||
1566 | { 10375, 9250, }, | ||
1567 | { 10500, 9375, }, | ||
1568 | { 10625, 9500, }, | ||
1569 | { 10750, 9625, }, | ||
1570 | { 10875, 9750, }, | ||
1571 | { 11000, 9875, }, | ||
1572 | { 11125, 10000, }, | ||
1573 | { 11250, 10125, }, | ||
1574 | { 11375, 10250, }, | ||
1575 | { 11500, 10375, }, | ||
1576 | { 11625, 10500, }, | ||
1577 | { 11750, 10625, }, | ||
1578 | { 11875, 10750, }, | ||
1579 | { 12000, 10875, }, | ||
1580 | { 12125, 11000, }, | ||
1581 | { 12250, 11125, }, | ||
1582 | { 12375, 11250, }, | ||
1583 | { 12500, 11375, }, | ||
1584 | { 12625, 11500, }, | ||
1585 | { 12750, 11625, }, | ||
1586 | { 12875, 11750, }, | ||
1587 | { 13000, 11875, }, | ||
1588 | { 13125, 12000, }, | ||
1589 | { 13250, 12125, }, | ||
1590 | { 13375, 12250, }, | ||
1591 | { 13500, 12375, }, | ||
1592 | { 13625, 12500, }, | ||
1593 | { 13750, 12625, }, | ||
1594 | { 13875, 12750, }, | ||
1595 | { 14000, 12875, }, | ||
1596 | { 14125, 13000, }, | ||
1597 | { 14250, 13125, }, | ||
1598 | { 14375, 13250, }, | ||
1599 | { 14500, 13375, }, | ||
1600 | { 14625, 13500, }, | ||
1601 | { 14750, 13625, }, | ||
1602 | { 14875, 13750, }, | ||
1603 | { 15000, 13875, }, | ||
1604 | { 15125, 14000, }, | ||
1605 | { 15250, 14125, }, | ||
1606 | { 15375, 14250, }, | ||
1607 | { 15500, 14375, }, | ||
1608 | { 15625, 14500, }, | ||
1609 | { 15750, 14625, }, | ||
1610 | { 15875, 14750, }, | ||
1611 | { 16000, 14875, }, | ||
1612 | { 16125, 15000, }, | ||
1613 | }; | ||
1614 | if (dev_priv->info->is_mobile) | ||
1615 | return v_table[pxvid].vm; | ||
1616 | else | ||
1617 | return v_table[pxvid].vd; | ||
1651 | } | 1618 | } |
1652 | 1619 | ||
1653 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) | 1620 | void i915_update_gfx_val(struct drm_i915_private *dev_priv) |
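The rewrite above drops the pvid key column and the linear search entirely: the rows of the new function-local v_table are stored in PXVID order, so the 7-bit VID the caller extracts from the hardware (the update path masks the value down to 7 bits, matching the 128 rows) indexes the array directly. A toy version of the same direct-index lookup, 4 entries, values in the table's ".1 mil" units:

#include <stdint.h>
#include <stdio.h>

struct v_entry {
	uint16_t vd;	/* desktop voltage, in .1 mil per the driver's comment */
	uint16_t vm;	/* mobile voltage, same units */
};

static const struct v_entry v_table[] = {
	{ 0, 0 }, { 375, 0 }, { 500, 0 }, { 625, 0 },
};

static uint16_t pvid_to_extvid(uint8_t pxvid, int is_mobile)
{
	if (pxvid >= sizeof(v_table) / sizeof(v_table[0]))
		return 0;	/* the driver relies on the 7-bit mask instead */
	return is_mobile ? v_table[pxvid].vm : v_table[pxvid].vd;
}

int main(void)
{
	printf("%u\n", (unsigned)pvid_to_extvid(2, 0));	/* prints 500 */
	return 0;
}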
@@ -1868,6 +1835,26 @@ out_unlock: | |||
1868 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | 1835 | EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); |
1869 | 1836 | ||
1870 | /** | 1837 | /** |
1838 | * Tells the intel_ips driver that the i915 driver is now loaded, if | ||
1839 | * IPS got loaded first. | ||
1840 | * | ||
1841 | * This awkward dance is so that neither module has to depend on the | ||
1842 | * other in order for IPS to do the appropriate communication of | ||
1843 | * GPU turbo limits to i915. | ||
1844 | */ | ||
1845 | static void | ||
1846 | ips_ping_for_i915_load(void) | ||
1847 | { | ||
1848 | void (*link)(void); | ||
1849 | |||
1850 | link = symbol_get(ips_link_to_i915_driver); | ||
1851 | if (link) { | ||
1852 | link(); | ||
1853 | symbol_put(ips_link_to_i915_driver); | ||
1854 | } | ||
1855 | } | ||
1856 | |||
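symbol_get() above takes a reference on the exporting module only if intel_ips is already loaded, so neither module needs a hard dependency on the other; symbol_put() drops that reference again. A userspace analogue of the same late-binding handshake using dlopen with RTLD_NOLOAD; the library name and the reuse of the entry-point symbol are hypothetical:

#define _GNU_SOURCE		/* for RTLD_NOLOAD (glibc); link with -ldl */
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	/* bind only if the other side is already resident, like symbol_get() */
	void *h = dlopen("libips.so", RTLD_NOW | RTLD_NOLOAD);
	void (*link)(void);

	if (!h) {
		printf("ips not loaded; nothing to do\n");
		return 0;
	}
	link = (void (*)(void))dlsym(h, "ips_link_to_i915_driver");
	if (link)
		link();
	dlclose(h);		/* drop the reference, like symbol_put() */
	return 0;
}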
1857 | /** | ||
1871 | * i915_driver_load - setup chip and create an initial config | 1858 | * i915_driver_load - setup chip and create an initial config |
1872 | * @dev: DRM device | 1859 | * @dev: DRM device |
1873 | * @flags: startup flags | 1860 | * @flags: startup flags |
@@ -1881,9 +1868,9 @@ EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); | |||
1881 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | 1868 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
1882 | { | 1869 | { |
1883 | struct drm_i915_private *dev_priv; | 1870 | struct drm_i915_private *dev_priv; |
1884 | resource_size_t base, size; | ||
1885 | int ret = 0, mmio_bar; | 1871 | int ret = 0, mmio_bar; |
1886 | uint32_t agp_size, prealloc_size; | 1872 | uint32_t agp_size; |
1873 | |||
1887 | /* i915 has 4 more counters */ | 1874 | /* i915 has 4 more counters */ |
1888 | dev->counters += 4; | 1875 | dev->counters += 4; |
1889 | dev->types[6] = _DRM_STAT_IRQ; | 1876 | dev->types[6] = _DRM_STAT_IRQ; |
@@ -1899,11 +1886,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1899 | dev_priv->dev = dev; | 1886 | dev_priv->dev = dev; |
1900 | dev_priv->info = (struct intel_device_info *) flags; | 1887 | dev_priv->info = (struct intel_device_info *) flags; |
1901 | 1888 | ||
1902 | /* Add register map (needed for suspend/resume) */ | ||
1903 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | ||
1904 | base = pci_resource_start(dev->pdev, mmio_bar); | ||
1905 | size = pci_resource_len(dev->pdev, mmio_bar); | ||
1906 | |||
1907 | if (i915_get_bridge_dev(dev)) { | 1889 | if (i915_get_bridge_dev(dev)) { |
1908 | ret = -EIO; | 1890 | ret = -EIO; |
1909 | goto free_priv; | 1891 | goto free_priv; |
@@ -1913,16 +1895,25 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1913 | if (IS_GEN2(dev)) | 1895 | if (IS_GEN2(dev)) |
1914 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
1915 | 1897 | ||
1916 | dev_priv->regs = ioremap(base, size); | 1898 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1899 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); | ||
1917 | if (!dev_priv->regs) { | 1900 | if (!dev_priv->regs) { |
1918 | DRM_ERROR("failed to map registers\n"); | 1901 | DRM_ERROR("failed to map registers\n"); |
1919 | ret = -EIO; | 1902 | ret = -EIO; |
1920 | goto put_bridge; | 1903 | goto put_bridge; |
1921 | } | 1904 | } |
1922 | 1905 | ||
1906 | dev_priv->mm.gtt = intel_gtt_get(); | ||
1907 | if (!dev_priv->mm.gtt) { | ||
1908 | DRM_ERROR("Failed to initialize GTT\n"); | ||
1909 | ret = -ENODEV; | ||
1910 | goto out_iomapfree; | ||
1911 | } | ||
1912 | |||
1913 | agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | ||
1914 | |||
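As a worked example of the conversion above: with 4 KiB pages (PAGE_SHIFT = 12), a GTT reporting 65536 mappable entries yields 65536 << 12 bytes, i.e. a 256 MiB CPU-mappable aperture.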
1923 | dev_priv->mm.gtt_mapping = | 1915 | dev_priv->mm.gtt_mapping = |
1924 | io_mapping_create_wc(dev->agp->base, | 1916 | io_mapping_create_wc(dev->agp->base, agp_size); |
1925 | dev->agp->agp_info.aper_size * 1024*1024); | ||
1926 | if (dev_priv->mm.gtt_mapping == NULL) { | 1917 | if (dev_priv->mm.gtt_mapping == NULL) { |
1927 | ret = -EIO; | 1918 | ret = -EIO; |
1928 | goto out_rmmap; | 1919 | goto out_rmmap; |
@@ -1934,24 +1925,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1934 | * MTRR if present. Even if a UC MTRR isn't present. | 1925 | * MTRR if present. Even if a UC MTRR isn't present. |
1935 | */ | 1926 | */ |
1936 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, | 1927 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, |
1937 | dev->agp->agp_info.aper_size * | 1928 | agp_size, |
1938 | 1024 * 1024, | ||
1939 | MTRR_TYPE_WRCOMB, 1); | 1929 | MTRR_TYPE_WRCOMB, 1); |
1940 | if (dev_priv->mm.gtt_mtrr < 0) { | 1930 | if (dev_priv->mm.gtt_mtrr < 0) { |
1941 | DRM_INFO("MTRR allocation failed. Graphics " | 1931 | DRM_INFO("MTRR allocation failed. Graphics " |
1942 | "performance may suffer.\n"); | 1932 | "performance may suffer.\n"); |
1943 | } | 1933 | } |
1944 | 1934 | ||
1945 | dev_priv->mm.gtt = intel_gtt_get(); | ||
1946 | if (!dev_priv->mm.gtt) { | ||
1947 | DRM_ERROR("Failed to initialize GTT\n"); | ||
1948 | ret = -ENODEV; | ||
1949 | goto out_iomapfree; | ||
1950 | } | ||
1951 | |||
1952 | prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; | ||
1953 | agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | ||
1954 | |||
1955 | /* The i915 workqueue is primarily used for batched retirement of | 1935 | /* The i915 workqueue is primarily used for batched retirement of |
1956 | * requests (and thus managing bo) once the task has been completed | 1936 | * requests (and thus managing bo) once the task has been completed |
1957 | * by the GPU. i915_gem_retire_requests() is called directly when we | 1937 | * by the GPU. i915_gem_retire_requests() is called directly when we |
@@ -1959,7 +1939,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1959 | * bo. | 1939 | * bo. |
1960 | * | 1940 | * |
1961 | * It is also used for periodic low-priority events, such as | 1941 | * It is also used for periodic low-priority events, such as |
1962 | * idle-timers and hangcheck. | 1942 | * idle-timers and recording error state. |
1963 | * | 1943 | * |
1964 | * All tasks on the workqueue are expected to acquire the dev mutex | 1944 | * All tasks on the workqueue are expected to acquire the dev mutex |
1965 | * so there is no point in running more than one instance of the | 1945 | * so there is no point in running more than one instance of the |
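A sketch of the allocation this comment justifies (the call site sits in unchanged context just below this hunk; the out_iomapfree label is assumed from the adjacent hunks):

	/* One worker thread suffices: every work item serializes on
	 * dev->struct_mutex anyway, so extra threads would only contend.
	 */
	dev_priv->wq = create_singlethread_workqueue("i915");
	if (dev_priv->wq == NULL) {
		ret = -ENOMEM;
		goto out_iomapfree;
	}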
@@ -1977,22 +1957,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1977 | /* enable GEM by default */ | 1957 | /* enable GEM by default */ |
1978 | dev_priv->has_gem = 1; | 1958 | dev_priv->has_gem = 1; |
1979 | 1959 | ||
1980 | if (prealloc_size > agp_size * 3 / 4) { | ||
1981 | DRM_ERROR("Detected broken video BIOS with %d/%dkB of video " | ||
1982 | "memory stolen.\n", | ||
1983 | prealloc_size / 1024, agp_size / 1024); | ||
1984 | DRM_ERROR("Disabling GEM. (try reducing stolen memory or " | ||
1985 | "updating the BIOS to fix).\n"); | ||
1986 | dev_priv->has_gem = 0; | ||
1987 | } | ||
1988 | |||
1989 | if (dev_priv->has_gem == 0 && | ||
1990 | drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
1991 | DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); | ||
1992 | ret = -ENODEV; | ||
1993 | goto out_iomapfree; | ||
1994 | } | ||
1995 | |||
1996 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 1960 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
1997 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 1961 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
1998 | if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { | 1962 | if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { |
@@ -2013,8 +1977,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2013 | /* Init HWS */ | 1977 | /* Init HWS */ |
2014 | if (!I915_NEED_GFX_HWS(dev)) { | 1978 | if (!I915_NEED_GFX_HWS(dev)) { |
2015 | ret = i915_init_phys_hws(dev); | 1979 | ret = i915_init_phys_hws(dev); |
2016 | if (ret != 0) | 1980 | if (ret) |
2017 | goto out_workqueue_free; | 1981 | goto out_gem_unload; |
2018 | } | 1982 | } |
2019 | 1983 | ||
2020 | if (IS_PINEVIEW(dev)) | 1984 | if (IS_PINEVIEW(dev)) |
@@ -2036,16 +2000,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2036 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | 2000 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
2037 | pci_enable_msi(dev->pdev); | 2001 | pci_enable_msi(dev->pdev); |
2038 | 2002 | ||
2039 | spin_lock_init(&dev_priv->user_irq_lock); | 2003 | spin_lock_init(&dev_priv->irq_lock); |
2040 | spin_lock_init(&dev_priv->error_lock); | 2004 | spin_lock_init(&dev_priv->error_lock); |
2041 | dev_priv->trace_irq_seqno = 0; | 2005 | dev_priv->trace_irq_seqno = 0; |
2042 | 2006 | ||
2043 | ret = drm_vblank_init(dev, I915_NUM_PIPE); | 2007 | ret = drm_vblank_init(dev, I915_NUM_PIPE); |
2044 | 2008 | if (ret) | |
2045 | if (ret) { | 2009 | goto out_gem_unload; |
2046 | (void) i915_driver_unload(dev); | ||
2047 | return ret; | ||
2048 | } | ||
2049 | 2010 | ||
2050 | /* Start out suspended */ | 2011 | /* Start out suspended */ |
2051 | dev_priv->mm.suspended = 1; | 2012 | dev_priv->mm.suspended = 1; |
@@ -2053,10 +2014,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2053 | intel_detect_pch(dev); | 2014 | intel_detect_pch(dev); |
2054 | 2015 | ||
2055 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 2016 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
2056 | ret = i915_load_modeset_init(dev, prealloc_size, agp_size); | 2017 | ret = i915_load_modeset_init(dev); |
2057 | if (ret < 0) { | 2018 | if (ret < 0) { |
2058 | DRM_ERROR("failed to init modeset\n"); | 2019 | DRM_ERROR("failed to init modeset\n"); |
2059 | goto out_workqueue_free; | 2020 | goto out_gem_unload; |
2060 | } | 2021 | } |
2061 | } | 2022 | } |
2062 | 2023 | ||
@@ -2072,14 +2033,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2072 | dev_priv->mchdev_lock = &mchdev_lock; | 2033 | dev_priv->mchdev_lock = &mchdev_lock; |
2073 | spin_unlock(&mchdev_lock); | 2034 | spin_unlock(&mchdev_lock); |
2074 | 2035 | ||
2036 | ips_ping_for_i915_load(); | ||
2037 | |||
2075 | return 0; | 2038 | return 0; |
2076 | 2039 | ||
2077 | out_workqueue_free: | 2040 | out_gem_unload: |
2041 | if (dev->pdev->msi_enabled) | ||
2042 | pci_disable_msi(dev->pdev); | ||
2043 | |||
2044 | intel_teardown_gmbus(dev); | ||
2045 | intel_teardown_mchbar(dev); | ||
2078 | destroy_workqueue(dev_priv->wq); | 2046 | destroy_workqueue(dev_priv->wq); |
2079 | out_iomapfree: | 2047 | out_iomapfree: |
2080 | io_mapping_free(dev_priv->mm.gtt_mapping); | 2048 | io_mapping_free(dev_priv->mm.gtt_mapping); |
2081 | out_rmmap: | 2049 | out_rmmap: |
2082 | iounmap(dev_priv->regs); | 2050 | pci_iounmap(dev->pdev, dev_priv->regs); |
2083 | put_bridge: | 2051 | put_bridge: |
2084 | pci_dev_put(dev_priv->bridge_dev); | 2052 | pci_dev_put(dev_priv->bridge_dev); |
2085 | free_priv: | 2053 | free_priv: |
@@ -2096,6 +2064,9 @@ int i915_driver_unload(struct drm_device *dev) | |||
2096 | i915_mch_dev = NULL; | 2064 | i915_mch_dev = NULL; |
2097 | spin_unlock(&mchdev_lock); | 2065 | spin_unlock(&mchdev_lock); |
2098 | 2066 | ||
2067 | if (dev_priv->mm.inactive_shrinker.shrink) | ||
2068 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | ||
2069 | |||
2099 | mutex_lock(&dev->struct_mutex); | 2070 | mutex_lock(&dev->struct_mutex); |
2100 | ret = i915_gpu_idle(dev); | 2071 | ret = i915_gpu_idle(dev); |
2101 | if (ret) | 2072 | if (ret) |
@@ -2153,7 +2124,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
2153 | mutex_unlock(&dev->struct_mutex); | 2124 | mutex_unlock(&dev->struct_mutex); |
2154 | if (I915_HAS_FBC(dev) && i915_powersave) | 2125 | if (I915_HAS_FBC(dev) && i915_powersave) |
2155 | i915_cleanup_compression(dev); | 2126 | i915_cleanup_compression(dev); |
2156 | drm_mm_takedown(&dev_priv->mm.vram); | 2127 | drm_mm_takedown(&dev_priv->mm.stolen); |
2157 | 2128 | ||
2158 | intel_cleanup_overlay(dev); | 2129 | intel_cleanup_overlay(dev); |
2159 | 2130 | ||
@@ -2162,7 +2133,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
2162 | } | 2133 | } |
2163 | 2134 | ||
2164 | if (dev_priv->regs != NULL) | 2135 | if (dev_priv->regs != NULL) |
2165 | iounmap(dev_priv->regs); | 2136 | pci_iounmap(dev->pdev, dev_priv->regs); |
2166 | 2137 | ||
2167 | intel_teardown_gmbus(dev); | 2138 | intel_teardown_gmbus(dev); |
2168 | intel_teardown_mchbar(dev); | 2139 | intel_teardown_mchbar(dev); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f737960712e6..9ad42d583493 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -49,12 +49,18 @@ module_param_named(powersave, i915_powersave, int, 0600); | |||
49 | unsigned int i915_lvds_downclock = 0; | 49 | unsigned int i915_lvds_downclock = 0; |
50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
51 | 51 | ||
52 | unsigned int i915_panel_use_ssc = 1; | ||
53 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); | ||
54 | |||
55 | bool i915_try_reset = true; | ||
56 | module_param_named(reset, i915_try_reset, bool, 0600); | ||
57 | |||
52 | static struct drm_driver driver; | 58 | static struct drm_driver driver; |
53 | extern int intel_agp_enabled; | 59 | extern int intel_agp_enabled; |
54 | 60 | ||
55 | #define INTEL_VGA_DEVICE(id, info) { \ | 61 | #define INTEL_VGA_DEVICE(id, info) { \ |
56 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ | 62 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ |
57 | .class_mask = 0xffff00, \ | 63 | .class_mask = 0xff0000, \ |
58 | .vendor = 0x8086, \ | 64 | .vendor = 0x8086, \ |
59 | .device = id, \ | 65 | .device = id, \ |
60 | .subvendor = PCI_ANY_ID, \ | 66 | .subvendor = PCI_ANY_ID, \ |
@@ -111,7 +117,7 @@ static const struct intel_device_info intel_i965g_info = { | |||
111 | 117 | ||
112 | static const struct intel_device_info intel_i965gm_info = { | 118 | static const struct intel_device_info intel_i965gm_info = { |
113 | .gen = 4, .is_crestline = 1, | 119 | .gen = 4, .is_crestline = 1, |
114 | .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, | 120 | .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, |
115 | .has_overlay = 1, | 121 | .has_overlay = 1, |
116 | .supports_tv = 1, | 122 | .supports_tv = 1, |
117 | }; | 123 | }; |
@@ -130,7 +136,7 @@ static const struct intel_device_info intel_g45_info = { | |||
130 | 136 | ||
131 | static const struct intel_device_info intel_gm45_info = { | 137 | static const struct intel_device_info intel_gm45_info = { |
132 | .gen = 4, .is_g4x = 1, | 138 | .gen = 4, .is_g4x = 1, |
133 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, | 139 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, |
134 | .has_pipe_cxsr = 1, .has_hotplug = 1, | 140 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
135 | .supports_tv = 1, | 141 | .supports_tv = 1, |
136 | .has_bsd_ring = 1, | 142 | .has_bsd_ring = 1, |
@@ -150,7 +156,7 @@ static const struct intel_device_info intel_ironlake_d_info = { | |||
150 | 156 | ||
151 | static const struct intel_device_info intel_ironlake_m_info = { | 157 | static const struct intel_device_info intel_ironlake_m_info = { |
152 | .gen = 5, .is_mobile = 1, | 158 | .gen = 5, .is_mobile = 1, |
153 | .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1, | 159 | .need_gfx_hws = 1, .has_hotplug = 1, |
154 | .has_fbc = 0, /* disabled due to buggy hardware */ | 160 | .has_fbc = 0, /* disabled due to buggy hardware */ |
155 | .has_bsd_ring = 1, | 161 | .has_bsd_ring = 1, |
156 | }; | 162 | }; |
@@ -165,6 +171,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { | |||
165 | static const struct intel_device_info intel_sandybridge_m_info = { | 171 | static const struct intel_device_info intel_sandybridge_m_info = { |
166 | .gen = 6, .is_mobile = 1, | 172 | .gen = 6, .is_mobile = 1, |
167 | .need_gfx_hws = 1, .has_hotplug = 1, | 173 | .need_gfx_hws = 1, .has_hotplug = 1, |
174 | .has_fbc = 1, | ||
168 | .has_bsd_ring = 1, | 175 | .has_bsd_ring = 1, |
169 | .has_blt_ring = 1, | 176 | .has_blt_ring = 1, |
170 | }; | 177 | }; |
@@ -244,10 +251,34 @@ void intel_detect_pch (struct drm_device *dev) | |||
244 | } | 251 | } |
245 | } | 252 | } |
246 | 253 | ||
254 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | ||
255 | { | ||
256 | int count; | ||
257 | |||
258 | count = 0; | ||
259 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) | ||
260 | udelay(10); | ||
261 | |||
262 | I915_WRITE_NOTRACE(FORCEWAKE, 1); | ||
263 | POSTING_READ(FORCEWAKE); | ||
264 | |||
265 | count = 0; | ||
266 | while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) | ||
267 | udelay(10); | ||
268 | } | ||
269 | |||
270 | void __gen6_force_wake_put(struct drm_i915_private *dev_priv) | ||
271 | { | ||
272 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | ||
273 | POSTING_READ(FORCEWAKE); | ||
274 | } | ||
275 | |||
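The pair above is a request/acknowledge handshake: wait for any previous wake to clear, assert FORCEWAKE, then poll until the hardware acks. The polling step could be factored as below (illustrative helper, not in the patch):

static int wait_for_ack(struct drm_i915_private *dev_priv,
			bool (*ack)(struct drm_i915_private *),
			bool value)
{
	int tries = 50;	/* same 50 x 10us budget as the loops above */

	while (tries--) {
		if (ack(dev_priv) == value)
			return 0;
		udelay(10);		/* <linux/delay.h> */
	}
	return -ETIMEDOUT;		/* <linux/errno.h> */
}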
247 | static int i915_drm_freeze(struct drm_device *dev) | 276 | static int i915_drm_freeze(struct drm_device *dev) |
248 | { | 277 | { |
249 | struct drm_i915_private *dev_priv = dev->dev_private; | 278 | struct drm_i915_private *dev_priv = dev->dev_private; |
250 | 279 | ||
280 | drm_kms_helper_poll_disable(dev); | ||
281 | |||
251 | pci_save_state(dev->pdev); | 282 | pci_save_state(dev->pdev); |
252 | 283 | ||
253 | /* If KMS is active, we do the leavevt stuff here */ | 284 | /* If KMS is active, we do the leavevt stuff here */ |
@@ -284,7 +315,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
284 | if (state.event == PM_EVENT_PRETHAW) | 315 | if (state.event == PM_EVENT_PRETHAW) |
285 | return 0; | 316 | return 0; |
286 | 317 | ||
287 | drm_kms_helper_poll_disable(dev); | 318 | |
319 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
320 | return 0; | ||
288 | 321 | ||
289 | error = i915_drm_freeze(dev); | 322 | error = i915_drm_freeze(dev); |
290 | if (error) | 323 | if (error) |
@@ -304,6 +337,12 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
304 | struct drm_i915_private *dev_priv = dev->dev_private; | 337 | struct drm_i915_private *dev_priv = dev->dev_private; |
305 | int error = 0; | 338 | int error = 0; |
306 | 339 | ||
340 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
341 | mutex_lock(&dev->struct_mutex); | ||
342 | i915_gem_restore_gtt_mappings(dev); | ||
343 | mutex_unlock(&dev->struct_mutex); | ||
344 | } | ||
345 | |||
307 | i915_restore_state(dev); | 346 | i915_restore_state(dev); |
308 | intel_opregion_setup(dev); | 347 | intel_opregion_setup(dev); |
309 | 348 | ||
@@ -315,10 +354,14 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
315 | error = i915_gem_init_ringbuffer(dev); | 354 | error = i915_gem_init_ringbuffer(dev); |
316 | mutex_unlock(&dev->struct_mutex); | 355 | mutex_unlock(&dev->struct_mutex); |
317 | 356 | ||
357 | drm_mode_config_reset(dev); | ||
318 | drm_irq_install(dev); | 358 | drm_irq_install(dev); |
319 | 359 | ||
320 | /* Resume the modeset for every activated CRTC */ | 360 | /* Resume the modeset for every activated CRTC */ |
321 | drm_helper_resume_force_mode(dev); | 361 | drm_helper_resume_force_mode(dev); |
362 | |||
363 | if (dev_priv->renderctx && dev_priv->pwrctx) | ||
364 | ironlake_enable_rc6(dev); | ||
322 | } | 365 | } |
323 | 366 | ||
324 | intel_opregion_init(dev); | 367 | intel_opregion_init(dev); |
@@ -332,6 +375,9 @@ int i915_resume(struct drm_device *dev) | |||
332 | { | 375 | { |
333 | int ret; | 376 | int ret; |
334 | 377 | ||
378 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
379 | return 0; | ||
380 | |||
335 | if (pci_enable_device(dev->pdev)) | 381 | if (pci_enable_device(dev->pdev)) |
336 | return -EIO; | 382 | return -EIO; |
337 | 383 | ||
@@ -405,6 +451,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags) | |||
405 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); | 451 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); |
406 | } | 452 | } |
407 | 453 | ||
454 | static int gen6_do_reset(struct drm_device *dev, u8 flags) | ||
455 | { | ||
456 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
457 | |||
458 | I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL); | ||
459 | return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); | ||
460 | } | ||
461 | |||
408 | /** | 462 | /** |
409 | * i965_reset - reset chip after a hang | 463 | * i965_reset - reset chip after a hang |
410 | * @dev: drm device to reset | 464 | * @dev: drm device to reset |
@@ -431,7 +485,11 @@ int i915_reset(struct drm_device *dev, u8 flags) | |||
431 | bool need_display = true; | 485 | bool need_display = true; |
432 | int ret; | 486 | int ret; |
433 | 487 | ||
434 | mutex_lock(&dev->struct_mutex); | 488 | if (!i915_try_reset) |
489 | return 0; | ||
490 | |||
491 | if (!mutex_trylock(&dev->struct_mutex)) | ||
492 | return -EBUSY; | ||
435 | 493 | ||
436 | i915_gem_reset(dev); | 494 | i915_gem_reset(dev); |
437 | 495 | ||
@@ -439,6 +497,9 @@ int i915_reset(struct drm_device *dev, u8 flags) | |||
439 | if (get_seconds() - dev_priv->last_gpu_reset < 5) { | 497 | if (get_seconds() - dev_priv->last_gpu_reset < 5) { |
440 | DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); | 498 | DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); |
441 | } else switch (INTEL_INFO(dev)->gen) { | 499 | } else switch (INTEL_INFO(dev)->gen) { |
500 | case 6: | ||
501 | ret = gen6_do_reset(dev, flags); | ||
502 | break; | ||
442 | case 5: | 503 | case 5: |
443 | ret = ironlake_do_reset(dev, flags); | 504 | ret = ironlake_do_reset(dev, flags); |
444 | break; | 505 | break; |
@@ -472,11 +533,17 @@ int i915_reset(struct drm_device *dev, u8 flags) | |||
472 | */ | 533 | */ |
473 | if (drm_core_check_feature(dev, DRIVER_MODESET) || | 534 | if (drm_core_check_feature(dev, DRIVER_MODESET) || |
474 | !dev_priv->mm.suspended) { | 535 | !dev_priv->mm.suspended) { |
475 | struct intel_ring_buffer *ring = &dev_priv->render_ring; | ||
476 | dev_priv->mm.suspended = 0; | 536 | dev_priv->mm.suspended = 0; |
477 | ring->init(dev, ring); | 537 | |
538 | dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); | ||
539 | if (HAS_BSD(dev)) | ||
540 | dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); | ||
541 | if (HAS_BLT(dev)) | ||
542 | dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); | ||
543 | |||
478 | mutex_unlock(&dev->struct_mutex); | 544 | mutex_unlock(&dev->struct_mutex); |
479 | drm_irq_uninstall(dev); | 545 | drm_irq_uninstall(dev); |
546 | drm_mode_config_reset(dev); | ||
480 | drm_irq_install(dev); | 547 | drm_irq_install(dev); |
481 | mutex_lock(&dev->struct_mutex); | 548 | mutex_lock(&dev->struct_mutex); |
482 | } | 549 | } |
@@ -501,6 +568,14 @@ int i915_reset(struct drm_device *dev, u8 flags) | |||
501 | static int __devinit | 568 | static int __devinit |
502 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 569 | i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
503 | { | 570 | { |
571 | /* Only bind to function 0 of the device. Early generations | ||
572 | * used function 1 as a placeholder for multi-head. That only | ||
573 | * causes confusion, especially on systems where both | ||
574 | * functions have the same PCI-ID! | ||
575 | */ | ||
576 | if (PCI_FUNC(pdev->devfn)) | ||
577 | return -ENODEV; | ||
578 | |||
504 | return drm_get_pci_dev(pdev, ent, &driver); | 579 | return drm_get_pci_dev(pdev, ent, &driver); |
505 | } | 580 | } |
506 | 581 | ||
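For reference, the devfn encoding that PCI_FUNC() unpacks in the check above, as defined in <linux/pci.h>:

/* devfn packs a 5-bit slot number and a 3-bit function number, so
 * function 0 is simply (devfn & 0x07) == 0.
 */
#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)	((devfn) & 0x07)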
@@ -523,6 +598,9 @@ static int i915_pm_suspend(struct device *dev) | |||
523 | return -ENODEV; | 598 | return -ENODEV; |
524 | } | 599 | } |
525 | 600 | ||
601 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
602 | return 0; | ||
603 | |||
526 | error = i915_drm_freeze(drm_dev); | 604 | error = i915_drm_freeze(drm_dev); |
527 | if (error) | 605 | if (error) |
528 | return error; | 606 | return error; |
@@ -606,6 +684,8 @@ static struct drm_driver driver = { | |||
606 | .device_is_agp = i915_driver_device_is_agp, | 684 | .device_is_agp = i915_driver_device_is_agp, |
607 | .enable_vblank = i915_enable_vblank, | 685 | .enable_vblank = i915_enable_vblank, |
608 | .disable_vblank = i915_disable_vblank, | 686 | .disable_vblank = i915_disable_vblank, |
687 | .get_vblank_timestamp = i915_get_vblank_timestamp, | ||
688 | .get_scanout_position = i915_get_crtc_scanoutpos, | ||
609 | .irq_preinstall = i915_driver_irq_preinstall, | 689 | .irq_preinstall = i915_driver_irq_preinstall, |
610 | .irq_postinstall = i915_driver_irq_postinstall, | 690 | .irq_postinstall = i915_driver_irq_postinstall, |
611 | .irq_uninstall = i915_driver_irq_uninstall, | 691 | .irq_uninstall = i915_driver_irq_uninstall, |
@@ -620,6 +700,9 @@ static struct drm_driver driver = { | |||
620 | .gem_init_object = i915_gem_init_object, | 700 | .gem_init_object = i915_gem_init_object, |
621 | .gem_free_object = i915_gem_free_object, | 701 | .gem_free_object = i915_gem_free_object, |
622 | .gem_vm_ops = &i915_gem_vm_ops, | 702 | .gem_vm_ops = &i915_gem_vm_ops, |
703 | .dumb_create = i915_gem_dumb_create, | ||
704 | .dumb_map_offset = i915_gem_mmap_gtt, | ||
705 | .dumb_destroy = i915_gem_dumb_destroy, | ||
623 | .ioctls = i915_ioctls, | 706 | .ioctls = i915_ioctls, |
624 | .fops = { | 707 | .fops = { |
625 | .owner = THIS_MODULE, | 708 | .owner = THIS_MODULE, |
@@ -636,14 +719,6 @@ static struct drm_driver driver = { | |||
636 | .llseek = noop_llseek, | 719 | .llseek = noop_llseek, |
637 | }, | 720 | }, |
638 | 721 | ||
639 | .pci_driver = { | ||
640 | .name = DRIVER_NAME, | ||
641 | .id_table = pciidlist, | ||
642 | .probe = i915_pci_probe, | ||
643 | .remove = i915_pci_remove, | ||
644 | .driver.pm = &i915_pm_ops, | ||
645 | }, | ||
646 | |||
647 | .name = DRIVER_NAME, | 722 | .name = DRIVER_NAME, |
648 | .desc = DRIVER_DESC, | 723 | .desc = DRIVER_DESC, |
649 | .date = DRIVER_DATE, | 724 | .date = DRIVER_DATE, |
@@ -652,6 +727,14 @@ static struct drm_driver driver = { | |||
652 | .patchlevel = DRIVER_PATCHLEVEL, | 727 | .patchlevel = DRIVER_PATCHLEVEL, |
653 | }; | 728 | }; |
654 | 729 | ||
730 | static struct pci_driver i915_pci_driver = { | ||
731 | .name = DRIVER_NAME, | ||
732 | .id_table = pciidlist, | ||
733 | .probe = i915_pci_probe, | ||
734 | .remove = i915_pci_remove, | ||
735 | .driver.pm = &i915_pm_ops, | ||
736 | }; | ||
737 | |||
655 | static int __init i915_init(void) | 738 | static int __init i915_init(void) |
656 | { | 739 | { |
657 | if (!intel_agp_enabled) { | 740 | if (!intel_agp_enabled) { |
@@ -661,8 +744,6 @@ static int __init i915_init(void) | |||
661 | 744 | ||
662 | driver.num_ioctls = i915_max_ioctl; | 745 | driver.num_ioctls = i915_max_ioctl; |
663 | 746 | ||
664 | i915_gem_shrinker_init(); | ||
665 | |||
666 | /* | 747 | /* |
667 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless | 748 | * If CONFIG_DRM_I915_KMS is set, default to KMS unless |
668 | * explicitly disabled with the module parameter. | 749 | * explicitly disabled with the module parameter. |
@@ -684,18 +765,15 @@ static int __init i915_init(void) | |||
684 | driver.driver_features &= ~DRIVER_MODESET; | 765 | driver.driver_features &= ~DRIVER_MODESET; |
685 | #endif | 766 | #endif |
686 | 767 | ||
687 | if (!(driver.driver_features & DRIVER_MODESET)) { | 768 | if (!(driver.driver_features & DRIVER_MODESET)) |
688 | driver.suspend = i915_suspend; | 769 | driver.get_vblank_timestamp = NULL; |
689 | driver.resume = i915_resume; | ||
690 | } | ||
691 | 770 | ||
692 | return drm_init(&driver); | 771 | return drm_pci_init(&driver, &i915_pci_driver); |
693 | } | 772 | } |
694 | 773 | ||
695 | static void __exit i915_exit(void) | 774 | static void __exit i915_exit(void) |
696 | { | 775 | { |
697 | i915_gem_shrinker_exit(); | 776 | drm_pci_exit(&driver, &i915_pci_driver); |
698 | drm_exit(&driver); | ||
699 | } | 777 | } |
700 | 778 | ||
701 | module_init(i915_init); | 779 | module_init(i915_init); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 409826da3099..a78197d43ce6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object { | |||
89 | int id; | 89 | int id; |
90 | struct page **page_list; | 90 | struct page **page_list; |
91 | drm_dma_handle_t *handle; | 91 | drm_dma_handle_t *handle; |
92 | struct drm_gem_object *cur_obj; | 92 | struct drm_i915_gem_object *cur_obj; |
93 | }; | 93 | }; |
94 | 94 | ||
95 | struct mem_block { | 95 | struct mem_block { |
@@ -124,9 +124,9 @@ struct drm_i915_master_private { | |||
124 | #define I915_FENCE_REG_NONE -1 | 124 | #define I915_FENCE_REG_NONE -1 |
125 | 125 | ||
126 | struct drm_i915_fence_reg { | 126 | struct drm_i915_fence_reg { |
127 | struct drm_gem_object *obj; | ||
128 | struct list_head lru_list; | 127 | struct list_head lru_list; |
129 | bool gpu; | 128 | struct drm_i915_gem_object *obj; |
129 | uint32_t setup_seqno; | ||
130 | }; | 130 | }; |
131 | 131 | ||
132 | struct sdvo_device_mapping { | 132 | struct sdvo_device_mapping { |
@@ -139,6 +139,8 @@ struct sdvo_device_mapping { | |||
139 | u8 ddc_pin; | 139 | u8 ddc_pin; |
140 | }; | 140 | }; |
141 | 141 | ||
142 | struct intel_display_error_state; | ||
143 | |||
142 | struct drm_i915_error_state { | 144 | struct drm_i915_error_state { |
143 | u32 eir; | 145 | u32 eir; |
144 | u32 pgtbl_er; | 146 | u32 pgtbl_er; |
@@ -148,32 +150,47 @@ struct drm_i915_error_state { | |||
148 | u32 ipehr; | 150 | u32 ipehr; |
149 | u32 instdone; | 151 | u32 instdone; |
150 | u32 acthd; | 152 | u32 acthd; |
153 | u32 error; /* gen6+ */ | ||
154 | u32 bcs_acthd; /* gen6+ blt engine */ | ||
155 | u32 bcs_ipehr; | ||
156 | u32 bcs_ipeir; | ||
157 | u32 bcs_instdone; | ||
158 | u32 bcs_seqno; | ||
159 | u32 vcs_acthd; /* gen6+ bsd engine */ | ||
160 | u32 vcs_ipehr; | ||
161 | u32 vcs_ipeir; | ||
162 | u32 vcs_instdone; | ||
163 | u32 vcs_seqno; | ||
151 | u32 instpm; | 164 | u32 instpm; |
152 | u32 instps; | 165 | u32 instps; |
153 | u32 instdone1; | 166 | u32 instdone1; |
154 | u32 seqno; | 167 | u32 seqno; |
155 | u64 bbaddr; | 168 | u64 bbaddr; |
169 | u64 fence[16]; | ||
156 | struct timeval time; | 170 | struct timeval time; |
157 | struct drm_i915_error_object { | 171 | struct drm_i915_error_object { |
158 | int page_count; | 172 | int page_count; |
159 | u32 gtt_offset; | 173 | u32 gtt_offset; |
160 | u32 *pages[0]; | 174 | u32 *pages[0]; |
161 | } *ringbuffer, *batchbuffer[2]; | 175 | } *ringbuffer, *batchbuffer[I915_NUM_RINGS]; |
162 | struct drm_i915_error_buffer { | 176 | struct drm_i915_error_buffer { |
163 | size_t size; | 177 | u32 size; |
164 | u32 name; | 178 | u32 name; |
165 | u32 seqno; | 179 | u32 seqno; |
166 | u32 gtt_offset; | 180 | u32 gtt_offset; |
167 | u32 read_domains; | 181 | u32 read_domains; |
168 | u32 write_domain; | 182 | u32 write_domain; |
169 | u32 fence_reg; | 183 | s32 fence_reg:5; |
170 | s32 pinned:2; | 184 | s32 pinned:2; |
171 | u32 tiling:2; | 185 | u32 tiling:2; |
172 | u32 dirty:1; | 186 | u32 dirty:1; |
173 | u32 purgeable:1; | 187 | u32 purgeable:1; |
174 | } *active_bo; | 188 | u32 ring:4; |
175 | u32 active_bo_count; | 189 | u32 agp_type:1; |
190 | } *active_bo, *pinned_bo; | ||
191 | u32 active_bo_count, pinned_bo_count; | ||
176 | struct intel_overlay_error_state *overlay; | 192 | struct intel_overlay_error_state *overlay; |
193 | struct intel_display_error_state *display; | ||
177 | }; | 194 | }; |
178 | 195 | ||
179 | struct drm_i915_display_funcs { | 196 | struct drm_i915_display_funcs { |
@@ -207,7 +224,6 @@ struct intel_device_info { | |||
207 | u8 is_broadwater : 1; | 224 | u8 is_broadwater : 1; |
208 | u8 is_crestline : 1; | 225 | u8 is_crestline : 1; |
209 | u8 has_fbc : 1; | 226 | u8 has_fbc : 1; |
210 | u8 has_rc6 : 1; | ||
211 | u8 has_pipe_cxsr : 1; | 227 | u8 has_pipe_cxsr : 1; |
212 | u8 has_hotplug : 1; | 228 | u8 has_hotplug : 1; |
213 | u8 cursor_needs_physical : 1; | 229 | u8 cursor_needs_physical : 1; |
@@ -243,6 +259,7 @@ typedef struct drm_i915_private { | |||
243 | const struct intel_device_info *info; | 259 | const struct intel_device_info *info; |
244 | 260 | ||
245 | int has_gem; | 261 | int has_gem; |
262 | int relative_constants_mode; | ||
246 | 263 | ||
247 | void __iomem *regs; | 264 | void __iomem *regs; |
248 | 265 | ||
@@ -253,20 +270,15 @@ typedef struct drm_i915_private { | |||
253 | } *gmbus; | 270 | } *gmbus; |
254 | 271 | ||
255 | struct pci_dev *bridge_dev; | 272 | struct pci_dev *bridge_dev; |
256 | struct intel_ring_buffer render_ring; | 273 | struct intel_ring_buffer ring[I915_NUM_RINGS]; |
257 | struct intel_ring_buffer bsd_ring; | ||
258 | struct intel_ring_buffer blt_ring; | ||
259 | uint32_t next_seqno; | 274 | uint32_t next_seqno; |
260 | 275 | ||
261 | drm_dma_handle_t *status_page_dmah; | 276 | drm_dma_handle_t *status_page_dmah; |
262 | void *seqno_page; | ||
263 | dma_addr_t dma_status_page; | 277 | dma_addr_t dma_status_page; |
264 | uint32_t counter; | 278 | uint32_t counter; |
265 | unsigned int seqno_gfx_addr; | ||
266 | drm_local_map_t hws_map; | 279 | drm_local_map_t hws_map; |
267 | struct drm_gem_object *seqno_obj; | 280 | struct drm_i915_gem_object *pwrctx; |
268 | struct drm_gem_object *pwrctx; | 281 | struct drm_i915_gem_object *renderctx; |
269 | struct drm_gem_object *renderctx; | ||
270 | 282 | ||
271 | struct resource mch_res; | 283 | struct resource mch_res; |
272 | 284 | ||
@@ -275,25 +287,17 @@ typedef struct drm_i915_private { | |||
275 | int front_offset; | 287 | int front_offset; |
276 | int current_page; | 288 | int current_page; |
277 | int page_flipping; | 289 | int page_flipping; |
278 | #define I915_DEBUG_READ (1<<0) | ||
279 | #define I915_DEBUG_WRITE (1<<1) | ||
280 | unsigned long debug_flags; | ||
281 | 290 | ||
282 | wait_queue_head_t irq_queue; | ||
283 | atomic_t irq_received; | 291 | atomic_t irq_received; |
284 | /** Protects user_irq_refcount and irq_mask_reg */ | ||
285 | spinlock_t user_irq_lock; | ||
286 | u32 trace_irq_seqno; | 292 | u32 trace_irq_seqno; |
293 | |||
294 | /* protects the irq masks */ | ||
295 | spinlock_t irq_lock; | ||
287 | /** Cached value of IMR to avoid reads in updating the bitfield */ | 296 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
288 | u32 irq_mask_reg; | ||
289 | u32 pipestat[2]; | 297 | u32 pipestat[2]; |
290 | /** split irq regs for graphics and display engine on Ironlake, | 298 | u32 irq_mask; |
291 | irq_mask_reg is still used for display irq. */ | 299 | u32 gt_irq_mask; |
292 | u32 gt_irq_mask_reg; | 300 | u32 pch_irq_mask; |
293 | u32 gt_irq_enable_reg; | ||
294 | u32 de_irq_enable_reg; | ||
295 | u32 pch_irq_mask_reg; | ||
296 | u32 pch_irq_enable_reg; | ||
297 | 301 | ||
298 | u32 hotplug_supported_mask; | 302 | u32 hotplug_supported_mask; |
299 | struct work_struct hotplug_work; | 303 | struct work_struct hotplug_work; |
@@ -306,7 +310,7 @@ typedef struct drm_i915_private { | |||
306 | int num_pipe; | 310 | int num_pipe; |
307 | 311 | ||
308 | /* For hangcheck timer */ | 312 | /* For hangcheck timer */ |
309 | #define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */ | 313 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
310 | struct timer_list hangcheck_timer; | 314 | struct timer_list hangcheck_timer; |
311 | int hangcheck_count; | 315 | int hangcheck_count; |
312 | uint32_t last_acthd; | 316 | uint32_t last_acthd; |
@@ -329,6 +333,7 @@ typedef struct drm_i915_private { | |||
329 | 333 | ||
330 | /* LVDS info */ | 334 | /* LVDS info */ |
331 | int backlight_level; /* restore backlight to this value */ | 335 | int backlight_level; /* restore backlight to this value */ |
336 | bool backlight_enabled; | ||
332 | struct drm_display_mode *panel_fixed_mode; | 337 | struct drm_display_mode *panel_fixed_mode; |
333 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ | 338 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
334 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ | 339 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
@@ -530,23 +535,24 @@ typedef struct drm_i915_private { | |||
530 | 535 | ||
531 | struct { | 536 | struct { |
532 | /** Bridge to intel-gtt-ko */ | 537 | /** Bridge to intel-gtt-ko */ |
533 | struct intel_gtt *gtt; | 538 | const struct intel_gtt *gtt; |
534 | /** Memory allocator for GTT stolen memory */ | 539 | /** Memory allocator for GTT stolen memory */ |
535 | struct drm_mm vram; | 540 | struct drm_mm stolen; |
536 | /** Memory allocator for GTT */ | 541 | /** Memory allocator for GTT */ |
537 | struct drm_mm gtt_space; | 542 | struct drm_mm gtt_space; |
543 | /** List of all objects in gtt_space. Used to restore gtt | ||
544 | * mappings on resume */ | ||
545 | struct list_head gtt_list; | ||
546 | |||
547 | /** Usable portion of the GTT for GEM */ | ||
548 | unsigned long gtt_start; | ||
549 | unsigned long gtt_mappable_end; | ||
550 | unsigned long gtt_end; | ||
538 | 551 | ||
539 | struct io_mapping *gtt_mapping; | 552 | struct io_mapping *gtt_mapping; |
540 | int gtt_mtrr; | 553 | int gtt_mtrr; |
541 | 554 | ||
542 | /** | 555 | struct shrinker inactive_shrinker; |
543 | * Membership on list of all loaded devices, used to evict | ||
544 | * inactive buffers under memory pressure. | ||
545 | * | ||
546 | * Modifications should only be done whilst holding the | ||
547 | * shrink_list_lock spinlock. | ||
548 | */ | ||
549 | struct list_head shrink_list; | ||
550 | 556 | ||
551 | /** | 557 | /** |
552 | * List of objects currently involved in rendering. | 558 | * List of objects currently involved in rendering. |
@@ -609,16 +615,6 @@ typedef struct drm_i915_private { | |||
609 | struct delayed_work retire_work; | 615 | struct delayed_work retire_work; |
610 | 616 | ||
611 | /** | 617 | /** |
612 | * Waiting sequence number, if any | ||
613 | */ | ||
614 | uint32_t waiting_gem_seqno; | ||
615 | |||
616 | /** | ||
617 | * Last seq seen at irq time | ||
618 | */ | ||
619 | uint32_t irq_gem_seqno; | ||
620 | |||
621 | /** | ||
622 | * Flag if the X Server, and thus DRM, is not currently in | 618 | * Flag if the X Server, and thus DRM, is not currently in |
623 | * control of the device. | 619 | * control of the device. |
624 | * | 620 | * |
@@ -645,16 +641,11 @@ typedef struct drm_i915_private { | |||
645 | /* storage for physical objects */ | 641 | /* storage for physical objects */ |
646 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; | 642 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
647 | 643 | ||
648 | uint32_t flush_rings; | ||
649 | |||
650 | /* accounting, useful for userland debugging */ | 644 | /* accounting, useful for userland debugging */ |
651 | size_t object_memory; | ||
652 | size_t pin_memory; | ||
653 | size_t gtt_memory; | ||
654 | size_t gtt_total; | 645 | size_t gtt_total; |
646 | size_t mappable_gtt_total; | ||
647 | size_t object_memory; | ||
655 | u32 object_count; | 648 | u32 object_count; |
656 | u32 pin_count; | ||
657 | u32 gtt_count; | ||
658 | } mm; | 649 | } mm; |
659 | struct sdvo_device_mapping sdvo_mappings[2]; | 650 | struct sdvo_device_mapping sdvo_mappings[2]; |
660 | /* indicate whether the LVDS_BORDER should be enabled or not */ | 651 | /* indicate whether the LVDS_BORDER should be enabled or not */ |
@@ -688,14 +679,14 @@ typedef struct drm_i915_private { | |||
688 | u8 fmax; | 679 | u8 fmax; |
689 | u8 fstart; | 680 | u8 fstart; |
690 | 681 | ||
691 | u64 last_count1; | 682 | u64 last_count1; |
692 | unsigned long last_time1; | 683 | unsigned long last_time1; |
693 | u64 last_count2; | 684 | u64 last_count2; |
694 | struct timespec last_time2; | 685 | struct timespec last_time2; |
695 | unsigned long gfx_power; | 686 | unsigned long gfx_power; |
696 | int c_m; | 687 | int c_m; |
697 | int r_t; | 688 | int r_t; |
698 | u8 corr; | 689 | u8 corr; |
699 | spinlock_t *mchdev_lock; | 690 | spinlock_t *mchdev_lock; |
700 | 691 | ||
701 | enum no_fbc_reason no_fbc_reason; | 692 | enum no_fbc_reason no_fbc_reason; |
@@ -709,20 +700,20 @@ typedef struct drm_i915_private { | |||
709 | struct intel_fbdev *fbdev; | 700 | struct intel_fbdev *fbdev; |
710 | } drm_i915_private_t; | 701 | } drm_i915_private_t; |
711 | 702 | ||
712 | /** driver private structure attached to each drm_gem_object */ | ||
713 | struct drm_i915_gem_object { | 703 | struct drm_i915_gem_object { |
714 | struct drm_gem_object base; | 704 | struct drm_gem_object base; |
715 | 705 | ||
716 | /** Current space allocated to this object in the GTT, if any. */ | 706 | /** Current space allocated to this object in the GTT, if any. */ |
717 | struct drm_mm_node *gtt_space; | 707 | struct drm_mm_node *gtt_space; |
708 | struct list_head gtt_list; | ||
718 | 709 | ||
719 | /** This object's place on the active/flushing/inactive lists */ | 710 | /** This object's place on the active/flushing/inactive lists */ |
720 | struct list_head ring_list; | 711 | struct list_head ring_list; |
721 | struct list_head mm_list; | 712 | struct list_head mm_list; |
722 | /** This object's place on GPU write list */ | 713 | /** This object's place on GPU write list */ |
723 | struct list_head gpu_write_list; | 714 | struct list_head gpu_write_list; |
724 | /** This object's place on the eviction list */ | 715 | /** This object's place in the batchbuffer or on the eviction list */ |
725 | struct list_head evict_list; | 716 | struct list_head exec_list; |
726 | 717 | ||
727 | /** | 718 | /** |
728 | * This is set if the object is on the active or flushing lists | 719 | * This is set if the object is on the active or flushing lists |
@@ -738,6 +729,12 @@ struct drm_i915_gem_object { | |||
738 | unsigned int dirty : 1; | 729 | unsigned int dirty : 1; |
739 | 730 | ||
740 | /** | 731 | /** |
732 | * This is set if the object has been written to since the last | ||
733 | * GPU flush. | ||
734 | */ | ||
735 | unsigned int pending_gpu_write : 1; | ||
736 | |||
737 | /** | ||
741 | * Fence register bits (if any) for this object. Will be set | 738 | * Fence register bits (if any) for this object. Will be set |
742 | * as needed when mapped into the GTT. | 739 | * as needed when mapped into the GTT. |
743 | * Protected by dev->struct_mutex. | 740 | * Protected by dev->struct_mutex. |
@@ -747,29 +744,15 @@ struct drm_i915_gem_object { | |||
747 | signed int fence_reg : 5; | 744 | signed int fence_reg : 5; |
748 | 745 | ||
749 | /** | 746 | /** |
750 | * Used for checking the object doesn't appear more than once | ||
751 | * in an execbuffer object list. | ||
752 | */ | ||
753 | unsigned int in_execbuffer : 1; | ||
754 | |||
755 | /** | ||
756 | * Advice: are the backing pages purgeable? | 747 | * Advice: are the backing pages purgeable? |
757 | */ | 748 | */ |
758 | unsigned int madv : 2; | 749 | unsigned int madv : 2; |
759 | 750 | ||
760 | /** | 751 | /** |
761 | * Refcount for the pages array. With the current locking scheme, there | ||
762 | * are at most two concurrent users: Binding a bo to the gtt and | ||
763 | * pwrite/pread using physical addresses. So two bits for a maximum | ||
764 | * of two users are enough. | ||
765 | */ | ||
766 | unsigned int pages_refcount : 2; | ||
767 | #define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3 | ||
768 | |||
769 | /** | ||
770 | * Current tiling mode for the object. | 752 | * Current tiling mode for the object. |
771 | */ | 753 | */ |
772 | unsigned int tiling_mode : 2; | 754 | unsigned int tiling_mode : 2; |
755 | unsigned int tiling_changed : 1; | ||
773 | 756 | ||
774 | /** How many users have pinned this object in GTT space. The following | 757 | /** How many users have pinned this object in GTT space. The following |
775 | * users can each hold at most one reference: pwrite/pread, pin_ioctl | 758 | * users can each hold at most one reference: pwrite/pread, pin_ioctl |
@@ -783,28 +766,55 @@ struct drm_i915_gem_object { | |||
783 | unsigned int pin_count : 4; | 766 | unsigned int pin_count : 4; |
784 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf | 767 | #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf |
785 | 768 | ||
786 | /** AGP memory structure for our GTT binding. */ | 769 | /** |
787 | DRM_AGP_MEM *agp_mem; | 770 | * Is the object at the current location in the gtt mappable and |
771 | * fenceable? Used to avoid costly recalculations. | ||
772 | */ | ||
773 | unsigned int map_and_fenceable : 1; | ||
774 | |||
775 | /** | ||
776 | * Whether the current gtt mapping needs to be mappable (and isn't just | ||
777 | * mappable by accident). Track pin and fault separate for a more | ||
778 | * accurate mappable working set. | ||
779 | */ | ||
780 | unsigned int fault_mappable : 1; | ||
781 | unsigned int pin_mappable : 1; | ||
782 | |||
783 | /* | ||
784 | * Is the GPU currently using a fence to access this buffer? | ||
785 | */ | ||
786 | unsigned int pending_fenced_gpu_access:1; | ||
787 | unsigned int fenced_gpu_access:1; | ||
788 | 788 | ||
789 | struct page **pages; | 789 | struct page **pages; |
790 | 790 | ||
791 | /** | 791 | /** |
792 | * Current offset of the object in GTT space. | 792 | * DMAR support |
793 | * | ||
794 | * This is the same as gtt_space->start | ||
795 | */ | 793 | */ |
796 | uint32_t gtt_offset; | 794 | struct scatterlist *sg_list; |
795 | int num_sg; | ||
797 | 796 | ||
798 | /* Which ring is referring to this object */ | 797 | /** |
799 | struct intel_ring_buffer *ring; | 798 | * Used for performing relocations during execbuffer insertion. |
799 | */ | ||
800 | struct hlist_node exec_node; | ||
801 | unsigned long exec_handle; | ||
802 | struct drm_i915_gem_exec_object2 *exec_entry; | ||
800 | 803 | ||
801 | /** | 804 | /** |
802 | * Fake offset for use by mmap(2) | 805 | * Current offset of the object in GTT space. |
806 | * | ||
807 | * This is the same as gtt_space->start | ||
803 | */ | 808 | */ |
804 | uint64_t mmap_offset; | 809 | uint32_t gtt_offset; |
805 | 810 | ||
806 | /** Breadcrumb of last rendering to the buffer. */ | 811 | /** Breadcrumb of last rendering to the buffer. */ |
807 | uint32_t last_rendering_seqno; | 812 | uint32_t last_rendering_seqno; |
813 | struct intel_ring_buffer *ring; | ||
814 | |||
815 | /** Breadcrumb of last fenced GPU access to the buffer. */ | ||
816 | uint32_t last_fenced_seqno; | ||
817 | struct intel_ring_buffer *last_fenced_ring; | ||
808 | 818 | ||
809 | /** Current tiling stride for the object, if it's tiled. */ | 819 | /** Current tiling stride for the object, if it's tiled. */ |
810 | uint32_t stride; | 820 | uint32_t stride; |
@@ -880,11 +890,74 @@ enum intel_chip_family { | |||
880 | CHIP_I965 = 0x08, | 890 | CHIP_I965 = 0x08, |
881 | }; | 891 | }; |
882 | 892 | ||
893 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) | ||
894 | |||
895 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | ||
896 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | ||
897 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) | ||
898 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | ||
899 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) | ||
900 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | ||
901 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) | ||
902 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) | ||
903 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) | ||
904 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) | ||
905 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) | ||
906 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) | ||
907 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) | ||
908 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) | ||
909 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) | ||
910 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) | ||
911 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) | ||
912 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | ||
913 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | ||
914 | |||
915 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) | ||
916 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) | ||
917 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) | ||
918 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) | ||
919 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) | ||
920 | |||
921 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) | ||
922 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) | ||
923 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | ||
924 | |||
925 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) | ||
926 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) | ||
927 | |||
928 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | ||
929 | * rows, which changed the alignment requirements and fence programming. | ||
930 | */ | ||
931 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ | ||
932 | IS_I915GM(dev))) | ||
933 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) | ||
934 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) | ||
935 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) | ||
936 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) | ||
937 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) | ||
938 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) | ||
939 | /* dsparb controlled by hw only */ | ||
940 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | ||
941 | |||
942 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) | ||
943 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) | ||
944 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) | ||
945 | |||
946 | #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) | ||
947 | #define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) | ||
948 | |||
949 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) | ||
950 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | ||
951 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | ||
952 | |||
953 | #include "i915_trace.h" | ||
954 | |||
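A representative consumer of these predicates (hypothetical helper name; the logic mirrors the vblank-counter selection in i915_driver_load earlier in this diff):

/* G4x and gen5/gen6 parts have a full hardware frame counter read via
 * gm45_get_vblank_counter(); older parts fall back to
 * i915_get_vblank_counter() with only 24 usable bits.
 */
static inline bool uses_gm45_vblank_counter(struct drm_device *dev)
{
	return IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev);
}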
883 | extern struct drm_ioctl_desc i915_ioctls[]; | 955 | extern struct drm_ioctl_desc i915_ioctls[]; |
884 | extern int i915_max_ioctl; | 956 | extern int i915_max_ioctl; |
885 | extern unsigned int i915_fbpercrtc; | 957 | extern unsigned int i915_fbpercrtc; |
886 | extern unsigned int i915_powersave; | 958 | extern unsigned int i915_powersave; |
887 | extern unsigned int i915_lvds_downclock; | 959 | extern unsigned int i915_lvds_downclock; |
960 | extern unsigned int i915_panel_use_ssc; | ||
888 | 961 | ||
889 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 962 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
890 | extern int i915_resume(struct drm_device *dev); | 963 | extern int i915_resume(struct drm_device *dev); |
@@ -907,8 +980,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev); | |||
907 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | 980 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
908 | unsigned long arg); | 981 | unsigned long arg); |
909 | extern int i915_emit_box(struct drm_device *dev, | 982 | extern int i915_emit_box(struct drm_device *dev, |
910 | struct drm_clip_rect *boxes, | 983 | struct drm_clip_rect *box, |
911 | int i, int DR1, int DR4); | 984 | int DR1, int DR4); |
912 | extern int i915_reset(struct drm_device *dev, u8 flags); | 985 | extern int i915_reset(struct drm_device *dev, u8 flags); |
913 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); | 986 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
914 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); | 987 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
@@ -918,6 +991,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); | |||
918 | 991 | ||
919 | /* i915_irq.c */ | 992 | /* i915_irq.c */ |
920 | void i915_hangcheck_elapsed(unsigned long data); | 993 | void i915_hangcheck_elapsed(unsigned long data); |
994 | void i915_handle_error(struct drm_device *dev, bool wedged); | ||
921 | extern int i915_irq_emit(struct drm_device *dev, void *data, | 995 | extern int i915_irq_emit(struct drm_device *dev, void *data, |
922 | struct drm_file *file_priv); | 996 | struct drm_file *file_priv); |
923 | extern int i915_irq_wait(struct drm_device *dev, void *data, | 997 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
@@ -939,12 +1013,6 @@ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); | |||
939 | extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); | 1013 | extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); |
940 | extern int i915_vblank_swap(struct drm_device *dev, void *data, | 1014 | extern int i915_vblank_swap(struct drm_device *dev, void *data, |
941 | struct drm_file *file_priv); | 1015 | struct drm_file *file_priv); |
942 | extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); | ||
943 | extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); | ||
944 | extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, | ||
945 | u32 mask); | ||
946 | extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, | ||
947 | u32 mask); | ||
948 | 1016 | ||
949 | void | 1017 | void |
950 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | 1018 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
@@ -953,6 +1021,13 @@ void | |||
953 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | 1021 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
954 | 1022 | ||
955 | void intel_enable_asle (struct drm_device *dev); | 1023 | void intel_enable_asle (struct drm_device *dev); |
1024 | int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, | ||
1025 | int *max_error, | ||
1026 | struct timeval *vblank_time, | ||
1027 | unsigned flags); | ||
1028 | |||
1029 | int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | ||
1030 | int *vpos, int *hpos); | ||
956 | 1031 | ||
957 | #ifdef CONFIG_DEBUG_FS | 1032 | #ifdef CONFIG_DEBUG_FS |
958 | extern void i915_destroy_error_state(struct drm_device *dev); | 1033 | extern void i915_destroy_error_state(struct drm_device *dev); |
@@ -1017,15 +1092,35 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
1017 | struct drm_file *file_priv); | 1092 | struct drm_file *file_priv); |
1018 | void i915_gem_load(struct drm_device *dev); | 1093 | void i915_gem_load(struct drm_device *dev); |
1019 | int i915_gem_init_object(struct drm_gem_object *obj); | 1094 | int i915_gem_init_object(struct drm_gem_object *obj); |
1020 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 1095 | int __must_check i915_gem_flush_ring(struct drm_device *dev, |
1021 | size_t size); | 1096 | struct intel_ring_buffer *ring, |
1097 | uint32_t invalidate_domains, | ||
1098 | uint32_t flush_domains); | ||
1099 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | ||
1100 | size_t size); | ||
1022 | void i915_gem_free_object(struct drm_gem_object *obj); | 1101 | void i915_gem_free_object(struct drm_gem_object *obj); |
1023 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); | 1102 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
1024 | void i915_gem_object_unpin(struct drm_gem_object *obj); | 1103 | uint32_t alignment, |
1025 | int i915_gem_object_unbind(struct drm_gem_object *obj); | 1104 | bool map_and_fenceable); |
1026 | void i915_gem_release_mmap(struct drm_gem_object *obj); | 1105 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
1106 | int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); | ||
1107 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); | ||
1027 | void i915_gem_lastclose(struct drm_device *dev); | 1108 | void i915_gem_lastclose(struct drm_device *dev); |
1028 | 1109 | ||
1110 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); | ||
1111 | int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | ||
1112 | bool interruptible); | ||
1113 | void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | ||
1114 | struct intel_ring_buffer *ring, | ||
1115 | u32 seqno); | ||
1116 | |||
1117 | int i915_gem_dumb_create(struct drm_file *file_priv, | ||
1118 | struct drm_device *dev, | ||
1119 | struct drm_mode_create_dumb *args); | ||
1120 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, | ||
1121 | uint32_t handle, uint64_t *offset); | ||
1122 | int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, | ||
1123 | uint32_t handle); | ||
1029 | /** | 1124 | /** |
1030 | * Returns true if seq1 is later than seq2. | 1125 | * Returns true if seq1 is later than seq2. |
1031 | */ | 1126 | */ |
@@ -1035,73 +1130,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) | |||
1035 | return (int32_t)(seq1 - seq2) >= 0; | 1130 | return (int32_t)(seq1 - seq2) >= 0; |
1036 | } | 1131 | } |
1037 | 1132 | ||
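The cast to signed makes the comparison wraparound-safe: for example, with seq1 = 0x00000002 (just past a 32-bit wrap) and seq2 = 0xfffffffe, seq1 - seq2 is 4, and (int32_t)4 >= 0, so seq1 is correctly treated as later even though it is numerically smaller.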
1038 | int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | 1133 | static inline u32 |
1039 | bool interruptible); | 1134 | i915_gem_next_request_seqno(struct drm_device *dev, |
1040 | int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | 1135 | struct intel_ring_buffer *ring) |
1041 | bool interruptible); | 1136 | { |
1137 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1138 | return ring->outstanding_lazy_request = dev_priv->next_seqno; | ||
1139 | } | ||
1140 | |||
1141 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, | ||
1142 | struct intel_ring_buffer *pipelined, | ||
1143 | bool interruptible); | ||
1144 | int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); | ||
1145 | |||
1042 | void i915_gem_retire_requests(struct drm_device *dev); | 1146 | void i915_gem_retire_requests(struct drm_device *dev); |
1043 | void i915_gem_reset(struct drm_device *dev); | 1147 | void i915_gem_reset(struct drm_device *dev); |
1044 | void i915_gem_clflush_object(struct drm_gem_object *obj); | 1148 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
1045 | int i915_gem_object_set_domain(struct drm_gem_object *obj, | 1149 | int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1046 | uint32_t read_domains, | 1150 | uint32_t read_domains, |
1047 | uint32_t write_domain); | 1151 | uint32_t write_domain); |
1048 | int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | 1152 | int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, |
1049 | bool interruptible); | 1153 | bool interruptible); |
1050 | int i915_gem_init_ringbuffer(struct drm_device *dev); | 1154 | int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); |
1051 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 1155 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1052 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 1156 | void i915_gem_do_init(struct drm_device *dev, |
1053 | unsigned long end); | 1157 | unsigned long start, |
1054 | int i915_gpu_idle(struct drm_device *dev); | 1158 | unsigned long mappable_end, |
1055 | int i915_gem_idle(struct drm_device *dev); | 1159 | unsigned long end); |
1056 | uint32_t i915_add_request(struct drm_device *dev, | 1160 | int __must_check i915_gpu_idle(struct drm_device *dev); |
1057 | struct drm_file *file_priv, | 1161 | int __must_check i915_gem_idle(struct drm_device *dev); |
1058 | struct drm_i915_gem_request *request, | 1162 | int __must_check i915_add_request(struct drm_device *dev, |
1059 | struct intel_ring_buffer *ring); | 1163 | struct drm_file *file_priv, |
1060 | int i915_do_wait_request(struct drm_device *dev, | 1164 | struct drm_i915_gem_request *request, |
1061 | uint32_t seqno, | 1165 | struct intel_ring_buffer *ring); |
1062 | bool interruptible, | 1166 | int __must_check i915_do_wait_request(struct drm_device *dev, |
1063 | struct intel_ring_buffer *ring); | 1167 | uint32_t seqno, |
1168 | bool interruptible, | ||
1169 | struct intel_ring_buffer *ring); | ||
1064 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 1170 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
1065 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | 1171 | int __must_check |
1066 | int write); | 1172 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
1067 | int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | 1173 | bool write); |
1068 | bool pipelined); | 1174 | int __must_check |
1175 | i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, | ||
1176 | struct intel_ring_buffer *pipelined); | ||
1069 | int i915_gem_attach_phys_object(struct drm_device *dev, | 1177 | int i915_gem_attach_phys_object(struct drm_device *dev, |
1070 | struct drm_gem_object *obj, | 1178 | struct drm_i915_gem_object *obj, |
1071 | int id, | 1179 | int id, |
1072 | int align); | 1180 | int align); |
1073 | void i915_gem_detach_phys_object(struct drm_device *dev, | 1181 | void i915_gem_detach_phys_object(struct drm_device *dev, |
1074 | struct drm_gem_object *obj); | 1182 | struct drm_i915_gem_object *obj); |
1075 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1183 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1076 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | 1184 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1077 | 1185 | ||
1078 | void i915_gem_shrinker_init(void); | 1186 | /* i915_gem_gtt.c */ |
1079 | void i915_gem_shrinker_exit(void); | 1187 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1188 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | ||
1189 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); | ||
1080 | 1190 | ||
1081 | /* i915_gem_evict.c */ | 1191 | /* i915_gem_evict.c */ |
1082 | int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment); | 1192 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, |
1083 | int i915_gem_evict_everything(struct drm_device *dev); | 1193 | unsigned alignment, bool mappable); |
1084 | int i915_gem_evict_inactive(struct drm_device *dev); | 1194 | int __must_check i915_gem_evict_everything(struct drm_device *dev, |
1195 | bool purgeable_only); | ||
1196 | int __must_check i915_gem_evict_inactive(struct drm_device *dev, | ||
1197 | bool purgeable_only); | ||
1085 | 1198 | ||
1086 | /* i915_gem_tiling.c */ | 1199 | /* i915_gem_tiling.c */ |
1087 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 1200 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
1088 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | 1201 | void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1089 | void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); | 1202 | void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1090 | bool i915_tiling_ok(struct drm_device *dev, int stride, int size, | ||
1091 | int tiling_mode); | ||
1092 | bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, | ||
1093 | int tiling_mode); | ||
1094 | 1203 | ||
1095 | /* i915_gem_debug.c */ | 1204 | /* i915_gem_debug.c */ |
1096 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 1205 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
1097 | const char *where, uint32_t mark); | 1206 | const char *where, uint32_t mark); |
1098 | #if WATCH_LISTS | 1207 | #if WATCH_LISTS |
1099 | int i915_verify_lists(struct drm_device *dev); | 1208 | int i915_verify_lists(struct drm_device *dev); |
1100 | #else | 1209 | #else |
1101 | #define i915_verify_lists(dev) 0 | 1210 | #define i915_verify_lists(dev) 0 |
1102 | #endif | 1211 | #endif |
1103 | void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); | 1212 | void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, |
1104 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 1213 | int handle); |
1214 | void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, | ||
1105 | const char *where, uint32_t mark); | 1215 | const char *where, uint32_t mark); |
1106 | 1216 | ||
1107 | /* i915_debugfs.c */ | 1217 | /* i915_debugfs.c */ |
@@ -1163,6 +1273,8 @@ extern void intel_disable_fbc(struct drm_device *dev); | |||
1163 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); | 1273 | extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); |
1164 | extern bool intel_fbc_enabled(struct drm_device *dev); | 1274 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1165 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 1275 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1276 | extern void ironlake_enable_rc6(struct drm_device *dev); | ||
1277 | extern void gen6_set_rps(struct drm_device *dev, u8 val); | ||
1166 | extern void intel_detect_pch (struct drm_device *dev); | 1278 | extern void intel_detect_pch (struct drm_device *dev); |
1167 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | 1279 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); |
1168 | 1280 | ||
@@ -1170,79 +1282,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | |||
1170 | #ifdef CONFIG_DEBUG_FS | 1282 | #ifdef CONFIG_DEBUG_FS |
1171 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); | 1283 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
1172 | extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); | 1284 | extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); |
1285 | |||
1286 | extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); | ||
1287 | extern void intel_display_print_error_state(struct seq_file *m, | ||
1288 | struct drm_device *dev, | ||
1289 | struct intel_display_error_state *error); | ||
1173 | #endif | 1290 | #endif |
1174 | 1291 | ||
1292 | #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) | ||
1293 | |||
1294 | #define BEGIN_LP_RING(n) \ | ||
1295 | intel_ring_begin(LP_RING(dev_priv), (n)) | ||
1296 | |||
1297 | #define OUT_RING(x) \ | ||
1298 | intel_ring_emit(LP_RING(dev_priv), x) | ||
1299 | |||
1300 | #define ADVANCE_LP_RING() \ | ||
1301 | intel_ring_advance(LP_RING(dev_priv)) | ||
1302 | |||
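The LP_RING wrappers above funnel the legacy emission macros through the render ring. A hedged sketch of the usage pattern they support elsewhere in the driver (error handling abbreviated; MI_FLUSH and MI_NOOP are existing i915_reg.h opcodes):

    /* Sketch: with these macros, BEGIN_LP_RING() forwards intel_ring_begin()'s
     * return value, so callers are expected to check it before emitting. */
    ret = BEGIN_LP_RING(2);
    if (ret)
            return ret;
    OUT_RING(MI_FLUSH);
    OUT_RING(MI_NOOP);
    ADVANCE_LP_RING();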
1175 | /** | 1303 | /** |
1176 | * Lock test for when it's just for synchronization of ring access. | 1304 | * Lock test for when it's just for synchronization of ring access. |
1177 | * | 1305 | * |
1178 | * In that case, we don't need to do it when GEM is initialized as nobody else | 1306 | * In that case, we don't need to do it when GEM is initialized as nobody else |
1179 | * has access to the ring. | 1307 | * has access to the ring. |
1180 | */ | 1308 | */ |
1181 | #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ | 1309 | #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ |
1182 | if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ | 1310 | if (LP_RING(dev->dev_private)->obj == NULL) \ |
1183 | == NULL) \ | 1311 | LOCK_TEST_WITH_RETURN(dev, file); \ |
1184 | LOCK_TEST_WITH_RETURN(dev, file_priv); \ | ||
1185 | } while (0) | 1312 | } while (0) |
1186 | 1313 | ||
1187 | static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg) | 1314 | |
1315 | #define __i915_read(x, y) \ | ||
1316 | static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | ||
1317 | u##x val = read##y(dev_priv->regs + reg); \ | ||
1318 | trace_i915_reg_rw('R', reg, val, sizeof(val)); \ | ||
1319 | return val; \ | ||
1320 | } | ||
1321 | __i915_read(8, b) | ||
1322 | __i915_read(16, w) | ||
1323 | __i915_read(32, l) | ||
1324 | __i915_read(64, q) | ||
1325 | #undef __i915_read | ||
1326 | |||
1327 | #define __i915_write(x, y) \ | ||
1328 | static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | ||
1329 | trace_i915_reg_rw('W', reg, val, sizeof(val)); \ | ||
1330 | write##y(val, dev_priv->regs + reg); \ | ||
1331 | } | ||
1332 | __i915_write(8, b) | ||
1333 | __i915_write(16, w) | ||
1334 | __i915_write(32, l) | ||
1335 | __i915_write(64, q) | ||
1336 | #undef __i915_write | ||
1337 | |||
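For reference, one instantiation of the generator macros above, expanded by hand; this is what `__i915_read(32, l)` produces, nothing more:

    /* Hand-expanded form of __i915_read(32, l): a traced 32-bit MMIO read. */
    static inline u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
    {
            u32 val = readl(dev_priv->regs + reg);
            trace_i915_reg_rw('R', reg, val, sizeof(val));
            return val;
    }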
1338 | #define I915_READ8(reg) i915_read8(dev_priv, (reg)) | ||
1339 | #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) | ||
1340 | |||
1341 | #define I915_READ16(reg) i915_read16(dev_priv, (reg)) | ||
1342 | #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) | ||
1343 | #define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) | ||
1344 | #define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) | ||
1345 | |||
1346 | #define I915_READ(reg) i915_read32(dev_priv, (reg)) | ||
1347 | #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) | ||
1348 | #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) | ||
1349 | #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) | ||
1350 | |||
1351 | #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) | ||
1352 | #define I915_READ64(reg) i915_read64(dev_priv, (reg)) | ||
1353 | |||
1354 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) | ||
1355 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) | ||
1356 | |||
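POSTING_READ() exists to flush posted MMIO writes out to the device; the _NOTRACE variants skip the tracepoint so the flush does not pollute the register trace. A sketch of the common pairing (not a line from this diff):

    /* Force the posted write to reach the hardware before anything that
     * depends on it executes. */
    I915_WRITE(reg, val);
    POSTING_READ(reg);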
1357 | |||
1358 | /* On the SNB platform, the forcewake bit must be set before reading | ||
1359 | * ring registers, to keep the GT core from powering down and returning | ||
1360 | * stale values. | ||
1361 | */ | ||

1362 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv); | ||
1363 | void __gen6_force_wake_put (struct drm_i915_private *dev_priv); | ||
1364 | static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) | ||
1188 | { | 1365 | { |
1189 | u32 val; | 1366 | u32 val; |
1190 | 1367 | ||
1191 | val = readl(dev_priv->regs + reg); | 1368 | if (dev_priv->info->gen >= 6) { |
1192 | if (dev_priv->debug_flags & I915_DEBUG_READ) | 1369 | __gen6_force_wake_get(dev_priv); |
1193 | printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg); | 1370 | val = I915_READ(reg); |
1371 | __gen6_force_wake_put(dev_priv); | ||
1372 | } else | ||
1373 | val = I915_READ(reg); | ||
1374 | |||
1194 | return val; | 1375 | return val; |
1195 | } | 1376 | } |
1196 | 1377 | ||
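i915_safe_read() above shows the canonical get/put pairing for a single read. A sketch of bracketing a group of gen6 ring-register reads so the GT core stays awake for the whole sequence (hypothetical; assumes the existing RING_HEAD/RING_TAIL register macros and a valid `ring`):

    if (dev_priv->info->gen >= 6) {
            __gen6_force_wake_get(dev_priv);
            head = I915_READ(RING_HEAD(ring->mmio_base));
            tail = I915_READ(RING_TAIL(ring->mmio_base));
            __gen6_force_wake_put(dev_priv);
    }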
1197 | static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, | 1378 | static inline void |
1198 | u32 val) | 1379 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) |
1199 | { | 1380 | { |
1200 | writel(val, dev_priv->regs + reg); | 1381 | /* Trace the write operation before issuing the real write */ |
1201 | if (dev_priv->debug_flags & I915_DEBUG_WRITE) | 1382 | trace_i915_reg_rw('W', reg, val, len); |
1202 | printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg); | 1383 | switch (len) { |
1384 | case 8: | ||
1385 | writeq(val, dev_priv->regs + reg); | ||
1386 | break; | ||
1387 | case 4: | ||
1388 | writel(val, dev_priv->regs + reg); | ||
1389 | break; | ||
1390 | case 2: | ||
1391 | writew(val, dev_priv->regs + reg); | ||
1392 | break; | ||
1393 | case 1: | ||
1394 | writeb(val, dev_priv->regs + reg); | ||
1395 | break; | ||
1396 | } | ||
1203 | } | 1397 | } |
1204 | 1398 | ||
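The length-dispatch helper above is equivalent, for any given width, to the corresponding generated accessor; a 32-bit write via either path emits the same tracepoint and the same writel():

    /* Same effect as I915_WRITE(reg, val), i.e. i915_write32(): */
    i915_write(dev_priv, reg, val, 4);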
1205 | #define I915_READ(reg) i915_read(dev_priv, (reg)) | ||
1206 | #define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val)) | ||
1207 | #define I915_READ16(reg) readw(dev_priv->regs + (reg)) | ||
1208 | #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) | ||
1209 | #define I915_READ8(reg) readb(dev_priv->regs + (reg)) | ||
1210 | #define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) | ||
1211 | #define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) | ||
1212 | #define I915_READ64(reg) readq(dev_priv->regs + (reg)) | ||
1213 | #define POSTING_READ(reg) (void)I915_READ(reg) | ||
1214 | #define POSTING_READ16(reg) (void)I915_READ16(reg) | ||
1215 | |||
1216 | #define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \ | ||
1217 | I915_DEBUG_WRITE) | ||
1218 | #define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \ | ||
1219 | I915_DEBUG_WRITE)) | ||
1220 | |||
1221 | #define I915_VERBOSE 0 | ||
1222 | |||
1223 | #define BEGIN_LP_RING(n) do { \ | ||
1224 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ | ||
1225 | if (I915_VERBOSE) \ | ||
1226 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ | ||
1227 | intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \ | ||
1228 | } while (0) | ||
1229 | |||
1230 | |||
1231 | #define OUT_RING(x) do { \ | ||
1232 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ | ||
1233 | if (I915_VERBOSE) \ | ||
1234 | DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ | ||
1235 | intel_ring_emit(dev, &dev_priv__->render_ring, x); \ | ||
1236 | } while (0) | ||
1237 | |||
1238 | #define ADVANCE_LP_RING() do { \ | ||
1239 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ | ||
1240 | if (I915_VERBOSE) \ | ||
1241 | DRM_DEBUG("ADVANCE_LP_RING %x\n", \ | ||
1242 | dev_priv__->render_ring.tail); \ | ||
1243 | intel_ring_advance(dev, &dev_priv__->render_ring); \ | ||
1244 | } while(0) | ||
1245 | |||
1246 | /** | 1399 | /** |
1247 | * Reads a dword out of the status page, which is written to from the command | 1400 | * Reads a dword out of the status page, which is written to from the command |
1248 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or | 1401 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or |
@@ -1259,72 +1412,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, | |||
1259 | * The area from dword 0x20 to 0x3ff is available for driver usage. | 1412 | * The area from dword 0x20 to 0x3ff is available for driver usage. |
1260 | */ | 1413 | */ |
1261 | #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ | 1414 | #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ |
1262 | (dev_priv->render_ring.status_page.page_addr))[reg]) | 1415 | (LP_RING(dev_priv)->status_page.page_addr))[reg]) |
1263 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) | 1416 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) |
1264 | #define I915_GEM_HWS_INDEX 0x20 | 1417 | #define I915_GEM_HWS_INDEX 0x20 |
1265 | #define I915_BREADCRUMB_INDEX 0x21 | 1418 | #define I915_BREADCRUMB_INDEX 0x21 |
1266 | 1419 | ||
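A sketch of consuming the breadcrumb that the ring writes into the status page, using the accessors defined above together with i915_seqno_passed() (hypothetical pairing; `seqno` is assumed to identify a previously emitted request):

    u32 completed = READ_BREADCRUMB(dev_priv);
    if (i915_seqno_passed(completed, seqno)) {
            /* the request identified by seqno has completed */
    }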
1267 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) | ||
1268 | |||
1269 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | ||
1270 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | ||
1271 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) | ||
1272 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | ||
1273 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) | ||
1274 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | ||
1275 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) | ||
1276 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) | ||
1277 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) | ||
1278 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) | ||
1279 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) | ||
1280 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) | ||
1281 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) | ||
1282 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) | ||
1283 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) | ||
1284 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) | ||
1285 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) | ||
1286 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | ||
1287 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | ||
1288 | |||
1289 | #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) | ||
1290 | #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) | ||
1291 | #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) | ||
1292 | #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) | ||
1293 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) | ||
1294 | |||
1295 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) | ||
1296 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) | ||
1297 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | ||
1298 | |||
1299 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) | ||
1300 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) | ||
1301 | |||
1302 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | ||
1303 | * rows, which changed the alignment requirements and fence programming. | ||
1304 | */ | ||
1305 | #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ | ||
1306 | IS_I915GM(dev))) | ||
1307 | #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) | ||
1308 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) | ||
1309 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) | ||
1310 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) | ||
1311 | #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) | ||
1312 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) | ||
1313 | /* dsparb controlled by hw only */ | ||
1314 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | ||
1315 | |||
1316 | #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) | ||
1317 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) | ||
1318 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) | ||
1319 | #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) | ||
1320 | |||
1321 | #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) | ||
1322 | #define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) | ||
1323 | |||
1324 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) | ||
1325 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | ||
1326 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | ||
1327 | |||
1328 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | ||
1329 | |||
1330 | #endif | 1420 | #endif |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 17b1cba3b5f1..bc7f06b8fbca 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -34,39 +34,31 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | #include <linux/intel-gtt.h> | 37 | |
38 | 38 | static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); | |
39 | static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); | 39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
40 | 40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); | |
41 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | 41 | static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, |
42 | bool pipelined); | 42 | bool write); |
43 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 43 | static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
44 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 44 | uint64_t offset, |
45 | static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, | 45 | uint64_t size); |
46 | int write); | 46 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); |
47 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 47 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
48 | uint64_t offset, | 48 | unsigned alignment, |
49 | uint64_t size); | 49 | bool map_and_fenceable); |
50 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); | 50 | static void i915_gem_clear_fence_reg(struct drm_device *dev, |
51 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, | 51 | struct drm_i915_fence_reg *reg); |
52 | bool interruptible); | 52 | static int i915_gem_phys_pwrite(struct drm_device *dev, |
53 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 53 | struct drm_i915_gem_object *obj, |
54 | unsigned alignment); | ||
55 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | ||
56 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | ||
57 | struct drm_i915_gem_pwrite *args, | 54 | struct drm_i915_gem_pwrite *args, |
58 | struct drm_file *file_priv); | 55 | struct drm_file *file); |
59 | static void i915_gem_free_object_tail(struct drm_gem_object *obj); | 56 | static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); |
60 | 57 | ||
61 | static int | 58 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, |
62 | i915_gem_object_get_pages(struct drm_gem_object *obj, | 59 | int nr_to_scan, |
63 | gfp_t gfpmask); | 60 | gfp_t gfp_mask); |
64 | 61 | ||
65 | static void | ||
66 | i915_gem_object_put_pages(struct drm_gem_object *obj); | ||
67 | |||
68 | static LIST_HEAD(shrink_list); | ||
69 | static DEFINE_SPINLOCK(shrink_list_lock); | ||
70 | 62 | ||
71 | /* some bookkeeping */ | 63 | /* some bookkeeping */ |
72 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, | 64 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, |
@@ -83,34 +75,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, | |||
83 | dev_priv->mm.object_memory -= size; | 75 | dev_priv->mm.object_memory -= size; |
84 | } | 76 | } |
85 | 77 | ||
86 | static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, | ||
87 | size_t size) | ||
88 | { | ||
89 | dev_priv->mm.gtt_count++; | ||
90 | dev_priv->mm.gtt_memory += size; | ||
91 | } | ||
92 | |||
93 | static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, | ||
94 | size_t size) | ||
95 | { | ||
96 | dev_priv->mm.gtt_count--; | ||
97 | dev_priv->mm.gtt_memory -= size; | ||
98 | } | ||
99 | |||
100 | static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, | ||
101 | size_t size) | ||
102 | { | ||
103 | dev_priv->mm.pin_count++; | ||
104 | dev_priv->mm.pin_memory += size; | ||
105 | } | ||
106 | |||
107 | static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, | ||
108 | size_t size) | ||
109 | { | ||
110 | dev_priv->mm.pin_count--; | ||
111 | dev_priv->mm.pin_memory -= size; | ||
112 | } | ||
113 | |||
114 | int | 78 | int |
115 | i915_gem_check_is_wedged(struct drm_device *dev) | 79 | i915_gem_check_is_wedged(struct drm_device *dev) |
116 | { | 80 | { |
@@ -141,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev) | |||
141 | return -EIO; | 105 | return -EIO; |
142 | } | 106 | } |
143 | 107 | ||
144 | static int i915_mutex_lock_interruptible(struct drm_device *dev) | 108 | int i915_mutex_lock_interruptible(struct drm_device *dev) |
145 | { | 109 | { |
146 | struct drm_i915_private *dev_priv = dev->dev_private; | 110 | struct drm_i915_private *dev_priv = dev->dev_private; |
147 | int ret; | 111 | int ret; |
@@ -164,124 +128,141 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev) | |||
164 | } | 128 | } |
165 | 129 | ||
166 | static inline bool | 130 | static inline bool |
167 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) | 131 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) |
168 | { | 132 | { |
169 | return obj_priv->gtt_space && | 133 | return obj->gtt_space && !obj->active && obj->pin_count == 0; |
170 | !obj_priv->active && | ||
171 | obj_priv->pin_count == 0; | ||
172 | } | 134 | } |
173 | 135 | ||
174 | int i915_gem_do_init(struct drm_device *dev, | 136 | void i915_gem_do_init(struct drm_device *dev, |
175 | unsigned long start, | 137 | unsigned long start, |
176 | unsigned long end) | 138 | unsigned long mappable_end, |
139 | unsigned long end) | ||
177 | { | 140 | { |
178 | drm_i915_private_t *dev_priv = dev->dev_private; | 141 | drm_i915_private_t *dev_priv = dev->dev_private; |
179 | 142 | ||
180 | if (start >= end || | 143 | drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); |
181 | (start & (PAGE_SIZE - 1)) != 0 || | ||
182 | (end & (PAGE_SIZE - 1)) != 0) { | ||
183 | return -EINVAL; | ||
184 | } | ||
185 | |||
186 | drm_mm_init(&dev_priv->mm.gtt_space, start, | ||
187 | end - start); | ||
188 | 144 | ||
145 | dev_priv->mm.gtt_start = start; | ||
146 | dev_priv->mm.gtt_mappable_end = mappable_end; | ||
147 | dev_priv->mm.gtt_end = end; | ||
189 | dev_priv->mm.gtt_total = end - start; | 148 | dev_priv->mm.gtt_total = end - start; |
149 | dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; | ||
190 | 150 | ||
191 | return 0; | 151 | /* Take over this portion of the GTT */ |
152 | intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); | ||
192 | } | 153 | } |
193 | 154 | ||
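The new mappable split in i915_gem_do_init() is plain arithmetic: gtt_total covers the whole managed range, while mappable_gtt_total is clamped to the CPU-visible aperture. A standalone worked example with hypothetical sizes, mirroring the min(end, mappable_end) - start computation above:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical layout: 512 MiB of GTT, of which only the first
         * 256 MiB is CPU-mappable through the aperture. */
        unsigned long start = 0;
        unsigned long mappable_end = 256ul << 20;
        unsigned long end = 512ul << 20;

        unsigned long gtt_total = end - start;
        unsigned long mappable_gtt_total =
                (end < mappable_end ? end : mappable_end) - start;

        printf("total %lu MiB, mappable %lu MiB\n",
               gtt_total >> 20, mappable_gtt_total >> 20); /* 512, 256 */
        return 0;
    }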
194 | int | 155 | int |
195 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | 156 | i915_gem_init_ioctl(struct drm_device *dev, void *data, |
196 | struct drm_file *file_priv) | 157 | struct drm_file *file) |
197 | { | 158 | { |
198 | struct drm_i915_gem_init *args = data; | 159 | struct drm_i915_gem_init *args = data; |
199 | int ret; | 160 | |
161 | if (args->gtt_start >= args->gtt_end || | ||
162 | (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) | ||
163 | return -EINVAL; | ||
200 | 164 | ||
201 | mutex_lock(&dev->struct_mutex); | 165 | mutex_lock(&dev->struct_mutex); |
202 | ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); | 166 | i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); |
203 | mutex_unlock(&dev->struct_mutex); | 167 | mutex_unlock(&dev->struct_mutex); |
204 | 168 | ||
205 | return ret; | 169 | return 0; |
206 | } | 170 | } |
207 | 171 | ||
208 | int | 172 | int |
209 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 173 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
210 | struct drm_file *file_priv) | 174 | struct drm_file *file) |
211 | { | 175 | { |
212 | struct drm_i915_private *dev_priv = dev->dev_private; | 176 | struct drm_i915_private *dev_priv = dev->dev_private; |
213 | struct drm_i915_gem_get_aperture *args = data; | 177 | struct drm_i915_gem_get_aperture *args = data; |
178 | struct drm_i915_gem_object *obj; | ||
179 | size_t pinned; | ||
214 | 180 | ||
215 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 181 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
216 | return -ENODEV; | 182 | return -ENODEV; |
217 | 183 | ||
184 | pinned = 0; | ||
218 | mutex_lock(&dev->struct_mutex); | 185 | mutex_lock(&dev->struct_mutex); |
219 | args->aper_size = dev_priv->mm.gtt_total; | 186 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) |
220 | args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; | 187 | pinned += obj->gtt_space->size; |
221 | mutex_unlock(&dev->struct_mutex); | 188 | mutex_unlock(&dev->struct_mutex); |
222 | 189 | ||
190 | args->aper_size = dev_priv->mm.gtt_total; | ||
191 | args->aper_available_size = args->aper_size - pinned; | ||
192 | |||
223 | return 0; | 193 | return 0; |
224 | } | 194 | } |
225 | 195 | ||
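From userspace, the aperture ioctl is consumed through the matching UAPI struct; a libdrm-style sketch (assumes xf86drm.h and i915_drm.h; error handling abbreviated):

    /* Query total and available aperture sizes via the GET_APERTURE ioctl. */
    struct drm_i915_gem_get_aperture aper = { 0 };
    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
            printf("aperture %llu bytes, %llu available\n",
                   (unsigned long long)aper.aper_size,
                   (unsigned long long)aper.aper_available_size);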
226 | 196 | static int | |
227 | /** | 197 | i915_gem_create(struct drm_file *file, |
228 | * Creates a new mm object and returns a handle to it. | 198 | struct drm_device *dev, |
229 | */ | 199 | uint64_t size, |
230 | int | 200 | uint32_t *handle_p) |
231 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | ||
232 | struct drm_file *file_priv) | ||
233 | { | 201 | { |
234 | struct drm_i915_gem_create *args = data; | 202 | struct drm_i915_gem_object *obj; |
235 | struct drm_gem_object *obj; | ||
236 | int ret; | 203 | int ret; |
237 | u32 handle; | 204 | u32 handle; |
238 | 205 | ||
239 | args->size = roundup(args->size, PAGE_SIZE); | 206 | size = roundup(size, PAGE_SIZE); |
240 | 207 | ||
241 | /* Allocate the new object */ | 208 | /* Allocate the new object */ |
242 | obj = i915_gem_alloc_object(dev, args->size); | 209 | obj = i915_gem_alloc_object(dev, size); |
243 | if (obj == NULL) | 210 | if (obj == NULL) |
244 | return -ENOMEM; | 211 | return -ENOMEM; |
245 | 212 | ||
246 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 213 | ret = drm_gem_handle_create(file, &obj->base, &handle); |
247 | if (ret) { | 214 | if (ret) { |
248 | drm_gem_object_release(obj); | 215 | drm_gem_object_release(&obj->base); |
249 | i915_gem_info_remove_obj(dev->dev_private, obj->size); | 216 | i915_gem_info_remove_obj(dev->dev_private, obj->base.size); |
250 | kfree(obj); | 217 | kfree(obj); |
251 | return ret; | 218 | return ret; |
252 | } | 219 | } |
253 | 220 | ||
254 | /* drop reference from allocate - handle holds it now */ | 221 | /* drop reference from allocate - handle holds it now */ |
255 | drm_gem_object_unreference(obj); | 222 | drm_gem_object_unreference(&obj->base); |
256 | trace_i915_gem_object_create(obj); | 223 | trace_i915_gem_object_create(obj); |
257 | 224 | ||
258 | args->handle = handle; | 225 | *handle_p = handle; |
259 | return 0; | 226 | return 0; |
260 | } | 227 | } |
261 | 228 | ||
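The reference-counting dance in i915_gem_create() above is worth spelling out; the handle, not the caller, ends up owning the object:

    /* Reference flow in i915_gem_create() (annotation, not new code):
     *   i915_gem_alloc_object()      -> refcount = 1 (allocation reference)
     *   drm_gem_handle_create()      -> refcount = 2 (handle takes its own ref)
     *   drm_gem_object_unreference() -> refcount = 1; the handle is now the
     *   sole owner, and closing the handle frees the object.
     */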
262 | static inline int | 229 | int |
263 | fast_shmem_read(struct page **pages, | 230 | i915_gem_dumb_create(struct drm_file *file, |
264 | loff_t page_base, int page_offset, | 231 | struct drm_device *dev, |
265 | char __user *data, | 232 | struct drm_mode_create_dumb *args) |
266 | int length) | ||
267 | { | 233 | { |
268 | char *vaddr; | 234 | /* have to work out size/pitch and return them */ |
269 | int ret; | 235 | args->pitch = ALIGN(args->width * ((args->bpp + 1) / 8), 64); |
236 | args->size = args->pitch * args->height; | ||
237 | return i915_gem_create(file, dev, | ||
238 | args->size, &args->handle); | ||
239 | } | ||
270 | 240 | ||
271 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); | 241 | int i915_gem_dumb_destroy(struct drm_file *file, |
272 | ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); | 242 | struct drm_device *dev, |
273 | kunmap_atomic(vaddr); | 243 | uint32_t handle) |
244 | { | ||
245 | return drm_gem_handle_delete(file, handle); | ||
246 | } | ||
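The dumb-buffer pitch/size computation above reduces to width times bytes-per-pixel, rounded up to a 64-byte pitch. A standalone worked example with hypothetical framebuffer parameters; note the (bpp + 1) / 8 rounding reproduces the code as merged here:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int width = 1024, height = 768, bpp = 32;
        unsigned int pitch = ALIGN(width * ((bpp + 1) / 8), 64);
        unsigned long size = (unsigned long)pitch * height;

        /* pitch=4096 size=3145728 */
        printf("pitch=%u size=%lu\n", pitch, size);
        return 0;
    }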
274 | 247 | ||
275 | return ret; | 248 | /** |
249 | * Creates a new mm object and returns a handle to it. | ||
250 | */ | ||
251 | int | ||
252 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | ||
253 | struct drm_file *file) | ||
254 | { | ||
255 | struct drm_i915_gem_create *args = data; | ||
256 | return i915_gem_create(file, dev, | ||
257 | args->size, &args->handle); | ||
276 | } | 258 | } |
277 | 259 | ||
278 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 260 | static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
279 | { | 261 | { |
280 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 262 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
281 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
282 | 263 | ||
283 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 264 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
284 | obj_priv->tiling_mode != I915_TILING_NONE; | 265 | obj->tiling_mode != I915_TILING_NONE; |
285 | } | 266 | } |
286 | 267 | ||
287 | static inline void | 268 | static inline void |
@@ -357,38 +338,51 @@ slow_shmem_bit17_copy(struct page *gpu_page, | |||
357 | * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). | 338 | * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). |
358 | */ | 339 | */ |
359 | static int | 340 | static int |
360 | i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | 341 | i915_gem_shmem_pread_fast(struct drm_device *dev, |
342 | struct drm_i915_gem_object *obj, | ||
361 | struct drm_i915_gem_pread *args, | 343 | struct drm_i915_gem_pread *args, |
362 | struct drm_file *file_priv) | 344 | struct drm_file *file) |
363 | { | 345 | { |
364 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 346 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
365 | ssize_t remain; | 347 | ssize_t remain; |
366 | loff_t offset, page_base; | 348 | loff_t offset; |
367 | char __user *user_data; | 349 | char __user *user_data; |
368 | int page_offset, page_length; | 350 | int page_offset, page_length; |
369 | 351 | ||
370 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 352 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
371 | remain = args->size; | 353 | remain = args->size; |
372 | 354 | ||
373 | obj_priv = to_intel_bo(obj); | ||
374 | offset = args->offset; | 355 | offset = args->offset; |
375 | 356 | ||
376 | while (remain > 0) { | 357 | while (remain > 0) { |
358 | struct page *page; | ||
359 | char *vaddr; | ||
360 | int ret; | ||
361 | |||
377 | /* Operation in this page | 362 | /* Operation in this page |
378 | * | 363 | * |
379 | * page_base = page offset within aperture | ||
380 | * page_offset = offset within page | 364 | * page_offset = offset within page |
381 | * page_length = bytes to copy for this page | 365 | * page_length = bytes to copy for this page |
382 | */ | 366 | */ |
383 | page_base = (offset & ~(PAGE_SIZE-1)); | ||
384 | page_offset = offset & (PAGE_SIZE-1); | 367 | page_offset = offset & (PAGE_SIZE-1); |
385 | page_length = remain; | 368 | page_length = remain; |
386 | if ((page_offset + remain) > PAGE_SIZE) | 369 | if ((page_offset + remain) > PAGE_SIZE) |
387 | page_length = PAGE_SIZE - page_offset; | 370 | page_length = PAGE_SIZE - page_offset; |
388 | 371 | ||
389 | if (fast_shmem_read(obj_priv->pages, | 372 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, |
390 | page_base, page_offset, | 373 | GFP_HIGHUSER | __GFP_RECLAIMABLE); |
391 | user_data, page_length)) | 374 | if (IS_ERR(page)) |
375 | return PTR_ERR(page); | ||
376 | |||
377 | vaddr = kmap_atomic(page); | ||
378 | ret = __copy_to_user_inatomic(user_data, | ||
379 | vaddr + page_offset, | ||
380 | page_length); | ||
381 | kunmap_atomic(vaddr); | ||
382 | |||
383 | mark_page_accessed(page); | ||
384 | page_cache_release(page); | ||
385 | if (ret) | ||
392 | return -EFAULT; | 386 | return -EFAULT; |
393 | 387 | ||
394 | remain -= page_length; | 388 | remain -= page_length; |
@@ -399,30 +393,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
399 | return 0; | 393 | return 0; |
400 | } | 394 | } |
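Why the fast path above may return -EFAULT rather than faulting in the user page (annotation on the pattern, not new code):

    /* The copy runs under kmap_atomic(), where page faults cannot be
     * serviced, so __copy_to_user_inatomic() fails instead of faulting
     * when the user buffer is not resident.  The pread ioctl treats
     * -EFAULT as "retry via the slow path", which pins the user pages
     * with get_user_pages() up front and may therefore sleep. */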
401 | 395 | ||
402 | static int | ||
403 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | ||
404 | { | ||
405 | int ret; | ||
406 | |||
407 | ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); | ||
408 | |||
409 | /* If we've insufficient memory to map in the pages, attempt | ||
410 | * to make some space by throwing out some old buffers. | ||
411 | */ | ||
412 | if (ret == -ENOMEM) { | ||
413 | struct drm_device *dev = obj->dev; | ||
414 | |||
415 | ret = i915_gem_evict_something(dev, obj->size, | ||
416 | i915_gem_get_gtt_alignment(obj)); | ||
417 | if (ret) | ||
418 | return ret; | ||
419 | |||
420 | ret = i915_gem_object_get_pages(obj, 0); | ||
421 | } | ||
422 | |||
423 | return ret; | ||
424 | } | ||
425 | |||
426 | /** | 396 | /** |
427 | * This is the fallback shmem pread path, which allocates temporary storage | 397 | * This is the fallback shmem pread path, which allocates temporary storage |
428 | * in kernel space to copy_to_user into outside of the struct_mutex, so we | 398 | * in kernel space to copy_to_user into outside of the struct_mutex, so we |
@@ -430,18 +400,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | |||
430 | * and not take page faults. | 400 | * and not take page faults. |
431 | */ | 401 | */ |
432 | static int | 402 | static int |
433 | i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | 403 | i915_gem_shmem_pread_slow(struct drm_device *dev, |
404 | struct drm_i915_gem_object *obj, | ||
434 | struct drm_i915_gem_pread *args, | 405 | struct drm_i915_gem_pread *args, |
435 | struct drm_file *file_priv) | 406 | struct drm_file *file) |
436 | { | 407 | { |
437 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 408 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
438 | struct mm_struct *mm = current->mm; | 409 | struct mm_struct *mm = current->mm; |
439 | struct page **user_pages; | 410 | struct page **user_pages; |
440 | ssize_t remain; | 411 | ssize_t remain; |
441 | loff_t offset, pinned_pages, i; | 412 | loff_t offset, pinned_pages, i; |
442 | loff_t first_data_page, last_data_page, num_pages; | 413 | loff_t first_data_page, last_data_page, num_pages; |
443 | int shmem_page_index, shmem_page_offset; | 414 | int shmem_page_offset; |
444 | int data_page_index, data_page_offset; | 415 | int data_page_index, data_page_offset; |
445 | int page_length; | 416 | int page_length; |
446 | int ret; | 417 | int ret; |
447 | uint64_t data_ptr = args->data_ptr; | 418 | uint64_t data_ptr = args->data_ptr; |
@@ -480,19 +451,18 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
480 | 451 | ||
481 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 452 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
482 | 453 | ||
483 | obj_priv = to_intel_bo(obj); | ||
484 | offset = args->offset; | 454 | offset = args->offset; |
485 | 455 | ||
486 | while (remain > 0) { | 456 | while (remain > 0) { |
457 | struct page *page; | ||
458 | |||
487 | /* Operation in this page | 459 | /* Operation in this page |
488 | * | 460 | * |
489 | * shmem_page_index = page number within shmem file | ||
490 | * shmem_page_offset = offset within page in shmem file | 461 | * shmem_page_offset = offset within page in shmem file |
491 | * data_page_index = page number in get_user_pages return | 462 | * data_page_index = page number in get_user_pages return |
492 | * data_page_offset = offset with data_page_index page. | 463 | * data_page_offset = offset with data_page_index page. |
493 | * page_length = bytes to copy for this page | 464 | * page_length = bytes to copy for this page |
494 | */ | 465 | */ |
495 | shmem_page_index = offset / PAGE_SIZE; | ||
496 | shmem_page_offset = offset & ~PAGE_MASK; | 466 | shmem_page_offset = offset & ~PAGE_MASK; |
497 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | 467 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; |
498 | data_page_offset = data_ptr & ~PAGE_MASK; | 468 | data_page_offset = data_ptr & ~PAGE_MASK; |
@@ -503,8 +473,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
503 | if ((data_page_offset + page_length) > PAGE_SIZE) | 473 | if ((data_page_offset + page_length) > PAGE_SIZE) |
504 | page_length = PAGE_SIZE - data_page_offset; | 474 | page_length = PAGE_SIZE - data_page_offset; |
505 | 475 | ||
476 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | ||
477 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
478 | if (IS_ERR(page)) | ||
479 | return PTR_ERR(page); | ||
480 | |||
506 | if (do_bit17_swizzling) { | 481 | if (do_bit17_swizzling) { |
507 | slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], | 482 | slow_shmem_bit17_copy(page, |
508 | shmem_page_offset, | 483 | shmem_page_offset, |
509 | user_pages[data_page_index], | 484 | user_pages[data_page_index], |
510 | data_page_offset, | 485 | data_page_offset, |
@@ -513,11 +488,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
513 | } else { | 488 | } else { |
514 | slow_shmem_copy(user_pages[data_page_index], | 489 | slow_shmem_copy(user_pages[data_page_index], |
515 | data_page_offset, | 490 | data_page_offset, |
516 | obj_priv->pages[shmem_page_index], | 491 | page, |
517 | shmem_page_offset, | 492 | shmem_page_offset, |
518 | page_length); | 493 | page_length); |
519 | } | 494 | } |
520 | 495 | ||
496 | mark_page_accessed(page); | ||
497 | page_cache_release(page); | ||
498 | |||
521 | remain -= page_length; | 499 | remain -= page_length; |
522 | data_ptr += page_length; | 500 | data_ptr += page_length; |
523 | offset += page_length; | 501 | offset += page_length; |
@@ -526,6 +504,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
526 | out: | 504 | out: |
527 | for (i = 0; i < pinned_pages; i++) { | 505 | for (i = 0; i < pinned_pages; i++) { |
528 | SetPageDirty(user_pages[i]); | 506 | SetPageDirty(user_pages[i]); |
507 | mark_page_accessed(user_pages[i]); | ||
529 | page_cache_release(user_pages[i]); | 508 | page_cache_release(user_pages[i]); |
530 | } | 509 | } |
531 | drm_free_large(user_pages); | 510 | drm_free_large(user_pages); |
@@ -540,11 +519,10 @@ out: | |||
540 | */ | 519 | */ |
541 | int | 520 | int |
542 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, | 521 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
543 | struct drm_file *file_priv) | 522 | struct drm_file *file) |
544 | { | 523 | { |
545 | struct drm_i915_gem_pread *args = data; | 524 | struct drm_i915_gem_pread *args = data; |
546 | struct drm_gem_object *obj; | 525 | struct drm_i915_gem_object *obj; |
547 | struct drm_i915_gem_object *obj_priv; | ||
548 | int ret = 0; | 526 | int ret = 0; |
549 | 527 | ||
550 | if (args->size == 0) | 528 | if (args->size == 0) |
@@ -564,39 +542,33 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
564 | if (ret) | 542 | if (ret) |
565 | return ret; | 543 | return ret; |
566 | 544 | ||
567 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 545 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
568 | if (obj == NULL) { | 546 | if (obj == NULL) { |
569 | ret = -ENOENT; | 547 | ret = -ENOENT; |
570 | goto unlock; | 548 | goto unlock; |
571 | } | 549 | } |
572 | obj_priv = to_intel_bo(obj); | ||
573 | 550 | ||
574 | /* Bounds check source. */ | 551 | /* Bounds check source. */ |
575 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 552 | if (args->offset > obj->base.size || |
553 | args->size > obj->base.size - args->offset) { | ||
576 | ret = -EINVAL; | 554 | ret = -EINVAL; |
577 | goto out; | 555 | goto out; |
578 | } | 556 | } |
579 | 557 | ||
580 | ret = i915_gem_object_get_pages_or_evict(obj); | ||
581 | if (ret) | ||
582 | goto out; | ||
583 | |||
584 | ret = i915_gem_object_set_cpu_read_domain_range(obj, | 558 | ret = i915_gem_object_set_cpu_read_domain_range(obj, |
585 | args->offset, | 559 | args->offset, |
586 | args->size); | 560 | args->size); |
587 | if (ret) | 561 | if (ret) |
588 | goto out_put; | 562 | goto out; |
589 | 563 | ||
590 | ret = -EFAULT; | 564 | ret = -EFAULT; |
591 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | 565 | if (!i915_gem_object_needs_bit17_swizzle(obj)) |
592 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | 566 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file); |
593 | if (ret == -EFAULT) | 567 | if (ret == -EFAULT) |
594 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); | 568 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file); |
595 | 569 | ||
596 | out_put: | ||
597 | i915_gem_object_put_pages(obj); | ||
598 | out: | 570 | out: |
599 | drm_gem_object_unreference(obj); | 571 | drm_gem_object_unreference(&obj->base); |
600 | unlock: | 572 | unlock: |
601 | mutex_unlock(&dev->struct_mutex); | 573 | mutex_unlock(&dev->struct_mutex); |
602 | return ret; | 574 | return ret; |
@@ -646,32 +618,16 @@ slow_kernel_write(struct io_mapping *mapping, | |||
646 | io_mapping_unmap(dst_vaddr); | 618 | io_mapping_unmap(dst_vaddr); |
647 | } | 619 | } |
648 | 620 | ||
649 | static inline int | ||
650 | fast_shmem_write(struct page **pages, | ||
651 | loff_t page_base, int page_offset, | ||
652 | char __user *data, | ||
653 | int length) | ||
654 | { | ||
655 | char *vaddr; | ||
656 | int ret; | ||
657 | |||
658 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); | ||
659 | ret = __copy_from_user_inatomic(vaddr + page_offset, data, length); | ||
660 | kunmap_atomic(vaddr); | ||
661 | |||
662 | return ret; | ||
663 | } | ||
664 | |||
665 | /** | 621 | /** |
666 | * This is the fast pwrite path, where we copy the data directly from the | 622 | * This is the fast pwrite path, where we copy the data directly from the |
667 | * user into the GTT, uncached. | 623 | * user into the GTT, uncached. |
668 | */ | 624 | */ |
669 | static int | 625 | static int |
670 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 626 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, |
627 | struct drm_i915_gem_object *obj, | ||
671 | struct drm_i915_gem_pwrite *args, | 628 | struct drm_i915_gem_pwrite *args, |
672 | struct drm_file *file_priv) | 629 | struct drm_file *file) |
673 | { | 630 | { |
674 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
675 | drm_i915_private_t *dev_priv = dev->dev_private; | 631 | drm_i915_private_t *dev_priv = dev->dev_private; |
676 | ssize_t remain; | 632 | ssize_t remain; |
677 | loff_t offset, page_base; | 633 | loff_t offset, page_base; |
@@ -681,8 +637,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
681 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 637 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
682 | remain = args->size; | 638 | remain = args->size; |
683 | 639 | ||
684 | obj_priv = to_intel_bo(obj); | 640 | offset = obj->gtt_offset + args->offset; |
685 | offset = obj_priv->gtt_offset + args->offset; | ||
686 | 641 | ||
687 | while (remain > 0) { | 642 | while (remain > 0) { |
688 | /* Operation in this page | 643 | /* Operation in this page |
@@ -722,11 +677,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
722 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). | 677 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). |
723 | */ | 678 | */ |
724 | static int | 679 | static int |
725 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 680 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, |
681 | struct drm_i915_gem_object *obj, | ||
726 | struct drm_i915_gem_pwrite *args, | 682 | struct drm_i915_gem_pwrite *args, |
727 | struct drm_file *file_priv) | 683 | struct drm_file *file) |
728 | { | 684 | { |
729 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
730 | drm_i915_private_t *dev_priv = dev->dev_private; | 685 | drm_i915_private_t *dev_priv = dev->dev_private; |
731 | ssize_t remain; | 686 | ssize_t remain; |
732 | loff_t gtt_page_base, offset; | 687 | loff_t gtt_page_base, offset; |
@@ -763,12 +718,15 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
763 | goto out_unpin_pages; | 718 | goto out_unpin_pages; |
764 | } | 719 | } |
765 | 720 | ||
766 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 721 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
722 | if (ret) | ||
723 | goto out_unpin_pages; | ||
724 | |||
725 | ret = i915_gem_object_put_fence(obj); | ||
767 | if (ret) | 726 | if (ret) |
768 | goto out_unpin_pages; | 727 | goto out_unpin_pages; |
769 | 728 | ||
770 | obj_priv = to_intel_bo(obj); | 729 | offset = obj->gtt_offset + args->offset; |
771 | offset = obj_priv->gtt_offset + args->offset; | ||
772 | 730 | ||
773 | while (remain > 0) { | 731 | while (remain > 0) { |
774 | /* Operation in this page | 732 | /* Operation in this page |
@@ -814,39 +772,58 @@ out_unpin_pages: | |||
814 | * copy_from_user into the kmapped pages backing the object. | 772 | * copy_from_user into the kmapped pages backing the object. |
815 | */ | 773 | */ |
816 | static int | 774 | static int |
817 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 775 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, |
776 | struct drm_i915_gem_object *obj, | ||
818 | struct drm_i915_gem_pwrite *args, | 777 | struct drm_i915_gem_pwrite *args, |
819 | struct drm_file *file_priv) | 778 | struct drm_file *file) |
820 | { | 779 | { |
821 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 780 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
822 | ssize_t remain; | 781 | ssize_t remain; |
823 | loff_t offset, page_base; | 782 | loff_t offset; |
824 | char __user *user_data; | 783 | char __user *user_data; |
825 | int page_offset, page_length; | 784 | int page_offset, page_length; |
826 | 785 | ||
827 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 786 | user_data = (char __user *) (uintptr_t) args->data_ptr; |
828 | remain = args->size; | 787 | remain = args->size; |
829 | 788 | ||
830 | obj_priv = to_intel_bo(obj); | ||
831 | offset = args->offset; | 789 | offset = args->offset; |
832 | obj_priv->dirty = 1; | 790 | obj->dirty = 1; |
833 | 791 | ||
834 | while (remain > 0) { | 792 | while (remain > 0) { |
793 | struct page *page; | ||
794 | char *vaddr; | ||
795 | int ret; | ||
796 | |||
835 | /* Operation in this page | 797 | /* Operation in this page |
836 | * | 798 | * |
837 | * page_base = page offset within aperture | ||
838 | * page_offset = offset within page | 799 | * page_offset = offset within page |
839 | * page_length = bytes to copy for this page | 800 | * page_length = bytes to copy for this page |
840 | */ | 801 | */ |
841 | page_base = (offset & ~(PAGE_SIZE-1)); | ||
842 | page_offset = offset & (PAGE_SIZE-1); | 802 | page_offset = offset & (PAGE_SIZE-1); |
843 | page_length = remain; | 803 | page_length = remain; |
844 | if ((page_offset + remain) > PAGE_SIZE) | 804 | if ((page_offset + remain) > PAGE_SIZE) |
845 | page_length = PAGE_SIZE - page_offset; | 805 | page_length = PAGE_SIZE - page_offset; |
846 | 806 | ||
847 | if (fast_shmem_write(obj_priv->pages, | 807 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, |
848 | page_base, page_offset, | 808 | GFP_HIGHUSER | __GFP_RECLAIMABLE); |
849 | user_data, page_length)) | 809 | if (IS_ERR(page)) |
810 | return PTR_ERR(page); | ||
811 | |||
812 | vaddr = kmap_atomic(page, KM_USER0); | ||
813 | ret = __copy_from_user_inatomic(vaddr + page_offset, | ||
814 | user_data, | ||
815 | page_length); | ||
816 | kunmap_atomic(vaddr, KM_USER0); | ||
817 | |||
818 | set_page_dirty(page); | ||
819 | mark_page_accessed(page); | ||
820 | page_cache_release(page); | ||
821 | |||
822 | /* If we get a fault while copying data, then (presumably) our | ||
823 | * source page isn't available. Return the error and we'll | ||
824 | * retry in the slow path. | ||
825 | */ | ||
826 | if (ret) | ||
850 | return -EFAULT; | 827 | return -EFAULT; |
851 | 828 | ||
852 | remain -= page_length; | 829 | remain -= page_length; |
@@ -865,17 +842,18 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
865 | * struct_mutex is held. | 842 | * struct_mutex is held. |
866 | */ | 843 | */ |
867 | static int | 844 | static int |
868 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 845 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, |
846 | struct drm_i915_gem_object *obj, | ||
869 | struct drm_i915_gem_pwrite *args, | 847 | struct drm_i915_gem_pwrite *args, |
870 | struct drm_file *file_priv) | 848 | struct drm_file *file) |
871 | { | 849 | { |
872 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 850 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
873 | struct mm_struct *mm = current->mm; | 851 | struct mm_struct *mm = current->mm; |
874 | struct page **user_pages; | 852 | struct page **user_pages; |
875 | ssize_t remain; | 853 | ssize_t remain; |
876 | loff_t offset, pinned_pages, i; | 854 | loff_t offset, pinned_pages, i; |
877 | loff_t first_data_page, last_data_page, num_pages; | 855 | loff_t first_data_page, last_data_page, num_pages; |
878 | int shmem_page_index, shmem_page_offset; | 856 | int shmem_page_offset; |
879 | int data_page_index, data_page_offset; | 857 | int data_page_index, data_page_offset; |
880 | int page_length; | 858 | int page_length; |
881 | int ret; | 859 | int ret; |
@@ -913,20 +891,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
913 | 891 | ||
914 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 892 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); |
915 | 893 | ||
916 | obj_priv = to_intel_bo(obj); | ||
917 | offset = args->offset; | 894 | offset = args->offset; |
918 | obj_priv->dirty = 1; | 895 | obj->dirty = 1; |
919 | 896 | ||
920 | while (remain > 0) { | 897 | while (remain > 0) { |
898 | struct page *page; | ||
899 | |||
921 | /* Operation in this page | 900 | /* Operation in this page |
922 | * | 901 | * |
923 | * shmem_page_index = page number within shmem file | ||
924 | * shmem_page_offset = offset within page in shmem file | 902 | * shmem_page_offset = offset within page in shmem file |
925 | * data_page_index = page number in get_user_pages return | 903 | * data_page_index = page number in get_user_pages return |
926 | * data_page_offset = offset with data_page_index page. | 904 | * data_page_offset = offset with data_page_index page. |
927 | * page_length = bytes to copy for this page | 905 | * page_length = bytes to copy for this page |
928 | */ | 906 | */ |
929 | shmem_page_index = offset / PAGE_SIZE; | ||
930 | shmem_page_offset = offset & ~PAGE_MASK; | 907 | shmem_page_offset = offset & ~PAGE_MASK; |
931 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | 908 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; |
932 | data_page_offset = data_ptr & ~PAGE_MASK; | 909 | data_page_offset = data_ptr & ~PAGE_MASK; |
@@ -937,21 +914,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | |||
937 | if ((data_page_offset + page_length) > PAGE_SIZE) | 914 | if ((data_page_offset + page_length) > PAGE_SIZE) |
938 | page_length = PAGE_SIZE - data_page_offset; | 915 | page_length = PAGE_SIZE - data_page_offset; |
939 | 916 | ||
917 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | ||
918 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
919 | if (IS_ERR(page)) { | ||
920 | ret = PTR_ERR(page); | ||
921 | goto out; | ||
922 | } | ||
923 | |||
940 | if (do_bit17_swizzling) { | 924 | if (do_bit17_swizzling) { |
941 | slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], | 925 | slow_shmem_bit17_copy(page, |
942 | shmem_page_offset, | 926 | shmem_page_offset, |
943 | user_pages[data_page_index], | 927 | user_pages[data_page_index], |
944 | data_page_offset, | 928 | data_page_offset, |
945 | page_length, | 929 | page_length, |
946 | 0); | 930 | 0); |
947 | } else { | 931 | } else { |
948 | slow_shmem_copy(obj_priv->pages[shmem_page_index], | 932 | slow_shmem_copy(page, |
949 | shmem_page_offset, | 933 | shmem_page_offset, |
950 | user_pages[data_page_index], | 934 | user_pages[data_page_index], |
951 | data_page_offset, | 935 | data_page_offset, |
952 | page_length); | 936 | page_length); |
953 | } | 937 | } |
954 | 938 | ||
939 | set_page_dirty(page); | ||
940 | mark_page_accessed(page); | ||
941 | page_cache_release(page); | ||
942 | |||
955 | remain -= page_length; | 943 | remain -= page_length; |
956 | data_ptr += page_length; | 944 | data_ptr += page_length; |
957 | offset += page_length; | 945 | offset += page_length; |
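A minimal user-space analogue of the copy loop above, clamping each step so it never crosses a page boundary on either the shmem or the user side; the buffer sizes and offsets are made up:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    static void copy_paged(char *dst, unsigned dst_off,
                           const char *src, unsigned src_off, unsigned remain)
    {
        while (remain > 0) {
            unsigned page_length = remain;

            /* Clamp to the end of the current page on both sides,
             * mirroring the two clamps in the hunk above. */
            if (dst_off % PAGE_SIZE + page_length > PAGE_SIZE)
                page_length = PAGE_SIZE - dst_off % PAGE_SIZE;
            if (src_off % PAGE_SIZE + page_length > PAGE_SIZE)
                page_length = PAGE_SIZE - src_off % PAGE_SIZE;

            memcpy(dst + dst_off, src + src_off, page_length);

            remain  -= page_length;
            dst_off += page_length;
            src_off += page_length;
        }
    }

    int main(void)
    {
        static char src[4 * PAGE_SIZE], dst[3 * PAGE_SIZE];

        memset(src, 'x', sizeof(src));
        copy_paged(dst, 100, src, 5000, 9000);
        printf("%c %c\n", dst[100], dst[9099]);   /* x x */
        return 0;
    }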
@@ -975,8 +963,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
975 | struct drm_file *file) | 963 | struct drm_file *file) |
976 | { | 964 | { |
977 | struct drm_i915_gem_pwrite *args = data; | 965 | struct drm_i915_gem_pwrite *args = data; |
978 | struct drm_gem_object *obj; | 966 | struct drm_i915_gem_object *obj; |
979 | struct drm_i915_gem_object *obj_priv; | ||
980 | int ret; | 967 | int ret; |
981 | 968 | ||
982 | if (args->size == 0) | 969 | if (args->size == 0) |
@@ -996,15 +983,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
996 | if (ret) | 983 | if (ret) |
997 | return ret; | 984 | return ret; |
998 | 985 | ||
999 | obj = drm_gem_object_lookup(dev, file, args->handle); | 986 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1000 | if (obj == NULL) { | 987 | if (obj == NULL) { |
1001 | ret = -ENOENT; | 988 | ret = -ENOENT; |
1002 | goto unlock; | 989 | goto unlock; |
1003 | } | 990 | } |
1004 | obj_priv = to_intel_bo(obj); | ||
1005 | 991 | ||
1006 | /* Bounds check destination. */ | 992 | /* Bounds check destination. */ |
1007 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 993 | if (args->offset > obj->base.size || |
994 | args->size > obj->base.size - args->offset) { | ||
1008 | ret = -EINVAL; | 995 | ret = -EINVAL; |
1009 | goto out; | 996 | goto out; |
1010 | } | 997 | } |
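The two-comparison form of the bounds check above is deliberate: a naive "offset + size > obj_size" can wrap around and let a huge offset/size pair pass, whereas checking size against the remaining space cannot overflow. A standalone sketch with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    static int in_bounds(uint64_t offset, uint64_t size, uint64_t obj_size)
    {
        /* offset + size would overflow; this form never does. */
        return offset <= obj_size && size <= obj_size - offset;
    }

    int main(void)
    {
        printf("%d\n", in_bounds(UINT64_MAX - 1, 2, 4096)); /* 0: rejected */
        printf("%d\n", in_bounds(1024, 1024, 4096));        /* 1: accepted */
        return 0;
    }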
@@ -1015,16 +1002,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1015 | * pread/pwrite currently are reading and writing from the CPU | 1002 | * pread/pwrite currently are reading and writing from the CPU |
1016 | * perspective, requiring manual detiling by the client. | 1003 | * perspective, requiring manual detiling by the client. |
1017 | */ | 1004 | */ |
1018 | if (obj_priv->phys_obj) | 1005 | if (obj->phys_obj) |
1019 | ret = i915_gem_phys_pwrite(dev, obj, args, file); | 1006 | ret = i915_gem_phys_pwrite(dev, obj, args, file); |
1020 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | 1007 | else if (obj->gtt_space && |
1021 | obj_priv->gtt_space && | 1008 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
1022 | obj->write_domain != I915_GEM_DOMAIN_CPU) { | 1009 | ret = i915_gem_object_pin(obj, 0, true); |
1023 | ret = i915_gem_object_pin(obj, 0); | ||
1024 | if (ret) | 1010 | if (ret) |
1025 | goto out; | 1011 | goto out; |
1026 | 1012 | ||
1027 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 1013 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
1014 | if (ret) | ||
1015 | goto out_unpin; | ||
1016 | |||
1017 | ret = i915_gem_object_put_fence(obj); | ||
1028 | if (ret) | 1018 | if (ret) |
1029 | goto out_unpin; | 1019 | goto out_unpin; |
1030 | 1020 | ||
@@ -1035,26 +1025,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1035 | out_unpin: | 1025 | out_unpin: |
1036 | i915_gem_object_unpin(obj); | 1026 | i915_gem_object_unpin(obj); |
1037 | } else { | 1027 | } else { |
1038 | ret = i915_gem_object_get_pages_or_evict(obj); | ||
1039 | if (ret) | ||
1040 | goto out; | ||
1041 | |||
1042 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 1028 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
1043 | if (ret) | 1029 | if (ret) |
1044 | goto out_put; | 1030 | goto out; |
1045 | 1031 | ||
1046 | ret = -EFAULT; | 1032 | ret = -EFAULT; |
1047 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | 1033 | if (!i915_gem_object_needs_bit17_swizzle(obj)) |
1048 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); | 1034 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); |
1049 | if (ret == -EFAULT) | 1035 | if (ret == -EFAULT) |
1050 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); | 1036 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); |
1051 | |||
1052 | out_put: | ||
1053 | i915_gem_object_put_pages(obj); | ||
1054 | } | 1037 | } |
1055 | 1038 | ||
1056 | out: | 1039 | out: |
1057 | drm_gem_object_unreference(obj); | 1040 | drm_gem_object_unreference(&obj->base); |
1058 | unlock: | 1041 | unlock: |
1059 | mutex_unlock(&dev->struct_mutex); | 1042 | mutex_unlock(&dev->struct_mutex); |
1060 | return ret; | 1043 | return ret; |
@@ -1066,12 +1049,10 @@ unlock: | |||
1066 | */ | 1049 | */ |
1067 | int | 1050 | int |
1068 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 1051 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
1069 | struct drm_file *file_priv) | 1052 | struct drm_file *file) |
1070 | { | 1053 | { |
1071 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1072 | struct drm_i915_gem_set_domain *args = data; | 1054 | struct drm_i915_gem_set_domain *args = data; |
1073 | struct drm_gem_object *obj; | 1055 | struct drm_i915_gem_object *obj; |
1074 | struct drm_i915_gem_object *obj_priv; | ||
1075 | uint32_t read_domains = args->read_domains; | 1056 | uint32_t read_domains = args->read_domains; |
1076 | uint32_t write_domain = args->write_domain; | 1057 | uint32_t write_domain = args->write_domain; |
1077 | int ret; | 1058 | int ret; |
@@ -1096,28 +1077,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1096 | if (ret) | 1077 | if (ret) |
1097 | return ret; | 1078 | return ret; |
1098 | 1079 | ||
1099 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1080 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1100 | if (obj == NULL) { | 1081 | if (obj == NULL) { |
1101 | ret = -ENOENT; | 1082 | ret = -ENOENT; |
1102 | goto unlock; | 1083 | goto unlock; |
1103 | } | 1084 | } |
1104 | obj_priv = to_intel_bo(obj); | ||
1105 | |||
1106 | intel_mark_busy(dev, obj); | ||
1107 | 1085 | ||
1108 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 1086 | if (read_domains & I915_GEM_DOMAIN_GTT) { |
1109 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | 1087 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); |
1110 | 1088 | ||
1111 | /* Update the LRU on the fence for the CPU access that's | ||
1112 | * about to occur. | ||
1113 | */ | ||
1114 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | ||
1115 | struct drm_i915_fence_reg *reg = | ||
1116 | &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
1117 | list_move_tail(&reg->lru_list, ||
1118 | &dev_priv->mm.fence_list); | ||
1119 | } | ||
1120 | |||
1121 | /* Silently promote "you're not bound, there was nothing to do" | 1089 | /* Silently promote "you're not bound, there was nothing to do" |
1122 | * to success, since the client was just asking us to | 1090 | * to success, since the client was just asking us to |
1123 | * make sure everything was done. | 1091 | * make sure everything was done. |
@@ -1128,11 +1096,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1128 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 1096 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); |
1129 | } | 1097 | } |
1130 | 1098 | ||
1131 | /* Maintain LRU order of "inactive" objects */ | 1099 | drm_gem_object_unreference(&obj->base); |
1132 | if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) | ||
1133 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | ||
1134 | |||
1135 | drm_gem_object_unreference(obj); | ||
1136 | unlock: | 1100 | unlock: |
1137 | mutex_unlock(&dev->struct_mutex); | 1101 | mutex_unlock(&dev->struct_mutex); |
1138 | return ret; | 1102 | return ret; |
@@ -1143,10 +1107,10 @@ unlock: | |||
1143 | */ | 1107 | */ |
1144 | int | 1108 | int |
1145 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 1109 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
1146 | struct drm_file *file_priv) | 1110 | struct drm_file *file) |
1147 | { | 1111 | { |
1148 | struct drm_i915_gem_sw_finish *args = data; | 1112 | struct drm_i915_gem_sw_finish *args = data; |
1149 | struct drm_gem_object *obj; | 1113 | struct drm_i915_gem_object *obj; |
1150 | int ret = 0; | 1114 | int ret = 0; |
1151 | 1115 | ||
1152 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1116 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
@@ -1156,17 +1120,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1156 | if (ret) | 1120 | if (ret) |
1157 | return ret; | 1121 | return ret; |
1158 | 1122 | ||
1159 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1123 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
1160 | if (obj == NULL) { | 1124 | if (obj == NULL) { |
1161 | ret = -ENOENT; | 1125 | ret = -ENOENT; |
1162 | goto unlock; | 1126 | goto unlock; |
1163 | } | 1127 | } |
1164 | 1128 | ||
1165 | /* Pinned buffers may be scanout, so flush the cache */ | 1129 | /* Pinned buffers may be scanout, so flush the cache */ |
1166 | if (to_intel_bo(obj)->pin_count) | 1130 | if (obj->pin_count) |
1167 | i915_gem_object_flush_cpu_write_domain(obj); | 1131 | i915_gem_object_flush_cpu_write_domain(obj); |
1168 | 1132 | ||
1169 | drm_gem_object_unreference(obj); | 1133 | drm_gem_object_unreference(&obj->base); |
1170 | unlock: | 1134 | unlock: |
1171 | mutex_unlock(&dev->struct_mutex); | 1135 | mutex_unlock(&dev->struct_mutex); |
1172 | return ret; | 1136 | return ret; |
@@ -1181,8 +1145,9 @@ unlock: | |||
1181 | */ | 1145 | */ |
1182 | int | 1146 | int |
1183 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 1147 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
1184 | struct drm_file *file_priv) | 1148 | struct drm_file *file) |
1185 | { | 1149 | { |
1150 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1186 | struct drm_i915_gem_mmap *args = data; | 1151 | struct drm_i915_gem_mmap *args = data; |
1187 | struct drm_gem_object *obj; | 1152 | struct drm_gem_object *obj; |
1188 | loff_t offset; | 1153 | loff_t offset; |
@@ -1191,10 +1156,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1191 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1156 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1192 | return -ENODEV; | 1157 | return -ENODEV; |
1193 | 1158 | ||
1194 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1159 | obj = drm_gem_object_lookup(dev, file, args->handle); |
1195 | if (obj == NULL) | 1160 | if (obj == NULL) |
1196 | return -ENOENT; | 1161 | return -ENOENT; |
1197 | 1162 | ||
1163 | if (obj->size > dev_priv->mm.gtt_mappable_end) { | ||
1164 | drm_gem_object_unreference_unlocked(obj); | ||
1165 | return -E2BIG; | ||
1166 | } | ||
1167 | |||
1198 | offset = args->offset; | 1168 | offset = args->offset; |
1199 | 1169 | ||
1200 | down_write(&current->mm->mmap_sem); | 1170 | down_write(&current->mm->mmap_sem); |
@@ -1229,10 +1199,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1229 | */ | 1199 | */ |
1230 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1200 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
1231 | { | 1201 | { |
1232 | struct drm_gem_object *obj = vma->vm_private_data; | 1202 | struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); |
1233 | struct drm_device *dev = obj->dev; | 1203 | struct drm_device *dev = obj->base.dev; |
1234 | drm_i915_private_t *dev_priv = dev->dev_private; | 1204 | drm_i915_private_t *dev_priv = dev->dev_private; |
1235 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1236 | pgoff_t page_offset; | 1205 | pgoff_t page_offset; |
1237 | unsigned long pfn; | 1206 | unsigned long pfn; |
1238 | int ret = 0; | 1207 | int ret = 0; |
@@ -1244,27 +1213,35 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1244 | 1213 | ||
1245 | /* Now bind it into the GTT if needed */ | 1214 | /* Now bind it into the GTT if needed */ |
1246 | mutex_lock(&dev->struct_mutex); | 1215 | mutex_lock(&dev->struct_mutex); |
1247 | if (!obj_priv->gtt_space) { | ||
1248 | ret = i915_gem_object_bind_to_gtt(obj, 0); | ||
1249 | if (ret) | ||
1250 | goto unlock; | ||
1251 | 1216 | ||
1252 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | 1217 | if (!obj->map_and_fenceable) { |
1218 | ret = i915_gem_object_unbind(obj); | ||
1253 | if (ret) | 1219 | if (ret) |
1254 | goto unlock; | 1220 | goto unlock; |
1255 | } | 1221 | } |
1256 | 1222 | if (!obj->gtt_space) { | |
1257 | /* Need a new fence register? */ | 1223 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); |
1258 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | ||
1259 | ret = i915_gem_object_get_fence_reg(obj, true); | ||
1260 | if (ret) | 1224 | if (ret) |
1261 | goto unlock; | 1225 | goto unlock; |
1262 | } | 1226 | } |
1263 | 1227 | ||
1264 | if (i915_gem_object_is_inactive(obj_priv)) | 1228 | ret = i915_gem_object_set_to_gtt_domain(obj, write); |
1265 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 1229 | if (ret) |
1230 | goto unlock; | ||
1231 | |||
1232 | if (obj->tiling_mode == I915_TILING_NONE) | ||
1233 | ret = i915_gem_object_put_fence(obj); | ||
1234 | else | ||
1235 | ret = i915_gem_object_get_fence(obj, NULL, true); | ||
1236 | if (ret) | ||
1237 | goto unlock; | ||
1238 | |||
1239 | if (i915_gem_object_is_inactive(obj)) | ||
1240 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | ||
1266 | 1241 | ||
1267 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 1242 | obj->fault_mappable = true; |
1243 | |||
1244 | pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + | ||
1268 | page_offset; | 1245 | page_offset; |
1269 | 1246 | ||
1270 | /* Finally, remap it using the new GTT offset */ | 1247 | /* Finally, remap it using the new GTT offset */ |
@@ -1273,11 +1250,12 @@ unlock: | |||
1273 | mutex_unlock(&dev->struct_mutex); | 1250 | mutex_unlock(&dev->struct_mutex); |
1274 | 1251 | ||
1275 | switch (ret) { | 1252 | switch (ret) { |
1253 | case -EAGAIN: | ||
1254 | set_need_resched(); | ||
1276 | case 0: | 1255 | case 0: |
1277 | case -ERESTARTSYS: | 1256 | case -ERESTARTSYS: |
1278 | return VM_FAULT_NOPAGE; | 1257 | return VM_FAULT_NOPAGE; |
1279 | case -ENOMEM: | 1258 | case -ENOMEM: |
1280 | case -EAGAIN: | ||
1281 | return VM_FAULT_OOM; | 1259 | return VM_FAULT_OOM; |
1282 | default: | 1260 | default: |
1283 | return VM_FAULT_SIGBUS; | 1261 | return VM_FAULT_SIGBUS; |
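The reshuffled switch above now treats -EAGAIN (a wedged GPU) as "reschedule and retry the fault" rather than as out-of-memory. A user-space sketch of the mapping; the VM_FAULT_* stand-ins and the ERESTARTSYS value are assumptions, since neither is exported to userspace:

    #include <errno.h>
    #include <stdio.h>

    #define ERESTARTSYS 512   /* kernel-internal errno, assumed value */

    /* Illustrative stand-ins for the VM_FAULT_* codes used above. */
    enum { FAULT_NOPAGE, FAULT_OOM, FAULT_SIGBUS };

    static int fault_code(int err)
    {
        switch (err) {
        case -EAGAIN:          /* wedged: back off, then retry; falls through */
        case 0:
        case -ERESTARTSYS:
            return FAULT_NOPAGE;
        case -ENOMEM:
            return FAULT_OOM;
        default:
            return FAULT_SIGBUS;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               fault_code(0), fault_code(-ENOMEM), fault_code(-EINVAL));
        return 0;
    }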
@@ -1296,37 +1274,39 @@ unlock: | |||
1296 | * This routine allocates and attaches a fake offset for @obj. | 1274 | * This routine allocates and attaches a fake offset for @obj. |
1297 | */ | 1275 | */ |
1298 | static int | 1276 | static int |
1299 | i915_gem_create_mmap_offset(struct drm_gem_object *obj) | 1277 | i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj) |
1300 | { | 1278 | { |
1301 | struct drm_device *dev = obj->dev; | 1279 | struct drm_device *dev = obj->base.dev; |
1302 | struct drm_gem_mm *mm = dev->mm_private; | 1280 | struct drm_gem_mm *mm = dev->mm_private; |
1303 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1304 | struct drm_map_list *list; | 1281 | struct drm_map_list *list; |
1305 | struct drm_local_map *map; | 1282 | struct drm_local_map *map; |
1306 | int ret = 0; | 1283 | int ret = 0; |
1307 | 1284 | ||
1308 | /* Set the object up for mmap'ing */ | 1285 | /* Set the object up for mmap'ing */ |
1309 | list = &obj->map_list; | 1286 | list = &obj->base.map_list; |
1310 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); | 1287 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); |
1311 | if (!list->map) | 1288 | if (!list->map) |
1312 | return -ENOMEM; | 1289 | return -ENOMEM; |
1313 | 1290 | ||
1314 | map = list->map; | 1291 | map = list->map; |
1315 | map->type = _DRM_GEM; | 1292 | map->type = _DRM_GEM; |
1316 | map->size = obj->size; | 1293 | map->size = obj->base.size; |
1317 | map->handle = obj; | 1294 | map->handle = obj; |
1318 | 1295 | ||
1319 | /* Get a DRM GEM mmap offset allocated... */ | 1296 | /* Get a DRM GEM mmap offset allocated... */ |
1320 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | 1297 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, |
1321 | obj->size / PAGE_SIZE, 0, 0); | 1298 | obj->base.size / PAGE_SIZE, |
1299 | 0, 0); | ||
1322 | if (!list->file_offset_node) { | 1300 | if (!list->file_offset_node) { |
1323 | DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); | 1301 | DRM_ERROR("failed to allocate offset for bo %d\n", |
1302 | obj->base.name); | ||
1324 | ret = -ENOSPC; | 1303 | ret = -ENOSPC; |
1325 | goto out_free_list; | 1304 | goto out_free_list; |
1326 | } | 1305 | } |
1327 | 1306 | ||
1328 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | 1307 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, |
1329 | obj->size / PAGE_SIZE, 0); | 1308 | obj->base.size / PAGE_SIZE, |
1309 | 0); | ||
1330 | if (!list->file_offset_node) { | 1310 | if (!list->file_offset_node) { |
1331 | ret = -ENOMEM; | 1311 | ret = -ENOMEM; |
1332 | goto out_free_list; | 1312 | goto out_free_list; |
@@ -1339,16 +1319,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) | |||
1339 | goto out_free_mm; | 1319 | goto out_free_mm; |
1340 | } | 1320 | } |
1341 | 1321 | ||
1342 | /* By now we should be all set, any drm_mmap request on the offset | ||
1343 | * below will get to our mmap & fault handler */ | ||
1344 | obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; | ||
1345 | |||
1346 | return 0; | 1322 | return 0; |
1347 | 1323 | ||
1348 | out_free_mm: | 1324 | out_free_mm: |
1349 | drm_mm_put_block(list->file_offset_node); | 1325 | drm_mm_put_block(list->file_offset_node); |
1350 | out_free_list: | 1326 | out_free_list: |
1351 | kfree(list->map); | 1327 | kfree(list->map); |
1328 | list->map = NULL; | ||
1352 | 1329 | ||
1353 | return ret; | 1330 | return ret; |
1354 | } | 1331 | } |
@@ -1368,38 +1345,51 @@ out_free_list: | |||
1368 | * fixup by i915_gem_fault(). | 1345 | * fixup by i915_gem_fault(). |
1369 | */ | 1346 | */ |
1370 | void | 1347 | void |
1371 | i915_gem_release_mmap(struct drm_gem_object *obj) | 1348 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) |
1372 | { | 1349 | { |
1373 | struct drm_device *dev = obj->dev; | 1350 | if (!obj->fault_mappable) |
1374 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1351 | return; |
1352 | |||
1353 | unmap_mapping_range(obj->base.dev->dev_mapping, | ||
1354 | (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, | ||
1355 | obj->base.size, 1); | ||
1375 | 1356 | ||
1376 | if (dev->dev_mapping) | 1357 | obj->fault_mappable = false; |
1377 | unmap_mapping_range(dev->dev_mapping, | ||
1378 | obj_priv->mmap_offset, obj->size, 1); | ||
1379 | } | 1358 | } |
1380 | 1359 | ||
1381 | static void | 1360 | static void |
1382 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1361 | i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) |
1383 | { | 1362 | { |
1384 | struct drm_device *dev = obj->dev; | 1363 | struct drm_device *dev = obj->base.dev; |
1385 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1386 | struct drm_gem_mm *mm = dev->mm_private; | 1364 | struct drm_gem_mm *mm = dev->mm_private; |
1387 | struct drm_map_list *list; | 1365 | struct drm_map_list *list = &obj->base.map_list; |
1388 | 1366 | ||
1389 | list = &obj->map_list; | ||
1390 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | 1367 | drm_ht_remove_item(&mm->offset_hash, &list->hash); |
1368 | drm_mm_put_block(list->file_offset_node); | ||
1369 | kfree(list->map); | ||
1370 | list->map = NULL; | ||
1371 | } | ||
1391 | 1372 | ||
1392 | if (list->file_offset_node) { | 1373 | static uint32_t |
1393 | drm_mm_put_block(list->file_offset_node); | 1374 | i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) |
1394 | list->file_offset_node = NULL; | 1375 | { |
1395 | } | 1376 | struct drm_device *dev = obj->base.dev; |
1377 | uint32_t size; | ||
1396 | 1378 | ||
1397 | if (list->map) { | 1379 | if (INTEL_INFO(dev)->gen >= 4 || |
1398 | kfree(list->map); | 1380 | obj->tiling_mode == I915_TILING_NONE) |
1399 | list->map = NULL; | 1381 | return obj->base.size; |
1400 | } | ||
1401 | 1382 | ||
1402 | obj_priv->mmap_offset = 0; | 1383 | /* Previous chips need a power-of-two fence region when tiling */ |
1384 | if (INTEL_INFO(dev)->gen == 3) | ||
1385 | size = 1024*1024; | ||
1386 | else | ||
1387 | size = 512*1024; | ||
1388 | |||
1389 | while (size < obj->base.size) | ||
1390 | size <<= 1; | ||
1391 | |||
1392 | return size; | ||
1403 | } | 1393 | } |
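The new i915_gem_get_gtt_size() helper replaces the old open-coded for-loop with a clearer doubling loop: gen3 fences start at 1MiB, earlier gens at 512KiB, and the region doubles until the object fits. A standalone rendition with illustrative sizes:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fence_size(int gen, uint32_t obj_size)
    {
        uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

        /* Grow by powers of two until the object fits. */
        while (size < obj_size)
            size <<= 1;

        return size;
    }

    int main(void)
    {
        printf("%u\n", fence_size(3, 1500 * 1024)); /* 2097152 */
        printf("%u\n", fence_size(2, 300 * 1024));  /* 524288  */
        return 0;
    }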
1404 | 1394 | ||
1405 | /** | 1395 | /** |
@@ -1407,59 +1397,71 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) | |||
1407 | * @obj: object to check | 1397 | * @obj: object to check |
1408 | * | 1398 | * |
1409 | * Return the required GTT alignment for an object, taking into account | 1399 | * Return the required GTT alignment for an object, taking into account |
1410 | * potential fence register mapping if needed. | 1400 | * potential fence register mapping. |
1411 | */ | 1401 | */ |
1412 | static uint32_t | 1402 | static uint32_t |
1413 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | 1403 | i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) |
1414 | { | 1404 | { |
1415 | struct drm_device *dev = obj->dev; | 1405 | struct drm_device *dev = obj->base.dev; |
1416 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1417 | int start, i; | ||
1418 | 1406 | ||
1419 | /* | 1407 | /* |
1420 | * Minimum alignment is 4k (GTT page size), but might be greater | 1408 | * Minimum alignment is 4k (GTT page size), but might be greater |
1421 | * if a fence register is needed for the object. | 1409 | * if a fence register is needed for the object. |
1422 | */ | 1410 | */ |
1423 | if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE) | 1411 | if (INTEL_INFO(dev)->gen >= 4 || |
1412 | obj->tiling_mode == I915_TILING_NONE) | ||
1424 | return 4096; | 1413 | return 4096; |
1425 | 1414 | ||
1426 | /* | 1415 | /* |
1427 | * Previous chips need to be aligned to the size of the smallest | 1416 | * Previous chips need to be aligned to the size of the smallest |
1428 | * fence register that can contain the object. | 1417 | * fence register that can contain the object. |
1429 | */ | 1418 | */ |
1430 | if (INTEL_INFO(dev)->gen == 3) | 1419 | return i915_gem_get_gtt_size(obj); |
1431 | start = 1024*1024; | ||
1432 | else | ||
1433 | start = 512*1024; | ||
1434 | |||
1435 | for (i = start; i < obj->size; i <<= 1) | ||
1436 | ; | ||
1437 | |||
1438 | return i; | ||
1439 | } | 1420 | } |
1440 | 1421 | ||
1441 | /** | 1422 | /** |
1442 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 1423 | * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an |
1443 | * @dev: DRM device | 1424 | * unfenced object |
1444 | * @data: GTT mapping ioctl data | 1425 | * @obj: object to check |
1445 | * @file_priv: GEM object info | ||
1446 | * | ||
1447 | * Simply returns the fake offset to userspace so it can mmap it. | ||
1448 | * The mmap call will end up in drm_gem_mmap(), which will set things | ||
1449 | * up so we can get faults in the handler above. | ||
1450 | * | 1426 | * |
1451 | * The fault handler will take care of binding the object into the GTT | 1427 | * Return the required GTT alignment for an object, only taking into account |
1452 | * (since it may have been evicted to make room for something), allocating | 1428 | * unfenced tiled surface requirements. |
1453 | * a fence register, and mapping the appropriate aperture address into | ||
1454 | * userspace. | ||
1455 | */ | 1429 | */ |
1430 | static uint32_t | ||
1431 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) | ||
1432 | { | ||
1433 | struct drm_device *dev = obj->base.dev; | ||
1434 | int tile_height; | ||
1435 | |||
1436 | /* | ||
1437 | * Minimum alignment is 4k (GTT page size) for sane hw. | ||
1438 | */ | ||
1439 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || | ||
1440 | obj->tiling_mode == I915_TILING_NONE) | ||
1441 | return 4096; | ||
1442 | |||
1443 | /* | ||
1444 | * Older chips need unfenced tiled buffers to be aligned to the left | ||
1445 | * edge of an even tile row (where tile rows are counted as if the bo is | ||
1446 | * placed in a fenced gtt region). | ||
1447 | */ | ||
1448 | if (IS_GEN2(dev) || | ||
1449 | (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) | ||
1450 | tile_height = 32; | ||
1451 | else | ||
1452 | tile_height = 8; | ||
1453 | |||
1454 | return tile_height * obj->stride * 2; | ||
1455 | } | ||
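The new unfenced-alignment helper boils down to a small computation: two tile rows of bytes, with a 32-row tile on gen2 or with 128-byte Y tiling, and an 8-row tile otherwise. A sketch; the stride values are made up:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t unfenced_alignment(int gen2, int y_tiled_128,
                                       uint32_t stride)
    {
        int tile_height = (gen2 || y_tiled_128) ? 32 : 8;

        /* Align to the left edge of an even tile row. */
        return tile_height * stride * 2;
    }

    int main(void)
    {
        printf("%u\n", unfenced_alignment(0, 0, 2048)); /* 32768 */
        printf("%u\n", unfenced_alignment(1, 0, 512));  /* 32768 */
        return 0;
    }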
1456 | |||
1456 | int | 1457 | int |
1457 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | 1458 | i915_gem_mmap_gtt(struct drm_file *file, |
1458 | struct drm_file *file_priv) | 1459 | struct drm_device *dev, |
1460 | uint32_t handle, | ||
1461 | uint64_t *offset) | ||
1459 | { | 1462 | { |
1460 | struct drm_i915_gem_mmap_gtt *args = data; | 1463 | struct drm_i915_private *dev_priv = dev->dev_private; |
1461 | struct drm_gem_object *obj; | 1464 | struct drm_i915_gem_object *obj; |
1462 | struct drm_i915_gem_object *obj_priv; | ||
1463 | int ret; | 1465 | int ret; |
1464 | 1466 | ||
1465 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 1467 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
@@ -1469,130 +1471,224 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1469 | if (ret) | 1471 | if (ret) |
1470 | return ret; | 1472 | return ret; |
1471 | 1473 | ||
1472 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 1474 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
1473 | if (obj == NULL) { | 1475 | if (obj == NULL) { |
1474 | ret = -ENOENT; | 1476 | ret = -ENOENT; |
1475 | goto unlock; | 1477 | goto unlock; |
1476 | } | 1478 | } |
1477 | obj_priv = to_intel_bo(obj); | ||
1478 | 1479 | ||
1479 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 1480 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { |
1481 | ret = -E2BIG; | ||
1482 | goto unlock; | ||
1483 | } | ||
1484 | |||
1485 | if (obj->madv != I915_MADV_WILLNEED) { | ||
1480 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1486 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
1481 | ret = -EINVAL; | 1487 | ret = -EINVAL; |
1482 | goto out; | 1488 | goto out; |
1483 | } | 1489 | } |
1484 | 1490 | ||
1485 | if (!obj_priv->mmap_offset) { | 1491 | if (!obj->base.map_list.map) { |
1486 | ret = i915_gem_create_mmap_offset(obj); | 1492 | ret = i915_gem_create_mmap_offset(obj); |
1487 | if (ret) | 1493 | if (ret) |
1488 | goto out; | 1494 | goto out; |
1489 | } | 1495 | } |
1490 | 1496 | ||
1491 | args->offset = obj_priv->mmap_offset; | 1497 | *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; |
1492 | |||
1493 | /* | ||
1494 | * Pull it into the GTT so that we have a page list (makes the | ||
1495 | * initial fault faster and any subsequent flushing possible). | ||
1496 | */ | ||
1497 | if (!obj_priv->agp_mem) { | ||
1498 | ret = i915_gem_object_bind_to_gtt(obj, 0); | ||
1499 | if (ret) | ||
1500 | goto out; | ||
1501 | } | ||
1502 | 1498 | ||
1503 | out: | 1499 | out: |
1504 | drm_gem_object_unreference(obj); | 1500 | drm_gem_object_unreference(&obj->base); |
1505 | unlock: | 1501 | unlock: |
1506 | mutex_unlock(&dev->struct_mutex); | 1502 | mutex_unlock(&dev->struct_mutex); |
1507 | return ret; | 1503 | return ret; |
1508 | } | 1504 | } |
1509 | 1505 | ||
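The offset handed back to userspace above is a "fake" one: the map-list hash key scaled to a page boundary, which drm_gem_mmap() later shifts back down to find the object. A sketch of the encoding; the key value is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint64_t hash_key = 0x10234;              /* illustrative key */
        uint64_t offset = hash_key << PAGE_SHIFT; /* what userspace mmaps */

        printf("mmap offset:   0x%llx\n", (unsigned long long)offset);
        printf("recovered key: 0x%llx\n",
               (unsigned long long)(offset >> PAGE_SHIFT));
        return 0;
    }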
1510 | static void | 1506 | /** |
1511 | i915_gem_object_put_pages(struct drm_gem_object *obj) | 1507 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing |
1508 | * @dev: DRM device | ||
1509 | * @data: GTT mapping ioctl data | ||
1510 | * @file: GEM object info | ||
1511 | * | ||
1512 | * Simply returns the fake offset to userspace so it can mmap it. | ||
1513 | * The mmap call will end up in drm_gem_mmap(), which will set things | ||
1514 | * up so we can get faults in the handler above. | ||
1515 | * | ||
1516 | * The fault handler will take care of binding the object into the GTT | ||
1517 | * (since it may have been evicted to make room for something), allocating | ||
1518 | * a fence register, and mapping the appropriate aperture address into | ||
1519 | * userspace. | ||
1520 | */ | ||
1521 | int | ||
1522 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | ||
1523 | struct drm_file *file) | ||
1512 | { | 1524 | { |
1513 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1525 | struct drm_i915_gem_mmap_gtt *args = data; |
1514 | int page_count = obj->size / PAGE_SIZE; | ||
1515 | int i; | ||
1516 | 1526 | ||
1517 | BUG_ON(obj_priv->pages_refcount == 0); | 1527 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
1518 | BUG_ON(obj_priv->madv == __I915_MADV_PURGED); | 1528 | return -ENODEV; |
1519 | 1529 | ||
1520 | if (--obj_priv->pages_refcount != 0) | 1530 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); |
1521 | return; | 1531 | } |
1522 | 1532 | ||
1523 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
1524 | i915_gem_object_save_bit_17_swizzle(obj); | ||
1525 | 1533 | ||
1526 | if (obj_priv->madv == I915_MADV_DONTNEED) | 1534 | static int |
1527 | obj_priv->dirty = 0; | 1535 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, |
1536 | gfp_t gfpmask) | ||
1537 | { | ||
1538 | int page_count, i; | ||
1539 | struct address_space *mapping; | ||
1540 | struct inode *inode; | ||
1541 | struct page *page; | ||
1528 | 1542 | ||
1529 | for (i = 0; i < page_count; i++) { | 1543 | /* Get the list of pages out of our struct file. They'll be pinned |
1530 | if (obj_priv->dirty) | 1544 | * at this point until we release them. |
1531 | set_page_dirty(obj_priv->pages[i]); | 1545 | */ |
1546 | page_count = obj->base.size / PAGE_SIZE; | ||
1547 | BUG_ON(obj->pages != NULL); | ||
1548 | obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); | ||
1549 | if (obj->pages == NULL) | ||
1550 | return -ENOMEM; | ||
1532 | 1551 | ||
1533 | if (obj_priv->madv == I915_MADV_WILLNEED) | 1552 | inode = obj->base.filp->f_path.dentry->d_inode; |
1534 | mark_page_accessed(obj_priv->pages[i]); | 1553 | mapping = inode->i_mapping; |
1554 | for (i = 0; i < page_count; i++) { | ||
1555 | page = read_cache_page_gfp(mapping, i, | ||
1556 | GFP_HIGHUSER | | ||
1557 | __GFP_COLD | | ||
1558 | __GFP_RECLAIMABLE | | ||
1559 | gfpmask); | ||
1560 | if (IS_ERR(page)) | ||
1561 | goto err_pages; | ||
1535 | 1562 | ||
1536 | page_cache_release(obj_priv->pages[i]); | 1563 | obj->pages[i] = page; |
1537 | } | 1564 | } |
1538 | obj_priv->dirty = 0; | ||
1539 | 1565 | ||
1540 | drm_free_large(obj_priv->pages); | 1566 | if (obj->tiling_mode != I915_TILING_NONE) |
1541 | obj_priv->pages = NULL; | 1567 | i915_gem_object_do_bit_17_swizzle(obj); |
1568 | |||
1569 | return 0; | ||
1570 | |||
1571 | err_pages: | ||
1572 | while (i--) | ||
1573 | page_cache_release(obj->pages[i]); | ||
1574 | |||
1575 | drm_free_large(obj->pages); | ||
1576 | obj->pages = NULL; | ||
1577 | return PTR_ERR(page); | ||
1542 | } | 1578 | } |
1543 | 1579 | ||
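The err_pages unwind above uses the classic "while (i--)" idiom: release exactly the resources acquired so far, in reverse order, then fail. A self-contained demonstration, with malloc standing in for read_cache_page_gfp:

    #include <stdio.h>
    #include <stdlib.h>

    static int acquire_all(void **slots, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            slots[i] = malloc(64);
            if (!slots[i])
                goto err;
        }
        return 0;

    err:
        /* Frees slots[i-1] down to slots[0], skipping the failed slot. */
        while (i--)
            free(slots[i]);
        return -1;
    }

    int main(void)
    {
        void *slots[4];
        int i;

        if (acquire_all(slots, 4) == 0) {
            for (i = 0; i < 4; i++)
                free(slots[i]);
            puts("acquired and released");
        }
        return 0;
    }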
1544 | static uint32_t | 1580 | static void |
1545 | i915_gem_next_request_seqno(struct drm_device *dev, | 1581 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) |
1546 | struct intel_ring_buffer *ring) | ||
1547 | { | 1582 | { |
1548 | drm_i915_private_t *dev_priv = dev->dev_private; | 1583 | int page_count = obj->base.size / PAGE_SIZE; |
1584 | int i; | ||
1585 | |||
1586 | BUG_ON(obj->madv == __I915_MADV_PURGED); | ||
1587 | |||
1588 | if (obj->tiling_mode != I915_TILING_NONE) | ||
1589 | i915_gem_object_save_bit_17_swizzle(obj); | ||
1590 | |||
1591 | if (obj->madv == I915_MADV_DONTNEED) | ||
1592 | obj->dirty = 0; | ||
1593 | |||
1594 | for (i = 0; i < page_count; i++) { | ||
1595 | if (obj->dirty) | ||
1596 | set_page_dirty(obj->pages[i]); | ||
1597 | |||
1598 | if (obj->madv == I915_MADV_WILLNEED) | ||
1599 | mark_page_accessed(obj->pages[i]); | ||
1549 | 1600 | ||
1550 | ring->outstanding_lazy_request = true; | 1601 | page_cache_release(obj->pages[i]); |
1551 | return dev_priv->next_seqno; | 1602 | } |
1603 | obj->dirty = 0; | ||
1604 | |||
1605 | drm_free_large(obj->pages); | ||
1606 | obj->pages = NULL; | ||
1552 | } | 1607 | } |
1553 | 1608 | ||
1554 | static void | 1609 | void |
1555 | i915_gem_object_move_to_active(struct drm_gem_object *obj, | 1610 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
1556 | struct intel_ring_buffer *ring) | 1611 | struct intel_ring_buffer *ring, |
1612 | u32 seqno) | ||
1557 | { | 1613 | { |
1558 | struct drm_device *dev = obj->dev; | 1614 | struct drm_device *dev = obj->base.dev; |
1559 | struct drm_i915_private *dev_priv = dev->dev_private; | 1615 | struct drm_i915_private *dev_priv = dev->dev_private; |
1560 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1561 | uint32_t seqno = i915_gem_next_request_seqno(dev, ring); | ||
1562 | 1616 | ||
1563 | BUG_ON(ring == NULL); | 1617 | BUG_ON(ring == NULL); |
1564 | obj_priv->ring = ring; | 1618 | obj->ring = ring; |
1565 | 1619 | ||
1566 | /* Add a reference if we're newly entering the active list. */ | 1620 | /* Add a reference if we're newly entering the active list. */ |
1567 | if (!obj_priv->active) { | 1621 | if (!obj->active) { |
1568 | drm_gem_object_reference(obj); | 1622 | drm_gem_object_reference(&obj->base); |
1569 | obj_priv->active = 1; | 1623 | obj->active = 1; |
1570 | } | 1624 | } |
1571 | 1625 | ||
1572 | /* Move from whatever list we were on to the tail of execution. */ | 1626 | /* Move from whatever list we were on to the tail of execution. */ |
1573 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); | 1627 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); |
1574 | list_move_tail(&obj_priv->ring_list, &ring->active_list); | 1628 | list_move_tail(&obj->ring_list, &ring->active_list); |
1575 | obj_priv->last_rendering_seqno = seqno; | 1629 | |
1630 | obj->last_rendering_seqno = seqno; | ||
1631 | if (obj->fenced_gpu_access) { | ||
1632 | struct drm_i915_fence_reg *reg; | ||
1633 | |||
1634 | BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE); | ||
1635 | |||
1636 | obj->last_fenced_seqno = seqno; | ||
1637 | obj->last_fenced_ring = ring; | ||
1638 | |||
1639 | reg = &dev_priv->fence_regs[obj->fence_reg]; | ||
1640 | list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); ||
1641 | } | ||
1576 | } | 1642 | } |
1577 | 1643 | ||
1578 | static void | 1644 | static void |
1579 | i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | 1645 | i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) |
1580 | { | 1646 | { |
1581 | struct drm_device *dev = obj->dev; | 1647 | list_del_init(&obj->ring_list); |
1648 | obj->last_rendering_seqno = 0; | ||
1649 | } | ||
1650 | |||
1651 | static void | ||
1652 | i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) | ||
1653 | { | ||
1654 | struct drm_device *dev = obj->base.dev; | ||
1582 | drm_i915_private_t *dev_priv = dev->dev_private; | 1655 | drm_i915_private_t *dev_priv = dev->dev_private; |
1583 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1584 | 1656 | ||
1585 | BUG_ON(!obj_priv->active); | 1657 | BUG_ON(!obj->active); |
1586 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); | 1658 | list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); |
1587 | list_del_init(&obj_priv->ring_list); | 1659 | |
1588 | obj_priv->last_rendering_seqno = 0; | 1660 | i915_gem_object_move_off_active(obj); |
1661 | } | ||
1662 | |||
1663 | static void | ||
1664 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | ||
1665 | { | ||
1666 | struct drm_device *dev = obj->base.dev; | ||
1667 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1668 | |||
1669 | if (obj->pin_count != 0) | ||
1670 | list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); | ||
1671 | else | ||
1672 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | ||
1673 | |||
1674 | BUG_ON(!list_empty(&obj->gpu_write_list)); | ||
1675 | BUG_ON(!obj->active); | ||
1676 | obj->ring = NULL; | ||
1677 | |||
1678 | i915_gem_object_move_off_active(obj); | ||
1679 | obj->fenced_gpu_access = false; | ||
1680 | |||
1681 | obj->active = 0; | ||
1682 | obj->pending_gpu_write = false; | ||
1683 | drm_gem_object_unreference(&obj->base); | ||
1684 | |||
1685 | WARN_ON(i915_verify_lists(dev)); | ||
1589 | } | 1686 | } |
1590 | 1687 | ||
1591 | /* Immediately discard the backing storage */ | 1688 | /* Immediately discard the backing storage */ |
1592 | static void | 1689 | static void |
1593 | i915_gem_object_truncate(struct drm_gem_object *obj) | 1690 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) |
1594 | { | 1691 | { |
1595 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1596 | struct inode *inode; | 1692 | struct inode *inode; |
1597 | 1693 | ||
1598 | /* Our goal here is to return as much of the memory as | 1694 | /* Our goal here is to return as much of the memory as |
@@ -1601,42 +1697,18 @@ i915_gem_object_truncate(struct drm_gem_object *obj) | |||
1601 | * backing pages, *now*. Here we mirror the actions taken | 1697 | * backing pages, *now*. Here we mirror the actions taken |
1602 | * by shmem_delete_inode() to release the backing store. | 1698 | * by shmem_delete_inode() to release the backing store. |
1603 | */ | 1699 | */ |
1604 | inode = obj->filp->f_path.dentry->d_inode; | 1700 | inode = obj->base.filp->f_path.dentry->d_inode; |
1605 | truncate_inode_pages(inode->i_mapping, 0); | 1701 | truncate_inode_pages(inode->i_mapping, 0); |
1606 | if (inode->i_op->truncate_range) | 1702 | if (inode->i_op->truncate_range) |
1607 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); | 1703 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); |
1608 | 1704 | ||
1609 | obj_priv->madv = __I915_MADV_PURGED; | 1705 | obj->madv = __I915_MADV_PURGED; |
1610 | } | 1706 | } |
1611 | 1707 | ||
1612 | static inline int | 1708 | static inline int |
1613 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) | 1709 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) |
1614 | { | ||
1615 | return obj_priv->madv == I915_MADV_DONTNEED; | ||
1616 | } | ||
1617 | |||
1618 | static void | ||
1619 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | ||
1620 | { | 1710 | { |
1621 | struct drm_device *dev = obj->dev; | 1711 | return obj->madv == I915_MADV_DONTNEED; |
1622 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1623 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1624 | |||
1625 | if (obj_priv->pin_count != 0) | ||
1626 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); | ||
1627 | else | ||
1628 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | ||
1629 | list_del_init(&obj_priv->ring_list); | ||
1630 | |||
1631 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | ||
1632 | |||
1633 | obj_priv->last_rendering_seqno = 0; | ||
1634 | obj_priv->ring = NULL; | ||
1635 | if (obj_priv->active) { | ||
1636 | obj_priv->active = 0; | ||
1637 | drm_gem_object_unreference(obj); | ||
1638 | } | ||
1639 | WARN_ON(i915_verify_lists(dev)); | ||
1640 | } | 1712 | } |
1641 | 1713 | ||
1642 | static void | 1714 | static void |
@@ -1644,37 +1716,27 @@ i915_gem_process_flushing_list(struct drm_device *dev, | |||
1644 | uint32_t flush_domains, | 1716 | uint32_t flush_domains, |
1645 | struct intel_ring_buffer *ring) | 1717 | struct intel_ring_buffer *ring) |
1646 | { | 1718 | { |
1647 | drm_i915_private_t *dev_priv = dev->dev_private; | 1719 | struct drm_i915_gem_object *obj, *next; |
1648 | struct drm_i915_gem_object *obj_priv, *next; | ||
1649 | 1720 | ||
1650 | list_for_each_entry_safe(obj_priv, next, | 1721 | list_for_each_entry_safe(obj, next, |
1651 | &ring->gpu_write_list, | 1722 | &ring->gpu_write_list, |
1652 | gpu_write_list) { | 1723 | gpu_write_list) { |
1653 | struct drm_gem_object *obj = &obj_priv->base; | 1724 | if (obj->base.write_domain & flush_domains) { |
1654 | 1725 | uint32_t old_write_domain = obj->base.write_domain; | |
1655 | if (obj->write_domain & flush_domains) { | ||
1656 | uint32_t old_write_domain = obj->write_domain; | ||
1657 | 1726 | ||
1658 | obj->write_domain = 0; | 1727 | obj->base.write_domain = 0; |
1659 | list_del_init(&obj_priv->gpu_write_list); | 1728 | list_del_init(&obj->gpu_write_list); |
1660 | i915_gem_object_move_to_active(obj, ring); | 1729 | i915_gem_object_move_to_active(obj, ring, |
1661 | 1730 | i915_gem_next_request_seqno(dev, ring)); | |
1662 | /* update the fence lru list */ | ||
1663 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | ||
1664 | struct drm_i915_fence_reg *reg = | ||
1665 | &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
1666 | list_move_tail(®->lru_list, | ||
1667 | &dev_priv->mm.fence_list); | ||
1668 | } | ||
1669 | 1731 | ||
1670 | trace_i915_gem_object_change_domain(obj, | 1732 | trace_i915_gem_object_change_domain(obj, |
1671 | obj->read_domains, | 1733 | obj->base.read_domains, |
1672 | old_write_domain); | 1734 | old_write_domain); |
1673 | } | 1735 | } |
1674 | } | 1736 | } |
1675 | } | 1737 | } |
1676 | 1738 | ||
1677 | uint32_t | 1739 | int |
1678 | i915_add_request(struct drm_device *dev, | 1740 | i915_add_request(struct drm_device *dev, |
1679 | struct drm_file *file, | 1741 | struct drm_file *file, |
1680 | struct drm_i915_gem_request *request, | 1742 | struct drm_i915_gem_request *request, |
@@ -1684,17 +1746,17 @@ i915_add_request(struct drm_device *dev, | |||
1684 | struct drm_i915_file_private *file_priv = NULL; | 1746 | struct drm_i915_file_private *file_priv = NULL; |
1685 | uint32_t seqno; | 1747 | uint32_t seqno; |
1686 | int was_empty; | 1748 | int was_empty; |
1749 | int ret; | ||
1750 | |||
1751 | BUG_ON(request == NULL); | ||
1687 | 1752 | ||
1688 | if (file != NULL) | 1753 | if (file != NULL) |
1689 | file_priv = file->driver_priv; | 1754 | file_priv = file->driver_priv; |
1690 | 1755 | ||
1691 | if (request == NULL) { | 1756 | ret = ring->add_request(ring, &seqno); |
1692 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 1757 | if (ret) |
1693 | if (request == NULL) | 1758 | return ret; |
1694 | return 0; | ||
1695 | } | ||
1696 | 1759 | ||
1697 | seqno = ring->add_request(dev, ring, 0); | ||
1698 | ring->outstanding_lazy_request = false; | 1760 | ring->outstanding_lazy_request = false; |
1699 | 1761 | ||
1700 | request->seqno = seqno; | 1762 | request->seqno = seqno; |
@@ -1718,26 +1780,7 @@ i915_add_request(struct drm_device *dev, | |||
1718 | queue_delayed_work(dev_priv->wq, | 1780 | queue_delayed_work(dev_priv->wq, |
1719 | &dev_priv->mm.retire_work, HZ); | 1781 | &dev_priv->mm.retire_work, HZ); |
1720 | } | 1782 | } |
1721 | return seqno; | 1783 | return 0; |
1722 | } | ||
1723 | |||
1724 | /** | ||
1725 | * Command execution barrier | ||
1726 | * | ||
1727 | * Ensures that all commands in the ring are finished | ||
1728 | * before signalling the CPU | ||
1729 | */ | ||
1730 | static void | ||
1731 | i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) | ||
1732 | { | ||
1733 | uint32_t flush_domains = 0; | ||
1734 | |||
1735 | /* The sampler always gets flushed on i965 (sigh) */ | ||
1736 | if (INTEL_INFO(dev)->gen >= 4) | ||
1737 | flush_domains |= I915_GEM_DOMAIN_SAMPLER; | ||
1738 | |||
1739 | ring->flush(dev, ring, | ||
1740 | I915_GEM_DOMAIN_COMMAND, flush_domains); | ||
1741 | } | 1784 | } |
1742 | 1785 | ||
1743 | static inline void | 1786 | static inline void |
@@ -1770,62 +1813,76 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
1770 | } | 1813 | } |
1771 | 1814 | ||
1772 | while (!list_empty(&ring->active_list)) { | 1815 | while (!list_empty(&ring->active_list)) { |
1773 | struct drm_i915_gem_object *obj_priv; | 1816 | struct drm_i915_gem_object *obj; |
1817 | |||
1818 | obj = list_first_entry(&ring->active_list, | ||
1819 | struct drm_i915_gem_object, | ||
1820 | ring_list); | ||
1821 | |||
1822 | obj->base.write_domain = 0; | ||
1823 | list_del_init(&obj->gpu_write_list); | ||
1824 | i915_gem_object_move_to_inactive(obj); | ||
1825 | } | ||
1826 | } | ||
1827 | |||
1828 | static void i915_gem_reset_fences(struct drm_device *dev) | ||
1829 | { | ||
1830 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1831 | int i; | ||
1832 | |||
1833 | for (i = 0; i < 16; i++) { | ||
1834 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | ||
1835 | struct drm_i915_gem_object *obj = reg->obj; | ||
1836 | |||
1837 | if (!obj) | ||
1838 | continue; | ||
1774 | 1839 | ||
1775 | obj_priv = list_first_entry(&ring->active_list, | 1840 | if (obj->tiling_mode) |
1776 | struct drm_i915_gem_object, | 1841 | i915_gem_release_mmap(obj); |
1777 | ring_list); | ||
1778 | 1842 | ||
1779 | obj_priv->base.write_domain = 0; | 1843 | reg->obj->fence_reg = I915_FENCE_REG_NONE; |
1780 | list_del_init(&obj_priv->gpu_write_list); | 1844 | reg->obj->fenced_gpu_access = false; |
1781 | i915_gem_object_move_to_inactive(&obj_priv->base); | 1845 | reg->obj->last_fenced_seqno = 0; |
1846 | reg->obj->last_fenced_ring = NULL; | ||
1847 | i915_gem_clear_fence_reg(dev, reg); | ||
1782 | } | 1848 | } |
1783 | } | 1849 | } |
1784 | 1850 | ||
1785 | void i915_gem_reset(struct drm_device *dev) | 1851 | void i915_gem_reset(struct drm_device *dev) |
1786 | { | 1852 | { |
1787 | struct drm_i915_private *dev_priv = dev->dev_private; | 1853 | struct drm_i915_private *dev_priv = dev->dev_private; |
1788 | struct drm_i915_gem_object *obj_priv; | 1854 | struct drm_i915_gem_object *obj; |
1789 | int i; | 1855 | int i; |
1790 | 1856 | ||
1791 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); | 1857 | for (i = 0; i < I915_NUM_RINGS; i++) |
1792 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); | 1858 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); |
1793 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); | ||
1794 | 1859 | ||
1795 | /* Remove anything from the flushing lists. The GPU cache is likely | 1860 | /* Remove anything from the flushing lists. The GPU cache is likely |
1796 | * to be lost on reset along with the data, so simply move the | 1861 | * to be lost on reset along with the data, so simply move the |
1797 | * lost bo to the inactive list. | 1862 | * lost bo to the inactive list. |
1798 | */ | 1863 | */ |
1799 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 1864 | while (!list_empty(&dev_priv->mm.flushing_list)) { |
1800 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 1865 | obj = list_first_entry(&dev_priv->mm.flushing_list, |
1801 | struct drm_i915_gem_object, | 1866 | struct drm_i915_gem_object, |
1802 | mm_list); | 1867 | mm_list); |
1803 | 1868 | ||
1804 | obj_priv->base.write_domain = 0; | 1869 | obj->base.write_domain = 0; |
1805 | list_del_init(&obj_priv->gpu_write_list); | 1870 | list_del_init(&obj->gpu_write_list); |
1806 | i915_gem_object_move_to_inactive(&obj_priv->base); | 1871 | i915_gem_object_move_to_inactive(obj); |
1807 | } | 1872 | } |
1808 | 1873 | ||
1809 | /* Move everything out of the GPU domains to ensure we do any | 1874 | /* Move everything out of the GPU domains to ensure we do any |
1810 | * necessary invalidation upon reuse. | 1875 | * necessary invalidation upon reuse. |
1811 | */ | 1876 | */ |
1812 | list_for_each_entry(obj_priv, | 1877 | list_for_each_entry(obj, |
1813 | &dev_priv->mm.inactive_list, | 1878 | &dev_priv->mm.inactive_list, |
1814 | mm_list) | 1879 | mm_list) |
1815 | { | 1880 | { |
1816 | obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | 1881 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
1817 | } | 1882 | } |
1818 | 1883 | ||
1819 | /* The fence registers are invalidated so clear them out */ | 1884 | /* The fence registers are invalidated so clear them out */ |
1820 | for (i = 0; i < 16; i++) { | 1885 | i915_gem_reset_fences(dev); |
1821 | struct drm_i915_fence_reg *reg; | ||
1822 | |||
1823 | reg = &dev_priv->fence_regs[i]; | ||
1824 | if (!reg->obj) | ||
1825 | continue; | ||
1826 | |||
1827 | i915_gem_clear_fence_reg(reg->obj); | ||
1828 | } | ||
1829 | } | 1886 | } |
1830 | 1887 | ||
1831 | /** | 1888 | /** |
@@ -1837,6 +1894,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
1837 | { | 1894 | { |
1838 | drm_i915_private_t *dev_priv = dev->dev_private; | 1895 | drm_i915_private_t *dev_priv = dev->dev_private; |
1839 | uint32_t seqno; | 1896 | uint32_t seqno; |
1897 | int i; | ||
1840 | 1898 | ||
1841 | if (!ring->status_page.page_addr || | 1899 | if (!ring->status_page.page_addr || |
1842 | list_empty(&ring->request_list)) | 1900 | list_empty(&ring->request_list)) |
@@ -1844,7 +1902,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
1844 | 1902 | ||
1845 | WARN_ON(i915_verify_lists(dev)); | 1903 | WARN_ON(i915_verify_lists(dev)); |
1846 | 1904 | ||
1847 | seqno = ring->get_seqno(dev, ring); | 1905 | seqno = ring->get_seqno(ring); |
1906 | |||
1907 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) | ||
1908 | if (seqno >= ring->sync_seqno[i]) | ||
1909 | ring->sync_seqno[i] = 0; | ||
1910 | |||
1848 | while (!list_empty(&ring->request_list)) { | 1911 | while (!list_empty(&ring->request_list)) { |
1849 | struct drm_i915_gem_request *request; | 1912 | struct drm_i915_gem_request *request; |
1850 | 1913 | ||
@@ -1866,18 +1929,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
1866 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 1929 | * by the ringbuffer to the flushing/inactive lists as appropriate. |
1867 | */ | 1930 | */ |
1868 | while (!list_empty(&ring->active_list)) { | 1931 | while (!list_empty(&ring->active_list)) { |
1869 | struct drm_gem_object *obj; | 1932 | struct drm_i915_gem_object *obj; |
1870 | struct drm_i915_gem_object *obj_priv; | ||
1871 | 1933 | ||
1872 | obj_priv = list_first_entry(&ring->active_list, | 1934 | obj = list_first_entry(&ring->active_list, |
1873 | struct drm_i915_gem_object, | 1935 | struct drm_i915_gem_object, |
1874 | ring_list); | 1936 | ring_list); |
1875 | 1937 | ||
1876 | if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) | 1938 | if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) |
1877 | break; | 1939 | break; |
1878 | 1940 | ||
1879 | obj = &obj_priv->base; | 1941 | if (obj->base.write_domain != 0) |
1880 | if (obj->write_domain != 0) | ||
1881 | i915_gem_object_move_to_flushing(obj); | 1942 | i915_gem_object_move_to_flushing(obj); |
1882 | else | 1943 | else |
1883 | i915_gem_object_move_to_inactive(obj); | 1944 | i915_gem_object_move_to_inactive(obj); |
@@ -1885,7 +1946,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, | |||
1885 | 1946 | ||
1886 | if (unlikely(dev_priv->trace_irq_seqno && | 1947 | if (unlikely(dev_priv->trace_irq_seqno && |
1887 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { | 1948 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { |
1888 | ring->user_irq_put(dev, ring); | 1949 | ring->irq_put(ring); |
1889 | dev_priv->trace_irq_seqno = 0; | 1950 | dev_priv->trace_irq_seqno = 0; |
1890 | } | 1951 | } |
1891 | 1952 | ||
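The retire loop above leans on i915_seqno_passed(), defined elsewhere in the driver; it is commonly implemented with the signed-difference idiom so 32-bit wraparound is handled, roughly:

    #include <stdio.h>
    #include <stdint.h>

    static int seqno_passed(uint32_t seq, uint32_t target)
    {
        /* Works modulo 2^32: true iff seq is at or ahead of target. */
        return (int32_t)(seq - target) >= 0;
    }

    int main(void)
    {
        printf("%d\n", seqno_passed(10, 5));          /* 1 */
        printf("%d\n", seqno_passed(5, 10));          /* 0 */
        printf("%d\n", seqno_passed(3, 0xfffffff0u)); /* 1: across the wrap */
        return 0;
    }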
@@ -1896,24 +1957,24 @@ void | |||
1896 | i915_gem_retire_requests(struct drm_device *dev) | 1957 | i915_gem_retire_requests(struct drm_device *dev) |
1897 | { | 1958 | { |
1898 | drm_i915_private_t *dev_priv = dev->dev_private; | 1959 | drm_i915_private_t *dev_priv = dev->dev_private; |
1960 | int i; | ||
1899 | 1961 | ||
1900 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { | 1962 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { |
1901 | struct drm_i915_gem_object *obj_priv, *tmp; | 1963 | struct drm_i915_gem_object *obj, *next; |
1902 | 1964 | ||
1903 | /* We must be careful that during unbind() we do not | 1965 | /* We must be careful that during unbind() we do not |
1904 | * accidentally infinitely recurse into retire requests. | 1966 | * accidentally infinitely recurse into retire requests. |
1905 | * Currently: | 1967 | * Currently: |
1906 | * retire -> free -> unbind -> wait -> retire_ring | 1968 | * retire -> free -> unbind -> wait -> retire_ring |
1907 | */ | 1969 | */ |
1908 | list_for_each_entry_safe(obj_priv, tmp, | 1970 | list_for_each_entry_safe(obj, next, |
1909 | &dev_priv->mm.deferred_free_list, | 1971 | &dev_priv->mm.deferred_free_list, |
1910 | mm_list) | 1972 | mm_list) |
1911 | i915_gem_free_object_tail(&obj_priv->base); | 1973 | i915_gem_free_object_tail(obj); |
1912 | } | 1974 | } |
1913 | 1975 | ||
1914 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); | 1976 | for (i = 0; i < I915_NUM_RINGS; i++) |
1915 | i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); | 1977 | i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]); |
1916 | i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); | ||
1917 | } | 1978 | } |
1918 | 1979 | ||
1919 | static void | 1980 | static void |
@@ -1921,6 +1982,8 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1921 | { | 1982 | { |
1922 | drm_i915_private_t *dev_priv; | 1983 | drm_i915_private_t *dev_priv; |
1923 | struct drm_device *dev; | 1984 | struct drm_device *dev; |
1985 | bool idle; | ||
1986 | int i; | ||
1924 | 1987 | ||
1925 | dev_priv = container_of(work, drm_i915_private_t, | 1988 | dev_priv = container_of(work, drm_i915_private_t, |
1926 | mm.retire_work.work); | 1989 | mm.retire_work.work); |
@@ -1934,11 +1997,31 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1934 | 1997 | ||
1935 | i915_gem_retire_requests(dev); | 1998 | i915_gem_retire_requests(dev); |
1936 | 1999 | ||
1937 | if (!dev_priv->mm.suspended && | 2000 | /* Send a periodic flush down the ring so we don't hold onto GEM |
1938 | (!list_empty(&dev_priv->render_ring.request_list) || | 2001 | * objects indefinitely. |
1939 | !list_empty(&dev_priv->bsd_ring.request_list) || | 2002 | */ |
1940 | !list_empty(&dev_priv->blt_ring.request_list))) | 2003 | idle = true; |
2004 | for (i = 0; i < I915_NUM_RINGS; i++) { | ||
2005 | struct intel_ring_buffer *ring = &dev_priv->ring[i]; | ||
2006 | |||
2007 | if (!list_empty(&ring->gpu_write_list)) { | ||
2008 | struct drm_i915_gem_request *request; | ||
2009 | int ret; | ||
2010 | |||
2011 | ret = i915_gem_flush_ring(dev, ring, 0, | ||
2012 | I915_GEM_GPU_DOMAINS); | ||
2013 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
2014 | if (ret || request == NULL || | ||
2015 | i915_add_request(dev, NULL, request, ring)) | ||
2016 | kfree(request); | ||
2017 | } | ||
2018 | |||
2019 | idle &= list_empty(&ring->request_list); | ||
2020 | } | ||
2021 | |||
2022 | if (!dev_priv->mm.suspended && !idle) | ||
1941 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 2023 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
2024 | |||
1942 | mutex_unlock(&dev->struct_mutex); | 2025 | mutex_unlock(&dev->struct_mutex); |
1943 | } | 2026 | } |
1944 | 2027 | ||
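The reworked retire handler folds per-ring state into a single idle flag before deciding whether to requeue itself; schematically, with invented pending counts:

    #include <stdio.h>

    #define NUM_RINGS 3

    int main(void)
    {
        int pending[NUM_RINGS] = { 0, 2, 0 };   /* illustrative counts */
        int idle = 1, i;

        /* One ring with work is enough to keep the worker alive. */
        for (i = 0; i < NUM_RINGS; i++)
            idle &= (pending[i] == 0);

        puts(idle ? "idle: let the worker die"
                  : "busy: requeue retire work");
        return 0;
    }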
@@ -1955,14 +2038,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
1955 | if (atomic_read(&dev_priv->mm.wedged)) | 2038 | if (atomic_read(&dev_priv->mm.wedged)) |
1956 | return -EAGAIN; | 2039 | return -EAGAIN; |
1957 | 2040 | ||
1958 | if (ring->outstanding_lazy_request) { | 2041 | if (seqno == ring->outstanding_lazy_request) { |
1959 | seqno = i915_add_request(dev, NULL, NULL, ring); | 2042 | struct drm_i915_gem_request *request; |
1960 | if (seqno == 0) | 2043 | |
2044 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
2045 | if (request == NULL) | ||
1961 | return -ENOMEM; | 2046 | return -ENOMEM; |
2047 | |||
2048 | ret = i915_add_request(dev, NULL, request, ring); | ||
2049 | if (ret) { | ||
2050 | kfree(request); | ||
2051 | return ret; | ||
2052 | } | ||
2053 | |||
2054 | seqno = request->seqno; | ||
1962 | } | 2055 | } |
1963 | BUG_ON(seqno == dev_priv->next_seqno); | ||
1964 | 2056 | ||
1965 | if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { | 2057 | if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { |
1966 | if (HAS_PCH_SPLIT(dev)) | 2058 | if (HAS_PCH_SPLIT(dev)) |
1967 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 2059 | ier = I915_READ(DEIER) | I915_READ(GTIER); |
1968 | else | 2060 | else |
@@ -1976,21 +2068,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
1976 | 2068 | ||
1977 | trace_i915_gem_request_wait_begin(dev, seqno); | 2069 | trace_i915_gem_request_wait_begin(dev, seqno); |
1978 | 2070 | ||
1979 | ring->waiting_gem_seqno = seqno; | 2071 | ring->waiting_seqno = seqno; |
1980 | ring->user_irq_get(dev, ring); | 2072 | if (ring->irq_get(ring)) { |
1981 | if (interruptible) | 2073 | if (interruptible) |
1982 | ret = wait_event_interruptible(ring->irq_queue, | 2074 | ret = wait_event_interruptible(ring->irq_queue, |
1983 | i915_seqno_passed( | 2075 | i915_seqno_passed(ring->get_seqno(ring), seqno) |
1984 | ring->get_seqno(dev, ring), seqno) | 2076 | || atomic_read(&dev_priv->mm.wedged)); |
1985 | || atomic_read(&dev_priv->mm.wedged)); | 2077 | else |
1986 | else | 2078 | wait_event(ring->irq_queue, |
1987 | wait_event(ring->irq_queue, | 2079 | i915_seqno_passed(ring->get_seqno(ring), seqno) |
1988 | i915_seqno_passed( | 2080 | || atomic_read(&dev_priv->mm.wedged)); |
1989 | ring->get_seqno(dev, ring), seqno) | ||
1990 | || atomic_read(&dev_priv->mm.wedged)); | ||
1991 | 2081 | ||
1992 | ring->user_irq_put(dev, ring); | 2082 | ring->irq_put(ring); |
1993 | ring->waiting_gem_seqno = 0; | 2083 | } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring), |
2084 | seqno) || | ||
2085 | atomic_read(&dev_priv->mm.wedged), 3000)) | ||
2086 | ret = -EBUSY; | ||
2087 | ring->waiting_seqno = 0; | ||
1994 | 2088 | ||
1995 | trace_i915_gem_request_wait_end(dev, seqno); | 2089 | trace_i915_gem_request_wait_end(dev, seqno); |
1996 | } | 2090 | } |
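Note the rewritten wait path above: `irq_get()` can now fail, and when it does the code falls back to polling the seqno with a 3000 ms bound via `wait_for()`, returning -EBUSY on timeout. A toy model of that get/put bracket with a polling fallback (the helpers here are stand-ins, not the real macros):

```c
#include <stdbool.h>
#include <errno.h>
#include <stdio.h>

static bool irq_get(void) { return false; /* pretend IRQs are unusable */ }
static void irq_put(void) { }

static int wait_for_seqno(unsigned *hw_seqno, unsigned want)
{
	if (irq_get()) {
		/* normal path: sleep on the ring's waitqueue until the
		 * seqno passes or the GPU is declared wedged */
		irq_put();
		return 0;
	}

	/* fallback: bounded polling (3000 ms in the real code) */
	for (int tries = 0; tries < 3; tries++) {
		if (*hw_seqno >= want)
			return 0;
		(*hw_seqno)++;		/* simulate hardware progress */
	}
	return -EBUSY;
}

int main(void)
{
	unsigned hw = 0;
	printf("wait returned %d\n", wait_for_seqno(&hw, 2));
	return 0;
}
```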
@@ -1999,7 +2093,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | |||
1999 | 2093 | ||
2000 | if (ret && ret != -ERESTARTSYS) | 2094 | if (ret && ret != -ERESTARTSYS) |
2001 | DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", | 2095 | DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", |
2002 | __func__, ret, seqno, ring->get_seqno(dev, ring), | 2096 | __func__, ret, seqno, ring->get_seqno(ring), |
2003 | dev_priv->next_seqno); | 2097 | dev_priv->next_seqno); |
2004 | 2098 | ||
2005 | /* Directly dispatch request retiring. While we have the work queue | 2099 | /* Directly dispatch request retiring. While we have the work queue |
@@ -2024,70 +2118,30 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, | |||
2024 | return i915_do_wait_request(dev, seqno, 1, ring); | 2118 | return i915_do_wait_request(dev, seqno, 1, ring); |
2025 | } | 2119 | } |
2026 | 2120 | ||
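Earlier in this function the outstanding lazy request is now materialized explicitly: allocate a `drm_i915_gem_request`, submit it through `i915_add_request()` (which now returns an error code), and wait on `request->seqno`. A sketch of that allocate/submit/unwind ownership pattern, with userspace stand-ins for kzalloc/kfree:

```c
#include <stdlib.h>
#include <errno.h>
#include <stdio.h>

struct request { unsigned seqno; };

static int add_request(struct request *rq)
{
	static unsigned next_seqno = 1;
	rq->seqno = next_seqno++;	/* pretend submission succeeded;
					 * the ring now owns rq */
	return 0;
}

static int materialize_lazy_request(unsigned *seqno_out)
{
	struct request *rq = calloc(1, sizeof(*rq));
	if (rq == NULL)
		return -ENOMEM;

	if (add_request(rq)) {
		free(rq);		/* on failure the caller frees */
		return -EIO;
	}

	*seqno_out = rq->seqno;		/* this is what we wait on */
	return 0;
}

int main(void)
{
	unsigned seqno;
	if (materialize_lazy_request(&seqno) == 0)
		printf("waiting for seqno %u\n", seqno);
	return 0;
}
```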
2027 | static void | ||
2028 | i915_gem_flush_ring(struct drm_device *dev, | ||
2029 | struct drm_file *file_priv, | ||
2030 | struct intel_ring_buffer *ring, | ||
2031 | uint32_t invalidate_domains, | ||
2032 | uint32_t flush_domains) | ||
2033 | { | ||
2034 | ring->flush(dev, ring, invalidate_domains, flush_domains); | ||
2035 | i915_gem_process_flushing_list(dev, flush_domains, ring); | ||
2036 | } | ||
2037 | |||
2038 | static void | ||
2039 | i915_gem_flush(struct drm_device *dev, | ||
2040 | struct drm_file *file_priv, | ||
2041 | uint32_t invalidate_domains, | ||
2042 | uint32_t flush_domains, | ||
2043 | uint32_t flush_rings) | ||
2044 | { | ||
2045 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2046 | |||
2047 | if (flush_domains & I915_GEM_DOMAIN_CPU) | ||
2048 | drm_agp_chipset_flush(dev); | ||
2049 | |||
2050 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | ||
2051 | if (flush_rings & RING_RENDER) | ||
2052 | i915_gem_flush_ring(dev, file_priv, | ||
2053 | &dev_priv->render_ring, | ||
2054 | invalidate_domains, flush_domains); | ||
2055 | if (flush_rings & RING_BSD) | ||
2056 | i915_gem_flush_ring(dev, file_priv, | ||
2057 | &dev_priv->bsd_ring, | ||
2058 | invalidate_domains, flush_domains); | ||
2059 | if (flush_rings & RING_BLT) | ||
2060 | i915_gem_flush_ring(dev, file_priv, | ||
2061 | &dev_priv->blt_ring, | ||
2062 | invalidate_domains, flush_domains); | ||
2063 | } | ||
2064 | } | ||
2065 | |||
2066 | /** | 2121 | /** |
2067 | * Ensures that all rendering to the object has completed and the object is | 2122 | * Ensures that all rendering to the object has completed and the object is |
2068 | * safe to unbind from the GTT or access from the CPU. | 2123 | * safe to unbind from the GTT or access from the CPU. |
2069 | */ | 2124 | */ |
2070 | static int | 2125 | int |
2071 | i915_gem_object_wait_rendering(struct drm_gem_object *obj, | 2126 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
2072 | bool interruptible) | 2127 | bool interruptible) |
2073 | { | 2128 | { |
2074 | struct drm_device *dev = obj->dev; | 2129 | struct drm_device *dev = obj->base.dev; |
2075 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2076 | int ret; | 2130 | int ret; |
2077 | 2131 | ||
2078 | /* This function only exists to support waiting for existing rendering, | 2132 | /* This function only exists to support waiting for existing rendering, |
2079 | * not for emitting required flushes. | 2133 | * not for emitting required flushes. |
2080 | */ | 2134 | */ |
2081 | BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); | 2135 | BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); |
2082 | 2136 | ||
2083 | /* If there is rendering queued on the buffer being evicted, wait for | 2137 | /* If there is rendering queued on the buffer being evicted, wait for |
2084 | * it. | 2138 | * it. |
2085 | */ | 2139 | */ |
2086 | if (obj_priv->active) { | 2140 | if (obj->active) { |
2087 | ret = i915_do_wait_request(dev, | 2141 | ret = i915_do_wait_request(dev, |
2088 | obj_priv->last_rendering_seqno, | 2142 | obj->last_rendering_seqno, |
2089 | interruptible, | 2143 | interruptible, |
2090 | obj_priv->ring); | 2144 | obj->ring); |
2091 | if (ret) | 2145 | if (ret) |
2092 | return ret; | 2146 | return ret; |
2093 | } | 2147 | } |
@@ -2099,17 +2153,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, | |||
2099 | * Unbinds an object from the GTT aperture. | 2153 | * Unbinds an object from the GTT aperture. |
2100 | */ | 2154 | */ |
2101 | int | 2155 | int |
2102 | i915_gem_object_unbind(struct drm_gem_object *obj) | 2156 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) |
2103 | { | 2157 | { |
2104 | struct drm_device *dev = obj->dev; | ||
2105 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2106 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2107 | int ret = 0; | 2158 | int ret = 0; |
2108 | 2159 | ||
2109 | if (obj_priv->gtt_space == NULL) | 2160 | if (obj->gtt_space == NULL) |
2110 | return 0; | 2161 | return 0; |
2111 | 2162 | ||
2112 | if (obj_priv->pin_count != 0) { | 2163 | if (obj->pin_count != 0) { |
2113 | DRM_ERROR("Attempting to unbind pinned buffer\n"); | 2164 | DRM_ERROR("Attempting to unbind pinned buffer\n"); |
2114 | return -EINVAL; | 2165 | return -EINVAL; |
2115 | } | 2166 | } |
@@ -2132,27 +2183,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2132 | */ | 2183 | */ |
2133 | if (ret) { | 2184 | if (ret) { |
2134 | i915_gem_clflush_object(obj); | 2185 | i915_gem_clflush_object(obj); |
2135 | obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; | 2186 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
2136 | } | 2187 | } |
2137 | 2188 | ||
2138 | /* release the fence reg _after_ flushing */ | 2189 | /* release the fence reg _after_ flushing */ |
2139 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 2190 | ret = i915_gem_object_put_fence(obj); |
2140 | i915_gem_clear_fence_reg(obj); | 2191 | if (ret == -ERESTARTSYS) |
2141 | 2192 | return ret; | |
2142 | drm_unbind_agp(obj_priv->agp_mem); | ||
2143 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | ||
2144 | 2193 | ||
2145 | i915_gem_object_put_pages(obj); | 2194 | i915_gem_gtt_unbind_object(obj); |
2146 | BUG_ON(obj_priv->pages_refcount); | 2195 | i915_gem_object_put_pages_gtt(obj); |
2147 | 2196 | ||
2148 | i915_gem_info_remove_gtt(dev_priv, obj->size); | 2197 | list_del_init(&obj->gtt_list); |
2149 | list_del_init(&obj_priv->mm_list); | 2198 | list_del_init(&obj->mm_list); |
2199 | /* Avoid an unnecessary call to unbind on rebind. */ | ||
2200 | obj->map_and_fenceable = true; | ||
2150 | 2201 | ||
2151 | drm_mm_put_block(obj_priv->gtt_space); | 2202 | drm_mm_put_block(obj->gtt_space); |
2152 | obj_priv->gtt_space = NULL; | 2203 | obj->gtt_space = NULL; |
2153 | obj_priv->gtt_offset = 0; | 2204 | obj->gtt_offset = 0; |
2154 | 2205 | ||
2155 | if (i915_gem_object_is_purgeable(obj_priv)) | 2206 | if (i915_gem_object_is_purgeable(obj)) |
2156 | i915_gem_object_truncate(obj); | 2207 | i915_gem_object_truncate(obj); |
2157 | 2208 | ||
2158 | trace_i915_gem_object_unbind(obj); | 2209 | trace_i915_gem_object_unbind(obj); |
@@ -2160,14 +2211,37 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2160 | return ret; | 2211 | return ret; |
2161 | } | 2212 | } |
2162 | 2213 | ||
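Unbind's teardown order matters in the new code: release the fence (which can fail with -ERESTARTSYS) after the domain flush, then tear down the GTT mapping and backing pages, and finally mark the object `map_and_fenceable` so a fresh bind skips a pointless unbind. A minimal ordering sketch with stand-in helpers:

```c
#include <stdio.h>

static int  put_fence(void)  { printf("release fence reg\n"); return 0; }
static void gtt_unbind(void) { printf("clear GTT entries\n"); }
static void put_pages(void)  { printf("release backing pages\n"); }

int main(void)
{
	int ret = put_fence();	/* may fail, e.g. -ERESTARTSYS */
	if (ret)
		return ret;	/* bail before touching the mapping */

	gtt_unbind();		/* only after the fence is gone */
	put_pages();
	/* map_and_fenceable = true: rebind need not unbind again */
	return 0;
}
```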
2214 | int | ||
2215 | i915_gem_flush_ring(struct drm_device *dev, | ||
2216 | struct intel_ring_buffer *ring, | ||
2217 | uint32_t invalidate_domains, | ||
2218 | uint32_t flush_domains) | ||
2219 | { | ||
2220 | int ret; | ||
2221 | |||
2222 | ret = ring->flush(ring, invalidate_domains, flush_domains); | ||
2223 | if (ret) | ||
2224 | return ret; | ||
2225 | |||
2226 | i915_gem_process_flushing_list(dev, flush_domains, ring); | ||
2227 | return 0; | ||
2228 | } | ||
2229 | |||
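`i915_gem_flush_ring()` also changes from `void` to `int` here, so a failed `ring->flush()` propagates instead of being ignored, and the flushing list is only processed after a successful flush. A small model of that propagate-then-continue shape:

```c
#include <errno.h>
#include <stdio.h>

static int ring_flush(int fail) { return fail ? -EIO : 0; }

static int flush_ring(int fail)
{
	int ret = ring_flush(fail);
	if (ret)
		return ret;	/* propagate; skip list processing */

	printf("process flushing list\n");
	return 0;
}

int main(void)
{
	printf("ok=%d err=%d\n", flush_ring(0), flush_ring(1));
	return 0;
}
```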
2163 | static int i915_ring_idle(struct drm_device *dev, | 2230 | static int i915_ring_idle(struct drm_device *dev, |
2164 | struct intel_ring_buffer *ring) | 2231 | struct intel_ring_buffer *ring) |
2165 | { | 2232 | { |
2233 | int ret; | ||
2234 | |||
2166 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) | 2235 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) |
2167 | return 0; | 2236 | return 0; |
2168 | 2237 | ||
2169 | i915_gem_flush_ring(dev, NULL, ring, | 2238 | if (!list_empty(&ring->gpu_write_list)) { |
2170 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 2239 | ret = i915_gem_flush_ring(dev, ring, |
2240 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | ||
2241 | if (ret) | ||
2242 | return ret; | ||
2243 | } | ||
2244 | |||
2171 | return i915_wait_request(dev, | 2245 | return i915_wait_request(dev, |
2172 | i915_gem_next_request_seqno(dev, ring), | 2246 | i915_gem_next_request_seqno(dev, ring), |
2173 | ring); | 2247 | ring); |
@@ -2178,7 +2252,7 @@ i915_gpu_idle(struct drm_device *dev) | |||
2178 | { | 2252 | { |
2179 | drm_i915_private_t *dev_priv = dev->dev_private; | 2253 | drm_i915_private_t *dev_priv = dev->dev_private; |
2180 | bool lists_empty; | 2254 | bool lists_empty; |
2181 | int ret; | 2255 | int ret, i; |
2182 | 2256 | ||
2183 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | 2257 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && |
2184 | list_empty(&dev_priv->mm.active_list)); | 2258 | list_empty(&dev_priv->mm.active_list)); |
@@ -2186,258 +2260,305 @@ i915_gpu_idle(struct drm_device *dev) | |||
2186 | return 0; | 2260 | return 0; |
2187 | 2261 | ||
2188 | /* Flush everything onto the inactive list. */ | 2262 | /* Flush everything onto the inactive list. */ |
2189 | ret = i915_ring_idle(dev, &dev_priv->render_ring); | 2263 | for (i = 0; i < I915_NUM_RINGS; i++) { |
2190 | if (ret) | 2264 | ret = i915_ring_idle(dev, &dev_priv->ring[i]); |
2191 | return ret; | 2265 | if (ret) |
2192 | 2266 | return ret; | |
2193 | ret = i915_ring_idle(dev, &dev_priv->bsd_ring); | ||
2194 | if (ret) | ||
2195 | return ret; | ||
2196 | |||
2197 | ret = i915_ring_idle(dev, &dev_priv->blt_ring); | ||
2198 | if (ret) | ||
2199 | return ret; | ||
2200 | |||
2201 | return 0; | ||
2202 | } | ||
2203 | |||
2204 | static int | ||
2205 | i915_gem_object_get_pages(struct drm_gem_object *obj, | ||
2206 | gfp_t gfpmask) | ||
2207 | { | ||
2208 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2209 | int page_count, i; | ||
2210 | struct address_space *mapping; | ||
2211 | struct inode *inode; | ||
2212 | struct page *page; | ||
2213 | |||
2214 | BUG_ON(obj_priv->pages_refcount | ||
2215 | == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); | ||
2216 | |||
2217 | if (obj_priv->pages_refcount++ != 0) | ||
2218 | return 0; | ||
2219 | |||
2220 | /* Get the list of pages out of our struct file. They'll be pinned | ||
2221 | * at this point until we release them. | ||
2222 | */ | ||
2223 | page_count = obj->size / PAGE_SIZE; | ||
2224 | BUG_ON(obj_priv->pages != NULL); | ||
2225 | obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); | ||
2226 | if (obj_priv->pages == NULL) { | ||
2227 | obj_priv->pages_refcount--; | ||
2228 | return -ENOMEM; | ||
2229 | } | ||
2230 | |||
2231 | inode = obj->filp->f_path.dentry->d_inode; | ||
2232 | mapping = inode->i_mapping; | ||
2233 | for (i = 0; i < page_count; i++) { | ||
2234 | page = read_cache_page_gfp(mapping, i, | ||
2235 | GFP_HIGHUSER | | ||
2236 | __GFP_COLD | | ||
2237 | __GFP_RECLAIMABLE | | ||
2238 | gfpmask); | ||
2239 | if (IS_ERR(page)) | ||
2240 | goto err_pages; | ||
2241 | |||
2242 | obj_priv->pages[i] = page; | ||
2243 | } | 2267 | } |
2244 | 2268 | ||
2245 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
2246 | i915_gem_object_do_bit_17_swizzle(obj); | ||
2247 | |||
2248 | return 0; | 2269 | return 0; |
2249 | |||
2250 | err_pages: | ||
2251 | while (i--) | ||
2252 | page_cache_release(obj_priv->pages[i]); | ||
2253 | |||
2254 | drm_free_large(obj_priv->pages); | ||
2255 | obj_priv->pages = NULL; | ||
2256 | obj_priv->pages_refcount--; | ||
2257 | return PTR_ERR(page); | ||
2258 | } | 2270 | } |
2259 | 2271 | ||
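The removed `i915_gem_object_get_pages()` above (its role moves to `i915_gem_object_get_pages_gtt()` elsewhere in this commit) shows the classic partial-failure unwind: on error, release exactly the pages already pinned, in reverse. The idiom, modeled standalone with malloc in place of the page cache:

```c
#include <stdlib.h>
#include <stdio.h>

int main(void)
{
	void *pages[8];
	int i, count = 8;

	for (i = 0; i < count; i++) {
		pages[i] = (i == 5) ? NULL : malloc(4096); /* inject a failure */
		if (pages[i] == NULL)
			goto err;
	}
	printf("all pages pinned\n");
	return 0;

err:
	while (i--)		/* undo only what was acquired */
		free(pages[i]);
	printf("failed at page %d, unwound cleanly\n", 5);
	return 1;
}
```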
2260 | static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) | 2272 | static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, |
2273 | struct intel_ring_buffer *pipelined) | ||
2261 | { | 2274 | { |
2262 | struct drm_gem_object *obj = reg->obj; | 2275 | struct drm_device *dev = obj->base.dev; |
2263 | struct drm_device *dev = obj->dev; | ||
2264 | drm_i915_private_t *dev_priv = dev->dev_private; | 2276 | drm_i915_private_t *dev_priv = dev->dev_private; |
2265 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2277 | u32 size = obj->gtt_space->size; |
2266 | int regnum = obj_priv->fence_reg; | 2278 | int regnum = obj->fence_reg; |
2267 | uint64_t val; | 2279 | uint64_t val; |
2268 | 2280 | ||
2269 | val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & | 2281 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2270 | 0xfffff000) << 32; | 2282 | 0xfffff000) << 32; |
2271 | val |= obj_priv->gtt_offset & 0xfffff000; | 2283 | val |= obj->gtt_offset & 0xfffff000; |
2272 | val |= (uint64_t)((obj_priv->stride / 128) - 1) << | 2284 | val |= (uint64_t)((obj->stride / 128) - 1) << |
2273 | SANDYBRIDGE_FENCE_PITCH_SHIFT; | 2285 | SANDYBRIDGE_FENCE_PITCH_SHIFT; |
2274 | 2286 | ||
2275 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2287 | if (obj->tiling_mode == I915_TILING_Y) |
2276 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2288 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2277 | val |= I965_FENCE_REG_VALID; | 2289 | val |= I965_FENCE_REG_VALID; |
2278 | 2290 | ||
2279 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); | 2291 | if (pipelined) { |
2292 | int ret = intel_ring_begin(pipelined, 6); | ||
2293 | if (ret) | ||
2294 | return ret; | ||
2295 | |||
2296 | intel_ring_emit(pipelined, MI_NOOP); | ||
2297 | intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); | ||
2298 | intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8); | ||
2299 | intel_ring_emit(pipelined, (u32)val); | ||
2300 | intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4); | ||
2301 | intel_ring_emit(pipelined, (u32)(val >> 32)); | ||
2302 | intel_ring_advance(pipelined); | ||
2303 | } else | ||
2304 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); | ||
2305 | |||
2306 | return 0; | ||
2280 | } | 2307 | } |
2281 | 2308 | ||
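The pipelined branch added above emits the fence update as ring commands (`MI_LOAD_REGISTER_IMM(2)`) instead of an immediate MMIO write, splitting the 64-bit fence value into two dword register writes. How that split works, with illustrative values (the offsets are stand-ins):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x00000001fffff001ull;	/* example fence value  */
	uint32_t reg = 0x100000;		/* stand-in register    */

	/* the ring payload carries (reg, lo) then (reg + 4, hi) */
	printf("emit 0x%08x <- 0x%08x\n", reg,     (uint32_t)val);
	printf("emit 0x%08x <- 0x%08x\n", reg + 4, (uint32_t)(val >> 32));
	return 0;
}
```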
2282 | static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | 2309 | static int i965_write_fence_reg(struct drm_i915_gem_object *obj, |
2310 | struct intel_ring_buffer *pipelined) | ||
2283 | { | 2311 | { |
2284 | struct drm_gem_object *obj = reg->obj; | 2312 | struct drm_device *dev = obj->base.dev; |
2285 | struct drm_device *dev = obj->dev; | ||
2286 | drm_i915_private_t *dev_priv = dev->dev_private; | 2313 | drm_i915_private_t *dev_priv = dev->dev_private; |
2287 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2314 | u32 size = obj->gtt_space->size; |
2288 | int regnum = obj_priv->fence_reg; | 2315 | int regnum = obj->fence_reg; |
2289 | uint64_t val; | 2316 | uint64_t val; |
2290 | 2317 | ||
2291 | val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & | 2318 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2292 | 0xfffff000) << 32; | 2319 | 0xfffff000) << 32; |
2293 | val |= obj_priv->gtt_offset & 0xfffff000; | 2320 | val |= obj->gtt_offset & 0xfffff000; |
2294 | val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; | 2321 | val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; |
2295 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2322 | if (obj->tiling_mode == I915_TILING_Y) |
2296 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2323 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2297 | val |= I965_FENCE_REG_VALID; | 2324 | val |= I965_FENCE_REG_VALID; |
2298 | 2325 | ||
2299 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); | 2326 | if (pipelined) { |
2327 | int ret = intel_ring_begin(pipelined, 6); | ||
2328 | if (ret) | ||
2329 | return ret; | ||
2330 | |||
2331 | intel_ring_emit(pipelined, MI_NOOP); | ||
2332 | intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); | ||
2333 | intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8); | ||
2334 | intel_ring_emit(pipelined, (u32)val); | ||
2335 | intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4); | ||
2336 | intel_ring_emit(pipelined, (u32)(val >> 32)); | ||
2337 | intel_ring_advance(pipelined); | ||
2338 | } else | ||
2339 | I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); | ||
2340 | |||
2341 | return 0; | ||
2300 | } | 2342 | } |
2301 | 2343 | ||
2302 | static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | 2344 | static int i915_write_fence_reg(struct drm_i915_gem_object *obj, |
2345 | struct intel_ring_buffer *pipelined) | ||
2303 | { | 2346 | { |
2304 | struct drm_gem_object *obj = reg->obj; | 2347 | struct drm_device *dev = obj->base.dev; |
2305 | struct drm_device *dev = obj->dev; | ||
2306 | drm_i915_private_t *dev_priv = dev->dev_private; | 2348 | drm_i915_private_t *dev_priv = dev->dev_private; |
2307 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2349 | u32 size = obj->gtt_space->size; |
2308 | int regnum = obj_priv->fence_reg; | 2350 | u32 fence_reg, val, pitch_val; |
2309 | int tile_width; | 2351 | int tile_width; |
2310 | uint32_t fence_reg, val; | ||
2311 | uint32_t pitch_val; | ||
2312 | 2352 | ||
2313 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | 2353 | if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || |
2314 | (obj_priv->gtt_offset & (obj->size - 1))) { | 2354 | (size & -size) != size || |
2315 | WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", | 2355 | (obj->gtt_offset & (size - 1)), |
2316 | __func__, obj_priv->gtt_offset, obj->size); | 2356 | "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", |
2317 | return; | 2357 | obj->gtt_offset, obj->map_and_fenceable, size)) |
2318 | } | 2358 | return -EINVAL; |
2319 | 2359 | ||
2320 | if (obj_priv->tiling_mode == I915_TILING_Y && | 2360 | if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) |
2321 | HAS_128_BYTE_Y_TILING(dev)) | ||
2322 | tile_width = 128; | 2361 | tile_width = 128; |
2323 | else | 2362 | else |
2324 | tile_width = 512; | 2363 | tile_width = 512; |
2325 | 2364 | ||
2326 | /* Note: pitch better be a power of two tile widths */ | 2365 | /* Note: pitch better be a power of two tile widths */ |
2327 | pitch_val = obj_priv->stride / tile_width; | 2366 | pitch_val = obj->stride / tile_width; |
2328 | pitch_val = ffs(pitch_val) - 1; | 2367 | pitch_val = ffs(pitch_val) - 1; |
2329 | 2368 | ||
2330 | if (obj_priv->tiling_mode == I915_TILING_Y && | 2369 | val = obj->gtt_offset; |
2331 | HAS_128_BYTE_Y_TILING(dev)) | 2370 | if (obj->tiling_mode == I915_TILING_Y) |
2332 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | ||
2333 | else | ||
2334 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | ||
2335 | |||
2336 | val = obj_priv->gtt_offset; | ||
2337 | if (obj_priv->tiling_mode == I915_TILING_Y) | ||
2338 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2371 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
2339 | val |= I915_FENCE_SIZE_BITS(obj->size); | 2372 | val |= I915_FENCE_SIZE_BITS(size); |
2340 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 2373 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; |
2341 | val |= I830_FENCE_REG_VALID; | 2374 | val |= I830_FENCE_REG_VALID; |
2342 | 2375 | ||
2343 | if (regnum < 8) | 2376 | fence_reg = obj->fence_reg; |
2344 | fence_reg = FENCE_REG_830_0 + (regnum * 4); | 2377 | if (fence_reg < 8) |
2378 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; | ||
2345 | else | 2379 | else |
2346 | fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); | 2380 | fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; |
2347 | I915_WRITE(fence_reg, val); | 2381 | |
2382 | if (pipelined) { | ||
2383 | int ret = intel_ring_begin(pipelined, 4); | ||
2384 | if (ret) | ||
2385 | return ret; | ||
2386 | |||
2387 | intel_ring_emit(pipelined, MI_NOOP); | ||
2388 | intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); | ||
2389 | intel_ring_emit(pipelined, fence_reg); | ||
2390 | intel_ring_emit(pipelined, val); | ||
2391 | intel_ring_advance(pipelined); | ||
2392 | } else | ||
2393 | I915_WRITE(fence_reg, val); | ||
2394 | |||
2395 | return 0; | ||
2348 | } | 2396 | } |
2349 | 2397 | ||
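The new WARN condition rejects non-power-of-two sizes with `(size & -size) != size`: for a power of two, `size & -size` isolates the single set bit and equals `size` itself. A quick standalone check of the predicate:

```c
#include <stdint.h>
#include <stdio.h>

static int is_pot(uint32_t size)
{
	return size != 0 && (size & -size) == size;
}

int main(void)
{
	uint32_t tests[] = { 4096, 6144, 65536, 0 };

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%u -> %s\n", tests[i], is_pot(tests[i]) ? "pot" : "not pot");
	return 0;
}
```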
2350 | static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | 2398 | static int i830_write_fence_reg(struct drm_i915_gem_object *obj, |
2399 | struct intel_ring_buffer *pipelined) | ||
2351 | { | 2400 | { |
2352 | struct drm_gem_object *obj = reg->obj; | 2401 | struct drm_device *dev = obj->base.dev; |
2353 | struct drm_device *dev = obj->dev; | ||
2354 | drm_i915_private_t *dev_priv = dev->dev_private; | 2402 | drm_i915_private_t *dev_priv = dev->dev_private; |
2355 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2403 | u32 size = obj->gtt_space->size; |
2356 | int regnum = obj_priv->fence_reg; | 2404 | int regnum = obj->fence_reg; |
2357 | uint32_t val; | 2405 | uint32_t val; |
2358 | uint32_t pitch_val; | 2406 | uint32_t pitch_val; |
2359 | uint32_t fence_size_bits; | ||
2360 | 2407 | ||
2361 | if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || | 2408 | if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || |
2362 | (obj_priv->gtt_offset & (obj->size - 1))) { | 2409 | (size & -size) != size || |
2363 | WARN(1, "%s: object 0x%08x not 512K or size aligned\n", | 2410 | (obj->gtt_offset & (size - 1)), |
2364 | __func__, obj_priv->gtt_offset); | 2411 | "object 0x%08x not 512K or pot-size 0x%08x aligned\n", |
2365 | return; | 2412 | obj->gtt_offset, size)) |
2366 | } | 2413 | return -EINVAL; |
2367 | 2414 | ||
2368 | pitch_val = obj_priv->stride / 128; | 2415 | pitch_val = obj->stride / 128; |
2369 | pitch_val = ffs(pitch_val) - 1; | 2416 | pitch_val = ffs(pitch_val) - 1; |
2370 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | ||
2371 | 2417 | ||
2372 | val = obj_priv->gtt_offset; | 2418 | val = obj->gtt_offset; |
2373 | if (obj_priv->tiling_mode == I915_TILING_Y) | 2419 | if (obj->tiling_mode == I915_TILING_Y) |
2374 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 2420 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
2375 | fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); | 2421 | val |= I830_FENCE_SIZE_BITS(size); |
2376 | WARN_ON(fence_size_bits & ~0x00000f00); | ||
2377 | val |= fence_size_bits; | ||
2378 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 2422 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; |
2379 | val |= I830_FENCE_REG_VALID; | 2423 | val |= I830_FENCE_REG_VALID; |
2380 | 2424 | ||
2381 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); | 2425 | if (pipelined) { |
2426 | int ret = intel_ring_begin(pipelined, 4); | ||
2427 | if (ret) | ||
2428 | return ret; | ||
2429 | |||
2430 | intel_ring_emit(pipelined, MI_NOOP); | ||
2431 | intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); | ||
2432 | intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4); | ||
2433 | intel_ring_emit(pipelined, val); | ||
2434 | intel_ring_advance(pipelined); | ||
2435 | } else | ||
2436 | I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); | ||
2437 | |||
2438 | return 0; | ||
2382 | } | 2439 | } |
2383 | 2440 | ||
2384 | static int i915_find_fence_reg(struct drm_device *dev, | 2441 | static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) |
2385 | bool interruptible) | 2442 | { |
2443 | return i915_seqno_passed(ring->get_seqno(ring), seqno); | ||
2444 | } | ||
2445 | |||
2446 | static int | ||
2447 | i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, | ||
2448 | struct intel_ring_buffer *pipelined, | ||
2449 | bool interruptible) | ||
2450 | { | ||
2451 | int ret; | ||
2452 | |||
2453 | if (obj->fenced_gpu_access) { | ||
2454 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | ||
2455 | ret = i915_gem_flush_ring(obj->base.dev, | ||
2456 | obj->last_fenced_ring, | ||
2457 | 0, obj->base.write_domain); | ||
2458 | if (ret) | ||
2459 | return ret; | ||
2460 | } | ||
2461 | |||
2462 | obj->fenced_gpu_access = false; | ||
2463 | } | ||
2464 | |||
2465 | if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { | ||
2466 | if (!ring_passed_seqno(obj->last_fenced_ring, | ||
2467 | obj->last_fenced_seqno)) { | ||
2468 | ret = i915_do_wait_request(obj->base.dev, | ||
2469 | obj->last_fenced_seqno, | ||
2470 | interruptible, | ||
2471 | obj->last_fenced_ring); | ||
2472 | if (ret) | ||
2473 | return ret; | ||
2474 | } | ||
2475 | |||
2476 | obj->last_fenced_seqno = 0; | ||
2477 | obj->last_fenced_ring = NULL; | ||
2478 | } | ||
2479 | |||
2480 | /* Ensure that all CPU reads are completed before installing a fence | ||
2481 | * and all writes before removing the fence. | ||
2482 | */ | ||
2483 | if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) | ||
2484 | mb(); | ||
2485 | |||
2486 | return 0; | ||
2487 | } | ||
2488 | |||
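`i915_gem_object_flush_fence()` above centralizes the wait discipline for fenced objects: flush any pending GPU writes first, then wait for the last fenced seqno only when the fence was last used on a different ring than the one being pipelined. A control-flow sketch with simplified state (the real function ends with `mb()` to order CPU accesses against the fence change):

```c
#include <stdio.h>

struct obj {
	int fenced_gpu_access;
	unsigned last_fenced_seqno;
	int last_fenced_ring;		/* 0 = none */
};

static void flush_fence(struct obj *o, int pipelined_ring)
{
	if (o->fenced_gpu_access) {
		printf("flush GPU write domain\n");
		o->fenced_gpu_access = 0;
	}

	if (o->last_fenced_seqno && pipelined_ring != o->last_fenced_ring) {
		printf("wait for seqno %u on ring %d\n",
		       o->last_fenced_seqno, o->last_fenced_ring);
		o->last_fenced_seqno = 0;
		o->last_fenced_ring = 0;
	}
}

int main(void)
{
	struct obj o = { 1, 42, 1 };
	flush_fence(&o, 2);		/* switching rings forces the wait */
	return 0;
}
```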
2489 | int | ||
2490 | i915_gem_object_put_fence(struct drm_i915_gem_object *obj) | ||
2491 | { | ||
2492 | int ret; | ||
2493 | |||
2494 | if (obj->tiling_mode) | ||
2495 | i915_gem_release_mmap(obj); | ||
2496 | |||
2497 | ret = i915_gem_object_flush_fence(obj, NULL, true); | ||
2498 | if (ret) | ||
2499 | return ret; | ||
2500 | |||
2501 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | ||
2502 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
2503 | i915_gem_clear_fence_reg(obj->base.dev, | ||
2504 | &dev_priv->fence_regs[obj->fence_reg]); | ||
2505 | |||
2506 | obj->fence_reg = I915_FENCE_REG_NONE; | ||
2507 | } | ||
2508 | |||
2509 | return 0; | ||
2510 | } | ||
2511 | |||
2512 | static struct drm_i915_fence_reg * | ||
2513 | i915_find_fence_reg(struct drm_device *dev, | ||
2514 | struct intel_ring_buffer *pipelined) | ||
2386 | { | 2515 | { |
2387 | struct drm_i915_fence_reg *reg = NULL; | ||
2388 | struct drm_i915_gem_object *obj_priv = NULL; | ||
2389 | struct drm_i915_private *dev_priv = dev->dev_private; | 2516 | struct drm_i915_private *dev_priv = dev->dev_private; |
2390 | struct drm_gem_object *obj = NULL; | 2517 | struct drm_i915_fence_reg *reg, *first, *avail; |
2391 | int i, avail, ret; | 2518 | int i; |
2392 | 2519 | ||
2393 | /* First try to find a free reg */ | 2520 | /* First try to find a free reg */ |
2394 | avail = 0; | 2521 | avail = NULL; |
2395 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { | 2522 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { |
2396 | reg = &dev_priv->fence_regs[i]; | 2523 | reg = &dev_priv->fence_regs[i]; |
2397 | if (!reg->obj) | 2524 | if (!reg->obj) |
2398 | return i; | 2525 | return reg; |
2399 | 2526 | ||
2400 | obj_priv = to_intel_bo(reg->obj); | 2527 | if (!reg->obj->pin_count) |
2401 | if (!obj_priv->pin_count) | 2528 | avail = reg; |
2402 | avail++; | ||
2403 | } | 2529 | } |
2404 | 2530 | ||
2405 | if (avail == 0) | 2531 | if (avail == NULL) |
2406 | return -ENOSPC; | 2532 | return NULL; |
2407 | 2533 | ||
2408 | /* None available, try to steal one or wait for a user to finish */ | 2534 | /* None available, try to steal one or wait for a user to finish */ |
2409 | i = I915_FENCE_REG_NONE; | 2535 | avail = first = NULL; |
2410 | list_for_each_entry(reg, &dev_priv->mm.fence_list, | 2536 | list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { |
2411 | lru_list) { | 2537 | if (reg->obj->pin_count) |
2412 | obj = reg->obj; | ||
2413 | obj_priv = to_intel_bo(obj); | ||
2414 | |||
2415 | if (obj_priv->pin_count) | ||
2416 | continue; | 2538 | continue; |
2417 | 2539 | ||
2418 | /* found one! */ | 2540 | if (first == NULL) |
2419 | i = obj_priv->fence_reg; | 2541 | first = reg; |
2420 | break; | ||
2421 | } | ||
2422 | 2542 | ||
2423 | BUG_ON(i == I915_FENCE_REG_NONE); | 2543 | if (!pipelined || |
2544 | !reg->obj->last_fenced_ring || | ||
2545 | reg->obj->last_fenced_ring == pipelined) { | ||
2546 | avail = reg; | ||
2547 | break; | ||
2548 | } | ||
2549 | } | ||
2424 | 2550 | ||
2425 | /* We only have a reference on obj from the active list. put_fence_reg | 2551 | if (avail == NULL) |
2426 | * might drop that one, causing a use-after-free in it. So hold a | 2552 | avail = first; |
2427 | * private reference to obj like the other callers of put_fence_reg | ||
2428 | * (set_tiling ioctl) do. */ | ||
2429 | drm_gem_object_reference(obj); | ||
2430 | ret = i915_gem_object_put_fence_reg(obj, interruptible); | ||
2431 | drm_gem_object_unreference(obj); | ||
2432 | if (ret != 0) | ||
2433 | return ret; | ||
2434 | 2553 | ||
2435 | return i; | 2554 | return avail; |
2436 | } | 2555 | } |
2437 | 2556 | ||
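`i915_find_fence_reg()` now returns a register pointer rather than an index and encodes a two-pass policy: any free register wins outright; otherwise steal an unpinned one, preferring a register last fenced on the requested ring. Simplified to array order (the driver walks an LRU list, and the ring preference is omitted here), the shape is:

```c
#include <stdio.h>

struct fence_reg { int in_use; int pinned; };

static struct fence_reg *find_fence(struct fence_reg *regs, int n)
{
	struct fence_reg *avail = NULL;

	for (int i = 0; i < n; i++) {
		if (!regs[i].in_use)
			return &regs[i];	/* a free register wins */
		if (!regs[i].pinned && avail == NULL)
			avail = &regs[i];	/* first steal candidate */
	}
	return avail;				/* NULL maps to -ENOSPC */
}

int main(void)
{
	struct fence_reg regs[3] = { { 1, 1 }, { 1, 0 }, { 1, 1 } };
	struct fence_reg *r = find_fence(regs, 3);

	printf("stealing register %ld\n", r ? (long)(r - regs) : -1L);
	return 0;
}
```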
2438 | /** | 2557 | /** |
2439 | * i915_gem_object_get_fence_reg - set up a fence reg for an object | 2558 | * i915_gem_object_get_fence - set up a fence reg for an object |
2440 | * @obj: object to map through a fence reg | 2559 | * @obj: object to map through a fence reg |
2560 | * @pipelined: ring on which to queue the change, or NULL for CPU access | ||
2561 | * @interruptible: must we wait uninterruptibly for the register to retire? | ||
2441 | * | 2562 | * |
2442 | * When mapping objects through the GTT, userspace wants to be able to write | 2563 | * When mapping objects through the GTT, userspace wants to be able to write |
2443 | * to them without having to worry about swizzling if the object is tiled. | 2564 | * to them without having to worry about swizzling if the object is tiled. |
@@ -2449,72 +2570,141 @@ static int i915_find_fence_reg(struct drm_device *dev, | |||
2449 | * and tiling format. | 2570 | * and tiling format. |
2450 | */ | 2571 | */ |
2451 | int | 2572 | int |
2452 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | 2573 | i915_gem_object_get_fence(struct drm_i915_gem_object *obj, |
2453 | bool interruptible) | 2574 | struct intel_ring_buffer *pipelined, |
2575 | bool interruptible) | ||
2454 | { | 2576 | { |
2455 | struct drm_device *dev = obj->dev; | 2577 | struct drm_device *dev = obj->base.dev; |
2456 | struct drm_i915_private *dev_priv = dev->dev_private; | 2578 | struct drm_i915_private *dev_priv = dev->dev_private; |
2457 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2579 | struct drm_i915_fence_reg *reg; |
2458 | struct drm_i915_fence_reg *reg = NULL; | ||
2459 | int ret; | 2580 | int ret; |
2460 | 2581 | ||
2461 | /* Just update our place in the LRU if our fence is getting used. */ | 2582 | /* XXX disable pipelining. There are bugs. Shocking. */ |
2462 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 2583 | pipelined = NULL; |
2463 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2584 | |
2585 | /* Just update our place in the LRU if our fence is getting reused. */ | ||
2586 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | ||
2587 | reg = &dev_priv->fence_regs[obj->fence_reg]; | ||
2464 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | 2588 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); |
2589 | |||
2590 | if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) | ||
2591 | pipelined = NULL; | ||
2592 | |||
2593 | if (!pipelined) { | ||
2594 | if (reg->setup_seqno) { | ||
2595 | if (!ring_passed_seqno(obj->last_fenced_ring, | ||
2596 | reg->setup_seqno)) { | ||
2597 | ret = i915_do_wait_request(obj->base.dev, | ||
2598 | reg->setup_seqno, | ||
2599 | interruptible, | ||
2600 | obj->last_fenced_ring); | ||
2601 | if (ret) | ||
2602 | return ret; | ||
2603 | } | ||
2604 | |||
2605 | reg->setup_seqno = 0; | ||
2606 | } | ||
2607 | } else if (obj->last_fenced_ring && | ||
2608 | obj->last_fenced_ring != pipelined) { | ||
2609 | ret = i915_gem_object_flush_fence(obj, | ||
2610 | pipelined, | ||
2611 | interruptible); | ||
2612 | if (ret) | ||
2613 | return ret; | ||
2614 | } else if (obj->tiling_changed) { | ||
2615 | if (obj->fenced_gpu_access) { | ||
2616 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | ||
2617 | ret = i915_gem_flush_ring(obj->base.dev, obj->ring, | ||
2618 | 0, obj->base.write_domain); | ||
2619 | if (ret) | ||
2620 | return ret; | ||
2621 | } | ||
2622 | |||
2623 | obj->fenced_gpu_access = false; | ||
2624 | } | ||
2625 | } | ||
2626 | |||
2627 | if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) | ||
2628 | pipelined = NULL; | ||
2629 | BUG_ON(!pipelined && reg->setup_seqno); | ||
2630 | |||
2631 | if (obj->tiling_changed) { | ||
2632 | if (pipelined) { | ||
2633 | reg->setup_seqno = | ||
2634 | i915_gem_next_request_seqno(dev, pipelined); | ||
2635 | obj->last_fenced_seqno = reg->setup_seqno; | ||
2636 | obj->last_fenced_ring = pipelined; | ||
2637 | } | ||
2638 | goto update; | ||
2639 | } | ||
2640 | |||
2465 | return 0; | 2641 | return 0; |
2466 | } | 2642 | } |
2467 | 2643 | ||
2468 | switch (obj_priv->tiling_mode) { | 2644 | reg = i915_find_fence_reg(dev, pipelined); |
2469 | case I915_TILING_NONE: | 2645 | if (reg == NULL) |
2470 | WARN(1, "allocating a fence for non-tiled object?\n"); | 2646 | return -ENOSPC; |
2471 | break; | ||
2472 | case I915_TILING_X: | ||
2473 | if (!obj_priv->stride) | ||
2474 | return -EINVAL; | ||
2475 | WARN((obj_priv->stride & (512 - 1)), | ||
2476 | "object 0x%08x is X tiled but has non-512B pitch\n", | ||
2477 | obj_priv->gtt_offset); | ||
2478 | break; | ||
2479 | case I915_TILING_Y: | ||
2480 | if (!obj_priv->stride) | ||
2481 | return -EINVAL; | ||
2482 | WARN((obj_priv->stride & (128 - 1)), | ||
2483 | "object 0x%08x is Y tiled but has non-128B pitch\n", | ||
2484 | obj_priv->gtt_offset); | ||
2485 | break; | ||
2486 | } | ||
2487 | 2647 | ||
2488 | ret = i915_find_fence_reg(dev, interruptible); | 2648 | ret = i915_gem_object_flush_fence(obj, pipelined, interruptible); |
2489 | if (ret < 0) | 2649 | if (ret) |
2490 | return ret; | 2650 | return ret; |
2491 | 2651 | ||
2492 | obj_priv->fence_reg = ret; | 2652 | if (reg->obj) { |
2493 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 2653 | struct drm_i915_gem_object *old = reg->obj; |
2494 | list_add_tail(®->lru_list, &dev_priv->mm.fence_list); | 2654 | |
2655 | drm_gem_object_reference(&old->base); | ||
2656 | |||
2657 | if (old->tiling_mode) | ||
2658 | i915_gem_release_mmap(old); | ||
2659 | |||
2660 | ret = i915_gem_object_flush_fence(old, | ||
2661 | pipelined, | ||
2662 | interruptible); | ||
2663 | if (ret) { | ||
2664 | drm_gem_object_unreference(&old->base); | ||
2665 | return ret; | ||
2666 | } | ||
2667 | |||
2668 | if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0) | ||
2669 | pipelined = NULL; | ||
2670 | |||
2671 | old->fence_reg = I915_FENCE_REG_NONE; | ||
2672 | old->last_fenced_ring = pipelined; | ||
2673 | old->last_fenced_seqno = | ||
2674 | pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; | ||
2675 | |||
2676 | drm_gem_object_unreference(&old->base); | ||
2677 | } else if (obj->last_fenced_seqno == 0) | ||
2678 | pipelined = NULL; | ||
2495 | 2679 | ||
2496 | reg->obj = obj; | 2680 | reg->obj = obj; |
2681 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | ||
2682 | obj->fence_reg = reg - dev_priv->fence_regs; | ||
2683 | obj->last_fenced_ring = pipelined; | ||
2497 | 2684 | ||
2685 | reg->setup_seqno = | ||
2686 | pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; | ||
2687 | obj->last_fenced_seqno = reg->setup_seqno; | ||
2688 | |||
2689 | update: | ||
2690 | obj->tiling_changed = false; | ||
2498 | switch (INTEL_INFO(dev)->gen) { | 2691 | switch (INTEL_INFO(dev)->gen) { |
2499 | case 6: | 2692 | case 6: |
2500 | sandybridge_write_fence_reg(reg); | 2693 | ret = sandybridge_write_fence_reg(obj, pipelined); |
2501 | break; | 2694 | break; |
2502 | case 5: | 2695 | case 5: |
2503 | case 4: | 2696 | case 4: |
2504 | i965_write_fence_reg(reg); | 2697 | ret = i965_write_fence_reg(obj, pipelined); |
2505 | break; | 2698 | break; |
2506 | case 3: | 2699 | case 3: |
2507 | i915_write_fence_reg(reg); | 2700 | ret = i915_write_fence_reg(obj, pipelined); |
2508 | break; | 2701 | break; |
2509 | case 2: | 2702 | case 2: |
2510 | i830_write_fence_reg(reg); | 2703 | ret = i830_write_fence_reg(obj, pipelined); |
2511 | break; | 2704 | break; |
2512 | } | 2705 | } |
2513 | 2706 | ||
2514 | trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, | 2707 | return ret; |
2515 | obj_priv->tiling_mode); | ||
2516 | |||
2517 | return 0; | ||
2518 | } | 2708 | } |
2519 | 2709 | ||
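Two things stand out in the rewritten `i915_gem_object_get_fence()`: pipelining is immediately disabled (`pipelined = NULL`, flagged "XXX ... There are bugs."), and the final fence write is dispatched per hardware generation. A stand-in for that gen switch:

```c
#include <stdio.h>

static void write_fence(int gen)
{
	switch (gen) {
	case 6:  printf("gen6: Sandy Bridge fence layout\n"); break;
	case 5:
	case 4:  printf("gen4/5: i965 fence layout\n");       break;
	case 3:  printf("gen3: i915 fence layout\n");         break;
	case 2:  printf("gen2: i830 fence layout\n");         break;
	}
}

int main(void)
{
	for (int gen = 2; gen <= 6; gen++)
		write_fence(gen);
	return 0;
}
```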
2520 | /** | 2710 | /** |
@@ -2522,154 +2712,125 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | |||
2522 | * @obj: object to clear | 2712 | * @obj: object to clear |
2523 | * | 2713 | * |
2524 | * Zeroes out the fence register itself and clears out the associated | 2714 | * Zeroes out the fence register itself and clears out the associated |
2525 | * data structures in dev_priv and obj_priv. | 2715 | * data structures in dev_priv and obj. |
2526 | */ | 2716 | */ |
2527 | static void | 2717 | static void |
2528 | i915_gem_clear_fence_reg(struct drm_gem_object *obj) | 2718 | i915_gem_clear_fence_reg(struct drm_device *dev, |
2719 | struct drm_i915_fence_reg *reg) | ||
2529 | { | 2720 | { |
2530 | struct drm_device *dev = obj->dev; | ||
2531 | drm_i915_private_t *dev_priv = dev->dev_private; | 2721 | drm_i915_private_t *dev_priv = dev->dev_private; |
2532 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2722 | uint32_t fence_reg = reg - dev_priv->fence_regs; |
2533 | struct drm_i915_fence_reg *reg = | ||
2534 | &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
2535 | uint32_t fence_reg; | ||
2536 | 2723 | ||
2537 | switch (INTEL_INFO(dev)->gen) { | 2724 | switch (INTEL_INFO(dev)->gen) { |
2538 | case 6: | 2725 | case 6: |
2539 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 2726 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); |
2540 | (obj_priv->fence_reg * 8), 0); | ||
2541 | break; | 2727 | break; |
2542 | case 5: | 2728 | case 5: |
2543 | case 4: | 2729 | case 4: |
2544 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 2730 | I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0); |
2545 | break; | 2731 | break; |
2546 | case 3: | 2732 | case 3: |
2547 | if (obj_priv->fence_reg >= 8) | 2733 | if (fence_reg >= 8) |
2548 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; | 2734 | fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; |
2549 | else | 2735 | else |
2550 | case 2: | 2736 | case 2: |
2551 | fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; | 2737 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; |
2552 | 2738 | ||
2553 | I915_WRITE(fence_reg, 0); | 2739 | I915_WRITE(fence_reg, 0); |
2554 | break; | 2740 | break; |
2555 | } | 2741 | } |
2556 | 2742 | ||
2557 | reg->obj = NULL; | ||
2558 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | ||
2559 | list_del_init(®->lru_list); | 2743 | list_del_init(®->lru_list); |
2560 | } | 2744 | reg->obj = NULL; |
2561 | 2745 | reg->setup_seqno = 0; | |
2562 | /** | ||
2563 | * i915_gem_object_put_fence_reg - waits on outstanding fenced access | ||
2564 | * to the buffer to finish, and then resets the fence register. | ||
2565 | * @obj: tiled object holding a fence register. | ||
2566 | * @bool: whether the wait upon the fence is interruptible | ||
2567 | * | ||
2568 | * Zeroes out the fence register itself and clears out the associated | ||
2569 | * data structures in dev_priv and obj_priv. | ||
2570 | */ | ||
2571 | int | ||
2572 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | ||
2573 | bool interruptible) | ||
2574 | { | ||
2575 | struct drm_device *dev = obj->dev; | ||
2576 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2577 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2578 | struct drm_i915_fence_reg *reg; | ||
2579 | |||
2580 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | ||
2581 | return 0; | ||
2582 | |||
2583 | /* If we've changed tiling, GTT-mappings of the object | ||
2584 | * need to re-fault to ensure that the correct fence register | ||
2585 | * setup is in place. | ||
2586 | */ | ||
2587 | i915_gem_release_mmap(obj); | ||
2588 | |||
2589 | /* On the i915, GPU access to tiled buffers is via a fence, | ||
2590 | * therefore we must wait for any outstanding access to complete | ||
2591 | * before clearing the fence. | ||
2592 | */ | ||
2593 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | ||
2594 | if (reg->gpu) { | ||
2595 | int ret; | ||
2596 | |||
2597 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); | ||
2598 | if (ret) | ||
2599 | return ret; | ||
2600 | |||
2601 | ret = i915_gem_object_wait_rendering(obj, interruptible); | ||
2602 | if (ret) | ||
2603 | return ret; | ||
2604 | |||
2605 | reg->gpu = false; | ||
2606 | } | ||
2607 | |||
2608 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2609 | i915_gem_clear_fence_reg(obj); | ||
2610 | |||
2611 | return 0; | ||
2612 | } | 2746 | } |
2613 | 2747 | ||
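`i915_gem_clear_fence_reg()` now takes the register itself and recovers the index by pointer arithmetic (`reg - dev_priv->fence_regs`) instead of reading it off the object. The arithmetic in isolation:

```c
#include <stdio.h>

struct fence_reg { int setup_seqno; };

int main(void)
{
	struct fence_reg regs[16];
	struct fence_reg *reg = &regs[5];

	/* element difference, not byte difference */
	printf("fence index = %ld\n", (long)(reg - regs));
	return 0;
}
```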
2614 | /** | 2748 | /** |
2615 | * Finds free space in the GTT aperture and binds the object there. | 2749 | * Finds free space in the GTT aperture and binds the object there. |
2616 | */ | 2750 | */ |
2617 | static int | 2751 | static int |
2618 | i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | 2752 | i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
2753 | unsigned alignment, | ||
2754 | bool map_and_fenceable) | ||
2619 | { | 2755 | { |
2620 | struct drm_device *dev = obj->dev; | 2756 | struct drm_device *dev = obj->base.dev; |
2621 | drm_i915_private_t *dev_priv = dev->dev_private; | 2757 | drm_i915_private_t *dev_priv = dev->dev_private; |
2622 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2623 | struct drm_mm_node *free_space; | 2758 | struct drm_mm_node *free_space; |
2624 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 2759 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2760 | u32 size, fence_size, fence_alignment, unfenced_alignment; | ||
2761 | bool mappable, fenceable; | ||
2625 | int ret; | 2762 | int ret; |
2626 | 2763 | ||
2627 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2764 | if (obj->madv != I915_MADV_WILLNEED) { |
2628 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 2765 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
2629 | return -EINVAL; | 2766 | return -EINVAL; |
2630 | } | 2767 | } |
2631 | 2768 | ||
2769 | fence_size = i915_gem_get_gtt_size(obj); | ||
2770 | fence_alignment = i915_gem_get_gtt_alignment(obj); | ||
2771 | unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj); | ||
2772 | |||
2632 | if (alignment == 0) | 2773 | if (alignment == 0) |
2633 | alignment = i915_gem_get_gtt_alignment(obj); | 2774 | alignment = map_and_fenceable ? fence_alignment : |
2634 | if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { | 2775 | unfenced_alignment; |
2776 | if (map_and_fenceable && alignment & (fence_alignment - 1)) { | ||
2635 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); | 2777 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); |
2636 | return -EINVAL; | 2778 | return -EINVAL; |
2637 | } | 2779 | } |
2638 | 2780 | ||
2781 | size = map_and_fenceable ? fence_size : obj->base.size; | ||
2782 | |||
2639 | /* If the object is bigger than the entire aperture, reject it early | 2783 | /* If the object is bigger than the entire aperture, reject it early |
2640 | * before evicting everything in a vain attempt to find space. | 2784 | * before evicting everything in a vain attempt to find space. |
2641 | */ | 2785 | */ |
2642 | if (obj->size > dev_priv->mm.gtt_total) { | 2786 | if (obj->base.size > |
2787 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { | ||
2643 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | 2788 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2644 | return -E2BIG; | 2789 | return -E2BIG; |
2645 | } | 2790 | } |
2646 | 2791 | ||
2647 | search_free: | 2792 | search_free: |
2648 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | 2793 | if (map_and_fenceable) |
2649 | obj->size, alignment, 0); | 2794 | free_space = |
2650 | if (free_space != NULL) | 2795 | drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, |
2651 | obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, | 2796 | size, alignment, 0, |
2652 | alignment); | 2797 | dev_priv->mm.gtt_mappable_end, |
2653 | if (obj_priv->gtt_space == NULL) { | 2798 | 0); |
2799 | else | ||
2800 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | ||
2801 | size, alignment, 0); | ||
2802 | |||
2803 | if (free_space != NULL) { | ||
2804 | if (map_and_fenceable) | ||
2805 | obj->gtt_space = | ||
2806 | drm_mm_get_block_range_generic(free_space, | ||
2807 | size, alignment, 0, | ||
2808 | dev_priv->mm.gtt_mappable_end, | ||
2809 | 0); | ||
2810 | else | ||
2811 | obj->gtt_space = | ||
2812 | drm_mm_get_block(free_space, size, alignment); | ||
2813 | } | ||
2814 | if (obj->gtt_space == NULL) { | ||
2654 | /* If the gtt is empty and we're still having trouble | 2815 | /* If the gtt is empty and we're still having trouble |
2655 | * fitting our object in, we're out of memory. | 2816 | * fitting our object in, we're out of memory. |
2656 | */ | 2817 | */ |
2657 | ret = i915_gem_evict_something(dev, obj->size, alignment); | 2818 | ret = i915_gem_evict_something(dev, size, alignment, |
2819 | map_and_fenceable); | ||
2658 | if (ret) | 2820 | if (ret) |
2659 | return ret; | 2821 | return ret; |
2660 | 2822 | ||
2661 | goto search_free; | 2823 | goto search_free; |
2662 | } | 2824 | } |
2663 | 2825 | ||
2664 | ret = i915_gem_object_get_pages(obj, gfpmask); | 2826 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); |
2665 | if (ret) { | 2827 | if (ret) { |
2666 | drm_mm_put_block(obj_priv->gtt_space); | 2828 | drm_mm_put_block(obj->gtt_space); |
2667 | obj_priv->gtt_space = NULL; | 2829 | obj->gtt_space = NULL; |
2668 | 2830 | ||
2669 | if (ret == -ENOMEM) { | 2831 | if (ret == -ENOMEM) { |
2670 | /* first try to clear up some space from the GTT */ | 2832 | /* first try to reclaim some memory by clearing the GTT */ |
2671 | ret = i915_gem_evict_something(dev, obj->size, | 2833 | ret = i915_gem_evict_everything(dev, false); |
2672 | alignment); | ||
2673 | if (ret) { | 2834 | if (ret) { |
2674 | /* now try to shrink everyone else */ | 2835 | /* now try to shrink everyone else */ |
2675 | if (gfpmask) { | 2836 | if (gfpmask) { |
@@ -2677,7 +2838,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2677 | goto search_free; | 2838 | goto search_free; |
2678 | } | 2839 | } |
2679 | 2840 | ||
2680 | return ret; | 2841 | return -ENOMEM; |
2681 | } | 2842 | } |
2682 | 2843 | ||
2683 | goto search_free; | 2844 | goto search_free; |
@@ -2686,126 +2847,116 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2686 | return ret; | 2847 | return ret; |
2687 | } | 2848 | } |
2688 | 2849 | ||
2689 | /* Create an AGP memory structure pointing at our pages, and bind it | 2850 | ret = i915_gem_gtt_bind_object(obj); |
2690 | * into the GTT. | 2851 | if (ret) { |
2691 | */ | 2852 | i915_gem_object_put_pages_gtt(obj); |
2692 | obj_priv->agp_mem = drm_agp_bind_pages(dev, | 2853 | drm_mm_put_block(obj->gtt_space); |
2693 | obj_priv->pages, | 2854 | obj->gtt_space = NULL; |
2694 | obj->size >> PAGE_SHIFT, | 2855 | |
2695 | obj_priv->gtt_space->start, | 2856 | if (i915_gem_evict_everything(dev, false)) |
2696 | obj_priv->agp_type); | ||
2697 | if (obj_priv->agp_mem == NULL) { | ||
2698 | i915_gem_object_put_pages(obj); | ||
2699 | drm_mm_put_block(obj_priv->gtt_space); | ||
2700 | obj_priv->gtt_space = NULL; | ||
2701 | |||
2702 | ret = i915_gem_evict_something(dev, obj->size, alignment); | ||
2703 | if (ret) | ||
2704 | return ret; | 2857 | return ret; |
2705 | 2858 | ||
2706 | goto search_free; | 2859 | goto search_free; |
2707 | } | 2860 | } |
2708 | 2861 | ||
2709 | /* keep track of bounds object by adding it to the inactive list */ | 2862 | list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); |
2710 | list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 2863 | list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
2711 | i915_gem_info_add_gtt(dev_priv, obj->size); | ||
2712 | 2864 | ||
2713 | /* Assert that the object is not currently in any GPU domain. As it | 2865 | /* Assert that the object is not currently in any GPU domain. As it |
2714 | * wasn't in the GTT, there shouldn't be any way it could have been in | 2866 | * wasn't in the GTT, there shouldn't be any way it could have been in |
2715 | * a GPU cache | 2867 | * a GPU cache |
2716 | */ | 2868 | */ |
2717 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 2869 | BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); |
2718 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 2870 | BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); |
2719 | 2871 | ||
2720 | obj_priv->gtt_offset = obj_priv->gtt_space->start; | 2872 | obj->gtt_offset = obj->gtt_space->start; |
2721 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); | ||
2722 | 2873 | ||
2874 | fenceable = | ||
2875 | obj->gtt_space->size == fence_size && | ||
2876 | (obj->gtt_space->start & (fence_alignment -1)) == 0; | ||
2877 | |||
2878 | mappable = | ||
2879 | obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; | ||
2880 | |||
2881 | obj->map_and_fenceable = mappable && fenceable; | ||
2882 | |||
2883 | trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); | ||
2723 | return 0; | 2884 | return 0; |
2724 | } | 2885 | } |
2725 | 2886 | ||
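Bind now classifies the object at the end: `fenceable` if the allocated node matches the fence size and alignment, `mappable` if it lies below the mappable end of the GTT, and `map_and_fenceable` only if both hold. The predicate with illustrative numbers:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gtt_offset = 0x100000, obj_size = 0x10000;
	uint32_t space_size = 0x10000, fence_size = 0x10000;
	uint32_t fence_alignment = 0x10000, mappable_end = 0x10000000;

	bool fenceable = space_size == fence_size &&
			 (gtt_offset & (fence_alignment - 1)) == 0;
	bool mappable = gtt_offset + obj_size <= mappable_end;

	printf("map_and_fenceable = %d\n", mappable && fenceable);
	return 0;
}
```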
2726 | void | 2887 | void |
2727 | i915_gem_clflush_object(struct drm_gem_object *obj) | 2888 | i915_gem_clflush_object(struct drm_i915_gem_object *obj) |
2728 | { | 2889 | { |
2729 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2730 | |||
2731 | /* If we don't have a page list set up, then we're not pinned | 2890 | /* If we don't have a page list set up, then we're not pinned |
2732 | * to GPU, and we can ignore the cache flush because it'll happen | 2891 | * to GPU, and we can ignore the cache flush because it'll happen |
2733 | * again at bind time. | 2892 | * again at bind time. |
2734 | */ | 2893 | */ |
2735 | if (obj_priv->pages == NULL) | 2894 | if (obj->pages == NULL) |
2736 | return; | 2895 | return; |
2737 | 2896 | ||
2738 | trace_i915_gem_object_clflush(obj); | 2897 | trace_i915_gem_object_clflush(obj); |
2739 | 2898 | ||
2740 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); | 2899 | drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); |
2741 | } | 2900 | } |
2742 | 2901 | ||
2743 | /** Flushes any GPU write domain for the object if it's dirty. */ | 2902 | /** Flushes any GPU write domain for the object if it's dirty. */ |
2744 | static int | 2903 | static int |
2745 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | 2904 | i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) |
2746 | bool pipelined) | ||
2747 | { | 2905 | { |
2748 | struct drm_device *dev = obj->dev; | 2906 | struct drm_device *dev = obj->base.dev; |
2749 | uint32_t old_write_domain; | ||
2750 | 2907 | ||
2751 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 2908 | if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) |
2752 | return 0; | 2909 | return 0; |
2753 | 2910 | ||
2754 | /* Queue the GPU write cache flushing we need. */ | 2911 | /* Queue the GPU write cache flushing we need. */ |
2755 | old_write_domain = obj->write_domain; | 2912 | return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); |
2756 | i915_gem_flush_ring(dev, NULL, | ||
2757 | to_intel_bo(obj)->ring, | ||
2758 | 0, obj->write_domain); | ||
2759 | BUG_ON(obj->write_domain); | ||
2760 | |||
2761 | trace_i915_gem_object_change_domain(obj, | ||
2762 | obj->read_domains, | ||
2763 | old_write_domain); | ||
2764 | |||
2765 | if (pipelined) | ||
2766 | return 0; | ||
2767 | |||
2768 | return i915_gem_object_wait_rendering(obj, true); | ||
2769 | } | 2913 | } |
2770 | 2914 | ||
2771 | /** Flushes the GTT write domain for the object if it's dirty. */ | 2915 | /** Flushes the GTT write domain for the object if it's dirty. */ |
2772 | static void | 2916 | static void |
2773 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | 2917 | i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) |
2774 | { | 2918 | { |
2775 | uint32_t old_write_domain; | 2919 | uint32_t old_write_domain; |
2776 | 2920 | ||
2777 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | 2921 | if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) |
2778 | return; | 2922 | return; |
2779 | 2923 | ||
2780 | /* No actual flushing is required for the GTT write domain. Writes | 2924 | /* No actual flushing is required for the GTT write domain. Writes |
2781 | * to it immediately go to main memory as far as we know, so there's | 2925 | * to it immediately go to main memory as far as we know, so there's |
2782 | * no chipset flush. It also doesn't land in render cache. | 2926 | * no chipset flush. It also doesn't land in render cache. |
2927 | * | ||
2928 | * However, we do have to enforce the order so that all writes through | ||
2929 | * the GTT land before any writes to the device, such as updates to | ||
2930 | * the GATT itself. | ||
2783 | */ | 2931 | */ |
2784 | old_write_domain = obj->write_domain; | 2932 | wmb(); |
2785 | obj->write_domain = 0; | 2933 | |
2934 | i915_gem_release_mmap(obj); | ||
2935 | |||
2936 | old_write_domain = obj->base.write_domain; | ||
2937 | obj->base.write_domain = 0; | ||
2786 | 2938 | ||
2787 | trace_i915_gem_object_change_domain(obj, | 2939 | trace_i915_gem_object_change_domain(obj, |
2788 | obj->read_domains, | 2940 | obj->base.read_domains, |
2789 | old_write_domain); | 2941 | old_write_domain); |
2790 | } | 2942 | } |
2791 | 2943 | ||
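The GTT flush gains a `wmb()` so that writes through the GTT mapping become visible before any subsequent device write that might depend on them (the comment cites GATT updates). A toy model of why the barrier matters, with `__sync_synchronize()` standing in for the kernel's `wmb()`:

```c
#include <stdio.h>

static volatile int data, doorbell;

int main(void)
{
	data = 42;			/* write through the mapping   */
	__sync_synchronize();		/* wmb(): data before doorbell */
	doorbell = 1;			/* device may now consume data */

	printf("%d %d\n", data, doorbell);
	return 0;
}
```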
2792 | /** Flushes the CPU write domain for the object if it's dirty. */ | 2944 | /** Flushes the CPU write domain for the object if it's dirty. */ |
2793 | static void | 2945 | static void |
2794 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | 2946 | i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) |
2795 | { | 2947 | { |
2796 | struct drm_device *dev = obj->dev; | ||
2797 | uint32_t old_write_domain; | 2948 | uint32_t old_write_domain; |
2798 | 2949 | ||
2799 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | 2950 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) |
2800 | return; | 2951 | return; |
2801 | 2952 | ||
2802 | i915_gem_clflush_object(obj); | 2953 | i915_gem_clflush_object(obj); |
2803 | drm_agp_chipset_flush(dev); | 2954 | intel_gtt_chipset_flush(); |
2804 | old_write_domain = obj->write_domain; | 2955 | old_write_domain = obj->base.write_domain; |
2805 | obj->write_domain = 0; | 2956 | obj->base.write_domain = 0; |
2806 | 2957 | ||
2807 | trace_i915_gem_object_change_domain(obj, | 2958 | trace_i915_gem_object_change_domain(obj, |
2808 | obj->read_domains, | 2959 | obj->base.read_domains, |
2809 | old_write_domain); | 2960 | old_write_domain); |
2810 | } | 2961 | } |
2811 | 2962 | ||
@@ -2816,40 +2967,39 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | |||
2816 | * flushes to occur. | 2967 | * flushes to occur. |
2817 | */ | 2968 | */ |
2818 | int | 2969 | int |
2819 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 2970 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) |
2820 | { | 2971 | { |
2821 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2822 | uint32_t old_write_domain, old_read_domains; | 2972 | uint32_t old_write_domain, old_read_domains; |
2823 | int ret; | 2973 | int ret; |
2824 | 2974 | ||
2825 | /* Not valid to be called on unbound objects. */ | 2975 | /* Not valid to be called on unbound objects. */ |
2826 | if (obj_priv->gtt_space == NULL) | 2976 | if (obj->gtt_space == NULL) |
2827 | return -EINVAL; | 2977 | return -EINVAL; |
2828 | 2978 | ||
2829 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 2979 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2830 | if (ret != 0) | 2980 | if (ret) |
2831 | return ret; | 2981 | return ret; |
2832 | 2982 | ||
2833 | i915_gem_object_flush_cpu_write_domain(obj); | 2983 | if (obj->pending_gpu_write || write) { |
2834 | |||
2835 | if (write) { | ||
2836 | ret = i915_gem_object_wait_rendering(obj, true); | 2984 | ret = i915_gem_object_wait_rendering(obj, true); |
2837 | if (ret) | 2985 | if (ret) |
2838 | return ret; | 2986 | return ret; |
2839 | } | 2987 | } |
2840 | 2988 | ||
2841 | old_write_domain = obj->write_domain; | 2989 | i915_gem_object_flush_cpu_write_domain(obj); |
2842 | old_read_domains = obj->read_domains; | 2990 | |
2991 | old_write_domain = obj->base.write_domain; | ||
2992 | old_read_domains = obj->base.read_domains; | ||
2843 | 2993 | ||
2844 | /* It should now be out of any other write domains, and we can update | 2994 | /* It should now be out of any other write domains, and we can update |
2845 | * the domain values for our changes. | 2995 | * the domain values for our changes. |
2846 | */ | 2996 | */ |
2847 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 2997 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
2848 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 2998 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
2849 | if (write) { | 2999 | if (write) { |
2850 | obj->read_domains = I915_GEM_DOMAIN_GTT; | 3000 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
2851 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 3001 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
2852 | obj_priv->dirty = 1; | 3002 | obj->dirty = 1; |
2853 | } | 3003 | } |
2854 | 3004 | ||
2855 | trace_i915_gem_object_change_domain(obj, | 3005 | trace_i915_gem_object_change_domain(obj, |
@@ -2864,23 +3014,23 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2864 | * wait, as in the modesetting process we're not supposed to be interrupted. | 3014 | * wait, as in the modesetting process we're not supposed to be interrupted. |
2865 | */ | 3015 | */ |
2866 | int | 3016 | int |
2867 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | 3017 | i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, |
2868 | bool pipelined) | 3018 | struct intel_ring_buffer *pipelined) |
2869 | { | 3019 | { |
2870 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
2871 | uint32_t old_read_domains; | 3020 | uint32_t old_read_domains; |
2872 | int ret; | 3021 | int ret; |
2873 | 3022 | ||
2874 | /* Not valid to be called on unbound objects. */ | 3023 | /* Not valid to be called on unbound objects. */ |
2875 | if (obj_priv->gtt_space == NULL) | 3024 | if (obj->gtt_space == NULL) |
2876 | return -EINVAL; | 3025 | return -EINVAL; |
2877 | 3026 | ||
2878 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); | 3027 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2879 | if (ret) | 3028 | if (ret) |
2880 | return ret; | 3029 | return ret; |
2881 | 3030 | ||
3031 | |||
2882 | /* Currently, we are always called from a non-interruptible context. */ | 3032 | /* Currently, we are always called from a non-interruptible context. */ |
2883 | if (!pipelined) { | 3033 | if (pipelined != obj->ring) { |
2884 | ret = i915_gem_object_wait_rendering(obj, false); | 3034 | ret = i915_gem_object_wait_rendering(obj, false); |
2885 | if (ret) | 3035 | if (ret) |
2886 | return ret; | 3036 | return ret; |
@@ -2888,12 +3038,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | |||
2888 | 3038 | ||
2889 | i915_gem_object_flush_cpu_write_domain(obj); | 3039 | i915_gem_object_flush_cpu_write_domain(obj); |
2890 | 3040 | ||
2891 | old_read_domains = obj->read_domains; | 3041 | old_read_domains = obj->base.read_domains; |
2892 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 3042 | obj->base.read_domains |= I915_GEM_DOMAIN_GTT; |
2893 | 3043 | ||
2894 | trace_i915_gem_object_change_domain(obj, | 3044 | trace_i915_gem_object_change_domain(obj, |
2895 | old_read_domains, | 3045 | old_read_domains, |
2896 | obj->write_domain); | 3046 | obj->base.write_domain); |
2897 | 3047 | ||
2898 | return 0; | 3048 | return 0; |
2899 | } | 3049 | } |
@@ -2902,14 +3052,19 @@ int | |||
2902 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | 3052 | i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, |
2903 | bool interruptible) | 3053 | bool interruptible) |
2904 | { | 3054 | { |
3055 | int ret; | ||
3056 | |||
2905 | if (!obj->active) | 3057 | if (!obj->active) |
2906 | return 0; | 3058 | return 0; |
2907 | 3059 | ||
2908 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) | 3060 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
2909 | i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, | 3061 | ret = i915_gem_flush_ring(obj->base.dev, obj->ring, |
2910 | 0, obj->base.write_domain); | 3062 | 0, obj->base.write_domain); |
3063 | if (ret) | ||
3064 | return ret; | ||
3065 | } | ||
2911 | 3066 | ||
2912 | return i915_gem_object_wait_rendering(&obj->base, interruptible); | 3067 | return i915_gem_object_wait_rendering(obj, interruptible); |
2913 | } | 3068 | } |
2914 | 3069 | ||
2915 | /** | 3070 | /** |
@@ -2919,13 +3074,17 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, | |||
2919 | * flushes to occur. | 3074 | * flushes to occur. |
2920 | */ | 3075 | */ |
2921 | static int | 3076 | static int |
2922 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 3077 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) |
2923 | { | 3078 | { |
2924 | uint32_t old_write_domain, old_read_domains; | 3079 | uint32_t old_write_domain, old_read_domains; |
2925 | int ret; | 3080 | int ret; |
2926 | 3081 | ||
2927 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 3082 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
2928 | if (ret != 0) | 3083 | if (ret) |
3084 | return ret; | ||
3085 | |||
3086 | ret = i915_gem_object_wait_rendering(obj, true); | ||
3087 | if (ret) | ||
2929 | return ret; | 3088 | return ret; |
2930 | 3089 | ||
2931 | i915_gem_object_flush_gtt_write_domain(obj); | 3090 | i915_gem_object_flush_gtt_write_domain(obj); |
@@ -2935,33 +3094,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2935 | */ | 3094 | */ |
2936 | i915_gem_object_set_to_full_cpu_read_domain(obj); | 3095 | i915_gem_object_set_to_full_cpu_read_domain(obj); |
2937 | 3096 | ||
2938 | if (write) { | 3097 | old_write_domain = obj->base.write_domain; |
2939 | ret = i915_gem_object_wait_rendering(obj, true); | 3098 | old_read_domains = obj->base.read_domains; |
2940 | if (ret) | ||
2941 | return ret; | ||
2942 | } | ||
2943 | |||
2944 | old_write_domain = obj->write_domain; | ||
2945 | old_read_domains = obj->read_domains; | ||
2946 | 3099 | ||
2947 | /* Flush the CPU cache if it's still invalid. */ | 3100 | /* Flush the CPU cache if it's still invalid. */ |
2948 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 3101 | if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
2949 | i915_gem_clflush_object(obj); | 3102 | i915_gem_clflush_object(obj); |
2950 | 3103 | ||
2951 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3104 | obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
2952 | } | 3105 | } |
2953 | 3106 | ||
2954 | /* It should now be out of any other write domains, and we can update | 3107 | /* It should now be out of any other write domains, and we can update |
2955 | * the domain values for our changes. | 3108 | * the domain values for our changes. |
2956 | */ | 3109 | */ |
2957 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3110 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
2958 | 3111 | ||
2959 | /* If we're writing through the CPU, then the GPU read domains will | 3112 | /* If we're writing through the CPU, then the GPU read domains will |
2960 | * need to be invalidated at next use. | 3113 | * need to be invalidated at next use. |
2961 | */ | 3114 | */ |
2962 | if (write) { | 3115 | if (write) { |
2963 | obj->read_domains = I915_GEM_DOMAIN_CPU; | 3116 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; |
2964 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 3117 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
2965 | } | 3118 | } |
2966 | 3119 | ||
2967 | trace_i915_gem_object_change_domain(obj, | 3120 | trace_i915_gem_object_change_domain(obj, |
@@ -2971,184 +3124,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
2971 | return 0; | 3124 | return 0; |
2972 | } | 3125 | } |
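
The set-to-domain helpers above keep one invariant: reading a new domain is additive, writing collapses the object to a single read and write domain, and by the time the masks are updated the old write domain must already have been flushed (the BUG_ON). A compact model of just that bookkeeping, with the GPU/CPU flushes elided and the domain bits chosen arbitrarily:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { CPU = 0x1, GTT = 0x40 };        /* illustrative domain bits */

struct bo { uint32_t read_domains, write_domain; };

static void set_domain(struct bo *o, uint32_t target, int write)
{
    /* GPU flush, wait-for-rendering and CPU/GTT flushes elided. */
    assert((o->write_domain & ~target) == 0);  /* mirrors the BUG_ON */
    o->read_domains |= target;                 /* reading is additive */
    if (write) {                               /* writing is exclusive */
        o->read_domains = target;
        o->write_domain = target;
    }
}

int main(void)
{
    struct bo o = { .read_domains = CPU, .write_domain = 0 };

    set_domain(&o, GTT, 1);            /* CPU-read object, GTT write */
    printf("read %#x write %#x\n", o.read_domains, o.write_domain);
    return 0;
}
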
2973 | 3126 | ||
2974 | /* | ||
2975 | * Set the next domain for the specified object. This | ||
2976 | * may not actually perform the necessary flushing/invaliding though, | ||
2977 | * as that may want to be batched with other set_domain operations | ||
2978 | * | ||
2979 | * This is (we hope) the only really tricky part of gem. The goal | ||
2980 | * is fairly simple -- track which caches hold bits of the object | ||
2981 | * and make sure they remain coherent. A few concrete examples may | ||
2982 | * help to explain how it works. For shorthand, we use the notation | ||
2983 | * (read_domains, write_domain), e.g. (CPU, CPU) to indicate | ||
2984 | * a pair of read and write domain masks. | ||
2985 | * | ||
2986 | * Case 1: the batch buffer | ||
2987 | * | ||
2988 | * 1. Allocated | ||
2989 | * 2. Written by CPU | ||
2990 | * 3. Mapped to GTT | ||
2991 | * 4. Read by GPU | ||
2992 | * 5. Unmapped from GTT | ||
2993 | * 6. Freed | ||
2994 | * | ||
2995 | * Let's take these a step at a time | ||
2996 | * | ||
2997 | * 1. Allocated | ||
2998 | * Pages allocated from the kernel may still have | ||
2999 | * cache contents, so we set them to (CPU, CPU) always. | ||
3000 | * 2. Written by CPU (using pwrite) | ||
3001 | * The pwrite function calls set_domain (CPU, CPU) and | ||
3002 | * this function does nothing (as nothing changes) | ||
3003 | * 3. Mapped to GTT | ||
3004 | * This function asserts that the object is not | ||
3005 | * currently in any GPU-based read or write domains | ||
3006 | * 4. Read by GPU | ||
3007 | * i915_gem_execbuffer calls set_domain (COMMAND, 0). | ||
3008 | * As write_domain is zero, this function adds in the | ||
3009 | * current read domains (CPU+COMMAND, 0). | ||
3010 | * flush_domains is set to CPU. | ||
3011 | * invalidate_domains is set to COMMAND | ||
3012 | * clflush is run to get data out of the CPU caches | ||
3013 | * then i915_dev_set_domain calls i915_gem_flush to | ||
3014 | * emit an MI_FLUSH and drm_agp_chipset_flush | ||
3015 | * 5. Unmapped from GTT | ||
3016 | * i915_gem_object_unbind calls set_domain (CPU, CPU) | ||
3017 | * flush_domains and invalidate_domains end up both zero | ||
3018 | * so no flushing/invalidating happens | ||
3019 | * 6. Freed | ||
3020 | * yay, done | ||
3021 | * | ||
3022 | * Case 2: The shared render buffer | ||
3023 | * | ||
3024 | * 1. Allocated | ||
3025 | * 2. Mapped to GTT | ||
3026 | * 3. Read/written by GPU | ||
3027 | * 4. set_domain to (CPU,CPU) | ||
3028 | * 5. Read/written by CPU | ||
3029 | * 6. Read/written by GPU | ||
3030 | * | ||
3031 | * 1. Allocated | ||
3032 | * Same as last example, (CPU, CPU) | ||
3033 | * 2. Mapped to GTT | ||
3034 | * Nothing changes (assertions find that it is not in the GPU) | ||
3035 | * 3. Read/written by GPU | ||
3036 | * execbuffer calls set_domain (RENDER, RENDER) | ||
3037 | * flush_domains gets CPU | ||
3038 | * invalidate_domains gets GPU | ||
3039 | * clflush (obj) | ||
3040 | * MI_FLUSH and drm_agp_chipset_flush | ||
3041 | * 4. set_domain (CPU, CPU) | ||
3042 | * flush_domains gets GPU | ||
3043 | * invalidate_domains gets CPU | ||
3044 | * wait_rendering (obj) to make sure all drawing is complete. | ||
3045 | * This will include an MI_FLUSH to get the data from GPU | ||
3046 | * to memory | ||
3047 | * clflush (obj) to invalidate the CPU cache | ||
3048 | * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | ||
3049 | * 5. Read/written by CPU | ||
3050 | * cache lines are loaded and dirtied | ||
3052 | * 6. Read/written by GPU | ||
3052 | * Same as last GPU access | ||
3053 | * | ||
3054 | * Case 3: The constant buffer | ||
3055 | * | ||
3056 | * 1. Allocated | ||
3057 | * 2. Written by CPU | ||
3058 | * 3. Read by GPU | ||
3059 | * 4. Updated (written) by CPU again | ||
3060 | * 5. Read by GPU | ||
3061 | * | ||
3062 | * 1. Allocated | ||
3063 | * (CPU, CPU) | ||
3064 | * 2. Written by CPU | ||
3065 | * (CPU, CPU) | ||
3066 | * 3. Read by GPU | ||
3067 | * (CPU+RENDER, 0) | ||
3068 | * flush_domains = CPU | ||
3069 | * invalidate_domains = RENDER | ||
3070 | * clflush (obj) | ||
3071 | * MI_FLUSH | ||
3072 | * drm_agp_chipset_flush | ||
3073 | * 4. Updated (written) by CPU again | ||
3074 | * (CPU, CPU) | ||
3075 | * flush_domains = 0 (no previous write domain) | ||
3076 | * invalidate_domains = 0 (no new read domains) | ||
3077 | * 5. Read by GPU | ||
3078 | * (CPU+RENDER, 0) | ||
3079 | * flush_domains = CPU | ||
3080 | * invalidate_domains = RENDER | ||
3081 | * clflush (obj) | ||
3082 | * MI_FLUSH | ||
3083 | * drm_agp_chipset_flush | ||
3084 | */ | ||
3085 | static void | ||
3086 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | ||
3087 | struct intel_ring_buffer *ring) | ||
3088 | { | ||
3089 | struct drm_device *dev = obj->dev; | ||
3090 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3091 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3092 | uint32_t invalidate_domains = 0; | ||
3093 | uint32_t flush_domains = 0; | ||
3094 | uint32_t old_read_domains; | ||
3095 | |||
3096 | intel_mark_busy(dev, obj); | ||
3097 | |||
3098 | /* | ||
3099 | * If the object isn't moving to a new write domain, | ||
3100 | * let the object stay in multiple read domains | ||
3101 | */ | ||
3102 | if (obj->pending_write_domain == 0) | ||
3103 | obj->pending_read_domains |= obj->read_domains; | ||
3104 | else | ||
3105 | obj_priv->dirty = 1; | ||
3106 | |||
3107 | /* | ||
3108 | * Flush the current write domain if | ||
3109 | * the new read domains don't match. Invalidate | ||
3110 | * any read domains which differ from the old | ||
3111 | * write domain | ||
3112 | */ | ||
3113 | if (obj->write_domain && | ||
3114 | (obj->write_domain != obj->pending_read_domains || | ||
3115 | obj_priv->ring != ring)) { | ||
3116 | flush_domains |= obj->write_domain; | ||
3117 | invalidate_domains |= | ||
3118 | obj->pending_read_domains & ~obj->write_domain; | ||
3119 | } | ||
3120 | /* | ||
3121 | * Invalidate any read caches which may have | ||
3122 | * stale data. That is, any new read domains. | ||
3123 | */ | ||
3124 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; | ||
3125 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | ||
3126 | i915_gem_clflush_object(obj); | ||
3127 | |||
3128 | old_read_domains = obj->read_domains; | ||
3129 | |||
3130 | /* The actual obj->write_domain will be updated with | ||
3131 | * pending_write_domain after we emit the accumulated flush for all | ||
3132 | * of our domain changes in execbuffers (which clears objects' | ||
3133 | * write_domains). So if we have a current write domain that we | ||
3134 | * aren't changing, set pending_write_domain to that. | ||
3135 | */ | ||
3136 | if (flush_domains == 0 && obj->pending_write_domain == 0) | ||
3137 | obj->pending_write_domain = obj->write_domain; | ||
3138 | obj->read_domains = obj->pending_read_domains; | ||
3139 | |||
3140 | dev->invalidate_domains |= invalidate_domains; | ||
3141 | dev->flush_domains |= flush_domains; | ||
3142 | if (flush_domains & I915_GEM_GPU_DOMAINS) | ||
3143 | dev_priv->mm.flush_rings |= obj_priv->ring->id; | ||
3144 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | ||
3145 | dev_priv->mm.flush_rings |= ring->id; | ||
3146 | |||
3147 | trace_i915_gem_object_change_domain(obj, | ||
3148 | old_read_domains, | ||
3149 | obj->write_domain); | ||
3150 | } | ||
3151 | |||
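
The removed walkthrough above reduces to a small rule for computing flush and invalidate masks from the object's current domains and the domains the next batch will use. A self-contained sketch of that rule, mirroring the removed i915_gem_object_set_to_gpu_domain() but omitting ring changes and the deferred pending_write_domain handling; Case 1, step 4 (a CPU-written batch read by the GPU) is used as the test:

#include <stdint.h>
#include <stdio.h>

struct bo { uint32_t read_domains, write_domain; };

static void next_domains(struct bo *o,
                         uint32_t pending_read, uint32_t pending_write,
                         uint32_t *flush, uint32_t *invalidate)
{
    *flush = *invalidate = 0;

    /* No new writer: the object may stay readable in multiple caches. */
    if (pending_write == 0)
        pending_read |= o->read_domains;

    /* Flush the old write domain if the new readers don't match it,
     * and invalidate any new reader that differs from it. */
    if (o->write_domain && o->write_domain != pending_read) {
        *flush |= o->write_domain;
        *invalidate |= pending_read & ~o->write_domain;
    }

    /* Any brand-new read domain starts out stale. */
    *invalidate |= pending_read & ~o->read_domains;

    o->read_domains = pending_read;
    o->write_domain = pending_write;
}

int main(void)
{
    enum { CPU = 0x1, COMMAND = 0x8 };       /* illustrative bits */
    struct bo o = { .read_domains = CPU, .write_domain = CPU };
    uint32_t flush, invalidate;

    next_domains(&o, COMMAND, 0, &flush, &invalidate);
    printf("flush %#x invalidate %#x\n", flush, invalidate);
    return 0;                                /* flush=CPU, invalidate=COMMAND */
}
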
3152 | /** | 3127 | /** |
3153 | * Moves the object from a partially CPU read to a full one. | 3128 | * Moves the object from a partially CPU read to a full one. |
3154 | * | 3129 | * |
@@ -3156,30 +3131,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3156 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). | 3131 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). |
3157 | */ | 3132 | */ |
3158 | static void | 3133 | static void |
3159 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 3134 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) |
3160 | { | 3135 | { |
3161 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3136 | if (!obj->page_cpu_valid) |
3162 | |||
3163 | if (!obj_priv->page_cpu_valid) | ||
3164 | return; | 3137 | return; |
3165 | 3138 | ||
3166 | /* If we're partially in the CPU read domain, finish moving it in. | 3139 | /* If we're partially in the CPU read domain, finish moving it in. |
3167 | */ | 3140 | */ |
3168 | if (obj->read_domains & I915_GEM_DOMAIN_CPU) { | 3141 | if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { |
3169 | int i; | 3142 | int i; |
3170 | 3143 | ||
3171 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { | 3144 | for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { |
3172 | if (obj_priv->page_cpu_valid[i]) | 3145 | if (obj->page_cpu_valid[i]) |
3173 | continue; | 3146 | continue; |
3174 | drm_clflush_pages(obj_priv->pages + i, 1); | 3147 | drm_clflush_pages(obj->pages + i, 1); |
3175 | } | 3148 | } |
3176 | } | 3149 | } |
3177 | 3150 | ||
3178 | /* Free the page_cpu_valid mappings which are now stale, whether | 3151 | /* Free the page_cpu_valid mappings which are now stale, whether |
3179 | * or not we've got I915_GEM_DOMAIN_CPU. | 3152 | * or not we've got I915_GEM_DOMAIN_CPU. |
3180 | */ | 3153 | */ |
3181 | kfree(obj_priv->page_cpu_valid); | 3154 | kfree(obj->page_cpu_valid); |
3182 | obj_priv->page_cpu_valid = NULL; | 3155 | obj->page_cpu_valid = NULL; |
3183 | } | 3156 | } |
3184 | 3157 | ||
3185 | /** | 3158 | /** |
@@ -3195,354 +3168,65 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | |||
3195 | * flushes to occur. | 3168 | * flushes to occur. |
3196 | */ | 3169 | */ |
3197 | static int | 3170 | static int |
3198 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 3171 | i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, |
3199 | uint64_t offset, uint64_t size) | 3172 | uint64_t offset, uint64_t size) |
3200 | { | 3173 | { |
3201 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
3202 | uint32_t old_read_domains; | 3174 | uint32_t old_read_domains; |
3203 | int i, ret; | 3175 | int i, ret; |
3204 | 3176 | ||
3205 | if (offset == 0 && size == obj->size) | 3177 | if (offset == 0 && size == obj->base.size) |
3206 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 3178 | return i915_gem_object_set_to_cpu_domain(obj, 0); |
3207 | 3179 | ||
3208 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 3180 | ret = i915_gem_object_flush_gpu_write_domain(obj); |
3209 | if (ret != 0) | 3181 | if (ret) |
3210 | return ret; | 3182 | return ret; |
3183 | |||
3184 | ret = i915_gem_object_wait_rendering(obj, true); | ||
3185 | if (ret) | ||
3186 | return ret; | ||
3187 | |||
3211 | i915_gem_object_flush_gtt_write_domain(obj); | 3188 | i915_gem_object_flush_gtt_write_domain(obj); |
3212 | 3189 | ||
3213 | /* If we're already fully in the CPU read domain, we're done. */ | 3190 | /* If we're already fully in the CPU read domain, we're done. */ |
3214 | if (obj_priv->page_cpu_valid == NULL && | 3191 | if (obj->page_cpu_valid == NULL && |
3215 | (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) | 3192 | (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) |
3216 | return 0; | 3193 | return 0; |
3217 | 3194 | ||
3218 | /* Otherwise, create/clear the per-page CPU read domain flag if we're | 3195 | /* Otherwise, create/clear the per-page CPU read domain flag if we're |
3219 | * newly adding I915_GEM_DOMAIN_CPU | 3196 | * newly adding I915_GEM_DOMAIN_CPU |
3220 | */ | 3197 | */ |
3221 | if (obj_priv->page_cpu_valid == NULL) { | 3198 | if (obj->page_cpu_valid == NULL) { |
3222 | obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, | 3199 | obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, |
3223 | GFP_KERNEL); | 3200 | GFP_KERNEL); |
3224 | if (obj_priv->page_cpu_valid == NULL) | 3201 | if (obj->page_cpu_valid == NULL) |
3225 | return -ENOMEM; | 3202 | return -ENOMEM; |
3226 | } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) | 3203 | } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) |
3227 | memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); | 3204 | memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); |
3228 | 3205 | ||
3229 | /* Flush the cache on any pages that are still invalid from the CPU's | 3206 | /* Flush the cache on any pages that are still invalid from the CPU's |
3230 | * perspective. | 3207 | * perspective. |
3231 | */ | 3208 | */ |
3232 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; | 3209 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; |
3233 | i++) { | 3210 | i++) { |
3234 | if (obj_priv->page_cpu_valid[i]) | 3211 | if (obj->page_cpu_valid[i]) |
3235 | continue; | 3212 | continue; |
3236 | 3213 | ||
3237 | drm_clflush_pages(obj_priv->pages + i, 1); | 3214 | drm_clflush_pages(obj->pages + i, 1); |
3238 | 3215 | ||
3239 | obj_priv->page_cpu_valid[i] = 1; | 3216 | obj->page_cpu_valid[i] = 1; |
3240 | } | 3217 | } |
3241 | 3218 | ||
3242 | /* It should now be out of any other write domains, and we can update | 3219 | /* It should now be out of any other write domains, and we can update |
3243 | * the domain values for our changes. | 3220 | * the domain values for our changes. |
3244 | */ | 3221 | */ |
3245 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 3222 | BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); |
3246 | 3223 | ||
3247 | old_read_domains = obj->read_domains; | 3224 | old_read_domains = obj->base.read_domains; |
3248 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 3225 | obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
3249 | 3226 | ||
3250 | trace_i915_gem_object_change_domain(obj, | 3227 | trace_i915_gem_object_change_domain(obj, |
3251 | old_read_domains, | 3228 | old_read_domains, |
3252 | obj->write_domain); | 3229 | obj->base.write_domain); |
3253 | |||
3254 | return 0; | ||
3255 | } | ||
3256 | |||
3257 | /** | ||
3258 | * Pin an object to the GTT and evaluate the relocations landing in it. | ||
3259 | */ | ||
3260 | static int | ||
3261 | i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, | ||
3262 | struct drm_file *file_priv, | ||
3263 | struct drm_i915_gem_exec_object2 *entry) | ||
3264 | { | ||
3265 | struct drm_device *dev = obj->base.dev; | ||
3266 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3267 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
3268 | struct drm_gem_object *target_obj = NULL; | ||
3269 | uint32_t target_handle = 0; | ||
3270 | int i, ret = 0; | ||
3271 | |||
3272 | user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; | ||
3273 | for (i = 0; i < entry->relocation_count; i++) { | ||
3274 | struct drm_i915_gem_relocation_entry reloc; | ||
3275 | uint32_t target_offset; | ||
3276 | |||
3277 | if (__copy_from_user_inatomic(&reloc, | ||
3278 | user_relocs+i, | ||
3279 | sizeof(reloc))) { | ||
3280 | ret = -EFAULT; | ||
3281 | break; | ||
3282 | } | ||
3283 | |||
3284 | if (reloc.target_handle != target_handle) { | ||
3285 | drm_gem_object_unreference(target_obj); | ||
3286 | |||
3287 | target_obj = drm_gem_object_lookup(dev, file_priv, | ||
3288 | reloc.target_handle); | ||
3289 | if (target_obj == NULL) { | ||
3290 | ret = -ENOENT; | ||
3291 | break; | ||
3292 | } | ||
3293 | |||
3294 | target_handle = reloc.target_handle; | ||
3295 | } | ||
3296 | target_offset = to_intel_bo(target_obj)->gtt_offset; | ||
3297 | |||
3298 | #if WATCH_RELOC | ||
3299 | DRM_INFO("%s: obj %p offset %08x target %d " | ||
3300 | "read %08x write %08x gtt %08x " | ||
3301 | "presumed %08x delta %08x\n", | ||
3302 | __func__, | ||
3303 | obj, | ||
3304 | (int) reloc.offset, | ||
3305 | (int) reloc.target_handle, | ||
3306 | (int) reloc.read_domains, | ||
3307 | (int) reloc.write_domain, | ||
3308 | (int) target_offset, | ||
3309 | (int) reloc.presumed_offset, | ||
3310 | reloc.delta); | ||
3311 | #endif | ||
3312 | |||
3313 | /* The target buffer should have appeared before us in the | ||
3314 | * exec_object list, so it should have a GTT space bound by now. | ||
3315 | */ | ||
3316 | if (target_offset == 0) { | ||
3317 | DRM_ERROR("No GTT space found for object %d\n", | ||
3318 | reloc.target_handle); | ||
3319 | ret = -EINVAL; | ||
3320 | break; | ||
3321 | } | ||
3322 | |||
3323 | /* Validate that the target is in a valid r/w GPU domain */ | ||
3324 | if (reloc.write_domain & (reloc.write_domain - 1)) { | ||
3325 | DRM_ERROR("reloc with multiple write domains: " | ||
3326 | "obj %p target %d offset %d " | ||
3327 | "read %08x write %08x", | ||
3328 | obj, reloc.target_handle, | ||
3329 | (int) reloc.offset, | ||
3330 | reloc.read_domains, | ||
3331 | reloc.write_domain); | ||
3332 | ret = -EINVAL; | ||
3333 | break; | ||
3334 | } | ||
3335 | if (reloc.write_domain & I915_GEM_DOMAIN_CPU || | ||
3336 | reloc.read_domains & I915_GEM_DOMAIN_CPU) { | ||
3337 | DRM_ERROR("reloc with read/write CPU domains: " | ||
3338 | "obj %p target %d offset %d " | ||
3339 | "read %08x write %08x", | ||
3340 | obj, reloc.target_handle, | ||
3341 | (int) reloc.offset, | ||
3342 | reloc.read_domains, | ||
3343 | reloc.write_domain); | ||
3344 | ret = -EINVAL; | ||
3345 | break; | ||
3346 | } | ||
3347 | if (reloc.write_domain && target_obj->pending_write_domain && | ||
3348 | reloc.write_domain != target_obj->pending_write_domain) { | ||
3349 | DRM_ERROR("Write domain conflict: " | ||
3350 | "obj %p target %d offset %d " | ||
3351 | "new %08x old %08x\n", | ||
3352 | obj, reloc.target_handle, | ||
3353 | (int) reloc.offset, | ||
3354 | reloc.write_domain, | ||
3355 | target_obj->pending_write_domain); | ||
3356 | ret = -EINVAL; | ||
3357 | break; | ||
3358 | } | ||
3359 | |||
3360 | target_obj->pending_read_domains |= reloc.read_domains; | ||
3361 | target_obj->pending_write_domain |= reloc.write_domain; | ||
3362 | |||
3363 | /* If the relocation already has the right value in it, no | ||
3364 | * more work needs to be done. | ||
3365 | */ | ||
3366 | if (target_offset == reloc.presumed_offset) | ||
3367 | continue; | ||
3368 | |||
3369 | /* Check that the relocation address is valid... */ | ||
3370 | if (reloc.offset > obj->base.size - 4) { | ||
3371 | DRM_ERROR("Relocation beyond object bounds: " | ||
3372 | "obj %p target %d offset %d size %d.\n", | ||
3373 | obj, reloc.target_handle, | ||
3374 | (int) reloc.offset, (int) obj->base.size); | ||
3375 | ret = -EINVAL; | ||
3376 | break; | ||
3377 | } | ||
3378 | if (reloc.offset & 3) { | ||
3379 | DRM_ERROR("Relocation not 4-byte aligned: " | ||
3380 | "obj %p target %d offset %d.\n", | ||
3381 | obj, reloc.target_handle, | ||
3382 | (int) reloc.offset); | ||
3383 | ret = -EINVAL; | ||
3384 | break; | ||
3385 | } | ||
3386 | |||
3387 | /* and points to somewhere within the target object. */ | ||
3388 | if (reloc.delta >= target_obj->size) { | ||
3389 | DRM_ERROR("Relocation beyond target object bounds: " | ||
3390 | "obj %p target %d delta %d size %d.\n", | ||
3391 | obj, reloc.target_handle, | ||
3392 | (int) reloc.delta, (int) target_obj->size); | ||
3393 | ret = -EINVAL; | ||
3394 | break; | ||
3395 | } | ||
3396 | |||
3397 | reloc.delta += target_offset; | ||
3398 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { | ||
3399 | uint32_t page_offset = reloc.offset & ~PAGE_MASK; | ||
3400 | char *vaddr; | ||
3401 | |||
3402 | vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]); | ||
3403 | *(uint32_t *)(vaddr + page_offset) = reloc.delta; | ||
3404 | kunmap_atomic(vaddr); | ||
3405 | } else { | ||
3406 | uint32_t __iomem *reloc_entry; | ||
3407 | void __iomem *reloc_page; | ||
3408 | |||
3409 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); | ||
3410 | if (ret) | ||
3411 | break; | ||
3412 | |||
3413 | /* Map the page containing the relocation we're going to perform. */ | ||
3414 | reloc.offset += obj->gtt_offset; | ||
3415 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
3416 | reloc.offset & PAGE_MASK); | ||
3417 | reloc_entry = (uint32_t __iomem *) | ||
3418 | (reloc_page + (reloc.offset & ~PAGE_MASK)); | ||
3419 | iowrite32(reloc.delta, reloc_entry); | ||
3420 | io_mapping_unmap_atomic(reloc_page); | ||
3421 | } | ||
3422 | |||
3423 | /* and update the user's relocation entry */ | ||
3424 | reloc.presumed_offset = target_offset; | ||
3425 | if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, | ||
3426 | &reloc.presumed_offset, | ||
3427 | sizeof(reloc.presumed_offset))) { | ||
3428 | ret = -EFAULT; | ||
3429 | break; | ||
3430 | } | ||
3431 | } | ||
3432 | |||
3433 | drm_gem_object_unreference(target_obj); | ||
3434 | return ret; | ||
3435 | } | ||
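
Each relocation in the loop above patches a 32-bit slot in the buffer with the target's GTT offset plus a delta, after the bounds and alignment checks, and records presumed_offset so an unmoved target can be skipped on the next pass. A userspace sketch of a single relocation (struct layout simplified, not the real ABI):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct reloc { uint64_t offset; uint32_t delta, presumed_offset; };

static int apply_reloc(unsigned char *batch, size_t batch_size,
                       struct reloc *r, uint32_t target_offset)
{
    uint32_t value;

    if (r->presumed_offset == target_offset)
        return 0;                        /* already correct, skip */
    if ((r->offset & 3) || r->offset > batch_size - 4)
        return -1;                       /* mirrors the EINVAL checks */

    value = target_offset + r->delta;
    memcpy(batch + r->offset, &value, sizeof(value));
    r->presumed_offset = target_offset;  /* remembered for next pass */
    return 0;
}

int main(void)
{
    unsigned char batch[64] = {0};
    struct reloc r = { .offset = 8, .delta = 0x100, .presumed_offset = 0 };
    uint32_t got;

    apply_reloc(batch, sizeof(batch), &r, 0x10000);
    memcpy(&got, batch + 8, sizeof(got));
    printf("slot=%#x presumed=%#x\n", got, r.presumed_offset);
    return 0;
}
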
3436 | |||
3437 | static int | ||
3438 | i915_gem_execbuffer_pin(struct drm_device *dev, | ||
3439 | struct drm_file *file, | ||
3440 | struct drm_gem_object **object_list, | ||
3441 | struct drm_i915_gem_exec_object2 *exec_list, | ||
3442 | int count) | ||
3443 | { | ||
3444 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3445 | int ret, i, retry; | ||
3446 | |||
3447 | /* attempt to pin all of the buffers into the GTT */ | ||
3448 | for (retry = 0; retry < 2; retry++) { | ||
3449 | ret = 0; | ||
3450 | for (i = 0; i < count; i++) { | ||
3451 | struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; | ||
3452 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); ||
3453 | bool need_fence = | ||
3454 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3455 | obj->tiling_mode != I915_TILING_NONE; | ||
3456 | |||
3457 | /* Check fence reg constraints and rebind if necessary */ | ||
3458 | if (need_fence && | ||
3459 | !i915_gem_object_fence_offset_ok(&obj->base, | ||
3460 | obj->tiling_mode)) { | ||
3461 | ret = i915_gem_object_unbind(&obj->base); | ||
3462 | if (ret) | ||
3463 | break; | ||
3464 | } | ||
3465 | |||
3466 | ret = i915_gem_object_pin(&obj->base, entry->alignment); | ||
3467 | if (ret) | ||
3468 | break; | ||
3469 | |||
3470 | /* | ||
3471 | * Pre-965 chips need a fence register set up in order | ||
3472 | * to properly handle blits to/from tiled surfaces. | ||
3473 | */ | ||
3474 | if (need_fence) { | ||
3475 | ret = i915_gem_object_get_fence_reg(&obj->base, true); | ||
3476 | if (ret) { | ||
3477 | i915_gem_object_unpin(&obj->base); | ||
3478 | break; | ||
3479 | } | ||
3480 | |||
3481 | dev_priv->fence_regs[obj->fence_reg].gpu = true; | ||
3482 | } | ||
3483 | |||
3484 | entry->offset = obj->gtt_offset; | ||
3485 | } | ||
3486 | |||
3487 | while (i--) | ||
3488 | i915_gem_object_unpin(object_list[i]); | ||
3489 | |||
3490 | if (ret == 0) | ||
3491 | break; | ||
3492 | |||
3493 | if (ret != -ENOSPC || retry) | ||
3494 | return ret; | ||
3495 | |||
3496 | ret = i915_gem_evict_everything(dev); | ||
3497 | if (ret) | ||
3498 | return ret; | ||
3499 | } | ||
3500 | |||
3501 | return 0; | ||
3502 | } | ||
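
The pin loop above makes at most two passes: pin everything, and on the first -ENOSPC unwind, evict the whole GTT, and try once more. A sketch of that retry structure, with the driver calls replaced by stubs over a fake capacity:

#include <errno.h>
#include <stdio.h>

static int space = 2;                        /* fake GTT capacity */
static int pin_one(int i)
{
    (void)i;
    if (space <= 0)
        return -ENOSPC;
    space--;
    return 0;
}
static void unpin_one(int i) { (void)i; space++; }
static int evict_all(void)   { space = 8; return 0; }

static int pin_all(int count)
{
    int i, ret, retry;

    for (retry = 0; retry < 2; retry++) {
        ret = 0;
        for (i = 0; i < count; i++)
            if ((ret = pin_one(i)) != 0)
                break;

        while (i--)                          /* unwind the pins */
            unpin_one(i);

        if (ret == 0)
            break;
        if (ret != -ENOSPC || retry)         /* retry once, on ENOSPC only */
            return ret;
        if ((ret = evict_all()) != 0)
            return ret;
    }
    return 0;
}

int main(void)
{
    printf("pin_all -> %d\n", pin_all(4));   /* fails, evicts, succeeds */
    return 0;
}
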
3503 | |||
3504 | static int | ||
3505 | i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, | ||
3506 | struct drm_file *file, | ||
3507 | struct intel_ring_buffer *ring, | ||
3508 | struct drm_gem_object **objects, | ||
3509 | int count) | ||
3510 | { | ||
3511 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3512 | int ret, i; | ||
3513 | |||
3514 | /* Zero the global flush/invalidate flags. These | ||
3515 | * will be modified as new domains are computed | ||
3516 | * for each object | ||
3517 | */ | ||
3518 | dev->invalidate_domains = 0; | ||
3519 | dev->flush_domains = 0; | ||
3520 | dev_priv->mm.flush_rings = 0; | ||
3521 | for (i = 0; i < count; i++) | ||
3522 | i915_gem_object_set_to_gpu_domain(objects[i], ring); | ||
3523 | |||
3524 | if (dev->invalidate_domains | dev->flush_domains) { | ||
3525 | #if WATCH_EXEC | ||
3526 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
3527 | __func__, | ||
3528 | dev->invalidate_domains, | ||
3529 | dev->flush_domains); | ||
3530 | #endif | ||
3531 | i915_gem_flush(dev, file, | ||
3532 | dev->invalidate_domains, | ||
3533 | dev->flush_domains, | ||
3534 | dev_priv->mm.flush_rings); | ||
3535 | } | ||
3536 | |||
3537 | for (i = 0; i < count; i++) { | ||
3538 | struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); | ||
3539 | /* XXX replace with semaphores */ | ||
3540 | if (obj->ring && ring != obj->ring) { | ||
3541 | ret = i915_gem_object_wait_rendering(&obj->base, true); | ||
3542 | if (ret) | ||
3543 | return ret; | ||
3544 | } | ||
3545 | } | ||
3546 | 3230 | ||
3547 | return 0; | 3231 | return 0; |
3548 | } | 3232 | } |
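
i915_gem_object_set_cpu_read_domain_range() keeps one byte per page in page_cpu_valid so that overlapping ranges clflush each page at most once. A userspace sketch of that bookkeeping, with drm_clflush_pages() modeled by a counter:

#include <stdlib.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static int flushes;
static void clflush_page(size_t i) { (void)i; flushes++; }

struct bo { size_t size; unsigned char *page_cpu_valid; };

static int set_cpu_read_range(struct bo *o, size_t offset, size_t size)
{
    size_t i;

    if (o->page_cpu_valid == NULL) {
        o->page_cpu_valid = calloc(o->size / PAGE_SIZE, 1);
        if (o->page_cpu_valid == NULL)
            return -1;
    }
    for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
        if (o->page_cpu_valid[i])
            continue;                        /* already CPU-visible */
        clflush_page(i);
        o->page_cpu_valid[i] = 1;
    }
    return 0;
}

int main(void)
{
    struct bo o = { .size = 8 * PAGE_SIZE };

    set_cpu_read_range(&o, 0, 3 * PAGE_SIZE);          /* flushes 0-2 */
    set_cpu_read_range(&o, PAGE_SIZE, 4 * PAGE_SIZE);  /* only 3-4 */
    printf("%d pages flushed\n", flushes);             /* 5 */
    free(o.page_cpu_valid);
    return 0;
}
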
@@ -3582,586 +3266,129 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
3582 | return 0; | 3266 | return 0; |
3583 | 3267 | ||
3584 | ret = 0; | 3268 | ret = 0; |
3585 | if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { | 3269 | if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { |
3586 | /* And wait for the seqno passing without holding any locks and | 3270 | /* And wait for the seqno passing without holding any locks and |
3587 | * causing extra latency for others. This is safe as the irq | 3271 | * causing extra latency for others. This is safe as the irq |
3588 | * generation is designed to be run atomically and so is | 3272 | * generation is designed to be run atomically and so is |
3589 | * lockless. | 3273 | * lockless. |
3590 | */ | 3274 | */ |
3591 | ring->user_irq_get(dev, ring); | 3275 | if (ring->irq_get(ring)) { |
3592 | ret = wait_event_interruptible(ring->irq_queue, | 3276 | ret = wait_event_interruptible(ring->irq_queue, |
3593 | i915_seqno_passed(ring->get_seqno(dev, ring), seqno) | 3277 | i915_seqno_passed(ring->get_seqno(ring), seqno) |
3594 | || atomic_read(&dev_priv->mm.wedged)); | 3278 | || atomic_read(&dev_priv->mm.wedged)); |
3595 | ring->user_irq_put(dev, ring); | 3279 | ring->irq_put(ring); |
3596 | |||
3597 | if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) | ||
3598 | ret = -EIO; | ||
3599 | } | ||
3600 | |||
3601 | if (ret == 0) | ||
3602 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | ||
3603 | |||
3604 | return ret; | ||
3605 | } | ||
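
The throttle path above waits on i915_seqno_passed(), a wraparound-safe comparison: casting the unsigned difference to a signed integer gives the right answer as long as the two seqnos are less than 2^31 apart. A sketch of just that comparison:

#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t current, uint32_t target)
{
    /* Signed difference handles the 32-bit counter wrapping. */
    return (int32_t)(current - target) >= 0;
}

int main(void)
{
    printf("%d\n", seqno_passed(10, 5));           /* 1: passed  */
    printf("%d\n", seqno_passed(5, 10));           /* 0: pending */
    printf("%d\n", seqno_passed(3, 0xfffffff0u));  /* 1: wrapped */
    return 0;
}
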
3606 | |||
3607 | static int | ||
3608 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, | ||
3609 | uint64_t exec_offset) | ||
3610 | { | ||
3611 | uint32_t exec_start, exec_len; | ||
3612 | |||
3613 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
3614 | exec_len = (uint32_t) exec->batch_len; | ||
3615 | |||
3616 | if ((exec_start | exec_len) & 0x7) | ||
3617 | return -EINVAL; | ||
3618 | |||
3619 | if (!exec_start) | ||
3620 | return -EINVAL; | ||
3621 | |||
3622 | return 0; | ||
3623 | } | ||
3624 | |||
3625 | static int | ||
3626 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | ||
3627 | int count) | ||
3628 | { | ||
3629 | int i; | ||
3630 | |||
3631 | for (i = 0; i < count; i++) { | ||
3632 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; | ||
3633 | size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); | ||
3634 | |||
3635 | if (!access_ok(VERIFY_READ, ptr, length)) | ||
3636 | return -EFAULT; | ||
3637 | |||
3638 | /* we may also need to update the presumed offsets */ | ||
3639 | if (!access_ok(VERIFY_WRITE, ptr, length)) | ||
3640 | return -EFAULT; | ||
3641 | |||
3642 | if (fault_in_pages_readable(ptr, length)) | ||
3643 | return -EFAULT; | ||
3644 | } | ||
3645 | |||
3646 | return 0; | ||
3647 | } | ||
3648 | |||
3649 | static int | ||
3650 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | ||
3651 | struct drm_file *file, | ||
3652 | struct drm_i915_gem_execbuffer2 *args, | ||
3653 | struct drm_i915_gem_exec_object2 *exec_list) | ||
3654 | { | ||
3655 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3656 | struct drm_gem_object **object_list = NULL; | ||
3657 | struct drm_gem_object *batch_obj; | ||
3658 | struct drm_i915_gem_object *obj_priv; | ||
3659 | struct drm_clip_rect *cliprects = NULL; | ||
3660 | struct drm_i915_gem_request *request = NULL; | ||
3661 | int ret, i, flips; | ||
3662 | uint64_t exec_offset; | ||
3663 | |||
3664 | struct intel_ring_buffer *ring = NULL; | ||
3665 | |||
3666 | ret = i915_gem_check_is_wedged(dev); | ||
3667 | if (ret) | ||
3668 | return ret; | ||
3669 | |||
3670 | ret = validate_exec_list(exec_list, args->buffer_count); | ||
3671 | if (ret) | ||
3672 | return ret; | ||
3673 | |||
3674 | #if WATCH_EXEC | ||
3675 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
3676 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
3677 | #endif | ||
3678 | switch (args->flags & I915_EXEC_RING_MASK) { | ||
3679 | case I915_EXEC_DEFAULT: | ||
3680 | case I915_EXEC_RENDER: | ||
3681 | ring = &dev_priv->render_ring; | ||
3682 | break; | ||
3683 | case I915_EXEC_BSD: | ||
3684 | if (!HAS_BSD(dev)) { | ||
3685 | DRM_ERROR("execbuf with invalid ring (BSD)\n"); | ||
3686 | return -EINVAL; | ||
3687 | } | ||
3688 | ring = &dev_priv->bsd_ring; | ||
3689 | break; | ||
3690 | case I915_EXEC_BLT: | ||
3691 | if (!HAS_BLT(dev)) { | ||
3692 | DRM_ERROR("execbuf with invalid ring (BLT)\n"); | ||
3693 | return -EINVAL; | ||
3694 | } | ||
3695 | ring = &dev_priv->blt_ring; | ||
3696 | break; | ||
3697 | default: | ||
3698 | DRM_ERROR("execbuf with unknown ring: %d\n", | ||
3699 | (int)(args->flags & I915_EXEC_RING_MASK)); | ||
3700 | return -EINVAL; | ||
3701 | } | ||
3702 | |||
3703 | if (args->buffer_count < 1) { | ||
3704 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
3705 | return -EINVAL; | ||
3706 | } | ||
3707 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); | ||
3708 | if (object_list == NULL) { | ||
3709 | DRM_ERROR("Failed to allocate object list for %d buffers\n", | ||
3710 | args->buffer_count); | ||
3711 | ret = -ENOMEM; | ||
3712 | goto pre_mutex_err; | ||
3713 | } | ||
3714 | |||
3715 | if (args->num_cliprects != 0) { | ||
3716 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | ||
3717 | GFP_KERNEL); | ||
3718 | if (cliprects == NULL) { | ||
3719 | ret = -ENOMEM; | ||
3720 | goto pre_mutex_err; | ||
3721 | } | ||
3722 | |||
3723 | ret = copy_from_user(cliprects, | ||
3724 | (struct drm_clip_rect __user *) | ||
3725 | (uintptr_t) args->cliprects_ptr, | ||
3726 | sizeof(*cliprects) * args->num_cliprects); | ||
3727 | if (ret != 0) { | ||
3728 | DRM_ERROR("copy %d cliprects failed: %d\n", | ||
3729 | args->num_cliprects, ret); | ||
3730 | ret = -EFAULT; | ||
3731 | goto pre_mutex_err; | ||
3732 | } | ||
3733 | } | ||
3734 | |||
3735 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
3736 | if (request == NULL) { | ||
3737 | ret = -ENOMEM; | ||
3738 | goto pre_mutex_err; | ||
3739 | } | ||
3740 | |||
3741 | ret = i915_mutex_lock_interruptible(dev); | ||
3742 | if (ret) | ||
3743 | goto pre_mutex_err; | ||
3744 | |||
3745 | if (dev_priv->mm.suspended) { | ||
3746 | mutex_unlock(&dev->struct_mutex); | ||
3747 | ret = -EBUSY; | ||
3748 | goto pre_mutex_err; | ||
3749 | } | ||
3750 | |||
3751 | /* Look up object handles */ | ||
3752 | for (i = 0; i < args->buffer_count; i++) { | ||
3753 | object_list[i] = drm_gem_object_lookup(dev, file, | ||
3754 | exec_list[i].handle); | ||
3755 | if (object_list[i] == NULL) { | ||
3756 | DRM_ERROR("Invalid object handle %d at index %d\n", | ||
3757 | exec_list[i].handle, i); | ||
3758 | /* prevent error path from reading uninitialized data */ | ||
3759 | args->buffer_count = i + 1; | ||
3760 | ret = -ENOENT; | ||
3761 | goto err; | ||
3762 | } | ||
3763 | |||
3764 | obj_priv = to_intel_bo(object_list[i]); | ||
3765 | if (obj_priv->in_execbuffer) { | ||
3766 | DRM_ERROR("Object %p appears more than once in object list\n", | ||
3767 | object_list[i]); | ||
3768 | /* prevent error path from reading uninitialized data */ | ||
3769 | args->buffer_count = i + 1; | ||
3770 | ret = -EINVAL; | ||
3771 | goto err; | ||
3772 | } | ||
3773 | obj_priv->in_execbuffer = true; | ||
3774 | } | ||
3775 | |||
3776 | /* Move the objects en masse into the GTT, evicting if necessary. */ ||
3777 | ret = i915_gem_execbuffer_pin(dev, file, | ||
3778 | object_list, exec_list, | ||
3779 | args->buffer_count); | ||
3780 | if (ret) | ||
3781 | goto err; | ||
3782 | |||
3783 | /* The objects are in their final locations, apply the relocations. */ | ||
3784 | for (i = 0; i < args->buffer_count; i++) { | ||
3785 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | ||
3786 | obj->base.pending_read_domains = 0; | ||
3787 | obj->base.pending_write_domain = 0; | ||
3788 | ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]); | ||
3789 | if (ret) | ||
3790 | goto err; | ||
3791 | } | ||
3792 | |||
3793 | /* Set the pending read domains for the batch buffer to COMMAND */ | ||
3794 | batch_obj = object_list[args->buffer_count-1]; | ||
3795 | if (batch_obj->pending_write_domain) { | ||
3796 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | ||
3797 | ret = -EINVAL; | ||
3798 | goto err; | ||
3799 | } | ||
3800 | batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | ||
3801 | |||
3802 | /* Sanity check the batch buffer */ | ||
3803 | exec_offset = to_intel_bo(batch_obj)->gtt_offset; | ||
3804 | ret = i915_gem_check_execbuffer(args, exec_offset); | ||
3805 | if (ret != 0) { | ||
3806 | DRM_ERROR("execbuf with invalid offset/length\n"); | ||
3807 | goto err; | ||
3808 | } | ||
3809 | |||
3810 | ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, | ||
3811 | object_list, args->buffer_count); | ||
3812 | if (ret) | ||
3813 | goto err; | ||
3814 | |||
3815 | for (i = 0; i < args->buffer_count; i++) { | ||
3816 | struct drm_gem_object *obj = object_list[i]; | ||
3817 | uint32_t old_write_domain = obj->write_domain; | ||
3818 | obj->write_domain = obj->pending_write_domain; | ||
3819 | trace_i915_gem_object_change_domain(obj, | ||
3820 | obj->read_domains, | ||
3821 | old_write_domain); | ||
3822 | } | ||
3823 | |||
3824 | #if WATCH_COHERENCY | ||
3825 | for (i = 0; i < args->buffer_count; i++) { | ||
3826 | i915_gem_object_check_coherency(object_list[i], | ||
3827 | exec_list[i].handle); | ||
3828 | } | ||
3829 | #endif | ||
3830 | |||
3831 | #if WATCH_EXEC | ||
3832 | i915_gem_dump_object(batch_obj, | ||
3833 | args->batch_len, | ||
3834 | __func__, | ||
3835 | ~0); | ||
3836 | #endif | ||
3837 | |||
3838 | /* Check for any pending flips. As we only maintain a flip queue depth | ||
3839 | * of 1, we can simply insert a WAIT for the next display flip prior | ||
3840 | * to executing the batch and avoid stalling the CPU. | ||
3841 | */ | ||
3842 | flips = 0; | ||
3843 | for (i = 0; i < args->buffer_count; i++) { | ||
3844 | if (object_list[i]->write_domain) | ||
3845 | flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); | ||
3846 | } | ||
3847 | if (flips) { | ||
3848 | int plane, flip_mask; | ||
3849 | |||
3850 | for (plane = 0; flips >> plane; plane++) { | ||
3851 | if (((flips >> plane) & 1) == 0) | ||
3852 | continue; | ||
3853 | |||
3854 | if (plane) | ||
3855 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
3856 | else | ||
3857 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
3858 | |||
3859 | intel_ring_begin(dev, ring, 2); | ||
3860 | intel_ring_emit(dev, ring, | ||
3861 | MI_WAIT_FOR_EVENT | flip_mask); | ||
3862 | intel_ring_emit(dev, ring, MI_NOOP); | ||
3863 | intel_ring_advance(dev, ring); | ||
3864 | } | ||
3865 | } | ||
3866 | |||
3867 | /* Exec the batchbuffer */ | ||
3868 | ret = ring->dispatch_gem_execbuffer(dev, ring, args, | ||
3869 | cliprects, exec_offset); | ||
3870 | if (ret) { | ||
3871 | DRM_ERROR("dispatch failed %d\n", ret); | ||
3872 | goto err; | ||
3873 | } | ||
3874 | |||
3875 | /* | ||
3876 | * Ensure that the commands in the batch buffer are | ||
3877 | * finished before the interrupt fires | ||
3878 | */ | ||
3879 | i915_retire_commands(dev, ring); | ||
3880 | |||
3881 | for (i = 0; i < args->buffer_count; i++) { | ||
3882 | struct drm_gem_object *obj = object_list[i]; | ||
3883 | |||
3884 | i915_gem_object_move_to_active(obj, ring); | ||
3885 | if (obj->write_domain) | ||
3886 | list_move_tail(&to_intel_bo(obj)->gpu_write_list, | ||
3887 | &ring->gpu_write_list); | ||
3888 | } | ||
3889 | |||
3890 | i915_add_request(dev, file, request, ring); | ||
3891 | request = NULL; | ||
3892 | |||
3893 | err: | ||
3894 | for (i = 0; i < args->buffer_count; i++) { | ||
3895 | if (object_list[i]) { | ||
3896 | obj_priv = to_intel_bo(object_list[i]); | ||
3897 | obj_priv->in_execbuffer = false; | ||
3898 | } | ||
3899 | drm_gem_object_unreference(object_list[i]); | ||
3900 | } | ||
3901 | |||
3902 | mutex_unlock(&dev->struct_mutex); | ||
3903 | |||
3904 | pre_mutex_err: | ||
3905 | drm_free_large(object_list); | ||
3906 | kfree(cliprects); | ||
3907 | kfree(request); | ||
3908 | |||
3909 | return ret; | ||
3910 | } | ||
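
The removed i915_gem_do_execbuffer() above is a straight-line sequence of fallible stages with a single err: label that releases everything acquired so far; the same unwinding runs on success too, since the object references and in_execbuffer flags are dropped either way. A skeleton of that control flow with the stages stubbed out:

#include <stdio.h>

static int lookup_objects(void) { return 0; }
static int pin_objects(void)    { return 0; }
static int relocate(void)       { return 0; }
static int dispatch(void)       { return 0; }

static int do_execbuffer(void)
{
    int ret;

    if ((ret = lookup_objects()))
        goto err;
    if ((ret = pin_objects()))          /* may evict and retry */
        goto err;
    if ((ret = relocate()))             /* patch GTT addresses in */
        goto err;
    if ((ret = dispatch()))             /* the batch itself */
        goto err;
    /* on success a request is queued so completion can be tracked */
err:
    /* drop in_execbuffer flags and object references, unlock */
    return ret;
}

int main(void) { return do_execbuffer(); }
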
3911 | |||
3912 | /* | ||
3913 | * Legacy execbuffer just creates an exec2 list from the original exec object | ||
3914 | * list array and passes it to the real function. | ||
3915 | */ | ||
3916 | int | ||
3917 | i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
3918 | struct drm_file *file_priv) | ||
3919 | { | ||
3920 | struct drm_i915_gem_execbuffer *args = data; | ||
3921 | struct drm_i915_gem_execbuffer2 exec2; | ||
3922 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
3923 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
3924 | int ret, i; | ||
3925 | |||
3926 | #if WATCH_EXEC | ||
3927 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
3928 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
3929 | #endif | ||
3930 | |||
3931 | if (args->buffer_count < 1) { | ||
3932 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
3933 | return -EINVAL; | ||
3934 | } | ||
3935 | |||
3936 | /* Copy in the exec list from userland */ | ||
3937 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
3938 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
3939 | if (exec_list == NULL || exec2_list == NULL) { | ||
3940 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
3941 | args->buffer_count); | ||
3942 | drm_free_large(exec_list); | ||
3943 | drm_free_large(exec2_list); | ||
3944 | return -ENOMEM; | ||
3945 | } | ||
3946 | ret = copy_from_user(exec_list, | ||
3947 | (struct drm_i915_relocation_entry __user *) | ||
3948 | (uintptr_t) args->buffers_ptr, | ||
3949 | sizeof(*exec_list) * args->buffer_count); | ||
3950 | if (ret != 0) { | ||
3951 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
3952 | args->buffer_count, ret); | ||
3953 | drm_free_large(exec_list); | ||
3954 | drm_free_large(exec2_list); | ||
3955 | return -EFAULT; | ||
3956 | } | ||
3957 | |||
3958 | for (i = 0; i < args->buffer_count; i++) { | ||
3959 | exec2_list[i].handle = exec_list[i].handle; | ||
3960 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | ||
3961 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | ||
3962 | exec2_list[i].alignment = exec_list[i].alignment; | ||
3963 | exec2_list[i].offset = exec_list[i].offset; | ||
3964 | if (INTEL_INFO(dev)->gen < 4) | ||
3965 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | ||
3966 | else | ||
3967 | exec2_list[i].flags = 0; | ||
3968 | } | ||
3969 | 3280 | ||
3970 | exec2.buffers_ptr = args->buffers_ptr; | 3281 | if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) |
3971 | exec2.buffer_count = args->buffer_count; | 3282 | ret = -EIO; |
3972 | exec2.batch_start_offset = args->batch_start_offset; | ||
3973 | exec2.batch_len = args->batch_len; | ||
3974 | exec2.DR1 = args->DR1; | ||
3975 | exec2.DR4 = args->DR4; | ||
3976 | exec2.num_cliprects = args->num_cliprects; | ||
3977 | exec2.cliprects_ptr = args->cliprects_ptr; | ||
3978 | exec2.flags = I915_EXEC_RENDER; | ||
3979 | |||
3980 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | ||
3981 | if (!ret) { | ||
3982 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
3983 | for (i = 0; i < args->buffer_count; i++) | ||
3984 | exec_list[i].offset = exec2_list[i].offset; | ||
3985 | /* ... and back out to userspace */ | ||
3986 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
3987 | (uintptr_t) args->buffers_ptr, | ||
3988 | exec_list, | ||
3989 | sizeof(*exec_list) * args->buffer_count); | ||
3990 | if (ret) { | ||
3991 | ret = -EFAULT; | ||
3992 | DRM_ERROR("failed to copy %d exec entries " | ||
3993 | "back to user (%d)\n", | ||
3994 | args->buffer_count, ret); | ||
3995 | } | 3283 | } |
3996 | } | 3284 | } |
3997 | 3285 | ||
3998 | drm_free_large(exec_list); | 3286 | if (ret == 0) |
3999 | drm_free_large(exec2_list); | 3287 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
4000 | return ret; | ||
4001 | } | ||
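
The legacy path converts each exec_object into the exec2 form field by field, forcing EXEC_OBJECT_NEEDS_FENCE on pre-gen4 chips because the old ABI carried no flags field. A sketch of the conversion with simplified stand-in struct layouts (the real ABI structs carry more fields):

#include <stdint.h>
#include <stdio.h>

#define EXEC_OBJECT_NEEDS_FENCE 1

struct exec_obj  { uint32_t handle, relocation_count;
                   uint64_t relocs_ptr, alignment, offset; };
struct exec_obj2 { uint32_t handle, relocation_count;
                   uint64_t relocs_ptr, alignment, offset, flags; };

static void convert(const struct exec_obj *in, struct exec_obj2 *out,
                    int count, int gen)
{
    int i;

    for (i = 0; i < count; i++) {
        out[i].handle           = in[i].handle;
        out[i].relocation_count = in[i].relocation_count;
        out[i].relocs_ptr       = in[i].relocs_ptr;
        out[i].alignment        = in[i].alignment;
        out[i].offset           = in[i].offset;
        /* old ABI has no flags: pre-gen4 always needs a fence */
        out[i].flags = gen < 4 ? EXEC_OBJECT_NEEDS_FENCE : 0;
    }
}

int main(void)
{
    struct exec_obj  in[1] = {{ .handle = 7 }};
    struct exec_obj2 out[1];

    convert(in, out, 1, 3);
    printf("handle %u flags %llu\n", out[0].handle,
           (unsigned long long)out[0].flags);
    return 0;
}
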
4002 | |||
4003 | int | ||
4004 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | ||
4005 | struct drm_file *file_priv) | ||
4006 | { | ||
4007 | struct drm_i915_gem_execbuffer2 *args = data; | ||
4008 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
4009 | int ret; | ||
4010 | |||
4011 | #if WATCH_EXEC | ||
4012 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
4013 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
4014 | #endif | ||
4015 | |||
4016 | if (args->buffer_count < 1) { | ||
4017 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | ||
4018 | return -EINVAL; | ||
4019 | } | ||
4020 | |||
4021 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
4022 | if (exec2_list == NULL) { | ||
4023 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
4024 | args->buffer_count); | ||
4025 | return -ENOMEM; | ||
4026 | } | ||
4027 | ret = copy_from_user(exec2_list, | ||
4028 | (struct drm_i915_relocation_entry __user *) | ||
4029 | (uintptr_t) args->buffers_ptr, | ||
4030 | sizeof(*exec2_list) * args->buffer_count); | ||
4031 | if (ret != 0) { | ||
4032 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
4033 | args->buffer_count, ret); | ||
4034 | drm_free_large(exec2_list); | ||
4035 | return -EFAULT; | ||
4036 | } | ||
4037 | |||
4038 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | ||
4039 | if (!ret) { | ||
4040 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
4041 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
4042 | (uintptr_t) args->buffers_ptr, | ||
4043 | exec2_list, | ||
4044 | sizeof(*exec2_list) * args->buffer_count); | ||
4045 | if (ret) { | ||
4046 | ret = -EFAULT; | ||
4047 | DRM_ERROR("failed to copy %d exec entries " | ||
4048 | "back to user (%d)\n", | ||
4049 | args->buffer_count, ret); | ||
4050 | } | ||
4051 | } | ||
4052 | 3288 | ||
4053 | drm_free_large(exec2_list); | ||
4054 | return ret; | 3289 | return ret; |
4055 | } | 3290 | } |
4056 | 3291 | ||
4057 | int | 3292 | int |
4058 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | 3293 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
3294 | uint32_t alignment, | ||
3295 | bool map_and_fenceable) | ||
4059 | { | 3296 | { |
4060 | struct drm_device *dev = obj->dev; | 3297 | struct drm_device *dev = obj->base.dev; |
4061 | struct drm_i915_private *dev_priv = dev->dev_private; | 3298 | struct drm_i915_private *dev_priv = dev->dev_private; |
4062 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4063 | int ret; | 3299 | int ret; |
4064 | 3300 | ||
4065 | BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); | 3301 | BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); |
4066 | WARN_ON(i915_verify_lists(dev)); | 3302 | WARN_ON(i915_verify_lists(dev)); |
4067 | 3303 | ||
4068 | if (obj_priv->gtt_space != NULL) { | 3304 | if (obj->gtt_space != NULL) { |
4069 | if (alignment == 0) | 3305 | if ((alignment && obj->gtt_offset & (alignment - 1)) || |
4070 | alignment = i915_gem_get_gtt_alignment(obj); | 3306 | (map_and_fenceable && !obj->map_and_fenceable)) { |
4071 | if (obj_priv->gtt_offset & (alignment - 1)) { | 3307 | WARN(obj->pin_count, |
4072 | WARN(obj_priv->pin_count, | 3308 | "bo is already pinned with incorrect alignment:" |
4073 | "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n", | 3309 | " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," |
4074 | obj_priv->gtt_offset, alignment); | 3310 | " obj->map_and_fenceable=%d\n", |
3311 | obj->gtt_offset, alignment, | ||
3312 | map_and_fenceable, | ||
3313 | obj->map_and_fenceable); | ||
4075 | ret = i915_gem_object_unbind(obj); | 3314 | ret = i915_gem_object_unbind(obj); |
4076 | if (ret) | 3315 | if (ret) |
4077 | return ret; | 3316 | return ret; |
4078 | } | 3317 | } |
4079 | } | 3318 | } |
4080 | 3319 | ||
4081 | if (obj_priv->gtt_space == NULL) { | 3320 | if (obj->gtt_space == NULL) { |
4082 | ret = i915_gem_object_bind_to_gtt(obj, alignment); | 3321 | ret = i915_gem_object_bind_to_gtt(obj, alignment, |
3322 | map_and_fenceable); | ||
4083 | if (ret) | 3323 | if (ret) |
4084 | return ret; | 3324 | return ret; |
4085 | } | 3325 | } |
4086 | 3326 | ||
4087 | obj_priv->pin_count++; | 3327 | if (obj->pin_count++ == 0) { |
4088 | 3328 | if (!obj->active) | |
4089 | /* If the object is not active and not pending a flush, | 3329 | list_move_tail(&obj->mm_list, |
4090 | * remove it from the inactive list | ||
4091 | */ | ||
4092 | if (obj_priv->pin_count == 1) { | ||
4093 | i915_gem_info_add_pin(dev_priv, obj->size); | ||
4094 | if (!obj_priv->active) | ||
4095 | list_move_tail(&obj_priv->mm_list, | ||
4096 | &dev_priv->mm.pinned_list); | 3330 | &dev_priv->mm.pinned_list); |
4097 | } | 3331 | } |
3332 | obj->pin_mappable |= map_and_fenceable; | ||
4098 | 3333 | ||
4099 | WARN_ON(i915_verify_lists(dev)); | 3334 | WARN_ON(i915_verify_lists(dev)); |
4100 | return 0; | 3335 | return 0; |
4101 | } | 3336 | } |
4102 | 3337 | ||
4103 | void | 3338 | void |
4104 | i915_gem_object_unpin(struct drm_gem_object *obj) | 3339 | i915_gem_object_unpin(struct drm_i915_gem_object *obj) |
4105 | { | 3340 | { |
4106 | struct drm_device *dev = obj->dev; | 3341 | struct drm_device *dev = obj->base.dev; |
4107 | drm_i915_private_t *dev_priv = dev->dev_private; | 3342 | drm_i915_private_t *dev_priv = dev->dev_private; |
4108 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4109 | 3343 | ||
4110 | WARN_ON(i915_verify_lists(dev)); | 3344 | WARN_ON(i915_verify_lists(dev)); |
4111 | obj_priv->pin_count--; | 3345 | BUG_ON(obj->pin_count == 0); |
4112 | BUG_ON(obj_priv->pin_count < 0); | 3346 | BUG_ON(obj->gtt_space == NULL); |
4113 | BUG_ON(obj_priv->gtt_space == NULL); | ||
4114 | 3347 | ||
4115 | /* If the object is no longer pinned, and is | 3348 | if (--obj->pin_count == 0) { |
4116 | * neither active nor being flushed, then stick it on | 3349 | if (!obj->active) |
4117 | * the inactive list | 3350 | list_move_tail(&obj->mm_list, |
4118 | */ | ||
4119 | if (obj_priv->pin_count == 0) { | ||
4120 | if (!obj_priv->active) | ||
4121 | list_move_tail(&obj_priv->mm_list, | ||
4122 | &dev_priv->mm.inactive_list); | 3351 | &dev_priv->mm.inactive_list); |
4123 | i915_gem_info_remove_pin(dev_priv, obj->size); | 3352 | obj->pin_mappable = false; |
4124 | } | 3353 | } |
4125 | WARN_ON(i915_verify_lists(dev)); | 3354 | WARN_ON(i915_verify_lists(dev)); |
4126 | } | 3355 | } |
4127 | 3356 | ||
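The rewrite folds the list bookkeeping into the pin-count transitions: only the 0-to-1 pin and the 1-to-0 unpin touch the LRU lists, and pin_mappable is cleared on the final unpin. A minimal runnable toy of that refcount discipline (illustrative only, not driver code):

```c
#include <assert.h>
#include <stdio.h>

enum bo_list { INACTIVE, PINNED };

struct bo {
	int pin_count;
	int active;
	enum bo_list list;
};

/* The first pin moves an inactive object onto the pinned list. */
static void bo_pin(struct bo *o)
{
	if (o->pin_count++ == 0 && !o->active)
		o->list = PINNED;
}

/* The last unpin returns it to the inactive list. */
static void bo_unpin(struct bo *o)
{
	assert(o->pin_count > 0);
	if (--o->pin_count == 0 && !o->active)
		o->list = INACTIVE;
}

int main(void)
{
	struct bo o = { 0, 0, INACTIVE };

	bo_pin(&o);
	bo_pin(&o);			/* nested pins are refcounted */
	bo_unpin(&o);
	printf("still pinned: %d\n", o.list == PINNED);	/* 1 */
	bo_unpin(&o);
	printf("inactive again: %d\n", o.list == INACTIVE);	/* 1 */
	return 0;
}
```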
4128 | int | 3357 | int |
4129 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, | 3358 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
4130 | struct drm_file *file_priv) | 3359 | struct drm_file *file) |
4131 | { | 3360 | { |
4132 | struct drm_i915_gem_pin *args = data; | 3361 | struct drm_i915_gem_pin *args = data; |
4133 | struct drm_gem_object *obj; | 3362 | struct drm_i915_gem_object *obj; |
4134 | struct drm_i915_gem_object *obj_priv; | ||
4135 | int ret; | 3363 | int ret; |
4136 | 3364 | ||
4137 | ret = i915_mutex_lock_interruptible(dev); | 3365 | ret = i915_mutex_lock_interruptible(dev); |
4138 | if (ret) | 3366 | if (ret) |
4139 | return ret; | 3367 | return ret; |
4140 | 3368 | ||
4141 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 3369 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4142 | if (obj == NULL) { | 3370 | if (obj == NULL) { |
4143 | ret = -ENOENT; | 3371 | ret = -ENOENT; |
4144 | goto unlock; | 3372 | goto unlock; |
4145 | } | 3373 | } |
4146 | obj_priv = to_intel_bo(obj); | ||
4147 | 3374 | ||
4148 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 3375 | if (obj->madv != I915_MADV_WILLNEED) { |
4149 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 3376 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); |
4150 | ret = -EINVAL; | 3377 | ret = -EINVAL; |
4151 | goto out; | 3378 | goto out; |
4152 | } | 3379 | } |
4153 | 3380 | ||
4154 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 3381 | if (obj->pin_filp != NULL && obj->pin_filp != file) { |
4155 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 3382 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", |
4156 | args->handle); | 3383 | args->handle); |
4157 | ret = -EINVAL; | 3384 | ret = -EINVAL; |
4158 | goto out; | 3385 | goto out; |
4159 | } | 3386 | } |
4160 | 3387 | ||
4161 | obj_priv->user_pin_count++; | 3388 | obj->user_pin_count++; |
4162 | obj_priv->pin_filp = file_priv; | 3389 | obj->pin_filp = file; |
4163 | if (obj_priv->user_pin_count == 1) { | 3390 | if (obj->user_pin_count == 1) { |
4164 | ret = i915_gem_object_pin(obj, args->alignment); | 3391 | ret = i915_gem_object_pin(obj, args->alignment, true); |
4165 | if (ret) | 3392 | if (ret) |
4166 | goto out; | 3393 | goto out; |
4167 | } | 3394 | } |
@@ -4170,9 +3397,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
4170 | * as the X server doesn't manage domains yet | 3397 | * as the X server doesn't manage domains yet |
4171 | */ | 3398 | */ |
4172 | i915_gem_object_flush_cpu_write_domain(obj); | 3399 | i915_gem_object_flush_cpu_write_domain(obj); |
4173 | args->offset = obj_priv->gtt_offset; | 3400 | args->offset = obj->gtt_offset; |
4174 | out: | 3401 | out: |
4175 | drm_gem_object_unreference(obj); | 3402 | drm_gem_object_unreference(&obj->base); |
4176 | unlock: | 3403 | unlock: |
4177 | mutex_unlock(&dev->struct_mutex); | 3404 | mutex_unlock(&dev->struct_mutex); |
4178 | return ret; | 3405 | return ret; |
@@ -4180,38 +3407,36 @@ unlock: | |||
4180 | 3407 | ||
4181 | int | 3408 | int |
4182 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | 3409 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
4183 | struct drm_file *file_priv) | 3410 | struct drm_file *file) |
4184 | { | 3411 | { |
4185 | struct drm_i915_gem_pin *args = data; | 3412 | struct drm_i915_gem_pin *args = data; |
4186 | struct drm_gem_object *obj; | 3413 | struct drm_i915_gem_object *obj; |
4187 | struct drm_i915_gem_object *obj_priv; | ||
4188 | int ret; | 3414 | int ret; |
4189 | 3415 | ||
4190 | ret = i915_mutex_lock_interruptible(dev); | 3416 | ret = i915_mutex_lock_interruptible(dev); |
4191 | if (ret) | 3417 | if (ret) |
4192 | return ret; | 3418 | return ret; |
4193 | 3419 | ||
4194 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 3420 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4195 | if (obj == NULL) { | 3421 | if (obj == NULL) { |
4196 | ret = -ENOENT; | 3422 | ret = -ENOENT; |
4197 | goto unlock; | 3423 | goto unlock; |
4198 | } | 3424 | } |
4199 | obj_priv = to_intel_bo(obj); | ||
4200 | 3425 | ||
4201 | if (obj_priv->pin_filp != file_priv) { | 3426 | if (obj->pin_filp != file) { |
4202 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 3427 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", |
4203 | args->handle); | 3428 | args->handle); |
4204 | ret = -EINVAL; | 3429 | ret = -EINVAL; |
4205 | goto out; | 3430 | goto out; |
4206 | } | 3431 | } |
4207 | obj_priv->user_pin_count--; | 3432 | obj->user_pin_count--; |
4208 | if (obj_priv->user_pin_count == 0) { | 3433 | if (obj->user_pin_count == 0) { |
4209 | obj_priv->pin_filp = NULL; | 3434 | obj->pin_filp = NULL; |
4210 | i915_gem_object_unpin(obj); | 3435 | i915_gem_object_unpin(obj); |
4211 | } | 3436 | } |
4212 | 3437 | ||
4213 | out: | 3438 | out: |
4214 | drm_gem_object_unreference(obj); | 3439 | drm_gem_object_unreference(&obj->base); |
4215 | unlock: | 3440 | unlock: |
4216 | mutex_unlock(&dev->struct_mutex); | 3441 | mutex_unlock(&dev->struct_mutex); |
4217 | return ret; | 3442 | return ret; |
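For context, a hedged sketch of how userspace drives these two legacy, root-only ioctls; it assumes libdrm's UAPI headers and an already-open DRM fd, and omits the EINTR retry that a production caller (e.g. drmIoctl()) would add:

```c
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Pin a GEM object and learn its (now stable) GTT offset. */
static int bo_pin(int fd, uint32_t handle, uint64_t alignment,
		  uint64_t *offset)
{
	struct drm_i915_gem_pin pin;

	memset(&pin, 0, sizeof(pin));
	pin.handle = handle;
	pin.alignment = alignment;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin))
		return -errno;
	*offset = pin.offset;
	return 0;
}

/* Drop the caller's pin; the kernel unpins for real on the last one. */
static int bo_unpin(int fd, uint32_t handle)
{
	struct drm_i915_gem_unpin unpin;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = handle;
	return ioctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin) ? -errno : 0;
}
```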
@@ -4219,52 +3444,64 @@ unlock: | |||
4219 | 3444 | ||
4220 | int | 3445 | int |
4221 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, | 3446 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
4222 | struct drm_file *file_priv) | 3447 | struct drm_file *file) |
4223 | { | 3448 | { |
4224 | struct drm_i915_gem_busy *args = data; | 3449 | struct drm_i915_gem_busy *args = data; |
4225 | struct drm_gem_object *obj; | 3450 | struct drm_i915_gem_object *obj; |
4226 | struct drm_i915_gem_object *obj_priv; | ||
4227 | int ret; | 3451 | int ret; |
4228 | 3452 | ||
4229 | ret = i915_mutex_lock_interruptible(dev); | 3453 | ret = i915_mutex_lock_interruptible(dev); |
4230 | if (ret) | 3454 | if (ret) |
4231 | return ret; | 3455 | return ret; |
4232 | 3456 | ||
4233 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 3457 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
4234 | if (obj == NULL) { | 3458 | if (obj == NULL) { |
4235 | ret = -ENOENT; | 3459 | ret = -ENOENT; |
4236 | goto unlock; | 3460 | goto unlock; |
4237 | } | 3461 | } |
4238 | obj_priv = to_intel_bo(obj); | ||
4239 | 3462 | ||
4240 | /* Count all active objects as busy, even if they are currently not used | 3463 | /* Count all active objects as busy, even if they are currently not used |
4241 | * by the gpu. Users of this interface expect objects to eventually | 3464 | * by the gpu. Users of this interface expect objects to eventually |
4242 | * become non-busy without any further actions, therefore emit any | 3465 | * become non-busy without any further actions, therefore emit any |
4243 | * necessary flushes here. | 3466 | * necessary flushes here. |
4244 | */ | 3467 | */ |
4245 | args->busy = obj_priv->active; | 3468 | args->busy = obj->active; |
4246 | if (args->busy) { | 3469 | if (args->busy) { |
4247 | /* Unconditionally flush objects, even when the gpu still uses this | 3470 | /* Unconditionally flush objects, even when the gpu still uses this |
4248 | * object. Userspace calling this function indicates that it wants to | 3471 | * object. Userspace calling this function indicates that it wants to |
4249 | * use this buffer sooner rather than later, so issuing the required | 3472 | * use this buffer sooner rather than later, so issuing the required |
4250 | * flush earlier is beneficial. | 3473 | * flush earlier is beneficial. |
4251 | */ | 3474 | */ |
4252 | if (obj->write_domain & I915_GEM_GPU_DOMAINS) | 3475 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { |
4253 | i915_gem_flush_ring(dev, file_priv, | 3476 | ret = i915_gem_flush_ring(dev, obj->ring, |
4254 | obj_priv->ring, | 3477 | 0, obj->base.write_domain); |
4255 | 0, obj->write_domain); | 3478 | } else if (obj->ring->outstanding_lazy_request == |
3479 | obj->last_rendering_seqno) { | ||
3480 | struct drm_i915_gem_request *request; | ||
3481 | |||
3482 | /* This ring is not being cleared by active usage, | ||
3483 | * so emit a request to do so. | ||
3484 | */ | ||
3485 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
3486 | if (request) | ||
3487 | ret = i915_add_request(dev, | ||
3488 | NULL, request, | ||
3489 | obj->ring); | ||
3490 | else | ||
3491 | ret = -ENOMEM; | ||
3492 | } | ||
4256 | 3493 | ||
4257 | /* Update the active list for the hardware's current position. | 3494 | /* Update the active list for the hardware's current position. |
4258 | * Otherwise this only updates on a delayed timer or when irqs | 3495 | * Otherwise this only updates on a delayed timer or when irqs |
4259 | * are actually unmasked, and our working set ends up being | 3496 | * are actually unmasked, and our working set ends up being |
4260 | * larger than required. | 3497 | * larger than required. |
4261 | */ | 3498 | */ |
4262 | i915_gem_retire_requests_ring(dev, obj_priv->ring); | 3499 | i915_gem_retire_requests_ring(dev, obj->ring); |
4263 | 3500 | ||
4264 | args->busy = obj_priv->active; | 3501 | args->busy = obj->active; |
4265 | } | 3502 | } |
4266 | 3503 | ||
4267 | drm_gem_object_unreference(obj); | 3504 | drm_gem_object_unreference(&obj->base); |
4268 | unlock: | 3505 | unlock: |
4269 | mutex_unlock(&dev->struct_mutex); | 3506 | mutex_unlock(&dev->struct_mutex); |
4270 | return ret; | 3507 | return ret; |
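The comments above explain why the busy ioctl flushes eagerly: callers poll it and expect objects to become idle without further action on their part. A minimal sketch of such a poll loop (assumes libdrm's UAPI header; a real caller would use drmIoctl() for EINTR handling):

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

/* Poll (politely) until the GPU is done with the object. */
static int bo_wait_idle(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	do {
		if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return -1;
		if (busy.busy)
			usleep(1000);	/* back off; the kernel has already
					 * emitted any needed flush */
	} while (busy.busy);
	return 0;
}
```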
@@ -4282,8 +3519,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4282 | struct drm_file *file_priv) | 3519 | struct drm_file *file_priv) |
4283 | { | 3520 | { |
4284 | struct drm_i915_gem_madvise *args = data; | 3521 | struct drm_i915_gem_madvise *args = data; |
4285 | struct drm_gem_object *obj; | 3522 | struct drm_i915_gem_object *obj; |
4286 | struct drm_i915_gem_object *obj_priv; | ||
4287 | int ret; | 3523 | int ret; |
4288 | 3524 | ||
4289 | switch (args->madv) { | 3525 | switch (args->madv) { |
@@ -4298,37 +3534,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |||
4298 | if (ret) | 3534 | if (ret) |
4299 | return ret; | 3535 | return ret; |
4300 | 3536 | ||
4301 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 3537 | obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); |
4302 | if (obj == NULL) { | 3538 | if (obj == NULL) { |
4303 | ret = -ENOENT; | 3539 | ret = -ENOENT; |
4304 | goto unlock; | 3540 | goto unlock; |
4305 | } | 3541 | } |
4306 | obj_priv = to_intel_bo(obj); | ||
4307 | 3542 | ||
4308 | if (obj_priv->pin_count) { | 3543 | if (obj->pin_count) { |
4309 | ret = -EINVAL; | 3544 | ret = -EINVAL; |
4310 | goto out; | 3545 | goto out; |
4311 | } | 3546 | } |
4312 | 3547 | ||
4313 | if (obj_priv->madv != __I915_MADV_PURGED) | 3548 | if (obj->madv != __I915_MADV_PURGED) |
4314 | obj_priv->madv = args->madv; | 3549 | obj->madv = args->madv; |
4315 | 3550 | ||
4316 | /* if the object is no longer bound, discard its backing storage */ | 3551 | /* if the object is no longer bound, discard its backing storage */ |
4317 | if (i915_gem_object_is_purgeable(obj_priv) && | 3552 | if (i915_gem_object_is_purgeable(obj) && |
4318 | obj_priv->gtt_space == NULL) | 3553 | obj->gtt_space == NULL) |
4319 | i915_gem_object_truncate(obj); | 3554 | i915_gem_object_truncate(obj); |
4320 | 3555 | ||
4321 | args->retained = obj_priv->madv != __I915_MADV_PURGED; | 3556 | args->retained = obj->madv != __I915_MADV_PURGED; |
4322 | 3557 | ||
4323 | out: | 3558 | out: |
4324 | drm_gem_object_unreference(obj); | 3559 | drm_gem_object_unreference(&obj->base); |
4325 | unlock: | 3560 | unlock: |
4326 | mutex_unlock(&dev->struct_mutex); | 3561 | mutex_unlock(&dev->struct_mutex); |
4327 | return ret; | 3562 | return ret; |
4328 | } | 3563 | } |
4329 | 3564 | ||
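A sketch of the purgeable-buffer protocol this ioctl implements, seen from userspace (assumes libdrm's UAPI header): mark cached-but-idle buffers DONTNEED, and on reuse flip them back to WILLNEED, checking `retained` to learn whether the kernel purged the backing store in between:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns 1 if the backing pages survive, 0 if purged, -1 on error. */
static int bo_madvise(int fd, uint32_t handle, uint32_t madv)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv = madv,	/* I915_MADV_WILLNEED or I915_MADV_DONTNEED */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
		return -1;
	return arg.retained;
}
```

A buffer cache would call bo_madvise(fd, h, I915_MADV_DONTNEED) when shelving a buffer and re-validate with I915_MADV_WILLNEED before reuse, reallocating if `retained` comes back 0.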
4330 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 3565 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
4331 | size_t size) | 3566 | size_t size) |
4332 | { | 3567 | { |
4333 | struct drm_i915_private *dev_priv = dev->dev_private; | 3568 | struct drm_i915_private *dev_priv = dev->dev_private; |
4334 | struct drm_i915_gem_object *obj; | 3569 | struct drm_i915_gem_object *obj; |
@@ -4351,11 +3586,15 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | |||
4351 | obj->base.driver_private = NULL; | 3586 | obj->base.driver_private = NULL; |
4352 | obj->fence_reg = I915_FENCE_REG_NONE; | 3587 | obj->fence_reg = I915_FENCE_REG_NONE; |
4353 | INIT_LIST_HEAD(&obj->mm_list); | 3588 | INIT_LIST_HEAD(&obj->mm_list); |
3589 | INIT_LIST_HEAD(&obj->gtt_list); | ||
4354 | INIT_LIST_HEAD(&obj->ring_list); | 3590 | INIT_LIST_HEAD(&obj->ring_list); |
3591 | INIT_LIST_HEAD(&obj->exec_list); | ||
4355 | INIT_LIST_HEAD(&obj->gpu_write_list); | 3592 | INIT_LIST_HEAD(&obj->gpu_write_list); |
4356 | obj->madv = I915_MADV_WILLNEED; | 3593 | obj->madv = I915_MADV_WILLNEED; |
3594 | /* Avoid an unnecessary call to unbind on the first bind. */ | ||
3595 | obj->map_and_fenceable = true; | ||
4357 | 3596 | ||
4358 | return &obj->base; | 3597 | return obj; |
4359 | } | 3598 | } |
4360 | 3599 | ||
4361 | int i915_gem_init_object(struct drm_gem_object *obj) | 3600 | int i915_gem_init_object(struct drm_gem_object *obj) |
@@ -4365,42 +3604,41 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4365 | return 0; | 3604 | return 0; |
4366 | } | 3605 | } |
4367 | 3606 | ||
4368 | static void i915_gem_free_object_tail(struct drm_gem_object *obj) | 3607 | static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) |
4369 | { | 3608 | { |
4370 | struct drm_device *dev = obj->dev; | 3609 | struct drm_device *dev = obj->base.dev; |
4371 | drm_i915_private_t *dev_priv = dev->dev_private; | 3610 | drm_i915_private_t *dev_priv = dev->dev_private; |
4372 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
4373 | int ret; | 3611 | int ret; |
4374 | 3612 | ||
4375 | ret = i915_gem_object_unbind(obj); | 3613 | ret = i915_gem_object_unbind(obj); |
4376 | if (ret == -ERESTARTSYS) { | 3614 | if (ret == -ERESTARTSYS) { |
4377 | list_move(&obj_priv->mm_list, | 3615 | list_move(&obj->mm_list, |
4378 | &dev_priv->mm.deferred_free_list); | 3616 | &dev_priv->mm.deferred_free_list); |
4379 | return; | 3617 | return; |
4380 | } | 3618 | } |
4381 | 3619 | ||
4382 | if (obj_priv->mmap_offset) | 3620 | if (obj->base.map_list.map) |
4383 | i915_gem_free_mmap_offset(obj); | 3621 | i915_gem_free_mmap_offset(obj); |
4384 | 3622 | ||
4385 | drm_gem_object_release(obj); | 3623 | drm_gem_object_release(&obj->base); |
4386 | i915_gem_info_remove_obj(dev_priv, obj->size); | 3624 | i915_gem_info_remove_obj(dev_priv, obj->base.size); |
4387 | 3625 | ||
4388 | kfree(obj_priv->page_cpu_valid); | 3626 | kfree(obj->page_cpu_valid); |
4389 | kfree(obj_priv->bit_17); | 3627 | kfree(obj->bit_17); |
4390 | kfree(obj_priv); | 3628 | kfree(obj); |
4391 | } | 3629 | } |
4392 | 3630 | ||
4393 | void i915_gem_free_object(struct drm_gem_object *obj) | 3631 | void i915_gem_free_object(struct drm_gem_object *gem_obj) |
4394 | { | 3632 | { |
4395 | struct drm_device *dev = obj->dev; | 3633 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
4396 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 3634 | struct drm_device *dev = obj->base.dev; |
4397 | 3635 | ||
4398 | trace_i915_gem_object_destroy(obj); | 3636 | trace_i915_gem_object_destroy(obj); |
4399 | 3637 | ||
4400 | while (obj_priv->pin_count > 0) | 3638 | while (obj->pin_count > 0) |
4401 | i915_gem_object_unpin(obj); | 3639 | i915_gem_object_unpin(obj); |
4402 | 3640 | ||
4403 | if (obj_priv->phys_obj) | 3641 | if (obj->phys_obj) |
4404 | i915_gem_detach_phys_object(dev, obj); | 3642 | i915_gem_detach_phys_object(dev, obj); |
4405 | 3643 | ||
4406 | i915_gem_free_object_tail(obj); | 3644 | i915_gem_free_object_tail(obj); |
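The -ERESTARTSYS branch in free_object_tail() above parks objects the driver cannot unbind right now on mm.deferred_free_list, to be retried later. A runnable toy of that parking pattern (ERESTARTSYS is a kernel-internal errno, so its value is stubbed here):

```c
#include <stdio.h>
#include <stdlib.h>

#define ERESTARTSYS 512		/* kernel-internal errno; stand-in here */

struct obj { struct obj *next; int busy; };

static struct obj *deferred;	/* stand-in for mm.deferred_free_list */

static int try_unbind(struct obj *o)
{
	return o->busy ? -ERESTARTSYS : 0;	/* GPU still using it */
}

static void free_object(struct obj *o)
{
	if (try_unbind(o)) {	/* cannot finish now: park and retry later */
		o->next = deferred;
		deferred = o;
		return;
	}
	free(o);
}

/* Called later, e.g. from retire work, once requests have completed. */
static void drain_deferred(void)
{
	while (deferred) {
		struct obj *o = deferred;
		deferred = o->next;
		o->busy = 0;	/* its last request has retired */
		free_object(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->busy = 1;
	free_object(o);				/* parked, not freed */
	printf("parked: %d\n", deferred != NULL);	/* 1 */
	drain_deferred();			/* now actually freed */
	return 0;
}
```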
@@ -4427,13 +3665,15 @@ i915_gem_idle(struct drm_device *dev) | |||
4427 | 3665 | ||
4428 | /* Under UMS, be paranoid and evict. */ | 3666 | /* Under UMS, be paranoid and evict. */ |
4429 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { | 3667 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
4430 | ret = i915_gem_evict_inactive(dev); | 3668 | ret = i915_gem_evict_inactive(dev, false); |
4431 | if (ret) { | 3669 | if (ret) { |
4432 | mutex_unlock(&dev->struct_mutex); | 3670 | mutex_unlock(&dev->struct_mutex); |
4433 | return ret; | 3671 | return ret; |
4434 | } | 3672 | } |
4435 | } | 3673 | } |
4436 | 3674 | ||
3675 | i915_gem_reset_fences(dev); | ||
3676 | |||
4437 | /* Hack! Don't let anybody do execbuf while we don't control the chip. | 3677 | /* Hack! Don't let anybody do execbuf while we don't control the chip. |
4438 | * We need to replace this with a semaphore, or something. | 3678 | * We need to replace this with a semaphore, or something. |
4439 | * And not confound mm.suspended! | 3679 | * And not confound mm.suspended! |
@@ -4452,82 +3692,15 @@ i915_gem_idle(struct drm_device *dev) | |||
4452 | return 0; | 3692 | return 0; |
4453 | } | 3693 | } |
4454 | 3694 | ||
4455 | /* | ||
4456 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
4457 | * over cache flushing. | ||
4458 | */ | ||
4459 | static int | ||
4460 | i915_gem_init_pipe_control(struct drm_device *dev) | ||
4461 | { | ||
4462 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4463 | struct drm_gem_object *obj; | ||
4464 | struct drm_i915_gem_object *obj_priv; | ||
4465 | int ret; | ||
4466 | |||
4467 | obj = i915_gem_alloc_object(dev, 4096); | ||
4468 | if (obj == NULL) { | ||
4469 | DRM_ERROR("Failed to allocate seqno page\n"); | ||
4470 | ret = -ENOMEM; | ||
4471 | goto err; | ||
4472 | } | ||
4473 | obj_priv = to_intel_bo(obj); | ||
4474 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
4475 | |||
4476 | ret = i915_gem_object_pin(obj, 4096); | ||
4477 | if (ret) | ||
4478 | goto err_unref; | ||
4479 | |||
4480 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; | ||
4481 | dev_priv->seqno_page = kmap(obj_priv->pages[0]); | ||
4482 | if (dev_priv->seqno_page == NULL) | ||
4483 | goto err_unpin; | ||
4484 | |||
4485 | dev_priv->seqno_obj = obj; | ||
4486 | memset(dev_priv->seqno_page, 0, PAGE_SIZE); | ||
4487 | |||
4488 | return 0; | ||
4489 | |||
4490 | err_unpin: | ||
4491 | i915_gem_object_unpin(obj); | ||
4492 | err_unref: | ||
4493 | drm_gem_object_unreference(obj); | ||
4494 | err: | ||
4495 | return ret; | ||
4496 | } | ||
4497 | |||
4498 | |||
4499 | static void | ||
4500 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | ||
4501 | { | ||
4502 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4503 | struct drm_gem_object *obj; | ||
4504 | struct drm_i915_gem_object *obj_priv; | ||
4505 | |||
4506 | obj = dev_priv->seqno_obj; | ||
4507 | obj_priv = to_intel_bo(obj); | ||
4508 | kunmap(obj_priv->pages[0]); | ||
4509 | i915_gem_object_unpin(obj); | ||
4510 | drm_gem_object_unreference(obj); | ||
4511 | dev_priv->seqno_obj = NULL; | ||
4512 | |||
4513 | dev_priv->seqno_page = NULL; | ||
4514 | } | ||
4515 | |||
4516 | int | 3695 | int |
4517 | i915_gem_init_ringbuffer(struct drm_device *dev) | 3696 | i915_gem_init_ringbuffer(struct drm_device *dev) |
4518 | { | 3697 | { |
4519 | drm_i915_private_t *dev_priv = dev->dev_private; | 3698 | drm_i915_private_t *dev_priv = dev->dev_private; |
4520 | int ret; | 3699 | int ret; |
4521 | 3700 | ||
4522 | if (HAS_PIPE_CONTROL(dev)) { | ||
4523 | ret = i915_gem_init_pipe_control(dev); | ||
4524 | if (ret) | ||
4525 | return ret; | ||
4526 | } | ||
4527 | |||
4528 | ret = intel_init_render_ring_buffer(dev); | 3701 | ret = intel_init_render_ring_buffer(dev); |
4529 | if (ret) | 3702 | if (ret) |
4530 | goto cleanup_pipe_control; | 3703 | return ret; |
4531 | 3704 | ||
4532 | if (HAS_BSD(dev)) { | 3705 | if (HAS_BSD(dev)) { |
4533 | ret = intel_init_bsd_ring_buffer(dev); | 3706 | ret = intel_init_bsd_ring_buffer(dev); |
@@ -4546,12 +3719,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4546 | return 0; | 3719 | return 0; |
4547 | 3720 | ||
4548 | cleanup_bsd_ring: | 3721 | cleanup_bsd_ring: |
4549 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | 3722 | intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); |
4550 | cleanup_render_ring: | 3723 | cleanup_render_ring: |
4551 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 3724 | intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); |
4552 | cleanup_pipe_control: | ||
4553 | if (HAS_PIPE_CONTROL(dev)) | ||
4554 | i915_gem_cleanup_pipe_control(dev); | ||
4555 | return ret; | 3725 | return ret; |
4556 | } | 3726 | } |
4557 | 3727 | ||
@@ -4559,12 +3729,10 @@ void | |||
4559 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) | 3729 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) |
4560 | { | 3730 | { |
4561 | drm_i915_private_t *dev_priv = dev->dev_private; | 3731 | drm_i915_private_t *dev_priv = dev->dev_private; |
3732 | int i; | ||
4562 | 3733 | ||
4563 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 3734 | for (i = 0; i < I915_NUM_RINGS; i++) |
4564 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | 3735 | intel_cleanup_ring_buffer(&dev_priv->ring[i]); |
4565 | intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); | ||
4566 | if (HAS_PIPE_CONTROL(dev)) | ||
4567 | i915_gem_cleanup_pipe_control(dev); | ||
4568 | } | 3736 | } |
4569 | 3737 | ||
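The cleanup path now iterates one ring array instead of naming render/bsd/blt individually; the prerequisite (assumed here) is that intel_cleanup_ring_buffer() is a no-op on a ring that was never initialized, e.g. no BLT ring before GEN6. A small runnable model of that table-driven shape (names are stand-ins):

```c
#include <stdio.h>

enum ring_id { RCS, VCS, BCS, NUM_RINGS };	/* render, bsd, blt */

struct ring { const char *name; int initialized; };

static void cleanup_ring(struct ring *r)
{
	if (!r->initialized)	/* cleanup must tolerate absent rings */
		return;
	printf("cleaning %s\n", r->name);
	r->initialized = 0;
}

int main(void)
{
	struct ring rings[NUM_RINGS] = {
		[RCS] = { "render", 1 },
		[VCS] = { "bsd",    1 },
		[BCS] = { "blt",    0 },	/* e.g. pre-GEN6 hardware */
	};

	for (int i = 0; i < NUM_RINGS; i++)
		cleanup_ring(&rings[i]);
	return 0;
}
```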
4570 | int | 3738 | int |
@@ -4572,7 +3740,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4572 | struct drm_file *file_priv) | 3740 | struct drm_file *file_priv) |
4573 | { | 3741 | { |
4574 | drm_i915_private_t *dev_priv = dev->dev_private; | 3742 | drm_i915_private_t *dev_priv = dev->dev_private; |
4575 | int ret; | 3743 | int ret, i; |
4576 | 3744 | ||
4577 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 3745 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4578 | return 0; | 3746 | return 0; |
@@ -4592,14 +3760,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4592 | } | 3760 | } |
4593 | 3761 | ||
4594 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | 3762 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); |
4595 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); | ||
4596 | BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); | ||
4597 | BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); | ||
4598 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 3763 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); |
4599 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 3764 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); |
4600 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); | 3765 | for (i = 0; i < I915_NUM_RINGS; i++) { |
4601 | BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); | 3766 | BUG_ON(!list_empty(&dev_priv->ring[i].active_list)); |
4602 | BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); | 3767 | BUG_ON(!list_empty(&dev_priv->ring[i].request_list)); |
3768 | } | ||
4603 | mutex_unlock(&dev->struct_mutex); | 3769 | mutex_unlock(&dev->struct_mutex); |
4604 | 3770 | ||
4605 | ret = drm_irq_install(dev); | 3771 | ret = drm_irq_install(dev); |
@@ -4661,17 +3827,14 @@ i915_gem_load(struct drm_device *dev) | |||
4661 | INIT_LIST_HEAD(&dev_priv->mm.pinned_list); | 3827 | INIT_LIST_HEAD(&dev_priv->mm.pinned_list); |
4662 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 3828 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
4663 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); | 3829 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); |
4664 | init_ring_lists(&dev_priv->render_ring); | 3830 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); |
4665 | init_ring_lists(&dev_priv->bsd_ring); | 3831 | for (i = 0; i < I915_NUM_RINGS; i++) |
4666 | init_ring_lists(&dev_priv->blt_ring); | 3832 | init_ring_lists(&dev_priv->ring[i]); |
4667 | for (i = 0; i < 16; i++) | 3833 | for (i = 0; i < 16; i++) |
4668 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 3834 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
4669 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 3835 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
4670 | i915_gem_retire_work_handler); | 3836 | i915_gem_retire_work_handler); |
4671 | init_completion(&dev_priv->error_completion); | 3837 | init_completion(&dev_priv->error_completion); |
4672 | spin_lock(&shrink_list_lock); | ||
4673 | list_add(&dev_priv->mm.shrink_list, &shrink_list); | ||
4674 | spin_unlock(&shrink_list_lock); | ||
4675 | 3838 | ||
4676 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | 3839 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ |
4677 | if (IS_GEN3(dev)) { | 3840 | if (IS_GEN3(dev)) { |
@@ -4683,6 +3846,8 @@ i915_gem_load(struct drm_device *dev) | |||
4683 | } | 3846 | } |
4684 | } | 3847 | } |
4685 | 3848 | ||
3849 | dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; | ||
3850 | |||
4686 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 3851 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
4687 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3852 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4688 | dev_priv->fence_reg_start = 3; | 3853 | dev_priv->fence_reg_start = 3; |
@@ -4714,6 +3879,10 @@ i915_gem_load(struct drm_device *dev) | |||
4714 | } | 3879 | } |
4715 | i915_gem_detect_bit_6_swizzle(dev); | 3880 | i915_gem_detect_bit_6_swizzle(dev); |
4716 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 3881 | init_waitqueue_head(&dev_priv->pending_flip_queue); |
3882 | |||
3883 | dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; | ||
3884 | dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; | ||
3885 | register_shrinker(&dev_priv->mm.inactive_shrinker); | ||
4717 | } | 3886 | } |
4718 | 3887 | ||
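i915_gem_load() now registers a per-device shrinker in place of the old global shrink_list (note the deleted spin_lock/list_add above). A kernel-style sketch of the wiring, assuming the shrinker API of this era as shown in the diff; count_inactive() and shrink_inactive() are hypothetical helpers:

```c
/* Sketch only: the real callback is i915_gem_inactive_shrink() below. */
static int inactive_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp)
{
	struct drm_i915_private *dev_priv =
		container_of(s, struct drm_i915_private,
			     mm.inactive_shrinker);

	if (nr_to_scan == 0)	/* query pass: report reclaimable count */
		return count_inactive(dev_priv) / 100 *
			sysctl_vfs_cache_pressure;

	return shrink_inactive(dev_priv, nr_to_scan);	/* reclaim pass */
}

/* ...in i915_gem_load(): */
dev_priv->mm.inactive_shrinker.shrink = inactive_shrink;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
/* paired with unregister_shrinker() at driver teardown */
```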
4719 | /* | 3888 | /* |
@@ -4783,47 +3952,47 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) | |||
4783 | } | 3952 | } |
4784 | 3953 | ||
4785 | void i915_gem_detach_phys_object(struct drm_device *dev, | 3954 | void i915_gem_detach_phys_object(struct drm_device *dev, |
4786 | struct drm_gem_object *obj) | 3955 | struct drm_i915_gem_object *obj) |
4787 | { | 3956 | { |
4788 | struct drm_i915_gem_object *obj_priv; | 3957 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; |
3958 | char *vaddr; | ||
4789 | int i; | 3959 | int i; |
4790 | int ret; | ||
4791 | int page_count; | 3960 | int page_count; |
4792 | 3961 | ||
4793 | obj_priv = to_intel_bo(obj); | 3962 | if (!obj->phys_obj) |
4794 | if (!obj_priv->phys_obj) | ||
4795 | return; | 3963 | return; |
3964 | vaddr = obj->phys_obj->handle->vaddr; | ||
4796 | 3965 | ||
4797 | ret = i915_gem_object_get_pages(obj, 0); | 3966 | page_count = obj->base.size / PAGE_SIZE; |
4798 | if (ret) | ||
4799 | goto out; | ||
4800 | |||
4801 | page_count = obj->size / PAGE_SIZE; | ||
4802 | |||
4803 | for (i = 0; i < page_count; i++) { | 3967 | for (i = 0; i < page_count; i++) { |
4804 | char *dst = kmap_atomic(obj_priv->pages[i]); | 3968 | struct page *page = read_cache_page_gfp(mapping, i, |
4805 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 3969 | GFP_HIGHUSER | __GFP_RECLAIMABLE); |
4806 | 3970 | if (!IS_ERR(page)) { | |
4807 | memcpy(dst, src, PAGE_SIZE); | 3971 | char *dst = kmap_atomic(page); |
4808 | kunmap_atomic(dst); | 3972 | memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); |
3973 | kunmap_atomic(dst); | ||
3974 | |||
3975 | drm_clflush_pages(&page, 1); | ||
3976 | |||
3977 | set_page_dirty(page); | ||
3978 | mark_page_accessed(page); | ||
3979 | page_cache_release(page); | ||
3980 | } | ||
4809 | } | 3981 | } |
4810 | drm_clflush_pages(obj_priv->pages, page_count); | 3982 | intel_gtt_chipset_flush(); |
4811 | drm_agp_chipset_flush(dev); | ||
4812 | 3983 | ||
4813 | i915_gem_object_put_pages(obj); | 3984 | obj->phys_obj->cur_obj = NULL; |
4814 | out: | 3985 | obj->phys_obj = NULL; |
4815 | obj_priv->phys_obj->cur_obj = NULL; | ||
4816 | obj_priv->phys_obj = NULL; | ||
4817 | } | 3986 | } |
4818 | 3987 | ||
4819 | int | 3988 | int |
4820 | i915_gem_attach_phys_object(struct drm_device *dev, | 3989 | i915_gem_attach_phys_object(struct drm_device *dev, |
4821 | struct drm_gem_object *obj, | 3990 | struct drm_i915_gem_object *obj, |
4822 | int id, | 3991 | int id, |
4823 | int align) | 3992 | int align) |
4824 | { | 3993 | { |
3994 | struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | ||
4825 | drm_i915_private_t *dev_priv = dev->dev_private; | 3995 | drm_i915_private_t *dev_priv = dev->dev_private; |
4826 | struct drm_i915_gem_object *obj_priv; | ||
4827 | int ret = 0; | 3996 | int ret = 0; |
4828 | int page_count; | 3997 | int page_count; |
4829 | int i; | 3998 | int i; |
@@ -4831,10 +4000,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4831 | if (id > I915_MAX_PHYS_OBJECT) | 4000 | if (id > I915_MAX_PHYS_OBJECT) |
4832 | return -EINVAL; | 4001 | return -EINVAL; |
4833 | 4002 | ||
4834 | obj_priv = to_intel_bo(obj); | 4003 | if (obj->phys_obj) { |
4835 | 4004 | if (obj->phys_obj->id == id) | |
4836 | if (obj_priv->phys_obj) { | ||
4837 | if (obj_priv->phys_obj->id == id) | ||
4838 | return 0; | 4005 | return 0; |
4839 | i915_gem_detach_phys_object(dev, obj); | 4006 | i915_gem_detach_phys_object(dev, obj); |
4840 | } | 4007 | } |
@@ -4842,51 +4009,50 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4842 | /* create a new object */ | 4009 | /* create a new object */ |
4843 | if (!dev_priv->mm.phys_objs[id - 1]) { | 4010 | if (!dev_priv->mm.phys_objs[id - 1]) { |
4844 | ret = i915_gem_init_phys_object(dev, id, | 4011 | ret = i915_gem_init_phys_object(dev, id, |
4845 | obj->size, align); | 4012 | obj->base.size, align); |
4846 | if (ret) { | 4013 | if (ret) { |
4847 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | 4014 | DRM_ERROR("failed to init phys object %d size: %zu\n", |
4848 | goto out; | 4015 | id, obj->base.size); |
4016 | return ret; | ||
4849 | } | 4017 | } |
4850 | } | 4018 | } |
4851 | 4019 | ||
4852 | /* bind to the object */ | 4020 | /* bind to the object */ |
4853 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 4021 | obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
4854 | obj_priv->phys_obj->cur_obj = obj; | 4022 | obj->phys_obj->cur_obj = obj; |
4855 | 4023 | ||
4856 | ret = i915_gem_object_get_pages(obj, 0); | 4024 | page_count = obj->base.size / PAGE_SIZE; |
4857 | if (ret) { | ||
4858 | DRM_ERROR("failed to get page list\n"); | ||
4859 | goto out; | ||
4860 | } | ||
4861 | |||
4862 | page_count = obj->size / PAGE_SIZE; | ||
4863 | 4025 | ||
4864 | for (i = 0; i < page_count; i++) { | 4026 | for (i = 0; i < page_count; i++) { |
4865 | char *src = kmap_atomic(obj_priv->pages[i]); | 4027 | struct page *page; |
4866 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 4028 | char *dst, *src; |
4029 | |||
4030 | page = read_cache_page_gfp(mapping, i, | ||
4031 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | ||
4032 | if (IS_ERR(page)) | ||
4033 | return PTR_ERR(page); | ||
4867 | 4034 | ||
4035 | src = kmap_atomic(page); | ||
4036 | dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); | ||
4868 | memcpy(dst, src, PAGE_SIZE); | 4037 | memcpy(dst, src, PAGE_SIZE); |
4869 | kunmap_atomic(src); | 4038 | kunmap_atomic(src); |
4870 | } | ||
4871 | 4039 | ||
4872 | i915_gem_object_put_pages(obj); | 4040 | mark_page_accessed(page); |
4041 | page_cache_release(page); | ||
4042 | } | ||
4873 | 4043 | ||
4874 | return 0; | 4044 | return 0; |
4875 | out: | ||
4876 | return ret; | ||
4877 | } | 4045 | } |
4878 | 4046 | ||
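Both phys-object paths now fetch backing pages straight from the shmem mapping instead of pinning the whole object via get_pages. A simplified restatement of the per-page pattern with the reference and mapping rules spelled out (read_cache_page_gfp() is the real API of this era; `dst` stands in for the phys object's vaddr plus the page offset):

```c
struct page *page;
char *src;

/* For each page index i of the object's shmem file: */
page = read_cache_page_gfp(mapping, i,
			   GFP_HIGHUSER | __GFP_RECLAIMABLE);
if (IS_ERR(page))
	return PTR_ERR(page);	/* on success, a reference is held below */

src = kmap_atomic(page);	/* atomic mapping: no sleeping until unmap */
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);

mark_page_accessed(page);	/* keep page-LRU aging honest */
page_cache_release(page);	/* drop the reference read_cache_page took */
```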
4879 | static int | 4047 | static int |
4880 | i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 4048 | i915_gem_phys_pwrite(struct drm_device *dev, |
4049 | struct drm_i915_gem_object *obj, | ||
4881 | struct drm_i915_gem_pwrite *args, | 4050 | struct drm_i915_gem_pwrite *args, |
4882 | struct drm_file *file_priv) | 4051 | struct drm_file *file_priv) |
4883 | { | 4052 | { |
4884 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4053 | void *vaddr = obj->phys_obj->handle->vaddr + args->offset; |
4885 | void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; | ||
4886 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; | 4054 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; |
4887 | 4055 | ||
4888 | DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); | ||
4889 | |||
4890 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 4056 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
4891 | unsigned long unwritten; | 4057 | unsigned long unwritten; |
4892 | 4058 | ||
@@ -4901,7 +4067,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
4901 | return -EFAULT; | 4067 | return -EFAULT; |
4902 | } | 4068 | } |
4903 | 4069 | ||
4904 | drm_agp_chipset_flush(dev); | 4070 | intel_gtt_chipset_flush(); |
4905 | return 0; | 4071 | return 0; |
4906 | } | 4072 | } |
4907 | 4073 | ||
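The hunk elides the fallback body, but the shape is the common two-stage user copy: try the non-faulting, cache-bypassing copy first, and only if it cannot complete fall back to a copy that may fault and sleep. A hedged sketch of that general pattern (not necessarily the elided code verbatim):

```c
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
	unsigned long unwritten;

	/* Fast path failed, e.g. the source page was not resident.
	 * Retry with the ordinary, faulting copy; drop locks that
	 * must not be held across a page fault. */
	mutex_unlock(&dev->struct_mutex);
	unwritten = copy_from_user(vaddr, user_data, args->size);
	mutex_lock(&dev->struct_mutex);
	if (unwritten)
		return -EFAULT;
}
```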
@@ -4939,144 +4105,68 @@ i915_gpu_is_active(struct drm_device *dev) | |||
4939 | } | 4105 | } |
4940 | 4106 | ||
4941 | static int | 4107 | static int |
4942 | i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) | 4108 | i915_gem_inactive_shrink(struct shrinker *shrinker, |
4109 | int nr_to_scan, | ||
4110 | gfp_t gfp_mask) | ||
4943 | { | 4111 | { |
4944 | drm_i915_private_t *dev_priv, *next_dev; | 4112 | struct drm_i915_private *dev_priv = |
4945 | struct drm_i915_gem_object *obj_priv, *next_obj; | 4113 | container_of(shrinker, |
4946 | int cnt = 0; | 4114 | struct drm_i915_private, |
4947 | int would_deadlock = 1; | 4115 | mm.inactive_shrinker); |
4116 | struct drm_device *dev = dev_priv->dev; | ||
4117 | struct drm_i915_gem_object *obj, *next; | ||
4118 | int cnt; | ||
4119 | |||
4120 | if (!mutex_trylock(&dev->struct_mutex)) | ||
4121 | return 0; | ||
4948 | 4122 | ||
4949 | /* "fast-path" to count number of available objects */ | 4123 | /* "fast-path" to count number of available objects */ |
4950 | if (nr_to_scan == 0) { | 4124 | if (nr_to_scan == 0) { |
4951 | spin_lock(&shrink_list_lock); | 4125 | cnt = 0; |
4952 | list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { | 4126 | list_for_each_entry(obj, |
4953 | struct drm_device *dev = dev_priv->dev; | 4127 | &dev_priv->mm.inactive_list, |
4954 | 4128 | mm_list) | |
4955 | if (mutex_trylock(&dev->struct_mutex)) { | 4129 | cnt++; |
4956 | list_for_each_entry(obj_priv, | 4130 | mutex_unlock(&dev->struct_mutex); |
4957 | &dev_priv->mm.inactive_list, | 4131 | return cnt / 100 * sysctl_vfs_cache_pressure; |
4958 | mm_list) | ||
4959 | cnt++; | ||
4960 | mutex_unlock(&dev->struct_mutex); | ||
4961 | } | ||
4962 | } | ||
4963 | spin_unlock(&shrink_list_lock); | ||
4964 | |||
4965 | return (cnt / 100) * sysctl_vfs_cache_pressure; | ||
4966 | } | 4132 | } |
4967 | 4133 | ||
4968 | spin_lock(&shrink_list_lock); | ||
4969 | |||
4970 | rescan: | 4134 | rescan: |
4971 | /* first scan for clean buffers */ | 4135 | /* first scan for clean buffers */ |
4972 | list_for_each_entry_safe(dev_priv, next_dev, | 4136 | i915_gem_retire_requests(dev); |
4973 | &shrink_list, mm.shrink_list) { | ||
4974 | struct drm_device *dev = dev_priv->dev; | ||
4975 | |||
4976 | if (! mutex_trylock(&dev->struct_mutex)) | ||
4977 | continue; | ||
4978 | |||
4979 | spin_unlock(&shrink_list_lock); | ||
4980 | i915_gem_retire_requests(dev); | ||
4981 | 4137 | ||
4982 | list_for_each_entry_safe(obj_priv, next_obj, | 4138 | list_for_each_entry_safe(obj, next, |
4983 | &dev_priv->mm.inactive_list, | 4139 | &dev_priv->mm.inactive_list, |
4984 | mm_list) { | 4140 | mm_list) { |
4985 | if (i915_gem_object_is_purgeable(obj_priv)) { | 4141 | if (i915_gem_object_is_purgeable(obj)) { |
4986 | i915_gem_object_unbind(&obj_priv->base); | 4142 | if (i915_gem_object_unbind(obj) == 0 && |
4987 | if (--nr_to_scan <= 0) | 4143 | --nr_to_scan == 0) |
4988 | break; | 4144 | break; |
4989 | } | ||
4990 | } | 4145 | } |
4991 | |||
4992 | spin_lock(&shrink_list_lock); | ||
4993 | mutex_unlock(&dev->struct_mutex); | ||
4994 | |||
4995 | would_deadlock = 0; | ||
4996 | |||
4997 | if (nr_to_scan <= 0) | ||
4998 | break; | ||
4999 | } | 4146 | } |
5000 | 4147 | ||
5001 | /* second pass, evict/count anything still on the inactive list */ | 4148 | /* second pass, evict/count anything still on the inactive list */ |
5002 | list_for_each_entry_safe(dev_priv, next_dev, | 4149 | cnt = 0; |
5003 | &shrink_list, mm.shrink_list) { | 4150 | list_for_each_entry_safe(obj, next, |
5004 | struct drm_device *dev = dev_priv->dev; | 4151 | &dev_priv->mm.inactive_list, |
5005 | 4152 | mm_list) { | |
5006 | if (! mutex_trylock(&dev->struct_mutex)) | 4153 | if (nr_to_scan && |
5007 | continue; | 4154 | i915_gem_object_unbind(obj) == 0) |
5008 | 4155 | nr_to_scan--; | |
5009 | spin_unlock(&shrink_list_lock); | 4156 | else |
5010 | 4157 | cnt++; | |
5011 | list_for_each_entry_safe(obj_priv, next_obj, | ||
5012 | &dev_priv->mm.inactive_list, | ||
5013 | mm_list) { | ||
5014 | if (nr_to_scan > 0) { | ||
5015 | i915_gem_object_unbind(&obj_priv->base); | ||
5016 | nr_to_scan--; | ||
5017 | } else | ||
5018 | cnt++; | ||
5019 | } | ||
5020 | |||
5021 | spin_lock(&shrink_list_lock); | ||
5022 | mutex_unlock(&dev->struct_mutex); | ||
5023 | |||
5024 | would_deadlock = 0; | ||
5025 | } | 4158 | } |
5026 | 4159 | ||
5027 | if (nr_to_scan) { | 4160 | if (nr_to_scan && i915_gpu_is_active(dev)) { |
5028 | int active = 0; | ||
5029 | |||
5030 | /* | 4161 | /* |
5031 | * We are desperate for pages, so as a last resort, wait | 4162 | * We are desperate for pages, so as a last resort, wait |
5032 | * for the GPU to finish and discard whatever we can. | 4163 | * for the GPU to finish and discard whatever we can. |
5033 | * This has a dramatic impact to reduce the number of | 4164 | * This has a dramatic impact to reduce the number of |
5034 | * OOM-killer events whilst running the GPU aggressively. | 4165 | * OOM-killer events whilst running the GPU aggressively. |
5035 | */ | 4166 | */ |
5036 | list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { | 4167 | if (i915_gpu_idle(dev) == 0) |
5037 | struct drm_device *dev = dev_priv->dev; | ||
5038 | |||
5039 | if (!mutex_trylock(&dev->struct_mutex)) | ||
5040 | continue; | ||
5041 | |||
5042 | spin_unlock(&shrink_list_lock); | ||
5043 | |||
5044 | if (i915_gpu_is_active(dev)) { | ||
5045 | i915_gpu_idle(dev); | ||
5046 | active++; | ||
5047 | } | ||
5048 | |||
5049 | spin_lock(&shrink_list_lock); | ||
5050 | mutex_unlock(&dev->struct_mutex); | ||
5051 | } | ||
5052 | |||
5053 | if (active) | ||
5054 | goto rescan; | 4168 | goto rescan; |
5055 | } | 4169 | } |
5056 | 4170 | mutex_unlock(&dev->struct_mutex); | |
5057 | spin_unlock(&shrink_list_lock); | 4171 | return cnt / 100 * sysctl_vfs_cache_pressure; |
5058 | |||
5059 | if (would_deadlock) | ||
5060 | return -1; | ||
5061 | else if (cnt > 0) | ||
5062 | return (cnt / 100) * sysctl_vfs_cache_pressure; | ||
5063 | else | ||
5064 | return 0; | ||
5065 | } | ||
5066 | |||
5067 | static struct shrinker shrinker = { | ||
5068 | .shrink = i915_gem_shrink, | ||
5069 | .seeks = DEFAULT_SEEKS, | ||
5070 | }; | ||
5071 | |||
5072 | __init void | ||
5073 | i915_gem_shrinker_init(void) | ||
5074 | { | ||
5075 | register_shrinker(&shrinker); | ||
5076 | } | ||
5077 | |||
5078 | __exit void | ||
5079 | i915_gem_shrinker_exit(void) | ||
5080 | { | ||
5081 | unregister_shrinker(&shrinker); | ||
5082 | } | 4172 | } |
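The rewritten shrinker is much simpler than the global-list version it replaces: one mutex_trylock (returning 0 rather than risking deadlock if the GPU lock is held), a count-only query when nr_to_scan is 0, then purgeable-first eviction with a rescan after idling the GPU as a last resort. A runnable toy of the count/scan contract (illustrative only):

```c
#include <stdio.h>

static int inactive = 10;	/* stand-in for mm.inactive_list length */

/* nr_to_scan == 0 is a query: report how much could be freed.
 * Otherwise reclaim up to nr_to_scan objects and report what remains.
 * (The driver additionally scales by sysctl_vfs_cache_pressure.) */
static int shrink(int nr_to_scan)
{
	if (nr_to_scan == 0)
		return inactive;

	while (nr_to_scan-- && inactive)
		inactive--;	/* i915 evicts purgeable objects first */
	return inactive;
}

int main(void)
{
	printf("query: %d\n", shrink(0));		/* 10 */
	printf("after scan(4): %d\n", shrink(4));	/* 6 */
	return 0;
}
```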
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 48644b840a8d..29d014c48ca2 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, | |||
152 | } | 152 | } |
153 | 153 | ||
154 | void | 154 | void |
155 | i915_gem_dump_object(struct drm_gem_object *obj, int len, | 155 | i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, |
156 | const char *where, uint32_t mark) | 156 | const char *where, uint32_t mark) |
157 | { | 157 | { |
158 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
159 | int page; | 158 | int page; |
160 | 159 | ||
161 | DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); | 160 | DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset); |
162 | for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { | 161 | for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { |
163 | int page_len, chunk, chunk_len; | 162 | int page_len, chunk, chunk_len; |
164 | 163 | ||
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, | |||
170 | chunk_len = page_len - chunk; | 169 | chunk_len = page_len - chunk; |
171 | if (chunk_len > 128) | 170 | if (chunk_len > 128) |
172 | chunk_len = 128; | 171 | chunk_len = 128; |
173 | i915_gem_dump_page(obj_priv->pages[page], | 172 | i915_gem_dump_page(obj->pages[page], |
174 | chunk, chunk + chunk_len, | 173 | chunk, chunk + chunk_len, |
175 | obj_priv->gtt_offset + | 174 | obj->gtt_offset + |
176 | page * PAGE_SIZE, | 175 | page * PAGE_SIZE, |
177 | mark); | 176 | mark); |
178 | } | 177 | } |
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, | |||
182 | 181 | ||
183 | #if WATCH_COHERENCY | 182 | #if WATCH_COHERENCY |
184 | void | 183 | void |
185 | i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | 184 | i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) |
186 | { | 185 | { |
187 | struct drm_device *dev = obj->dev; | 186 | struct drm_device *dev = obj->base.dev; |
188 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
189 | int page; | 187 | int page; |
190 | uint32_t *gtt_mapping; | 188 | uint32_t *gtt_mapping; |
191 | uint32_t *backing_map = NULL; | 189 | uint32_t *backing_map = NULL; |
192 | int bad_count = 0; | 190 | int bad_count = 0; |
193 | 191 | ||
194 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", | 192 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", |
195 | __func__, obj, obj_priv->gtt_offset, handle, | 193 | __func__, obj, obj->gtt_offset, handle, |
196 | obj->size / 1024); | 194 | obj->size / 1024); |
197 | 195 | ||
198 | gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, | 196 | gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); |
199 | obj->size); | ||
200 | if (gtt_mapping == NULL) { | 197 | if (gtt_mapping == NULL) { |
201 | DRM_ERROR("failed to map GTT space\n"); | 198 | DRM_ERROR("failed to map GTT space\n"); |
202 | return; | 199 | return; |
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
205 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { | 202 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { |
206 | int i; | 203 | int i; |
207 | 204 | ||
208 | backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); | 205 | backing_map = kmap_atomic(obj->pages[page], KM_USER0); |
209 | 206 | ||
210 | if (backing_map == NULL) { | 207 | if (backing_map == NULL) { |
211 | DRM_ERROR("failed to map backing page\n"); | 208 | DRM_ERROR("failed to map backing page\n"); |
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
220 | if (cpuval != gttval) { | 217 | if (cpuval != gttval) { |
221 | DRM_INFO("incoherent CPU vs GPU at 0x%08x: " | 218 | DRM_INFO("incoherent CPU vs GPU at 0x%08x: " |
222 | "0x%08x vs 0x%08x\n", | 219 | "0x%08x vs 0x%08x\n", |
223 | (int)(obj_priv->gtt_offset + | 220 | (int)(obj->gtt_offset + |
224 | page * PAGE_SIZE + i * 4), | 221 | page * PAGE_SIZE + i * 4), |
225 | cpuval, gttval); | 222 | cpuval, gttval); |
226 | if (bad_count++ >= 8) { | 223 | if (bad_count++ >= 8) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index d8ae7d1d0cc6..3d39005540aa 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -32,28 +32,36 @@ | |||
32 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
33 | 33 | ||
34 | static bool | 34 | static bool |
35 | mark_free(struct drm_i915_gem_object *obj_priv, | 35 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) |
36 | struct list_head *unwind) | ||
37 | { | 36 | { |
38 | list_add(&obj_priv->evict_list, unwind); | 37 | list_add(&obj->exec_list, unwind); |
39 | drm_gem_object_reference(&obj_priv->base); | 38 | drm_gem_object_reference(&obj->base); |
40 | return drm_mm_scan_add_block(obj_priv->gtt_space); | 39 | return drm_mm_scan_add_block(obj->gtt_space); |
41 | } | 40 | } |
42 | 41 | ||
43 | int | 42 | int |
44 | i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) | 43 | i915_gem_evict_something(struct drm_device *dev, int min_size, |
44 | unsigned alignment, bool mappable) | ||
45 | { | 45 | { |
46 | drm_i915_private_t *dev_priv = dev->dev_private; | 46 | drm_i915_private_t *dev_priv = dev->dev_private; |
47 | struct list_head eviction_list, unwind_list; | 47 | struct list_head eviction_list, unwind_list; |
48 | struct drm_i915_gem_object *obj_priv; | 48 | struct drm_i915_gem_object *obj; |
49 | int ret = 0; | 49 | int ret = 0; |
50 | 50 | ||
51 | i915_gem_retire_requests(dev); | 51 | i915_gem_retire_requests(dev); |
52 | 52 | ||
53 | /* Re-check for free space after retiring requests */ | 53 | /* Re-check for free space after retiring requests */ |
54 | if (drm_mm_search_free(&dev_priv->mm.gtt_space, | 54 | if (mappable) { |
55 | min_size, alignment, 0)) | 55 | if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, |
56 | return 0; | 56 | min_size, alignment, 0, |
57 | dev_priv->mm.gtt_mappable_end, | ||
58 | 0)) | ||
59 | return 0; | ||
60 | } else { | ||
61 | if (drm_mm_search_free(&dev_priv->mm.gtt_space, | ||
62 | min_size, alignment, 0)) | ||
63 | return 0; | ||
64 | } | ||
57 | 65 | ||
58 | /* | 66 | /* |
59 | * The goal is to evict objects and amalgamate space in LRU order. | 67 | * The goal is to evict objects and amalgamate space in LRU order. |
@@ -79,45 +87,56 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen | |||
79 | */ | 87 | */ |
80 | 88 | ||
81 | INIT_LIST_HEAD(&unwind_list); | 89 | INIT_LIST_HEAD(&unwind_list); |
82 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); | 90 | if (mappable) |
91 | drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size, | ||
92 | alignment, 0, | ||
93 | dev_priv->mm.gtt_mappable_end); | ||
94 | else | ||
95 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); | ||
83 | 96 | ||
84 | /* First see if there is a large enough contiguous idle region... */ | 97 | /* First see if there is a large enough contiguous idle region... */ |
85 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { | 98 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { |
86 | if (mark_free(obj_priv, &unwind_list)) | 99 | if (mark_free(obj, &unwind_list)) |
87 | goto found; | 100 | goto found; |
88 | } | 101 | } |
89 | 102 | ||
90 | /* Now merge in the soon-to-be-expired objects... */ | 103 | /* Now merge in the soon-to-be-expired objects... */ |
91 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | 104 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
92 | /* Does the object require an outstanding flush? */ | 105 | /* Does the object require an outstanding flush? */ |
93 | if (obj_priv->base.write_domain || obj_priv->pin_count) | 106 | if (obj->base.write_domain || obj->pin_count) |
94 | continue; | 107 | continue; |
95 | 108 | ||
96 | if (mark_free(obj_priv, &unwind_list)) | 109 | if (mark_free(obj, &unwind_list)) |
97 | goto found; | 110 | goto found; |
98 | } | 111 | } |
99 | 112 | ||
100 | /* Finally add anything with a pending flush (in order of retirement) */ | 113 | /* Finally add anything with a pending flush (in order of retirement) */ |
101 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { | 114 | list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { |
102 | if (obj_priv->pin_count) | 115 | if (obj->pin_count) |
103 | continue; | 116 | continue; |
104 | 117 | ||
105 | if (mark_free(obj_priv, &unwind_list)) | 118 | if (mark_free(obj, &unwind_list)) |
106 | goto found; | 119 | goto found; |
107 | } | 120 | } |
108 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | 121 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
109 | if (! obj_priv->base.write_domain || obj_priv->pin_count) | 122 | if (! obj->base.write_domain || obj->pin_count) |
110 | continue; | 123 | continue; |
111 | 124 | ||
112 | if (mark_free(obj_priv, &unwind_list)) | 125 | if (mark_free(obj, &unwind_list)) |
113 | goto found; | 126 | goto found; |
114 | } | 127 | } |
115 | 128 | ||
116 | /* Nothing found, clean up and bail out! */ | 129 | /* Nothing found, clean up and bail out! */ |
117 | list_for_each_entry(obj_priv, &unwind_list, evict_list) { | 130 | while (!list_empty(&unwind_list)) { |
118 | ret = drm_mm_scan_remove_block(obj_priv->gtt_space); | 131 | obj = list_first_entry(&unwind_list, |
132 | struct drm_i915_gem_object, | ||
133 | exec_list); | ||
134 | |||
135 | ret = drm_mm_scan_remove_block(obj->gtt_space); | ||
119 | BUG_ON(ret); | 136 | BUG_ON(ret); |
120 | drm_gem_object_unreference(&obj_priv->base); | 137 | |
138 | list_del_init(&obj->exec_list); | ||
139 | drm_gem_object_unreference(&obj->base); | ||
121 | } | 140 | } |
122 | 141 | ||
123 | /* We expect the caller to unpin, evict all and try again, or give up. | 142 | /* We expect the caller to unpin, evict all and try again, or give up. |
@@ -131,33 +150,34 @@ found: | |||
131 | * temporary list. */ | 150 | * temporary list. */ |
132 | INIT_LIST_HEAD(&eviction_list); | 151 | INIT_LIST_HEAD(&eviction_list); |
133 | while (!list_empty(&unwind_list)) { | 152 | while (!list_empty(&unwind_list)) { |
134 | obj_priv = list_first_entry(&unwind_list, | 153 | obj = list_first_entry(&unwind_list, |
135 | struct drm_i915_gem_object, | 154 | struct drm_i915_gem_object, |
136 | evict_list); | 155 | exec_list); |
137 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { | 156 | if (drm_mm_scan_remove_block(obj->gtt_space)) { |
138 | list_move(&obj_priv->evict_list, &eviction_list); | 157 | list_move(&obj->exec_list, &eviction_list); |
139 | continue; | 158 | continue; |
140 | } | 159 | } |
141 | list_del(&obj_priv->evict_list); | 160 | list_del_init(&obj->exec_list); |
142 | drm_gem_object_unreference(&obj_priv->base); | 161 | drm_gem_object_unreference(&obj->base); |
143 | } | 162 | } |
144 | 163 | ||
145 | /* Unbinding will emit any required flushes */ | 164 | /* Unbinding will emit any required flushes */ |
146 | while (!list_empty(&eviction_list)) { | 165 | while (!list_empty(&eviction_list)) { |
147 | obj_priv = list_first_entry(&eviction_list, | 166 | obj = list_first_entry(&eviction_list, |
148 | struct drm_i915_gem_object, | 167 | struct drm_i915_gem_object, |
149 | evict_list); | 168 | exec_list); |
150 | if (ret == 0) | 169 | if (ret == 0) |
151 | ret = i915_gem_object_unbind(&obj_priv->base); | 170 | ret = i915_gem_object_unbind(obj); |
152 | list_del(&obj_priv->evict_list); | 171 | |
153 | drm_gem_object_unreference(&obj_priv->base); | 172 | list_del_init(&obj->exec_list); |
173 | drm_gem_object_unreference(&obj->base); | ||
154 | } | 174 | } |
155 | 175 | ||
156 | return ret; | 176 | return ret; |
157 | } | 177 | } |
158 | 178 | ||
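The function above is the driver side of drm_mm's eviction-scan protocol, worth spelling out because misuse corrupts the allocator: every block added to a scan must be removed again, and only blocks for which drm_mm_scan_remove_block() returns true overlap the assembled hole and need unbinding. A kernel-style sketch (the lists and loop bodies are stand-ins; the drm_mm_* calls are the real API):

```c
/* 1. Prime the scan; the _with_range() variant keeps the hole inside
 *    the CPU-mappable aperture. */
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

/* 2. Feed candidates in eviction (LRU) order, remembering each one. */
list_for_each_entry(obj, &lru, mm_list) {	/* 'lru' is a stand-in */
	list_add(&obj->exec_list, &unwind);
	drm_gem_object_reference(&obj->base);
	if (drm_mm_scan_add_block(obj->gtt_space))
		goto found;	/* the scanned blocks now cover a hole */
}
/* No hole: fall through; every remove below then returns false and
 * nothing is evicted. */

found:
/* 3. Wind the scan fully down before touching the drm_mm; blocks in
 *    the hole are deferred onto eviction_list. */
while (!list_empty(&unwind)) {
	obj = list_first_entry(&unwind,
			       struct drm_i915_gem_object, exec_list);
	if (drm_mm_scan_remove_block(obj->gtt_space)) {
		list_move(&obj->exec_list, &eviction_list);
		continue;
	}
	list_del_init(&obj->exec_list);
	drm_gem_object_unreference(&obj->base);
}

/* 4. Only now is it safe to evict: unbinding frees each gtt_space. */
while (!list_empty(&eviction_list)) {
	obj = list_first_entry(&eviction_list,
			       struct drm_i915_gem_object, exec_list);
	i915_gem_object_unbind(obj);
	list_del_init(&obj->exec_list);
	drm_gem_object_unreference(&obj->base);
}
```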
159 | int | 179 | int |
160 | i915_gem_evict_everything(struct drm_device *dev) | 180 | i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) |
161 | { | 181 | { |
162 | drm_i915_private_t *dev_priv = dev->dev_private; | 182 | drm_i915_private_t *dev_priv = dev->dev_private; |
163 | int ret; | 183 | int ret; |
@@ -176,36 +196,22 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
176 | 196 | ||
177 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 197 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); |
178 | 198 | ||
179 | ret = i915_gem_evict_inactive(dev); | 199 | return i915_gem_evict_inactive(dev, purgeable_only); |
180 | if (ret) | ||
181 | return ret; | ||
182 | |||
183 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
184 | list_empty(&dev_priv->mm.flushing_list) && | ||
185 | list_empty(&dev_priv->mm.active_list)); | ||
186 | BUG_ON(!lists_empty); | ||
187 | |||
188 | return 0; | ||
189 | } | 200 | } |
190 | 201 | ||
191 | /** Unbinds all inactive objects. */ | 202 | /** Unbinds all inactive objects. */ |
192 | int | 203 | int |
193 | i915_gem_evict_inactive(struct drm_device *dev) | 204 | i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) |
194 | { | 205 | { |
195 | drm_i915_private_t *dev_priv = dev->dev_private; | 206 | drm_i915_private_t *dev_priv = dev->dev_private; |
196 | 207 | struct drm_i915_gem_object *obj, *next; | |
197 | while (!list_empty(&dev_priv->mm.inactive_list)) { | 208 | |
198 | struct drm_gem_object *obj; | 209 | list_for_each_entry_safe(obj, next, |
199 | int ret; | 210 | &dev_priv->mm.inactive_list, mm_list) { |
200 | 211 | if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { | |
201 | obj = &list_first_entry(&dev_priv->mm.inactive_list, | 212 | int ret = i915_gem_object_unbind(obj); |
202 | struct drm_i915_gem_object, | 213 | if (ret) |
203 | mm_list)->base; | 214 | return ret; |
204 | |||
205 | ret = i915_gem_object_unbind(obj); | ||
206 | if (ret != 0) { | ||
207 | DRM_ERROR("Error unbinding object: %d\n", ret); | ||
208 | return ret; | ||
209 | } | 215 | } |
210 | } | 216 | } |
211 | 217 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c new file mode 100644 index 000000000000..d2f445e825f2 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -0,0 +1,1377 @@ | |||
1 | /* | ||
2 | * Copyright © 2008,2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | * Chris Wilson <chris@chris-wilson.co.uk> | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "i915_drm.h" | ||
32 | #include "i915_drv.h" | ||
33 | #include "i915_trace.h" | ||
34 | #include "intel_drv.h" | ||
35 | |||
36 | struct change_domains { | ||
37 | uint32_t invalidate_domains; | ||
38 | uint32_t flush_domains; | ||
39 | uint32_t flush_rings; | ||
40 | }; | ||
41 | |||
42 | /* | ||
43 | * Set the next domain for the specified object. This | ||
44 | * may not actually perform the necessary flushing/invalidating though, | ||
45 | * as that may want to be batched with other set_domain operations | ||
46 | * | ||
47 | * This is (we hope) the only really tricky part of gem. The goal | ||
48 | * is fairly simple -- track which caches hold bits of the object | ||
49 | * and make sure they remain coherent. A few concrete examples may | ||
50 | * help to explain how it works. For shorthand, we use the notation | ||
51 | * (read_domains, write_domain), e.g. (CPU, CPU) to indicate | ||
52 | * a pair of read and write domain masks. | ||
53 | * | ||
54 | * Case 1: the batch buffer | ||
55 | * | ||
56 | * 1. Allocated | ||
57 | * 2. Written by CPU | ||
58 | * 3. Mapped to GTT | ||
59 | * 4. Read by GPU | ||
60 | * 5. Unmapped from GTT | ||
61 | * 6. Freed | ||
62 | * | ||
63 | * Let's take these a step at a time | ||
64 | * | ||
65 | * 1. Allocated | ||
66 | * Pages allocated from the kernel may still have | ||
67 | * cache contents, so we set them to (CPU, CPU) always. | ||
68 | * 2. Written by CPU (using pwrite) | ||
69 | * The pwrite function calls set_domain (CPU, CPU) and | ||
70 | * this function does nothing (as nothing changes) | ||
71 | * 3. Mapped to GTT | ||
72 | * This function asserts that the object is not | ||
73 | * currently in any GPU-based read or write domains | ||
74 | * 4. Read by GPU | ||
75 | * i915_gem_execbuffer calls set_domain (COMMAND, 0). | ||
76 | * As write_domain is zero, this function adds in the | ||
77 | * current read domains (CPU+COMMAND, 0). | ||
78 | * flush_domains is set to CPU. | ||
79 | * invalidate_domains is set to COMMAND | ||
80 | * clflush is run to get data out of the CPU caches | ||
81 | * then i915_dev_set_domain calls i915_gem_flush to | ||
82 | * emit an MI_FLUSH and drm_agp_chipset_flush | ||
83 | * 5. Unmapped from GTT | ||
84 | * i915_gem_object_unbind calls set_domain (CPU, CPU) | ||
85 | * flush_domains and invalidate_domains end up both zero | ||
86 | * so no flushing/invalidating happens | ||
87 | * 6. Freed | ||
88 | * yay, done | ||
89 | * | ||
90 | * Case 2: The shared render buffer | ||
91 | * | ||
92 | * 1. Allocated | ||
93 | * 2. Mapped to GTT | ||
94 | * 3. Read/written by GPU | ||
95 | * 4. set_domain to (CPU,CPU) | ||
96 | * 5. Read/written by CPU | ||
97 | * 6. Read/written by GPU | ||
98 | * | ||
99 | * 1. Allocated | ||
100 | * Same as last example, (CPU, CPU) | ||
101 | * 2. Mapped to GTT | ||
102 | * Nothing changes (assertions find that it is not in the GPU) | ||
103 | * 3. Read/written by GPU | ||
104 | * execbuffer calls set_domain (RENDER, RENDER) | ||
105 | * flush_domains gets CPU | ||
106 | * invalidate_domains gets GPU | ||
107 | * clflush (obj) | ||
108 | * MI_FLUSH and drm_agp_chipset_flush | ||
109 | * 4. set_domain (CPU, CPU) | ||
110 | * flush_domains gets GPU | ||
111 | * invalidate_domains gets CPU | ||
112 | * wait_rendering (obj) to make sure all drawing is complete. | ||
113 | * This will include an MI_FLUSH to get the data from GPU | ||
114 | * to memory | ||
115 | * clflush (obj) to invalidate the CPU cache | ||
116 | * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | ||
117 | * 5. Read/written by CPU | ||
118 | * cache lines are loaded and dirtied | ||
119 | * 6. Read/written by GPU | ||
120 | * Same as last GPU access | ||
121 | * | ||
122 | * Case 3: The constant buffer | ||
123 | * | ||
124 | * 1. Allocated | ||
125 | * 2. Written by CPU | ||
126 | * 3. Read by GPU | ||
127 | * 4. Updated (written) by CPU again | ||
128 | * 5. Read by GPU | ||
129 | * | ||
130 | * 1. Allocated | ||
131 | * (CPU, CPU) | ||
132 | * 2. Written by CPU | ||
133 | * (CPU, CPU) | ||
134 | * 3. Read by GPU | ||
135 | * (CPU+RENDER, 0) | ||
136 | * flush_domains = CPU | ||
137 | * invalidate_domains = RENDER | ||
138 | * clflush (obj) | ||
139 | * MI_FLUSH | ||
140 | * drm_agp_chipset_flush | ||
141 | * 4. Updated (written) by CPU again | ||
142 | * (CPU, CPU) | ||
143 | * flush_domains = 0 (no previous write domain) | ||
144 | * invalidate_domains = 0 (no new read domains) | ||
145 | * 5. Read by GPU | ||
146 | * (CPU+RENDER, 0) | ||
147 | * flush_domains = CPU | ||
148 | * invalidate_domains = RENDER | ||
149 | * clflush (obj) | ||
150 | * MI_FLUSH | ||
151 | * drm_agp_chipset_flush | ||
152 | */ | ||
153 | static void | ||
154 | i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, | ||
155 | struct intel_ring_buffer *ring, | ||
156 | struct change_domains *cd) | ||
157 | { | ||
158 | uint32_t invalidate_domains = 0, flush_domains = 0; | ||
159 | |||
160 | /* | ||
161 | * If the object isn't moving to a new write domain, | ||
162 | * let the object stay in multiple read domains | ||
163 | */ | ||
164 | if (obj->base.pending_write_domain == 0) | ||
165 | obj->base.pending_read_domains |= obj->base.read_domains; | ||
166 | |||
167 | /* | ||
168 | * Flush the current write domain if | ||
169 | * the new read domains don't match. Invalidate | ||
170 | * any read domains which differ from the old | ||
171 | * write domain | ||
172 | */ | ||
173 | if (obj->base.write_domain && | ||
174 | (((obj->base.write_domain != obj->base.pending_read_domains || | ||
175 | obj->ring != ring)) || | ||
176 | (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { | ||
177 | flush_domains |= obj->base.write_domain; | ||
178 | invalidate_domains |= | ||
179 | obj->base.pending_read_domains & ~obj->base.write_domain; | ||
180 | } | ||
181 | /* | ||
182 | * Invalidate any read caches which may have | ||
183 | * stale data. That is, any new read domains. | ||
184 | */ | ||
185 | invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; | ||
186 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | ||
187 | i915_gem_clflush_object(obj); | ||
188 | |||
189 | /* blow away mappings if mapped through GTT */ | ||
190 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) | ||
191 | i915_gem_release_mmap(obj); | ||
192 | |||
193 | /* The actual obj->write_domain will be updated with | ||
194 | * pending_write_domain after we emit the accumulated flush for all | ||
195 | * of our domain changes in execbuffers (which clears objects' | ||
196 | * write_domains). So if we have a current write domain that we | ||
197 | * aren't changing, set pending_write_domain to that. | ||
198 | */ | ||
199 | if (flush_domains == 0 && obj->base.pending_write_domain == 0) | ||
200 | obj->base.pending_write_domain = obj->base.write_domain; | ||
201 | |||
202 | cd->invalidate_domains |= invalidate_domains; | ||
203 | cd->flush_domains |= flush_domains; | ||
204 | if (flush_domains & I915_GEM_GPU_DOMAINS) | ||
205 | cd->flush_rings |= obj->ring->id; | ||
206 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | ||
207 | cd->flush_rings |= ring->id; | ||
208 | } | ||
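A stand-alone sketch of the transition rule this function applies, reduced to the domain arithmetic (the ring-change and fence checks are omitted, and DOM_CPU/DOM_RENDER are invented stand-ins for the I915_GEM_DOMAIN_* bits): an object last written by the CPU and about to be read by the render ring needs its old write domain flushed and the new read domain invalidated, exactly as in Case 3 above.

```c
/* Hedged model, not kernel code: compute flush/invalidate sets for
 * one object moving from (CPU, CPU) to being read by RENDER. */
#include <stdint.h>
#include <stdio.h>

#define DOM_CPU    (1u << 0)
#define DOM_RENDER (1u << 1)

int main(void)
{
	uint32_t read_domains = DOM_CPU, write_domain = DOM_CPU;
	uint32_t pending_reads = DOM_RENDER;	/* new read by the GPU */
	uint32_t flush = 0, invalidate = 0;

	/* Flush the old write domain if the pending reads differ. */
	if (write_domain && write_domain != pending_reads) {
		flush |= write_domain;
		invalidate |= pending_reads & ~write_domain;
	}
	/* Invalidate read caches the object does not already hold. */
	invalidate |= pending_reads & ~read_domains;

	printf("flush=%#x invalidate=%#x\n", flush, invalidate);
	return 0;	/* prints flush=0x1 (CPU) invalidate=0x2 (RENDER) */
}
```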
209 | |||
210 | struct eb_objects { | ||
211 | int and; | ||
212 | struct hlist_head buckets[0]; | ||
213 | }; | ||
214 | |||
215 | static struct eb_objects * | ||
216 | eb_create(int size) | ||
217 | { | ||
218 | struct eb_objects *eb; | ||
219 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; | ||
220 | while (count > size) | ||
221 | count >>= 1; | ||
222 | eb = kzalloc(count*sizeof(struct hlist_head) + | ||
223 | sizeof(struct eb_objects), | ||
224 | GFP_KERNEL); | ||
225 | if (eb == NULL) | ||
226 | return eb; | ||
227 | |||
228 | eb->and = count - 1; | ||
229 | return eb; | ||
230 | } | ||
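The bucket sizing in eb_create() caps the hash table at half a page and keeps the bucket count a power of two, so eb_add_object()/eb_get_object() can mask with eb->and instead of taking a modulo. A user-space sketch of that sizing, assuming 4 KiB pages and 8-byte hlist heads:

```c
#include <stdio.h>

static int bucket_count(int size)
{
	/* PAGE_SIZE / sizeof(struct hlist_head) / 2, assumed 4096/8/2 */
	int count = 4096 / 8 / 2;

	/* Halve until the count no longer exceeds the object count;
	 * the result stays a power of two, so (count - 1) is a mask. */
	while (count > size)
		count >>= 1;
	return count;
}

int main(void)
{
	printf("%d objects -> %d buckets\n", 3, bucket_count(3));	/* 2 */
	printf("%d objects -> %d buckets\n", 100, bucket_count(100));	/* 64 */
	printf("%d objects -> %d buckets\n", 1000, bucket_count(1000));	/* 256 cap */
	return 0;
}
```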
231 | |||
232 | static void | ||
233 | eb_reset(struct eb_objects *eb) | ||
234 | { | ||
235 | memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); | ||
236 | } | ||
237 | |||
238 | static void | ||
239 | eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) | ||
240 | { | ||
241 | hlist_add_head(&obj->exec_node, | ||
242 | &eb->buckets[obj->exec_handle & eb->and]); | ||
243 | } | ||
244 | |||
245 | static struct drm_i915_gem_object * | ||
246 | eb_get_object(struct eb_objects *eb, unsigned long handle) | ||
247 | { | ||
248 | struct hlist_head *head; | ||
249 | struct hlist_node *node; | ||
250 | struct drm_i915_gem_object *obj; | ||
251 | |||
252 | head = &eb->buckets[handle & eb->and]; | ||
253 | hlist_for_each(node, head) { | ||
254 | obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); | ||
255 | if (obj->exec_handle == handle) | ||
256 | return obj; | ||
257 | } | ||
258 | |||
259 | return NULL; | ||
260 | } | ||
261 | |||
262 | static void | ||
263 | eb_destroy(struct eb_objects *eb) | ||
264 | { | ||
265 | kfree(eb); | ||
266 | } | ||
267 | |||
268 | static int | ||
269 | i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | ||
270 | struct eb_objects *eb, | ||
271 | struct drm_i915_gem_relocation_entry *reloc) | ||
272 | { | ||
273 | struct drm_device *dev = obj->base.dev; | ||
274 | struct drm_gem_object *target_obj; | ||
275 | uint32_t target_offset; | ||
276 | int ret = -EINVAL; | ||
277 | |||
278 | /* we already hold a reference to all valid objects */ | ||
279 | target_obj = &eb_get_object(eb, reloc->target_handle)->base; | ||
280 | if (unlikely(target_obj == NULL)) | ||
281 | return -ENOENT; | ||
282 | |||
283 | target_offset = to_intel_bo(target_obj)->gtt_offset; | ||
284 | |||
285 | #if WATCH_RELOC | ||
286 | DRM_INFO("%s: obj %p offset %08x target %d " | ||
287 | "read %08x write %08x gtt %08x " | ||
288 | "presumed %08x delta %08x\n", | ||
289 | __func__, | ||
290 | obj, | ||
291 | (int) reloc->offset, | ||
292 | (int) reloc->target_handle, | ||
293 | (int) reloc->read_domains, | ||
294 | (int) reloc->write_domain, | ||
295 | (int) target_offset, | ||
296 | (int) reloc->presumed_offset, | ||
297 | reloc->delta); | ||
298 | #endif | ||
299 | |||
300 | /* The target buffer should have appeared before us in the | ||
301 | * exec_object list, so it should have a GTT space bound by now. | ||
302 | */ | ||
303 | if (unlikely(target_offset == 0)) { | ||
304 | DRM_ERROR("No GTT space found for object %d\n", | ||
305 | reloc->target_handle); | ||
306 | return ret; | ||
307 | } | ||
308 | |||
309 | /* Validate that the target is in a valid r/w GPU domain */ | ||
310 | if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { | ||
311 | DRM_ERROR("reloc with multiple write domains: " | ||
312 | "obj %p target %d offset %d " | ||
313 | "read %08x write %08x", | ||
314 | obj, reloc->target_handle, | ||
315 | (int) reloc->offset, | ||
316 | reloc->read_domains, | ||
317 | reloc->write_domain); | ||
318 | return ret; | ||
319 | } | ||
320 | if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) { | ||
321 | DRM_ERROR("reloc with read/write CPU domains: " | ||
322 | "obj %p target %d offset %d " | ||
323 | "read %08x write %08x", | ||
324 | obj, reloc->target_handle, | ||
325 | (int) reloc->offset, | ||
326 | reloc->read_domains, | ||
327 | reloc->write_domain); | ||
328 | return ret; | ||
329 | } | ||
330 | if (unlikely(reloc->write_domain && target_obj->pending_write_domain && | ||
331 | reloc->write_domain != target_obj->pending_write_domain)) { | ||
332 | DRM_ERROR("Write domain conflict: " | ||
333 | "obj %p target %d offset %d " | ||
334 | "new %08x old %08x\n", | ||
335 | obj, reloc->target_handle, | ||
336 | (int) reloc->offset, | ||
337 | reloc->write_domain, | ||
338 | target_obj->pending_write_domain); | ||
339 | return ret; | ||
340 | } | ||
341 | |||
342 | target_obj->pending_read_domains |= reloc->read_domains; | ||
343 | target_obj->pending_write_domain |= reloc->write_domain; | ||
344 | |||
345 | /* If the relocation already has the right value in it, no | ||
346 | * more work needs to be done. | ||
347 | */ | ||
348 | if (target_offset == reloc->presumed_offset) | ||
349 | return 0; | ||
350 | |||
351 | /* Check that the relocation address is valid... */ | ||
352 | if (unlikely(reloc->offset > obj->base.size - 4)) { | ||
353 | DRM_ERROR("Relocation beyond object bounds: " | ||
354 | "obj %p target %d offset %d size %d.\n", | ||
355 | obj, reloc->target_handle, | ||
356 | (int) reloc->offset, | ||
357 | (int) obj->base.size); | ||
358 | return ret; | ||
359 | } | ||
360 | if (unlikely(reloc->offset & 3)) { | ||
361 | DRM_ERROR("Relocation not 4-byte aligned: " | ||
362 | "obj %p target %d offset %d.\n", | ||
363 | obj, reloc->target_handle, | ||
364 | (int) reloc->offset); | ||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | /* and points to somewhere within the target object. */ | ||
369 | if (unlikely(reloc->delta >= target_obj->size)) { | ||
370 | DRM_ERROR("Relocation beyond target object bounds: " | ||
371 | "obj %p target %d delta %d size %d.\n", | ||
372 | obj, reloc->target_handle, | ||
373 | (int) reloc->delta, | ||
374 | (int) target_obj->size); | ||
375 | return ret; | ||
376 | } | ||
377 | |||
378 | reloc->delta += target_offset; | ||
379 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { | ||
380 | uint32_t page_offset = reloc->offset & ~PAGE_MASK; | ||
381 | char *vaddr; | ||
382 | |||
383 | vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); | ||
384 | *(uint32_t *)(vaddr + page_offset) = reloc->delta; | ||
385 | kunmap_atomic(vaddr); | ||
386 | } else { | ||
387 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
388 | uint32_t __iomem *reloc_entry; | ||
389 | void __iomem *reloc_page; | ||
390 | |||
391 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | ||
392 | if (ret) | ||
393 | return ret; | ||
394 | |||
395 | /* Map the page containing the relocation we're going to perform. */ | ||
396 | reloc->offset += obj->gtt_offset; | ||
397 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | ||
398 | reloc->offset & PAGE_MASK); | ||
399 | reloc_entry = (uint32_t __iomem *) | ||
400 | (reloc_page + (reloc->offset & ~PAGE_MASK)); | ||
401 | iowrite32(reloc->delta, reloc_entry); | ||
402 | io_mapping_unmap_atomic(reloc_page); | ||
403 | } | ||
404 | |||
405 | /* and update the user's relocation entry */ | ||
406 | reloc->presumed_offset = target_offset; | ||
407 | |||
408 | return 0; | ||
409 | } | ||
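What one relocation amounts to once the checks pass, as a minimal user-space model (not the kernel paths above, which go through kmap_atomic or a GTT io mapping): patch a 32-bit slot in the batch with the target's real offset plus the delta, and skip the write entirely when userspace's presumed offset was already right.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reloc {
	uint32_t offset;		/* byte offset into the batch */
	uint32_t delta;			/* offset within the target object */
	uint32_t presumed_offset;	/* where userspace thought the target was */
};

static int apply_reloc(uint8_t *batch, size_t batch_size,
		       struct reloc *r, uint32_t target_offset)
{
	uint32_t value = target_offset + r->delta;

	if (r->offset > batch_size - 4 || (r->offset & 3))
		return -1;			/* out of bounds or misaligned */
	if (target_offset == r->presumed_offset)
		return 0;			/* nothing to do */
	memcpy(batch + r->offset, &value, sizeof(value));
	r->presumed_offset = target_offset;	/* feed back to userspace */
	return 0;
}

int main(void)
{
	uint8_t batch[64] = { 0 };
	struct reloc r = { .offset = 8, .delta = 0x100, .presumed_offset = 0 };
	uint32_t out;

	apply_reloc(batch, sizeof(batch), &r, 0x10000);
	memcpy(&out, batch + 8, sizeof(out));
	printf("patched dword: %#x\n", out);	/* 0x10100 */
	return 0;
}
```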
410 | |||
411 | static int | ||
412 | i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, | ||
413 | struct eb_objects *eb) | ||
414 | { | ||
415 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
416 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
417 | int i, ret; | ||
418 | |||
419 | user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; | ||
420 | for (i = 0; i < entry->relocation_count; i++) { | ||
421 | struct drm_i915_gem_relocation_entry reloc; | ||
422 | |||
423 | if (__copy_from_user_inatomic(&reloc, | ||
424 | user_relocs+i, | ||
425 | sizeof(reloc))) | ||
426 | return -EFAULT; | ||
427 | |||
428 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); | ||
429 | if (ret) | ||
430 | return ret; | ||
431 | |||
432 | if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, | ||
433 | &reloc.presumed_offset, | ||
434 | sizeof(reloc.presumed_offset))) | ||
435 | return -EFAULT; | ||
436 | } | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | static int | ||
442 | i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, | ||
443 | struct eb_objects *eb, | ||
444 | struct drm_i915_gem_relocation_entry *relocs) | ||
445 | { | ||
446 | const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
447 | int i, ret; | ||
448 | |||
449 | for (i = 0; i < entry->relocation_count; i++) { | ||
450 | ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); | ||
451 | if (ret) | ||
452 | return ret; | ||
453 | } | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static int | ||
459 | i915_gem_execbuffer_relocate(struct drm_device *dev, | ||
460 | struct eb_objects *eb, | ||
461 | struct list_head *objects) | ||
462 | { | ||
463 | struct drm_i915_gem_object *obj; | ||
464 | int ret; | ||
465 | |||
466 | list_for_each_entry(obj, objects, exec_list) { | ||
467 | ret = i915_gem_execbuffer_relocate_object(obj, eb); | ||
468 | if (ret) | ||
469 | return ret; | ||
470 | } | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | static int | ||
476 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | ||
477 | struct drm_file *file, | ||
478 | struct list_head *objects) | ||
479 | { | ||
480 | struct drm_i915_gem_object *obj; | ||
481 | int ret, retry; | ||
482 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | ||
483 | struct list_head ordered_objects; | ||
484 | |||
485 | INIT_LIST_HEAD(&ordered_objects); | ||
486 | while (!list_empty(objects)) { | ||
487 | struct drm_i915_gem_exec_object2 *entry; | ||
488 | bool need_fence, need_mappable; | ||
489 | |||
490 | obj = list_first_entry(objects, | ||
491 | struct drm_i915_gem_object, | ||
492 | exec_list); | ||
493 | entry = obj->exec_entry; | ||
494 | |||
495 | need_fence = | ||
496 | has_fenced_gpu_access && | ||
497 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
498 | obj->tiling_mode != I915_TILING_NONE; | ||
499 | need_mappable = | ||
500 | entry->relocation_count ? true : need_fence; | ||
501 | |||
502 | if (need_mappable) | ||
503 | list_move(&obj->exec_list, &ordered_objects); | ||
504 | else | ||
505 | list_move_tail(&obj->exec_list, &ordered_objects); | ||
506 | |||
507 | obj->base.pending_read_domains = 0; | ||
508 | obj->base.pending_write_domain = 0; | ||
509 | } | ||
510 | list_splice(&ordered_objects, objects); | ||
511 | |||
512 | /* Attempt to pin all of the buffers into the GTT. | ||
513 | * This is done in 3 phases: | ||
514 | * | ||
515 | * 1a. Unbind all objects that do not match the GTT constraints for | ||
516 | * the execbuffer (fenceable, mappable, alignment etc). | ||
517 | * 1b. Increment pin count for already bound objects. | ||
518 | * 2. Bind new objects. | ||
519 | * 3. Decrement pin count. | ||
520 | * | ||
521 | * This avoids unnecessary unbinding of later objects in order to make | ||
522 | * room for the earlier objects *unless* we need to defragment. | ||
523 | */ | ||
524 | retry = 0; | ||
525 | do { | ||
526 | ret = 0; | ||
527 | |||
528 | /* Unbind any ill-fitting objects or pin. */ | ||
529 | list_for_each_entry(obj, objects, exec_list) { | ||
530 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
531 | bool need_fence, need_mappable; | ||
532 | if (!obj->gtt_space) | ||
533 | continue; | ||
534 | |||
535 | need_fence = | ||
536 | has_fenced_gpu_access && | ||
537 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
538 | obj->tiling_mode != I915_TILING_NONE; | ||
539 | need_mappable = | ||
540 | entry->relocation_count ? true : need_fence; | ||
541 | |||
542 | if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || | ||
543 | (need_mappable && !obj->map_and_fenceable)) | ||
544 | ret = i915_gem_object_unbind(obj); | ||
545 | else | ||
546 | ret = i915_gem_object_pin(obj, | ||
547 | entry->alignment, | ||
548 | need_mappable); | ||
549 | if (ret) | ||
550 | goto err; | ||
551 | |||
552 | entry++; | ||
553 | } | ||
554 | |||
555 | /* Bind fresh objects */ | ||
556 | list_for_each_entry(obj, objects, exec_list) { | ||
557 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | ||
558 | bool need_fence; | ||
559 | |||
560 | need_fence = | ||
561 | has_fenced_gpu_access && | ||
562 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
563 | obj->tiling_mode != I915_TILING_NONE; | ||
564 | |||
565 | if (!obj->gtt_space) { | ||
566 | bool need_mappable = | ||
567 | entry->relocation_count ? true : need_fence; | ||
568 | |||
569 | ret = i915_gem_object_pin(obj, | ||
570 | entry->alignment, | ||
571 | need_mappable); | ||
572 | if (ret) | ||
573 | break; | ||
574 | } | ||
575 | |||
576 | if (has_fenced_gpu_access) { | ||
577 | if (need_fence) { | ||
578 | ret = i915_gem_object_get_fence(obj, ring, 1); | ||
579 | if (ret) | ||
580 | break; | ||
581 | } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
582 | obj->tiling_mode == I915_TILING_NONE) { | ||
583 | /* XXX pipelined! */ | ||
584 | ret = i915_gem_object_put_fence(obj); | ||
585 | if (ret) | ||
586 | break; | ||
587 | } | ||
588 | obj->pending_fenced_gpu_access = need_fence; | ||
589 | } | ||
590 | |||
591 | entry->offset = obj->gtt_offset; | ||
592 | } | ||
593 | |||
594 | /* Decrement pin count for bound objects */ | ||
595 | list_for_each_entry(obj, objects, exec_list) { | ||
596 | if (obj->gtt_space) | ||
597 | i915_gem_object_unpin(obj); | ||
598 | } | ||
599 | |||
600 | if (ret != -ENOSPC || retry > 1) | ||
601 | return ret; | ||
602 | |||
603 | /* First attempt, just clear anything that is purgeable. | ||
604 | * Second attempt, clear the entire GTT. | ||
605 | */ | ||
606 | ret = i915_gem_evict_everything(ring->dev, retry == 0); | ||
607 | if (ret) | ||
608 | return ret; | ||
609 | |||
610 | retry++; | ||
611 | } while (1); | ||
612 | |||
613 | err: | ||
614 | obj = list_entry(obj->exec_list.prev, | ||
615 | struct drm_i915_gem_object, | ||
616 | exec_list); | ||
617 | while (objects != &obj->exec_list) { | ||
618 | if (obj->gtt_space) | ||
619 | i915_gem_object_unpin(obj); | ||
620 | |||
621 | obj = list_entry(obj->exec_list.prev, | ||
622 | struct drm_i915_gem_object, | ||
623 | exec_list); | ||
624 | } | ||
625 | |||
626 | return ret; | ||
627 | } | ||
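The retry policy around the three pin phases, as a runnable sketch; try_pin_all() and evict() are hypothetical stand-ins for the pin loop and i915_gem_evict_everything(), simulating a GTT that only fits the request after a full eviction.

```c
#include <errno.h>
#include <stdio.h>

static int gtt_free_slots;	/* toy model of free GTT space */

static int try_pin_all(int needed)
{
	return needed <= gtt_free_slots ? 0 : -ENOSPC;
}

static int evict(int purgeable_only)
{
	gtt_free_slots += purgeable_only ? 1 : 8;
	return 0;
}

static int reserve_with_retry(int needed)
{
	int retry = 0;

	for (;;) {
		int ret = try_pin_all(needed);	/* phases 1-3 above */

		if (ret != -ENOSPC || retry > 1)
			return ret;
		/* first retry: only purgeable objects; second: whole GTT */
		ret = evict(retry == 0);
		if (ret)
			return ret;
		retry++;
	}
}

int main(void)
{
	printf("reserve -> %d\n", reserve_with_retry(4));	/* 0 after two evictions */
	return 0;
}
```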
628 | |||
629 | static int | ||
630 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | ||
631 | struct drm_file *file, | ||
632 | struct intel_ring_buffer *ring, | ||
633 | struct list_head *objects, | ||
634 | struct eb_objects *eb, | ||
635 | struct drm_i915_gem_exec_object2 *exec, | ||
636 | int count) | ||
637 | { | ||
638 | struct drm_i915_gem_relocation_entry *reloc; | ||
639 | struct drm_i915_gem_object *obj; | ||
640 | int *reloc_offset; | ||
641 | int i, total, ret; | ||
642 | |||
643 | /* We may process another execbuffer during the unlock... */ | ||
644 | while (!list_empty(objects)) { | ||
645 | obj = list_first_entry(objects, | ||
646 | struct drm_i915_gem_object, | ||
647 | exec_list); | ||
648 | list_del_init(&obj->exec_list); | ||
649 | drm_gem_object_unreference(&obj->base); | ||
650 | } | ||
651 | |||
652 | mutex_unlock(&dev->struct_mutex); | ||
653 | |||
654 | total = 0; | ||
655 | for (i = 0; i < count; i++) | ||
656 | total += exec[i].relocation_count; | ||
657 | |||
658 | reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset)); | ||
659 | reloc = drm_malloc_ab(total, sizeof(*reloc)); | ||
660 | if (reloc == NULL || reloc_offset == NULL) { | ||
661 | drm_free_large(reloc); | ||
662 | drm_free_large(reloc_offset); | ||
663 | mutex_lock(&dev->struct_mutex); | ||
664 | return -ENOMEM; | ||
665 | } | ||
666 | |||
667 | total = 0; | ||
668 | for (i = 0; i < count; i++) { | ||
669 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
670 | |||
671 | user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; | ||
672 | |||
673 | if (copy_from_user(reloc+total, user_relocs, | ||
674 | exec[i].relocation_count * sizeof(*reloc))) { | ||
675 | ret = -EFAULT; | ||
676 | mutex_lock(&dev->struct_mutex); | ||
677 | goto err; | ||
678 | } | ||
679 | |||
680 | reloc_offset[i] = total; | ||
681 | total += exec[i].relocation_count; | ||
682 | } | ||
683 | |||
684 | ret = i915_mutex_lock_interruptible(dev); | ||
685 | if (ret) { | ||
686 | mutex_lock(&dev->struct_mutex); | ||
687 | goto err; | ||
688 | } | ||
689 | |||
690 | /* reacquire the objects */ | ||
691 | eb_reset(eb); | ||
692 | for (i = 0; i < count; i++) { | ||
693 | struct drm_i915_gem_object *obj; | ||
694 | |||
695 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | ||
696 | exec[i].handle)); | ||
697 | if (obj == NULL) { | ||
698 | DRM_ERROR("Invalid object handle %d at index %d\n", | ||
699 | exec[i].handle, i); | ||
700 | ret = -ENOENT; | ||
701 | goto err; | ||
702 | } | ||
703 | |||
704 | list_add_tail(&obj->exec_list, objects); | ||
705 | obj->exec_handle = exec[i].handle; | ||
706 | obj->exec_entry = &exec[i]; | ||
707 | eb_add_object(eb, obj); | ||
708 | } | ||
709 | |||
710 | ret = i915_gem_execbuffer_reserve(ring, file, objects); | ||
711 | if (ret) | ||
712 | goto err; | ||
713 | |||
714 | list_for_each_entry(obj, objects, exec_list) { | ||
715 | int offset = obj->exec_entry - exec; | ||
716 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, | ||
717 | reloc + reloc_offset[offset]); | ||
718 | if (ret) | ||
719 | goto err; | ||
720 | } | ||
721 | |||
722 | /* Leave the user relocations as they are; this is the painfully slow path, | ||
723 | * and we want to avoid the complication of dropping the lock whilst | ||
724 | * having buffers reserved in the aperture and so causing spurious | ||
725 | * ENOSPC for random operations. | ||
726 | */ | ||
727 | |||
728 | err: | ||
729 | drm_free_large(reloc); | ||
730 | drm_free_large(reloc_offset); | ||
731 | return ret; | ||
732 | } | ||
733 | |||
734 | static int | ||
735 | i915_gem_execbuffer_flush(struct drm_device *dev, | ||
736 | uint32_t invalidate_domains, | ||
737 | uint32_t flush_domains, | ||
738 | uint32_t flush_rings) | ||
739 | { | ||
740 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
741 | int i, ret; | ||
742 | |||
743 | if (flush_domains & I915_GEM_DOMAIN_CPU) | ||
744 | intel_gtt_chipset_flush(); | ||
745 | |||
746 | if (flush_domains & I915_GEM_DOMAIN_GTT) | ||
747 | wmb(); | ||
748 | |||
749 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | ||
750 | for (i = 0; i < I915_NUM_RINGS; i++) | ||
751 | if (flush_rings & (1 << i)) { | ||
752 | ret = i915_gem_flush_ring(dev, | ||
753 | &dev_priv->ring[i], | ||
754 | invalidate_domains, | ||
755 | flush_domains); | ||
756 | if (ret) | ||
757 | return ret; | ||
758 | } | ||
759 | } | ||
760 | |||
761 | return 0; | ||
762 | } | ||
763 | |||
764 | static int | ||
765 | i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | ||
766 | struct intel_ring_buffer *to) | ||
767 | { | ||
768 | struct intel_ring_buffer *from = obj->ring; | ||
769 | u32 seqno; | ||
770 | int ret, idx; | ||
771 | |||
772 | if (from == NULL || to == from) | ||
773 | return 0; | ||
774 | |||
775 | /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ | ||
776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) | ||
777 | return i915_gem_object_wait_rendering(obj, true); | ||
778 | |||
779 | idx = intel_ring_sync_index(from, to); | ||
780 | |||
781 | seqno = obj->last_rendering_seqno; | ||
782 | if (seqno <= from->sync_seqno[idx]) | ||
783 | return 0; | ||
784 | |||
785 | if (seqno == from->outstanding_lazy_request) { | ||
786 | struct drm_i915_gem_request *request; | ||
787 | |||
788 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
789 | if (request == NULL) | ||
790 | return -ENOMEM; | ||
791 | |||
792 | ret = i915_add_request(obj->base.dev, NULL, request, from); | ||
793 | if (ret) { | ||
794 | kfree(request); | ||
795 | return ret; | ||
796 | } | ||
797 | |||
798 | seqno = request->seqno; | ||
799 | } | ||
800 | |||
801 | from->sync_seqno[idx] = seqno; | ||
802 | return intel_ring_sync(to, from, seqno - 1); | ||
803 | } | ||
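The core of the decision above, modeled in user space with illustrative seqno values: a target ring only emits a semaphore wait when it has not already synced past the seqno that last touched the object, and it records the new seqno so later objects from the same source ring ride for free.

```c
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t sync_seqno;	/* highest seqno already waited for */
};

static int sync_to(struct ring *to, uint32_t obj_seqno)
{
	if (obj_seqno <= to->sync_seqno)
		return 0;		/* already ordered, no wait needed */
	to->sync_seqno = obj_seqno;	/* would emit a semaphore wait here */
	return 1;
}

int main(void)
{
	struct ring blt = { .sync_seqno = 0 };

	printf("wait? %d\n", sync_to(&blt, 42));	/* 1: emit a wait */
	printf("wait? %d\n", sync_to(&blt, 37));	/* 0: covered by 42 */
	return 0;
}
```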
804 | |||
805 | static int | ||
806 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | ||
807 | struct list_head *objects) | ||
808 | { | ||
809 | struct drm_i915_gem_object *obj; | ||
810 | struct change_domains cd; | ||
811 | int ret; | ||
812 | |||
813 | cd.invalidate_domains = 0; | ||
814 | cd.flush_domains = 0; | ||
815 | cd.flush_rings = 0; | ||
816 | list_for_each_entry(obj, objects, exec_list) | ||
817 | i915_gem_object_set_to_gpu_domain(obj, ring, &cd); | ||
818 | |||
819 | if (cd.invalidate_domains | cd.flush_domains) { | ||
820 | #if WATCH_EXEC | ||
821 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
822 | __func__, | ||
823 | cd.invalidate_domains, | ||
824 | cd.flush_domains); | ||
825 | #endif | ||
826 | ret = i915_gem_execbuffer_flush(ring->dev, | ||
827 | cd.invalidate_domains, | ||
828 | cd.flush_domains, | ||
829 | cd.flush_rings); | ||
830 | if (ret) | ||
831 | return ret; | ||
832 | } | ||
833 | |||
834 | list_for_each_entry(obj, objects, exec_list) { | ||
835 | ret = i915_gem_execbuffer_sync_rings(obj, ring); | ||
836 | if (ret) | ||
837 | return ret; | ||
838 | } | ||
839 | |||
840 | return 0; | ||
841 | } | ||
842 | |||
843 | static bool | ||
844 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) | ||
845 | { | ||
846 | return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; | ||
847 | } | ||
848 | |||
849 | static int | ||
850 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | ||
851 | int count) | ||
852 | { | ||
853 | int i; | ||
854 | |||
855 | for (i = 0; i < count; i++) { | ||
856 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; | ||
857 | int length; /* limited by fault_in_pages_readable() */ | ||
858 | |||
859 | /* First check for malicious input causing overflow */ | ||
860 | if (exec[i].relocation_count > | ||
861 | INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) | ||
862 | return -EINVAL; | ||
863 | |||
864 | length = exec[i].relocation_count * | ||
865 | sizeof(struct drm_i915_gem_relocation_entry); | ||
866 | if (!access_ok(VERIFY_READ, ptr, length)) | ||
867 | return -EFAULT; | ||
868 | |||
869 | /* we may also need to update the presumed offsets */ | ||
870 | if (!access_ok(VERIFY_WRITE, ptr, length)) | ||
871 | return -EFAULT; | ||
872 | |||
873 | if (fault_in_pages_readable(ptr, length)) | ||
874 | return -EFAULT; | ||
875 | } | ||
876 | |||
877 | return 0; | ||
878 | } | ||
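The first check guards the multiplication that produces "length"; a stand-alone illustration (the 32-byte entry size is an assumption for the sketch, not the real sizeof):

```c
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define RELOC_ENTRY_SIZE 32	/* assumed sizeof(relocation entry) */

static int checked_length(uint32_t relocation_count, int *length)
{
	/* Reject counts whose product would wrap an int length. */
	if (relocation_count > INT_MAX / RELOC_ENTRY_SIZE)
		return -EINVAL;
	*length = relocation_count * RELOC_ENTRY_SIZE;
	return 0;
}

int main(void)
{
	int len;

	printf("small: %d\n", checked_length(100, &len));		/* 0 */
	printf("huge:  %d\n", checked_length(0x40000000, &len));	/* -22 */
	return 0;
}
```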
879 | |||
880 | static int | ||
881 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, | ||
882 | struct list_head *objects) | ||
883 | { | ||
884 | struct drm_i915_gem_object *obj; | ||
885 | int flips; | ||
886 | |||
887 | /* Check for any pending flips. As we only maintain a flip queue depth | ||
888 | * of 1, we can simply insert a WAIT for the next display flip prior | ||
889 | * to executing the batch and avoid stalling the CPU. | ||
890 | */ | ||
891 | flips = 0; | ||
892 | list_for_each_entry(obj, objects, exec_list) { | ||
893 | if (obj->base.write_domain) | ||
894 | flips |= atomic_read(&obj->pending_flip); | ||
895 | } | ||
896 | if (flips) { | ||
897 | int plane, flip_mask, ret; | ||
898 | |||
899 | for (plane = 0; flips >> plane; plane++) { | ||
900 | if (((flips >> plane) & 1) == 0) | ||
901 | continue; | ||
902 | |||
903 | if (plane) | ||
904 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
905 | else | ||
906 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
907 | |||
908 | ret = intel_ring_begin(ring, 2); | ||
909 | if (ret) | ||
910 | return ret; | ||
911 | |||
912 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); | ||
913 | intel_ring_emit(ring, MI_NOOP); | ||
914 | intel_ring_advance(ring); | ||
915 | } | ||
916 | } | ||
917 | |||
918 | return 0; | ||
919 | } | ||
920 | |||
921 | static void | ||
922 | i915_gem_execbuffer_move_to_active(struct list_head *objects, | ||
923 | struct intel_ring_buffer *ring, | ||
924 | u32 seqno) | ||
925 | { | ||
926 | struct drm_i915_gem_object *obj; | ||
927 | |||
928 | list_for_each_entry(obj, objects, exec_list) { | ||
929 | obj->base.read_domains = obj->base.pending_read_domains; | ||
930 | obj->base.write_domain = obj->base.pending_write_domain; | ||
931 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; | ||
932 | |||
933 | i915_gem_object_move_to_active(obj, ring, seqno); | ||
934 | if (obj->base.write_domain) { | ||
935 | obj->dirty = 1; | ||
936 | obj->pending_gpu_write = true; | ||
937 | list_move_tail(&obj->gpu_write_list, | ||
938 | &ring->gpu_write_list); | ||
939 | intel_mark_busy(ring->dev, obj); | ||
940 | } | ||
941 | |||
942 | trace_i915_gem_object_change_domain(obj, | ||
943 | obj->base.read_domains, | ||
944 | obj->base.write_domain); | ||
945 | } | ||
946 | } | ||
947 | |||
948 | static void | ||
949 | i915_gem_execbuffer_retire_commands(struct drm_device *dev, | ||
950 | struct drm_file *file, | ||
951 | struct intel_ring_buffer *ring) | ||
952 | { | ||
953 | struct drm_i915_gem_request *request; | ||
954 | u32 invalidate; | ||
955 | |||
956 | /* | ||
957 | * Ensure that the commands in the batch buffer are | ||
958 | * finished before the interrupt fires. | ||
959 | * | ||
960 | * The sampler always gets flushed on i965 (sigh). | ||
961 | */ | ||
962 | invalidate = I915_GEM_DOMAIN_COMMAND; | ||
963 | if (INTEL_INFO(dev)->gen >= 4) | ||
964 | invalidate |= I915_GEM_DOMAIN_SAMPLER; | ||
965 | if (ring->flush(ring, invalidate, 0)) { | ||
966 | i915_gem_next_request_seqno(dev, ring); | ||
967 | return; | ||
968 | } | ||
969 | |||
970 | /* Add a breadcrumb for the completion of the batch buffer */ | ||
971 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
972 | if (request == NULL || i915_add_request(dev, file, request, ring)) { | ||
973 | i915_gem_next_request_seqno(dev, ring); | ||
974 | kfree(request); | ||
975 | } | ||
976 | } | ||
977 | |||
978 | static int | ||
979 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | ||
980 | struct drm_file *file, | ||
981 | struct drm_i915_gem_execbuffer2 *args, | ||
982 | struct drm_i915_gem_exec_object2 *exec) | ||
983 | { | ||
984 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
985 | struct list_head objects; | ||
986 | struct eb_objects *eb; | ||
987 | struct drm_i915_gem_object *batch_obj; | ||
988 | struct drm_clip_rect *cliprects = NULL; | ||
989 | struct intel_ring_buffer *ring; | ||
990 | u32 exec_start, exec_len; | ||
991 | u32 seqno; | ||
992 | int ret, mode, i; | ||
993 | |||
994 | if (!i915_gem_check_execbuffer(args)) { | ||
995 | DRM_ERROR("execbuf with invalid offset/length\n"); | ||
996 | return -EINVAL; | ||
997 | } | ||
998 | |||
999 | ret = validate_exec_list(exec, args->buffer_count); | ||
1000 | if (ret) | ||
1001 | return ret; | ||
1002 | |||
1003 | #if WATCH_EXEC | ||
1004 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
1005 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
1006 | #endif | ||
1007 | switch (args->flags & I915_EXEC_RING_MASK) { | ||
1008 | case I915_EXEC_DEFAULT: | ||
1009 | case I915_EXEC_RENDER: | ||
1010 | ring = &dev_priv->ring[RCS]; | ||
1011 | break; | ||
1012 | case I915_EXEC_BSD: | ||
1013 | if (!HAS_BSD(dev)) { | ||
1014 | DRM_ERROR("execbuf with invalid ring (BSD)\n"); | ||
1015 | return -EINVAL; | ||
1016 | } | ||
1017 | ring = &dev_priv->ring[VCS]; | ||
1018 | break; | ||
1019 | case I915_EXEC_BLT: | ||
1020 | if (!HAS_BLT(dev)) { | ||
1021 | DRM_ERROR("execbuf with invalid ring (BLT)\n"); | ||
1022 | return -EINVAL; | ||
1023 | } | ||
1024 | ring = &dev_priv->ring[BCS]; | ||
1025 | break; | ||
1026 | default: | ||
1027 | DRM_ERROR("execbuf with unknown ring: %d\n", | ||
1028 | (int)(args->flags & I915_EXEC_RING_MASK)); | ||
1029 | return -EINVAL; | ||
1030 | } | ||
1031 | |||
1032 | mode = args->flags & I915_EXEC_CONSTANTS_MASK; | ||
1033 | switch (mode) { | ||
1034 | case I915_EXEC_CONSTANTS_REL_GENERAL: | ||
1035 | case I915_EXEC_CONSTANTS_ABSOLUTE: | ||
1036 | case I915_EXEC_CONSTANTS_REL_SURFACE: | ||
1037 | if (ring == &dev_priv->ring[RCS] && | ||
1038 | mode != dev_priv->relative_constants_mode) { | ||
1039 | if (INTEL_INFO(dev)->gen < 4) | ||
1040 | return -EINVAL; | ||
1041 | |||
1042 | if (INTEL_INFO(dev)->gen > 5 && | ||
1043 | mode == I915_EXEC_CONSTANTS_REL_SURFACE) | ||
1044 | return -EINVAL; | ||
1045 | |||
1046 | ret = intel_ring_begin(ring, 4); | ||
1047 | if (ret) | ||
1048 | return ret; | ||
1049 | |||
1050 | intel_ring_emit(ring, MI_NOOP); | ||
1051 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | ||
1052 | intel_ring_emit(ring, INSTPM); | ||
1053 | intel_ring_emit(ring, | ||
1054 | I915_EXEC_CONSTANTS_MASK << 16 | mode); | ||
1055 | intel_ring_advance(ring); | ||
1056 | |||
1057 | dev_priv->relative_constants_mode = mode; | ||
1058 | } | ||
1059 | break; | ||
1060 | default: | ||
1061 | DRM_ERROR("execbuf with unknown constants: %d\n", mode); | ||
1062 | return -EINVAL; | ||
1063 | } | ||
1064 | |||
1065 | if (args->buffer_count < 1) { | ||
1066 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
1067 | return -EINVAL; | ||
1068 | } | ||
1069 | |||
1070 | if (args->num_cliprects != 0) { | ||
1071 | if (ring != &dev_priv->ring[RCS]) { | ||
1072 | DRM_ERROR("clip rectangles are only valid with the render ring\n"); | ||
1073 | return -EINVAL; | ||
1074 | } | ||
1075 | |||
1076 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), | ||
1077 | GFP_KERNEL); | ||
1078 | if (cliprects == NULL) { | ||
1079 | ret = -ENOMEM; | ||
1080 | goto pre_mutex_err; | ||
1081 | } | ||
1082 | |||
1083 | if (copy_from_user(cliprects, | ||
1084 | (struct drm_clip_rect __user *)(uintptr_t) | ||
1085 | args->cliprects_ptr, | ||
1086 | sizeof(*cliprects)*args->num_cliprects)) { | ||
1087 | ret = -EFAULT; | ||
1088 | goto pre_mutex_err; | ||
1089 | } | ||
1090 | } | ||
1091 | |||
1092 | ret = i915_mutex_lock_interruptible(dev); | ||
1093 | if (ret) | ||
1094 | goto pre_mutex_err; | ||
1095 | |||
1096 | if (dev_priv->mm.suspended) { | ||
1097 | mutex_unlock(&dev->struct_mutex); | ||
1098 | ret = -EBUSY; | ||
1099 | goto pre_mutex_err; | ||
1100 | } | ||
1101 | |||
1102 | eb = eb_create(args->buffer_count); | ||
1103 | if (eb == NULL) { | ||
1104 | mutex_unlock(&dev->struct_mutex); | ||
1105 | ret = -ENOMEM; | ||
1106 | goto pre_mutex_err; | ||
1107 | } | ||
1108 | |||
1109 | /* Look up object handles */ | ||
1110 | INIT_LIST_HEAD(&objects); | ||
1111 | for (i = 0; i < args->buffer_count; i++) { | ||
1112 | struct drm_i915_gem_object *obj; | ||
1113 | |||
1114 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | ||
1115 | exec[i].handle)); | ||
1116 | if (obj == NULL) { | ||
1117 | DRM_ERROR("Invalid object handle %d at index %d\n", | ||
1118 | exec[i].handle, i); | ||
1119 | /* prevent error path from reading uninitialized data */ | ||
1120 | ret = -ENOENT; | ||
1121 | goto err; | ||
1122 | } | ||
1123 | |||
1124 | if (!list_empty(&obj->exec_list)) { | ||
1125 | DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n", | ||
1126 | obj, exec[i].handle, i); | ||
1127 | ret = -EINVAL; | ||
1128 | goto err; | ||
1129 | } | ||
1130 | |||
1131 | list_add_tail(&obj->exec_list, &objects); | ||
1132 | obj->exec_handle = exec[i].handle; | ||
1133 | obj->exec_entry = &exec[i]; | ||
1134 | eb_add_object(eb, obj); | ||
1135 | } | ||
1136 | |||
1137 | /* take note of the batch buffer before we might reorder the lists */ | ||
1138 | batch_obj = list_entry(objects.prev, | ||
1139 | struct drm_i915_gem_object, | ||
1140 | exec_list); | ||
1141 | |||
1142 | /* Move the objects en-masse into the GTT, evicting if necessary. */ | ||
1143 | ret = i915_gem_execbuffer_reserve(ring, file, &objects); | ||
1144 | if (ret) | ||
1145 | goto err; | ||
1146 | |||
1147 | /* The objects are in their final locations, apply the relocations. */ | ||
1148 | ret = i915_gem_execbuffer_relocate(dev, eb, &objects); | ||
1149 | if (ret) { | ||
1150 | if (ret == -EFAULT) { | ||
1151 | ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, | ||
1152 | &objects, eb, | ||
1153 | exec, | ||
1154 | args->buffer_count); | ||
1155 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
1156 | } | ||
1157 | if (ret) | ||
1158 | goto err; | ||
1159 | } | ||
1160 | |||
1161 | /* Set the pending read domains for the batch buffer to COMMAND */ | ||
1162 | if (batch_obj->base.pending_write_domain) { | ||
1163 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | ||
1164 | ret = -EINVAL; | ||
1165 | goto err; | ||
1166 | } | ||
1167 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | ||
1168 | |||
1169 | ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); | ||
1170 | if (ret) | ||
1171 | goto err; | ||
1172 | |||
1173 | ret = i915_gem_execbuffer_wait_for_flips(ring, &objects); | ||
1174 | if (ret) | ||
1175 | goto err; | ||
1176 | |||
1177 | seqno = i915_gem_next_request_seqno(dev, ring); | ||
1178 | for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { | ||
1179 | if (seqno < ring->sync_seqno[i]) { | ||
1180 | /* The GPU cannot handle its semaphore value wrapping, | ||
1181 | * so every billion or so execbuffers, we need to stall | ||
1182 | * the GPU in order to reset the counters. | ||
1183 | */ | ||
1184 | ret = i915_gpu_idle(dev); | ||
1185 | if (ret) | ||
1186 | goto err; | ||
1187 | |||
1188 | BUG_ON(ring->sync_seqno[i]); | ||
1189 | } | ||
1190 | } | ||
1191 | |||
1192 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; | ||
1193 | exec_len = args->batch_len; | ||
1194 | if (cliprects) { | ||
1195 | for (i = 0; i < args->num_cliprects; i++) { | ||
1196 | ret = i915_emit_box(dev, &cliprects[i], | ||
1197 | args->DR1, args->DR4); | ||
1198 | if (ret) | ||
1199 | goto err; | ||
1200 | |||
1201 | ret = ring->dispatch_execbuffer(ring, | ||
1202 | exec_start, exec_len); | ||
1203 | if (ret) | ||
1204 | goto err; | ||
1205 | } | ||
1206 | } else { | ||
1207 | ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); | ||
1208 | if (ret) | ||
1209 | goto err; | ||
1210 | } | ||
1211 | |||
1212 | i915_gem_execbuffer_move_to_active(&objects, ring, seqno); | ||
1213 | i915_gem_execbuffer_retire_commands(dev, file, ring); | ||
1214 | |||
1215 | err: | ||
1216 | eb_destroy(eb); | ||
1217 | while (!list_empty(&objects)) { | ||
1218 | struct drm_i915_gem_object *obj; | ||
1219 | |||
1220 | obj = list_first_entry(&objects, | ||
1221 | struct drm_i915_gem_object, | ||
1222 | exec_list); | ||
1223 | list_del_init(&obj->exec_list); | ||
1224 | drm_gem_object_unreference(&obj->base); | ||
1225 | } | ||
1226 | |||
1227 | mutex_unlock(&dev->struct_mutex); | ||
1228 | |||
1229 | pre_mutex_err: | ||
1230 | kfree(cliprects); | ||
1231 | return ret; | ||
1232 | } | ||
1233 | |||
1234 | /* | ||
1235 | * Legacy execbuffer just creates an exec2 list from the original exec object | ||
1236 | * list array and passes it to the real function. | ||
1237 | */ | ||
1238 | int | ||
1239 | i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
1240 | struct drm_file *file) | ||
1241 | { | ||
1242 | struct drm_i915_gem_execbuffer *args = data; | ||
1243 | struct drm_i915_gem_execbuffer2 exec2; | ||
1244 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
1245 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
1246 | int ret, i; | ||
1247 | |||
1248 | #if WATCH_EXEC | ||
1249 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
1250 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
1251 | #endif | ||
1252 | |||
1253 | if (args->buffer_count < 1) { | ||
1254 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
1255 | return -EINVAL; | ||
1256 | } | ||
1257 | |||
1258 | /* Copy in the exec list from userland */ | ||
1259 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
1260 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
1261 | if (exec_list == NULL || exec2_list == NULL) { | ||
1262 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
1263 | args->buffer_count); | ||
1264 | drm_free_large(exec_list); | ||
1265 | drm_free_large(exec2_list); | ||
1266 | return -ENOMEM; | ||
1267 | } | ||
1268 | ret = copy_from_user(exec_list, | ||
1269 | (struct drm_i915_relocation_entry __user *) | ||
1270 | (uintptr_t) args->buffers_ptr, | ||
1271 | sizeof(*exec_list) * args->buffer_count); | ||
1272 | if (ret != 0) { | ||
1273 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
1274 | args->buffer_count, ret); | ||
1275 | drm_free_large(exec_list); | ||
1276 | drm_free_large(exec2_list); | ||
1277 | return -EFAULT; | ||
1278 | } | ||
1279 | |||
1280 | for (i = 0; i < args->buffer_count; i++) { | ||
1281 | exec2_list[i].handle = exec_list[i].handle; | ||
1282 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | ||
1283 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | ||
1284 | exec2_list[i].alignment = exec_list[i].alignment; | ||
1285 | exec2_list[i].offset = exec_list[i].offset; | ||
1286 | if (INTEL_INFO(dev)->gen < 4) | ||
1287 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | ||
1288 | else | ||
1289 | exec2_list[i].flags = 0; | ||
1290 | } | ||
1291 | |||
1292 | exec2.buffers_ptr = args->buffers_ptr; | ||
1293 | exec2.buffer_count = args->buffer_count; | ||
1294 | exec2.batch_start_offset = args->batch_start_offset; | ||
1295 | exec2.batch_len = args->batch_len; | ||
1296 | exec2.DR1 = args->DR1; | ||
1297 | exec2.DR4 = args->DR4; | ||
1298 | exec2.num_cliprects = args->num_cliprects; | ||
1299 | exec2.cliprects_ptr = args->cliprects_ptr; | ||
1300 | exec2.flags = I915_EXEC_RENDER; | ||
1301 | |||
1302 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); | ||
1303 | if (!ret) { | ||
1304 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
1305 | for (i = 0; i < args->buffer_count; i++) | ||
1306 | exec_list[i].offset = exec2_list[i].offset; | ||
1307 | /* ... and back out to userspace */ | ||
1308 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
1309 | (uintptr_t) args->buffers_ptr, | ||
1310 | exec_list, | ||
1311 | sizeof(*exec_list) * args->buffer_count); | ||
1312 | if (ret) { | ||
1313 | ret = -EFAULT; | ||
1314 | DRM_ERROR("failed to copy %d exec entries " | ||
1315 | "back to user (%d)\n", | ||
1316 | args->buffer_count, ret); | ||
1317 | } | ||
1318 | } | ||
1319 | |||
1320 | drm_free_large(exec_list); | ||
1321 | drm_free_large(exec2_list); | ||
1322 | return ret; | ||
1323 | } | ||
1324 | |||
1325 | int | ||
1326 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | ||
1327 | struct drm_file *file) | ||
1328 | { | ||
1329 | struct drm_i915_gem_execbuffer2 *args = data; | ||
1330 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
1331 | int ret; | ||
1332 | |||
1333 | #if WATCH_EXEC | ||
1334 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
1335 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
1336 | #endif | ||
1337 | |||
1338 | if (args->buffer_count < 1) { | ||
1339 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | ||
1340 | return -EINVAL; | ||
1341 | } | ||
1342 | |||
1343 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
1344 | if (exec2_list == NULL) { | ||
1345 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
1346 | args->buffer_count); | ||
1347 | return -ENOMEM; | ||
1348 | } | ||
1349 | ret = copy_from_user(exec2_list, | ||
1350 | (struct drm_i915_relocation_entry __user *) | ||
1351 | (uintptr_t) args->buffers_ptr, | ||
1352 | sizeof(*exec2_list) * args->buffer_count); | ||
1353 | if (ret != 0) { | ||
1354 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
1355 | args->buffer_count, ret); | ||
1356 | drm_free_large(exec2_list); | ||
1357 | return -EFAULT; | ||
1358 | } | ||
1359 | |||
1360 | ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); | ||
1361 | if (!ret) { | ||
1362 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
1363 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
1364 | (uintptr_t) args->buffers_ptr, | ||
1365 | exec2_list, | ||
1366 | sizeof(*exec2_list) * args->buffer_count); | ||
1367 | if (ret) { | ||
1368 | ret = -EFAULT; | ||
1369 | DRM_ERROR("failed to copy %d exec entries " | ||
1370 | "back to user (%d)\n", | ||
1371 | args->buffer_count, ret); | ||
1372 | } | ||
1373 | } | ||
1374 | |||
1375 | drm_free_large(exec2_list); | ||
1376 | return ret; | ||
1377 | } | ||
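For reference, a hedged user-space sketch of driving this ioctl with a single batch buffer; "fd" is an open DRM device node and "batch_handle" a GEM object already filled with commands ending in MI_BATCH_BUFFER_END (both assumptions of the sketch). The last entry in the buffer list is treated as the batch, and batch_len must be 8-byte aligned per i915_gem_check_execbuffer() above.

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;	/* no relocations in this sketch */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;	/* batch must be the last entry */
	execbuf.batch_len = batch_len;	/* 8-byte aligned */
	execbuf.flags = I915_EXEC_RENDER;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
```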
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c new file mode 100644 index 000000000000..b0abdc64aa9f --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 Daniel Vetter | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include "drmP.h" | ||
26 | #include "drm.h" | ||
27 | #include "i915_drm.h" | ||
28 | #include "i915_drv.h" | ||
29 | #include "i915_trace.h" | ||
30 | #include "intel_drv.h" | ||
31 | |||
32 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | ||
33 | { | ||
34 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
35 | struct drm_i915_gem_object *obj; | ||
36 | |||
37 | /* First fill our portion of the GTT with scratch pages */ | ||
38 | intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, | ||
39 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); | ||
40 | |||
41 | list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { | ||
42 | i915_gem_clflush_object(obj); | ||
43 | |||
44 | if (dev_priv->mm.gtt->needs_dmar) { | ||
45 | BUG_ON(!obj->sg_list); | ||
46 | |||
47 | intel_gtt_insert_sg_entries(obj->sg_list, | ||
48 | obj->num_sg, | ||
49 | obj->gtt_space->start | ||
50 | >> PAGE_SHIFT, | ||
51 | obj->agp_type); | ||
52 | } else | ||
53 | intel_gtt_insert_pages(obj->gtt_space->start | ||
54 | >> PAGE_SHIFT, | ||
55 | obj->base.size >> PAGE_SHIFT, | ||
56 | obj->pages, | ||
57 | obj->agp_type); | ||
58 | } | ||
59 | |||
60 | intel_gtt_chipset_flush(); | ||
61 | } | ||
62 | |||
63 | int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) | ||
64 | { | ||
65 | struct drm_device *dev = obj->base.dev; | ||
66 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
67 | int ret; | ||
68 | |||
69 | if (dev_priv->mm.gtt->needs_dmar) { | ||
70 | ret = intel_gtt_map_memory(obj->pages, | ||
71 | obj->base.size >> PAGE_SHIFT, | ||
72 | &obj->sg_list, | ||
73 | &obj->num_sg); | ||
74 | if (ret != 0) | ||
75 | return ret; | ||
76 | |||
77 | intel_gtt_insert_sg_entries(obj->sg_list, | ||
78 | obj->num_sg, | ||
79 | obj->gtt_space->start >> PAGE_SHIFT, | ||
80 | obj->agp_type); | ||
81 | } else | ||
82 | intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, | ||
83 | obj->base.size >> PAGE_SHIFT, | ||
84 | obj->pages, | ||
85 | obj->agp_type); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | ||
91 | { | ||
92 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, | ||
93 | obj->base.size >> PAGE_SHIFT); | ||
94 | |||
95 | if (obj->sg_list) { | ||
96 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); | ||
97 | obj->sg_list = NULL; | ||
98 | } | ||
99 | } | ||
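The byte-to-PTE arithmetic this file leans on, checked in isolation (assumes 4 KiB pages, i.e. PAGE_SHIFT == 12; the start/end values are invented): these are the first_entry/num_entries pairs handed to intel_gtt_clear_range() and friends.

```c
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long gtt_start = 0x100000, gtt_end = 0x800000;

	printf("first pte %lu, %lu entries\n",
	       gtt_start >> PAGE_SHIFT,
	       (gtt_end - gtt_start) >> PAGE_SHIFT);	/* 256, 1792 */
	return 0;
}
```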
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index af352de70be1..22a32b9932c5 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
181 | } | 181 | } |
182 | 182 | ||
183 | /* Check pitch constraints for all chips & tiling formats */ | 183 | /* Check pitch constraints for all chips & tiling formats */
184 | bool | 184 | static bool |
185 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | 185 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) |
186 | { | 186 | { |
187 | int tile_width; | 187 | int tile_width; |
@@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
232 | return true; | 232 | return true; |
233 | } | 233 | } |
234 | 234 | ||
235 | bool | 235 | /* Is the current GTT allocation valid for the change in tiling? */ |
236 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) | 236 | static bool |
237 | i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) | ||
237 | { | 238 | { |
238 | struct drm_device *dev = obj->dev; | 239 | u32 size; |
239 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
240 | |||
241 | if (obj_priv->gtt_space == NULL) | ||
242 | return true; | ||
243 | 240 | ||
244 | if (tiling_mode == I915_TILING_NONE) | 241 | if (tiling_mode == I915_TILING_NONE) |
245 | return true; | 242 | return true; |
246 | 243 | ||
247 | if (INTEL_INFO(dev)->gen >= 4) | 244 | if (INTEL_INFO(obj->base.dev)->gen >= 4) |
248 | return true; | 245 | return true; |
249 | 246 | ||
250 | if (obj_priv->gtt_offset & (obj->size - 1)) | 247 | if (INTEL_INFO(obj->base.dev)->gen == 3) { |
251 | return false; | 248 | if (obj->gtt_offset & ~I915_FENCE_START_MASK) |
252 | |||
253 | if (IS_GEN3(dev)) { | ||
254 | if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) | ||
255 | return false; | 249 | return false; |
256 | } else { | 250 | } else { |
257 | if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) | 251 | if (obj->gtt_offset & ~I830_FENCE_START_MASK) |
258 | return false; | 252 | return false; |
259 | } | 253 | } |
260 | 254 | ||
255 | /* | ||
256 | * Previous chips need to be aligned to the size of the smallest | ||
257 | * fence register that can contain the object. | ||
258 | */ | ||
259 | if (INTEL_INFO(obj->base.dev)->gen == 3) | ||
260 | size = 1024*1024; | ||
261 | else | ||
262 | size = 512*1024; | ||
263 | |||
264 | while (size < obj->base.size) | ||
265 | size <<= 1; | ||
266 | |||
267 | if (obj->gtt_space->size != size) | ||
268 | return false; | ||
269 | |||
270 | if (obj->gtt_offset & (size - 1)) | ||
271 | return false; | ||
272 | |||
261 | return true; | 273 | return true; |
262 | } | 274 | } |
263 | 275 | ||
@@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) | |||
267 | */ | 279 | */ |
268 | int | 280 | int |
269 | i915_gem_set_tiling(struct drm_device *dev, void *data, | 281 | i915_gem_set_tiling(struct drm_device *dev, void *data, |
270 | struct drm_file *file_priv) | 282 | struct drm_file *file) |
271 | { | 283 | { |
272 | struct drm_i915_gem_set_tiling *args = data; | 284 | struct drm_i915_gem_set_tiling *args = data; |
273 | drm_i915_private_t *dev_priv = dev->dev_private; | 285 | drm_i915_private_t *dev_priv = dev->dev_private; |
274 | struct drm_gem_object *obj; | 286 | struct drm_i915_gem_object *obj; |
275 | struct drm_i915_gem_object *obj_priv; | ||
276 | int ret; | 287 | int ret; |
277 | 288 | ||
278 | ret = i915_gem_check_is_wedged(dev); | 289 | ret = i915_gem_check_is_wedged(dev); |
279 | if (ret) | 290 | if (ret) |
280 | return ret; | 291 | return ret; |
281 | 292 | ||
282 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 293 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
283 | if (obj == NULL) | 294 | if (obj == NULL) |
284 | return -ENOENT; | 295 | return -ENOENT; |
285 | obj_priv = to_intel_bo(obj); | ||
286 | 296 | ||
287 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | 297 | if (!i915_tiling_ok(dev, |
288 | drm_gem_object_unreference_unlocked(obj); | 298 | args->stride, obj->base.size, args->tiling_mode)) { |
299 | drm_gem_object_unreference_unlocked(&obj->base); | ||
289 | return -EINVAL; | 300 | return -EINVAL; |
290 | } | 301 | } |
291 | 302 | ||
292 | if (obj_priv->pin_count) { | 303 | if (obj->pin_count) { |
293 | drm_gem_object_unreference_unlocked(obj); | 304 | drm_gem_object_unreference_unlocked(&obj->base); |
294 | return -EBUSY; | 305 | return -EBUSY; |
295 | } | 306 | } |
296 | 307 | ||
@@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
324 | } | 335 | } |
325 | 336 | ||
326 | mutex_lock(&dev->struct_mutex); | 337 | mutex_lock(&dev->struct_mutex); |
327 | if (args->tiling_mode != obj_priv->tiling_mode || | 338 | if (args->tiling_mode != obj->tiling_mode || |
328 | args->stride != obj_priv->stride) { | 339 | args->stride != obj->stride) { |
329 | /* We need to rebind the object if its current allocation | 340 | /* We need to rebind the object if its current allocation |
330 | * no longer meets the alignment restrictions for its new | 341 | * no longer meets the alignment restrictions for its new |
331 | * tiling mode. Otherwise we can just leave it alone, but | 342 | * tiling mode. Otherwise we can just leave it alone, but |
332 | * need to ensure that any fence register is cleared. | 343 | * need to ensure that any fence register is cleared. |
333 | */ | 344 | */ |
334 | if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) | 345 | i915_gem_release_mmap(obj); |
335 | ret = i915_gem_object_unbind(obj); | ||
336 | else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
337 | ret = i915_gem_object_put_fence_reg(obj, true); | ||
338 | else | ||
339 | i915_gem_release_mmap(obj); | ||
340 | 346 | ||
341 | if (ret != 0) { | 347 | obj->map_and_fenceable = |
342 | args->tiling_mode = obj_priv->tiling_mode; | 348 | obj->gtt_space == NULL || |
343 | args->stride = obj_priv->stride; | 349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
344 | goto err; | 350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
345 | } | ||
346 | 351 | ||
347 | obj_priv->tiling_mode = args->tiling_mode; | 352 | obj->tiling_changed = true; |
348 | obj_priv->stride = args->stride; | 353 | obj->tiling_mode = args->tiling_mode; |
354 | obj->stride = args->stride; | ||
349 | } | 355 | } |
350 | err: | 356 | drm_gem_object_unreference(&obj->base); |
351 | drm_gem_object_unreference(obj); | ||
352 | mutex_unlock(&dev->struct_mutex); | 357 | mutex_unlock(&dev->struct_mutex); |
353 | 358 | ||
354 | return ret; | 359 | return 0; |
355 | } | 360 | } |
356 | 361 | ||
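
The rewritten set_tiling path above no longer unbinds eagerly on a tiling change: it releases the CPU mmaps and recomputes whether the object is still usable through the mappable aperture with a fence. A minimal user-space model of that predicate, under stated assumptions (the struct fields mirror the names in the diff; the 64MiB limit is illustrative, not the hardware's):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the fields the driver consults above;
 * an illustrative model, not the kernel structure. */
struct bo {
    unsigned long gtt_offset;   /* valid only when bound */
    unsigned long size;
    bool bound;                 /* obj->gtt_space != NULL */
    bool fence_alignment_ok;    /* i915_gem_object_fence_ok() result */
};

/* An object stays "map and fenceable" if it is unbound (it will be
 * placed correctly on the next bind) or it already sits entirely
 * below the mappable end and meets the fence alignment for the new
 * tiling mode. */
static bool map_and_fenceable(const struct bo *bo,
                              unsigned long gtt_mappable_end)
{
    return !bo->bound ||
           (bo->gtt_offset + bo->size <= gtt_mappable_end &&
            bo->fence_alignment_ok);
}

int main(void)
{
    struct bo bo = { .gtt_offset = 60ul << 20, .size = 8ul << 20,
                     .bound = true, .fence_alignment_ok = true };
    /* Crosses the assumed 64MiB mappable limit: prints 0. */
    printf("%d\n", map_and_fenceable(&bo, 64ul << 20));
    return 0;
}
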
357 | /** | 362 | /** |
@@ -359,22 +364,20 @@ err: | |||
359 | */ | 364 | */ |
360 | int | 365 | int |
361 | i915_gem_get_tiling(struct drm_device *dev, void *data, | 366 | i915_gem_get_tiling(struct drm_device *dev, void *data, |
362 | struct drm_file *file_priv) | 367 | struct drm_file *file) |
363 | { | 368 | { |
364 | struct drm_i915_gem_get_tiling *args = data; | 369 | struct drm_i915_gem_get_tiling *args = data; |
365 | drm_i915_private_t *dev_priv = dev->dev_private; | 370 | drm_i915_private_t *dev_priv = dev->dev_private; |
366 | struct drm_gem_object *obj; | 371 | struct drm_i915_gem_object *obj; |
367 | struct drm_i915_gem_object *obj_priv; | ||
368 | 372 | ||
369 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 373 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
370 | if (obj == NULL) | 374 | if (obj == NULL) |
371 | return -ENOENT; | 375 | return -ENOENT; |
372 | obj_priv = to_intel_bo(obj); | ||
373 | 376 | ||
374 | mutex_lock(&dev->struct_mutex); | 377 | mutex_lock(&dev->struct_mutex); |
375 | 378 | ||
376 | args->tiling_mode = obj_priv->tiling_mode; | 379 | args->tiling_mode = obj->tiling_mode; |
377 | switch (obj_priv->tiling_mode) { | 380 | switch (obj->tiling_mode) { |
378 | case I915_TILING_X: | 381 | case I915_TILING_X: |
379 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; | 382 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
380 | break; | 383 | break; |
@@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
394 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) | 397 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) |
395 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; | 398 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; |
396 | 399 | ||
397 | drm_gem_object_unreference(obj); | 400 | drm_gem_object_unreference(&obj->base); |
398 | mutex_unlock(&dev->struct_mutex); | 401 | mutex_unlock(&dev->struct_mutex); |
399 | 402 | ||
400 | return 0; | 403 | return 0; |
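
From user space the same state is visible through the get_tiling ioctl above. A hedged sketch of a caller, assuming libdrm is available and a valid DRM fd plus GEM handle are already in hand (neither is shown here):

#include <stdio.h>
#include <xf86drm.h>    /* drmIoctl() */
#include <i915_drm.h>   /* DRM_IOCTL_I915_GEM_GET_TILING */

/* Query tiling and swizzle state for one GEM object. */
static int query_tiling(int fd, unsigned int handle)
{
    struct drm_i915_gem_get_tiling get = { .handle = handle };

    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get))
        return -1;

    printf("tiling=%u swizzle=%u\n", get.tiling_mode, get.swizzle_mode);
    return 0;
}
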
@@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page) | |||
424 | } | 427 | } |
425 | 428 | ||
426 | void | 429 | void |
427 | i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) | 430 | i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) |
428 | { | 431 | { |
429 | struct drm_device *dev = obj->dev; | 432 | struct drm_device *dev = obj->base.dev; |
430 | drm_i915_private_t *dev_priv = dev->dev_private; | 433 | drm_i915_private_t *dev_priv = dev->dev_private; |
431 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 434 | int page_count = obj->base.size >> PAGE_SHIFT; |
432 | int page_count = obj->size >> PAGE_SHIFT; | ||
433 | int i; | 435 | int i; |
434 | 436 | ||
435 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | 437 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) |
436 | return; | 438 | return; |
437 | 439 | ||
438 | if (obj_priv->bit_17 == NULL) | 440 | if (obj->bit_17 == NULL) |
439 | return; | 441 | return; |
440 | 442 | ||
441 | for (i = 0; i < page_count; i++) { | 443 | for (i = 0; i < page_count; i++) { |
442 | char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; | 444 | char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; |
443 | if ((new_bit_17 & 0x1) != | 445 | if ((new_bit_17 & 0x1) != |
444 | (test_bit(i, obj_priv->bit_17) != 0)) { | 446 | (test_bit(i, obj->bit_17) != 0)) { |
445 | i915_gem_swizzle_page(obj_priv->pages[i]); | 447 | i915_gem_swizzle_page(obj->pages[i]); |
446 | set_page_dirty(obj_priv->pages[i]); | 448 | set_page_dirty(obj->pages[i]); |
447 | } | 449 | } |
448 | } | 450 | } |
449 | } | 451 | } |
450 | 452 | ||
451 | void | 453 | void |
452 | i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | 454 | i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) |
453 | { | 455 | { |
454 | struct drm_device *dev = obj->dev; | 456 | struct drm_device *dev = obj->base.dev; |
455 | drm_i915_private_t *dev_priv = dev->dev_private; | 457 | drm_i915_private_t *dev_priv = dev->dev_private; |
456 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 458 | int page_count = obj->base.size >> PAGE_SHIFT; |
457 | int page_count = obj->size >> PAGE_SHIFT; | ||
458 | int i; | 459 | int i; |
459 | 460 | ||
460 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | 461 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) |
461 | return; | 462 | return; |
462 | 463 | ||
463 | if (obj_priv->bit_17 == NULL) { | 464 | if (obj->bit_17 == NULL) { |
464 | obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * | 465 | obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * |
465 | sizeof(long), GFP_KERNEL); | 466 | sizeof(long), GFP_KERNEL); |
466 | if (obj_priv->bit_17 == NULL) { | 467 | if (obj->bit_17 == NULL) { |
467 | DRM_ERROR("Failed to allocate memory for bit 17 " | 468 | DRM_ERROR("Failed to allocate memory for bit 17 " |
468 | "record\n"); | 469 | "record\n"); |
469 | return; | 470 | return; |
@@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) | |||
471 | } | 472 | } |
472 | 473 | ||
473 | for (i = 0; i < page_count; i++) { | 474 | for (i = 0; i < page_count; i++) { |
474 | if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) | 475 | if (page_to_phys(obj->pages[i]) & (1 << 17)) |
475 | __set_bit(i, obj_priv->bit_17); | 476 | __set_bit(i, obj->bit_17); |
476 | else | 477 | else |
477 | __clear_bit(i, obj_priv->bit_17); | 478 | __clear_bit(i, obj->bit_17); |
478 | } | 479 | } |
479 | } | 480 | } |
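
The two helpers above track exactly one bit of state per page: whether bit 17 of the page's physical address was set when the contents were last written. On restore, a page only needs re-swizzling if that bit has changed. A runnable model of the bookkeeping, with plain integers standing in for page_to_phys():

#include <stdbool.h>
#include <stdio.h>

/* Record bit 17 of the physical address at save time. */
static bool saved_bit17(unsigned long long phys)
{
    return (phys >> 17) & 1;
}

/* A page needs its contents re-swizzled only when bit 17 of its
 * (possibly new) physical address disagrees with the saved record. */
static bool needs_swizzle(unsigned long long phys_now, bool saved)
{
    return ((phys_now >> 17) & 1) != (unsigned)saved;
}

int main(void)
{
    unsigned long long before = 0x00020000ULL; /* bit 17 set */
    unsigned long long after  = 0x00000000ULL; /* bit 17 clear */
    bool rec = saved_bit17(before);

    printf("swizzle needed: %d\n", needs_swizzle(after, rec)); /* 1 */
    return 0;
}
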
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 729fd0c91d7b..97f946dcc1aa 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -64,64 +64,24 @@ | |||
64 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ | 64 | #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ |
65 | DRM_I915_VBLANK_PIPE_B) | 65 | DRM_I915_VBLANK_PIPE_B) |
66 | 66 | ||
67 | void | ||
68 | ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
69 | { | ||
70 | if ((dev_priv->gt_irq_mask_reg & mask) != 0) { | ||
71 | dev_priv->gt_irq_mask_reg &= ~mask; | ||
72 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | ||
73 | (void) I915_READ(GTIMR); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | void | ||
78 | ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
79 | { | ||
80 | if ((dev_priv->gt_irq_mask_reg & mask) != mask) { | ||
81 | dev_priv->gt_irq_mask_reg |= mask; | ||
82 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | ||
83 | (void) I915_READ(GTIMR); | ||
84 | } | ||
85 | } | ||
86 | |||
87 | /* For display hotplug interrupt */ | 67 | /* For display hotplug interrupt */ |
88 | static void | 68 | static void |
89 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 69 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
90 | { | 70 | { |
91 | if ((dev_priv->irq_mask_reg & mask) != 0) { | 71 | if ((dev_priv->irq_mask & mask) != 0) { |
92 | dev_priv->irq_mask_reg &= ~mask; | 72 | dev_priv->irq_mask &= ~mask; |
93 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 73 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
94 | (void) I915_READ(DEIMR); | 74 | POSTING_READ(DEIMR); |
95 | } | 75 | } |
96 | } | 76 | } |
97 | 77 | ||
98 | static inline void | 78 | static inline void |
99 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 79 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
100 | { | 80 | { |
101 | if ((dev_priv->irq_mask_reg & mask) != mask) { | 81 | if ((dev_priv->irq_mask & mask) != mask) { |
102 | dev_priv->irq_mask_reg |= mask; | 82 | dev_priv->irq_mask |= mask; |
103 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 83 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
104 | (void) I915_READ(DEIMR); | 84 | POSTING_READ(DEIMR); |
105 | } | ||
106 | } | ||
107 | |||
108 | void | ||
109 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
110 | { | ||
111 | if ((dev_priv->irq_mask_reg & mask) != 0) { | ||
112 | dev_priv->irq_mask_reg &= ~mask; | ||
113 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
114 | (void) I915_READ(IMR); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | void | ||
119 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
120 | { | ||
121 | if ((dev_priv->irq_mask_reg & mask) != mask) { | ||
122 | dev_priv->irq_mask_reg |= mask; | ||
123 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
124 | (void) I915_READ(IMR); | ||
125 | } | 85 | } |
126 | } | 86 | } |
127 | 87 | ||
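
Every "(void) I915_READ(reg)" in the old code becomes POSTING_READ(reg); both exist for the same reason: PCI MMIO writes are posted, and reading the register back forces the write out to the device before execution continues. A sketch of the idiom, with stub accessors standing in for the real I915_WRITE/I915_READ on an ioremap()ed BAR:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[256];                       /* stub register file */
static void mmio_write(uint32_t r, uint32_t v) { regs[r] = v; }
static uint32_t mmio_read(uint32_t r)          { return regs[r]; }

/* Flush a posted write before proceeding -- e.g. before relying on a
 * new interrupt mask in DEIMR actually being in effect. */
static void write_and_flush(uint32_t reg, uint32_t val)
{
    mmio_write(reg, val);
    (void)mmio_read(reg); /* the posting read */
}

int main(void)
{
    write_and_flush(0x10, 0xffffffffu);
    printf("0x%08x\n", mmio_read(0x10));
    return 0;
}
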
@@ -144,7 +104,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | |||
144 | dev_priv->pipestat[pipe] |= mask; | 104 | dev_priv->pipestat[pipe] |= mask; |
145 | /* Enable the interrupt, clear any pending status */ | 105 | /* Enable the interrupt, clear any pending status */ |
146 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); | 106 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); |
147 | (void) I915_READ(reg); | 107 | POSTING_READ(reg); |
148 | } | 108 | } |
149 | } | 109 | } |
150 | 110 | ||
@@ -156,16 +116,19 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | |||
156 | 116 | ||
157 | dev_priv->pipestat[pipe] &= ~mask; | 117 | dev_priv->pipestat[pipe] &= ~mask; |
158 | I915_WRITE(reg, dev_priv->pipestat[pipe]); | 118 | I915_WRITE(reg, dev_priv->pipestat[pipe]); |
159 | (void) I915_READ(reg); | 119 | POSTING_READ(reg); |
160 | } | 120 | } |
161 | } | 121 | } |
162 | 122 | ||
163 | /** | 123 | /** |
164 | * intel_enable_asle - enable ASLE interrupt for OpRegion | 124 | * intel_enable_asle - enable ASLE interrupt for OpRegion |
165 | */ | 125 | */ |
166 | void intel_enable_asle (struct drm_device *dev) | 126 | void intel_enable_asle(struct drm_device *dev) |
167 | { | 127 | { |
168 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 128 | drm_i915_private_t *dev_priv = dev->dev_private; |
129 | unsigned long irqflags; | ||
130 | |||
131 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
169 | 132 | ||
170 | if (HAS_PCH_SPLIT(dev)) | 133 | if (HAS_PCH_SPLIT(dev)) |
171 | ironlake_enable_display_irq(dev_priv, DE_GSE); | 134 | ironlake_enable_display_irq(dev_priv, DE_GSE); |
@@ -176,6 +139,8 @@ void intel_enable_asle (struct drm_device *dev) | |||
176 | i915_enable_pipestat(dev_priv, 0, | 139 | i915_enable_pipestat(dev_priv, 0, |
177 | PIPE_LEGACY_BLC_EVENT_ENABLE); | 140 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
178 | } | 141 | } |
142 | |||
143 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
179 | } | 144 | } |
180 | 145 | ||
181 | /** | 146 | /** |
@@ -243,6 +208,103 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | |||
243 | return I915_READ(reg); | 208 | return I915_READ(reg); |
244 | } | 209 | } |
245 | 210 | ||
211 | int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | ||
212 | int *vpos, int *hpos) | ||
213 | { | ||
214 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
215 | u32 vbl = 0, position = 0; | ||
216 | int vbl_start, vbl_end, htotal, vtotal; | ||
217 | bool in_vbl = true; | ||
218 | int ret = 0; | ||
219 | |||
220 | if (!i915_pipe_enabled(dev, pipe)) { | ||
221 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | ||
222 | "pipe %d\n", pipe); | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | /* Get vtotal. */ | ||
227 | vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); | ||
228 | |||
229 | if (INTEL_INFO(dev)->gen >= 4) { | ||
230 | /* No obvious pixelcount register. Only query vertical | ||
231 | * scanout position from Display scan line register. | ||
232 | */ | ||
233 | position = I915_READ(PIPEDSL(pipe)); | ||
234 | |||
235 | /* Decode into vertical scanout position. Don't have | ||
236 | * horizontal scanout position. | ||
237 | */ | ||
238 | *vpos = position & 0x1fff; | ||
239 | *hpos = 0; | ||
240 | } else { | ||
241 | /* Have access to pixelcount since start of frame. | ||
242 | * We can split this into vertical and horizontal | ||
243 | * scanout position. | ||
244 | */ | ||
245 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; | ||
246 | |||
247 | htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); | ||
248 | *vpos = position / htotal; | ||
249 | *hpos = position - (*vpos * htotal); | ||
250 | } | ||
251 | |||
252 | /* Query vblank area. */ | ||
253 | vbl = I915_READ(VBLANK(pipe)); | ||
254 | |||
255 | /* Test position against vblank region. */ | ||
256 | vbl_start = vbl & 0x1fff; | ||
257 | vbl_end = (vbl >> 16) & 0x1fff; | ||
258 | |||
259 | if ((*vpos < vbl_start) || (*vpos > vbl_end)) | ||
260 | in_vbl = false; | ||
261 | |||
262 | /* Inside "upper part" of vblank area? Apply corrective offset: */ | ||
263 | if (in_vbl && (*vpos >= vbl_start)) | ||
264 | *vpos = *vpos - vtotal; | ||
265 | |||
266 | /* Readouts valid? */ | ||
267 | if (vbl > 0) | ||
268 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; | ||
269 | |||
270 | /* In vblank? */ | ||
271 | if (in_vbl) | ||
272 | ret |= DRM_SCANOUTPOS_INVBL; | ||
273 | |||
274 | return ret; | ||
275 | } | ||
276 | |||
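
On pre-gen4 hardware the function above has only a frame-relative pixel counter and must split it into a line and a column itself, then shift positions inside the vblank to be negative relative to the next frame's start. A runnable model of that arithmetic, with assumed (illustrative) mode timings:

#include <stdio.h>

/* Split a pixel count (pixels since start of frame) into a vertical
 * and horizontal scanout position, mirroring the pre-gen4 branch. */
static void decode_position(unsigned int position, int htotal,
                            int *vpos, int *hpos)
{
    *vpos = position / htotal;
    *hpos = position - (*vpos * htotal);
}

int main(void)
{
    /* Assumed timings: 1650 total pixels per line, a 1125-line frame
     * with the vblank spanning lines 1080..1124. */
    int htotal = 1650, vtotal = 1125;
    int vbl_start = 1080, vbl_end = 1124;
    int vpos, hpos;

    decode_position(1082u * 1650 + 40, htotal, &vpos, &hpos);

    /* Inside the vblank, report the position relative to the *next*
     * frame's start: the corrective offset from the code above. */
    if (vpos >= vbl_start && vpos <= vbl_end)
        vpos -= vtotal;

    printf("vpos=%d hpos=%d\n", vpos, hpos); /* vpos=-43 hpos=40 */
    return 0;
}
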
277 | int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | ||
278 | int *max_error, | ||
279 | struct timeval *vblank_time, | ||
280 | unsigned flags) | ||
281 | { | ||
282 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
283 | struct drm_crtc *crtc; | ||
284 | |||
285 | if (pipe < 0 || pipe >= dev_priv->num_pipe) { | ||
286 | DRM_ERROR("Invalid crtc %d\n", pipe); | ||
287 | return -EINVAL; | ||
288 | } | ||
289 | |||
290 | /* Get drm_crtc to timestamp: */ | ||
291 | crtc = intel_get_crtc_for_pipe(dev, pipe); | ||
292 | if (crtc == NULL) { | ||
293 | DRM_ERROR("Invalid crtc %d\n", pipe); | ||
294 | return -EINVAL; | ||
295 | } | ||
296 | |||
297 | if (!crtc->enabled) { | ||
298 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | ||
299 | return -EBUSY; | ||
300 | } | ||
301 | |||
302 | /* Helper routine in DRM core does all the work: */ | ||
303 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, | ||
304 | vblank_time, flags, | ||
305 | crtc); | ||
306 | } | ||
307 | |||
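
i915_get_vblank_timestamp itself only locates the CRTC and defers to the DRM helper, which turns the scanout position into a time offset at the pixel clock rate and shifts the system time by it to land the timestamp at the vblank point. A sketch of just that conversion, under assumed timings (the dotclock and htotal here are illustrative):

#include <stdio.h>

/* Time offset of a scanout position, in nanoseconds:
 * (vpos * htotal + hpos) pixels at the dotclock rate. vpos is
 * negative inside the vblank (see the scanoutpos code above), so the
 * offset is negative there and the timestamp lands in the past. */
static long long scanout_offset_ns(int vpos, int hpos,
                                   int htotal, long long dotclock_hz)
{
    long long pixels = (long long)vpos * htotal + hpos;
    return pixels * 1000000000LL / dotclock_hz;
}

int main(void)
{
    /* Assumed 148.5 MHz dotclock, 1650 pixels per line. */
    long long off = scanout_offset_ns(-43, 40, 1650, 148500000LL);
    printf("offset: %lld ns\n", off); /* roughly -477508 */
    return 0;
}
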
246 | /* | 308 | /* |
247 | * Handle hotplug events outside the interrupt handler proper. | 309 | * Handle hotplug events outside the interrupt handler proper. |
248 | */ | 310 | */ |
@@ -297,20 +359,109 @@ static void notify_ring(struct drm_device *dev, | |||
297 | struct intel_ring_buffer *ring) | 359 | struct intel_ring_buffer *ring) |
298 | { | 360 | { |
299 | struct drm_i915_private *dev_priv = dev->dev_private; | 361 | struct drm_i915_private *dev_priv = dev->dev_private; |
300 | u32 seqno = ring->get_seqno(dev, ring); | 362 | u32 seqno; |
301 | ring->irq_gem_seqno = seqno; | 363 | |
364 | if (ring->obj == NULL) | ||
365 | return; | ||
366 | |||
367 | seqno = ring->get_seqno(ring); | ||
302 | trace_i915_gem_request_complete(dev, seqno); | 368 | trace_i915_gem_request_complete(dev, seqno); |
369 | |||
370 | ring->irq_seqno = seqno; | ||
303 | wake_up_all(&ring->irq_queue); | 371 | wake_up_all(&ring->irq_queue); |
372 | |||
304 | dev_priv->hangcheck_count = 0; | 373 | dev_priv->hangcheck_count = 0; |
305 | mod_timer(&dev_priv->hangcheck_timer, | 374 | mod_timer(&dev_priv->hangcheck_timer, |
306 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 375 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); |
307 | } | 376 | } |
308 | 377 | ||
378 | static void gen6_pm_irq_handler(struct drm_device *dev) | ||
379 | { | ||
380 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
381 | u8 new_delay = dev_priv->cur_delay; | ||
382 | u32 pm_iir; | ||
383 | |||
384 | pm_iir = I915_READ(GEN6_PMIIR); | ||
385 | if (!pm_iir) | ||
386 | return; | ||
387 | |||
388 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { | ||
389 | if (dev_priv->cur_delay != dev_priv->max_delay) | ||
390 | new_delay = dev_priv->cur_delay + 1; | ||
391 | if (new_delay > dev_priv->max_delay) | ||
392 | new_delay = dev_priv->max_delay; | ||
393 | } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { | ||
394 | if (dev_priv->cur_delay != dev_priv->min_delay) | ||
395 | new_delay = dev_priv->cur_delay - 1; | ||
396 | if (new_delay < dev_priv->min_delay) { | ||
397 | new_delay = dev_priv->min_delay; | ||
398 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
399 | I915_READ(GEN6_RP_INTERRUPT_LIMITS) | | ||
400 | ((new_delay << 16) & 0x3f0000)); | ||
401 | } else { | ||
402 | /* Make sure we continue to get down interrupts | ||
403 | * until we hit the minimum frequency */ | ||
404 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
405 | I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); | ||
406 | } | ||
407 | |||
408 | } | ||
409 | |||
410 | gen6_set_rps(dev, new_delay); | ||
411 | dev_priv->cur_delay = new_delay; | ||
412 | |||
413 | I915_WRITE(GEN6_PMIIR, pm_iir); | ||
414 | } | ||
415 | |||
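
gen6_pm_irq_handler is a one-step frequency governor: each "up" interrupt raises the delay value by one, each "down" interrupt or down-timeout lowers it by one, clamped to the min/max the driver chose; the register writes then keep the down interrupts flowing until the floor is reached. A runnable model of just the state machine, with hypothetical event tags standing in for the PM IIR bits:

#include <stdio.h>

enum pm_event { PM_UP_THRESHOLD, PM_DOWN_THRESHOLD, PM_DOWN_TIMEOUT };

/* One step per interrupt, clamped to [min, max]; mirrors the
 * new_delay computation above without the register writes. */
static unsigned char next_delay(unsigned char cur, unsigned char min,
                                unsigned char max, enum pm_event ev)
{
    if (ev == PM_UP_THRESHOLD) {
        if (cur < max)
            cur++;
    } else {        /* down threshold or down timeout */
        if (cur > min)
            cur--;
    }
    return cur;
}

int main(void)
{
    unsigned char delay = 10;
    delay = next_delay(delay, 5, 12, PM_UP_THRESHOLD);  /* 11 */
    delay = next_delay(delay, 5, 12, PM_DOWN_TIMEOUT);  /* 10 */
    printf("delay=%u\n", delay);
    return 0;
}
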
416 | static void pch_irq_handler(struct drm_device *dev) | ||
417 | { | ||
418 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
419 | u32 pch_iir; | ||
420 | |||
421 | pch_iir = I915_READ(SDEIIR); | ||
422 | |||
423 | if (pch_iir & SDE_AUDIO_POWER_MASK) | ||
424 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | ||
425 | (pch_iir & SDE_AUDIO_POWER_MASK) >> | ||
426 | SDE_AUDIO_POWER_SHIFT); | ||
427 | |||
428 | if (pch_iir & SDE_GMBUS) | ||
429 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | ||
430 | |||
431 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | ||
432 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | ||
433 | |||
434 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | ||
435 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | ||
436 | |||
437 | if (pch_iir & SDE_POISON) | ||
438 | DRM_ERROR("PCH poison interrupt\n"); | ||
439 | |||
440 | if (pch_iir & SDE_FDI_MASK) { | ||
441 | u32 fdia, fdib; | ||
442 | |||
443 | fdia = I915_READ(FDI_RXA_IIR); | ||
444 | fdib = I915_READ(FDI_RXB_IIR); | ||
445 | DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib); | ||
446 | } | ||
447 | |||
448 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | ||
449 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | ||
450 | |||
451 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | ||
452 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | ||
453 | |||
454 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | ||
455 | DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); | ||
456 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) | ||
457 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); | ||
458 | } | ||
459 | |||
309 | static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | 460 | static irqreturn_t ironlake_irq_handler(struct drm_device *dev) |
310 | { | 461 | { |
311 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 462 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
312 | int ret = IRQ_NONE; | 463 | int ret = IRQ_NONE; |
313 | u32 de_iir, gt_iir, de_ier, pch_iir; | 464 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; |
314 | u32 hotplug_mask; | 465 | u32 hotplug_mask; |
315 | struct drm_i915_master_private *master_priv; | 466 | struct drm_i915_master_private *master_priv; |
316 | u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; | 467 | u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; |
@@ -321,13 +472,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
321 | /* disable master interrupt before clearing iir */ | 472 | /* disable master interrupt before clearing iir */ |
322 | de_ier = I915_READ(DEIER); | 473 | de_ier = I915_READ(DEIER); |
323 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | 474 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
324 | (void)I915_READ(DEIER); | 475 | POSTING_READ(DEIER); |
325 | 476 | ||
326 | de_iir = I915_READ(DEIIR); | 477 | de_iir = I915_READ(DEIIR); |
327 | gt_iir = I915_READ(GTIIR); | 478 | gt_iir = I915_READ(GTIIR); |
328 | pch_iir = I915_READ(SDEIIR); | 479 | pch_iir = I915_READ(SDEIIR); |
480 | pm_iir = I915_READ(GEN6_PMIIR); | ||
329 | 481 | ||
330 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) | 482 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && |
483 | (!IS_GEN6(dev) || pm_iir == 0)) | ||
331 | goto done; | 484 | goto done; |
332 | 485 | ||
333 | if (HAS_PCH_CPT(dev)) | 486 | if (HAS_PCH_CPT(dev)) |
@@ -344,12 +497,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
344 | READ_BREADCRUMB(dev_priv); | 497 | READ_BREADCRUMB(dev_priv); |
345 | } | 498 | } |
346 | 499 | ||
347 | if (gt_iir & GT_PIPE_NOTIFY) | 500 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) |
348 | notify_ring(dev, &dev_priv->render_ring); | 501 | notify_ring(dev, &dev_priv->ring[RCS]); |
349 | if (gt_iir & bsd_usr_interrupt) | 502 | if (gt_iir & bsd_usr_interrupt) |
350 | notify_ring(dev, &dev_priv->bsd_ring); | 503 | notify_ring(dev, &dev_priv->ring[VCS]); |
351 | if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) | 504 | if (gt_iir & GT_BLT_USER_INTERRUPT) |
352 | notify_ring(dev, &dev_priv->blt_ring); | 505 | notify_ring(dev, &dev_priv->ring[BCS]); |
353 | 506 | ||
354 | if (de_iir & DE_GSE) | 507 | if (de_iir & DE_GSE) |
355 | intel_opregion_gse_intr(dev); | 508 | intel_opregion_gse_intr(dev); |
@@ -371,14 +524,20 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
371 | drm_handle_vblank(dev, 1); | 524 | drm_handle_vblank(dev, 1); |
372 | 525 | ||
373 | /* check event from PCH */ | 526 | /* check event from PCH */ |
374 | if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) | 527 | if (de_iir & DE_PCH_EVENT) { |
375 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 528 | if (pch_iir & hotplug_mask) |
529 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
530 | pch_irq_handler(dev); | ||
531 | } | ||
376 | 532 | ||
377 | if (de_iir & DE_PCU_EVENT) { | 533 | if (de_iir & DE_PCU_EVENT) { |
378 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | 534 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
379 | i915_handle_rps_change(dev); | 535 | i915_handle_rps_change(dev); |
380 | } | 536 | } |
381 | 537 | ||
538 | if (IS_GEN6(dev)) | ||
539 | gen6_pm_irq_handler(dev); | ||
540 | |||
382 | /* should clear PCH hotplug event before clear CPU irq */ | 541 | /* should clear PCH hotplug event before clear CPU irq */ |
383 | I915_WRITE(SDEIIR, pch_iir); | 542 | I915_WRITE(SDEIIR, pch_iir); |
384 | I915_WRITE(GTIIR, gt_iir); | 543 | I915_WRITE(GTIIR, gt_iir); |
@@ -386,7 +545,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
386 | 545 | ||
387 | done: | 546 | done: |
388 | I915_WRITE(DEIER, de_ier); | 547 | I915_WRITE(DEIER, de_ier); |
389 | (void)I915_READ(DEIER); | 548 | POSTING_READ(DEIER); |
390 | 549 | ||
391 | return ret; | 550 | return ret; |
392 | } | 551 | } |
@@ -422,29 +581,23 @@ static void i915_error_work_func(struct work_struct *work) | |||
422 | 581 | ||
423 | #ifdef CONFIG_DEBUG_FS | 582 | #ifdef CONFIG_DEBUG_FS |
424 | static struct drm_i915_error_object * | 583 | static struct drm_i915_error_object * |
425 | i915_error_object_create(struct drm_device *dev, | 584 | i915_error_object_create(struct drm_i915_private *dev_priv, |
426 | struct drm_gem_object *src) | 585 | struct drm_i915_gem_object *src) |
427 | { | 586 | { |
428 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
429 | struct drm_i915_error_object *dst; | 587 | struct drm_i915_error_object *dst; |
430 | struct drm_i915_gem_object *src_priv; | ||
431 | int page, page_count; | 588 | int page, page_count; |
432 | u32 reloc_offset; | 589 | u32 reloc_offset; |
433 | 590 | ||
434 | if (src == NULL) | 591 | if (src == NULL || src->pages == NULL) |
435 | return NULL; | 592 | return NULL; |
436 | 593 | ||
437 | src_priv = to_intel_bo(src); | 594 | page_count = src->base.size / PAGE_SIZE; |
438 | if (src_priv->pages == NULL) | ||
439 | return NULL; | ||
440 | |||
441 | page_count = src->size / PAGE_SIZE; | ||
442 | 595 | ||
443 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); | 596 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); |
444 | if (dst == NULL) | 597 | if (dst == NULL) |
445 | return NULL; | 598 | return NULL; |
446 | 599 | ||
447 | reloc_offset = src_priv->gtt_offset; | 600 | reloc_offset = src->gtt_offset; |
448 | for (page = 0; page < page_count; page++) { | 601 | for (page = 0; page < page_count; page++) { |
449 | unsigned long flags; | 602 | unsigned long flags; |
450 | void __iomem *s; | 603 | void __iomem *s; |
@@ -466,7 +619,7 @@ i915_error_object_create(struct drm_device *dev, | |||
466 | reloc_offset += PAGE_SIZE; | 619 | reloc_offset += PAGE_SIZE; |
467 | } | 620 | } |
468 | dst->page_count = page_count; | 621 | dst->page_count = page_count; |
469 | dst->gtt_offset = src_priv->gtt_offset; | 622 | dst->gtt_offset = src->gtt_offset; |
470 | 623 | ||
471 | return dst; | 624 | return dst; |
472 | 625 | ||
@@ -503,53 +656,98 @@ i915_error_state_free(struct drm_device *dev, | |||
503 | kfree(error); | 656 | kfree(error); |
504 | } | 657 | } |
505 | 658 | ||
506 | static u32 | 659 | static u32 capture_bo_list(struct drm_i915_error_buffer *err, |
507 | i915_get_bbaddr(struct drm_device *dev, u32 *ring) | 660 | int count, |
661 | struct list_head *head) | ||
508 | { | 662 | { |
509 | u32 cmd; | 663 | struct drm_i915_gem_object *obj; |
664 | int i = 0; | ||
665 | |||
666 | list_for_each_entry(obj, head, mm_list) { | ||
667 | err->size = obj->base.size; | ||
668 | err->name = obj->base.name; | ||
669 | err->seqno = obj->last_rendering_seqno; | ||
670 | err->gtt_offset = obj->gtt_offset; | ||
671 | err->read_domains = obj->base.read_domains; | ||
672 | err->write_domain = obj->base.write_domain; | ||
673 | err->fence_reg = obj->fence_reg; | ||
674 | err->pinned = 0; | ||
675 | if (obj->pin_count > 0) | ||
676 | err->pinned = 1; | ||
677 | if (obj->user_pin_count > 0) | ||
678 | err->pinned = -1; | ||
679 | err->tiling = obj->tiling_mode; | ||
680 | err->dirty = obj->dirty; | ||
681 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | ||
682 | err->ring = obj->ring ? obj->ring->id : 0; | ||
683 | err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY; | ||
684 | |||
685 | if (++i == count) | ||
686 | break; | ||
510 | 687 | ||
511 | if (IS_I830(dev) || IS_845G(dev)) | 688 | err++; |
512 | cmd = MI_BATCH_BUFFER; | 689 | } |
513 | else if (INTEL_INFO(dev)->gen >= 4) | ||
514 | cmd = (MI_BATCH_BUFFER_START | (2 << 6) | | ||
515 | MI_BATCH_NON_SECURE_I965); | ||
516 | else | ||
517 | cmd = (MI_BATCH_BUFFER_START | (2 << 6)); | ||
518 | 690 | ||
519 | return ring[0] == cmd ? ring[1] : 0; | 691 | return i; |
520 | } | 692 | } |
521 | 693 | ||
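
One detail in capture_bo_list worth spelling out: err->pinned is a ternary code, 0 for unpinned, 1 for a kernel pin, -1 when user space holds the pin (the user_pin_count test runs last, so it wins when both counts are set). A tiny model of that encoding:

#include <stdio.h>

/* 0 = not pinned, 1 = pinned by the kernel, -1 = pinned by user
 * space; mirrors the two if-tests in the capture code above. */
static int pinned_code(int pin_count, int user_pin_count)
{
    int pinned = 0;
    if (pin_count > 0)
        pinned = 1;
    if (user_pin_count > 0)
        pinned = -1;
    return pinned;
}

int main(void)
{
    printf("%d %d %d\n", pinned_code(0, 0), pinned_code(2, 0),
           pinned_code(2, 1)); /* 0 1 -1 */
    return 0;
}
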
522 | static u32 | 694 | static void i915_gem_record_fences(struct drm_device *dev, |
523 | i915_ringbuffer_last_batch(struct drm_device *dev) | 695 | struct drm_i915_error_state *error) |
524 | { | 696 | { |
525 | struct drm_i915_private *dev_priv = dev->dev_private; | 697 | struct drm_i915_private *dev_priv = dev->dev_private; |
526 | u32 head, bbaddr; | 698 | int i; |
527 | u32 *ring; | 699 | |
700 | /* Fences */ | ||
701 | switch (INTEL_INFO(dev)->gen) { | ||
702 | case 6: | ||
703 | for (i = 0; i < 16; i++) | ||
704 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
705 | break; | ||
706 | case 5: | ||
707 | case 4: | ||
708 | for (i = 0; i < 16; i++) | ||
709 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
710 | break; | ||
711 | case 3: | ||
712 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
713 | for (i = 0; i < 8; i++) | ||
714 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
715 | case 2: | ||
716 | for (i = 0; i < 8; i++) | ||
717 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
718 | break; | ||
528 | 719 | ||
529 | /* Locate the current position in the ringbuffer and walk back | ||
530 | * to find the most recently dispatched batch buffer. | ||
531 | */ | ||
532 | bbaddr = 0; | ||
533 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
534 | ring = (u32 *)(dev_priv->render_ring.virtual_start + head); | ||
535 | |||
536 | while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { | ||
537 | bbaddr = i915_get_bbaddr(dev, ring); | ||
538 | if (bbaddr) | ||
539 | break; | ||
540 | } | 720 | } |
721 | } | ||
541 | 722 | ||
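
Note the gen-3 case in i915_gem_record_fences falls through into the gen-2 case on purpose: 945G/945GM/G33 record eight extra fence registers on top of the eight they share with gen-2 parts. A helper-shaped summary of the loop bounds above (counts read straight off the diff, nothing more):

#include <stdio.h>

static int fence_count(int gen, int is_945_or_g33)
{
    switch (gen) {
    case 6:
    case 5:
    case 4:
        return 16;
    case 3:
        return is_945_or_g33 ? 16 : 8; /* 8 + the shared 8 */
    case 2:
        return 8;
    default:
        return 0;
    }
}

int main(void)
{
    printf("%d %d\n", fence_count(3, 1), fence_count(3, 0)); /* 16 8 */
    return 0;
}
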
542 | if (bbaddr == 0) { | 723 | static struct drm_i915_error_object * |
543 | ring = (u32 *)(dev_priv->render_ring.virtual_start | 724 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
544 | + dev_priv->render_ring.size); | 725 | struct intel_ring_buffer *ring) |
545 | while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { | 726 | { |
546 | bbaddr = i915_get_bbaddr(dev, ring); | 727 | struct drm_i915_gem_object *obj; |
547 | if (bbaddr) | 728 | u32 seqno; |
548 | break; | 729 | |
549 | } | 730 | if (!ring->get_seqno) |
731 | return NULL; | ||
732 | |||
733 | seqno = ring->get_seqno(ring); | ||
734 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
735 | if (obj->ring != ring) | ||
736 | continue; | ||
737 | |||
738 | if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) | ||
739 | continue; | ||
740 | |||
741 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | ||
742 | continue; | ||
743 | |||
744 | /* We need to copy these to an anonymous buffer as the simplest | ||
745 | * method to avoid being overwritten by userspace. | ||
746 | */ | ||
747 | return i915_error_object_create(dev_priv, obj); | ||
550 | } | 748 | } |
551 | 749 | ||
552 | return bbaddr; | 750 | return NULL; |
553 | } | 751 | } |
554 | 752 | ||
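
i915_error_first_batchbuffer walks the active list for the first command buffer the ring has not retired, using i915_seqno_passed() for the comparison. Sequence numbers wrap, so the driver compares them with the standard signed-difference trick; a runnable sketch of that comparison:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* True if seq1 is at or after seq2, treating the 32-bit sequence
 * space as a circle: the signed difference stays non-negative across
 * wraparound as long as the values are less than 2^31 apart. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
    printf("%d\n", seqno_passed(10, 5));           /* 1 */
    printf("%d\n", seqno_passed(3, 0xfffffff0u));  /* 1: wrapped */
    printf("%d\n", seqno_passed(0xfffffff0u, 3));  /* 0 */
    return 0;
}
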
555 | /** | 753 | /** |
@@ -564,12 +762,10 @@ i915_ringbuffer_last_batch(struct drm_device *dev) | |||
564 | static void i915_capture_error_state(struct drm_device *dev) | 762 | static void i915_capture_error_state(struct drm_device *dev) |
565 | { | 763 | { |
566 | struct drm_i915_private *dev_priv = dev->dev_private; | 764 | struct drm_i915_private *dev_priv = dev->dev_private; |
567 | struct drm_i915_gem_object *obj_priv; | 765 | struct drm_i915_gem_object *obj; |
568 | struct drm_i915_error_state *error; | 766 | struct drm_i915_error_state *error; |
569 | struct drm_gem_object *batchbuffer[2]; | ||
570 | unsigned long flags; | 767 | unsigned long flags; |
571 | u32 bbaddr; | 768 | int i; |
572 | int count; | ||
573 | 769 | ||
574 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 770 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
575 | error = dev_priv->first_error; | 771 | error = dev_priv->first_error; |
@@ -585,20 +781,33 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
585 | 781 | ||
586 | DRM_DEBUG_DRIVER("generating error event\n"); | 782 | DRM_DEBUG_DRIVER("generating error event\n"); |
587 | 783 | ||
588 | error->seqno = | 784 | error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); |
589 | dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring); | ||
590 | error->eir = I915_READ(EIR); | 785 | error->eir = I915_READ(EIR); |
591 | error->pgtbl_er = I915_READ(PGTBL_ER); | 786 | error->pgtbl_er = I915_READ(PGTBL_ER); |
592 | error->pipeastat = I915_READ(PIPEASTAT); | 787 | error->pipeastat = I915_READ(PIPEASTAT); |
593 | error->pipebstat = I915_READ(PIPEBSTAT); | 788 | error->pipebstat = I915_READ(PIPEBSTAT); |
594 | error->instpm = I915_READ(INSTPM); | 789 | error->instpm = I915_READ(INSTPM); |
595 | if (INTEL_INFO(dev)->gen < 4) { | 790 | error->error = 0; |
596 | error->ipeir = I915_READ(IPEIR); | 791 | if (INTEL_INFO(dev)->gen >= 6) { |
597 | error->ipehr = I915_READ(IPEHR); | 792 | error->error = I915_READ(ERROR_GEN6); |
598 | error->instdone = I915_READ(INSTDONE); | 793 | |
599 | error->acthd = I915_READ(ACTHD); | 794 | error->bcs_acthd = I915_READ(BCS_ACTHD); |
600 | error->bbaddr = 0; | 795 | error->bcs_ipehr = I915_READ(BCS_IPEHR); |
601 | } else { | 796 | error->bcs_ipeir = I915_READ(BCS_IPEIR); |
797 | error->bcs_instdone = I915_READ(BCS_INSTDONE); | ||
798 | error->bcs_seqno = 0; | ||
799 | if (dev_priv->ring[BCS].get_seqno) | ||
800 | error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]); | ||
801 | |||
802 | error->vcs_acthd = I915_READ(VCS_ACTHD); | ||
803 | error->vcs_ipehr = I915_READ(VCS_IPEHR); | ||
804 | error->vcs_ipeir = I915_READ(VCS_IPEIR); | ||
805 | error->vcs_instdone = I915_READ(VCS_INSTDONE); | ||
806 | error->vcs_seqno = 0; | ||
807 | if (dev_priv->ring[VCS].get_seqno) | ||
808 | error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]); | ||
809 | } | ||
810 | if (INTEL_INFO(dev)->gen >= 4) { | ||
602 | error->ipeir = I915_READ(IPEIR_I965); | 811 | error->ipeir = I915_READ(IPEIR_I965); |
603 | error->ipehr = I915_READ(IPEHR_I965); | 812 | error->ipehr = I915_READ(IPEHR_I965); |
604 | error->instdone = I915_READ(INSTDONE_I965); | 813 | error->instdone = I915_READ(INSTDONE_I965); |
@@ -606,118 +815,63 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
606 | error->instdone1 = I915_READ(INSTDONE1); | 815 | error->instdone1 = I915_READ(INSTDONE1); |
607 | error->acthd = I915_READ(ACTHD_I965); | 816 | error->acthd = I915_READ(ACTHD_I965); |
608 | error->bbaddr = I915_READ64(BB_ADDR); | 817 | error->bbaddr = I915_READ64(BB_ADDR); |
818 | } else { | ||
819 | error->ipeir = I915_READ(IPEIR); | ||
820 | error->ipehr = I915_READ(IPEHR); | ||
821 | error->instdone = I915_READ(INSTDONE); | ||
822 | error->acthd = I915_READ(ACTHD); | ||
823 | error->bbaddr = 0; | ||
609 | } | 824 | } |
825 | i915_gem_record_fences(dev, error); | ||
610 | 826 | ||
611 | bbaddr = i915_ringbuffer_last_batch(dev); | 827 | /* Record the active batchbuffers */ |
612 | 828 | for (i = 0; i < I915_NUM_RINGS; i++) | |
613 | /* Grab the current batchbuffer, most likely to have crashed. */ | 829 | error->batchbuffer[i] = |
614 | batchbuffer[0] = NULL; | 830 | i915_error_first_batchbuffer(dev_priv, |
615 | batchbuffer[1] = NULL; | 831 | &dev_priv->ring[i]); |
616 | count = 0; | ||
617 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | ||
618 | struct drm_gem_object *obj = &obj_priv->base; | ||
619 | |||
620 | if (batchbuffer[0] == NULL && | ||
621 | bbaddr >= obj_priv->gtt_offset && | ||
622 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
623 | batchbuffer[0] = obj; | ||
624 | |||
625 | if (batchbuffer[1] == NULL && | ||
626 | error->acthd >= obj_priv->gtt_offset && | ||
627 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
628 | batchbuffer[1] = obj; | ||
629 | |||
630 | count++; | ||
631 | } | ||
632 | /* Scan the other lists for completeness for those bizarre errors. */ | ||
633 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
634 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { | ||
635 | struct drm_gem_object *obj = &obj_priv->base; | ||
636 | |||
637 | if (batchbuffer[0] == NULL && | ||
638 | bbaddr >= obj_priv->gtt_offset && | ||
639 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
640 | batchbuffer[0] = obj; | ||
641 | |||
642 | if (batchbuffer[1] == NULL && | ||
643 | error->acthd >= obj_priv->gtt_offset && | ||
644 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
645 | batchbuffer[1] = obj; | ||
646 | |||
647 | if (batchbuffer[0] && batchbuffer[1]) | ||
648 | break; | ||
649 | } | ||
650 | } | ||
651 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
652 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { | ||
653 | struct drm_gem_object *obj = &obj_priv->base; | ||
654 | |||
655 | if (batchbuffer[0] == NULL && | ||
656 | bbaddr >= obj_priv->gtt_offset && | ||
657 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
658 | batchbuffer[0] = obj; | ||
659 | |||
660 | if (batchbuffer[1] == NULL && | ||
661 | error->acthd >= obj_priv->gtt_offset && | ||
662 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
663 | batchbuffer[1] = obj; | ||
664 | |||
665 | if (batchbuffer[0] && batchbuffer[1]) | ||
666 | break; | ||
667 | } | ||
668 | } | ||
669 | |||
670 | /* We need to copy these to an anonymous buffer as the simplest | ||
671 | * method to avoid being overwritten by userspace. | ||
672 | */ | ||
673 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); | ||
674 | if (batchbuffer[1] != batchbuffer[0]) | ||
675 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | ||
676 | else | ||
677 | error->batchbuffer[1] = NULL; | ||
678 | 832 | ||
679 | /* Record the ringbuffer */ | 833 | /* Record the ringbuffer */ |
680 | error->ringbuffer = i915_error_object_create(dev, | 834 | error->ringbuffer = i915_error_object_create(dev_priv, |
681 | dev_priv->render_ring.gem_object); | 835 | dev_priv->ring[RCS].obj); |
682 | 836 | ||
683 | /* Record buffers on the active list. */ | 837 | /* Record buffers on the active and pinned lists. */ |
684 | error->active_bo = NULL; | 838 | error->active_bo = NULL; |
685 | error->active_bo_count = 0; | 839 | error->pinned_bo = NULL; |
686 | 840 | ||
687 | if (count) | 841 | i = 0; |
688 | error->active_bo = kmalloc(sizeof(*error->active_bo)*count, | 842 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) |
689 | GFP_ATOMIC); | 843 | i++; |
844 | error->active_bo_count = i; | ||
845 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) | ||
846 | i++; | ||
847 | error->pinned_bo_count = i - error->active_bo_count; | ||
690 | 848 | ||
691 | if (error->active_bo) { | 849 | error->active_bo = NULL; |
692 | int i = 0; | 850 | error->pinned_bo = NULL; |
693 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { | 851 | if (i) { |
694 | struct drm_gem_object *obj = &obj_priv->base; | 852 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, |
695 | 853 | GFP_ATOMIC); | |
696 | error->active_bo[i].size = obj->size; | 854 | if (error->active_bo) |
697 | error->active_bo[i].name = obj->name; | 855 | error->pinned_bo = |
698 | error->active_bo[i].seqno = obj_priv->last_rendering_seqno; | 856 | error->active_bo + error->active_bo_count; |
699 | error->active_bo[i].gtt_offset = obj_priv->gtt_offset; | ||
700 | error->active_bo[i].read_domains = obj->read_domains; | ||
701 | error->active_bo[i].write_domain = obj->write_domain; | ||
702 | error->active_bo[i].fence_reg = obj_priv->fence_reg; | ||
703 | error->active_bo[i].pinned = 0; | ||
704 | if (obj_priv->pin_count > 0) | ||
705 | error->active_bo[i].pinned = 1; | ||
706 | if (obj_priv->user_pin_count > 0) | ||
707 | error->active_bo[i].pinned = -1; | ||
708 | error->active_bo[i].tiling = obj_priv->tiling_mode; | ||
709 | error->active_bo[i].dirty = obj_priv->dirty; | ||
710 | error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; | ||
711 | |||
712 | if (++i == count) | ||
713 | break; | ||
714 | } | ||
715 | error->active_bo_count = i; | ||
716 | } | 857 | } |
717 | 858 | ||
859 | if (error->active_bo) | ||
860 | error->active_bo_count = | ||
861 | capture_bo_list(error->active_bo, | ||
862 | error->active_bo_count, | ||
863 | &dev_priv->mm.active_list); | ||
864 | |||
865 | if (error->pinned_bo) | ||
866 | error->pinned_bo_count = | ||
867 | capture_bo_list(error->pinned_bo, | ||
868 | error->pinned_bo_count, | ||
869 | &dev_priv->mm.pinned_list); | ||
870 | |||
718 | do_gettimeofday(&error->time); | 871 | do_gettimeofday(&error->time); |
719 | 872 | ||
720 | error->overlay = intel_overlay_capture_error_state(dev); | 873 | error->overlay = intel_overlay_capture_error_state(dev); |
874 | error->display = intel_display_capture_error_state(dev); | ||
721 | 875 | ||
722 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 876 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
723 | if (dev_priv->first_error == NULL) { | 877 | if (dev_priv->first_error == NULL) { |
@@ -775,7 +929,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
775 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 929 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
776 | I915_READ(ACTHD_I965)); | 930 | I915_READ(ACTHD_I965)); |
777 | I915_WRITE(IPEIR_I965, ipeir); | 931 | I915_WRITE(IPEIR_I965, ipeir); |
778 | (void)I915_READ(IPEIR_I965); | 932 | POSTING_READ(IPEIR_I965); |
779 | } | 933 | } |
780 | if (eir & GM45_ERROR_PAGE_TABLE) { | 934 | if (eir & GM45_ERROR_PAGE_TABLE) { |
781 | u32 pgtbl_err = I915_READ(PGTBL_ER); | 935 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
@@ -783,7 +937,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
783 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | 937 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", |
784 | pgtbl_err); | 938 | pgtbl_err); |
785 | I915_WRITE(PGTBL_ER, pgtbl_err); | 939 | I915_WRITE(PGTBL_ER, pgtbl_err); |
786 | (void)I915_READ(PGTBL_ER); | 940 | POSTING_READ(PGTBL_ER); |
787 | } | 941 | } |
788 | } | 942 | } |
789 | 943 | ||
@@ -794,7 +948,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
794 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | 948 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", |
795 | pgtbl_err); | 949 | pgtbl_err); |
796 | I915_WRITE(PGTBL_ER, pgtbl_err); | 950 | I915_WRITE(PGTBL_ER, pgtbl_err); |
797 | (void)I915_READ(PGTBL_ER); | 951 | POSTING_READ(PGTBL_ER); |
798 | } | 952 | } |
799 | } | 953 | } |
800 | 954 | ||
@@ -825,7 +979,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
825 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 979 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
826 | I915_READ(ACTHD)); | 980 | I915_READ(ACTHD)); |
827 | I915_WRITE(IPEIR, ipeir); | 981 | I915_WRITE(IPEIR, ipeir); |
828 | (void)I915_READ(IPEIR); | 982 | POSTING_READ(IPEIR); |
829 | } else { | 983 | } else { |
830 | u32 ipeir = I915_READ(IPEIR_I965); | 984 | u32 ipeir = I915_READ(IPEIR_I965); |
831 | 985 | ||
@@ -842,12 +996,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
842 | printk(KERN_ERR " ACTHD: 0x%08x\n", | 996 | printk(KERN_ERR " ACTHD: 0x%08x\n", |
843 | I915_READ(ACTHD_I965)); | 997 | I915_READ(ACTHD_I965)); |
844 | I915_WRITE(IPEIR_I965, ipeir); | 998 | I915_WRITE(IPEIR_I965, ipeir); |
845 | (void)I915_READ(IPEIR_I965); | 999 | POSTING_READ(IPEIR_I965); |
846 | } | 1000 | } |
847 | } | 1001 | } |
848 | 1002 | ||
849 | I915_WRITE(EIR, eir); | 1003 | I915_WRITE(EIR, eir); |
850 | (void)I915_READ(EIR); | 1004 | POSTING_READ(EIR); |
851 | eir = I915_READ(EIR); | 1005 | eir = I915_READ(EIR); |
852 | if (eir) { | 1006 | if (eir) { |
853 | /* | 1007 | /* |
@@ -870,7 +1024,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) | |||
870 | * so userspace knows something bad happened (should trigger collection | 1024 | * so userspace knows something bad happened (should trigger collection |
871 | * of a ring dump etc.). | 1025 | * of a ring dump etc.). |
872 | */ | 1026 | */ |
873 | static void i915_handle_error(struct drm_device *dev, bool wedged) | 1027 | void i915_handle_error(struct drm_device *dev, bool wedged) |
874 | { | 1028 | { |
875 | struct drm_i915_private *dev_priv = dev->dev_private; | 1029 | struct drm_i915_private *dev_priv = dev->dev_private; |
876 | 1030 | ||
@@ -884,11 +1038,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) | |||
884 | /* | 1038 | /* |
885 | * Wakeup waiting processes so they don't hang | 1039 | * Wakeup waiting processes so they don't hang |
886 | */ | 1040 | */ |
887 | wake_up_all(&dev_priv->render_ring.irq_queue); | 1041 | wake_up_all(&dev_priv->ring[RCS].irq_queue); |
888 | if (HAS_BSD(dev)) | 1042 | if (HAS_BSD(dev)) |
889 | wake_up_all(&dev_priv->bsd_ring.irq_queue); | 1043 | wake_up_all(&dev_priv->ring[VCS].irq_queue); |
890 | if (HAS_BLT(dev)) | 1044 | if (HAS_BLT(dev)) |
891 | wake_up_all(&dev_priv->blt_ring.irq_queue); | 1045 | wake_up_all(&dev_priv->ring[BCS].irq_queue); |
892 | } | 1046 | } |
893 | 1047 | ||
894 | queue_work(dev_priv->wq, &dev_priv->error_work); | 1048 | queue_work(dev_priv->wq, &dev_priv->error_work); |
@@ -899,7 +1053,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
899 | drm_i915_private_t *dev_priv = dev->dev_private; | 1053 | drm_i915_private_t *dev_priv = dev->dev_private; |
900 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 1054 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
901 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1055 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
902 | struct drm_i915_gem_object *obj_priv; | 1056 | struct drm_i915_gem_object *obj; |
903 | struct intel_unpin_work *work; | 1057 | struct intel_unpin_work *work; |
904 | unsigned long flags; | 1058 | unsigned long flags; |
905 | bool stall_detected; | 1059 | bool stall_detected; |
@@ -918,13 +1072,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |||
918 | } | 1072 | } |
919 | 1073 | ||
920 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | 1074 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ |
921 | obj_priv = to_intel_bo(work->pending_flip_obj); | 1075 | obj = work->pending_flip_obj; |
922 | if (INTEL_INFO(dev)->gen >= 4) { | 1076 | if (INTEL_INFO(dev)->gen >= 4) { |
923 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; | 1077 | int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; |
924 | stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; | 1078 | stall_detected = I915_READ(dspsurf) == obj->gtt_offset; |
925 | } else { | 1079 | } else { |
926 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; | 1080 | int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; |
927 | stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + | 1081 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + |
928 | crtc->y * crtc->fb->pitch + | 1082 | crtc->y * crtc->fb->pitch + |
929 | crtc->x * crtc->fb->bits_per_pixel/8); | 1083 | crtc->x * crtc->fb->bits_per_pixel/8); |
930 | } | 1084 | } |
@@ -970,7 +1124,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
970 | * It doesn't set the bit in iir again, but it still produces | 1124 | * It doesn't set the bit in iir again, but it still produces |
971 | * interrupts (for non-MSI). | 1125 | * interrupts (for non-MSI). |
972 | */ | 1126 | */ |
973 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1127 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
974 | pipea_stats = I915_READ(PIPEASTAT); | 1128 | pipea_stats = I915_READ(PIPEASTAT); |
975 | pipeb_stats = I915_READ(PIPEBSTAT); | 1129 | pipeb_stats = I915_READ(PIPEBSTAT); |
976 | 1130 | ||
@@ -993,7 +1147,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
993 | I915_WRITE(PIPEBSTAT, pipeb_stats); | 1147 | I915_WRITE(PIPEBSTAT, pipeb_stats); |
994 | irq_received = 1; | 1148 | irq_received = 1; |
995 | } | 1149 | } |
996 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1150 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
997 | 1151 | ||
998 | if (!irq_received) | 1152 | if (!irq_received) |
999 | break; | 1153 | break; |
@@ -1026,9 +1180,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1026 | } | 1180 | } |
1027 | 1181 | ||
1028 | if (iir & I915_USER_INTERRUPT) | 1182 | if (iir & I915_USER_INTERRUPT) |
1029 | notify_ring(dev, &dev_priv->render_ring); | 1183 | notify_ring(dev, &dev_priv->ring[RCS]); |
1030 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) | 1184 | if (iir & I915_BSD_USER_INTERRUPT) |
1031 | notify_ring(dev, &dev_priv->bsd_ring); | 1185 | notify_ring(dev, &dev_priv->ring[VCS]); |
1032 | 1186 | ||
1033 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { | 1187 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { |
1034 | intel_prepare_page_flip(dev, 0); | 1188 | intel_prepare_page_flip(dev, 0); |
@@ -1042,18 +1196,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
1042 | intel_finish_page_flip_plane(dev, 1); | 1196 | intel_finish_page_flip_plane(dev, 1); |
1043 | } | 1197 | } |
1044 | 1198 | ||
1045 | if (pipea_stats & vblank_status) { | 1199 | if (pipea_stats & vblank_status && |
1200 | drm_handle_vblank(dev, 0)) { | ||
1046 | vblank++; | 1201 | vblank++; |
1047 | drm_handle_vblank(dev, 0); | ||
1048 | if (!dev_priv->flip_pending_is_done) { | 1202 | if (!dev_priv->flip_pending_is_done) { |
1049 | i915_pageflip_stall_check(dev, 0); | 1203 | i915_pageflip_stall_check(dev, 0); |
1050 | intel_finish_page_flip(dev, 0); | 1204 | intel_finish_page_flip(dev, 0); |
1051 | } | 1205 | } |
1052 | } | 1206 | } |
1053 | 1207 | ||
1054 | if (pipeb_stats & vblank_status) { | 1208 | if (pipeb_stats & vblank_status && |
1209 | drm_handle_vblank(dev, 1)) { | ||
1055 | vblank++; | 1210 | vblank++; |
1056 | drm_handle_vblank(dev, 1); | ||
1057 | if (!dev_priv->flip_pending_is_done) { | 1211 | if (!dev_priv->flip_pending_is_done) { |
1058 | i915_pageflip_stall_check(dev, 1); | 1212 | i915_pageflip_stall_check(dev, 1); |
1059 | intel_finish_page_flip(dev, 1); | 1213 | intel_finish_page_flip(dev, 1); |
@@ -1101,12 +1255,13 @@ static int i915_emit_irq(struct drm_device * dev) | |||
1101 | if (master_priv->sarea_priv) | 1255 | if (master_priv->sarea_priv) |
1102 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; | 1256 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; |
1103 | 1257 | ||
1104 | BEGIN_LP_RING(4); | 1258 | if (BEGIN_LP_RING(4) == 0) { |
1105 | OUT_RING(MI_STORE_DWORD_INDEX); | 1259 | OUT_RING(MI_STORE_DWORD_INDEX); |
1106 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 1260 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
1107 | OUT_RING(dev_priv->counter); | 1261 | OUT_RING(dev_priv->counter); |
1108 | OUT_RING(MI_USER_INTERRUPT); | 1262 | OUT_RING(MI_USER_INTERRUPT); |
1109 | ADVANCE_LP_RING(); | 1263 | ADVANCE_LP_RING(); |
1264 | } | ||
1110 | 1265 | ||
1111 | return dev_priv->counter; | 1266 | return dev_priv->counter; |
1112 | } | 1267 | } |
@@ -1114,12 +1269,11 @@ static int i915_emit_irq(struct drm_device * dev) | |||
1114 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno) | 1269 | void i915_trace_irq_get(struct drm_device *dev, u32 seqno) |
1115 | { | 1270 | { |
1116 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1271 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1117 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 1272 | struct intel_ring_buffer *ring = LP_RING(dev_priv); |
1118 | |||
1119 | if (dev_priv->trace_irq_seqno == 0) | ||
1120 | render_ring->user_irq_get(dev, render_ring); | ||
1121 | 1273 | ||
1122 | dev_priv->trace_irq_seqno = seqno; | 1274 | if (dev_priv->trace_irq_seqno == 0 && |
1275 | ring->irq_get(ring)) | ||
1276 | dev_priv->trace_irq_seqno = seqno; | ||
1123 | } | 1277 | } |
1124 | 1278 | ||
1125 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 1279 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
@@ -1127,7 +1281,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
1127 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1281 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1128 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 1282 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
1129 | int ret = 0; | 1283 | int ret = 0; |
1130 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 1284 | struct intel_ring_buffer *ring = LP_RING(dev_priv); |
1131 | 1285 | ||
1132 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, | 1286 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, |
1133 | READ_BREADCRUMB(dev_priv)); | 1287 | READ_BREADCRUMB(dev_priv)); |
@@ -1141,10 +1295,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
1141 | if (master_priv->sarea_priv) | 1295 | if (master_priv->sarea_priv) |
1142 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 1296 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
1143 | 1297 | ||
1144 | render_ring->user_irq_get(dev, render_ring); | 1298 | if (ring->irq_get(ring)) { |
1145 | DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, | 1299 | DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, |
1146 | READ_BREADCRUMB(dev_priv) >= irq_nr); | 1300 | READ_BREADCRUMB(dev_priv) >= irq_nr); |
1147 | render_ring->user_irq_put(dev, render_ring); | 1301 | ring->irq_put(ring); |
1302 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) | ||
1303 | ret = -EBUSY; | ||
1148 | 1304 | ||
1149 | if (ret == -EBUSY) { | 1305 | if (ret == -EBUSY) { |
1150 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | 1306 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", |
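
The rewritten wait path above no longer assumes the interrupt can always be armed: when irq_get() fails, it falls back to polling the breadcrumb until a timeout and returns -EBUSY if it never arrives. A user-space model of that wait-or-poll shape, with stand-in predicates for ring->irq_get() and READ_BREADCRUMB():

#include <stdbool.h>
#include <stdio.h>

static bool try_arm_irq(void) { return false; } /* pretend arming fails */
static bool breadcrumb_done(int target)
{
    static int bc;
    return ++bc >= target;  /* stand-in for hardware progress */
}

/* Sleep on the interrupt if it can be armed, otherwise poll with a
 * timeout; -1 here plays the role of -EBUSY in the driver. */
static int wait_breadcrumb(int target, int timeout_ms)
{
    if (try_arm_irq()) {
        /* would sleep on a waitqueue, woken by the IRQ handler */
        return 0;
    }
    for (int waited = 0; waited < timeout_ms; waited++) {
        if (breadcrumb_done(target))
            return 0;
        /* nanosleep(~1ms) elided */
    }
    return -1;
}

int main(void)
{
    printf("%d\n", wait_breadcrumb(3, 10)); /* 0: polled to completion */
    return 0;
}
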
@@ -1163,7 +1319,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, | |||
1163 | drm_i915_irq_emit_t *emit = data; | 1319 | drm_i915_irq_emit_t *emit = data; |
1164 | int result; | 1320 | int result; |
1165 | 1321 | ||
1166 | if (!dev_priv || !dev_priv->render_ring.virtual_start) { | 1322 | if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { |
1167 | DRM_ERROR("called with no initialization\n"); | 1323 | DRM_ERROR("called with no initialization\n"); |
1168 | return -EINVAL; | 1324 | return -EINVAL; |
1169 | } | 1325 | } |
@@ -1209,9 +1365,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
1209 | if (!i915_pipe_enabled(dev, pipe)) | 1365 | if (!i915_pipe_enabled(dev, pipe)) |
1210 | return -EINVAL; | 1366 | return -EINVAL; |
1211 | 1367 | ||
1212 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1368 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1213 | if (HAS_PCH_SPLIT(dev)) | 1369 | if (HAS_PCH_SPLIT(dev)) |
1214 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | 1370 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? |
1215 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | 1371 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
1216 | else if (INTEL_INFO(dev)->gen >= 4) | 1372 | else if (INTEL_INFO(dev)->gen >= 4) |
1217 | i915_enable_pipestat(dev_priv, pipe, | 1373 | i915_enable_pipestat(dev_priv, pipe, |
@@ -1219,7 +1375,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
1219 | else | 1375 | else |
1220 | i915_enable_pipestat(dev_priv, pipe, | 1376 | i915_enable_pipestat(dev_priv, pipe, |
1221 | PIPE_VBLANK_INTERRUPT_ENABLE); | 1377 | PIPE_VBLANK_INTERRUPT_ENABLE); |
1222 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1378 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1223 | return 0; | 1379 | return 0; |
1224 | } | 1380 | } |
1225 | 1381 | ||
@@ -1231,15 +1387,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) | |||
1231 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1387 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1232 | unsigned long irqflags; | 1388 | unsigned long irqflags; |
1233 | 1389 | ||
1234 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1390 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1235 | if (HAS_PCH_SPLIT(dev)) | 1391 | if (HAS_PCH_SPLIT(dev)) |
1236 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | 1392 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
1237 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | 1393 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
1238 | else | 1394 | else |
1239 | i915_disable_pipestat(dev_priv, pipe, | 1395 | i915_disable_pipestat(dev_priv, pipe, |
1240 | PIPE_VBLANK_INTERRUPT_ENABLE | | 1396 | PIPE_VBLANK_INTERRUPT_ENABLE | |
1241 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1397 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
1242 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1398 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1243 | } | 1399 | } |
1244 | 1400 | ||
1245 | void i915_enable_interrupt (struct drm_device *dev) | 1401 | void i915_enable_interrupt (struct drm_device *dev) |
@@ -1306,12 +1462,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data, | |||
1306 | return -EINVAL; | 1462 | return -EINVAL; |
1307 | } | 1463 | } |
1308 | 1464 | ||
1309 | static struct drm_i915_gem_request * | 1465 | static u32 |
1310 | i915_get_tail_request(struct drm_device *dev) | 1466 | ring_last_seqno(struct intel_ring_buffer *ring) |
1311 | { | 1467 | { |
1312 | drm_i915_private_t *dev_priv = dev->dev_private; | 1468 | return list_entry(ring->request_list.prev, |
1313 | return list_entry(dev_priv->render_ring.request_list.prev, | 1469 | struct drm_i915_gem_request, list)->seqno; |
1314 | struct drm_i915_gem_request, list); | 1470 | } |
1471 | |||
1472 | static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) | ||
1473 | { | ||
1474 | if (list_empty(&ring->request_list) || | ||
1475 | i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { | ||
1476 | /* Issue a wake-up to catch stuck h/w. */ | ||
1477 | if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) { | ||
1478 | DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", | ||
1479 | ring->name, | ||
1480 | ring->waiting_seqno, | ||
1481 | ring->get_seqno(ring)); | ||
1482 | wake_up_all(&ring->irq_queue); | ||
1483 | *err = true; | ||
1484 | } | ||
1485 | return true; | ||
1486 | } | ||
1487 | return false; | ||
1488 | } | ||
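The new i915_hangcheck_ring_idle() helper folds the old per-ring wakeup checks into one place; its idle test leans on i915_seqno_passed() being safe across 32-bit seqno wraparound. For reference, that comparison is the usual signed-difference idiom -- a minimal sketch, assuming the two seqnos never drift more than 2^31 apart:

	/* Wrap-safe "has seq1 reached seq2?" test: valid as long as the
	 * two values stay within 2^31 of each other. */
	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
	{
		return (s32)(seq1 - seq2) >= 0;
	}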
1489 | |||
1490 | static bool kick_ring(struct intel_ring_buffer *ring) | ||
1491 | { | ||
1492 | struct drm_device *dev = ring->dev; | ||
1493 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1494 | u32 tmp = I915_READ_CTL(ring); | ||
1495 | if (tmp & RING_WAIT) { | ||
1496 | DRM_ERROR("Kicking stuck wait on %s\n", | ||
1497 | ring->name); | ||
1498 | I915_WRITE_CTL(ring, tmp); | ||
1499 | return true; | ||
1500 | } | ||
1501 | if (IS_GEN6(dev) && | ||
1502 | (tmp & RING_WAIT_SEMAPHORE)) { | ||
1503 | DRM_ERROR("Kicking stuck semaphore on %s\n", | ||
1504 | ring->name); | ||
1505 | I915_WRITE_CTL(ring, tmp); | ||
1506 | return true; | ||
1507 | } | ||
1508 | return false; | ||
1315 | } | 1509 | } |
1316 | 1510 | ||
1317 | /** | 1511 | /** |
@@ -1325,6 +1519,17 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1325 | struct drm_device *dev = (struct drm_device *)data; | 1519 | struct drm_device *dev = (struct drm_device *)data; |
1326 | drm_i915_private_t *dev_priv = dev->dev_private; | 1520 | drm_i915_private_t *dev_priv = dev->dev_private; |
1327 | uint32_t acthd, instdone, instdone1; | 1521 | uint32_t acthd, instdone, instdone1; |
1522 | bool err = false; | ||
1523 | |||
1524 | /* If all work is done then ACTHD clearly hasn't advanced. */ | ||
1525 | if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && | ||
1526 | i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && | ||
1527 | i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) { | ||
1528 | dev_priv->hangcheck_count = 0; | ||
1529 | if (err) | ||
1530 | goto repeat; | ||
1531 | return; | ||
1532 | } | ||
1328 | 1533 | ||
1329 | if (INTEL_INFO(dev)->gen < 4) { | 1534 | if (INTEL_INFO(dev)->gen < 4) { |
1330 | acthd = I915_READ(ACTHD); | 1535 | acthd = I915_READ(ACTHD); |
@@ -1336,38 +1541,6 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1336 | instdone1 = I915_READ(INSTDONE1); | 1541 | instdone1 = I915_READ(INSTDONE1); |
1337 | } | 1542 | } |
1338 | 1543 | ||
1339 | /* If all work is done then ACTHD clearly hasn't advanced. */ | ||
1340 | if (list_empty(&dev_priv->render_ring.request_list) || | ||
1341 | i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring), | ||
1342 | i915_get_tail_request(dev)->seqno)) { | ||
1343 | bool missed_wakeup = false; | ||
1344 | |||
1345 | dev_priv->hangcheck_count = 0; | ||
1346 | |||
1347 | /* Issue a wake-up to catch stuck h/w. */ | ||
1348 | if (dev_priv->render_ring.waiting_gem_seqno && | ||
1349 | waitqueue_active(&dev_priv->render_ring.irq_queue)) { | ||
1350 | wake_up_all(&dev_priv->render_ring.irq_queue); | ||
1351 | missed_wakeup = true; | ||
1352 | } | ||
1353 | |||
1354 | if (dev_priv->bsd_ring.waiting_gem_seqno && | ||
1355 | waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { | ||
1356 | wake_up_all(&dev_priv->bsd_ring.irq_queue); | ||
1357 | missed_wakeup = true; | ||
1358 | } | ||
1359 | |||
1360 | if (dev_priv->blt_ring.waiting_gem_seqno && | ||
1361 | waitqueue_active(&dev_priv->blt_ring.irq_queue)) { | ||
1362 | wake_up_all(&dev_priv->blt_ring.irq_queue); | ||
1363 | missed_wakeup = true; | ||
1364 | } | ||
1365 | |||
1366 | if (missed_wakeup) | ||
1367 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | ||
1368 | return; | ||
1369 | } | ||
1370 | |||
1371 | if (dev_priv->last_acthd == acthd && | 1544 | if (dev_priv->last_acthd == acthd && |
1372 | dev_priv->last_instdone == instdone && | 1545 | dev_priv->last_instdone == instdone && |
1373 | dev_priv->last_instdone1 == instdone1) { | 1546 | dev_priv->last_instdone1 == instdone1) { |
@@ -1380,12 +1553,17 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1380 | * and break the hang. This should work on | 1553 | * and break the hang. This should work on |
1381 | * all but the second generation chipsets. | 1554 | * all but the second generation chipsets. |
1382 | */ | 1555 | */ |
1383 | u32 tmp = I915_READ(PRB0_CTL); | 1556 | |
1384 | if (tmp & RING_WAIT) { | 1557 | if (kick_ring(&dev_priv->ring[RCS])) |
1385 | I915_WRITE(PRB0_CTL, tmp); | 1558 | goto repeat; |
1386 | POSTING_READ(PRB0_CTL); | 1559 | |
1387 | goto out; | 1560 | if (HAS_BSD(dev) && |
1388 | } | 1561 | kick_ring(&dev_priv->ring[VCS])) |
1562 | goto repeat; | ||
1563 | |||
1564 | if (HAS_BLT(dev) && | ||
1565 | kick_ring(&dev_priv->ring[BCS])) | ||
1566 | goto repeat; | ||
1389 | } | 1567 | } |
1390 | 1568 | ||
1391 | i915_handle_error(dev, true); | 1569 | i915_handle_error(dev, true); |
@@ -1399,7 +1577,7 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1399 | dev_priv->last_instdone1 = instdone1; | 1577 | dev_priv->last_instdone1 = instdone1; |
1400 | } | 1578 | } |
1401 | 1579 | ||
1402 | out: | 1580 | repeat: |
1403 | /* Reset timer in case the chip hangs without another request being added */ | 1581 | /* Reset timer in case the chip hangs without another request being added */ |
1404 | mod_timer(&dev_priv->hangcheck_timer, | 1582 | mod_timer(&dev_priv->hangcheck_timer, |
1405 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 1583 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); |
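The out: label becomes repeat:, which better matches the control flow: every path that wants another look jumps there and re-arms the one-shot timer. This is the standard self-rearming pattern for the classic timer API -- a sketch with hypothetical names (example_timer/example_poll are illustrative, not from the driver):

	static struct timer_list example_timer;		/* hypothetical */

	static void example_poll(unsigned long data)
	{
		/* ... inspect hardware state, as hangcheck does ... */

		/* One-shot timers stay dead unless re-armed. */
		mod_timer(&example_timer, jiffies + msecs_to_jiffies(1500));
	}

	/* At setup time: */
	setup_timer(&example_timer, example_poll, 0);
	mod_timer(&example_timer, jiffies + msecs_to_jiffies(1500));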
@@ -1417,17 +1595,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | |||
1417 | 1595 | ||
1418 | I915_WRITE(DEIMR, 0xffffffff); | 1596 | I915_WRITE(DEIMR, 0xffffffff); |
1419 | I915_WRITE(DEIER, 0x0); | 1597 | I915_WRITE(DEIER, 0x0); |
1420 | (void) I915_READ(DEIER); | 1598 | POSTING_READ(DEIER); |
1421 | 1599 | ||
1422 | /* and GT */ | 1600 | /* and GT */ |
1423 | I915_WRITE(GTIMR, 0xffffffff); | 1601 | I915_WRITE(GTIMR, 0xffffffff); |
1424 | I915_WRITE(GTIER, 0x0); | 1602 | I915_WRITE(GTIER, 0x0); |
1425 | (void) I915_READ(GTIER); | 1603 | POSTING_READ(GTIER); |
1426 | 1604 | ||
1427 | /* south display irq */ | 1605 | /* south display irq */ |
1428 | I915_WRITE(SDEIMR, 0xffffffff); | 1606 | I915_WRITE(SDEIMR, 0xffffffff); |
1429 | I915_WRITE(SDEIER, 0x0); | 1607 | I915_WRITE(SDEIER, 0x0); |
1430 | (void) I915_READ(SDEIER); | 1608 | POSTING_READ(SDEIER); |
1431 | } | 1609 | } |
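The (void) I915_READ(reg) to POSTING_READ(reg) conversions in this hunk are cosmetic: both force earlier posted MMIO writes out to the device before the function returns, which matters when the caller is about to install or tear down the interrupt handler and assumes the mask writes have landed. A sketch of what such a macro conventionally expands to (the real definition lives in the driver's private header):

	/* Posting read: read back a register in the same MMIO range so
	 * preceding writes are flushed; the value itself is discarded. */
	#define POSTING_READ(reg)	(void)I915_READ(reg)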
1432 | 1610 | ||
1433 | static int ironlake_irq_postinstall(struct drm_device *dev) | 1611 | static int ironlake_irq_postinstall(struct drm_device *dev) |
@@ -1436,38 +1614,34 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1436 | /* enable the kinds of interrupts that are always enabled */ | 1614 | /* enable the kinds of interrupts that are always enabled */ |
1437 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1615 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1438 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1616 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
1439 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; | 1617 | u32 render_irqs; |
1440 | u32 hotplug_mask; | 1618 | u32 hotplug_mask; |
1441 | 1619 | ||
1442 | dev_priv->irq_mask_reg = ~display_mask; | 1620 | dev_priv->irq_mask = ~display_mask; |
1443 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; | ||
1444 | 1621 | ||
1445 | /* should always be able to generate irqs */ | 1622 | /* should always be able to generate irqs */ |
1446 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1623 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
1447 | I915_WRITE(DEIMR, dev_priv->irq_mask_reg); | 1624 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
1448 | I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); | 1625 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); |
1449 | (void) I915_READ(DEIER); | 1626 | POSTING_READ(DEIER); |
1450 | |||
1451 | if (IS_GEN6(dev)) { | ||
1452 | render_mask = | ||
1453 | GT_PIPE_NOTIFY | | ||
1454 | GT_GEN6_BSD_USER_INTERRUPT | | ||
1455 | GT_BLT_USER_INTERRUPT; | ||
1456 | } | ||
1457 | 1627 | ||
1458 | dev_priv->gt_irq_mask_reg = ~render_mask; | 1628 | dev_priv->gt_irq_mask = ~0; |
1459 | dev_priv->gt_irq_enable_reg = render_mask; | ||
1460 | 1629 | ||
1461 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1630 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
1462 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | 1631 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
1463 | if (IS_GEN6(dev)) { | ||
1464 | I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); | ||
1465 | I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); | ||
1466 | I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); | ||
1467 | } | ||
1468 | 1632 | ||
1469 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); | 1633 | if (IS_GEN6(dev)) |
1470 | (void) I915_READ(GTIER); | 1634 | render_irqs = |
1635 | GT_USER_INTERRUPT | | ||
1636 | GT_GEN6_BSD_USER_INTERRUPT | | ||
1637 | GT_BLT_USER_INTERRUPT; | ||
1638 | else | ||
1639 | render_irqs = | ||
1640 | GT_USER_INTERRUPT | | ||
1641 | GT_PIPE_NOTIFY | | ||
1642 | GT_BSD_USER_INTERRUPT; | ||
1643 | I915_WRITE(GTIER, render_irqs); | ||
1644 | POSTING_READ(GTIER); | ||
1471 | 1645 | ||
1472 | if (HAS_PCH_CPT(dev)) { | 1646 | if (HAS_PCH_CPT(dev)) { |
1473 | hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | | 1647 | hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | |
@@ -1475,15 +1649,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1475 | } else { | 1649 | } else { |
1476 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1650 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1477 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1651 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1652 | hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; | ||
1653 | I915_WRITE(FDI_RXA_IMR, 0); | ||
1654 | I915_WRITE(FDI_RXB_IMR, 0); | ||
1478 | } | 1655 | } |
1479 | 1656 | ||
1480 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; | 1657 | dev_priv->pch_irq_mask = ~hotplug_mask; |
1481 | dev_priv->pch_irq_enable_reg = hotplug_mask; | ||
1482 | 1658 | ||
1483 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 1659 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
1484 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); | 1660 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); |
1485 | I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); | 1661 | I915_WRITE(SDEIER, hotplug_mask); |
1486 | (void) I915_READ(SDEIER); | 1662 | POSTING_READ(SDEIER); |
1487 | 1663 | ||
1488 | if (IS_IRONLAKE_M(dev)) { | 1664 | if (IS_IRONLAKE_M(dev)) { |
1489 | /* Clear & enable PCU event interrupts */ | 1665 | /* Clear & enable PCU event interrupts */ |
@@ -1519,7 +1695,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
1519 | I915_WRITE(PIPEBSTAT, 0); | 1695 | I915_WRITE(PIPEBSTAT, 0); |
1520 | I915_WRITE(IMR, 0xffffffff); | 1696 | I915_WRITE(IMR, 0xffffffff); |
1521 | I915_WRITE(IER, 0x0); | 1697 | I915_WRITE(IER, 0x0); |
1522 | (void) I915_READ(IER); | 1698 | POSTING_READ(IER); |
1523 | } | 1699 | } |
1524 | 1700 | ||
1525 | /* | 1701 | /* |
@@ -1532,11 +1708,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1532 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; | 1708 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; |
1533 | u32 error_mask; | 1709 | u32 error_mask; |
1534 | 1710 | ||
1535 | DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); | 1711 | DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); |
1536 | if (HAS_BSD(dev)) | 1712 | if (HAS_BSD(dev)) |
1537 | DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); | 1713 | DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); |
1538 | if (HAS_BLT(dev)) | 1714 | if (HAS_BLT(dev)) |
1539 | DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); | 1715 | DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); |
1540 | 1716 | ||
1541 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 1717 | dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; |
1542 | 1718 | ||
@@ -1544,7 +1720,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1544 | return ironlake_irq_postinstall(dev); | 1720 | return ironlake_irq_postinstall(dev); |
1545 | 1721 | ||
1546 | /* Unmask the interrupts that we always want on. */ | 1722 | /* Unmask the interrupts that we always want on. */ |
1547 | dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; | 1723 | dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; |
1548 | 1724 | ||
1549 | dev_priv->pipestat[0] = 0; | 1725 | dev_priv->pipestat[0] = 0; |
1550 | dev_priv->pipestat[1] = 0; | 1726 | dev_priv->pipestat[1] = 0; |
@@ -1553,7 +1729,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1553 | /* Enable in IER... */ | 1729 | /* Enable in IER... */ |
1554 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1730 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
1555 | /* and unmask in IMR */ | 1731 | /* and unmask in IMR */ |
1556 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; | 1732 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; |
1557 | } | 1733 | } |
1558 | 1734 | ||
1559 | /* | 1735 | /* |
@@ -1571,9 +1747,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1571 | } | 1747 | } |
1572 | I915_WRITE(EMR, error_mask); | 1748 | I915_WRITE(EMR, error_mask); |
1573 | 1749 | ||
1574 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | 1750 | I915_WRITE(IMR, dev_priv->irq_mask); |
1575 | I915_WRITE(IER, enable_mask); | 1751 | I915_WRITE(IER, enable_mask); |
1576 | (void) I915_READ(IER); | 1752 | POSTING_READ(IER); |
1577 | 1753 | ||
1578 | if (I915_HAS_HOTPLUG(dev)) { | 1754 | if (I915_HAS_HOTPLUG(dev)) { |
1579 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1755 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 25ed911a3112..5cfc68940f17 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -78,6 +78,12 @@ | |||
78 | #define GRDOM_RENDER (1<<2) | 78 | #define GRDOM_RENDER (1<<2) |
79 | #define GRDOM_MEDIA (3<<2) | 79 | #define GRDOM_MEDIA (3<<2) |
80 | 80 | ||
81 | #define GEN6_GDRST 0x941c | ||
82 | #define GEN6_GRDOM_FULL (1 << 0) | ||
83 | #define GEN6_GRDOM_RENDER (1 << 1) | ||
84 | #define GEN6_GRDOM_MEDIA (1 << 2) | ||
85 | #define GEN6_GRDOM_BLT (1 << 3) | ||
86 | |||
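The GEN6_GDRST bits are the gen6 counterpart of the GRDOM_* domains above: software sets a domain bit and the hardware clears it once that domain has been reset. A minimal polling sketch, assuming the usual i915 register accessors and with the timeout omitted for brevity (real code must bound the loop):

	/* Sketch: request a full gen6 soft reset and wait for the
	 * self-clearing domain bit. */
	static void gen6_full_reset(struct drm_i915_private *dev_priv)
	{
		I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
		while (I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL)
			cpu_relax();
	}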
81 | /* VGA stuff */ | 87 | /* VGA stuff */ |
82 | 88 | ||
83 | #define VGA_ST01_MDA 0x3ba | 89 | #define VGA_ST01_MDA 0x3ba |
@@ -139,6 +145,8 @@ | |||
139 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ | 145 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ |
140 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ | 146 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ |
141 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) | 147 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) |
148 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) | ||
149 | #define MI_SUSPEND_FLUSH_EN (1<<0) | ||
142 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | 150 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) |
143 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) | 151 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) |
144 | #define MI_OVERLAY_CONTINUE (0x0<<21) | 152 | #define MI_OVERLAY_CONTINUE (0x0<<21) |
@@ -153,17 +161,29 @@ | |||
153 | #define MI_MM_SPACE_PHYSICAL (0<<8) | 161 | #define MI_MM_SPACE_PHYSICAL (0<<8) |
154 | #define MI_SAVE_EXT_STATE_EN (1<<3) | 162 | #define MI_SAVE_EXT_STATE_EN (1<<3) |
155 | #define MI_RESTORE_EXT_STATE_EN (1<<2) | 163 | #define MI_RESTORE_EXT_STATE_EN (1<<2) |
164 | #define MI_FORCE_RESTORE (1<<1) | ||
156 | #define MI_RESTORE_INHIBIT (1<<0) | 165 | #define MI_RESTORE_INHIBIT (1<<0) |
157 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 166 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
158 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 167 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
159 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) | 168 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) |
160 | #define MI_STORE_DWORD_INDEX_SHIFT 2 | 169 | #define MI_STORE_DWORD_INDEX_SHIFT 2 |
161 | #define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) | 170 | /* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: |
171 | * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw | ||
172 | * simply ignores the register load under certain conditions. | ||
173 | * - One can actually load arbitrarily many registers: simply issue x | ||
174 | * address/value pairs. Don't overdo it, though; x <= 2^4 must hold! | ||
175 | */ | ||
176 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) | ||
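Taken together, the two rules above make a single register load cost four dwords on the ring. A sketch against the reworked ring API (intel_ring_begin/emit/advance); the function name is hypothetical:

	/* Sketch: load one register via LRI, honouring the NOOP rule. */
	static int emit_load_register_imm(struct intel_ring_buffer *ring,
					  u32 reg, u32 val)
	{
		int ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;
		intel_ring_emit(ring, MI_NOOP);	/* hw may ignore LRI without this */
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, reg);	/* register offset */
		intel_ring_emit(ring, val);	/* value to load */
		intel_ring_advance(ring);
		return 0;
	}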
162 | #define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ | 177 | #define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ |
163 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 178 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
164 | #define MI_BATCH_NON_SECURE (1) | 179 | #define MI_BATCH_NON_SECURE (1) |
165 | #define MI_BATCH_NON_SECURE_I965 (1<<8) | 180 | #define MI_BATCH_NON_SECURE_I965 (1<<8) |
166 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) | 181 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) |
182 | #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ | ||
183 | #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) | ||
184 | #define MI_SEMAPHORE_UPDATE (1<<21) | ||
185 | #define MI_SEMAPHORE_COMPARE (1<<20) | ||
186 | #define MI_SEMAPHORE_REGISTER (1<<18) | ||
167 | /* | 187 | /* |
168 | * 3D instructions used by the kernel | 188 | * 3D instructions used by the kernel |
169 | */ | 189 | */ |
@@ -256,10 +276,6 @@ | |||
256 | * Instruction and interrupt control regs | 276 | * Instruction and interrupt control regs |
257 | */ | 277 | */ |
258 | #define PGTBL_ER 0x02024 | 278 | #define PGTBL_ER 0x02024 |
259 | #define PRB0_TAIL 0x02030 | ||
260 | #define PRB0_HEAD 0x02034 | ||
261 | #define PRB0_START 0x02038 | ||
262 | #define PRB0_CTL 0x0203c | ||
263 | #define RENDER_RING_BASE 0x02000 | 279 | #define RENDER_RING_BASE 0x02000 |
264 | #define BSD_RING_BASE 0x04000 | 280 | #define BSD_RING_BASE 0x04000 |
265 | #define GEN6_BSD_RING_BASE 0x12000 | 281 | #define GEN6_BSD_RING_BASE 0x12000 |
@@ -268,9 +284,14 @@ | |||
268 | #define RING_HEAD(base) ((base)+0x34) | 284 | #define RING_HEAD(base) ((base)+0x34) |
269 | #define RING_START(base) ((base)+0x38) | 285 | #define RING_START(base) ((base)+0x38) |
270 | #define RING_CTL(base) ((base)+0x3c) | 286 | #define RING_CTL(base) ((base)+0x3c) |
287 | #define RING_SYNC_0(base) ((base)+0x40) | ||
288 | #define RING_SYNC_1(base) ((base)+0x44) | ||
289 | #define RING_MAX_IDLE(base) ((base)+0x54) | ||
271 | #define RING_HWS_PGA(base) ((base)+0x80) | 290 | #define RING_HWS_PGA(base) ((base)+0x80) |
272 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) | 291 | #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) |
273 | #define RING_ACTHD(base) ((base)+0x74) | 292 | #define RING_ACTHD(base) ((base)+0x74) |
293 | #define RING_NOPID(base) ((base)+0x94) | ||
294 | #define RING_IMR(base) ((base)+0xa8) | ||
274 | #define TAIL_ADDR 0x001FFFF8 | 295 | #define TAIL_ADDR 0x001FFFF8 |
275 | #define HEAD_WRAP_COUNT 0xFFE00000 | 296 | #define HEAD_WRAP_COUNT 0xFFE00000 |
276 | #define HEAD_WRAP_ONE 0x00200000 | 297 | #define HEAD_WRAP_ONE 0x00200000 |
@@ -285,10 +306,17 @@ | |||
285 | #define RING_INVALID 0x00000000 | 306 | #define RING_INVALID 0x00000000 |
286 | #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ | 307 | #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ |
287 | #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ | 308 | #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ |
309 | #define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ | ||
310 | #if 0 | ||
311 | #define PRB0_TAIL 0x02030 | ||
312 | #define PRB0_HEAD 0x02034 | ||
313 | #define PRB0_START 0x02038 | ||
314 | #define PRB0_CTL 0x0203c | ||
288 | #define PRB1_TAIL 0x02040 /* 915+ only */ | 315 | #define PRB1_TAIL 0x02040 /* 915+ only */ |
289 | #define PRB1_HEAD 0x02044 /* 915+ only */ | 316 | #define PRB1_HEAD 0x02044 /* 915+ only */ |
290 | #define PRB1_START 0x02048 /* 915+ only */ | 317 | #define PRB1_START 0x02048 /* 915+ only */ |
291 | #define PRB1_CTL 0x0204c /* 915+ only */ | 318 | #define PRB1_CTL 0x0204c /* 915+ only */ |
319 | #endif | ||
292 | #define IPEIR_I965 0x02064 | 320 | #define IPEIR_I965 0x02064 |
293 | #define IPEHR_I965 0x02068 | 321 | #define IPEHR_I965 0x02068 |
294 | #define INSTDONE_I965 0x0206c | 322 | #define INSTDONE_I965 0x0206c |
@@ -305,11 +333,42 @@ | |||
305 | #define INSTDONE 0x02090 | 333 | #define INSTDONE 0x02090 |
306 | #define NOPID 0x02094 | 334 | #define NOPID 0x02094 |
307 | #define HWSTAM 0x02098 | 335 | #define HWSTAM 0x02098 |
336 | #define VCS_INSTDONE 0x1206C | ||
337 | #define VCS_IPEIR 0x12064 | ||
338 | #define VCS_IPEHR 0x12068 | ||
339 | #define VCS_ACTHD 0x12074 | ||
340 | #define BCS_INSTDONE 0x2206C | ||
341 | #define BCS_IPEIR 0x22064 | ||
342 | #define BCS_IPEHR 0x22068 | ||
343 | #define BCS_ACTHD 0x22074 | ||
344 | |||
345 | #define ERROR_GEN6 0x040a0 | ||
346 | |||
347 | /* GM45+ chicken bits -- debug workaround bits that may be required | ||
348 | * for various sorts of correct behavior. The top 16 bits of each are | ||
349 | * the enables for writing to the corresponding low bit. | ||
350 | */ | ||
351 | #define _3D_CHICKEN 0x02084 | ||
352 | #define _3D_CHICKEN2 0x0208c | ||
353 | /* Disables pipelining of read flushes past the SF-WIZ interface. | ||
354 | * Required on all Ironlake steppings according to the B-Spec, but the | ||
355 | * particular danger of not doing so is not specified. | ||
356 | */ | ||
357 | # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) | ||
358 | #define _3D_CHICKEN3 0x02090 | ||
308 | 359 | ||
309 | #define MI_MODE 0x0209c | 360 | #define MI_MODE 0x0209c |
310 | # define VS_TIMER_DISPATCH (1 << 6) | 361 | # define VS_TIMER_DISPATCH (1 << 6) |
311 | # define MI_FLUSH_ENABLE (1 << 11) | 362 | # define MI_FLUSH_ENABLE (1 << 11) |
312 | 363 | ||
364 | #define GFX_MODE 0x02520 | ||
365 | #define GFX_RUN_LIST_ENABLE (1<<15) | ||
366 | #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) | ||
367 | #define GFX_SURFACE_FAULT_ENABLE (1<<12) | ||
368 | #define GFX_REPLAY_MODE (1<<11) | ||
369 | #define GFX_PSMI_GRANULARITY (1<<10) | ||
370 | #define GFX_PPGTT_ENABLE (1<<9) | ||
371 | |||
313 | #define SCPD0 0x0209c /* 915+ only */ | 372 | #define SCPD0 0x0209c /* 915+ only */ |
314 | #define IER 0x020a0 | 373 | #define IER 0x020a0 |
315 | #define IIR 0x020a4 | 374 | #define IIR 0x020a4 |
@@ -454,6 +513,10 @@ | |||
454 | #define GEN6_BLITTER_SYNC_STATUS (1 << 24) | 513 | #define GEN6_BLITTER_SYNC_STATUS (1 << 24) |
455 | #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) | 514 | #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) |
456 | 515 | ||
516 | #define GEN6_BLITTER_ECOSKPD 0x221d0 | ||
517 | #define GEN6_BLITTER_LOCK_SHIFT 16 | ||
518 | #define GEN6_BLITTER_FBC_NOTIFY (1<<3) | ||
519 | |||
457 | #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 | 520 | #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 |
458 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) | 521 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) |
459 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) | 522 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) |
@@ -461,7 +524,7 @@ | |||
461 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) | 524 | #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) |
462 | 525 | ||
463 | #define GEN6_BSD_IMR 0x120a8 | 526 | #define GEN6_BSD_IMR 0x120a8 |
464 | #define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) | 527 | #define GEN6_BSD_USER_INTERRUPT (1 << 12) |
465 | 528 | ||
466 | #define GEN6_BSD_RNCID 0x12198 | 529 | #define GEN6_BSD_RNCID 0x12198 |
467 | 530 | ||
@@ -541,6 +604,18 @@ | |||
541 | 604 | ||
542 | #define ILK_DISPLAY_CHICKEN1 0x42000 | 605 | #define ILK_DISPLAY_CHICKEN1 0x42000 |
543 | #define ILK_FBCQ_DIS (1<<22) | 606 | #define ILK_FBCQ_DIS (1<<22) |
607 | #define ILK_PABSTRETCH_DIS (1<<21) | ||
608 | |||
609 | |||
610 | /* | ||
611 | * Framebuffer compression for Sandybridge | ||
612 | * | ||
613 | * The following two registers are of type GTTMMADR | ||
614 | */ | ||
615 | #define SNB_DPFC_CTL_SA 0x100100 | ||
616 | #define SNB_CPU_FENCE_ENABLE (1<<29) | ||
617 | #define DPFC_CPU_FENCE_OFFSET 0x100104 | ||
618 | |||
544 | 619 | ||
545 | /* | 620 | /* |
546 | * GPIO regs | 621 | * GPIO regs |
@@ -900,6 +975,8 @@ | |||
900 | */ | 975 | */ |
901 | #define MCHBAR_MIRROR_BASE 0x10000 | 976 | #define MCHBAR_MIRROR_BASE 0x10000 |
902 | 977 | ||
978 | #define MCHBAR_MIRROR_BASE_SNB 0x140000 | ||
979 | |||
903 | /** 915-945 and GM965 MCH register controlling DRAM channel access */ | 980 | /** 915-945 and GM965 MCH register controlling DRAM channel access */ |
904 | #define DCC 0x10200 | 981 | #define DCC 0x10200 |
905 | #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) | 982 | #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) |
@@ -1061,9 +1138,50 @@ | |||
1061 | #define RCBMINAVG 0x111a0 | 1138 | #define RCBMINAVG 0x111a0 |
1062 | #define RCUPEI 0x111b0 | 1139 | #define RCUPEI 0x111b0 |
1063 | #define RCDNEI 0x111b4 | 1140 | #define RCDNEI 0x111b4 |
1064 | #define MCHBAR_RENDER_STANDBY 0x111b8 | 1141 | #define RSTDBYCTL 0x111b8 |
1065 | #define RCX_SW_EXIT (1<<23) | 1142 | #define RS1EN (1<<31) |
1066 | #define RSX_STATUS_MASK 0x00700000 | 1143 | #define RS2EN (1<<30) |
1144 | #define RS3EN (1<<29) | ||
1145 | #define D3RS3EN (1<<28) /* Display D3 implies RS3 */ | ||
1146 | #define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ | ||
1147 | #define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ | ||
1148 | #define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ | ||
1149 | #define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ | ||
1150 | #define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ | ||
1151 | #define RSX_STATUS_MASK (7<<20) | ||
1152 | #define RSX_STATUS_ON (0<<20) | ||
1153 | #define RSX_STATUS_RC1 (1<<20) | ||
1154 | #define RSX_STATUS_RC1E (2<<20) | ||
1155 | #define RSX_STATUS_RS1 (3<<20) | ||
1156 | #define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ | ||
1157 | #define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ | ||
1158 | #define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ | ||
1159 | #define RSX_STATUS_RSVD2 (7<<20) | ||
1160 | #define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ | ||
1161 | #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ | ||
1162 | #define JRSC (1<<17) /* rsx coupled to cpu c-state */ | ||
1163 | #define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ | ||
1164 | #define RS1CONTSAV_MASK (3<<14) | ||
1165 | #define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ | ||
1166 | #define RS1CONTSAV_RSVD (1<<14) | ||
1167 | #define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ | ||
1168 | #define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ | ||
1169 | #define NORMSLEXLAT_MASK (3<<12) | ||
1170 | #define SLOW_RS123 (0<<12) | ||
1171 | #define SLOW_RS23 (1<<12) | ||
1172 | #define SLOW_RS3 (2<<12) | ||
1173 | #define NORMAL_RS123 (3<<12) | ||
1174 | #define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ | ||
1175 | #define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ | ||
1176 | #define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ | ||
1177 | #define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ | ||
1178 | #define RS_CSTATE_MASK (3<<4) | ||
1179 | #define RS_CSTATE_C367_RS1 (0<<4) | ||
1180 | #define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) | ||
1181 | #define RS_CSTATE_RSVD (2<<4) | ||
1182 | #define RS_CSTATE_C367_RS2 (3<<4) | ||
1183 | #define REDSAVES (1<<3) /* no context save if was idle during rs0 */ | ||
1184 | #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ | ||
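With RSX_STATUS_MASK rewritten as a proper field (7<<20) and every encoding named, decoding the current render-standby state becomes a plain field compare. A small sketch, assuming the usual i915 accessors:

	/* Sketch: true while the render core sits in RS2, i.e. RC6. */
	static bool ilk_in_rc6(struct drm_i915_private *dev_priv)
	{
		return (I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_RS2;
	}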
1067 | #define VIDCTL 0x111c0 | 1185 | #define VIDCTL 0x111c0 |
1068 | #define VIDSTS 0x111c8 | 1186 | #define VIDSTS 0x111c8 |
1069 | #define VIDSTART 0x111cc /* 8 bits */ | 1187 | #define VIDSTART 0x111cc /* 8 bits */ |
@@ -1119,6 +1237,10 @@ | |||
1119 | #define DDRMPLL1 0X12c20 | 1237 | #define DDRMPLL1 0X12c20 |
1120 | #define PEG_BAND_GAP_DATA 0x14d68 | 1238 | #define PEG_BAND_GAP_DATA 0x14d68 |
1121 | 1239 | ||
1240 | #define GEN6_GT_PERF_STATUS 0x145948 | ||
1241 | #define GEN6_RP_STATE_LIMITS 0x145994 | ||
1242 | #define GEN6_RP_STATE_CAP 0x145998 | ||
1243 | |||
1122 | /* | 1244 | /* |
1123 | * Logical Context regs | 1245 | * Logical Context regs |
1124 | */ | 1246 | */ |
@@ -1168,7 +1290,6 @@ | |||
1168 | #define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) | 1290 | #define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) |
1169 | #define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) | 1291 | #define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) |
1170 | #define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) | 1292 | #define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) |
1171 | #define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) | ||
1172 | #define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) | 1293 | #define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) |
1173 | 1294 | ||
1174 | /* VGA port control */ | 1295 | /* VGA port control */ |
@@ -2182,8 +2303,10 @@ | |||
2182 | #define PIPE_6BPC (2 << 5) | 2303 | #define PIPE_6BPC (2 << 5) |
2183 | #define PIPE_12BPC (3 << 5) | 2304 | #define PIPE_12BPC (3 << 5) |
2184 | 2305 | ||
2306 | #define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) | ||
2185 | #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) | 2307 | #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) |
2186 | #define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) | 2308 | #define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) |
2309 | #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL) | ||
2187 | 2310 | ||
2188 | #define DSPARB 0x70030 | 2311 | #define DSPARB 0x70030 |
2189 | #define DSPARB_CSTART_MASK (0x7f << 7) | 2312 | #define DSPARB_CSTART_MASK (0x7f << 7) |
@@ -2271,8 +2394,13 @@ | |||
2271 | 2394 | ||
2272 | /* Memory latency timer register */ | 2395 | /* Memory latency timer register */ |
2273 | #define MLTR_ILK 0x11222 | 2396 | #define MLTR_ILK 0x11222 |
2397 | #define MLTR_WM1_SHIFT 0 | ||
2398 | #define MLTR_WM2_SHIFT 8 | ||
2274 | /* the unit of memory self-refresh latency time is 0.5us */ | 2399 | /* the unit of memory self-refresh latency time is 0.5us */ |
2275 | #define ILK_SRLT_MASK 0x3f | 2400 | #define ILK_SRLT_MASK 0x3f |
2401 | #define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK) | ||
2402 | #define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) | ||
2403 | #define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) | ||
2276 | 2404 | ||
2277 | /* define the fifo size on Ironlake */ | 2405 | /* define the fifo size on Ironlake */ |
2278 | #define ILK_DISPLAY_FIFO 128 | 2406 | #define ILK_DISPLAY_FIFO 128 |
@@ -2291,6 +2419,40 @@ | |||
2291 | 2419 | ||
2292 | #define ILK_FIFO_LINE_SIZE 64 | 2420 | #define ILK_FIFO_LINE_SIZE 64 |
2293 | 2421 | ||
2422 | /* define the WM info on Sandybridge */ | ||
2423 | #define SNB_DISPLAY_FIFO 128 | ||
2424 | #define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */ | ||
2425 | #define SNB_DISPLAY_DFTWM 8 | ||
2426 | #define SNB_CURSOR_FIFO 32 | ||
2427 | #define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */ | ||
2428 | #define SNB_CURSOR_DFTWM 8 | ||
2429 | |||
2430 | #define SNB_DISPLAY_SR_FIFO 512 | ||
2431 | #define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */ | ||
2432 | #define SNB_DISPLAY_DFT_SRWM 0x3f | ||
2433 | #define SNB_CURSOR_SR_FIFO 64 | ||
2434 | #define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */ | ||
2435 | #define SNB_CURSOR_DFT_SRWM 8 | ||
2436 | |||
2437 | #define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */ | ||
2438 | |||
2439 | #define SNB_FIFO_LINE_SIZE 64 | ||
2440 | |||
2441 | |||
2442 | /* the register from which we read all kinds of latency values */ | ||
2443 | #define SSKPD 0x5d10 | ||
2444 | #define SSKPD_WM_MASK 0x3f | ||
2445 | #define SSKPD_WM0_SHIFT 0 | ||
2446 | #define SSKPD_WM1_SHIFT 8 | ||
2447 | #define SSKPD_WM2_SHIFT 16 | ||
2448 | #define SSKPD_WM3_SHIFT 24 | ||
2449 | |||
2450 | #define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) | ||
2451 | #define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) | ||
2452 | #define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) | ||
2453 | #define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) | ||
2454 | #define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) | ||
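As with the MLTR_ILK helpers above, each macro hides a read-shift-mask; since >> binds tighter than & in C, SNB_LATENCY(shift) evaluates as (read >> shift) & mask, as intended. A sketch gathering all four values (the parameter must be named dev_priv because I915_READ references it implicitly; the values are assumed to share the 0.5 us unit noted for MLTR_ILK):

	/* Sketch: gather the four SSKPD watermark latencies. */
	static void snb_read_wm_latencies(struct drm_i915_private *dev_priv,
					  u32 latency[4])
	{
		latency[0] = SNB_READ_WM0_LATENCY();
		latency[1] = SNB_READ_WM1_LATENCY();
		latency[2] = SNB_READ_WM2_LATENCY();
		latency[3] = SNB_READ_WM3_LATENCY();
	}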
2455 | |||
2294 | /* | 2456 | /* |
2295 | * The two pipe frame counter registers are not synchronized, so | 2457 | * The two pipe frame counter registers are not synchronized, so |
2296 | * reading a stable value is somewhat tricky. The following code | 2458 | * reading a stable value is somewhat tricky. The following code |
@@ -2351,6 +2513,10 @@ | |||
2351 | #define CURBBASE 0x700c4 | 2513 | #define CURBBASE 0x700c4 |
2352 | #define CURBPOS 0x700c8 | 2514 | #define CURBPOS 0x700c8 |
2353 | 2515 | ||
2516 | #define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR) | ||
2517 | #define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE) | ||
2518 | #define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS) | ||
2519 | |||
2354 | /* Display A control */ | 2520 | /* Display A control */ |
2355 | #define DSPACNTR 0x70180 | 2521 | #define DSPACNTR 0x70180 |
2356 | #define DISPLAY_PLANE_ENABLE (1<<31) | 2522 | #define DISPLAY_PLANE_ENABLE (1<<31) |
@@ -2464,6 +2630,8 @@ | |||
2464 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 | 2630 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 |
2465 | 2631 | ||
2466 | #define PCH_DSPCLK_GATE_D 0x42020 | 2632 | #define PCH_DSPCLK_GATE_D 0x42020 |
2633 | # define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) | ||
2634 | # define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) | ||
2467 | # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) | 2635 | # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) |
2468 | # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) | 2636 | # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) |
2469 | 2637 | ||
@@ -2471,6 +2639,9 @@ | |||
2471 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) | 2639 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) |
2472 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) | 2640 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) |
2473 | 2641 | ||
2642 | #define PCH_3DCGDIS1 0x46024 | ||
2643 | # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) | ||
2644 | |||
2474 | #define FDI_PLL_FREQ_CTL 0x46030 | 2645 | #define FDI_PLL_FREQ_CTL 0x46030 |
2475 | #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) | 2646 | #define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) |
2476 | #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 | 2647 | #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 |
@@ -2586,10 +2757,21 @@ | |||
2586 | #define GTIER 0x4401c | 2757 | #define GTIER 0x4401c |
2587 | 2758 | ||
2588 | #define ILK_DISPLAY_CHICKEN2 0x42004 | 2759 | #define ILK_DISPLAY_CHICKEN2 0x42004 |
2760 | /* Required on all Ironlake and Sandybridge according to the B-Spec. */ | ||
2761 | #define ILK_ELPIN_409_SELECT (1 << 25) | ||
2589 | #define ILK_DPARB_GATE (1<<22) | 2762 | #define ILK_DPARB_GATE (1<<22) |
2590 | #define ILK_VSDPFD_FULL (1<<21) | 2763 | #define ILK_VSDPFD_FULL (1<<21) |
2764 | #define ILK_DISPLAY_CHICKEN_FUSES 0x42014 | ||
2765 | #define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31) | ||
2766 | #define ILK_INTERNAL_DISPLAY_DISABLE (1<<30) | ||
2767 | #define ILK_DISPLAY_DEBUG_DISABLE (1<<29) | ||
2768 | #define ILK_HDCP_DISABLE (1<<25) | ||
2769 | #define ILK_eDP_A_DISABLE (1<<24) | ||
2770 | #define ILK_DESKTOP (1<<23) | ||
2591 | #define ILK_DSPCLK_GATE 0x42020 | 2771 | #define ILK_DSPCLK_GATE 0x42020 |
2592 | #define ILK_DPARB_CLK_GATE (1<<5) | 2772 | #define ILK_DPARB_CLK_GATE (1<<5) |
2773 | #define ILK_DPFD_CLK_GATE (1<<7) | ||
2774 | |||
2593 | /* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */ | 2775 | /* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */ |
2594 | #define ILK_CLK_FBC (1<<7) | 2776 | #define ILK_CLK_FBC (1<<7) |
2595 | #define ILK_DPFC_DIS1 (1<<8) | 2777 | #define ILK_DPFC_DIS1 (1<<8) |
@@ -2602,12 +2784,41 @@ | |||
2602 | /* PCH */ | 2784 | /* PCH */ |
2603 | 2785 | ||
2604 | /* south display engine interrupt */ | 2786 | /* south display engine interrupt */ |
2787 | #define SDE_AUDIO_POWER_D (1 << 27) | ||
2788 | #define SDE_AUDIO_POWER_C (1 << 26) | ||
2789 | #define SDE_AUDIO_POWER_B (1 << 25) | ||
2790 | #define SDE_AUDIO_POWER_SHIFT (25) | ||
2791 | #define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT) | ||
2792 | #define SDE_GMBUS (1 << 24) | ||
2793 | #define SDE_AUDIO_HDCP_TRANSB (1 << 23) | ||
2794 | #define SDE_AUDIO_HDCP_TRANSA (1 << 22) | ||
2795 | #define SDE_AUDIO_HDCP_MASK (3 << 22) | ||
2796 | #define SDE_AUDIO_TRANSB (1 << 21) | ||
2797 | #define SDE_AUDIO_TRANSA (1 << 20) | ||
2798 | #define SDE_AUDIO_TRANS_MASK (3 << 20) | ||
2799 | #define SDE_POISON (1 << 19) | ||
2800 | /* 18 reserved */ | ||
2801 | #define SDE_FDI_RXB (1 << 17) | ||
2802 | #define SDE_FDI_RXA (1 << 16) | ||
2803 | #define SDE_FDI_MASK (3 << 16) | ||
2804 | #define SDE_AUXD (1 << 15) | ||
2805 | #define SDE_AUXC (1 << 14) | ||
2806 | #define SDE_AUXB (1 << 13) | ||
2807 | #define SDE_AUX_MASK (7 << 13) | ||
2808 | /* 12 reserved */ | ||
2605 | #define SDE_CRT_HOTPLUG (1 << 11) | 2809 | #define SDE_CRT_HOTPLUG (1 << 11) |
2606 | #define SDE_PORTD_HOTPLUG (1 << 10) | 2810 | #define SDE_PORTD_HOTPLUG (1 << 10) |
2607 | #define SDE_PORTC_HOTPLUG (1 << 9) | 2811 | #define SDE_PORTC_HOTPLUG (1 << 9) |
2608 | #define SDE_PORTB_HOTPLUG (1 << 8) | 2812 | #define SDE_PORTB_HOTPLUG (1 << 8) |
2609 | #define SDE_SDVOB_HOTPLUG (1 << 6) | 2813 | #define SDE_SDVOB_HOTPLUG (1 << 6) |
2610 | #define SDE_HOTPLUG_MASK (0xf << 8) | 2814 | #define SDE_HOTPLUG_MASK (0xf << 8) |
2815 | #define SDE_TRANSB_CRC_DONE (1 << 5) | ||
2816 | #define SDE_TRANSB_CRC_ERR (1 << 4) | ||
2817 | #define SDE_TRANSB_FIFO_UNDER (1 << 3) | ||
2818 | #define SDE_TRANSA_CRC_DONE (1 << 2) | ||
2819 | #define SDE_TRANSA_CRC_ERR (1 << 1) | ||
2820 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) | ||
2821 | #define SDE_TRANS_MASK (0x3f) | ||
2611 | /* CPT */ | 2822 | /* CPT */ |
2612 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | 2823 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) |
2613 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 2824 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
@@ -2669,6 +2880,7 @@ | |||
2669 | #define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) | 2880 | #define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) |
2670 | 2881 | ||
2671 | #define PCH_FPA0 0xc6040 | 2882 | #define PCH_FPA0 0xc6040 |
2883 | #define FP_CB_TUNE (0x3<<22) | ||
2672 | #define PCH_FPA1 0xc6044 | 2884 | #define PCH_FPA1 0xc6044 |
2673 | #define PCH_FPB0 0xc6048 | 2885 | #define PCH_FPB0 0xc6048 |
2674 | #define PCH_FPB1 0xc604c | 2886 | #define PCH_FPB1 0xc604c |
@@ -3033,6 +3245,7 @@ | |||
3033 | #define TRANS_DP_10BPC (1<<9) | 3245 | #define TRANS_DP_10BPC (1<<9) |
3034 | #define TRANS_DP_6BPC (2<<9) | 3246 | #define TRANS_DP_6BPC (2<<9) |
3035 | #define TRANS_DP_12BPC (3<<9) | 3247 | #define TRANS_DP_12BPC (3<<9) |
3248 | #define TRANS_DP_BPC_MASK (3<<9) | ||
3036 | #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) | 3249 | #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) |
3037 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 | 3250 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 |
3038 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) | 3251 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) |
@@ -3046,10 +3259,74 @@ | |||
3046 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) | 3259 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) |
3047 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) | 3260 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) |
3048 | /* SNB B-stepping */ | 3261 | /* SNB B-stepping */ |
3049 | #define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) | 3262 | #define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22) |
3050 | #define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) | 3263 | #define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22) |
3051 | #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) | 3264 | #define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22) |
3052 | #define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) | 3265 | #define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22) |
3266 | #define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) | ||
3053 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) | 3267 | #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) |
3054 | 3268 | ||
3269 | #define FORCEWAKE 0xA18C | ||
3270 | #define FORCEWAKE_ACK 0x130090 | ||
3271 | |||
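FORCEWAKE/FORCEWAKE_ACK implement the gen6 handshake that keeps the GT powered while the driver touches its registers: write 1 and wait for the ACK bit before accessing GT state, write 0 to release. A hedged sketch (the in-tree version reference-counts acquisition and bounds the poll):

	/* Sketch: hold the GT awake across register access. */
	static void gen6_force_wake_get(struct drm_i915_private *dev_priv)
	{
		I915_WRITE(FORCEWAKE, 1);
		while ((I915_READ(FORCEWAKE_ACK) & 1) == 0)
			cpu_relax();	/* bound this in real code */
	}

	static void gen6_force_wake_put(struct drm_i915_private *dev_priv)
	{
		I915_WRITE(FORCEWAKE, 0);
	}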
3272 | #define GEN6_RPNSWREQ 0xA008 | ||
3273 | #define GEN6_TURBO_DISABLE (1<<31) | ||
3274 | #define GEN6_FREQUENCY(x) ((x)<<25) | ||
3275 | #define GEN6_OFFSET(x) ((x)<<19) | ||
3276 | #define GEN6_AGGRESSIVE_TURBO (0<<15) | ||
3277 | #define GEN6_RC_VIDEO_FREQ 0xA00C | ||
3278 | #define GEN6_RC_CONTROL 0xA090 | ||
3279 | #define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) | ||
3280 | #define GEN6_RC_CTL_RC6p_ENABLE (1<<17) | ||
3281 | #define GEN6_RC_CTL_RC6_ENABLE (1<<18) | ||
3282 | #define GEN6_RC_CTL_RC1e_ENABLE (1<<20) | ||
3283 | #define GEN6_RC_CTL_RC7_ENABLE (1<<22) | ||
3284 | #define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) | ||
3285 | #define GEN6_RC_CTL_HW_ENABLE (1<<31) | ||
3286 | #define GEN6_RP_DOWN_TIMEOUT 0xA010 | ||
3287 | #define GEN6_RP_INTERRUPT_LIMITS 0xA014 | ||
3288 | #define GEN6_RPSTAT1 0xA01C | ||
3289 | #define GEN6_RP_CONTROL 0xA024 | ||
3290 | #define GEN6_RP_MEDIA_TURBO (1<<11) | ||
3291 | #define GEN6_RP_USE_NORMAL_FREQ (1<<9) | ||
3292 | #define GEN6_RP_MEDIA_IS_GFX (1<<8) | ||
3293 | #define GEN6_RP_ENABLE (1<<7) | ||
3294 | #define GEN6_RP_UP_BUSY_MAX (0x2<<3) | ||
3295 | #define GEN6_RP_DOWN_BUSY_MIN (0x2<<0) | ||
3296 | #define GEN6_RP_UP_THRESHOLD 0xA02C | ||
3297 | #define GEN6_RP_DOWN_THRESHOLD 0xA030 | ||
3298 | #define GEN6_RP_UP_EI 0xA068 | ||
3299 | #define GEN6_RP_DOWN_EI 0xA06C | ||
3300 | #define GEN6_RP_IDLE_HYSTERSIS 0xA070 | ||
3301 | #define GEN6_RC_STATE 0xA094 | ||
3302 | #define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 | ||
3303 | #define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C | ||
3304 | #define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 | ||
3305 | #define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 | ||
3306 | #define GEN6_RC_IDLE_HYSTERSIS 0xA0AC | ||
3307 | #define GEN6_RC_SLEEP 0xA0B0 | ||
3308 | #define GEN6_RC1e_THRESHOLD 0xA0B4 | ||
3309 | #define GEN6_RC6_THRESHOLD 0xA0B8 | ||
3310 | #define GEN6_RC6p_THRESHOLD 0xA0BC | ||
3311 | #define GEN6_RC6pp_THRESHOLD 0xA0C0 | ||
3312 | #define GEN6_PMINTRMSK 0xA168 | ||
3313 | |||
3314 | #define GEN6_PMISR 0x44020 | ||
3315 | #define GEN6_PMIMR 0x44024 | ||
3316 | #define GEN6_PMIIR 0x44028 | ||
3317 | #define GEN6_PMIER 0x4402C | ||
3318 | #define GEN6_PM_MBOX_EVENT (1<<25) | ||
3319 | #define GEN6_PM_THERMAL_EVENT (1<<24) | ||
3320 | #define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) | ||
3321 | #define GEN6_PM_RP_UP_THRESHOLD (1<<5) | ||
3322 | #define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) | ||
3323 | #define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) | ||
3324 | #define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) | ||
3325 | |||
3326 | #define GEN6_PCODE_MAILBOX 0x138124 | ||
3327 | #define GEN6_PCODE_READY (1<<31) | ||
3328 | #define GEN6_READ_OC_PARAMS 0xc | ||
3329 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 | ||
3330 | #define GEN6_PCODE_DATA 0x138128 | ||
3331 | |||
3055 | #endif /* _I915_REG_H_ */ | 3332 | #endif /* _I915_REG_H_ */ |
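The GEN6_PCODE_* registers at the end of the header form a simple request/acknowledge mailbox toward the PCU firmware: the driver deposits a value in GEN6_PCODE_DATA, posts the command with the READY bit set, and the firmware clears READY once it has consumed the request. A minimal sketch (unbounded poll shown for brevity; real code must time out):

	/* Sketch: one write transaction through the gen6 pcode mailbox,
	 * e.g. gen6_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0). */
	static void gen6_pcode_write(struct drm_i915_private *dev_priv,
				     u32 mbox, u32 val)
	{
		I915_WRITE(GEN6_PCODE_DATA, val);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
		while (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
			cpu_relax();
	}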
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 454c064f8ef7..0521ecf26017 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -235,10 +235,21 @@ static void i915_restore_vga(struct drm_device *dev) | |||
235 | static void i915_save_modeset_reg(struct drm_device *dev) | 235 | static void i915_save_modeset_reg(struct drm_device *dev) |
236 | { | 236 | { |
237 | struct drm_i915_private *dev_priv = dev->dev_private; | 237 | struct drm_i915_private *dev_priv = dev->dev_private; |
238 | int i; | ||
238 | 239 | ||
239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 240 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
240 | return; | 241 | return; |
241 | 242 | ||
243 | /* Cursor state */ | ||
244 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); | ||
245 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); | ||
246 | dev_priv->saveCURABASE = I915_READ(CURABASE); | ||
247 | dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); | ||
248 | dev_priv->saveCURBPOS = I915_READ(CURBPOS); | ||
249 | dev_priv->saveCURBBASE = I915_READ(CURBBASE); | ||
250 | if (IS_GEN2(dev)) | ||
251 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | ||
252 | |||
242 | if (HAS_PCH_SPLIT(dev)) { | 253 | if (HAS_PCH_SPLIT(dev)) { |
243 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); | 254 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); |
244 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); | 255 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); |
@@ -357,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
357 | } | 368 | } |
358 | i915_save_palette(dev, PIPE_B); | 369 | i915_save_palette(dev, PIPE_B); |
359 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); | 370 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); |
371 | |||
372 | /* Fences */ | ||
373 | switch (INTEL_INFO(dev)->gen) { | ||
374 | case 6: | ||
375 | for (i = 0; i < 16; i++) | ||
376 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
377 | break; | ||
378 | case 5: | ||
379 | case 4: | ||
380 | for (i = 0; i < 16; i++) | ||
381 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
382 | break; | ||
383 | case 3: | ||
384 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
385 | for (i = 0; i < 8; i++) | ||
386 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
387 | case 2: | ||
388 | for (i = 0; i < 8; i++) | ||
389 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
390 | break; | ||
391 | } | ||
392 | |||
360 | return; | 393 | return; |
361 | } | 394 | } |
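One subtlety in the fence-save switch just moved here: case 3 intentionally has no break, so 945G/945GM/G33 first save the eight upper fence registers and then fall into case 2 for the eight FENCE_REG_830_0 slots. Written with an explicit marker, the intent reads:

	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				dev_priv->saveFENCE[i+8] =
					I915_READ(FENCE_REG_945_8 + (i * 4));
		/* fall through: all gen2/3 parts have the 830-style fences */
	case 2:
		for (i = 0; i < 8; i++)
			dev_priv->saveFENCE[i] =
				I915_READ(FENCE_REG_830_0 + (i * 4));
		break;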
362 | 395 | ||
@@ -365,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
365 | struct drm_i915_private *dev_priv = dev->dev_private; | 398 | struct drm_i915_private *dev_priv = dev->dev_private; |
366 | int dpll_a_reg, fpa0_reg, fpa1_reg; | 399 | int dpll_a_reg, fpa0_reg, fpa1_reg; |
367 | int dpll_b_reg, fpb0_reg, fpb1_reg; | 400 | int dpll_b_reg, fpb0_reg, fpb1_reg; |
401 | int i; | ||
368 | 402 | ||
369 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 403 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
370 | return; | 404 | return; |
371 | 405 | ||
406 | /* Fences */ | ||
407 | switch (INTEL_INFO(dev)->gen) { | ||
408 | case 6: | ||
409 | for (i = 0; i < 16; i++) | ||
410 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
411 | break; | ||
412 | case 5: | ||
413 | case 4: | ||
414 | for (i = 0; i < 16; i++) | ||
415 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
416 | break; | ||
417 | case 3: | ||
418 | case 2: | ||
419 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
420 | for (i = 0; i < 8; i++) | ||
421 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | ||
422 | for (i = 0; i < 8; i++) | ||
423 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
424 | break; | ||
425 | } | ||
426 | |||
427 | |||
372 | if (HAS_PCH_SPLIT(dev)) { | 428 | if (HAS_PCH_SPLIT(dev)) { |
373 | dpll_a_reg = PCH_DPLL_A; | 429 | dpll_a_reg = PCH_DPLL_A; |
374 | dpll_b_reg = PCH_DPLL_B; | 430 | dpll_b_reg = PCH_DPLL_B; |
@@ -529,6 +585,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
529 | I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); | 585 | I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); |
530 | I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); | 586 | I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); |
531 | 587 | ||
588 | /* Cursor state */ | ||
589 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); | ||
590 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); | ||
591 | I915_WRITE(CURABASE, dev_priv->saveCURABASE); | ||
592 | I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); | ||
593 | I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); | ||
594 | I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); | ||
595 | if (IS_GEN2(dev)) | ||
596 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | ||
597 | |||
532 | return; | 598 | return; |
533 | } | 599 | } |
534 | 600 | ||
@@ -543,16 +609,6 @@ void i915_save_display(struct drm_device *dev) | |||
543 | /* Don't save them in KMS mode */ | 609 | /* Don't save them in KMS mode */ |
544 | i915_save_modeset_reg(dev); | 610 | i915_save_modeset_reg(dev); |
545 | 611 | ||
546 | /* Cursor state */ | ||
547 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); | ||
548 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); | ||
549 | dev_priv->saveCURABASE = I915_READ(CURABASE); | ||
550 | dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); | ||
551 | dev_priv->saveCURBPOS = I915_READ(CURBPOS); | ||
552 | dev_priv->saveCURBBASE = I915_READ(CURBBASE); | ||
553 | if (IS_GEN2(dev)) | ||
554 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | ||
555 | |||
556 | /* CRT state */ | 612 | /* CRT state */ |
557 | if (HAS_PCH_SPLIT(dev)) { | 613 | if (HAS_PCH_SPLIT(dev)) { |
558 | dev_priv->saveADPA = I915_READ(PCH_ADPA); | 614 | dev_priv->saveADPA = I915_READ(PCH_ADPA); |
@@ -657,16 +713,6 @@ void i915_restore_display(struct drm_device *dev) | |||
657 | /* Don't restore them in KMS mode */ | 713 | /* Don't restore them in KMS mode */ |
658 | i915_restore_modeset_reg(dev); | 714 | i915_restore_modeset_reg(dev); |
659 | 715 | ||
660 | /* Cursor state */ | ||
661 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); | ||
662 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); | ||
663 | I915_WRITE(CURABASE, dev_priv->saveCURABASE); | ||
664 | I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); | ||
665 | I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); | ||
666 | I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); | ||
667 | if (IS_GEN2(dev)) | ||
668 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | ||
669 | |||
670 | /* CRT state */ | 716 | /* CRT state */ |
671 | if (HAS_PCH_SPLIT(dev)) | 717 | if (HAS_PCH_SPLIT(dev)) |
672 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); | 718 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); |
@@ -694,7 +740,7 @@ void i915_restore_display(struct drm_device *dev) | |||
694 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | 740 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); |
695 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); | 741 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); |
696 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); | 742 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); |
697 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 743 | I915_WRITE(RSTDBYCTL, |
698 | dev_priv->saveMCHBAR_RENDER_STANDBY); | 744 | dev_priv->saveMCHBAR_RENDER_STANDBY); |
699 | } else { | 745 | } else { |
700 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); | 746 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); |
@@ -765,14 +811,16 @@ int i915_save_state(struct drm_device *dev) | |||
765 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); | 811 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); |
766 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); | 812 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); |
767 | dev_priv->saveMCHBAR_RENDER_STANDBY = | 813 | dev_priv->saveMCHBAR_RENDER_STANDBY = |
768 | I915_READ(MCHBAR_RENDER_STANDBY); | 814 | I915_READ(RSTDBYCTL); |
769 | } else { | 815 | } else { |
770 | dev_priv->saveIER = I915_READ(IER); | 816 | dev_priv->saveIER = I915_READ(IER); |
771 | dev_priv->saveIMR = I915_READ(IMR); | 817 | dev_priv->saveIMR = I915_READ(IMR); |
772 | } | 818 | } |
773 | 819 | ||
774 | if (HAS_PCH_SPLIT(dev)) | 820 | if (IS_IRONLAKE_M(dev)) |
775 | ironlake_disable_drps(dev); | 821 | ironlake_disable_drps(dev); |
822 | if (IS_GEN6(dev)) | ||
823 | gen6_disable_rps(dev); | ||
776 | 824 | ||
777 | /* Cache mode state */ | 825 | /* Cache mode state */ |
778 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | 826 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); |
@@ -788,28 +836,6 @@ int i915_save_state(struct drm_device *dev) | |||
788 | for (i = 0; i < 3; i++) | 836 | for (i = 0; i < 3; i++) |
789 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | 837 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); |
790 | 838 | ||
791 | /* Fences */ | ||
792 | switch (INTEL_INFO(dev)->gen) { | ||
793 | case 6: | ||
794 | for (i = 0; i < 16; i++) | ||
795 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
796 | break; | ||
797 | case 5: | ||
798 | case 4: | ||
799 | for (i = 0; i < 16; i++) | ||
800 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
801 | break; | ||
802 | case 3: | ||
803 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
804 | for (i = 0; i < 8; i++) | ||
805 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
806 | case 2: | ||
807 | for (i = 0; i < 8; i++) | ||
808 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
809 | break; | ||
810 | |||
811 | } | ||
812 | |||
813 | return 0; | 839 | return 0; |
814 | } | 840 | } |
815 | 841 | ||
@@ -823,27 +849,6 @@ int i915_restore_state(struct drm_device *dev) | |||
823 | /* Hardware status page */ | 849 | /* Hardware status page */ |
824 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 850 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
825 | 851 | ||
826 | /* Fences */ | ||
827 | switch (INTEL_INFO(dev)->gen) { | ||
828 | case 6: | ||
829 | for (i = 0; i < 16; i++) | ||
830 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
831 | break; | ||
832 | case 5: | ||
833 | case 4: | ||
834 | for (i = 0; i < 16; i++) | ||
835 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | ||
836 | break; | ||
837 | case 3: | ||
838 | case 2: | ||
839 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
840 | for (i = 0; i < 8; i++) | ||
841 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | ||
842 | for (i = 0; i < 8; i++) | ||
843 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | ||
844 | break; | ||
845 | } | ||
846 | |||
847 | i915_restore_display(dev); | 852 | i915_restore_display(dev); |
848 | 853 | ||
849 | /* Interrupt state */ | 854 | /* Interrupt state */ |
@@ -860,13 +865,16 @@ int i915_restore_state(struct drm_device *dev) | |||
860 | } | 865 | } |
861 | 866 | ||
862 | /* Clock gating state */ | 867 | /* Clock gating state */ |
863 | intel_init_clock_gating(dev); | 868 | intel_enable_clock_gating(dev); |
864 | 869 | ||
865 | if (HAS_PCH_SPLIT(dev)) { | 870 | if (IS_IRONLAKE_M(dev)) { |
866 | ironlake_enable_drps(dev); | 871 | ironlake_enable_drps(dev); |
867 | intel_init_emon(dev); | 872 | intel_init_emon(dev); |
868 | } | 873 | } |
869 | 874 | ||
875 | if (IS_GEN6(dev)) | ||
876 | gen6_enable_rps(dev_priv); | ||
877 | |||
870 | /* Cache mode state */ | 878 | /* Cache mode state */ |
871 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); | 879 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); |
872 | 880 | ||
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fea97a21cc14..7f0fc3ed61aa 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/tracepoint.h> | 6 | #include <linux/tracepoint.h> |
7 | 7 | ||
8 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
9 | #include "i915_drv.h" | ||
9 | 10 | ||
10 | #undef TRACE_SYSTEM | 11 | #undef TRACE_SYSTEM |
11 | #define TRACE_SYSTEM i915 | 12 | #define TRACE_SYSTEM i915 |
@@ -16,18 +17,18 @@ | |||
16 | 17 | ||
17 | TRACE_EVENT(i915_gem_object_create, | 18 | TRACE_EVENT(i915_gem_object_create, |
18 | 19 | ||
19 | TP_PROTO(struct drm_gem_object *obj), | 20 | TP_PROTO(struct drm_i915_gem_object *obj), |
20 | 21 | ||
21 | TP_ARGS(obj), | 22 | TP_ARGS(obj), |
22 | 23 | ||
23 | TP_STRUCT__entry( | 24 | TP_STRUCT__entry( |
24 | __field(struct drm_gem_object *, obj) | 25 | __field(struct drm_i915_gem_object *, obj) |
25 | __field(u32, size) | 26 | __field(u32, size) |
26 | ), | 27 | ), |
27 | 28 | ||
28 | TP_fast_assign( | 29 | TP_fast_assign( |
29 | __entry->obj = obj; | 30 | __entry->obj = obj; |
30 | __entry->size = obj->size; | 31 | __entry->size = obj->base.size; |
31 | ), | 32 | ), |
32 | 33 | ||
33 | TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) | 34 | TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) |
@@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create, | |||
35 | 36 | ||
36 | TRACE_EVENT(i915_gem_object_bind, | 37 | TRACE_EVENT(i915_gem_object_bind, |
37 | 38 | ||
38 | TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), | 39 | TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable), |
39 | 40 | ||
40 | TP_ARGS(obj, gtt_offset), | 41 | TP_ARGS(obj, gtt_offset, mappable), |
41 | 42 | ||
42 | TP_STRUCT__entry( | 43 | TP_STRUCT__entry( |
43 | __field(struct drm_gem_object *, obj) | 44 | __field(struct drm_i915_gem_object *, obj) |
44 | __field(u32, gtt_offset) | 45 | __field(u32, gtt_offset) |
46 | __field(bool, mappable) | ||
45 | ), | 47 | ), |
46 | 48 | ||
47 | TP_fast_assign( | 49 | TP_fast_assign( |
48 | __entry->obj = obj; | 50 | __entry->obj = obj; |
49 | __entry->gtt_offset = gtt_offset; | 51 | __entry->gtt_offset = gtt_offset; |
52 | __entry->mappable = mappable; | ||
50 | ), | 53 | ), |
51 | 54 | ||
52 | TP_printk("obj=%p, gtt_offset=%08x", | 55 | TP_printk("obj=%p, gtt_offset=%08x%s", |
53 | __entry->obj, __entry->gtt_offset) | 56 | __entry->obj, __entry->gtt_offset, |
57 | __entry->mappable ? ", mappable" : "") | ||
54 | ); | 58 | ); |
55 | 59 | ||
56 | TRACE_EVENT(i915_gem_object_change_domain, | 60 | TRACE_EVENT(i915_gem_object_change_domain, |
57 | 61 | ||
58 | TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), | 62 | TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), |
59 | 63 | ||
60 | TP_ARGS(obj, old_read_domains, old_write_domain), | 64 | TP_ARGS(obj, old_read_domains, old_write_domain), |
61 | 65 | ||
62 | TP_STRUCT__entry( | 66 | TP_STRUCT__entry( |
63 | __field(struct drm_gem_object *, obj) | 67 | __field(struct drm_i915_gem_object *, obj) |
64 | __field(u32, read_domains) | 68 | __field(u32, read_domains) |
65 | __field(u32, write_domain) | 69 | __field(u32, write_domain) |
66 | ), | 70 | ), |
67 | 71 | ||
68 | TP_fast_assign( | 72 | TP_fast_assign( |
69 | __entry->obj = obj; | 73 | __entry->obj = obj; |
70 | __entry->read_domains = obj->read_domains | (old_read_domains << 16); | 74 | __entry->read_domains = obj->base.read_domains | (old_read_domains << 16); |
71 | __entry->write_domain = obj->write_domain | (old_write_domain << 16); | 75 | __entry->write_domain = obj->base.write_domain | (old_write_domain << 16); |
72 | ), | 76 | ), |
73 | 77 | ||
74 | TP_printk("obj=%p, read=%04x, write=%04x", | 78 | TP_printk("obj=%p, read=%04x, write=%04x", |
@@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain, | |||
76 | __entry->read_domains, __entry->write_domain) | 80 | __entry->read_domains, __entry->write_domain) |
77 | ); | 81 | ); |
78 | 82 | ||
79 | TRACE_EVENT(i915_gem_object_get_fence, | ||
80 | |||
81 | TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), | ||
82 | |||
83 | TP_ARGS(obj, fence, tiling_mode), | ||
84 | |||
85 | TP_STRUCT__entry( | ||
86 | __field(struct drm_gem_object *, obj) | ||
87 | __field(int, fence) | ||
88 | __field(int, tiling_mode) | ||
89 | ), | ||
90 | |||
91 | TP_fast_assign( | ||
92 | __entry->obj = obj; | ||
93 | __entry->fence = fence; | ||
94 | __entry->tiling_mode = tiling_mode; | ||
95 | ), | ||
96 | |||
97 | TP_printk("obj=%p, fence=%d, tiling=%d", | ||
98 | __entry->obj, __entry->fence, __entry->tiling_mode) | ||
99 | ); | ||
100 | |||
101 | DECLARE_EVENT_CLASS(i915_gem_object, | 83 | DECLARE_EVENT_CLASS(i915_gem_object, |
102 | 84 | ||
103 | TP_PROTO(struct drm_gem_object *obj), | 85 | TP_PROTO(struct drm_i915_gem_object *obj), |
104 | 86 | ||
105 | TP_ARGS(obj), | 87 | TP_ARGS(obj), |
106 | 88 | ||
107 | TP_STRUCT__entry( | 89 | TP_STRUCT__entry( |
108 | __field(struct drm_gem_object *, obj) | 90 | __field(struct drm_i915_gem_object *, obj) |
109 | ), | 91 | ), |
110 | 92 | ||
111 | TP_fast_assign( | 93 | TP_fast_assign( |
@@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object, | |||
117 | 99 | ||
118 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, | 100 | DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, |
119 | 101 | ||
120 | TP_PROTO(struct drm_gem_object *obj), | 102 | TP_PROTO(struct drm_i915_gem_object *obj), |
121 | 103 | ||
122 | TP_ARGS(obj) | 104 | TP_ARGS(obj) |
123 | ); | 105 | ); |
124 | 106 | ||
125 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, | 107 | DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, |
126 | 108 | ||
127 | TP_PROTO(struct drm_gem_object *obj), | 109 | TP_PROTO(struct drm_i915_gem_object *obj), |
128 | 110 | ||
129 | TP_ARGS(obj) | 111 | TP_ARGS(obj) |
130 | ); | 112 | ); |
131 | 113 | ||
132 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, | 114 | DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, |
133 | 115 | ||
134 | TP_PROTO(struct drm_gem_object *obj), | 116 | TP_PROTO(struct drm_i915_gem_object *obj), |
135 | 117 | ||
136 | TP_ARGS(obj) | 118 | TP_ARGS(obj) |
137 | ); | 119 | ); |
@@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end, | |||
263 | ); | 245 | ); |
264 | 246 | ||
265 | TRACE_EVENT(i915_flip_request, | 247 | TRACE_EVENT(i915_flip_request, |
266 | TP_PROTO(int plane, struct drm_gem_object *obj), | 248 | TP_PROTO(int plane, struct drm_i915_gem_object *obj), |
267 | 249 | ||
268 | TP_ARGS(plane, obj), | 250 | TP_ARGS(plane, obj), |
269 | 251 | ||
270 | TP_STRUCT__entry( | 252 | TP_STRUCT__entry( |
271 | __field(int, plane) | 253 | __field(int, plane) |
272 | __field(struct drm_gem_object *, obj) | 254 | __field(struct drm_i915_gem_object *, obj) |
273 | ), | 255 | ), |
274 | 256 | ||
275 | TP_fast_assign( | 257 | TP_fast_assign( |
@@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request, | |||
281 | ); | 263 | ); |
282 | 264 | ||
283 | TRACE_EVENT(i915_flip_complete, | 265 | TRACE_EVENT(i915_flip_complete, |
284 | TP_PROTO(int plane, struct drm_gem_object *obj), | 266 | TP_PROTO(int plane, struct drm_i915_gem_object *obj), |
285 | 267 | ||
286 | TP_ARGS(plane, obj), | 268 | TP_ARGS(plane, obj), |
287 | 269 | ||
288 | TP_STRUCT__entry( | 270 | TP_STRUCT__entry( |
289 | __field(int, plane) | 271 | __field(int, plane) |
290 | __field(struct drm_gem_object *, obj) | 272 | __field(struct drm_i915_gem_object *, obj) |
291 | ), | 273 | ), |
292 | 274 | ||
293 | TP_fast_assign( | 275 | TP_fast_assign( |
@@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete, | |||
298 | TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) | 280 | TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) |
299 | ); | 281 | ); |
300 | 282 | ||
283 | TRACE_EVENT(i915_reg_rw, | ||
284 | TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len), | ||
285 | |||
286 | TP_ARGS(cmd, reg, val, len), | ||
287 | |||
288 | TP_STRUCT__entry( | ||
289 | __field(int, cmd) | ||
290 | __field(uint32_t, reg) | ||
291 | __field(uint64_t, val) | ||
292 | __field(int, len) | ||
293 | ), | ||
294 | |||
295 | TP_fast_assign( | ||
296 | __entry->cmd = cmd; | ||
297 | __entry->reg = reg; | ||
298 | __entry->val = (uint64_t)val; | ||
299 | __entry->len = len; | ||
300 | ), | ||
301 | |||
302 | TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d", | ||
303 | __entry->cmd, __entry->reg, __entry->val, __entry->len) | ||
304 | ); | ||
305 | |||
301 | #endif /* _I915_TRACE_H_ */ | 306 | #endif /* _I915_TRACE_H_ */ |
302 | 307 | ||
303 | /* This part must be outside protection */ | 308 | /* This part must be outside protection */ |
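The new i915_reg_rw event makes MMIO traffic visible to ftrace. A hedged usage sketch follows; the wrapper name and the 'R'/'W' command encoding are assumptions suggested by the "cmd=%c" format string, not confirmed call sites:

    /* Hypothetical instrumented read: trace_i915_reg_rw() is the
     * function generated by the TRACE_EVENT above.
     */
    static inline u32 traced_read32(struct drm_i915_private *dev_priv,
                                    u32 reg)
    {
            u32 val = readl(dev_priv->regs + reg);

            trace_i915_reg_rw('R', reg, (u64)val, 4);
            return val;
    }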
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 65c88f9ba12c..2cb8e0b9f1ee 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c | |||
@@ -190,37 +190,6 @@ out: | |||
190 | kfree(output.pointer); | 190 | kfree(output.pointer); |
191 | } | 191 | } |
192 | 192 | ||
193 | static int intel_dsm_switchto(enum vga_switcheroo_client_id id) | ||
194 | { | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static int intel_dsm_power_state(enum vga_switcheroo_client_id id, | ||
199 | enum vga_switcheroo_state state) | ||
200 | { | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int intel_dsm_init(void) | ||
205 | { | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int intel_dsm_get_client_id(struct pci_dev *pdev) | ||
210 | { | ||
211 | if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) | ||
212 | return VGA_SWITCHEROO_IGD; | ||
213 | else | ||
214 | return VGA_SWITCHEROO_DIS; | ||
215 | } | ||
216 | |||
217 | static struct vga_switcheroo_handler intel_dsm_handler = { | ||
218 | .switchto = intel_dsm_switchto, | ||
219 | .power_state = intel_dsm_power_state, | ||
220 | .init = intel_dsm_init, | ||
221 | .get_client_id = intel_dsm_get_client_id, | ||
222 | }; | ||
223 | |||
224 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) | 193 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) |
225 | { | 194 | { |
226 | acpi_handle dhandle, intel_handle; | 195 | acpi_handle dhandle, intel_handle; |
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void) | |||
276 | { | 245 | { |
277 | if (!intel_dsm_detect()) | 246 | if (!intel_dsm_detect()) |
278 | return; | 247 | return; |
279 | |||
280 | vga_switcheroo_register_handler(&intel_dsm_handler); | ||
281 | } | 248 | } |
282 | 249 | ||
283 | void intel_unregister_dsm_handler(void) | 250 | void intel_unregister_dsm_handler(void) |
284 | { | 251 | { |
285 | vga_switcheroo_unregister_handler(); | ||
286 | } | 252 | } |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b0b1200ed650..0b44956c336b 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -264,17 +264,12 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
264 | dev_priv->int_crt_support = general->int_crt_support; | 264 | dev_priv->int_crt_support = general->int_crt_support; |
265 | dev_priv->lvds_use_ssc = general->enable_ssc; | 265 | dev_priv->lvds_use_ssc = general->enable_ssc; |
266 | 266 | ||
267 | if (dev_priv->lvds_use_ssc) { | 267 | if (IS_I85X(dev)) |
268 | if (IS_I85X(dev)) | 268 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; |
269 | dev_priv->lvds_ssc_freq = | 269 | else if (IS_GEN5(dev) || IS_GEN6(dev)) |
270 | general->ssc_freq ? 66 : 48; | 270 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120; |
271 | else if (IS_GEN5(dev) || IS_GEN6(dev)) | 271 | else |
272 | dev_priv->lvds_ssc_freq = | 272 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; |
273 | general->ssc_freq ? 100 : 120; | ||
274 | else | ||
275 | dev_priv->lvds_ssc_freq = | ||
276 | general->ssc_freq ? 100 : 96; | ||
277 | } | ||
278 | } | 273 | } |
279 | } | 274 | } |
280 | 275 | ||
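parse_general_features() now records the SSC reference frequency unconditionally instead of only when lvds_use_ssc is set. The VBT bit selects between two per-platform frequencies in MHz; a condensed sketch of the mapping, with a hypothetical helper name and values taken from the hunk above:

    static int vbt_ssc_freq_mhz(struct drm_device *dev, bool ssc_freq)
    {
            if (IS_I85X(dev))
                    return ssc_freq ? 66 : 48;
            if (IS_GEN5(dev) || IS_GEN6(dev))
                    return ssc_freq ? 100 : 120;
            return ssc_freq ? 100 : 96;
    }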
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 8df574316063..8a77ff4a7237 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "drm_crtc.h" | 31 | #include "drm_crtc.h" |
32 | #include "drm_crtc_helper.h" | 32 | #include "drm_crtc_helper.h" |
33 | #include "drm_edid.h" | ||
33 | #include "intel_drv.h" | 34 | #include "intel_drv.h" |
34 | #include "i915_drm.h" | 35 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
@@ -287,8 +288,9 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus) | |||
287 | return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; | 288 | return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; |
288 | } | 289 | } |
289 | 290 | ||
290 | static bool intel_crt_detect_ddc(struct intel_crt *crt) | 291 | static bool intel_crt_detect_ddc(struct drm_connector *connector) |
291 | { | 292 | { |
293 | struct intel_crt *crt = intel_attached_crt(connector); | ||
292 | struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; | 294 | struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; |
293 | 295 | ||
294 | /* CRT should always be at 0, but check anyway */ | 296 | /* CRT should always be at 0, but check anyway */ |
@@ -301,8 +303,26 @@ static bool intel_crt_detect_ddc(struct intel_crt *crt) | |||
301 | } | 303 | } |
302 | 304 | ||
303 | if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { | 305 | if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { |
304 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | 306 | struct edid *edid; |
305 | return true; | 307 | bool is_digital = false; |
308 | |||
309 | edid = drm_get_edid(connector, | ||
310 | &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); | ||
311 | /* | ||
312 | * This may be a DVI-I connector with a shared DDC | ||
313 | * link between analog and digital outputs, so we | ||
314 | * have to check the EDID input spec of the attached device. | ||
315 | */ | ||
316 | if (edid != NULL) { | ||
317 | is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; | ||
318 | connector->display_info.raw_edid = NULL; | ||
319 | kfree(edid); | ||
320 | } | ||
321 | |||
322 | if (!is_digital) { | ||
323 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | ||
324 | return true; | ||
325 | } | ||
306 | } | 326 | } |
307 | 327 | ||
308 | return false; | 328 | return false; |
@@ -458,7 +478,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
458 | } | 478 | } |
459 | } | 479 | } |
460 | 480 | ||
461 | if (intel_crt_detect_ddc(crt)) | 481 | if (intel_crt_detect_ddc(connector)) |
462 | return connector_status_connected; | 482 | return connector_status_connected; |
463 | 483 | ||
464 | if (!force) | 484 | if (!force) |
@@ -472,7 +492,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
472 | crtc = intel_get_load_detect_pipe(&crt->base, connector, | 492 | crtc = intel_get_load_detect_pipe(&crt->base, connector, |
473 | NULL, &dpms_mode); | 493 | NULL, &dpms_mode); |
474 | if (crtc) { | 494 | if (crtc) { |
475 | if (intel_crt_detect_ddc(crt)) | 495 | if (intel_crt_detect_ddc(connector)) |
476 | status = connector_status_connected; | 496 | status = connector_status_connected; |
477 | else | 497 | else |
478 | status = intel_crt_load_detect(crtc, crt); | 498 | status = intel_crt_load_detect(crtc, crt); |
@@ -515,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector, | |||
515 | return 0; | 535 | return 0; |
516 | } | 536 | } |
517 | 537 | ||
538 | static void intel_crt_reset(struct drm_connector *connector) | ||
539 | { | ||
540 | struct drm_device *dev = connector->dev; | ||
541 | struct intel_crt *crt = intel_attached_crt(connector); | ||
542 | |||
543 | if (HAS_PCH_SPLIT(dev)) | ||
544 | crt->force_hotplug_required = 1; | ||
545 | } | ||
546 | |||
518 | /* | 547 | /* |
519 | * Routines for controlling stuff on the analog port | 548 | * Routines for controlling stuff on the analog port |
520 | */ | 549 | */ |
@@ -528,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { | |||
528 | }; | 557 | }; |
529 | 558 | ||
530 | static const struct drm_connector_funcs intel_crt_connector_funcs = { | 559 | static const struct drm_connector_funcs intel_crt_connector_funcs = { |
560 | .reset = intel_crt_reset, | ||
531 | .dpms = drm_helper_connector_dpms, | 561 | .dpms = drm_helper_connector_dpms, |
532 | .detect = intel_crt_detect, | 562 | .detect = intel_crt_detect, |
533 | .fill_modes = drm_helper_probe_single_connector_modes, | 563 | .fill_modes = drm_helper_probe_single_connector_modes, |
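CRT detection now has to disambiguate a DVI-I connector whose analog and digital halves share one DDC link: an EDID that advertises a digital input means the probe reached the digital sink, so the analog path must not report connected. A minimal sketch of that decision; the helper name is hypothetical, and the EDID buffer follows drm_get_edid() ownership, which allocates memory the caller must free:

    /* Hypothetical helper: "is there an analog sink on this DDC pin?" */
    static bool crt_edid_is_analog(struct drm_connector *connector,
                                   struct i2c_adapter *adapter)
    {
            struct edid *edid = drm_get_edid(connector, adapter);
            bool have_edid = edid != NULL;
            bool is_digital = have_edid &&
                              (edid->input & DRM_EDID_INPUT_DIGITAL);

            kfree(edid);    /* drm_get_edid() allocates; caller frees */
            return have_edid && !is_digital;
    }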
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bee24b1a58e8..7e42aa586504 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = { | |||
642 | .find_pll = intel_find_pll_ironlake_dp, | 642 | .find_pll = intel_find_pll_ironlake_dp, |
643 | }; | 643 | }; |
644 | 644 | ||
645 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 645 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
646 | int refclk) | ||
646 | { | 647 | { |
647 | struct drm_device *dev = crtc->dev; | 648 | struct drm_device *dev = crtc->dev; |
648 | struct drm_i915_private *dev_priv = dev->dev_private; | 649 | struct drm_i915_private *dev_priv = dev->dev_private; |
649 | const intel_limit_t *limit; | 650 | const intel_limit_t *limit; |
650 | int refclk = 120; | ||
651 | 651 | ||
652 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 652 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
653 | if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) | ||
654 | refclk = 100; | ||
655 | |||
656 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | 653 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == |
657 | LVDS_CLKB_POWER_UP) { | 654 | LVDS_CLKB_POWER_UP) { |
658 | /* LVDS dual channel */ | 655 | /* LVDS dual channel */ |
659 | if (refclk == 100) | 656 | if (refclk == 100000) |
660 | limit = &intel_limits_ironlake_dual_lvds_100m; | 657 | limit = &intel_limits_ironlake_dual_lvds_100m; |
661 | else | 658 | else |
662 | limit = &intel_limits_ironlake_dual_lvds; | 659 | limit = &intel_limits_ironlake_dual_lvds; |
663 | } else { | 660 | } else { |
664 | if (refclk == 100) | 661 | if (refclk == 100000) |
665 | limit = &intel_limits_ironlake_single_lvds_100m; | 662 | limit = &intel_limits_ironlake_single_lvds_100m; |
666 | else | 663 | else |
667 | limit = &intel_limits_ironlake_single_lvds; | 664 | limit = &intel_limits_ironlake_single_lvds; |
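intel_ironlake_limit() now takes refclk from its caller in kHz, which is why the 100 MHz comparisons become 100000. A hedged sketch of what a caller might pass; the 120000 default and the MHz-to-kHz conversion are assumptions consistent with the units in this hunk, not confirmed caller code:

    /* Hypothetical caller: choose the reference clock in kHz before
     * looking up the PLL limits.
     */
    int refclk = 120000;

    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
        dev_priv->lvds_use_ssc)
            refclk = dev_priv->lvds_ssc_freq * 1000; /* MHz -> kHz */
    limit = intel_limit(crtc, refclk);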
@@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | |||
702 | return limit; | 699 | return limit; |
703 | } | 700 | } |
704 | 701 | ||
705 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | 702 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
706 | { | 703 | { |
707 | struct drm_device *dev = crtc->dev; | 704 | struct drm_device *dev = crtc->dev; |
708 | const intel_limit_t *limit; | 705 | const intel_limit_t *limit; |
709 | 706 | ||
710 | if (HAS_PCH_SPLIT(dev)) | 707 | if (HAS_PCH_SPLIT(dev)) |
711 | limit = intel_ironlake_limit(crtc); | 708 | limit = intel_ironlake_limit(crtc, refclk); |
712 | else if (IS_G4X(dev)) { | 709 | else if (IS_G4X(dev)) { |
713 | limit = intel_g4x_limit(crtc); | 710 | limit = intel_g4x_limit(crtc); |
714 | } else if (IS_PINEVIEW(dev)) { | 711 | } else if (IS_PINEVIEW(dev)) { |
@@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type) | |||
773 | * the given connectors. | 770 | * the given connectors. |
774 | */ | 771 | */ |
775 | 772 | ||
776 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | 773 | static bool intel_PLL_is_valid(struct drm_device *dev, |
774 | const intel_limit_t *limit, | ||
775 | const intel_clock_t *clock) | ||
777 | { | 776 | { |
778 | const intel_limit_t *limit = intel_limit (crtc); | ||
779 | struct drm_device *dev = crtc->dev; | ||
780 | |||
781 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | 777 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
782 | INTELPllInvalid ("p1 out of range\n"); | 778 | INTELPllInvalid ("p1 out of range\n"); |
783 | if (clock->p < limit->p.min || limit->p.max < clock->p) | 779 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
@@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
849 | int this_err; | 845 | int this_err; |
850 | 846 | ||
851 | intel_clock(dev, refclk, &clock); | 847 | intel_clock(dev, refclk, &clock); |
852 | 848 | if (!intel_PLL_is_valid(dev, limit, | |
853 | if (!intel_PLL_is_valid(crtc, &clock)) | 849 | &clock)) |
854 | continue; | 850 | continue; |
855 | 851 | ||
856 | this_err = abs(clock.dot - target); | 852 | this_err = abs(clock.dot - target); |
@@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
912 | int this_err; | 908 | int this_err; |
913 | 909 | ||
914 | intel_clock(dev, refclk, &clock); | 910 | intel_clock(dev, refclk, &clock); |
915 | if (!intel_PLL_is_valid(crtc, &clock)) | 911 | if (!intel_PLL_is_valid(dev, limit, |
912 | &clock)) | ||
916 | continue; | 913 | continue; |
917 | this_err = abs(clock.dot - target) ; | 914 | |
915 | this_err = abs(clock.dot - target); | ||
918 | if (this_err < err_most) { | 916 | if (this_err < err_most) { |
919 | *best_clock = clock; | 917 | *best_clock = clock; |
920 | err_most = this_err; | 918 | err_most = this_err; |
@@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1066 | struct drm_i915_private *dev_priv = dev->dev_private; | 1064 | struct drm_i915_private *dev_priv = dev->dev_private; |
1067 | struct drm_framebuffer *fb = crtc->fb; | 1065 | struct drm_framebuffer *fb = crtc->fb; |
1068 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1066 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1069 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1067 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1070 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1068 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1071 | int plane, i; | 1069 | int plane, i; |
1072 | u32 fbc_ctl, fbc_ctl2; | 1070 | u32 fbc_ctl, fbc_ctl2; |
1073 | 1071 | ||
1074 | if (fb->pitch == dev_priv->cfb_pitch && | 1072 | if (fb->pitch == dev_priv->cfb_pitch && |
1075 | obj_priv->fence_reg == dev_priv->cfb_fence && | 1073 | obj->fence_reg == dev_priv->cfb_fence && |
1076 | intel_crtc->plane == dev_priv->cfb_plane && | 1074 | intel_crtc->plane == dev_priv->cfb_plane && |
1077 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) | 1075 | I915_READ(FBC_CONTROL) & FBC_CTL_EN) |
1078 | return; | 1076 | return; |
@@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1086 | 1084 | ||
1087 | /* FBC_CTL wants 64B units */ | 1085 | /* FBC_CTL wants 64B units */ |
1088 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1086 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1089 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1087 | dev_priv->cfb_fence = obj->fence_reg; |
1090 | dev_priv->cfb_plane = intel_crtc->plane; | 1088 | dev_priv->cfb_plane = intel_crtc->plane; |
1091 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | 1089 | plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; |
1092 | 1090 | ||
@@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1096 | 1094 | ||
1097 | /* Set it up... */ | 1095 | /* Set it up... */ |
1098 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; | 1096 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; |
1099 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1097 | if (obj->tiling_mode != I915_TILING_NONE) |
1100 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; | 1098 | fbc_ctl2 |= FBC_CTL_CPU_FENCE; |
1101 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | 1099 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
1102 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | 1100 | I915_WRITE(FBC_FENCE_OFF, crtc->y); |
@@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1107 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | 1105 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
1108 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1106 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
1109 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1107 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
1110 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1108 | if (obj->tiling_mode != I915_TILING_NONE) |
1111 | fbc_ctl |= dev_priv->cfb_fence; | 1109 | fbc_ctl |= dev_priv->cfb_fence; |
1112 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1110 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1113 | 1111 | ||
@@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1150 | struct drm_i915_private *dev_priv = dev->dev_private; | 1148 | struct drm_i915_private *dev_priv = dev->dev_private; |
1151 | struct drm_framebuffer *fb = crtc->fb; | 1149 | struct drm_framebuffer *fb = crtc->fb; |
1152 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1150 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1153 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1151 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1154 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1152 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1155 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; | 1153 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1156 | unsigned long stall_watermark = 200; | 1154 | unsigned long stall_watermark = 200; |
@@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1159 | dpfc_ctl = I915_READ(DPFC_CONTROL); | 1157 | dpfc_ctl = I915_READ(DPFC_CONTROL); |
1160 | if (dpfc_ctl & DPFC_CTL_EN) { | 1158 | if (dpfc_ctl & DPFC_CTL_EN) { |
1161 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | 1159 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && |
1162 | dev_priv->cfb_fence == obj_priv->fence_reg && | 1160 | dev_priv->cfb_fence == obj->fence_reg && |
1163 | dev_priv->cfb_plane == intel_crtc->plane && | 1161 | dev_priv->cfb_plane == intel_crtc->plane && |
1164 | dev_priv->cfb_y == crtc->y) | 1162 | dev_priv->cfb_y == crtc->y) |
1165 | return; | 1163 | return; |
@@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1170 | } | 1168 | } |
1171 | 1169 | ||
1172 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1170 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1173 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1171 | dev_priv->cfb_fence = obj->fence_reg; |
1174 | dev_priv->cfb_plane = intel_crtc->plane; | 1172 | dev_priv->cfb_plane = intel_crtc->plane; |
1175 | dev_priv->cfb_y = crtc->y; | 1173 | dev_priv->cfb_y = crtc->y; |
1176 | 1174 | ||
1177 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; | 1175 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
1178 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1176 | if (obj->tiling_mode != I915_TILING_NONE) { |
1179 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; | 1177 | dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; |
1180 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); | 1178 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
1181 | } else { | 1179 | } else { |
@@ -1215,13 +1213,33 @@ static bool g4x_fbc_enabled(struct drm_device *dev) | |||
1215 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | 1213 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
1216 | } | 1214 | } |
1217 | 1215 | ||
1216 | static void sandybridge_blit_fbc_update(struct drm_device *dev) | ||
1217 | { | ||
1218 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1219 | u32 blt_ecoskpd; | ||
1220 | |||
1221 | /* Make sure blitter notifies FBC of writes */ | ||
1222 | __gen6_force_wake_get(dev_priv); | ||
1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | ||
1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | ||
1225 | GEN6_BLITTER_LOCK_SHIFT; | ||
1226 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1227 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; | ||
1228 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1229 | blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << | ||
1230 | GEN6_BLITTER_LOCK_SHIFT); | ||
1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | ||
1233 | __gen6_force_wake_put(dev_priv); | ||
1234 | } | ||
1235 | |||
1218 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1219 | { | 1237 | { |
1220 | struct drm_device *dev = crtc->dev; | 1238 | struct drm_device *dev = crtc->dev; |
1221 | struct drm_i915_private *dev_priv = dev->dev_private; | 1239 | struct drm_i915_private *dev_priv = dev->dev_private; |
1222 | struct drm_framebuffer *fb = crtc->fb; | 1240 | struct drm_framebuffer *fb = crtc->fb; |
1223 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 1241 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1224 | struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); | 1242 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1225 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1243 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1226 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; | 1244 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1227 | unsigned long stall_watermark = 200; | 1245 | unsigned long stall_watermark = 200; |
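sandybridge_blit_fbc_update() brackets its register traffic with __gen6_force_wake_get()/__gen6_force_wake_put() so the writes land while the GT is awake, and programs ECOSKPD in three steps. The sequence suggests a lock-protected register: setting the bit's image in the high half (via GEN6_BLITTER_LOCK_SHIFT) unlocks it, the low-half write flips it, and a final write relocks it. A sketch of that pattern as a reusable helper; the lock semantics are an assumption read off the sequence, not documented behavior:

    /* Hypothetical helper: unlock/modify/relock one ECOSKPD bit. */
    static void gen6_blitter_set_ecoskpd_bit(struct drm_i915_private *dev_priv,
                                             u32 bit)
    {
            u32 val = I915_READ(GEN6_BLITTER_ECOSKPD);

            val |= bit << GEN6_BLITTER_LOCK_SHIFT;      /* unlock */
            I915_WRITE(GEN6_BLITTER_ECOSKPD, val);
            val |= bit;                                 /* set */
            I915_WRITE(GEN6_BLITTER_ECOSKPD, val);
            val &= ~(bit << GEN6_BLITTER_LOCK_SHIFT);   /* relock */
            I915_WRITE(GEN6_BLITTER_ECOSKPD, val);
            POSTING_READ(GEN6_BLITTER_ECOSKPD);
    }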
@@ -1230,9 +1248,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1230 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | 1248 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1231 | if (dpfc_ctl & DPFC_CTL_EN) { | 1249 | if (dpfc_ctl & DPFC_CTL_EN) { |
1232 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && | 1250 | if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && |
1233 | dev_priv->cfb_fence == obj_priv->fence_reg && | 1251 | dev_priv->cfb_fence == obj->fence_reg && |
1234 | dev_priv->cfb_plane == intel_crtc->plane && | 1252 | dev_priv->cfb_plane == intel_crtc->plane && |
1235 | dev_priv->cfb_offset == obj_priv->gtt_offset && | 1253 | dev_priv->cfb_offset == obj->gtt_offset && |
1236 | dev_priv->cfb_y == crtc->y) | 1254 | dev_priv->cfb_y == crtc->y) |
1237 | return; | 1255 | return; |
1238 | 1256 | ||
@@ -1242,14 +1260,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1242 | } | 1260 | } |
1243 | 1261 | ||
1244 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; | 1262 | dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; |
1245 | dev_priv->cfb_fence = obj_priv->fence_reg; | 1263 | dev_priv->cfb_fence = obj->fence_reg; |
1246 | dev_priv->cfb_plane = intel_crtc->plane; | 1264 | dev_priv->cfb_plane = intel_crtc->plane; |
1247 | dev_priv->cfb_offset = obj_priv->gtt_offset; | 1265 | dev_priv->cfb_offset = obj->gtt_offset; |
1248 | dev_priv->cfb_y = crtc->y; | 1266 | dev_priv->cfb_y = crtc->y; |
1249 | 1267 | ||
1250 | dpfc_ctl &= DPFC_RESERVED; | 1268 | dpfc_ctl &= DPFC_RESERVED; |
1251 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | 1269 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); |
1252 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 1270 | if (obj->tiling_mode != I915_TILING_NONE) { |
1253 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); | 1271 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); |
1254 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); | 1272 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); |
1255 | } else { | 1273 | } else { |
@@ -1260,10 +1278,17 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1260 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | 1278 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1261 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | 1279 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
1262 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | 1280 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); |
1263 | I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); | 1281 | I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); |
1264 | /* enable it... */ | 1282 | /* enable it... */ |
1265 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | 1283 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
1266 | 1284 | ||
1285 | if (IS_GEN6(dev)) { | ||
1286 | I915_WRITE(SNB_DPFC_CTL_SA, | ||
1287 | SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); | ||
1288 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | ||
1289 | sandybridge_blit_fbc_update(dev); | ||
1290 | } | ||
1291 | |||
1267 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); | 1292 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1268 | } | 1293 | } |
1269 | 1294 | ||
@@ -1345,7 +1370,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1345 | struct intel_crtc *intel_crtc; | 1370 | struct intel_crtc *intel_crtc; |
1346 | struct drm_framebuffer *fb; | 1371 | struct drm_framebuffer *fb; |
1347 | struct intel_framebuffer *intel_fb; | 1372 | struct intel_framebuffer *intel_fb; |
1348 | struct drm_i915_gem_object *obj_priv; | 1373 | struct drm_i915_gem_object *obj; |
1349 | 1374 | ||
1350 | DRM_DEBUG_KMS("\n"); | 1375 | DRM_DEBUG_KMS("\n"); |
1351 | 1376 | ||
@@ -1384,9 +1409,9 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1384 | intel_crtc = to_intel_crtc(crtc); | 1409 | intel_crtc = to_intel_crtc(crtc); |
1385 | fb = crtc->fb; | 1410 | fb = crtc->fb; |
1386 | intel_fb = to_intel_framebuffer(fb); | 1411 | intel_fb = to_intel_framebuffer(fb); |
1387 | obj_priv = to_intel_bo(intel_fb->obj); | 1412 | obj = intel_fb->obj; |
1388 | 1413 | ||
1389 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1414 | if (intel_fb->obj->base.size > dev_priv->cfb_size) { |
1390 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1415 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1391 | "compression\n"); | 1416 | "compression\n"); |
1392 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | 1417 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
@@ -1410,7 +1435,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1410 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; | 1435 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
1411 | goto out_disable; | 1436 | goto out_disable; |
1412 | } | 1437 | } |
1413 | if (obj_priv->tiling_mode != I915_TILING_X) { | 1438 | if (obj->tiling_mode != I915_TILING_X) { |
1414 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); | 1439 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); |
1415 | dev_priv->no_fbc_reason = FBC_NOT_TILED; | 1440 | dev_priv->no_fbc_reason = FBC_NOT_TILED; |
1416 | goto out_disable; | 1441 | goto out_disable; |
@@ -1433,14 +1458,13 @@ out_disable: | |||
1433 | 1458 | ||
1434 | int | 1459 | int |
1435 | intel_pin_and_fence_fb_obj(struct drm_device *dev, | 1460 | intel_pin_and_fence_fb_obj(struct drm_device *dev, |
1436 | struct drm_gem_object *obj, | 1461 | struct drm_i915_gem_object *obj, |
1437 | bool pipelined) | 1462 | struct intel_ring_buffer *pipelined) |
1438 | { | 1463 | { |
1439 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1440 | u32 alignment; | 1464 | u32 alignment; |
1441 | int ret; | 1465 | int ret; |
1442 | 1466 | ||
1443 | switch (obj_priv->tiling_mode) { | 1467 | switch (obj->tiling_mode) { |
1444 | case I915_TILING_NONE: | 1468 | case I915_TILING_NONE: |
1445 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | 1469 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1446 | alignment = 128 * 1024; | 1470 | alignment = 128 * 1024; |
@@ -1461,7 +1485,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1461 | BUG(); | 1485 | BUG(); |
1462 | } | 1486 | } |
1463 | 1487 | ||
1464 | ret = i915_gem_object_pin(obj, alignment); | 1488 | ret = i915_gem_object_pin(obj, alignment, true); |
1465 | if (ret) | 1489 | if (ret) |
1466 | return ret; | 1490 | return ret; |
1467 | 1491 | ||
@@ -1474,9 +1498,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1474 | * framebuffer compression. For simplicity, we always install | 1498 | * framebuffer compression. For simplicity, we always install |
1475 | * a fence as the cost is not that onerous. | 1499 | * a fence as the cost is not that onerous. |
1476 | */ | 1500 | */ |
1477 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE && | 1501 | if (obj->tiling_mode != I915_TILING_NONE) { |
1478 | obj_priv->tiling_mode != I915_TILING_NONE) { | 1502 | ret = i915_gem_object_get_fence(obj, pipelined, false); |
1479 | ret = i915_gem_object_get_fence_reg(obj, false); | ||
1480 | if (ret) | 1503 | if (ret) |
1481 | goto err_unpin; | 1504 | goto err_unpin; |
1482 | } | 1505 | } |
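intel_pin_and_fence_fb_obj() now takes the ring doing the pinning (NULL for a synchronous pin, as the intel_pipe_set_base() hunk below shows) and installs a fence for every tiled object rather than only when no fence register was assigned yet. A hedged caller sketch, mirroring the NULL-pipelined call visible later in this file:

    /* Hypothetical caller: synchronous pin of a framebuffer object. */
    ret = intel_pin_and_fence_fb_obj(dev,
                                     to_intel_framebuffer(fb)->obj,
                                     NULL /* not pipelined */);
    if (ret)
            return ret;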
@@ -1497,8 +1520,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1497 | struct drm_i915_private *dev_priv = dev->dev_private; | 1520 | struct drm_i915_private *dev_priv = dev->dev_private; |
1498 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1521 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1499 | struct intel_framebuffer *intel_fb; | 1522 | struct intel_framebuffer *intel_fb; |
1500 | struct drm_i915_gem_object *obj_priv; | 1523 | struct drm_i915_gem_object *obj; |
1501 | struct drm_gem_object *obj; | ||
1502 | int plane = intel_crtc->plane; | 1524 | int plane = intel_crtc->plane; |
1503 | unsigned long Start, Offset; | 1525 | unsigned long Start, Offset; |
1504 | u32 dspcntr; | 1526 | u32 dspcntr; |
@@ -1515,7 +1537,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1515 | 1537 | ||
1516 | intel_fb = to_intel_framebuffer(fb); | 1538 | intel_fb = to_intel_framebuffer(fb); |
1517 | obj = intel_fb->obj; | 1539 | obj = intel_fb->obj; |
1518 | obj_priv = to_intel_bo(obj); | ||
1519 | 1540 | ||
1520 | reg = DSPCNTR(plane); | 1541 | reg = DSPCNTR(plane); |
1521 | dspcntr = I915_READ(reg); | 1542 | dspcntr = I915_READ(reg); |
@@ -1540,7 +1561,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1540 | return -EINVAL; | 1561 | return -EINVAL; |
1541 | } | 1562 | } |
1542 | if (INTEL_INFO(dev)->gen >= 4) { | 1563 | if (INTEL_INFO(dev)->gen >= 4) { |
1543 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1564 | if (obj->tiling_mode != I915_TILING_NONE) |
1544 | dspcntr |= DISPPLANE_TILED; | 1565 | dspcntr |= DISPPLANE_TILED; |
1545 | else | 1566 | else |
1546 | dspcntr &= ~DISPPLANE_TILED; | 1567 | dspcntr &= ~DISPPLANE_TILED; |
@@ -1552,7 +1573,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1552 | 1573 | ||
1553 | I915_WRITE(reg, dspcntr); | 1574 | I915_WRITE(reg, dspcntr); |
1554 | 1575 | ||
1555 | Start = obj_priv->gtt_offset; | 1576 | Start = obj->gtt_offset; |
1556 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | 1577 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); |
1557 | 1578 | ||
1558 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | 1579 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
@@ -1598,7 +1619,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1598 | mutex_lock(&dev->struct_mutex); | 1619 | mutex_lock(&dev->struct_mutex); |
1599 | ret = intel_pin_and_fence_fb_obj(dev, | 1620 | ret = intel_pin_and_fence_fb_obj(dev, |
1600 | to_intel_framebuffer(crtc->fb)->obj, | 1621 | to_intel_framebuffer(crtc->fb)->obj, |
1601 | false); | 1622 | NULL); |
1602 | if (ret != 0) { | 1623 | if (ret != 0) { |
1603 | mutex_unlock(&dev->struct_mutex); | 1624 | mutex_unlock(&dev->struct_mutex); |
1604 | return ret; | 1625 | return ret; |
@@ -1606,18 +1627,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1606 | 1627 | ||
1607 | if (old_fb) { | 1628 | if (old_fb) { |
1608 | struct drm_i915_private *dev_priv = dev->dev_private; | 1629 | struct drm_i915_private *dev_priv = dev->dev_private; |
1609 | struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; | 1630 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1610 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
1611 | 1631 | ||
1612 | wait_event(dev_priv->pending_flip_queue, | 1632 | wait_event(dev_priv->pending_flip_queue, |
1613 | atomic_read(&obj_priv->pending_flip) == 0); | 1633 | atomic_read(&obj->pending_flip) == 0); |
1614 | 1634 | ||
1615 | /* Big Hammer, we also need to ensure that any pending | 1635 | /* Big Hammer, we also need to ensure that any pending |
1616 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | 1636 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the |
1617 | * current scanout is retired before unpinning the old | 1637 | * current scanout is retired before unpinning the old |
1618 | * framebuffer. | 1638 | * framebuffer. |
1619 | */ | 1639 | */ |
1620 | ret = i915_gem_object_flush_gpu(obj_priv, false); | 1640 | ret = i915_gem_object_flush_gpu(obj, false); |
1621 | if (ret) { | 1641 | if (ret) { |
1622 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | 1642 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
1623 | mutex_unlock(&dev->struct_mutex); | 1643 | mutex_unlock(&dev->struct_mutex); |
@@ -1633,8 +1653,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1633 | return ret; | 1653 | return ret; |
1634 | } | 1654 | } |
1635 | 1655 | ||
1636 | if (old_fb) | 1656 | if (old_fb) { |
1657 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1637 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); | 1658 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); |
1659 | } | ||
1638 | 1660 | ||
1639 | mutex_unlock(&dev->struct_mutex); | 1661 | mutex_unlock(&dev->struct_mutex); |
1640 | 1662 | ||
@@ -1996,31 +2018,31 @@ static void intel_flush_display_plane(struct drm_device *dev, | |||
1996 | static void intel_clear_scanline_wait(struct drm_device *dev) | 2018 | static void intel_clear_scanline_wait(struct drm_device *dev) |
1997 | { | 2019 | { |
1998 | struct drm_i915_private *dev_priv = dev->dev_private; | 2020 | struct drm_i915_private *dev_priv = dev->dev_private; |
2021 | struct intel_ring_buffer *ring; | ||
1999 | u32 tmp; | 2022 | u32 tmp; |
2000 | 2023 | ||
2001 | if (IS_GEN2(dev)) | 2024 | if (IS_GEN2(dev)) |
2002 | /* Can't break the hang on i8xx */ | 2025 | /* Can't break the hang on i8xx */ |
2003 | return; | 2026 | return; |
2004 | 2027 | ||
2005 | tmp = I915_READ(PRB0_CTL); | 2028 | ring = LP_RING(dev_priv); |
2006 | if (tmp & RING_WAIT) { | 2029 | tmp = I915_READ_CTL(ring); |
2007 | I915_WRITE(PRB0_CTL, tmp); | 2030 | if (tmp & RING_WAIT) |
2008 | POSTING_READ(PRB0_CTL); | 2031 | I915_WRITE_CTL(ring, tmp); |
2009 | } | ||
2010 | } | 2032 | } |
2011 | 2033 | ||
2012 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | 2034 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
2013 | { | 2035 | { |
2014 | struct drm_i915_gem_object *obj_priv; | 2036 | struct drm_i915_gem_object *obj; |
2015 | struct drm_i915_private *dev_priv; | 2037 | struct drm_i915_private *dev_priv; |
2016 | 2038 | ||
2017 | if (crtc->fb == NULL) | 2039 | if (crtc->fb == NULL) |
2018 | return; | 2040 | return; |
2019 | 2041 | ||
2020 | obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); | 2042 | obj = to_intel_framebuffer(crtc->fb)->obj; |
2021 | dev_priv = crtc->dev->dev_private; | 2043 | dev_priv = crtc->dev->dev_private; |
2022 | wait_event(dev_priv->pending_flip_queue, | 2044 | wait_event(dev_priv->pending_flip_queue, |
2023 | atomic_read(&obj_priv->pending_flip) == 0); | 2045 | atomic_read(&obj->pending_flip) == 0); |
2024 | } | 2046 | } |
2025 | 2047 | ||
2026 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2048 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
@@ -2120,9 +2142,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2120 | reg = TRANS_DP_CTL(pipe); | 2142 | reg = TRANS_DP_CTL(pipe); |
2121 | temp = I915_READ(reg); | 2143 | temp = I915_READ(reg); |
2122 | temp &= ~(TRANS_DP_PORT_SEL_MASK | | 2144 | temp &= ~(TRANS_DP_PORT_SEL_MASK | |
2123 | TRANS_DP_SYNC_MASK); | 2145 | TRANS_DP_SYNC_MASK | |
2146 | TRANS_DP_BPC_MASK); | ||
2124 | temp |= (TRANS_DP_OUTPUT_ENABLE | | 2147 | temp |= (TRANS_DP_OUTPUT_ENABLE | |
2125 | TRANS_DP_ENH_FRAMING); | 2148 | TRANS_DP_ENH_FRAMING); |
2149 | temp |= TRANS_DP_8BPC; | ||
2126 | 2150 | ||
2127 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | 2151 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
2128 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; | 2152 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; |
@@ -2712,27 +2736,19 @@ fdi_reduce_ratio(u32 *num, u32 *den) | |||
2712 | } | 2736 | } |
2713 | } | 2737 | } |
2714 | 2738 | ||
2715 | #define DATA_N 0x800000 | ||
2716 | #define LINK_N 0x80000 | ||
2717 | |||
2718 | static void | 2739 | static void |
2719 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, | 2740 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, |
2720 | int link_clock, struct fdi_m_n *m_n) | 2741 | int link_clock, struct fdi_m_n *m_n) |
2721 | { | 2742 | { |
2722 | u64 temp; | ||
2723 | |||
2724 | m_n->tu = 64; /* default size */ | 2743 | m_n->tu = 64; /* default size */ |
2725 | 2744 | ||
2726 | temp = (u64) DATA_N * pixel_clock; | 2745 | /* BUG_ON(pixel_clock > INT_MAX / 36); */ |
2727 | temp = div_u64(temp, link_clock); | 2746 | m_n->gmch_m = bits_per_pixel * pixel_clock; |
2728 | m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); | 2747 | m_n->gmch_n = link_clock * nlanes * 8; |
2729 | m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */ | ||
2730 | m_n->gmch_n = DATA_N; | ||
2731 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | 2748 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
2732 | 2749 | ||
2733 | temp = (u64) LINK_N * pixel_clock; | 2750 | m_n->link_m = pixel_clock; |
2734 | m_n->link_m = div_u64(temp, link_clock); | 2751 | m_n->link_n = link_clock; |
2735 | m_n->link_n = LINK_N; | ||
2736 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); | 2752 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); |
2737 | } | 2753 | } |
2738 | 2754 | ||
@@ -2856,6 +2872,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { | |||
2856 | ILK_FIFO_LINE_SIZE | 2872 | ILK_FIFO_LINE_SIZE |
2857 | }; | 2873 | }; |
2858 | 2874 | ||
2875 | static struct intel_watermark_params sandybridge_display_wm_info = { | ||
2876 | SNB_DISPLAY_FIFO, | ||
2877 | SNB_DISPLAY_MAXWM, | ||
2878 | SNB_DISPLAY_DFTWM, | ||
2879 | 2, | ||
2880 | SNB_FIFO_LINE_SIZE | ||
2881 | }; | ||
2882 | |||
2883 | static struct intel_watermark_params sandybridge_cursor_wm_info = { | ||
2884 | SNB_CURSOR_FIFO, | ||
2885 | SNB_CURSOR_MAXWM, | ||
2886 | SNB_CURSOR_DFTWM, | ||
2887 | 2, | ||
2888 | SNB_FIFO_LINE_SIZE | ||
2889 | }; | ||
2890 | |||
2891 | static struct intel_watermark_params sandybridge_display_srwm_info = { | ||
2892 | SNB_DISPLAY_SR_FIFO, | ||
2893 | SNB_DISPLAY_MAX_SRWM, | ||
2894 | SNB_DISPLAY_DFT_SRWM, | ||
2895 | 2, | ||
2896 | SNB_FIFO_LINE_SIZE | ||
2897 | }; | ||
2898 | |||
2899 | static struct intel_watermark_params sandybridge_cursor_srwm_info = { | ||
2900 | SNB_CURSOR_SR_FIFO, | ||
2901 | SNB_CURSOR_MAX_SRWM, | ||
2902 | SNB_CURSOR_DFT_SRWM, | ||
2903 | 2, | ||
2904 | SNB_FIFO_LINE_SIZE | ||
2905 | }; | ||
2906 | |||
2907 | |||
2859 | /** | 2908 | /** |
2860 | * intel_calculate_wm - calculate watermark level | 2909 | * intel_calculate_wm - calculate watermark level |
2861 | * @clock_in_khz: pixel clock | 2910 | * @clock_in_khz: pixel clock |
@@ -3389,12 +3438,17 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, | |||
3389 | 3438 | ||
3390 | static bool ironlake_compute_wm0(struct drm_device *dev, | 3439 | static bool ironlake_compute_wm0(struct drm_device *dev, |
3391 | int pipe, | 3440 | int pipe, |
3441 | const struct intel_watermark_params *display, | ||
3442 | int display_latency_ns, | ||
3443 | const struct intel_watermark_params *cursor, | ||
3444 | int cursor_latency_ns, | ||
3392 | int *plane_wm, | 3445 | int *plane_wm, |
3393 | int *cursor_wm) | 3446 | int *cursor_wm) |
3394 | { | 3447 | { |
3395 | struct drm_crtc *crtc; | 3448 | struct drm_crtc *crtc; |
3396 | int htotal, hdisplay, clock, pixel_size = 0; | 3449 | int htotal, hdisplay, clock, pixel_size; |
3397 | int line_time_us, line_count, entries; | 3450 | int line_time_us, line_count; |
3451 | int entries, tlb_miss; | ||
3398 | 3452 | ||
3399 | crtc = intel_get_crtc_for_pipe(dev, pipe); | 3453 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
3400 | if (crtc->fb == NULL || !crtc->enabled) | 3454 | if (crtc->fb == NULL || !crtc->enabled) |
@@ -3406,37 +3460,141 @@ static bool ironlake_compute_wm0(struct drm_device *dev, | |||
3406 | pixel_size = crtc->fb->bits_per_pixel / 8; | 3460 | pixel_size = crtc->fb->bits_per_pixel / 8; |
3407 | 3461 | ||
3408 | /* Use the small buffer method to calculate plane watermark */ | 3462 | /* Use the small buffer method to calculate plane watermark */ |
3409 | entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000; | 3463 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; |
3410 | entries = DIV_ROUND_UP(entries, | 3464 | tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; |
3411 | ironlake_display_wm_info.cacheline_size); | 3465 | if (tlb_miss > 0) |
3412 | *plane_wm = entries + ironlake_display_wm_info.guard_size; | 3466 | entries += tlb_miss; |
3413 | if (*plane_wm > (int)ironlake_display_wm_info.max_wm) | 3467 | entries = DIV_ROUND_UP(entries, display->cacheline_size); |
3414 | *plane_wm = ironlake_display_wm_info.max_wm; | 3468 | *plane_wm = entries + display->guard_size; |
3469 | if (*plane_wm > (int)display->max_wm) | ||
3470 | *plane_wm = display->max_wm; | ||
3415 | 3471 | ||
3416 | /* Use the large buffer method to calculate cursor watermark */ | 3472 | /* Use the large buffer method to calculate cursor watermark */ |
3417 | line_time_us = ((htotal * 1000) / clock); | 3473 | line_time_us = ((htotal * 1000) / clock); |
3418 | line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; | 3474 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; |
3419 | entries = line_count * 64 * pixel_size; | 3475 | entries = line_count * 64 * pixel_size; |
3420 | entries = DIV_ROUND_UP(entries, | 3476 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; |
3421 | ironlake_cursor_wm_info.cacheline_size); | 3477 | if (tlb_miss > 0) |
3422 | *cursor_wm = entries + ironlake_cursor_wm_info.guard_size; | 3478 | entries += tlb_miss; |
3423 | if (*cursor_wm > ironlake_cursor_wm_info.max_wm) | 3479 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); |
3424 | *cursor_wm = ironlake_cursor_wm_info.max_wm; | 3480 | *cursor_wm = entries + cursor->guard_size; |
3481 | if (*cursor_wm > (int)cursor->max_wm) | ||
3482 | *cursor_wm = (int)cursor->max_wm; | ||
3483 | |||
3484 | return true; | ||
3485 | } | ||
3486 | |||
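The generalized ironlake_compute_wm0() keeps the "small buffer" method for the plane watermark: bytes fetched during the latency window, rounded up to cache lines, plus a guard. A worked example with assumed inputs (108000 kHz pixel clock, 4 bytes per pixel, 700 ns LP0 plane latency, 64-byte cache lines, guard size 2, no TLB-miss adjustment):

    entries  = ((108000 * 4 / 1000) * 700) / 1000 = 302 bytes
    entries  = DIV_ROUND_UP(302, 64)              = 5 cache lines
    plane_wm = 5 + 2                              = 7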
3487 | /* | ||
3488 | * Check the wm result. | ||
3489 | * | ||
3490 | * If any calculated watermark values is larger than the maximum value that | ||
3491 | * can be programmed into the associated watermark register, that watermark | ||
3492 | * must be disabled. | ||
3493 | */ | ||
3494 | static bool ironlake_check_srwm(struct drm_device *dev, int level, | ||
3495 | int fbc_wm, int display_wm, int cursor_wm, | ||
3496 | const struct intel_watermark_params *display, | ||
3497 | const struct intel_watermark_params *cursor) | ||
3498 | { | ||
3499 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3500 | |||
3501 | DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," | ||
3502 | " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); | ||
3503 | |||
3504 | if (fbc_wm > SNB_FBC_MAX_SRWM) { | ||
3505 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", | ||
3506 | fbc_wm, SNB_FBC_MAX_SRWM, level); | ||
3507 | |||
3508 | /* fbc has its own way to disable FBC WM */ ||
3509 | I915_WRITE(DISP_ARB_CTL, | ||
3510 | I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); | ||
3511 | return false; | ||
3512 | } | ||
3513 | |||
3514 | if (display_wm > display->max_wm) { | ||
3515 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", | ||
3516 | display_wm, SNB_DISPLAY_MAX_SRWM, level); | ||
3517 | return false; | ||
3518 | } | ||
3519 | |||
3520 | if (cursor_wm > cursor->max_wm) { | ||
3521 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", | ||
3522 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); | ||
3523 | return false; | ||
3524 | } | ||
3525 | |||
3526 | if (!(fbc_wm || display_wm || cursor_wm)) { | ||
3527 | DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); | ||
3528 | return false; | ||
3529 | } | ||
3425 | 3530 | ||
3426 | return true; | 3531 | return true; |
3427 | } | 3532 | } |
3428 | 3533 | ||
3534 | /* | ||
3535 | * Compute watermark values of WM[1-3], | ||
3536 | */ | ||
3537 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, | ||
3538 | int hdisplay, int htotal, | ||
3539 | int pixel_size, int clock, int latency_ns, | ||
3540 | const struct intel_watermark_params *display, | ||
3541 | const struct intel_watermark_params *cursor, | ||
3542 | int *fbc_wm, int *display_wm, int *cursor_wm) | ||
3543 | { | ||
3544 | |||
3545 | unsigned long line_time_us; | ||
3546 | int line_count, line_size; | ||
3547 | int small, large; | ||
3548 | int entries; | ||
3549 | |||
3550 | if (!latency_ns) { | ||
3551 | *fbc_wm = *display_wm = *cursor_wm = 0; | ||
3552 | return false; | ||
3553 | } | ||
3554 | |||
3555 | line_time_us = (htotal * 1000) / clock; | ||
3556 | line_count = (latency_ns / line_time_us + 1000) / 1000; | ||
3557 | line_size = hdisplay * pixel_size; | ||
3558 | |||
3559 | /* Use the minimum of the small and large buffer method for primary */ | ||
3560 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | ||
3561 | large = line_count * line_size; | ||
3562 | |||
3563 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); | ||
3564 | *display_wm = entries + display->guard_size; | ||
3565 | |||
3566 | /* | ||
3567 | * Spec says: | ||
3568 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 | ||
3569 | */ | ||
3570 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; | ||
3571 | |||
3572 | /* calculate the self-refresh watermark for display cursor */ | ||
3573 | entries = line_count * pixel_size * 64; | ||
3574 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | ||
3575 | *cursor_wm = entries + cursor->guard_size; | ||
3576 | |||
3577 | return ironlake_check_srwm(dev, level, | ||
3578 | *fbc_wm, *display_wm, *cursor_wm, | ||
3579 | display, cursor); | ||
3580 | } | ||
3581 | |||
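The spec formula quoted in ironlake_compute_srwm() converts the primary self-refresh watermark into FBC lines. A worked example with an assumed 1920-pixel-wide, 32 bpp scanout and display_wm = 64:

    line_size = 1920 * 4                         = 7680 bytes
    fbc_wm    = DIV_ROUND_UP(64 * 64, 7680) + 2  = 1 + 2 = 3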
3429 | static void ironlake_update_wm(struct drm_device *dev, | 3582 | static void ironlake_update_wm(struct drm_device *dev, |
3430 | int planea_clock, int planeb_clock, | 3583 | int planea_clock, int planeb_clock, |
3431 | int sr_hdisplay, int sr_htotal, | 3584 | int hdisplay, int htotal, |
3432 | int pixel_size) | 3585 | int pixel_size) |
3433 | { | 3586 | { |
3434 | struct drm_i915_private *dev_priv = dev->dev_private; | 3587 | struct drm_i915_private *dev_priv = dev->dev_private; |
3435 | int plane_wm, cursor_wm, enabled; | 3588 | int fbc_wm, plane_wm, cursor_wm, enabled; |
3436 | int tmp; | 3589 | int clock; |
3437 | 3590 | ||
3438 | enabled = 0; | 3591 | enabled = 0; |
3439 | if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) { | 3592 | if (ironlake_compute_wm0(dev, 0, |
3593 | &ironlake_display_wm_info, | ||
3594 | ILK_LP0_PLANE_LATENCY, | ||
3595 | &ironlake_cursor_wm_info, | ||
3596 | ILK_LP0_CURSOR_LATENCY, | ||
3597 | &plane_wm, &cursor_wm)) { | ||
3440 | I915_WRITE(WM0_PIPEA_ILK, | 3598 | I915_WRITE(WM0_PIPEA_ILK, |
3441 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 3599 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
3442 | DRM_DEBUG_KMS("FIFO watermarks for pipe A -" | 3600 | DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
@@ -3445,7 +3603,12 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3445 | enabled++; | 3603 | enabled++; |
3446 | } | 3604 | } |
3447 | 3605 | ||
3448 | if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) { | 3606 | if (ironlake_compute_wm0(dev, 1, |
3607 | &ironlake_display_wm_info, | ||
3608 | ILK_LP0_PLANE_LATENCY, | ||
3609 | &ironlake_cursor_wm_info, | ||
3610 | ILK_LP0_CURSOR_LATENCY, | ||
3611 | &plane_wm, &cursor_wm)) { | ||
3449 | I915_WRITE(WM0_PIPEB_ILK, | 3612 | I915_WRITE(WM0_PIPEB_ILK, |
3450 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | 3613 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
3451 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | 3614 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
@@ -3458,57 +3621,151 @@ static void ironlake_update_wm(struct drm_device *dev, | |||
3458 | * Calculate and update the self-refresh watermark only when one | 3621 | * Calculate and update the self-refresh watermark only when one |
3459 | * display plane is used. | 3622 | * display plane is used. |
3460 | */ | 3623 | */ |
3461 | tmp = 0; | 3624 | I915_WRITE(WM3_LP_ILK, 0); |
3462 | if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) { | 3625 | I915_WRITE(WM2_LP_ILK, 0); |
3463 | unsigned long line_time_us; | 3626 | I915_WRITE(WM1_LP_ILK, 0); |
3464 | int small, large, plane_fbc; | ||
3465 | int sr_clock, entries; | ||
3466 | int line_count, line_size; | ||
3467 | /* Read the self-refresh latency. The unit is 0.5us */ | ||
3468 | int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; | ||
3469 | 3627 | ||
3470 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 3628 | if (enabled != 1) |
3471 | line_time_us = (sr_htotal * 1000) / sr_clock; | 3629 | return; |
3472 | 3630 | ||
3473 | /* Use ns/us then divide to preserve precision */ | 3631 | clock = planea_clock ? planea_clock : planeb_clock; |
3474 | line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) | ||
3475 | / 1000; | ||
3476 | line_size = sr_hdisplay * pixel_size; | ||
3477 | 3632 | ||
3478 | /* Use the minimum of the small and large buffer method for primary */ | 3633 | /* WM1 */ |
3479 | small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000; | 3634 | if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, |
3480 | large = line_count * line_size; | 3635 | clock, ILK_READ_WM1_LATENCY() * 500, |
3636 | &ironlake_display_srwm_info, | ||
3637 | &ironlake_cursor_srwm_info, | ||
3638 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3639 | return; | ||
3481 | 3640 | ||
3482 | entries = DIV_ROUND_UP(min(small, large), | 3641 | I915_WRITE(WM1_LP_ILK, |
3483 | ironlake_display_srwm_info.cacheline_size); | 3642 | WM1_LP_SR_EN | |
3643 | (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3644 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3645 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3646 | cursor_wm); | ||
3647 | |||
3648 | /* WM2 */ | ||
3649 | if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size, | ||
3650 | clock, ILK_READ_WM2_LATENCY() * 500, | ||
3651 | &ironlake_display_srwm_info, | ||
3652 | &ironlake_cursor_srwm_info, | ||
3653 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3654 | return; | ||
3484 | 3655 | ||
3485 | plane_fbc = entries * 64; | 3656 | I915_WRITE(WM2_LP_ILK, |
3486 | plane_fbc = DIV_ROUND_UP(plane_fbc, line_size); | 3657 | WM2_LP_EN | |
3658 | (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3659 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3660 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3661 | cursor_wm); | ||
3487 | 3662 | ||
3488 | plane_wm = entries + ironlake_display_srwm_info.guard_size; | 3663 | /* |
3489 | if (plane_wm > (int)ironlake_display_srwm_info.max_wm) | 3664 | * WM3 is unsupported on ILK, probably because we don't have latency |
3490 | plane_wm = ironlake_display_srwm_info.max_wm; | 3665 | * data for that power state |
3666 | */ | ||
3667 | } | ||
3491 | 3668 | ||
3492 | /* calculate the self-refresh watermark for display cursor */ | 3669 | static void sandybridge_update_wm(struct drm_device *dev, |
3493 | entries = line_count * pixel_size * 64; | 3670 | int planea_clock, int planeb_clock, |
3494 | entries = DIV_ROUND_UP(entries, | 3671 | int hdisplay, int htotal, |
3495 | ironlake_cursor_srwm_info.cacheline_size); | 3672 | int pixel_size) |
3673 | { | ||
3674 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3675 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ | ||
3676 | int fbc_wm, plane_wm, cursor_wm, enabled; | ||
3677 | int clock; | ||
3496 | 3678 | ||
3497 | cursor_wm = entries + ironlake_cursor_srwm_info.guard_size; | 3679 | enabled = 0; |
3498 | if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm) | 3680 | if (ironlake_compute_wm0(dev, 0, |
3499 | cursor_wm = ironlake_cursor_srwm_info.max_wm; | 3681 | &sandybridge_display_wm_info, latency, |
3682 | &sandybridge_cursor_wm_info, latency, | ||
3683 | &plane_wm, &cursor_wm)) { | ||
3684 | I915_WRITE(WM0_PIPEA_ILK, | ||
3685 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
3686 | DRM_DEBUG_KMS("FIFO watermarks for pipe A -" | ||
3687 | " plane %d, cursor: %d\n", | ||
3688 | plane_wm, cursor_wm); | ||
3689 | enabled++; | ||
3690 | } | ||
3500 | 3691 | ||
3501 | /* configure watermark and enable self-refresh */ | 3692 | if (ironlake_compute_wm0(dev, 1, |
3502 | tmp = (WM1_LP_SR_EN | | 3693 | &sandybridge_display_wm_info, latency, |
3503 | (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | | 3694 | &sandybridge_cursor_wm_info, latency, |
3504 | (plane_fbc << WM1_LP_FBC_SHIFT) | | 3695 | &plane_wm, &cursor_wm)) { |
3505 | (plane_wm << WM1_LP_SR_SHIFT) | | 3696 | I915_WRITE(WM0_PIPEB_ILK, |
3506 | cursor_wm); | 3697 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
3507 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d," | 3698 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
3508 | " cursor %d\n", plane_wm, plane_fbc, cursor_wm); | 3699 | " plane %d, cursor: %d\n", |
3700 | plane_wm, cursor_wm); | ||
3701 | enabled++; | ||
3509 | } | 3702 | } |
3510 | I915_WRITE(WM1_LP_ILK, tmp); | 3703 | |
3511 | /* XXX setup WM2 and WM3 */ | 3704 | /* |
3705 | * Calculate and update the self-refresh watermark only when one | ||
3706 | * display plane is used. | ||
3707 | * | ||
3708 | * SNB supports 3 levels of watermarks. | ||
3709 | * | ||
3710 | * WM1/WM2/WM3 watermarks have to be enabled in ascending order, | ||
3711 | * and disabled in descending order. | ||
3712 | * | ||
3713 | */ | ||
3714 | I915_WRITE(WM3_LP_ILK, 0); | ||
3715 | I915_WRITE(WM2_LP_ILK, 0); | ||
3716 | I915_WRITE(WM1_LP_ILK, 0); | ||
3717 | |||
3718 | if (enabled != 1) | ||
3719 | return; | ||
3720 | |||
3721 | clock = planea_clock ? planea_clock : planeb_clock; | ||
3722 | |||
3723 | /* WM1 */ | ||
3724 | if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, | ||
3725 | clock, SNB_READ_WM1_LATENCY() * 500, | ||
3726 | &sandybridge_display_srwm_info, | ||
3727 | &sandybridge_cursor_srwm_info, | ||
3728 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3729 | return; | ||
3730 | |||
3731 | I915_WRITE(WM1_LP_ILK, | ||
3732 | WM1_LP_SR_EN | | ||
3733 | (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3734 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3735 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3736 | cursor_wm); | ||
3737 | |||
3738 | /* WM2 */ | ||
3739 | if (!ironlake_compute_srwm(dev, 2, | ||
3740 | hdisplay, htotal, pixel_size, | ||
3741 | clock, SNB_READ_WM2_LATENCY() * 500, | ||
3742 | &sandybridge_display_srwm_info, | ||
3743 | &sandybridge_cursor_srwm_info, | ||
3744 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3745 | return; | ||
3746 | |||
3747 | I915_WRITE(WM2_LP_ILK, | ||
3748 | WM2_LP_EN | | ||
3749 | (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3750 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3751 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3752 | cursor_wm); | ||
3753 | |||
3754 | /* WM3 */ | ||
3755 | if (!ironlake_compute_srwm(dev, 3, | ||
3756 | hdisplay, htotal, pixel_size, | ||
3757 | clock, SNB_READ_WM3_LATENCY() * 500, | ||
3758 | &sandybridge_display_srwm_info, | ||
3759 | &sandybridge_cursor_srwm_info, | ||
3760 | &fbc_wm, &plane_wm, &cursor_wm)) | ||
3761 | return; | ||
3762 | |||
3763 | I915_WRITE(WM3_LP_ILK, | ||
3764 | WM3_LP_EN | | ||
3765 | (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | | ||
3766 | (fbc_wm << WM1_LP_FBC_SHIFT) | | ||
3767 | (plane_wm << WM1_LP_SR_SHIFT) | | ||
3768 | cursor_wm); | ||
3512 | } | 3769 | } |
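The ordering rule spelled out in the comment above deserves emphasis: both update paths first zero the LP watermarks from WM3 down to WM1, then re-enable from WM1 up, bailing out at the first level whose computation fails. A minimal stand-in sketch of that discipline (the register names mirror the driver; the write helper and bit values are illustrative only):

#include <stdio.h>

/* Hypothetical stand-in for I915_WRITE(), printing instead of poking MMIO. */
static void wm_write(const char *reg, unsigned int val)
{
	printf("%s <- 0x%08x\n", reg, val);
}

int main(void)
{
	/* Disable in descending order before reprogramming... */
	wm_write("WM3_LP_ILK", 0);
	wm_write("WM2_LP_ILK", 0);
	wm_write("WM1_LP_ILK", 0);

	/* ...then enable in ascending order; a failed srwm computation
	 * at any level leaves that level and all higher ones disabled. */
	wm_write("WM1_LP_ILK", 1u << 31 /* enable bit */ | 40 /* wm */);
	wm_write("WM2_LP_ILK", 1u << 31 | 36);
	wm_write("WM3_LP_ILK", 1u << 31 | 32);
	return 0;
}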
3513 | 3770 | ||
3514 | /** | 3771 | /** |
@@ -3586,6 +3843,11 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3586 | sr_hdisplay, sr_htotal, pixel_size); | 3843 | sr_hdisplay, sr_htotal, pixel_size); |
3587 | } | 3844 | } |
3588 | 3845 | ||
3846 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | ||
3847 | { | ||
3848 | return dev_priv->lvds_use_ssc && i915_panel_use_ssc; | ||
3849 | } | ||
3850 | |||
3589 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 3851 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
3590 | struct drm_display_mode *mode, | 3852 | struct drm_display_mode *mode, |
3591 | struct drm_display_mode *adjusted_mode, | 3853 | struct drm_display_mode *adjusted_mode, |
@@ -3648,7 +3910,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3648 | num_connectors++; | 3910 | num_connectors++; |
3649 | } | 3911 | } |
3650 | 3912 | ||
3651 | if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { | 3913 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
3652 | refclk = dev_priv->lvds_ssc_freq * 1000; | 3914 | refclk = dev_priv->lvds_ssc_freq * 1000; |
3653 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | 3915 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
3654 | refclk / 1000); | 3916 | refclk / 1000); |
@@ -3666,7 +3928,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3666 | * refclk, or FALSE. The returned values represent the clock equation: | 3928 | * refclk, or FALSE. The returned values represent the clock equation: |
3667 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 3929 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
3668 | */ | 3930 | */ |
3669 | limit = intel_limit(crtc); | 3931 | limit = intel_limit(crtc, refclk); |
3670 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | 3932 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
3671 | if (!ok) { | 3933 | if (!ok) { |
3672 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 3934 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
@@ -3716,10 +3978,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3716 | 3978 | ||
3717 | /* FDI link */ | 3979 | /* FDI link */ |
3718 | if (HAS_PCH_SPLIT(dev)) { | 3980 | if (HAS_PCH_SPLIT(dev)) { |
3981 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
3719 | int lane = 0, link_bw, bpp; | 3982 | int lane = 0, link_bw, bpp; |
3720 | /* CPU eDP doesn't require FDI link, so just set DP M/N | 3983 | /* CPU eDP doesn't require FDI link, so just set DP M/N |
3721 | according to current link config */ | 3984 | according to current link config */ |
3722 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) { | 3985 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3723 | target_clock = mode->clock; | 3986 | target_clock = mode->clock; |
3724 | intel_edp_link_config(has_edp_encoder, | 3987 | intel_edp_link_config(has_edp_encoder, |
3725 | &lane, &link_bw); | 3988 | &lane, &link_bw); |
@@ -3799,6 +4062,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3799 | 4062 | ||
3800 | intel_crtc->fdi_lanes = lane; | 4063 | intel_crtc->fdi_lanes = lane; |
3801 | 4064 | ||
4065 | if (pixel_multiplier > 1) | ||
4066 | link_bw *= pixel_multiplier; | ||
3802 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | 4067 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); |
3803 | } | 4068 | } |
3804 | 4069 | ||
@@ -3820,7 +4085,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3820 | udelay(200); | 4085 | udelay(200); |
3821 | 4086 | ||
3822 | if (has_edp_encoder) { | 4087 | if (has_edp_encoder) { |
3823 | if (dev_priv->lvds_use_ssc) { | 4088 | if (intel_panel_use_ssc(dev_priv)) { |
3824 | temp |= DREF_SSC1_ENABLE; | 4089 | temp |= DREF_SSC1_ENABLE; |
3825 | I915_WRITE(PCH_DREF_CONTROL, temp); | 4090 | I915_WRITE(PCH_DREF_CONTROL, temp); |
3826 | 4091 | ||
@@ -3831,13 +4096,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3831 | 4096 | ||
3832 | /* Enable CPU source on CPU attached eDP */ | 4097 | /* Enable CPU source on CPU attached eDP */ |
3833 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 4098 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3834 | if (dev_priv->lvds_use_ssc) | 4099 | if (intel_panel_use_ssc(dev_priv)) |
3835 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | 4100 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; |
3836 | else | 4101 | else |
3837 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 4102 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
3838 | } else { | 4103 | } else { |
3839 | /* Enable SSC on PCH eDP if needed */ | 4104 | /* Enable SSC on PCH eDP if needed */ |
3840 | if (dev_priv->lvds_use_ssc) { | 4105 | if (intel_panel_use_ssc(dev_priv)) { |
3841 | DRM_ERROR("enabling SSC on PCH\n"); | 4106 | DRM_ERROR("enabling SSC on PCH\n"); |
3842 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | 4107 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; |
3843 | } | 4108 | } |
@@ -3860,6 +4125,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3860 | reduced_clock.m2; | 4125 | reduced_clock.m2; |
3861 | } | 4126 | } |
3862 | 4127 | ||
4128 | /* Enable autotuning of the PLL clock (if permissible) */ | ||
4129 | if (HAS_PCH_SPLIT(dev)) { | ||
4130 | int factor = 21; | ||
4131 | |||
4132 | if (is_lvds) { | ||
4133 | if ((intel_panel_use_ssc(dev_priv) && | ||
4134 | dev_priv->lvds_ssc_freq == 100) || | ||
4135 | (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) | ||
4136 | factor = 25; | ||
4137 | } else if (is_sdvo && is_tv) | ||
4138 | factor = 20; | ||
4139 | |||
4140 | if (clock.m1 < factor * clock.n) | ||
4141 | fp |= FP_CB_TUNE; | ||
4142 | } | ||
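A quick numeric reading of the coarse-bandwidth tune condition above, with assumed divider values (not taken from any real PLL table):

#include <stdio.h>

int main(void)
{
	/* Assumed PLL dividers, for illustration only. */
	int m1 = 14, n = 1;
	/* 21 by default; 25 for 100MHz-SSC LVDS, 20 for SDVO TV. */
	int factor = 21;

	if (m1 < factor * n)	/* 14 < 21: low m1/n ratio */
		printf("FP_CB_TUNE set\n");
	else
		printf("FP_CB_TUNE clear\n");
	return 0;
}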
4143 | |||
3863 | dpll = 0; | 4144 | dpll = 0; |
3864 | if (!HAS_PCH_SPLIT(dev)) | 4145 | if (!HAS_PCH_SPLIT(dev)) |
3865 | dpll = DPLL_VGA_MODE_DIS; | 4146 | dpll = DPLL_VGA_MODE_DIS; |
@@ -3928,7 +4209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3928 | /* XXX: just matching BIOS for now */ | 4209 | /* XXX: just matching BIOS for now */ |
3929 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ | 4210 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
3930 | dpll |= 3; | 4211 | dpll |= 3; |
3931 | else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) | 4212 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
3932 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; | 4213 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
3933 | else | 4214 | else |
3934 | dpll |= PLL_REF_INPUT_DREFCLK; | 4215 | dpll |= PLL_REF_INPUT_DREFCLK; |
@@ -4074,7 +4355,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4074 | } | 4355 | } |
4075 | 4356 | ||
4076 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 4357 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4077 | I915_WRITE(fp_reg, fp); | ||
4078 | I915_WRITE(dpll_reg, dpll); | 4358 | I915_WRITE(dpll_reg, dpll); |
4079 | 4359 | ||
4080 | /* Wait for the clocks to stabilize. */ | 4360 | /* Wait for the clocks to stabilize. */ |
@@ -4092,13 +4372,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4092 | } | 4372 | } |
4093 | I915_WRITE(DPLL_MD(pipe), temp); | 4373 | I915_WRITE(DPLL_MD(pipe), temp); |
4094 | } else { | 4374 | } else { |
4095 | /* write it again -- the BIOS does, after all */ | 4375 | /* The pixel multiplier can only be updated once the |
4376 | * DPLL is enabled and the clocks are stable. | ||
4377 | * | ||
4378 | * So write it again. | ||
4379 | */ | ||
4096 | I915_WRITE(dpll_reg, dpll); | 4380 | I915_WRITE(dpll_reg, dpll); |
4097 | } | 4381 | } |
4098 | |||
4099 | /* Wait for the clocks to stabilize. */ | ||
4100 | POSTING_READ(dpll_reg); | ||
4101 | udelay(150); | ||
4102 | } | 4382 | } |
4103 | 4383 | ||
4104 | intel_crtc->lowfreq_avail = false; | 4384 | intel_crtc->lowfreq_avail = false; |
@@ -4334,15 +4614,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
4334 | } | 4614 | } |
4335 | 4615 | ||
4336 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, | 4616 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
4337 | struct drm_file *file_priv, | 4617 | struct drm_file *file, |
4338 | uint32_t handle, | 4618 | uint32_t handle, |
4339 | uint32_t width, uint32_t height) | 4619 | uint32_t width, uint32_t height) |
4340 | { | 4620 | { |
4341 | struct drm_device *dev = crtc->dev; | 4621 | struct drm_device *dev = crtc->dev; |
4342 | struct drm_i915_private *dev_priv = dev->dev_private; | 4622 | struct drm_i915_private *dev_priv = dev->dev_private; |
4343 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4623 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4344 | struct drm_gem_object *bo; | 4624 | struct drm_i915_gem_object *obj; |
4345 | struct drm_i915_gem_object *obj_priv; | ||
4346 | uint32_t addr; | 4625 | uint32_t addr; |
4347 | int ret; | 4626 | int ret; |
4348 | 4627 | ||
@@ -4352,7 +4631,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4352 | if (!handle) { | 4631 | if (!handle) { |
4353 | DRM_DEBUG_KMS("cursor off\n"); | 4632 | DRM_DEBUG_KMS("cursor off\n"); |
4354 | addr = 0; | 4633 | addr = 0; |
4355 | bo = NULL; | 4634 | obj = NULL; |
4356 | mutex_lock(&dev->struct_mutex); | 4635 | mutex_lock(&dev->struct_mutex); |
4357 | goto finish; | 4636 | goto finish; |
4358 | } | 4637 | } |
@@ -4363,13 +4642,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4363 | return -EINVAL; | 4642 | return -EINVAL; |
4364 | } | 4643 | } |
4365 | 4644 | ||
4366 | bo = drm_gem_object_lookup(dev, file_priv, handle); | 4645 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
4367 | if (!bo) | 4646 | if (!obj) |
4368 | return -ENOENT; | 4647 | return -ENOENT; |
4369 | 4648 | ||
4370 | obj_priv = to_intel_bo(bo); | 4649 | if (obj->base.size < width * height * 4) { |
4371 | |||
4372 | if (bo->size < width * height * 4) { | ||
4373 | DRM_ERROR("buffer is to small\n"); | 4650 | DRM_ERROR("buffer is to small\n"); |
4374 | ret = -ENOMEM; | 4651 | ret = -ENOMEM; |
4375 | goto fail; | 4652 | goto fail; |
@@ -4378,29 +4655,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4378 | /* we only need to pin inside GTT if cursor is non-phy */ | 4655 | /* we only need to pin inside GTT if cursor is non-phy */ |
4379 | mutex_lock(&dev->struct_mutex); | 4656 | mutex_lock(&dev->struct_mutex); |
4380 | if (!dev_priv->info->cursor_needs_physical) { | 4657 | if (!dev_priv->info->cursor_needs_physical) { |
4381 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 4658 | if (obj->tiling_mode) { |
4659 | DRM_ERROR("cursor cannot be tiled\n"); | ||
4660 | ret = -EINVAL; | ||
4661 | goto fail_locked; | ||
4662 | } | ||
4663 | |||
4664 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); | ||
4382 | if (ret) { | 4665 | if (ret) { |
4383 | DRM_ERROR("failed to pin cursor bo\n"); | 4666 | DRM_ERROR("failed to pin cursor bo\n"); |
4384 | goto fail_locked; | 4667 | goto fail_locked; |
4385 | } | 4668 | } |
4386 | 4669 | ||
4387 | ret = i915_gem_object_set_to_gtt_domain(bo, 0); | 4670 | ret = i915_gem_object_set_to_gtt_domain(obj, 0); |
4671 | if (ret) { | ||
4672 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | ||
4673 | goto fail_unpin; | ||
4674 | } | ||
4675 | |||
4676 | ret = i915_gem_object_put_fence(obj); | ||
4388 | if (ret) { | 4677 | if (ret) { |
4389 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | 4678 | DRM_ERROR("failed to move cursor bo into the GTT\n"); |
4390 | goto fail_unpin; | 4679 | goto fail_unpin; |
4391 | } | 4680 | } |
4392 | 4681 | ||
4393 | addr = obj_priv->gtt_offset; | 4682 | addr = obj->gtt_offset; |
4394 | } else { | 4683 | } else { |
4395 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 4684 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
4396 | ret = i915_gem_attach_phys_object(dev, bo, | 4685 | ret = i915_gem_attach_phys_object(dev, obj, |
4397 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, | 4686 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
4398 | align); | 4687 | align); |
4399 | if (ret) { | 4688 | if (ret) { |
4400 | DRM_ERROR("failed to attach phys object\n"); | 4689 | DRM_ERROR("failed to attach phys object\n"); |
4401 | goto fail_locked; | 4690 | goto fail_locked; |
4402 | } | 4691 | } |
4403 | addr = obj_priv->phys_obj->handle->busaddr; | 4692 | addr = obj->phys_obj->handle->busaddr; |
4404 | } | 4693 | } |
4405 | 4694 | ||
4406 | if (IS_GEN2(dev)) | 4695 | if (IS_GEN2(dev)) |
@@ -4409,17 +4698,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4409 | finish: | 4698 | finish: |
4410 | if (intel_crtc->cursor_bo) { | 4699 | if (intel_crtc->cursor_bo) { |
4411 | if (dev_priv->info->cursor_needs_physical) { | 4700 | if (dev_priv->info->cursor_needs_physical) { |
4412 | if (intel_crtc->cursor_bo != bo) | 4701 | if (intel_crtc->cursor_bo != obj) |
4413 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 4702 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
4414 | } else | 4703 | } else |
4415 | i915_gem_object_unpin(intel_crtc->cursor_bo); | 4704 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
4416 | drm_gem_object_unreference(intel_crtc->cursor_bo); | 4705 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
4417 | } | 4706 | } |
4418 | 4707 | ||
4419 | mutex_unlock(&dev->struct_mutex); | 4708 | mutex_unlock(&dev->struct_mutex); |
4420 | 4709 | ||
4421 | intel_crtc->cursor_addr = addr; | 4710 | intel_crtc->cursor_addr = addr; |
4422 | intel_crtc->cursor_bo = bo; | 4711 | intel_crtc->cursor_bo = obj; |
4423 | intel_crtc->cursor_width = width; | 4712 | intel_crtc->cursor_width = width; |
4424 | intel_crtc->cursor_height = height; | 4713 | intel_crtc->cursor_height = height; |
4425 | 4714 | ||
@@ -4427,11 +4716,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4427 | 4716 | ||
4428 | return 0; | 4717 | return 0; |
4429 | fail_unpin: | 4718 | fail_unpin: |
4430 | i915_gem_object_unpin(bo); | 4719 | i915_gem_object_unpin(obj); |
4431 | fail_locked: | 4720 | fail_locked: |
4432 | mutex_unlock(&dev->struct_mutex); | 4721 | mutex_unlock(&dev->struct_mutex); |
4433 | fail: | 4722 | fail: |
4434 | drm_gem_object_unreference_unlocked(bo); | 4723 | drm_gem_object_unreference_unlocked(&obj->base); |
4435 | return ret; | 4724 | return ret; |
4436 | } | 4725 | } |
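The size check in the function above assumes 4 bytes per cursor pixel (an ARGB cursor); as a quick sanity sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* A common 64x64 ARGB cursor, 4 bytes per pixel. */
	unsigned int width = 64, height = 64;
	unsigned long needed = (unsigned long)width * height * 4;

	/* Any backing object smaller than this is rejected with -ENOMEM
	 * before the driver attempts to pin or attach it. */
	printf("cursor needs %lu bytes\n", needed);	/* 16384 */
	return 0;
}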
4437 | 4726 | ||
@@ -4742,8 +5031,14 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
4742 | struct drm_device *dev = (struct drm_device *)arg; | 5031 | struct drm_device *dev = (struct drm_device *)arg; |
4743 | drm_i915_private_t *dev_priv = dev->dev_private; | 5032 | drm_i915_private_t *dev_priv = dev->dev_private; |
4744 | 5033 | ||
4745 | dev_priv->busy = false; | 5034 | if (!list_empty(&dev_priv->mm.active_list)) { |
5035 | /* Still processing requests, so just re-arm the timer. */ | ||
5036 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
5037 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
5038 | return; | ||
5039 | } | ||
4746 | 5040 | ||
5041 | dev_priv->busy = false; | ||
4747 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 5042 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
4748 | } | 5043 | } |
4749 | 5044 | ||
@@ -4754,9 +5049,17 @@ static void intel_crtc_idle_timer(unsigned long arg) | |||
4754 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; | 5049 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; |
4755 | struct drm_crtc *crtc = &intel_crtc->base; | 5050 | struct drm_crtc *crtc = &intel_crtc->base; |
4756 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; | 5051 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; |
5052 | struct intel_framebuffer *intel_fb; | ||
4757 | 5053 | ||
4758 | intel_crtc->busy = false; | 5054 | intel_fb = to_intel_framebuffer(crtc->fb); |
5055 | if (intel_fb && intel_fb->obj->active) { | ||
5056 | /* The framebuffer is still being accessed by the GPU. */ | ||
5057 | mod_timer(&intel_crtc->idle_timer, jiffies + | ||
5058 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | ||
5059 | return; | ||
5060 | } | ||
4759 | 5061 | ||
5062 | intel_crtc->busy = false; | ||
4760 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 5063 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
4761 | } | 5064 | } |
4762 | 5065 | ||
@@ -4766,8 +5069,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
4766 | drm_i915_private_t *dev_priv = dev->dev_private; | 5069 | drm_i915_private_t *dev_priv = dev->dev_private; |
4767 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5070 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4768 | int pipe = intel_crtc->pipe; | 5071 | int pipe = intel_crtc->pipe; |
4769 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 5072 | int dpll_reg = DPLL(pipe); |
4770 | int dpll = I915_READ(dpll_reg); | 5073 | int dpll; |
4771 | 5074 | ||
4772 | if (HAS_PCH_SPLIT(dev)) | 5075 | if (HAS_PCH_SPLIT(dev)) |
4773 | return; | 5076 | return; |
@@ -4775,17 +5078,19 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) | |||
4775 | if (!dev_priv->lvds_downclock_avail) | 5078 | if (!dev_priv->lvds_downclock_avail) |
4776 | return; | 5079 | return; |
4777 | 5080 | ||
5081 | dpll = I915_READ(dpll_reg); | ||
4778 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { | 5082 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
4779 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); | 5083 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
4780 | 5084 | ||
4781 | /* Unlock panel regs */ | 5085 | /* Unlock panel regs */ |
4782 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | | 5086 | I915_WRITE(PP_CONTROL, |
4783 | PANEL_UNLOCK_REGS); | 5087 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
4784 | 5088 | ||
4785 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 5089 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
4786 | I915_WRITE(dpll_reg, dpll); | 5090 | I915_WRITE(dpll_reg, dpll); |
4787 | dpll = I915_READ(dpll_reg); | 5091 | POSTING_READ(dpll_reg); |
4788 | intel_wait_for_vblank(dev, pipe); | 5092 | intel_wait_for_vblank(dev, pipe); |
5093 | |||
4789 | dpll = I915_READ(dpll_reg); | 5094 | dpll = I915_READ(dpll_reg); |
4790 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 5095 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
4791 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | 5096 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
@@ -4891,7 +5196,7 @@ static void intel_idle_update(struct work_struct *work) | |||
4891 | * buffer), we'll also mark the display as busy, so we know to increase its | 5196 | * buffer), we'll also mark the display as busy, so we know to increase its |
4892 | * clock frequency. | 5197 | * clock frequency. |
4893 | */ | 5198 | */ |
4894 | void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | 5199 | void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) |
4895 | { | 5200 | { |
4896 | drm_i915_private_t *dev_priv = dev->dev_private; | 5201 | drm_i915_private_t *dev_priv = dev->dev_private; |
4897 | struct drm_crtc *crtc = NULL; | 5202 | struct drm_crtc *crtc = NULL; |
@@ -4972,8 +5277,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
4972 | 5277 | ||
4973 | mutex_lock(&work->dev->struct_mutex); | 5278 | mutex_lock(&work->dev->struct_mutex); |
4974 | i915_gem_object_unpin(work->old_fb_obj); | 5279 | i915_gem_object_unpin(work->old_fb_obj); |
4975 | drm_gem_object_unreference(work->pending_flip_obj); | 5280 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
4976 | drm_gem_object_unreference(work->old_fb_obj); | 5281 | drm_gem_object_unreference(&work->old_fb_obj->base); |
5282 | |||
4977 | mutex_unlock(&work->dev->struct_mutex); | 5283 | mutex_unlock(&work->dev->struct_mutex); |
4978 | kfree(work); | 5284 | kfree(work); |
4979 | } | 5285 | } |
@@ -4984,15 +5290,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4984 | drm_i915_private_t *dev_priv = dev->dev_private; | 5290 | drm_i915_private_t *dev_priv = dev->dev_private; |
4985 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5291 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4986 | struct intel_unpin_work *work; | 5292 | struct intel_unpin_work *work; |
4987 | struct drm_i915_gem_object *obj_priv; | 5293 | struct drm_i915_gem_object *obj; |
4988 | struct drm_pending_vblank_event *e; | 5294 | struct drm_pending_vblank_event *e; |
4989 | struct timeval now; | 5295 | struct timeval tnow, tvbl; |
4990 | unsigned long flags; | 5296 | unsigned long flags; |
4991 | 5297 | ||
4992 | /* Ignore early vblank irqs */ | 5298 | /* Ignore early vblank irqs */ |
4993 | if (intel_crtc == NULL) | 5299 | if (intel_crtc == NULL) |
4994 | return; | 5300 | return; |
4995 | 5301 | ||
5302 | do_gettimeofday(&tnow); | ||
5303 | |||
4996 | spin_lock_irqsave(&dev->event_lock, flags); | 5304 | spin_lock_irqsave(&dev->event_lock, flags); |
4997 | work = intel_crtc->unpin_work; | 5305 | work = intel_crtc->unpin_work; |
4998 | if (work == NULL || !work->pending) { | 5306 | if (work == NULL || !work->pending) { |
@@ -5001,26 +5309,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
5001 | } | 5309 | } |
5002 | 5310 | ||
5003 | intel_crtc->unpin_work = NULL; | 5311 | intel_crtc->unpin_work = NULL; |
5004 | drm_vblank_put(dev, intel_crtc->pipe); | ||
5005 | 5312 | ||
5006 | if (work->event) { | 5313 | if (work->event) { |
5007 | e = work->event; | 5314 | e = work->event; |
5008 | do_gettimeofday(&now); | 5315 | e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); |
5009 | e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); | 5316 | |
5010 | e->event.tv_sec = now.tv_sec; | 5317 | /* Called before vblank count and timestamps have |
5011 | e->event.tv_usec = now.tv_usec; | 5318 | * been updated for the vblank interval of flip |
5319 | * completion? Need to increment vblank count and | ||
5320 | * add one video refresh duration to the returned timestamp | ||
5321 | * to account for this. We assume this happened if we | ||
5322 | * get called over 0.9 frame durations after the last | ||
5323 | * timestamped vblank. | ||
5324 | * | ||
5325 | * This calculation cannot be used with vrefresh rates | ||
5326 | * below 5Hz (10Hz to be on the safe side) without | ||
5327 | * promoting to 64-bit integers. | ||
5328 | */ | ||
5329 | if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > | ||
5330 | 9 * crtc->framedur_ns) { | ||
5331 | e->event.sequence++; | ||
5332 | tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + | ||
5333 | crtc->framedur_ns); | ||
5334 | } | ||
5335 | |||
5336 | e->event.tv_sec = tvbl.tv_sec; | ||
5337 | e->event.tv_usec = tvbl.tv_usec; | ||
5338 | |||
5012 | list_add_tail(&e->base.link, | 5339 | list_add_tail(&e->base.link, |
5013 | &e->base.file_priv->event_list); | 5340 | &e->base.file_priv->event_list); |
5014 | wake_up_interruptible(&e->base.file_priv->event_wait); | 5341 | wake_up_interruptible(&e->base.file_priv->event_wait); |
5015 | } | 5342 | } |
5016 | 5343 | ||
5344 | drm_vblank_put(dev, intel_crtc->pipe); | ||
5345 | |||
5017 | spin_unlock_irqrestore(&dev->event_lock, flags); | 5346 | spin_unlock_irqrestore(&dev->event_lock, flags); |
5018 | 5347 | ||
5019 | obj_priv = to_intel_bo(work->old_fb_obj); | 5348 | obj = work->old_fb_obj; |
5349 | |||
5020 | atomic_clear_mask(1 << intel_crtc->plane, | 5350 | atomic_clear_mask(1 << intel_crtc->plane, |
5021 | &obj_priv->pending_flip.counter); | 5351 | &obj->pending_flip.counter); |
5022 | if (atomic_read(&obj_priv->pending_flip) == 0) | 5352 | if (atomic_read(&obj->pending_flip) == 0) |
5023 | wake_up(&dev_priv->pending_flip_queue); | 5353 | wake_up(&dev_priv->pending_flip_queue); |
5354 | |||
5024 | schedule_work(&work->work); | 5355 | schedule_work(&work->work); |
5025 | 5356 | ||
5026 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); | 5357 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); |
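To see the late-completion heuristic above in action, a standalone sketch with assumed 60Hz timings (every number here is made up for illustration):

#include <stdio.h>

int main(void)
{
	long long framedur_ns = 16666667;	/* ~60Hz frame duration */
	long long tnow_ns = 1000000000;		/* assumed completion time */
	long long tvbl_ns =  984000000;		/* last timestamped vblank */
	unsigned int sequence = 1234;		/* assumed vblank count */

	/* 10 * delta > 9 * framedur is the integer form of
	 * "delta exceeds 0.9 frame durations". */
	if (10 * (tnow_ns - tvbl_ns) > 9 * framedur_ns) {
		/* The flip completed in the next vblank interval: bump
		 * the count and push the timestamp one frame forward. */
		sequence++;
		tvbl_ns += framedur_ns;
	}
	printf("seq=%u ts=%lldns\n", sequence, tvbl_ns);
	return 0;
}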
@@ -5066,8 +5397,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5066 | struct drm_device *dev = crtc->dev; | 5397 | struct drm_device *dev = crtc->dev; |
5067 | struct drm_i915_private *dev_priv = dev->dev_private; | 5398 | struct drm_i915_private *dev_priv = dev->dev_private; |
5068 | struct intel_framebuffer *intel_fb; | 5399 | struct intel_framebuffer *intel_fb; |
5069 | struct drm_i915_gem_object *obj_priv; | 5400 | struct drm_i915_gem_object *obj; |
5070 | struct drm_gem_object *obj; | ||
5071 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5401 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5072 | struct intel_unpin_work *work; | 5402 | struct intel_unpin_work *work; |
5073 | unsigned long flags, offset; | 5403 | unsigned long flags, offset; |
@@ -5101,13 +5431,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5101 | obj = intel_fb->obj; | 5431 | obj = intel_fb->obj; |
5102 | 5432 | ||
5103 | mutex_lock(&dev->struct_mutex); | 5433 | mutex_lock(&dev->struct_mutex); |
5104 | ret = intel_pin_and_fence_fb_obj(dev, obj, true); | 5434 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); |
5105 | if (ret) | 5435 | if (ret) |
5106 | goto cleanup_work; | 5436 | goto cleanup_work; |
5107 | 5437 | ||
5108 | /* Reference the objects for the scheduled work. */ | 5438 | /* Reference the objects for the scheduled work. */ |
5109 | drm_gem_object_reference(work->old_fb_obj); | 5439 | drm_gem_object_reference(&work->old_fb_obj->base); |
5110 | drm_gem_object_reference(obj); | 5440 | drm_gem_object_reference(&obj->base); |
5111 | 5441 | ||
5112 | crtc->fb = fb; | 5442 | crtc->fb = fb; |
5113 | 5443 | ||
@@ -5115,22 +5445,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5115 | if (ret) | 5445 | if (ret) |
5116 | goto cleanup_objs; | 5446 | goto cleanup_objs; |
5117 | 5447 | ||
5118 | /* Block clients from rendering to the new back buffer until | ||
5119 | * the flip occurs and the object is no longer visible. | ||
5120 | */ | ||
5121 | atomic_add(1 << intel_crtc->plane, | ||
5122 | &to_intel_bo(work->old_fb_obj)->pending_flip); | ||
5123 | |||
5124 | work->pending_flip_obj = obj; | ||
5125 | obj_priv = to_intel_bo(obj); | ||
5126 | |||
5127 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | 5448 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
5128 | u32 flip_mask; | 5449 | u32 flip_mask; |
5129 | 5450 | ||
5130 | /* Can't queue multiple flips, so wait for the previous | 5451 | /* Can't queue multiple flips, so wait for the previous |
5131 | * one to finish before executing the next. | 5452 | * one to finish before executing the next. |
5132 | */ | 5453 | */ |
5133 | BEGIN_LP_RING(2); | 5454 | ret = BEGIN_LP_RING(2); |
5455 | if (ret) | ||
5456 | goto cleanup_objs; | ||
5457 | |||
5134 | if (intel_crtc->plane) | 5458 | if (intel_crtc->plane) |
5135 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | 5459 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; |
5136 | else | 5460 | else |
@@ -5140,18 +5464,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5140 | ADVANCE_LP_RING(); | 5464 | ADVANCE_LP_RING(); |
5141 | } | 5465 | } |
5142 | 5466 | ||
5467 | work->pending_flip_obj = obj; | ||
5468 | |||
5143 | work->enable_stall_check = true; | 5469 | work->enable_stall_check = true; |
5144 | 5470 | ||
5145 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | 5471 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ |
5146 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | 5472 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; |
5147 | 5473 | ||
5148 | BEGIN_LP_RING(4); | 5474 | ret = BEGIN_LP_RING(4); |
5149 | switch(INTEL_INFO(dev)->gen) { | 5475 | if (ret) |
5476 | goto cleanup_objs; | ||
5477 | |||
5478 | /* Block clients from rendering to the new back buffer until | ||
5479 | * the flip occurs and the object is no longer visible. | ||
5480 | */ | ||
5481 | atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); | ||
5482 | |||
5483 | switch (INTEL_INFO(dev)->gen) { | ||
5150 | case 2: | 5484 | case 2: |
5151 | OUT_RING(MI_DISPLAY_FLIP | | 5485 | OUT_RING(MI_DISPLAY_FLIP | |
5152 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5486 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5153 | OUT_RING(fb->pitch); | 5487 | OUT_RING(fb->pitch); |
5154 | OUT_RING(obj_priv->gtt_offset + offset); | 5488 | OUT_RING(obj->gtt_offset + offset); |
5155 | OUT_RING(MI_NOOP); | 5489 | OUT_RING(MI_NOOP); |
5156 | break; | 5490 | break; |
5157 | 5491 | ||
@@ -5159,7 +5493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5159 | OUT_RING(MI_DISPLAY_FLIP_I915 | | 5493 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
5160 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5494 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5161 | OUT_RING(fb->pitch); | 5495 | OUT_RING(fb->pitch); |
5162 | OUT_RING(obj_priv->gtt_offset + offset); | 5496 | OUT_RING(obj->gtt_offset + offset); |
5163 | OUT_RING(MI_NOOP); | 5497 | OUT_RING(MI_NOOP); |
5164 | break; | 5498 | break; |
5165 | 5499 | ||
@@ -5172,7 +5506,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5172 | OUT_RING(MI_DISPLAY_FLIP | | 5506 | OUT_RING(MI_DISPLAY_FLIP | |
5173 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5507 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5174 | OUT_RING(fb->pitch); | 5508 | OUT_RING(fb->pitch); |
5175 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 5509 | OUT_RING(obj->gtt_offset | obj->tiling_mode); |
5176 | 5510 | ||
5177 | /* XXX Enabling the panel-fitter across page-flip is so far | 5511 | /* XXX Enabling the panel-fitter across page-flip is so far |
5178 | * untested on non-native modes, so ignore it for now. | 5512 | * untested on non-native modes, so ignore it for now. |
@@ -5186,8 +5520,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5186 | case 6: | 5520 | case 6: |
5187 | OUT_RING(MI_DISPLAY_FLIP | | 5521 | OUT_RING(MI_DISPLAY_FLIP | |
5188 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5522 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5189 | OUT_RING(fb->pitch | obj_priv->tiling_mode); | 5523 | OUT_RING(fb->pitch | obj->tiling_mode); |
5190 | OUT_RING(obj_priv->gtt_offset); | 5524 | OUT_RING(obj->gtt_offset); |
5191 | 5525 | ||
5192 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | 5526 | pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; |
5193 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; | 5527 | pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; |
@@ -5203,8 +5537,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5203 | return 0; | 5537 | return 0; |
5204 | 5538 | ||
5205 | cleanup_objs: | 5539 | cleanup_objs: |
5206 | drm_gem_object_unreference(work->old_fb_obj); | 5540 | drm_gem_object_unreference(&work->old_fb_obj->base); |
5207 | drm_gem_object_unreference(obj); | 5541 | drm_gem_object_unreference(&obj->base); |
5208 | cleanup_work: | 5542 | cleanup_work: |
5209 | mutex_unlock(&dev->struct_mutex); | 5543 | mutex_unlock(&dev->struct_mutex); |
5210 | 5544 | ||
@@ -5217,6 +5551,18 @@ cleanup_work: | |||
5217 | return ret; | 5551 | return ret; |
5218 | } | 5552 | } |
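The shared-framebuffer offset computed in the function above is plain row-major addressing; a worked example assuming a 1920x1080 32bpp framebuffer scanned out from its lower half:

#include <stdio.h>

int main(void)
{
	/* Assumed framebuffer layout, for illustration only. */
	unsigned int pitch = 1920 * 4;		/* bytes per scanline */
	unsigned int bits_per_pixel = 32;
	unsigned int x = 0, y = 1080;		/* CRTC origin in the fb */

	unsigned long offset =
		(unsigned long)y * pitch + x * bits_per_pixel / 8;
	printf("offset = %lu bytes\n", offset);	/* 8294400 */
	return 0;
}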
5219 | 5553 | ||
5554 | static void intel_crtc_reset(struct drm_crtc *crtc) | ||
5555 | { | ||
5556 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5557 | |||
5558 | /* Reset flags back to the 'unknown' status so that they | ||
5559 | * will be correctly set on the initial modeset. | ||
5560 | */ | ||
5561 | intel_crtc->cursor_addr = 0; | ||
5562 | intel_crtc->dpms_mode = -1; | ||
5563 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ | ||
5564 | } | ||
5565 | |||
5220 | static struct drm_crtc_helper_funcs intel_helper_funcs = { | 5566 | static struct drm_crtc_helper_funcs intel_helper_funcs = { |
5221 | .dpms = intel_crtc_dpms, | 5567 | .dpms = intel_crtc_dpms, |
5222 | .mode_fixup = intel_crtc_mode_fixup, | 5568 | .mode_fixup = intel_crtc_mode_fixup, |
@@ -5228,6 +5574,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = { | |||
5228 | }; | 5574 | }; |
5229 | 5575 | ||
5230 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 5576 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
5577 | .reset = intel_crtc_reset, | ||
5231 | .cursor_set = intel_crtc_cursor_set, | 5578 | .cursor_set = intel_crtc_cursor_set, |
5232 | .cursor_move = intel_crtc_cursor_move, | 5579 | .cursor_move = intel_crtc_cursor_move, |
5233 | .gamma_set = intel_crtc_gamma_set, | 5580 | .gamma_set = intel_crtc_gamma_set, |
@@ -5236,6 +5583,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { | |||
5236 | .page_flip = intel_crtc_page_flip, | 5583 | .page_flip = intel_crtc_page_flip, |
5237 | }; | 5584 | }; |
5238 | 5585 | ||
5586 | static void intel_sanitize_modesetting(struct drm_device *dev, | ||
5587 | int pipe, int plane) | ||
5588 | { | ||
5589 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5590 | u32 reg, val; | ||
5591 | |||
5592 | if (HAS_PCH_SPLIT(dev)) | ||
5593 | return; | ||
5594 | |||
5595 | /* Who knows what state these registers were left in by the BIOS or | ||
5596 | * grub? | ||
5597 | * | ||
5598 | * If we leave the registers in a conflicting state (e.g. with the | ||
5599 | * display plane reading from a pipe other than the one we intend | ||
5600 | * to use) then when we attempt to tear down the active mode, we will | ||
5601 | * not disable the pipes and planes in the correct order -- leaving | ||
5602 | * a plane reading from a disabled pipe and possibly leading to | ||
5603 | * undefined behaviour. | ||
5604 | */ | ||
5605 | |||
5606 | reg = DSPCNTR(plane); | ||
5607 | val = I915_READ(reg); | ||
5608 | |||
5609 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | ||
5610 | return; | ||
5611 | if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) | ||
5612 | return; | ||
5613 | |||
5614 | /* This display plane is active and attached to the other CPU pipe. */ | ||
5615 | pipe = !pipe; | ||
5616 | |||
5617 | /* Disable the plane and wait for it to stop reading from the pipe. */ | ||
5618 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | ||
5619 | intel_flush_display_plane(dev, plane); | ||
5620 | |||
5621 | if (IS_GEN2(dev)) | ||
5622 | intel_wait_for_vblank(dev, pipe); | ||
5623 | |||
5624 | if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
5625 | return; | ||
5626 | |||
5627 | /* Switch off the pipe. */ | ||
5628 | reg = PIPECONF(pipe); | ||
5629 | val = I915_READ(reg); | ||
5630 | if (val & PIPECONF_ENABLE) { | ||
5631 | I915_WRITE(reg, val & ~PIPECONF_ENABLE); | ||
5632 | intel_wait_for_pipe_off(dev, pipe); | ||
5633 | } | ||
5634 | } | ||
5239 | 5635 | ||
5240 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 5636 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
5241 | { | 5637 | { |
@@ -5269,9 +5665,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5269 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; | 5665 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; |
5270 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | 5666 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
5271 | 5667 | ||
5272 | intel_crtc->cursor_addr = 0; | 5668 | intel_crtc_reset(&intel_crtc->base); |
5273 | intel_crtc->dpms_mode = -1; | ||
5274 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ | ||
5275 | 5669 | ||
5276 | if (HAS_PCH_SPLIT(dev)) { | 5670 | if (HAS_PCH_SPLIT(dev)) { |
5277 | intel_helper_funcs.prepare = ironlake_crtc_prepare; | 5671 | intel_helper_funcs.prepare = ironlake_crtc_prepare; |
@@ -5287,10 +5681,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5287 | 5681 | ||
5288 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, | 5682 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, |
5289 | (unsigned long)intel_crtc); | 5683 | (unsigned long)intel_crtc); |
5684 | |||
5685 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); | ||
5290 | } | 5686 | } |
5291 | 5687 | ||
5292 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 5688 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
5293 | struct drm_file *file_priv) | 5689 | struct drm_file *file) |
5294 | { | 5690 | { |
5295 | drm_i915_private_t *dev_priv = dev->dev_private; | 5691 | drm_i915_private_t *dev_priv = dev->dev_private; |
5296 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; | 5692 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; |
@@ -5331,19 +5727,41 @@ static int intel_encoder_clones(struct drm_device *dev, int type_mask) | |||
5331 | return index_mask; | 5727 | return index_mask; |
5332 | } | 5728 | } |
5333 | 5729 | ||
5730 | static bool has_edp_a(struct drm_device *dev) | ||
5731 | { | ||
5732 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5733 | |||
5734 | if (!IS_MOBILE(dev)) | ||
5735 | return false; | ||
5736 | |||
5737 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) | ||
5738 | return false; | ||
5739 | |||
5740 | if (IS_GEN5(dev) && | ||
5741 | (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) | ||
5742 | return false; | ||
5743 | |||
5744 | return true; | ||
5745 | } | ||
5746 | |||
5334 | static void intel_setup_outputs(struct drm_device *dev) | 5747 | static void intel_setup_outputs(struct drm_device *dev) |
5335 | { | 5748 | { |
5336 | struct drm_i915_private *dev_priv = dev->dev_private; | 5749 | struct drm_i915_private *dev_priv = dev->dev_private; |
5337 | struct intel_encoder *encoder; | 5750 | struct intel_encoder *encoder; |
5338 | bool dpd_is_edp = false; | 5751 | bool dpd_is_edp = false; |
5752 | bool has_lvds = false; | ||
5339 | 5753 | ||
5340 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 5754 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
5341 | intel_lvds_init(dev); | 5755 | has_lvds = intel_lvds_init(dev); |
5756 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { | ||
5757 | /* disable the panel fitter on everything but LVDS */ | ||
5758 | I915_WRITE(PFIT_CONTROL, 0); | ||
5759 | } | ||
5342 | 5760 | ||
5343 | if (HAS_PCH_SPLIT(dev)) { | 5761 | if (HAS_PCH_SPLIT(dev)) { |
5344 | dpd_is_edp = intel_dpd_is_edp(dev); | 5762 | dpd_is_edp = intel_dpd_is_edp(dev); |
5345 | 5763 | ||
5346 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) | 5764 | if (has_edp_a(dev)) |
5347 | intel_dp_init(dev, DP_A); | 5765 | intel_dp_init(dev, DP_A); |
5348 | 5766 | ||
5349 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | 5767 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
@@ -5428,6 +5846,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
5428 | encoder->base.possible_clones = | 5846 | encoder->base.possible_clones = |
5429 | intel_encoder_clones(dev, encoder->clone_mask); | 5847 | intel_encoder_clones(dev, encoder->clone_mask); |
5430 | } | 5848 | } |
5849 | |||
5850 | intel_panel_setup_backlight(dev); | ||
5431 | } | 5851 | } |
5432 | 5852 | ||
5433 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 5853 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -5435,19 +5855,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
5435 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 5855 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5436 | 5856 | ||
5437 | drm_framebuffer_cleanup(fb); | 5857 | drm_framebuffer_cleanup(fb); |
5438 | drm_gem_object_unreference_unlocked(intel_fb->obj); | 5858 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); |
5439 | 5859 | ||
5440 | kfree(intel_fb); | 5860 | kfree(intel_fb); |
5441 | } | 5861 | } |
5442 | 5862 | ||
5443 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | 5863 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, |
5444 | struct drm_file *file_priv, | 5864 | struct drm_file *file, |
5445 | unsigned int *handle) | 5865 | unsigned int *handle) |
5446 | { | 5866 | { |
5447 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 5867 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
5448 | struct drm_gem_object *object = intel_fb->obj; | 5868 | struct drm_i915_gem_object *obj = intel_fb->obj; |
5449 | 5869 | ||
5450 | return drm_gem_handle_create(file_priv, object, handle); | 5870 | return drm_gem_handle_create(file, &obj->base, handle); |
5451 | } | 5871 | } |
5452 | 5872 | ||
5453 | static const struct drm_framebuffer_funcs intel_fb_funcs = { | 5873 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
@@ -5458,12 +5878,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { | |||
5458 | int intel_framebuffer_init(struct drm_device *dev, | 5878 | int intel_framebuffer_init(struct drm_device *dev, |
5459 | struct intel_framebuffer *intel_fb, | 5879 | struct intel_framebuffer *intel_fb, |
5460 | struct drm_mode_fb_cmd *mode_cmd, | 5880 | struct drm_mode_fb_cmd *mode_cmd, |
5461 | struct drm_gem_object *obj) | 5881 | struct drm_i915_gem_object *obj) |
5462 | { | 5882 | { |
5463 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | ||
5464 | int ret; | 5883 | int ret; |
5465 | 5884 | ||
5466 | if (obj_priv->tiling_mode == I915_TILING_Y) | 5885 | if (obj->tiling_mode == I915_TILING_Y) |
5467 | return -EINVAL; | 5886 | return -EINVAL; |
5468 | 5887 | ||
5469 | if (mode_cmd->pitch & 63) | 5888 | if (mode_cmd->pitch & 63) |
@@ -5495,11 +5914,11 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5495 | struct drm_file *filp, | 5914 | struct drm_file *filp, |
5496 | struct drm_mode_fb_cmd *mode_cmd) | 5915 | struct drm_mode_fb_cmd *mode_cmd) |
5497 | { | 5916 | { |
5498 | struct drm_gem_object *obj; | 5917 | struct drm_i915_gem_object *obj; |
5499 | struct intel_framebuffer *intel_fb; | 5918 | struct intel_framebuffer *intel_fb; |
5500 | int ret; | 5919 | int ret; |
5501 | 5920 | ||
5502 | obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); | 5921 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); |
5503 | if (!obj) | 5922 | if (!obj) |
5504 | return ERR_PTR(-ENOENT); | 5923 | return ERR_PTR(-ENOENT); |
5505 | 5924 | ||
@@ -5507,10 +5926,9 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
5507 | if (!intel_fb) | 5926 | if (!intel_fb) |
5508 | return ERR_PTR(-ENOMEM); | 5927 | return ERR_PTR(-ENOMEM); |
5509 | 5928 | ||
5510 | ret = intel_framebuffer_init(dev, intel_fb, | 5929 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); |
5511 | mode_cmd, obj); | ||
5512 | if (ret) { | 5930 | if (ret) { |
5513 | drm_gem_object_unreference_unlocked(obj); | 5931 | drm_gem_object_unreference_unlocked(&obj->base); |
5514 | kfree(intel_fb); | 5932 | kfree(intel_fb); |
5515 | return ERR_PTR(ret); | 5933 | return ERR_PTR(ret); |
5516 | } | 5934 | } |
@@ -5523,10 +5941,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
5523 | .output_poll_changed = intel_fb_output_poll_changed, | 5941 | .output_poll_changed = intel_fb_output_poll_changed, |
5524 | }; | 5942 | }; |
5525 | 5943 | ||
5526 | static struct drm_gem_object * | 5944 | static struct drm_i915_gem_object * |
5527 | intel_alloc_context_page(struct drm_device *dev) | 5945 | intel_alloc_context_page(struct drm_device *dev) |
5528 | { | 5946 | { |
5529 | struct drm_gem_object *ctx; | 5947 | struct drm_i915_gem_object *ctx; |
5530 | int ret; | 5948 | int ret; |
5531 | 5949 | ||
5532 | ctx = i915_gem_alloc_object(dev, 4096); | 5950 | ctx = i915_gem_alloc_object(dev, 4096); |
@@ -5536,7 +5954,7 @@ intel_alloc_context_page(struct drm_device *dev) | |||
5536 | } | 5954 | } |
5537 | 5955 | ||
5538 | mutex_lock(&dev->struct_mutex); | 5956 | mutex_lock(&dev->struct_mutex); |
5539 | ret = i915_gem_object_pin(ctx, 4096); | 5957 | ret = i915_gem_object_pin(ctx, 4096, true); |
5540 | if (ret) { | 5958 | if (ret) { |
5541 | DRM_ERROR("failed to pin power context: %d\n", ret); | 5959 | DRM_ERROR("failed to pin power context: %d\n", ret); |
5542 | goto err_unref; | 5960 | goto err_unref; |
@@ -5554,7 +5972,7 @@ intel_alloc_context_page(struct drm_device *dev) | |||
5554 | err_unpin: | 5972 | err_unpin: |
5555 | i915_gem_object_unpin(ctx); | 5973 | i915_gem_object_unpin(ctx); |
5556 | err_unref: | 5974 | err_unref: |
5557 | drm_gem_object_unreference(ctx); | 5975 | drm_gem_object_unreference(&ctx->base); |
5558 | mutex_unlock(&dev->struct_mutex); | 5976 | mutex_unlock(&dev->struct_mutex); |
5559 | return NULL; | 5977 | return NULL; |
5560 | } | 5978 | } |
@@ -5666,6 +6084,25 @@ void ironlake_disable_drps(struct drm_device *dev) | |||
5666 | 6084 | ||
5667 | } | 6085 | } |
5668 | 6086 | ||
6087 | void gen6_set_rps(struct drm_device *dev, u8 val) | ||
6088 | { | ||
6089 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6090 | u32 swreq; | ||
6091 | |||
6092 | swreq = (val & 0x3ff) << 25; | ||
6093 | I915_WRITE(GEN6_RPNSWREQ, swreq); | ||
6094 | } | ||
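Given the later comment that these delay values are in units of 100MHz, the request written by gen6_set_rps() can be read as follows (a hedged sketch; the target value is assumed):

#include <stdio.h>

int main(void)
{
	unsigned int val = 13;	/* assumed target: 13 * 100MHz = 1.3GHz */
	unsigned int swreq = (val & 0x3ff) << 25;

	/* The masked value lands in the high bits of GEN6_RPNSWREQ. */
	printf("GEN6_RPNSWREQ <- 0x%08x (%u00 MHz requested)\n", swreq, val);
	return 0;
}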
6095 | |||
6096 | void gen6_disable_rps(struct drm_device *dev) | ||
6097 | { | ||
6098 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6099 | |||
6100 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | ||
6101 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | ||
6102 | I915_WRITE(GEN6_PMIER, 0); | ||
6103 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); | ||
6104 | } | ||
6105 | |||
5669 | static unsigned long intel_pxfreq(u32 vidfreq) | 6106 | static unsigned long intel_pxfreq(u32 vidfreq) |
5670 | { | 6107 | { |
5671 | unsigned long freq; | 6108 | unsigned long freq; |
@@ -5752,7 +6189,123 @@ void intel_init_emon(struct drm_device *dev) | |||
5752 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); | 6189 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); |
5753 | } | 6190 | } |
5754 | 6191 | ||
5755 | void intel_init_clock_gating(struct drm_device *dev) | 6192 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
6193 | { | ||
6194 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | ||
6195 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | ||
6196 | u32 pcu_mbox; | ||
6197 | int cur_freq, min_freq, max_freq; | ||
6198 | int i; | ||
6199 | |||
6200 | /* Here begins a magic sequence of register writes to enable | ||
6201 | * auto-downclocking. | ||
6202 | * | ||
6203 | * Perhaps there might be some value in exposing these to | ||
6204 | * userspace... | ||
6205 | */ | ||
6206 | I915_WRITE(GEN6_RC_STATE, 0); | ||
6207 | __gen6_force_wake_get(dev_priv); | ||
6208 | |||
6209 | /* disable the counters and set deterministic thresholds */ | ||
6210 | I915_WRITE(GEN6_RC_CONTROL, 0); | ||
6211 | |||
6212 | I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); | ||
6213 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); | ||
6214 | I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); | ||
6215 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); | ||
6216 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); | ||
6217 | |||
6218 | for (i = 0; i < I915_NUM_RINGS; i++) | ||
6219 | I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); | ||
6220 | |||
6221 | I915_WRITE(GEN6_RC_SLEEP, 0); | ||
6222 | I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); | ||
6223 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); | ||
6224 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); | ||
6225 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ | ||
6226 | |||
6227 | I915_WRITE(GEN6_RC_CONTROL, | ||
6228 | GEN6_RC_CTL_RC6p_ENABLE | | ||
6229 | GEN6_RC_CTL_RC6_ENABLE | | ||
6230 | GEN6_RC_CTL_EI_MODE(1) | | ||
6231 | GEN6_RC_CTL_HW_ENABLE); | ||
6232 | |||
6233 | I915_WRITE(GEN6_RPNSWREQ, | ||
6234 | GEN6_FREQUENCY(10) | | ||
6235 | GEN6_OFFSET(0) | | ||
6236 | GEN6_AGGRESSIVE_TURBO); | ||
6237 | I915_WRITE(GEN6_RC_VIDEO_FREQ, | ||
6238 | GEN6_FREQUENCY(12)); | ||
6239 | |||
6240 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); | ||
6241 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | ||
6242 | 18 << 24 | | ||
6243 | 6 << 16); | ||
6244 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000); | ||
6245 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000); | ||
6246 | I915_WRITE(GEN6_RP_UP_EI, 100000); | ||
6247 | I915_WRITE(GEN6_RP_DOWN_EI, 300000); | ||
6248 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); | ||
6249 | I915_WRITE(GEN6_RP_CONTROL, | ||
6250 | GEN6_RP_MEDIA_TURBO | | ||
6251 | GEN6_RP_USE_NORMAL_FREQ | | ||
6252 | GEN6_RP_MEDIA_IS_GFX | | ||
6253 | GEN6_RP_ENABLE | | ||
6254 | GEN6_RP_UP_BUSY_MAX | | ||
6255 | GEN6_RP_DOWN_BUSY_MIN); | ||
6256 | |||
6257 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6258 | 500)) | ||
6259 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
6260 | |||
6261 | I915_WRITE(GEN6_PCODE_DATA, 0); | ||
6262 | I915_WRITE(GEN6_PCODE_MAILBOX, | ||
6263 | GEN6_PCODE_READY | | ||
6264 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | ||
6265 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6266 | 500)) | ||
6267 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
6268 | |||
6269 | min_freq = (rp_state_cap & 0xff0000) >> 16; | ||
6270 | max_freq = rp_state_cap & 0xff; | ||
6271 | cur_freq = (gt_perf_status & 0xff00) >> 8; | ||
6272 | |||
6273 | /* Check for overclock support */ | ||
6274 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6275 | 500)) | ||
6276 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
6277 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); | ||
6278 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); | ||
6279 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
6280 | 500)) | ||
6281 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
6282 | if (pcu_mbox & (1<<31)) { /* OC supported */ | ||
6283 | max_freq = pcu_mbox & 0xff; | ||
6284 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 100); | ||
6285 | } | ||
6286 | |||
6287 | /* In units of 100MHz */ | ||
6288 | dev_priv->max_delay = max_freq; | ||
6289 | dev_priv->min_delay = min_freq; | ||
6290 | dev_priv->cur_delay = cur_freq; | ||
6291 | |||
6292 | /* requires MSI enabled */ | ||
6293 | I915_WRITE(GEN6_PMIER, | ||
6294 | GEN6_PM_MBOX_EVENT | | ||
6295 | GEN6_PM_THERMAL_EVENT | | ||
6296 | GEN6_PM_RP_DOWN_TIMEOUT | | ||
6297 | GEN6_PM_RP_UP_THRESHOLD | | ||
6298 | GEN6_PM_RP_DOWN_THRESHOLD | | ||
6299 | GEN6_PM_RP_UP_EI_EXPIRED | | ||
6300 | GEN6_PM_RP_DOWN_EI_EXPIRED); | ||
6301 | I915_WRITE(GEN6_PMIMR, 0); | ||
6302 | /* enable all PM interrupts */ | ||
6303 | I915_WRITE(GEN6_PMINTRMSK, 0); | ||
6304 | |||
6305 | __gen6_force_wake_put(dev_priv); | ||
6306 | } | ||
6307 | |||
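An aside for readers tracing the RPS bring-up above: every pcode exchange repeats one idiom — wait for the mailbox to go idle, write the payload, kick the mailbox, then wait for it to drain. A minimal sketch of that transaction as a helper, reusing this driver's I915_READ/I915_WRITE and wait_for macros; the name gen6_pcode_write is hypothetical, not part of this patch:

/* Hypothetical helper: one GEN6 pcode mailbox transaction,
 * mirroring the wait -> write -> kick -> wait pattern that
 * gen6_enable_rps() open-codes above. */
static int gen6_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 data)
{
        /* The mailbox must be idle before a new request is submitted. */
        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
                return -ETIMEDOUT;

        I915_WRITE(GEN6_PCODE_DATA, data);
        I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

        /* Hardware clears the READY bit once it has consumed the request. */
        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
                return -ETIMEDOUT;

        return 0;
}

With such a helper, the min-frequency table write above collapses to gen6_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0).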
6308 | void intel_enable_clock_gating(struct drm_device *dev) | ||
5756 | { | 6309 | { |
5757 | struct drm_i915_private *dev_priv = dev->dev_private; | 6310 | struct drm_i915_private *dev_priv = dev->dev_private; |
5758 | 6311 | ||
@@ -5765,13 +6318,17 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5765 | 6318 | ||
5766 | if (IS_GEN5(dev)) { | 6319 | if (IS_GEN5(dev)) { |
5767 | /* Required for FBC */ | 6320 | /* Required for FBC */ |
5768 | dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; | 6321 | dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | |
6322 | DPFCRUNIT_CLOCK_GATE_DISABLE | | ||
6323 | DPFDUNIT_CLOCK_GATE_DISABLE; | ||
5769 | /* Required for CxSR */ | 6324 | /* Required for CxSR */ |
5770 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | 6325 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; |
5771 | 6326 | ||
5772 | I915_WRITE(PCH_3DCGDIS0, | 6327 | I915_WRITE(PCH_3DCGDIS0, |
5773 | MARIUNIT_CLOCK_GATE_DISABLE | | 6328 | MARIUNIT_CLOCK_GATE_DISABLE | |
5774 | SVSMUNIT_CLOCK_GATE_DISABLE); | 6329 | SVSMUNIT_CLOCK_GATE_DISABLE); |
6330 | I915_WRITE(PCH_3DCGDIS1, | ||
6331 | VFMUNIT_CLOCK_GATE_DISABLE); | ||
5775 | } | 6332 | } |
5776 | 6333 | ||
5777 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | 6334 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
@@ -5800,9 +6357,9 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5800 | I915_WRITE(DISP_ARB_CTL, | 6357 | I915_WRITE(DISP_ARB_CTL, |
5801 | (I915_READ(DISP_ARB_CTL) | | 6358 | (I915_READ(DISP_ARB_CTL) | |
5802 | DISP_FBC_WM_DIS)); | 6359 | DISP_FBC_WM_DIS)); |
5803 | I915_WRITE(WM3_LP_ILK, 0); | 6360 | I915_WRITE(WM3_LP_ILK, 0); |
5804 | I915_WRITE(WM2_LP_ILK, 0); | 6361 | I915_WRITE(WM2_LP_ILK, 0); |
5805 | I915_WRITE(WM1_LP_ILK, 0); | 6362 | I915_WRITE(WM1_LP_ILK, 0); |
5806 | } | 6363 | } |
5807 | /* | 6364 | /* |
5808 | * Based on the hardware documentation, the following bits | 6365 | * Based on the hardware documentation, the following bits |
@@ -5824,7 +6381,49 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5824 | ILK_DPFC_DIS2 | | 6381 | ILK_DPFC_DIS2 | |
5825 | ILK_CLK_FBC); | 6382 | ILK_CLK_FBC); |
5826 | } | 6383 | } |
5827 | return; | 6384 | |
6385 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
6386 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
6387 | ILK_ELPIN_409_SELECT); | ||
6388 | |||
6389 | if (IS_GEN5(dev)) { | ||
6390 | I915_WRITE(_3D_CHICKEN2, | ||
6391 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | ||
6392 | _3D_CHICKEN2_WM_READ_PIPELINED); | ||
6393 | } | ||
6394 | |||
6395 | if (IS_GEN6(dev)) { | ||
6396 | I915_WRITE(WM3_LP_ILK, 0); | ||
6397 | I915_WRITE(WM2_LP_ILK, 0); | ||
6398 | I915_WRITE(WM1_LP_ILK, 0); | ||
6399 | |||
6400 | /* | ||
6401 | * According to the spec, the following bits should be | ||
6402 | * set in order to enable memory self-refresh and FBC: | ||
6403 | * bits 21 and 22 of 0x42000 | ||
6404 | * bits 21 and 22 of 0x42004 | ||
6405 | * bits 5 and 7 of 0x42020 | ||
6406 | * bit 14 of 0x70180 | ||
6407 | * bit 14 of 0x71180 | ||
6408 | */ | ||
6409 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | ||
6410 | I915_READ(ILK_DISPLAY_CHICKEN1) | | ||
6411 | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); | ||
6412 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | ||
6413 | I915_READ(ILK_DISPLAY_CHICKEN2) | | ||
6414 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); | ||
6415 | I915_WRITE(ILK_DSPCLK_GATE, | ||
6416 | I915_READ(ILK_DSPCLK_GATE) | | ||
6417 | ILK_DPARB_CLK_GATE | | ||
6418 | ILK_DPFD_CLK_GATE); | ||
6419 | |||
6420 | I915_WRITE(DSPACNTR, | ||
6421 | I915_READ(DSPACNTR) | | ||
6422 | DISPPLANE_TRICKLE_FEED_DISABLE); | ||
6423 | I915_WRITE(DSPBCNTR, | ||
6424 | I915_READ(DSPBCNTR) | | ||
6425 | DISPPLANE_TRICKLE_FEED_DISABLE); | ||
6426 | } | ||
5828 | } else if (IS_G4X(dev)) { | 6427 | } else if (IS_G4X(dev)) { |
5829 | uint32_t dspclk_gate; | 6428 | uint32_t dspclk_gate; |
5830 | I915_WRITE(RENCLK_GATE_D1, 0); | 6429 | I915_WRITE(RENCLK_GATE_D1, 0); |
@@ -5862,55 +6461,84 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5862 | } else if (IS_I830(dev)) { | 6461 | } else if (IS_I830(dev)) { |
5863 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | 6462 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); |
5864 | } | 6463 | } |
6464 | } | ||
5865 | 6465 | ||
5866 | /* | 6466 | void intel_disable_clock_gating(struct drm_device *dev) |
5867 | * GPU can automatically power down the render unit if given a page | 6467 | { |
5868 | * to save state. | 6468 | struct drm_i915_private *dev_priv = dev->dev_private; |
5869 | */ | 6469 | |
5870 | if (IS_IRONLAKE_M(dev)) { | 6470 | if (dev_priv->renderctx) { |
5871 | if (dev_priv->renderctx == NULL) | 6471 | struct drm_i915_gem_object *obj = dev_priv->renderctx; |
5872 | dev_priv->renderctx = intel_alloc_context_page(dev); | 6472 | |
5873 | if (dev_priv->renderctx) { | 6473 | I915_WRITE(CCID, 0); |
5874 | struct drm_i915_gem_object *obj_priv; | 6474 | POSTING_READ(CCID); |
5875 | obj_priv = to_intel_bo(dev_priv->renderctx); | 6475 | |
5876 | if (obj_priv) { | 6476 | i915_gem_object_unpin(obj); |
5877 | BEGIN_LP_RING(4); | 6477 | drm_gem_object_unreference(&obj->base); |
5878 | OUT_RING(MI_SET_CONTEXT); | 6478 | dev_priv->renderctx = NULL; |
5879 | OUT_RING(obj_priv->gtt_offset | | ||
5880 | MI_MM_SPACE_GTT | | ||
5881 | MI_SAVE_EXT_STATE_EN | | ||
5882 | MI_RESTORE_EXT_STATE_EN | | ||
5883 | MI_RESTORE_INHIBIT); | ||
5884 | OUT_RING(MI_NOOP); | ||
5885 | OUT_RING(MI_FLUSH); | ||
5886 | ADVANCE_LP_RING(); | ||
5887 | } | ||
5888 | } else | ||
5889 | DRM_DEBUG_KMS("Failed to allocate render context." | ||
5890 | "Disable RC6\n"); | ||
5891 | } | 6479 | } |
5892 | 6480 | ||
5893 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { | 6481 | if (dev_priv->pwrctx) { |
5894 | struct drm_i915_gem_object *obj_priv = NULL; | 6482 | struct drm_i915_gem_object *obj = dev_priv->pwrctx; |
5895 | 6483 | ||
5896 | if (dev_priv->pwrctx) { | 6484 | I915_WRITE(PWRCTXA, 0); |
5897 | obj_priv = to_intel_bo(dev_priv->pwrctx); | 6485 | POSTING_READ(PWRCTXA); |
5898 | } else { | ||
5899 | struct drm_gem_object *pwrctx; | ||
5900 | 6486 | ||
5901 | pwrctx = intel_alloc_context_page(dev); | 6487 | i915_gem_object_unpin(obj); |
5902 | if (pwrctx) { | 6488 | drm_gem_object_unreference(&obj->base); |
5903 | dev_priv->pwrctx = pwrctx; | 6489 | dev_priv->pwrctx = NULL; |
5904 | obj_priv = to_intel_bo(pwrctx); | 6490 | } |
5905 | } | 6491 | } |
5906 | } | ||
5907 | 6492 | ||
5908 | if (obj_priv) { | 6493 | static void ironlake_disable_rc6(struct drm_device *dev) |
5909 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | 6494 | { |
5910 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 6495 | struct drm_i915_private *dev_priv = dev->dev_private; |
5911 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 6496 | |
5912 | } | 6497 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ |
6498 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | ||
6499 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | ||
6500 | 10); | ||
6501 | POSTING_READ(CCID); | ||
6502 | I915_WRITE(PWRCTXA, 0); | ||
6503 | POSTING_READ(PWRCTXA); | ||
6504 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
6505 | POSTING_READ(RSTDBYCTL); | ||
6506 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6507 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
6508 | dev_priv->renderctx = NULL; | ||
6509 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
6510 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | ||
6511 | dev_priv->pwrctx = NULL; | ||
6512 | } | ||
6513 | |||
6514 | void ironlake_enable_rc6(struct drm_device *dev) | ||
6515 | { | ||
6516 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6517 | int ret; | ||
6518 | |||
6519 | /* | ||
6520 | * GPU can automatically power down the render unit if given a page | ||
6521 | * to save state. | ||
6522 | */ | ||
6523 | ret = BEGIN_LP_RING(6); | ||
6524 | if (ret) { | ||
6525 | ironlake_disable_rc6(dev); | ||
6526 | return; | ||
5913 | } | 6527 | } |
6528 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); | ||
6529 | OUT_RING(MI_SET_CONTEXT); | ||
6530 | OUT_RING(dev_priv->renderctx->gtt_offset | | ||
6531 | MI_MM_SPACE_GTT | | ||
6532 | MI_SAVE_EXT_STATE_EN | | ||
6533 | MI_RESTORE_EXT_STATE_EN | | ||
6534 | MI_RESTORE_INHIBIT); | ||
6535 | OUT_RING(MI_SUSPEND_FLUSH); | ||
6536 | OUT_RING(MI_NOOP); | ||
6537 | OUT_RING(MI_FLUSH); | ||
6538 | ADVANCE_LP_RING(); | ||
6539 | |||
6540 | I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); | ||
6541 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
5914 | } | 6542 | } |
5915 | 6543 | ||
5916 | /* Set up chip specific display functions */ | 6544 | /* Set up chip specific display functions */ |
@@ -5925,7 +6553,7 @@ static void intel_init_display(struct drm_device *dev) | |||
5925 | dev_priv->display.dpms = i9xx_crtc_dpms; | 6553 | dev_priv->display.dpms = i9xx_crtc_dpms; |
5926 | 6554 | ||
5927 | if (I915_HAS_FBC(dev)) { | 6555 | if (I915_HAS_FBC(dev)) { |
5928 | if (IS_IRONLAKE_M(dev)) { | 6556 | if (HAS_PCH_SPLIT(dev)) { |
5929 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | 6557 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
5930 | dev_priv->display.enable_fbc = ironlake_enable_fbc; | 6558 | dev_priv->display.enable_fbc = ironlake_enable_fbc; |
5931 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | 6559 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
@@ -5974,6 +6602,14 @@ static void intel_init_display(struct drm_device *dev) | |||
5974 | "Disable CxSR\n"); | 6602 | "Disable CxSR\n"); |
5975 | dev_priv->display.update_wm = NULL; | 6603 | dev_priv->display.update_wm = NULL; |
5976 | } | 6604 | } |
6605 | } else if (IS_GEN6(dev)) { | ||
6606 | if (SNB_READ_WM0_LATENCY()) { | ||
6607 | dev_priv->display.update_wm = sandybridge_update_wm; | ||
6608 | } else { | ||
6609 | DRM_DEBUG_KMS("Failed to read display plane latency. " | ||
6610 | "Disable CxSR\n"); | ||
6611 | dev_priv->display.update_wm = NULL; | ||
6612 | } | ||
5977 | } else | 6613 | } else |
5978 | dev_priv->display.update_wm = NULL; | 6614 | dev_priv->display.update_wm = NULL; |
5979 | } else if (IS_PINEVIEW(dev)) { | 6615 | } else if (IS_PINEVIEW(dev)) { |
@@ -6119,12 +6755,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
6119 | dev->mode_config.max_width = 8192; | 6755 | dev->mode_config.max_width = 8192; |
6120 | dev->mode_config.max_height = 8192; | 6756 | dev->mode_config.max_height = 8192; |
6121 | } | 6757 | } |
6122 | 6758 | dev->mode_config.fb_base = dev->agp->base; | |
6123 | /* set memory base */ | ||
6124 | if (IS_GEN2(dev)) | ||
6125 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); | ||
6126 | else | ||
6127 | dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); | ||
6128 | 6759 | ||
6129 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) | 6760 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
6130 | dev_priv->num_pipe = 2; | 6761 | dev_priv->num_pipe = 2; |
@@ -6139,7 +6770,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
6139 | 6770 | ||
6140 | intel_setup_outputs(dev); | 6771 | intel_setup_outputs(dev); |
6141 | 6772 | ||
6142 | intel_init_clock_gating(dev); | 6773 | intel_enable_clock_gating(dev); |
6143 | 6774 | ||
6144 | /* Just disable it once at startup */ | 6775 | /* Just disable it once at startup */ |
6145 | i915_disable_vga(dev); | 6776 | i915_disable_vga(dev); |
@@ -6149,6 +6780,24 @@ void intel_modeset_init(struct drm_device *dev) | |||
6149 | intel_init_emon(dev); | 6780 | intel_init_emon(dev); |
6150 | } | 6781 | } |
6151 | 6782 | ||
6783 | if (IS_GEN6(dev)) | ||
6784 | gen6_enable_rps(dev_priv); | ||
6785 | |||
6786 | if (IS_IRONLAKE_M(dev)) { | ||
6787 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
6788 | if (!dev_priv->renderctx) | ||
6789 | goto skip_rc6; | ||
6790 | dev_priv->pwrctx = intel_alloc_context_page(dev); | ||
6791 | if (!dev_priv->pwrctx) { | ||
6792 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6793 | drm_gem_object_unreference(&dev_priv->renderctx->base); | ||
6794 | dev_priv->renderctx = NULL; | ||
6795 | goto skip_rc6; | ||
6796 | } | ||
6797 | ironlake_enable_rc6(dev); | ||
6798 | } | ||
6799 | |||
6800 | skip_rc6: | ||
6152 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 6801 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
6153 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 6802 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
6154 | (unsigned long)dev); | 6803 | (unsigned long)dev); |
@@ -6180,28 +6829,13 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6180 | if (dev_priv->display.disable_fbc) | 6829 | if (dev_priv->display.disable_fbc) |
6181 | dev_priv->display.disable_fbc(dev); | 6830 | dev_priv->display.disable_fbc(dev); |
6182 | 6831 | ||
6183 | if (dev_priv->renderctx) { | ||
6184 | struct drm_i915_gem_object *obj_priv; | ||
6185 | |||
6186 | obj_priv = to_intel_bo(dev_priv->renderctx); | ||
6187 | I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); | ||
6188 | I915_READ(CCID); | ||
6189 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6190 | drm_gem_object_unreference(dev_priv->renderctx); | ||
6191 | } | ||
6192 | |||
6193 | if (dev_priv->pwrctx) { | ||
6194 | struct drm_i915_gem_object *obj_priv; | ||
6195 | |||
6196 | obj_priv = to_intel_bo(dev_priv->pwrctx); | ||
6197 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); | ||
6198 | I915_READ(PWRCTXA); | ||
6199 | i915_gem_object_unpin(dev_priv->pwrctx); | ||
6200 | drm_gem_object_unreference(dev_priv->pwrctx); | ||
6201 | } | ||
6202 | |||
6203 | if (IS_IRONLAKE_M(dev)) | 6832 | if (IS_IRONLAKE_M(dev)) |
6204 | ironlake_disable_drps(dev); | 6833 | ironlake_disable_drps(dev); |
6834 | if (IS_GEN6(dev)) | ||
6835 | gen6_disable_rps(dev); | ||
6836 | |||
6837 | if (IS_IRONLAKE_M(dev)) | ||
6838 | ironlake_disable_rc6(dev); | ||
6205 | 6839 | ||
6206 | mutex_unlock(&dev->struct_mutex); | 6840 | mutex_unlock(&dev->struct_mutex); |
6207 | 6841 | ||
@@ -6253,3 +6887,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | |||
6253 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | 6887 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); |
6254 | return 0; | 6888 | return 0; |
6255 | } | 6889 | } |
6890 | |||
6891 | #ifdef CONFIG_DEBUG_FS | ||
6892 | #include <linux/seq_file.h> | ||
6893 | |||
6894 | struct intel_display_error_state { | ||
6895 | struct intel_cursor_error_state { | ||
6896 | u32 control; | ||
6897 | u32 position; | ||
6898 | u32 base; | ||
6899 | u32 size; | ||
6900 | } cursor[2]; | ||
6901 | |||
6902 | struct intel_pipe_error_state { | ||
6903 | u32 conf; | ||
6904 | u32 source; | ||
6905 | |||
6906 | u32 htotal; | ||
6907 | u32 hblank; | ||
6908 | u32 hsync; | ||
6909 | u32 vtotal; | ||
6910 | u32 vblank; | ||
6911 | u32 vsync; | ||
6912 | } pipe[2]; | ||
6913 | |||
6914 | struct intel_plane_error_state { | ||
6915 | u32 control; | ||
6916 | u32 stride; | ||
6917 | u32 size; | ||
6918 | u32 pos; | ||
6919 | u32 addr; | ||
6920 | u32 surface; | ||
6921 | u32 tile_offset; | ||
6922 | } plane[2]; | ||
6923 | }; | ||
6924 | |||
6925 | struct intel_display_error_state * | ||
6926 | intel_display_capture_error_state(struct drm_device *dev) | ||
6927 | { | ||
6928 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
6929 | struct intel_display_error_state *error; | ||
6930 | int i; | ||
6931 | |||
6932 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | ||
6933 | if (error == NULL) | ||
6934 | return NULL; | ||
6935 | |||
6936 | for (i = 0; i < 2; i++) { | ||
6937 | error->cursor[i].control = I915_READ(CURCNTR(i)); | ||
6938 | error->cursor[i].position = I915_READ(CURPOS(i)); | ||
6939 | error->cursor[i].base = I915_READ(CURBASE(i)); | ||
6940 | |||
6941 | error->plane[i].control = I915_READ(DSPCNTR(i)); | ||
6942 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); | ||
6943 | error->plane[i].size = I915_READ(DSPSIZE(i)); | ||
6944 | error->plane[i].pos = I915_READ(DSPPOS(i)); | ||
6945 | error->plane[i].addr = I915_READ(DSPADDR(i)); | ||
6946 | if (INTEL_INFO(dev)->gen >= 4) { | ||
6947 | error->plane[i].surface = I915_READ(DSPSURF(i)); | ||
6948 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); | ||
6949 | } | ||
6950 | |||
6951 | error->pipe[i].conf = I915_READ(PIPECONF(i)); | ||
6952 | error->pipe[i].source = I915_READ(PIPESRC(i)); | ||
6953 | error->pipe[i].htotal = I915_READ(HTOTAL(i)); | ||
6954 | error->pipe[i].hblank = I915_READ(HBLANK(i)); | ||
6955 | error->pipe[i].hsync = I915_READ(HSYNC(i)); | ||
6956 | error->pipe[i].vtotal = I915_READ(VTOTAL(i)); | ||
6957 | error->pipe[i].vblank = I915_READ(VBLANK(i)); | ||
6958 | error->pipe[i].vsync = I915_READ(VSYNC(i)); | ||
6959 | } | ||
6960 | |||
6961 | return error; | ||
6962 | } | ||
6963 | |||
6964 | void | ||
6965 | intel_display_print_error_state(struct seq_file *m, | ||
6966 | struct drm_device *dev, | ||
6967 | struct intel_display_error_state *error) | ||
6968 | { | ||
6969 | int i; | ||
6970 | |||
6971 | for (i = 0; i < 2; i++) { | ||
6972 | seq_printf(m, "Pipe [%d]:\n", i); | ||
6973 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); | ||
6974 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); | ||
6975 | seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); | ||
6976 | seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); | ||
6977 | seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); | ||
6978 | seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); | ||
6979 | seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); | ||
6980 | seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); | ||
6981 | |||
6982 | seq_printf(m, "Plane [%d]:\n", i); | ||
6983 | seq_printf(m, " CNTR: %08x\n", error->plane[i].control); | ||
6984 | seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); | ||
6985 | seq_printf(m, " SIZE: %08x\n", error->plane[i].size); | ||
6986 | seq_printf(m, " POS: %08x\n", error->plane[i].pos); | ||
6987 | seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); | ||
6988 | if (INTEL_INFO(dev)->gen >= 4) { | ||
6989 | seq_printf(m, " SURF: %08x\n", error->plane[i].surface); | ||
6990 | seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); | ||
6991 | } | ||
6992 | |||
6993 | seq_printf(m, "Cursor [%d]:\n", i); | ||
6994 | seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); | ||
6995 | seq_printf(m, " POS: %08x\n", error->cursor[i].position); | ||
6996 | seq_printf(m, " BASE: %08x\n", error->cursor[i].base); | ||
6997 | } | ||
6998 | } | ||
6999 | #endif | ||
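A note on the error-state pair that closes this file: the capture side allocates with GFP_ATOMIC because it runs while a GPU hang is being recorded and must not sleep; the print side runs later from process context. A sketch of how the two are meant to be paired from a debugfs show routine — the function name and the m->private wiring are assumptions, not code from this patch:

/* Hypothetical debugfs hook: capture atomically, print at leisure. */
static int i915_display_error_show(struct seq_file *m, void *unused)
{
        struct drm_device *dev = m->private;    /* assumed wiring */
        struct intel_display_error_state *error;

        error = intel_display_capture_error_state(dev);
        if (error == NULL)
                return -ENOMEM;

        intel_display_print_error_state(m, dev, error);
        kfree(error);
        return 0;
}

In the driver the captured state is typically stashed alongside the rest of the GPU error record; the sketch only shows the calling convention.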
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index c8e005553310..1f4242b682c8 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -479,6 +479,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
479 | uint16_t address = algo_data->address; | 479 | uint16_t address = algo_data->address; |
480 | uint8_t msg[5]; | 480 | uint8_t msg[5]; |
481 | uint8_t reply[2]; | 481 | uint8_t reply[2]; |
482 | unsigned retry; | ||
482 | int msg_bytes; | 483 | int msg_bytes; |
483 | int reply_bytes; | 484 | int reply_bytes; |
484 | int ret; | 485 | int ret; |
@@ -513,14 +514,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
513 | break; | 514 | break; |
514 | } | 515 | } |
515 | 516 | ||
516 | for (;;) { | 517 | for (retry = 0; retry < 5; retry++) { |
517 | ret = intel_dp_aux_ch(intel_dp, | 518 | ret = intel_dp_aux_ch(intel_dp, |
518 | msg, msg_bytes, | 519 | msg, msg_bytes, |
519 | reply, reply_bytes); | 520 | reply, reply_bytes); |
520 | if (ret < 0) { | 521 | if (ret < 0) { |
521 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | 522 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); |
522 | return ret; | 523 | return ret; |
523 | } | 524 | } |
525 | |||
526 | switch (reply[0] & AUX_NATIVE_REPLY_MASK) { | ||
527 | case AUX_NATIVE_REPLY_ACK: | ||
528 | /* I2C-over-AUX Reply field is only valid | ||
529 | * when paired with AUX ACK. | ||
530 | */ | ||
531 | break; | ||
532 | case AUX_NATIVE_REPLY_NACK: | ||
533 | DRM_DEBUG_KMS("aux_ch native nack\n"); | ||
534 | return -EREMOTEIO; | ||
535 | case AUX_NATIVE_REPLY_DEFER: | ||
536 | udelay(100); | ||
537 | continue; | ||
538 | default: | ||
539 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", | ||
540 | reply[0]); | ||
541 | return -EREMOTEIO; | ||
542 | } | ||
543 | |||
524 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | 544 | switch (reply[0] & AUX_I2C_REPLY_MASK) { |
525 | case AUX_I2C_REPLY_ACK: | 545 | case AUX_I2C_REPLY_ACK: |
526 | if (mode == MODE_I2C_READ) { | 546 | if (mode == MODE_I2C_READ) { |
@@ -528,17 +548,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
528 | } | 548 | } |
529 | return reply_bytes - 1; | 549 | return reply_bytes - 1; |
530 | case AUX_I2C_REPLY_NACK: | 550 | case AUX_I2C_REPLY_NACK: |
531 | DRM_DEBUG_KMS("aux_ch nack\n"); | 551 | DRM_DEBUG_KMS("aux_i2c nack\n"); |
532 | return -EREMOTEIO; | 552 | return -EREMOTEIO; |
533 | case AUX_I2C_REPLY_DEFER: | 553 | case AUX_I2C_REPLY_DEFER: |
534 | DRM_DEBUG_KMS("aux_ch defer\n"); | 554 | DRM_DEBUG_KMS("aux_i2c defer\n"); |
535 | udelay(100); | 555 | udelay(100); |
536 | break; | 556 | break; |
537 | default: | 557 | default: |
538 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | 558 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); |
539 | return -EREMOTEIO; | 559 | return -EREMOTEIO; |
540 | } | 560 | } |
541 | } | 561 | } |
562 | |||
563 | DRM_ERROR("too many retries, giving up\n"); | ||
564 | return -EREMOTEIO; | ||
542 | } | 565 | } |
543 | 566 | ||
544 | static int | 567 | static int |
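Before the next hunk, a condensed view of the retry policy the AUX change above introduces: the native reply is checked first, only a DEFER earns another attempt (after a short back-off), and the loop is now bounded instead of spinning forever. Sketch only; do_aux_ch() is a hypothetical stand-in for intel_dp_aux_ch() and the I2C-over-AUX reply handling is elided:

static int aux_with_retry(struct intel_dp *intel_dp,
                          u8 *msg, int msg_bytes, u8 *reply, int reply_bytes)
{
        unsigned retry;
        int ret;

        for (retry = 0; retry < 5; retry++) {
                ret = do_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes);
                if (ret < 0)
                        return ret;

                switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
                case AUX_NATIVE_REPLY_ACK:
                        return ret;     /* sink accepted; caller parses reply */
                case AUX_NATIVE_REPLY_DEFER:
                        udelay(100);    /* sink busy; back off and retry */
                        continue;
                default:                /* NACK or malformed reply: hard error */
                        return -EREMOTEIO;
                }
        }
        return -EREMOTEIO;              /* too many defers */
}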
@@ -584,17 +607,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
584 | mode->clock = dev_priv->panel_fixed_mode->clock; | 607 | mode->clock = dev_priv->panel_fixed_mode->clock; |
585 | } | 608 | } |
586 | 609 | ||
587 | /* Just use VBT values for eDP */ | ||
588 | if (is_edp(intel_dp)) { | ||
589 | intel_dp->lane_count = dev_priv->edp.lanes; | ||
590 | intel_dp->link_bw = dev_priv->edp.rate; | ||
591 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
592 | DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", | ||
593 | intel_dp->link_bw, intel_dp->lane_count, | ||
594 | adjusted_mode->clock); | ||
595 | return true; | ||
596 | } | ||
597 | |||
598 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 610 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
599 | for (clock = 0; clock <= max_clock; clock++) { | 611 | for (clock = 0; clock <= max_clock; clock++) { |
600 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 612 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
@@ -613,6 +625,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
613 | } | 625 | } |
614 | } | 626 | } |
615 | 627 | ||
628 | if (is_edp(intel_dp)) { | ||
629 | /* okay, we failed; just pick the highest */ | ||
630 | intel_dp->lane_count = max_lane_count; | ||
631 | intel_dp->link_bw = bws[max_clock]; | ||
632 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
633 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
634 | "count %d clock %d\n", | ||
635 | intel_dp->link_bw, intel_dp->lane_count, | ||
636 | adjusted_mode->clock); | ||
637 | |||
638 | return true; | ||
639 | } | ||
640 | |||
616 | return false; | 641 | return false; |
617 | } | 642 | } |
618 | 643 | ||
@@ -1087,21 +1112,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp) | |||
1087 | } | 1112 | } |
1088 | 1113 | ||
1089 | static uint32_t | 1114 | static uint32_t |
1090 | intel_dp_signal_levels(struct intel_dp *intel_dp) | 1115 | intel_dp_signal_levels(uint8_t train_set, int lane_count) |
1091 | { | 1116 | { |
1092 | struct drm_device *dev = intel_dp->base.base.dev; | 1117 | uint32_t signal_levels = 0; |
1093 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1094 | uint32_t signal_levels = 0; | ||
1095 | u8 train_set = intel_dp->train_set[0]; | ||
1096 | u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
1097 | u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; | ||
1098 | 1118 | ||
1099 | if (is_edp(intel_dp)) { | 1119 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { |
1100 | vswing = dev_priv->edp.vswing; | ||
1101 | preemphasis = dev_priv->edp.preemphasis; | ||
1102 | } | ||
1103 | |||
1104 | switch (vswing) { | ||
1105 | case DP_TRAIN_VOLTAGE_SWING_400: | 1120 | case DP_TRAIN_VOLTAGE_SWING_400: |
1106 | default: | 1121 | default: |
1107 | signal_levels |= DP_VOLTAGE_0_4; | 1122 | signal_levels |= DP_VOLTAGE_0_4; |
@@ -1116,7 +1131,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp) | |||
1116 | signal_levels |= DP_VOLTAGE_1_2; | 1131 | signal_levels |= DP_VOLTAGE_1_2; |
1117 | break; | 1132 | break; |
1118 | } | 1133 | } |
1119 | switch (preemphasis) { | 1134 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { |
1120 | case DP_TRAIN_PRE_EMPHASIS_0: | 1135 | case DP_TRAIN_PRE_EMPHASIS_0: |
1121 | default: | 1136 | default: |
1122 | signal_levels |= DP_PRE_EMPHASIS_0; | 1137 | signal_levels |= DP_PRE_EMPHASIS_0; |
@@ -1138,18 +1153,27 @@ intel_dp_signal_levels(struct intel_dp *intel_dp) | |||
1138 | static uint32_t | 1153 | static uint32_t |
1139 | intel_gen6_edp_signal_levels(uint8_t train_set) | 1154 | intel_gen6_edp_signal_levels(uint8_t train_set) |
1140 | { | 1155 | { |
1141 | switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { | 1156 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
1157 | DP_TRAIN_PRE_EMPHASIS_MASK); | ||
1158 | switch (signal_levels) { | ||
1142 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: | 1159 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
1143 | return EDP_LINK_TRAIN_400MV_0DB_SNB_B; | 1160 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
1161 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; | ||
1162 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: | ||
1163 | return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; | ||
1144 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: | 1164 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
1145 | return EDP_LINK_TRAIN_400MV_6DB_SNB_B; | 1165 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: |
1166 | return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; | ||
1146 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: | 1167 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1147 | return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; | 1168 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1169 | return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; | ||
1148 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: | 1170 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
1149 | return EDP_LINK_TRAIN_800MV_0DB_SNB_B; | 1171 | case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: |
1172 | return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; | ||
1150 | default: | 1173 | default: |
1151 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); | 1174 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: " |
1152 | return EDP_LINK_TRAIN_400MV_0DB_SNB_B; | 1175 | "0x%x\n", signal_levels); |
1176 | return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; | ||
1153 | } | 1177 | } |
1154 | } | 1178 | } |
1155 | 1179 | ||
@@ -1203,18 +1227,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp) | |||
1203 | } | 1227 | } |
1204 | 1228 | ||
1205 | static bool | 1229 | static bool |
1206 | intel_dp_aux_handshake_required(struct intel_dp *intel_dp) | ||
1207 | { | ||
1208 | struct drm_device *dev = intel_dp->base.base.dev; | ||
1209 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1210 | |||
1211 | if (is_edp(intel_dp) && dev_priv->no_aux_handshake) | ||
1212 | return false; | ||
1213 | |||
1214 | return true; | ||
1215 | } | ||
1216 | |||
1217 | static bool | ||
1218 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1230 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1219 | uint32_t dp_reg_value, | 1231 | uint32_t dp_reg_value, |
1220 | uint8_t dp_train_pat) | 1232 | uint8_t dp_train_pat) |
@@ -1226,9 +1238,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1226 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1238 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1227 | POSTING_READ(intel_dp->output_reg); | 1239 | POSTING_READ(intel_dp->output_reg); |
1228 | 1240 | ||
1229 | if (!intel_dp_aux_handshake_required(intel_dp)) | ||
1230 | return true; | ||
1231 | |||
1232 | intel_dp_aux_native_write_1(intel_dp, | 1241 | intel_dp_aux_native_write_1(intel_dp, |
1233 | DP_TRAINING_PATTERN_SET, | 1242 | DP_TRAINING_PATTERN_SET, |
1234 | dp_train_pat); | 1243 | dp_train_pat); |
@@ -1261,11 +1270,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1261 | POSTING_READ(intel_dp->output_reg); | 1270 | POSTING_READ(intel_dp->output_reg); |
1262 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1271 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1263 | 1272 | ||
1264 | if (intel_dp_aux_handshake_required(intel_dp)) | 1273 | /* Write the link configuration data */ |
1265 | /* Write the link configuration data */ | 1274 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
1266 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1275 | intel_dp->link_configuration, |
1267 | intel_dp->link_configuration, | 1276 | DP_LINK_CONFIGURATION_SIZE); |
1268 | DP_LINK_CONFIGURATION_SIZE); | ||
1269 | 1277 | ||
1270 | DP |= DP_PORT_EN; | 1278 | DP |= DP_PORT_EN; |
1271 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1279 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
@@ -1283,7 +1291,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1283 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1291 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1284 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1292 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1285 | } else { | 1293 | } else { |
1286 | signal_levels = intel_dp_signal_levels(intel_dp); | 1294 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); |
1287 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1295 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1288 | } | 1296 | } |
1289 | 1297 | ||
@@ -1297,37 +1305,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1297 | break; | 1305 | break; |
1298 | /* Set training pattern 1 */ | 1306 | /* Set training pattern 1 */ |
1299 | 1307 | ||
1300 | udelay(500); | 1308 | udelay(100); |
1301 | if (intel_dp_aux_handshake_required(intel_dp)) { | 1309 | if (!intel_dp_get_link_status(intel_dp)) |
1302 | break; | 1310 | break; |
1303 | } else { | ||
1304 | if (!intel_dp_get_link_status(intel_dp)) | ||
1305 | break; | ||
1306 | 1311 | ||
1307 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { | 1312 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { |
1308 | clock_recovery = true; | 1313 | clock_recovery = true; |
1309 | break; | 1314 | break; |
1310 | } | 1315 | } |
1311 | 1316 | ||
1312 | /* Check to see if we've tried the max voltage */ | 1317 | /* Check to see if we've tried the max voltage */ |
1313 | for (i = 0; i < intel_dp->lane_count; i++) | 1318 | for (i = 0; i < intel_dp->lane_count; i++) |
1314 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1319 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1315 | break; | ||
1316 | if (i == intel_dp->lane_count) | ||
1317 | break; | 1320 | break; |
1321 | if (i == intel_dp->lane_count) | ||
1322 | break; | ||
1318 | 1323 | ||
1319 | /* Check to see if we've tried the same voltage 5 times */ | 1324 | /* Check to see if we've tried the same voltage 5 times */ |
1320 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 1325 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
1321 | ++tries; | 1326 | ++tries; |
1322 | if (tries == 5) | 1327 | if (tries == 5) |
1323 | break; | 1328 | break; |
1324 | } else | 1329 | } else |
1325 | tries = 0; | 1330 | tries = 0; |
1326 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1331 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
1327 | 1332 | ||
1328 | /* Compute new intel_dp->train_set as requested by target */ | 1333 | /* Compute new intel_dp->train_set as requested by target */ |
1329 | intel_get_adjust_train(intel_dp); | 1334 | intel_get_adjust_train(intel_dp); |
1330 | } | ||
1331 | } | 1335 | } |
1332 | 1336 | ||
1333 | intel_dp->DP = DP; | 1337 | intel_dp->DP = DP; |
@@ -1339,22 +1343,29 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1339 | struct drm_device *dev = intel_dp->base.base.dev; | 1343 | struct drm_device *dev = intel_dp->base.base.dev; |
1340 | struct drm_i915_private *dev_priv = dev->dev_private; | 1344 | struct drm_i915_private *dev_priv = dev->dev_private; |
1341 | bool channel_eq = false; | 1345 | bool channel_eq = false; |
1342 | int tries; | 1346 | int tries, cr_tries; |
1343 | u32 reg; | 1347 | u32 reg; |
1344 | uint32_t DP = intel_dp->DP; | 1348 | uint32_t DP = intel_dp->DP; |
1345 | 1349 | ||
1346 | /* channel equalization */ | 1350 | /* channel equalization */ |
1347 | tries = 0; | 1351 | tries = 0; |
1352 | cr_tries = 0; | ||
1348 | channel_eq = false; | 1353 | channel_eq = false; |
1349 | for (;;) { | 1354 | for (;;) { |
1350 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ | 1355 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1351 | uint32_t signal_levels; | 1356 | uint32_t signal_levels; |
1352 | 1357 | ||
1358 | if (cr_tries > 5) { | ||
1359 | DRM_ERROR("failed to train DP, aborting\n"); | ||
1360 | intel_dp_link_down(intel_dp); | ||
1361 | break; | ||
1362 | } | ||
1363 | |||
1353 | if (IS_GEN6(dev) && is_edp(intel_dp)) { | 1364 | if (IS_GEN6(dev) && is_edp(intel_dp)) { |
1354 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1365 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1355 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1366 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1356 | } else { | 1367 | } else { |
1357 | signal_levels = intel_dp_signal_levels(intel_dp); | 1368 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); |
1358 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1369 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1359 | } | 1370 | } |
1360 | 1371 | ||
@@ -1368,28 +1379,36 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1368 | DP_TRAINING_PATTERN_2)) | 1379 | DP_TRAINING_PATTERN_2)) |
1369 | break; | 1380 | break; |
1370 | 1381 | ||
1371 | udelay(500); | 1382 | udelay(400); |
1372 | 1383 | if (!intel_dp_get_link_status(intel_dp)) | |
1373 | if (!intel_dp_aux_handshake_required(intel_dp)) { | ||
1374 | break; | 1384 | break; |
1375 | } else { | ||
1376 | if (!intel_dp_get_link_status(intel_dp)) | ||
1377 | break; | ||
1378 | 1385 | ||
1379 | if (intel_channel_eq_ok(intel_dp)) { | 1386 | /* Make sure clock is still ok */ |
1380 | channel_eq = true; | 1387 | if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { |
1381 | break; | 1388 | intel_dp_start_link_train(intel_dp); |
1382 | } | 1389 | cr_tries++; |
1390 | continue; | ||
1391 | } | ||
1383 | 1392 | ||
1384 | /* Try 5 times */ | 1393 | if (intel_channel_eq_ok(intel_dp)) { |
1385 | if (tries > 5) | 1394 | channel_eq = true; |
1386 | break; | 1395 | break; |
1396 | } | ||
1387 | 1397 | ||
1388 | /* Compute new intel_dp->train_set as requested by target */ | 1398 | /* Try 5 times, then try clock recovery if that fails */ |
1389 | intel_get_adjust_train(intel_dp); | 1399 | if (tries > 5) { |
1390 | ++tries; | 1400 | intel_dp_link_down(intel_dp); |
1401 | intel_dp_start_link_train(intel_dp); | ||
1402 | tries = 0; | ||
1403 | cr_tries++; | ||
1404 | continue; | ||
1391 | } | 1405 | } |
1406 | |||
1407 | /* Compute new intel_dp->train_set as requested by target */ | ||
1408 | intel_get_adjust_train(intel_dp); | ||
1409 | ++tries; | ||
1392 | } | 1410 | } |
1411 | |||
1393 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1412 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1394 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | 1413 | reg = DP | DP_LINK_TRAIN_OFF_CPT; |
1395 | else | 1414 | else |
@@ -1408,6 +1427,9 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1408 | struct drm_i915_private *dev_priv = dev->dev_private; | 1427 | struct drm_i915_private *dev_priv = dev->dev_private; |
1409 | uint32_t DP = intel_dp->DP; | 1428 | uint32_t DP = intel_dp->DP; |
1410 | 1429 | ||
1430 | if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) | ||
1431 | return; | ||
1432 | |||
1411 | DRM_DEBUG_KMS("\n"); | 1433 | DRM_DEBUG_KMS("\n"); |
1412 | 1434 | ||
1413 | if (is_edp(intel_dp)) { | 1435 | if (is_edp(intel_dp)) { |
@@ -1430,6 +1452,27 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1430 | 1452 | ||
1431 | if (is_edp(intel_dp)) | 1453 | if (is_edp(intel_dp)) |
1432 | DP |= DP_LINK_TRAIN_OFF; | 1454 | DP |= DP_LINK_TRAIN_OFF; |
1455 | |||
1456 | if (!HAS_PCH_CPT(dev) && | ||
1457 | I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { | ||
1458 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); | ||
1459 | /* Hardware workaround: leaving our transcoder select | ||
1460 | * set to transcoder B while it's off will prevent the | ||
1461 | * corresponding HDMI output on transcoder A. | ||
1462 | * | ||
1463 | * Combine this with another hardware workaround: | ||
1464 | * transcoder select bit can only be cleared while the | ||
1465 | * port is enabled. | ||
1466 | */ | ||
1467 | DP &= ~DP_PIPEB_SELECT; | ||
1468 | I915_WRITE(intel_dp->output_reg, DP); | ||
1469 | |||
1470 | /* Changes to enable or select take place the vblank | ||
1471 | * after being written. | ||
1472 | */ | ||
1473 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1474 | } | ||
1475 | |||
1433 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); | 1476 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
1434 | POSTING_READ(intel_dp->output_reg); | 1477 | POSTING_READ(intel_dp->output_reg); |
1435 | } | 1478 | } |
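Stepping back from the link-training hunks above: DisplayPort training has two phases, clock recovery then channel equalization, and the new cr_tries counter lets a failing equalization phase fall back to phase one a bounded number of times instead of looping forever. A condensed sketch of that control flow; the *_still_ok()/*_ok_now() predicates are hypothetical stand-ins for the intel_clock_recovery_ok()/intel_channel_eq_ok() checks on freshly read link status:

static void dp_train_sketch(struct intel_dp *intel_dp)
{
        int tries = 0, cr_tries = 0;

        for (;;) {
                if (cr_tries > 5) {
                        intel_dp_link_down(intel_dp);   /* give up cleanly */
                        return;
                }
                if (!clock_recovery_still_ok(intel_dp)) {
                        intel_dp_start_link_train(intel_dp);    /* redo phase 1 */
                        cr_tries++;
                        continue;
                }
                if (channel_eq_ok_now(intel_dp))
                        return;                         /* link fully trained */
                if (++tries > 5) {      /* EQ is stuck: retrain from scratch */
                        intel_dp_link_down(intel_dp);
                        intel_dp_start_link_train(intel_dp);
                        tries = 0;
                        cr_tries++;
                }
                /* otherwise: adjust drive levels per the sink and loop */
        }
}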
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 21551fe74541..74db2557d644 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) | |||
127 | 127 | ||
128 | struct intel_framebuffer { | 128 | struct intel_framebuffer { |
129 | struct drm_framebuffer base; | 129 | struct drm_framebuffer base; |
130 | struct drm_gem_object *obj; | 130 | struct drm_i915_gem_object *obj; |
131 | }; | 131 | }; |
132 | 132 | ||
133 | struct intel_fbdev { | 133 | struct intel_fbdev { |
@@ -166,7 +166,7 @@ struct intel_crtc { | |||
166 | struct intel_unpin_work *unpin_work; | 166 | struct intel_unpin_work *unpin_work; |
167 | int fdi_lanes; | 167 | int fdi_lanes; |
168 | 168 | ||
169 | struct drm_gem_object *cursor_bo; | 169 | struct drm_i915_gem_object *cursor_bo; |
170 | uint32_t cursor_addr; | 170 | uint32_t cursor_addr; |
171 | int16_t cursor_x, cursor_y; | 171 | int16_t cursor_x, cursor_y; |
172 | int16_t cursor_width, cursor_height; | 172 | int16_t cursor_width, cursor_height; |
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) | |||
220 | struct intel_unpin_work { | 220 | struct intel_unpin_work { |
221 | struct work_struct work; | 221 | struct work_struct work; |
222 | struct drm_device *dev; | 222 | struct drm_device *dev; |
223 | struct drm_gem_object *old_fb_obj; | 223 | struct drm_i915_gem_object *old_fb_obj; |
224 | struct drm_gem_object *pending_flip_obj; | 224 | struct drm_i915_gem_object *pending_flip_obj; |
225 | struct drm_pending_vblank_event *event; | 225 | struct drm_pending_vblank_event *event; |
226 | int pending; | 226 | int pending; |
227 | bool enable_stall_check; | 227 | bool enable_stall_check; |
@@ -236,8 +236,9 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | |||
236 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | 236 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); |
237 | extern void intel_dvo_init(struct drm_device *dev); | 237 | extern void intel_dvo_init(struct drm_device *dev); |
238 | extern void intel_tv_init(struct drm_device *dev); | 238 | extern void intel_tv_init(struct drm_device *dev); |
239 | extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); | 239 | extern void intel_mark_busy(struct drm_device *dev, |
240 | extern void intel_lvds_init(struct drm_device *dev); | 240 | struct drm_i915_gem_object *obj); |
241 | extern bool intel_lvds_init(struct drm_device *dev); | ||
241 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); | 242 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); |
242 | void | 243 | void |
243 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 244 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
@@ -256,6 +257,9 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, | |||
256 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | 257 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
257 | extern u32 intel_panel_get_backlight(struct drm_device *dev); | 258 | extern u32 intel_panel_get_backlight(struct drm_device *dev); |
258 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | 259 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
260 | extern void intel_panel_setup_backlight(struct drm_device *dev); | ||
261 | extern void intel_panel_enable_backlight(struct drm_device *dev); | ||
262 | extern void intel_panel_disable_backlight(struct drm_device *dev); | ||
259 | 263 | ||
260 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 264 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
261 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 265 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
@@ -293,19 +297,22 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |||
293 | u16 blue, int regno); | 297 | u16 blue, int regno); |
294 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | 298 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
295 | u16 *blue, int regno); | 299 | u16 *blue, int regno); |
296 | extern void intel_init_clock_gating(struct drm_device *dev); | 300 | extern void intel_enable_clock_gating(struct drm_device *dev); |
301 | extern void intel_disable_clock_gating(struct drm_device *dev); | ||
297 | extern void ironlake_enable_drps(struct drm_device *dev); | 302 | extern void ironlake_enable_drps(struct drm_device *dev); |
298 | extern void ironlake_disable_drps(struct drm_device *dev); | 303 | extern void ironlake_disable_drps(struct drm_device *dev); |
304 | extern void gen6_enable_rps(struct drm_i915_private *dev_priv); | ||
305 | extern void gen6_disable_rps(struct drm_device *dev); | ||
299 | extern void intel_init_emon(struct drm_device *dev); | 306 | extern void intel_init_emon(struct drm_device *dev); |
300 | 307 | ||
301 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 308 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
302 | struct drm_gem_object *obj, | 309 | struct drm_i915_gem_object *obj, |
303 | bool pipelined); | 310 | struct intel_ring_buffer *pipelined); |
304 | 311 | ||
305 | extern int intel_framebuffer_init(struct drm_device *dev, | 312 | extern int intel_framebuffer_init(struct drm_device *dev, |
306 | struct intel_framebuffer *ifb, | 313 | struct intel_framebuffer *ifb, |
307 | struct drm_mode_fb_cmd *mode_cmd, | 314 | struct drm_mode_fb_cmd *mode_cmd, |
308 | struct drm_gem_object *obj); | 315 | struct drm_i915_gem_object *obj); |
309 | extern int intel_fbdev_init(struct drm_device *dev); | 316 | extern int intel_fbdev_init(struct drm_device *dev); |
310 | extern void intel_fbdev_fini(struct drm_device *dev); | 317 | extern void intel_fbdev_fini(struct drm_device *dev); |
311 | 318 | ||
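The wholesale s/drm_gem_object/drm_i915_gem_object/ in this header tracks a driver-wide change: the GEM base object is now embedded in the i915 object, so code passes the driver type around directly and reaches the base member as &obj->base without casts. A sketch of the shape, with the driver-private fields abbreviated; only the embedding and the container_of() upcast are the point:

struct drm_i915_gem_object {
        struct drm_gem_object base;     /* embedded GEM base object */
        /* ... driver-private state: gtt_offset, pin count, domains ... */
};

/* Recover the i915 object from the embedded base; this is what
 * to_intel_bo() does elsewhere in this driver. */
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *obj)
{
        return container_of(obj, struct drm_i915_gem_object, base);
}

The payoff is visible throughout the diff: drm_gem_object_unreference(&obj->base) replaces a to_intel_bo() round-trip at each call site.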
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index af2a1dddc28e..512782728e51 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -62,13 +62,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
62 | struct drm_fb_helper_surface_size *sizes) | 62 | struct drm_fb_helper_surface_size *sizes) |
63 | { | 63 | { |
64 | struct drm_device *dev = ifbdev->helper.dev; | 64 | struct drm_device *dev = ifbdev->helper.dev; |
65 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
65 | struct fb_info *info; | 66 | struct fb_info *info; |
66 | struct drm_framebuffer *fb; | 67 | struct drm_framebuffer *fb; |
67 | struct drm_mode_fb_cmd mode_cmd; | 68 | struct drm_mode_fb_cmd mode_cmd; |
68 | struct drm_gem_object *fbo = NULL; | 69 | struct drm_i915_gem_object *obj; |
69 | struct drm_i915_gem_object *obj_priv; | ||
70 | struct device *device = &dev->pdev->dev; | 70 | struct device *device = &dev->pdev->dev; |
71 | int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0; | 71 | int size, ret; |
72 | 72 | ||
73 | /* we don't do packed 24bpp */ | 73 | /* we don't do packed 24bpp */ |
74 | if (sizes->surface_bpp == 24) | 74 | if (sizes->surface_bpp == 24) |
@@ -78,23 +78,22 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
78 | mode_cmd.height = sizes->surface_height; | 78 | mode_cmd.height = sizes->surface_height; |
79 | 79 | ||
80 | mode_cmd.bpp = sizes->surface_bpp; | 80 | mode_cmd.bpp = sizes->surface_bpp; |
81 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); | 81 | mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64); |
82 | mode_cmd.depth = sizes->surface_depth; | 82 | mode_cmd.depth = sizes->surface_depth; |
83 | 83 | ||
84 | size = mode_cmd.pitch * mode_cmd.height; | 84 | size = mode_cmd.pitch * mode_cmd.height; |
85 | size = ALIGN(size, PAGE_SIZE); | 85 | size = ALIGN(size, PAGE_SIZE); |
86 | fbo = i915_gem_alloc_object(dev, size); | 86 | obj = i915_gem_alloc_object(dev, size); |
87 | if (!fbo) { | 87 | if (!obj) { |
88 | DRM_ERROR("failed to allocate framebuffer\n"); | 88 | DRM_ERROR("failed to allocate framebuffer\n"); |
89 | ret = -ENOMEM; | 89 | ret = -ENOMEM; |
90 | goto out; | 90 | goto out; |
91 | } | 91 | } |
92 | obj_priv = to_intel_bo(fbo); | ||
93 | 92 | ||
94 | mutex_lock(&dev->struct_mutex); | 93 | mutex_lock(&dev->struct_mutex); |
95 | 94 | ||
96 | /* Flush everything out, we'll be doing GTT only from now on */ | 95 | /* Flush everything out, we'll be doing GTT only from now on */ |
97 | ret = intel_pin_and_fence_fb_obj(dev, fbo, false); | 96 | ret = intel_pin_and_fence_fb_obj(dev, obj, false); |
98 | if (ret) { | 97 | if (ret) { |
99 | DRM_ERROR("failed to pin fb: %d\n", ret); | 98 | DRM_ERROR("failed to pin fb: %d\n", ret); |
100 | goto out_unref; | 99 | goto out_unref; |
@@ -108,7 +107,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
108 | 107 | ||
109 | info->par = ifbdev; | 108 | info->par = ifbdev; |
110 | 109 | ||
111 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); | 110 | ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); |
112 | if (ret) | 111 | if (ret) |
113 | goto out_unpin; | 112 | goto out_unpin; |
114 | 113 | ||
@@ -122,6 +121,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
122 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; | 121 | info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; |
123 | info->fbops = &intelfb_ops; | 122 | info->fbops = &intelfb_ops; |
124 | 123 | ||
124 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
125 | if (ret) { | ||
126 | ret = -ENOMEM; | ||
127 | goto out_unpin; | ||
128 | } | ||
125 | /* setup aperture base/size for vesafb takeover */ | 129 | /* setup aperture base/size for vesafb takeover */ |
126 | info->apertures = alloc_apertures(1); | 130 | info->apertures = alloc_apertures(1); |
127 | if (!info->apertures) { | 131 | if (!info->apertures) { |
@@ -129,26 +133,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
129 | goto out_unpin; | 133 | goto out_unpin; |
130 | } | 134 | } |
131 | info->apertures->ranges[0].base = dev->mode_config.fb_base; | 135 | info->apertures->ranges[0].base = dev->mode_config.fb_base; |
132 | if (!IS_GEN2(dev)) | 136 | info->apertures->ranges[0].size = |
133 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); | 137 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
134 | else | ||
135 | info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); | ||
136 | 138 | ||
137 | info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; | 139 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; |
138 | info->fix.smem_len = size; | 140 | info->fix.smem_len = size; |
139 | 141 | ||
140 | info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, | 142 | info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); |
141 | size); | ||
142 | if (!info->screen_base) { | 143 | if (!info->screen_base) { |
143 | ret = -ENOSPC; | 144 | ret = -ENOSPC; |
144 | goto out_unpin; | 145 | goto out_unpin; |
145 | } | 146 | } |
146 | |||
147 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
148 | if (ret) { | ||
149 | ret = -ENOMEM; | ||
150 | goto out_unpin; | ||
151 | } | ||
152 | info->screen_size = size; | 147 | info->screen_size = size; |
153 | 148 | ||
154 | // memset(info->screen_base, 0, size); | 149 | // memset(info->screen_base, 0, size); |
@@ -156,10 +151,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
156 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); | 151 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
157 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); | 152 | drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); |
158 | 153 | ||
159 | /* FIXME: we really shouldn't expose mmio space at all */ | ||
160 | info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); | ||
161 | info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar); | ||
162 | |||
163 | info->pixmap.size = 64*1024; | 154 | info->pixmap.size = 64*1024; |
164 | info->pixmap.buf_align = 8; | 155 | info->pixmap.buf_align = 8; |
165 | info->pixmap.access_align = 32; | 156 | info->pixmap.access_align = 32; |
@@ -168,7 +159,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
168 | 159 | ||
169 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", | 160 | DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", |
170 | fb->width, fb->height, | 161 | fb->width, fb->height, |
171 | obj_priv->gtt_offset, fbo); | 162 | obj->gtt_offset, obj); |
172 | 163 | ||
173 | 164 | ||
174 | mutex_unlock(&dev->struct_mutex); | 165 | mutex_unlock(&dev->struct_mutex); |
@@ -176,9 +167,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
176 | return 0; | 167 | return 0; |
177 | 168 | ||
178 | out_unpin: | 169 | out_unpin: |
179 | i915_gem_object_unpin(fbo); | 170 | i915_gem_object_unpin(obj); |
180 | out_unref: | 171 | out_unref: |
181 | drm_gem_object_unreference(fbo); | 172 | drm_gem_object_unreference(&obj->base); |
182 | mutex_unlock(&dev->struct_mutex); | 173 | mutex_unlock(&dev->struct_mutex); |
183 | out: | 174 | out: |
184 | return ret; | 175 | return ret; |
@@ -225,7 +216,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, | |||
225 | 216 | ||
226 | drm_framebuffer_cleanup(&ifb->base); | 217 | drm_framebuffer_cleanup(&ifb->base); |
227 | if (ifb->obj) { | 218 | if (ifb->obj) { |
228 | drm_gem_object_unreference_unlocked(ifb->obj); | 219 | drm_gem_object_unreference_unlocked(&ifb->obj->base); |
229 | ifb->obj = NULL; | 220 | ifb->obj = NULL; |
230 | } | 221 | } |
231 | } | 222 | } |
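One small fix in this file is easy to miss: the framebuffer pitch computation's bytes-per-pixel term changes from (bpp + 1) / 8 to (bpp + 7) / 8, the standard integer ceiling. The old form under-counts whenever bpp modulo 8 falls in 1..6; the common depths happen to dodge that, which is why it survived. A standalone check with illustrative values (plain C, not driver code):

#include <stdio.h>

int main(void)
{
        static const int bpp[] = { 8, 10, 15, 16, 30, 32 };
        unsigned i;

        for (i = 0; i < sizeof(bpp) / sizeof(bpp[0]); i++)
                printf("bpp=%2d  (bpp+1)/8=%d  (bpp+7)/8=%d\n",
                       bpp[i], (bpp[i] + 1) / 8, (bpp[i] + 7) / 8);
        /* bpp = 10 and bpp = 30 expose the bug: the old form yields
         * 1 and 3 bytes where 2 and 4 are needed. */
        return 0;
}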
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 3dba086e7eea..58040f68ed7a 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio) | |||
85 | 85 | ||
86 | /* On most chips, these bits must be preserved in software. */ | 86 | /* On most chips, these bits must be preserved in software. */ |
87 | if (!IS_I830(dev) && !IS_845G(dev)) | 87 | if (!IS_I830(dev) && !IS_845G(dev)) |
88 | reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | | 88 | reserved = I915_READ_NOTRACE(gpio->reg) & |
89 | GPIO_CLOCK_PULLUP_DISABLE); | 89 | (GPIO_DATA_PULLUP_DISABLE | |
90 | GPIO_CLOCK_PULLUP_DISABLE); | ||
90 | 91 | ||
91 | return reserved; | 92 | return reserved; |
92 | } | 93 | } |
@@ -96,9 +97,9 @@ static int get_clock(void *data) | |||
96 | struct intel_gpio *gpio = data; | 97 | struct intel_gpio *gpio = data; |
97 | struct drm_i915_private *dev_priv = gpio->dev_priv; | 98 | struct drm_i915_private *dev_priv = gpio->dev_priv; |
98 | u32 reserved = get_reserved(gpio); | 99 | u32 reserved = get_reserved(gpio); |
99 | I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); | 100 | I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); |
100 | I915_WRITE(gpio->reg, reserved); | 101 | I915_WRITE_NOTRACE(gpio->reg, reserved); |
101 | return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; | 102 | return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; |
102 | } | 103 | } |
103 | 104 | ||
104 | static int get_data(void *data) | 105 | static int get_data(void *data) |
@@ -106,9 +107,9 @@ static int get_data(void *data) | |||
106 | struct intel_gpio *gpio = data; | 107 | struct intel_gpio *gpio = data; |
107 | struct drm_i915_private *dev_priv = gpio->dev_priv; | 108 | struct drm_i915_private *dev_priv = gpio->dev_priv; |
108 | u32 reserved = get_reserved(gpio); | 109 | u32 reserved = get_reserved(gpio); |
109 | I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); | 110 | I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); |
110 | I915_WRITE(gpio->reg, reserved); | 111 | I915_WRITE_NOTRACE(gpio->reg, reserved); |
111 | return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; | 112 | return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0; |
112 | } | 113 | } |
113 | 114 | ||
114 | static void set_clock(void *data, int state_high) | 115 | static void set_clock(void *data, int state_high) |
@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high) | |||
124 | clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | | 125 | clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | |
125 | GPIO_CLOCK_VAL_MASK; | 126 | GPIO_CLOCK_VAL_MASK; |
126 | 127 | ||
127 | I915_WRITE(gpio->reg, reserved | clock_bits); | 128 | I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits); |
128 | POSTING_READ(gpio->reg); | 129 | POSTING_READ(gpio->reg); |
129 | } | 130 | } |
130 | 131 | ||
@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high) | |||
141 | data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | | 142 | data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | |
142 | GPIO_DATA_VAL_MASK; | 143 | GPIO_DATA_VAL_MASK; |
143 | 144 | ||
144 | I915_WRITE(gpio->reg, reserved | data_bits); | 145 | I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits); |
145 | POSTING_READ(gpio->reg); | 146 | POSTING_READ(gpio->reg); |
146 | } | 147 | } |
147 | 148 | ||
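The NOTRACE variants matter here because these callbacks back the bit-banging i2c fallback: clocking a single byte over GPIO costs dozens of register accesses, and tracing each one would drown the mmio tracepoint log. For reference, a standalone sketch of the open-drain bit-banging scheme such get/set callbacks serve (userspace C with simulated wires; bb_bus and bb_send_byte are invented names, not the kernel's i2c-algo-bit code):

    #include <stdio.h>

    /* Simulated open-drain line: writing 0 drives it low, writing 1
     * releases it so the pull-up takes it high. */
    struct bb_bus {
        int scl, sda;
    };

    static void set_scl(struct bb_bus *b, int hi) { b->scl = hi; }
    static void set_sda(struct bb_bus *b, int hi) { b->sda = hi; }

    /* Clock one byte out MSB first: SDA changes only while SCL is
     * low; the slave samples SDA on SCL's rising edge. */
    static void bb_send_byte(struct bb_bus *b, unsigned char byte)
    {
        int i;
        for (i = 7; i >= 0; i--) {
            set_sda(b, (byte >> i) & 1);
            set_scl(b, 1);
            set_scl(b, 0);
        }
        set_sda(b, 1); /* release SDA so the slave can ACK */
    }

    int main(void)
    {
        struct bb_bus b = { 1, 1 };
        bb_send_byte(&b, 0xA5); /* ~25 line writes for one byte */
        printf("sda=%d scl=%d\n", b.sda, b.scl);
        return 0;
    }

Each of those line writes is an mmio access on real hardware, which is exactly the traffic the NOTRACE accessors keep out of the trace buffer.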
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 4324a326f98e..ace8d5d30dd2 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) | |||
68 | /** | 68 | /** |
69 | * Sets the power state for the panel. | 69 | * Sets the power state for the panel. |
70 | */ | 70 | */ |
71 | static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) | 71 | static void intel_lvds_enable(struct intel_lvds *intel_lvds) |
72 | { | 72 | { |
73 | struct drm_device *dev = intel_lvds->base.base.dev; | 73 | struct drm_device *dev = intel_lvds->base.base.dev; |
74 | struct drm_i915_private *dev_priv = dev->dev_private; | 74 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -82,26 +82,60 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) | |||
82 | lvds_reg = LVDS; | 82 | lvds_reg = LVDS; |
83 | } | 83 | } |
84 | 84 | ||
85 | if (on) { | 85 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); |
86 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); | ||
87 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | ||
88 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
89 | } else { | ||
90 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
91 | |||
92 | intel_panel_set_backlight(dev, 0); | ||
93 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | ||
94 | 86 | ||
95 | if (intel_lvds->pfit_control) { | 87 | if (intel_lvds->pfit_dirty) { |
96 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | 88 | /* |
97 | DRM_ERROR("timed out waiting for panel to power off\n"); | 89 | * Enable automatic panel scaling so that non-native modes |
98 | I915_WRITE(PFIT_CONTROL, 0); | 90 | * fill the screen. The panel fitter should only be |
99 | intel_lvds->pfit_control = 0; | 91 | * adjusted whilst the pipe is disabled, according to |
92 | * register description and PRM. | ||
93 | */ | ||
94 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | ||
95 | intel_lvds->pfit_control, | ||
96 | intel_lvds->pfit_pgm_ratios); | ||
97 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { | ||
98 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
99 | } else { | ||
100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | ||
101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
100 | intel_lvds->pfit_dirty = false; | 102 | intel_lvds->pfit_dirty = false; |
101 | } | 103 | } |
104 | } | ||
105 | |||
106 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | ||
107 | POSTING_READ(lvds_reg); | ||
108 | |||
109 | intel_panel_enable_backlight(dev); | ||
110 | } | ||
102 | 111 | ||
103 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); | 112 | static void intel_lvds_disable(struct intel_lvds *intel_lvds) |
113 | { | ||
114 | struct drm_device *dev = intel_lvds->base.base.dev; | ||
115 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
116 | u32 ctl_reg, lvds_reg; | ||
117 | |||
118 | if (HAS_PCH_SPLIT(dev)) { | ||
119 | ctl_reg = PCH_PP_CONTROL; | ||
120 | lvds_reg = PCH_LVDS; | ||
121 | } else { | ||
122 | ctl_reg = PP_CONTROL; | ||
123 | lvds_reg = LVDS; | ||
124 | } | ||
125 | |||
126 | intel_panel_disable_backlight(dev); | ||
127 | |||
128 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | ||
129 | |||
130 | if (intel_lvds->pfit_control) { | ||
131 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | ||
132 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
133 | |||
134 | I915_WRITE(PFIT_CONTROL, 0); | ||
135 | intel_lvds->pfit_dirty = true; | ||
104 | } | 136 | } |
137 | |||
138 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); | ||
105 | POSTING_READ(lvds_reg); | 139 | POSTING_READ(lvds_reg); |
106 | } | 140 | } |
107 | 141 | ||
@@ -110,9 +144,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
110 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 144 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
111 | 145 | ||
112 | if (mode == DRM_MODE_DPMS_ON) | 146 | if (mode == DRM_MODE_DPMS_ON) |
113 | intel_lvds_set_power(intel_lvds, true); | 147 | intel_lvds_enable(intel_lvds); |
114 | else | 148 | else |
115 | intel_lvds_set_power(intel_lvds, false); | 149 | intel_lvds_disable(intel_lvds); |
116 | 150 | ||
117 | /* XXX: We never power down the LVDS pairs. */ | 151 | /* XXX: We never power down the LVDS pairs. */ |
118 | } | 152 | } |
@@ -269,14 +303,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
269 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | 303 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; |
270 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | 304 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; |
271 | 305 | ||
272 | pfit_control |= PFIT_ENABLE; | ||
273 | /* 965+ is easy, it does everything in hw */ | 306 | /* 965+ is easy, it does everything in hw */ |
274 | if (scaled_width > scaled_height) | 307 | if (scaled_width > scaled_height) |
275 | pfit_control |= PFIT_SCALING_PILLAR; | 308 | pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR; |
276 | else if (scaled_width < scaled_height) | 309 | else if (scaled_width < scaled_height) |
277 | pfit_control |= PFIT_SCALING_LETTER; | 310 | pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER; |
278 | else | 311 | else if (adjusted_mode->hdisplay != mode->hdisplay) |
279 | pfit_control |= PFIT_SCALING_AUTO; | 312 | pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; |
280 | } else { | 313 | } else { |
281 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | 314 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; |
282 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | 315 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; |
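The pillarbox/letterbox decision avoids division by cross-multiplying: scaled_width is panel-width times source-height and scaled_height is source-width times panel-height, so scaled_width > scaled_height holds exactly when the panel has the wider aspect ratio. A standalone check of that arithmetic (illustrative sketch):

    #include <assert.h>
    #include <stdio.h>

    /* 1 = pillarbox (bars left/right), -1 = letterbox, 0 = same ratio */
    static int fit_mode(unsigned panel_w, unsigned panel_h,
                        unsigned src_w, unsigned src_h)
    {
        unsigned scaled_width  = panel_w * src_h;  /* adjusted * native */
        unsigned scaled_height = src_w * panel_h;  /* native * adjusted */
        if (scaled_width > scaled_height)
            return 1;
        if (scaled_width < scaled_height)
            return -1;
        return 0;
    }

    int main(void)
    {
        assert(fit_mode(1920, 1080, 1024, 768) == 1);  /* wide panel: pillar */
        assert(fit_mode(1024, 768, 1280, 720) == -1);  /* wide source: letter */
        assert(fit_mode(1920, 1080, 1280, 720) == 0);  /* both 16:9 */
        printf("ok\n");
        return 0;
    }

The new third branch (PFIT_SCALING_AUTO only when hdisplay differs) matches the change's theme of leaving the fitter off entirely for a native-resolution mode.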
@@ -323,13 +356,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
323 | * Full scaling, even if it changes the aspect ratio. | 356 | * Full scaling, even if it changes the aspect ratio. |
324 | * Fortunately this is all done for us in hw. | 357 | * Fortunately this is all done for us in hw. |
325 | */ | 358 | */ |
326 | pfit_control |= PFIT_ENABLE; | 359 | if (mode->vdisplay != adjusted_mode->vdisplay || |
327 | if (INTEL_INFO(dev)->gen >= 4) | 360 | mode->hdisplay != adjusted_mode->hdisplay) { |
328 | pfit_control |= PFIT_SCALING_AUTO; | 361 | pfit_control |= PFIT_ENABLE; |
329 | else | 362 | if (INTEL_INFO(dev)->gen >= 4) |
330 | pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | | 363 | pfit_control |= PFIT_SCALING_AUTO; |
331 | VERT_INTERP_BILINEAR | | 364 | else |
332 | HORIZ_INTERP_BILINEAR); | 365 | pfit_control |= (VERT_AUTO_SCALE | |
366 | VERT_INTERP_BILINEAR | | ||
367 | HORIZ_AUTO_SCALE | | ||
368 | HORIZ_INTERP_BILINEAR); | ||
369 | } | ||
333 | break; | 370 | break; |
334 | 371 | ||
335 | default: | 372 | default: |
@@ -337,6 +374,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
337 | } | 374 | } |
338 | 375 | ||
339 | out: | 376 | out: |
377 | if ((pfit_control & PFIT_ENABLE) == 0) { | ||
378 | pfit_control = 0; | ||
379 | pfit_pgm_ratios = 0; | ||
380 | } | ||
340 | if (pfit_control != intel_lvds->pfit_control || | 381 | if (pfit_control != intel_lvds->pfit_control || |
341 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { | 382 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { |
342 | intel_lvds->pfit_control = pfit_control; | 383 | intel_lvds->pfit_control = pfit_control; |
@@ -360,8 +401,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) | |||
360 | struct drm_i915_private *dev_priv = dev->dev_private; | 401 | struct drm_i915_private *dev_priv = dev->dev_private; |
361 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 402 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
362 | 403 | ||
363 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
364 | |||
365 | /* We try to do the minimum that is necessary in order to unlock | 404 | /* We try to do the minimum that is necessary in order to unlock |
366 | * the registers for mode setting. | 405 | * the registers for mode setting. |
367 | * | 406 | * |
@@ -392,9 +431,6 @@ static void intel_lvds_commit(struct drm_encoder *encoder) | |||
392 | struct drm_i915_private *dev_priv = dev->dev_private; | 431 | struct drm_i915_private *dev_priv = dev->dev_private; |
393 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 432 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
394 | 433 | ||
395 | if (dev_priv->backlight_level == 0) | ||
396 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | ||
397 | |||
398 | /* Undo any unlocking done in prepare to prevent accidental | 434 | /* Undo any unlocking done in prepare to prevent accidental |
399 | * adjustment of the registers. | 435 | * adjustment of the registers. |
400 | */ | 436 | */ |
@@ -411,43 +447,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder) | |||
411 | /* Always do a full power on as we do not know what state | 447 | /* Always do a full power on as we do not know what state |
412 | * we were left in. | 448 | * we were left in. |
413 | */ | 449 | */ |
414 | intel_lvds_set_power(intel_lvds, true); | 450 | intel_lvds_enable(intel_lvds); |
415 | } | 451 | } |
416 | 452 | ||
417 | static void intel_lvds_mode_set(struct drm_encoder *encoder, | 453 | static void intel_lvds_mode_set(struct drm_encoder *encoder, |
418 | struct drm_display_mode *mode, | 454 | struct drm_display_mode *mode, |
419 | struct drm_display_mode *adjusted_mode) | 455 | struct drm_display_mode *adjusted_mode) |
420 | { | 456 | { |
421 | struct drm_device *dev = encoder->dev; | ||
422 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
423 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | ||
424 | |||
425 | /* | 457 | /* |
426 | * The LVDS pin pair will already have been turned on in the | 458 | * The LVDS pin pair will already have been turned on in the |
427 | * intel_crtc_mode_set since it has a large impact on the DPLL | 459 | * intel_crtc_mode_set since it has a large impact on the DPLL |
428 | * settings. | 460 | * settings. |
429 | */ | 461 | */ |
430 | |||
431 | if (HAS_PCH_SPLIT(dev)) | ||
432 | return; | ||
433 | |||
434 | if (!intel_lvds->pfit_dirty) | ||
435 | return; | ||
436 | |||
437 | /* | ||
438 | * Enable automatic panel scaling so that non-native modes fill the | ||
439 | * screen. Should be enabled before the pipe is enabled, according to | ||
440 | * register description and PRM. | ||
441 | */ | ||
442 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | ||
443 | intel_lvds->pfit_control, | ||
444 | intel_lvds->pfit_pgm_ratios); | ||
445 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | ||
446 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
447 | |||
448 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | ||
449 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
450 | intel_lvds->pfit_dirty = false; | ||
451 | } | 462 | } |
452 | 463 | ||
453 | /** | 464 | /** |
@@ -693,6 +704,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
693 | }, | 704 | }, |
694 | { | 705 | { |
695 | .callback = intel_no_lvds_dmi_callback, | 706 | .callback = intel_no_lvds_dmi_callback, |
707 | .ident = "AOpen i915GMm-HFS", | ||
708 | .matches = { | ||
709 | DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), | ||
710 | DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), | ||
711 | }, | ||
712 | }, | ||
713 | { | ||
714 | .callback = intel_no_lvds_dmi_callback, | ||
696 | .ident = "Aopen i945GTt-VFA", | 715 | .ident = "Aopen i945GTt-VFA", |
697 | .matches = { | 716 | .matches = { |
698 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), | 717 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), |
@@ -837,7 +856,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin) | |||
837 | * Create the connector, register the LVDS DDC bus, and try to figure out what | 856 | * Create the connector, register the LVDS DDC bus, and try to figure out what |
838 | * modes we can display on the LVDS panel (if present). | 857 | * modes we can display on the LVDS panel (if present). |
839 | */ | 858 | */ |
840 | void intel_lvds_init(struct drm_device *dev) | 859 | bool intel_lvds_init(struct drm_device *dev) |
841 | { | 860 | { |
842 | struct drm_i915_private *dev_priv = dev->dev_private; | 861 | struct drm_i915_private *dev_priv = dev->dev_private; |
843 | struct intel_lvds *intel_lvds; | 862 | struct intel_lvds *intel_lvds; |
@@ -853,37 +872,37 @@ void intel_lvds_init(struct drm_device *dev) | |||
853 | 872 | ||
854 | /* Skip init on machines we know falsely report LVDS */ | 873 | /* Skip init on machines we know falsely report LVDS */ |
855 | if (dmi_check_system(intel_no_lvds)) | 874 | if (dmi_check_system(intel_no_lvds)) |
856 | return; | 875 | return false; |
857 | 876 | ||
858 | pin = GMBUS_PORT_PANEL; | 877 | pin = GMBUS_PORT_PANEL; |
859 | if (!lvds_is_present_in_vbt(dev, &pin)) { | 878 | if (!lvds_is_present_in_vbt(dev, &pin)) { |
860 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); | 879 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); |
861 | return; | 880 | return false; |
862 | } | 881 | } |
863 | 882 | ||
864 | if (HAS_PCH_SPLIT(dev)) { | 883 | if (HAS_PCH_SPLIT(dev)) { |
865 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) | 884 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) |
866 | return; | 885 | return false; |
867 | if (dev_priv->edp.support) { | 886 | if (dev_priv->edp.support) { |
868 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); | 887 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); |
869 | return; | 888 | return false; |
870 | } | 889 | } |
871 | } | 890 | } |
872 | 891 | ||
873 | if (!intel_lvds_ddc_probe(dev, pin)) { | 892 | if (!intel_lvds_ddc_probe(dev, pin)) { |
874 | DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); | 893 | DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); |
875 | return; | 894 | return false; |
876 | } | 895 | } |
877 | 896 | ||
878 | intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); | 897 | intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); |
879 | if (!intel_lvds) { | 898 | if (!intel_lvds) { |
880 | return; | 899 | return false; |
881 | } | 900 | } |
882 | 901 | ||
883 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 902 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
884 | if (!intel_connector) { | 903 | if (!intel_connector) { |
885 | kfree(intel_lvds); | 904 | kfree(intel_lvds); |
886 | return; | 905 | return false; |
887 | } | 906 | } |
888 | 907 | ||
889 | if (!HAS_PCH_SPLIT(dev)) { | 908 | if (!HAS_PCH_SPLIT(dev)) { |
@@ -904,6 +923,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
904 | 923 | ||
905 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 924 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
906 | intel_encoder->crtc_mask = (1 << 1); | 925 | intel_encoder->crtc_mask = (1 << 1); |
926 | if (INTEL_INFO(dev)->gen >= 5) | ||
927 | intel_encoder->crtc_mask |= (1 << 0); | ||
907 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 928 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
908 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 929 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
909 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 930 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
@@ -1009,10 +1030,18 @@ void intel_lvds_init(struct drm_device *dev) | |||
1009 | out: | 1030 | out: |
1010 | if (HAS_PCH_SPLIT(dev)) { | 1031 | if (HAS_PCH_SPLIT(dev)) { |
1011 | u32 pwm; | 1032 | u32 pwm; |
1012 | /* make sure PWM is enabled */ | 1033 | |
1034 | pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0; | ||
1035 | |||
1036 | /* make sure PWM is enabled and locked to the LVDS pipe */ | ||
1013 | pwm = I915_READ(BLC_PWM_CPU_CTL2); | 1037 | pwm = I915_READ(BLC_PWM_CPU_CTL2); |
1014 | pwm |= (PWM_ENABLE | PWM_PIPE_B); | 1038 | if (pipe == 0 && (pwm & PWM_PIPE_B)) |
1015 | I915_WRITE(BLC_PWM_CPU_CTL2, pwm); | 1039 | I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE); |
1040 | if (pipe) | ||
1041 | pwm |= PWM_PIPE_B; | ||
1042 | else | ||
1043 | pwm &= ~PWM_PIPE_B; | ||
1044 | I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE); | ||
1016 | 1045 | ||
1017 | pwm = I915_READ(BLC_PWM_PCH_CTL1); | 1046 | pwm = I915_READ(BLC_PWM_PCH_CTL1); |
1018 | pwm |= PWM_PCH_ENABLE; | 1047 | pwm |= PWM_PCH_ENABLE; |
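The rewritten out: block first drops PWM_ENABLE when the PWM is currently routed to the wrong pipe, then programs the pipe-select bit and re-enables in a single write; the old code unconditionally forced PWM_PIPE_B. The bit manipulation in isolation (standalone sketch; the bit positions below are illustrative, not copied from i915_reg.h):

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative bit positions, not the real register layout. */
    #define PWM_ENABLE (1u << 31)
    #define PWM_PIPE_B (1u << 29)

    /* Value to program: route the PWM to 'pipe' (0 or 1) and enable,
     * mirroring the fixup in the LVDS out: path above. */
    static unsigned pwm_for_pipe(unsigned pwm, int pipe)
    {
        if (pipe)
            pwm |= PWM_PIPE_B;
        else
            pwm &= ~PWM_PIPE_B;
        return pwm | PWM_ENABLE;
    }

    int main(void)
    {
        /* firmware left the PWM enabled on pipe B; LVDS is on pipe A */
        unsigned v = pwm_for_pipe(PWM_ENABLE | PWM_PIPE_B, 0);
        assert((v & PWM_ENABLE) && !(v & PWM_PIPE_B));
        printf("ok\n");
        return 0;
    }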
@@ -1026,7 +1055,7 @@ out: | |||
1026 | /* keep the LVDS connector */ | 1055 | /* keep the LVDS connector */ |
1027 | dev_priv->int_lvds_connector = connector; | 1056 | dev_priv->int_lvds_connector = connector; |
1028 | drm_sysfs_connector_add(connector); | 1057 | drm_sysfs_connector_add(connector); |
1029 | return; | 1058 | return true; |
1030 | 1059 | ||
1031 | failed: | 1060 | failed: |
1032 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); | 1061 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); |
@@ -1034,4 +1063,5 @@ failed: | |||
1034 | drm_encoder_cleanup(encoder); | 1063 | drm_encoder_cleanup(encoder); |
1035 | kfree(intel_lvds); | 1064 | kfree(intel_lvds); |
1036 | kfree(intel_connector); | 1065 | kfree(intel_connector); |
1066 | return false; | ||
1037 | } | 1067 | } |
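intel_lvds_init() now reports whether an LVDS connector was actually registered, so callers no longer have to assume a panel exists whenever the hardware claims one. A hypothetical caller shape (sketch only; the stub below is invented, the real consumer would be the output-setup path):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the probe above, which returns false when the
     * panel is absent (DMI blacklist, no VBT entry, DDC silence...). */
    static bool lvds_init_stub(void) { return false; }

    int main(void)
    {
        if (lvds_init_stub())
            printf("LVDS connector registered\n");
        else
            printf("no LVDS panel; trying other outputs\n");
        return 0;
    }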
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 9b0d9a867aea..64fd64443ca6 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -26,6 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
29 | #include <linux/acpi_io.h> | ||
29 | #include <acpi/video.h> | 30 | #include <acpi/video.h> |
30 | 31 | ||
31 | #include "drmP.h" | 32 | #include "drmP.h" |
@@ -273,14 +274,8 @@ void intel_opregion_enable_asle(struct drm_device *dev) | |||
273 | struct opregion_asle *asle = dev_priv->opregion.asle; | 274 | struct opregion_asle *asle = dev_priv->opregion.asle; |
274 | 275 | ||
275 | if (asle) { | 276 | if (asle) { |
276 | if (IS_MOBILE(dev)) { | 277 | if (IS_MOBILE(dev)) |
277 | unsigned long irqflags; | ||
278 | |||
279 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | ||
280 | intel_enable_asle(dev); | 278 | intel_enable_asle(dev); |
281 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, | ||
282 | irqflags); | ||
283 | } | ||
284 | 279 | ||
285 | asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | | 280 | asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | |
286 | ASLE_PFMB_EN; | 281 | ASLE_PFMB_EN; |
@@ -482,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev) | |||
482 | return -ENOTSUPP; | 477 | return -ENOTSUPP; |
483 | } | 478 | } |
484 | 479 | ||
485 | base = ioremap(asls, OPREGION_SIZE); | 480 | base = acpi_os_ioremap(asls, OPREGION_SIZE); |
486 | if (!base) | 481 | if (!base) |
487 | return -ENOMEM; | 482 | return -ENOMEM; |
488 | 483 | ||
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 02ff0a481f47..3fbb98b948d6 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | |||
221 | int ret; | 221 | int ret; |
222 | 222 | ||
223 | BUG_ON(overlay->last_flip_req); | 223 | BUG_ON(overlay->last_flip_req); |
224 | overlay->last_flip_req = | 224 | ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); |
225 | i915_add_request(dev, NULL, request, &dev_priv->render_ring); | 225 | if (ret) { |
226 | if (overlay->last_flip_req == 0) | 226 | kfree(request); |
227 | return -ENOMEM; | 227 | return ret; |
228 | 228 | } | |
229 | overlay->last_flip_req = request->seqno; | ||
229 | overlay->flip_tail = tail; | 230 | overlay->flip_tail = tail; |
230 | ret = i915_do_wait_request(dev, | 231 | ret = i915_do_wait_request(dev, |
231 | overlay->last_flip_req, true, | 232 | overlay->last_flip_req, true, |
232 | &dev_priv->render_ring); | 233 | LP_RING(dev_priv)); |
233 | if (ret) | 234 | if (ret) |
234 | return ret; | 235 | return ret; |
235 | 236 | ||
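i915_add_request() now returns an error code instead of signalling failure with a zero seqno, and on failure the request was never handed to the ring, so the caller still owns the allocation and must kfree() it, as the new error path does. That ownership-transfers-only-on-success convention, reduced to a standalone sketch (stand-in types, not driver code):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct request { unsigned seqno; };

    /* Stand-in for i915_add_request(): takes ownership of 'req' only
     * when it returns 0; on error the caller must still free it. */
    static int add_request(struct request *req, int fail)
    {
        if (fail)
            return -EIO;
        req->seqno = 42; /* queued: the ring owns the request now */
        return 0;
    }

    int main(void)
    {
        struct request *req = malloc(sizeof(*req));
        if (!req)
            return 1;

        int ret = add_request(req, 1);
        if (ret) {
            free(req); /* failure: ownership never transferred */
            fprintf(stderr, "add_request failed: %d\n", ret);
            return 1;
        }
        printf("seqno %u\n", req->seqno);
        return 0;
    }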
@@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev) | |||
289 | static int intel_overlay_on(struct intel_overlay *overlay) | 290 | static int intel_overlay_on(struct intel_overlay *overlay) |
290 | { | 291 | { |
291 | struct drm_device *dev = overlay->dev; | 292 | struct drm_device *dev = overlay->dev; |
293 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
292 | struct drm_i915_gem_request *request; | 294 | struct drm_i915_gem_request *request; |
293 | int pipe_a_quirk = 0; | 295 | int pipe_a_quirk = 0; |
294 | int ret; | 296 | int ret; |
@@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
308 | goto out; | 310 | goto out; |
309 | } | 311 | } |
310 | 312 | ||
311 | BEGIN_LP_RING(4); | 313 | ret = BEGIN_LP_RING(4); |
314 | if (ret) { | ||
315 | kfree(request); | ||
316 | goto out; | ||
317 | } | ||
318 | |||
312 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); | 319 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); |
313 | OUT_RING(overlay->flip_addr | OFC_UPDATE); | 320 | OUT_RING(overlay->flip_addr | OFC_UPDATE); |
314 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 321 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
@@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
332 | struct drm_i915_gem_request *request; | 339 | struct drm_i915_gem_request *request; |
333 | u32 flip_addr = overlay->flip_addr; | 340 | u32 flip_addr = overlay->flip_addr; |
334 | u32 tmp; | 341 | u32 tmp; |
342 | int ret; | ||
335 | 343 | ||
336 | BUG_ON(!overlay->active); | 344 | BUG_ON(!overlay->active); |
337 | 345 | ||
@@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
347 | if (tmp & (1 << 17)) | 355 | if (tmp & (1 << 17)) |
348 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); | 356 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); |
349 | 357 | ||
350 | BEGIN_LP_RING(2); | 358 | ret = BEGIN_LP_RING(2); |
359 | if (ret) { | ||
360 | kfree(request); | ||
361 | return ret; | ||
362 | } | ||
351 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 363 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
352 | OUT_RING(flip_addr); | 364 | OUT_RING(flip_addr); |
353 | ADVANCE_LP_RING(); | 365 | ADVANCE_LP_RING(); |
354 | 366 | ||
355 | overlay->last_flip_req = | 367 | ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); |
356 | i915_add_request(dev, NULL, request, &dev_priv->render_ring); | 368 | if (ret) { |
369 | kfree(request); | ||
370 | return ret; | ||
371 | } | ||
372 | |||
373 | overlay->last_flip_req = request->seqno; | ||
357 | return 0; | 374 | return 0; |
358 | } | 375 | } |
359 | 376 | ||
360 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) | 377 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) |
361 | { | 378 | { |
362 | struct drm_gem_object *obj = &overlay->old_vid_bo->base; | 379 | struct drm_i915_gem_object *obj = overlay->old_vid_bo; |
363 | 380 | ||
364 | i915_gem_object_unpin(obj); | 381 | i915_gem_object_unpin(obj); |
365 | drm_gem_object_unreference(obj); | 382 | drm_gem_object_unreference(&obj->base); |
366 | 383 | ||
367 | overlay->old_vid_bo = NULL; | 384 | overlay->old_vid_bo = NULL; |
368 | } | 385 | } |
369 | 386 | ||
370 | static void intel_overlay_off_tail(struct intel_overlay *overlay) | 387 | static void intel_overlay_off_tail(struct intel_overlay *overlay) |
371 | { | 388 | { |
372 | struct drm_gem_object *obj; | 389 | struct drm_i915_gem_object *obj = overlay->vid_bo; |
373 | 390 | ||
374 | /* never have the overlay hw on without showing a frame */ | 391 | /* never have the overlay hw on without showing a frame */ |
375 | BUG_ON(!overlay->vid_bo); | 392 | BUG_ON(!overlay->vid_bo); |
376 | obj = &overlay->vid_bo->base; | ||
377 | 393 | ||
378 | i915_gem_object_unpin(obj); | 394 | i915_gem_object_unpin(obj); |
379 | drm_gem_object_unreference(obj); | 395 | drm_gem_object_unreference(&obj->base); |
380 | overlay->vid_bo = NULL; | 396 | overlay->vid_bo = NULL; |
381 | 397 | ||
382 | overlay->crtc->overlay = NULL; | 398 | overlay->crtc->overlay = NULL; |
@@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay, | |||
389 | bool interruptible) | 405 | bool interruptible) |
390 | { | 406 | { |
391 | struct drm_device *dev = overlay->dev; | 407 | struct drm_device *dev = overlay->dev; |
408 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
392 | u32 flip_addr = overlay->flip_addr; | 409 | u32 flip_addr = overlay->flip_addr; |
393 | struct drm_i915_gem_request *request; | 410 | struct drm_i915_gem_request *request; |
411 | int ret; | ||
394 | 412 | ||
395 | BUG_ON(!overlay->active); | 413 | BUG_ON(!overlay->active); |
396 | 414 | ||
@@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay, | |||
404 | * of the hw. Do it in both cases */ | 422 | * of the hw. Do it in both cases */ |
405 | flip_addr |= OFC_UPDATE; | 423 | flip_addr |= OFC_UPDATE; |
406 | 424 | ||
407 | BEGIN_LP_RING(6); | 425 | ret = BEGIN_LP_RING(6); |
426 | if (ret) { | ||
427 | kfree(request); | ||
428 | return ret; | ||
429 | } | ||
408 | /* wait for overlay to go idle */ | 430 | /* wait for overlay to go idle */ |
409 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 431 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
410 | OUT_RING(flip_addr); | 432 | OUT_RING(flip_addr); |
@@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | |||
432 | return 0; | 454 | return 0; |
433 | 455 | ||
434 | ret = i915_do_wait_request(dev, overlay->last_flip_req, | 456 | ret = i915_do_wait_request(dev, overlay->last_flip_req, |
435 | interruptible, &dev_priv->render_ring); | 457 | interruptible, LP_RING(dev_priv)); |
436 | if (ret) | 458 | if (ret) |
437 | return ret; | 459 | return ret; |
438 | 460 | ||
@@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | |||
467 | if (request == NULL) | 489 | if (request == NULL) |
468 | return -ENOMEM; | 490 | return -ENOMEM; |
469 | 491 | ||
470 | BEGIN_LP_RING(2); | 492 | ret = BEGIN_LP_RING(2); |
493 | if (ret) { | ||
494 | kfree(request); | ||
495 | return ret; | ||
496 | } | ||
497 | |||
471 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 498 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
472 | OUT_RING(MI_NOOP); | 499 | OUT_RING(MI_NOOP); |
473 | ADVANCE_LP_RING(); | 500 | ADVANCE_LP_RING(); |
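Every BEGIN_LP_RING() site now checks for failure: reserving ring space can fail (for instance on a wedged GPU), and emitting without a successful reservation would corrupt the ring, so the request is freed and the error propagated. The reserve-then-emit contract as a standalone sketch (ring_begin/ring_emit are invented stand-ins):

    #include <errno.h>
    #include <stdio.h>

    struct ring { unsigned tail, space, buf[64]; };

    /* Fail up front if 'dwords' words cannot be reserved; only after a
     * successful begin may the caller emit exactly that many words. */
    static int ring_begin(struct ring *r, unsigned dwords)
    {
        return r->space >= dwords ? 0 : -EBUSY;
    }

    static void ring_emit(struct ring *r, unsigned v)
    {
        r->buf[r->tail++ % 64] = v;
        r->space--;
    }

    int main(void)
    {
        struct ring r = { .tail = 0, .space = 2 };
        if (ring_begin(&r, 2) == 0) {
            ring_emit(&r, 0x1); /* MI_WAIT_FOR_EVENT analogue */
            ring_emit(&r, 0x0); /* MI_NOOP analogue */
        }
        if (ring_begin(&r, 2)) /* ring full: reservation fails */
            fprintf(stderr, "ring busy\n");
        return 0;
    }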
@@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params) | |||
736 | } | 763 | } |
737 | 764 | ||
738 | static int intel_overlay_do_put_image(struct intel_overlay *overlay, | 765 | static int intel_overlay_do_put_image(struct intel_overlay *overlay, |
739 | struct drm_gem_object *new_bo, | 766 | struct drm_i915_gem_object *new_bo, |
740 | struct put_image_params *params) | 767 | struct put_image_params *params) |
741 | { | 768 | { |
742 | int ret, tmp_width; | 769 | int ret, tmp_width; |
743 | struct overlay_registers *regs; | 770 | struct overlay_registers *regs; |
744 | bool scale_changed = false; | 771 | bool scale_changed = false; |
745 | struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); | ||
746 | struct drm_device *dev = overlay->dev; | 772 | struct drm_device *dev = overlay->dev; |
747 | 773 | ||
748 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 774 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
@@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
753 | if (ret != 0) | 779 | if (ret != 0) |
754 | return ret; | 780 | return ret; |
755 | 781 | ||
756 | ret = i915_gem_object_pin(new_bo, PAGE_SIZE); | 782 | ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); |
757 | if (ret != 0) | 783 | if (ret != 0) |
758 | return ret; | 784 | return ret; |
759 | 785 | ||
@@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
761 | if (ret != 0) | 787 | if (ret != 0) |
762 | goto out_unpin; | 788 | goto out_unpin; |
763 | 789 | ||
790 | ret = i915_gem_object_put_fence(new_bo); | ||
791 | if (ret) | ||
792 | goto out_unpin; | ||
793 | |||
764 | if (!overlay->active) { | 794 | if (!overlay->active) { |
765 | regs = intel_overlay_map_regs(overlay); | 795 | regs = intel_overlay_map_regs(overlay); |
766 | if (!regs) { | 796 | if (!regs) { |
@@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
797 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, | 827 | regs->SWIDTHSW = calc_swidthsw(overlay->dev, |
798 | params->offset_Y, tmp_width); | 828 | params->offset_Y, tmp_width); |
799 | regs->SHEIGHT = params->src_h; | 829 | regs->SHEIGHT = params->src_h; |
800 | regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y; | 830 | regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y; |
801 | regs->OSTRIDE = params->stride_Y; | 831 | regs->OSTRIDE = params->stride_Y; |
802 | 832 | ||
803 | if (params->format & I915_OVERLAY_YUV_PLANAR) { | 833 | if (params->format & I915_OVERLAY_YUV_PLANAR) { |
@@ -811,8 +841,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
811 | params->src_w/uv_hscale); | 841 | params->src_w/uv_hscale); |
812 | regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; | 842 | regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; |
813 | regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; | 843 | regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; |
814 | regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; | 844 | regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; |
815 | regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; | 845 | regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; |
816 | regs->OSTRIDE |= params->stride_UV << 16; | 846 | regs->OSTRIDE |= params->stride_UV << 16; |
817 | } | 847 | } |
818 | 848 | ||
@@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
829 | goto out_unpin; | 859 | goto out_unpin; |
830 | 860 | ||
831 | overlay->old_vid_bo = overlay->vid_bo; | 861 | overlay->old_vid_bo = overlay->vid_bo; |
832 | overlay->vid_bo = to_intel_bo(new_bo); | 862 | overlay->vid_bo = new_bo; |
833 | 863 | ||
834 | return 0; | 864 | return 0; |
835 | 865 | ||
@@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec) | |||
942 | 972 | ||
943 | static int check_overlay_src(struct drm_device *dev, | 973 | static int check_overlay_src(struct drm_device *dev, |
944 | struct drm_intel_overlay_put_image *rec, | 974 | struct drm_intel_overlay_put_image *rec, |
945 | struct drm_gem_object *new_bo) | 975 | struct drm_i915_gem_object *new_bo) |
946 | { | 976 | { |
947 | int uv_hscale = uv_hsubsampling(rec->flags); | 977 | int uv_hscale = uv_hsubsampling(rec->flags); |
948 | int uv_vscale = uv_vsubsampling(rec->flags); | 978 | int uv_vscale = uv_vsubsampling(rec->flags); |
@@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev, | |||
1027 | return -EINVAL; | 1057 | return -EINVAL; |
1028 | 1058 | ||
1029 | tmp = rec->stride_Y*rec->src_height; | 1059 | tmp = rec->stride_Y*rec->src_height; |
1030 | if (rec->offset_Y + tmp > new_bo->size) | 1060 | if (rec->offset_Y + tmp > new_bo->base.size) |
1031 | return -EINVAL; | 1061 | return -EINVAL; |
1032 | break; | 1062 | break; |
1033 | 1063 | ||
@@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev, | |||
1038 | return -EINVAL; | 1068 | return -EINVAL; |
1039 | 1069 | ||
1040 | tmp = rec->stride_Y * rec->src_height; | 1070 | tmp = rec->stride_Y * rec->src_height; |
1041 | if (rec->offset_Y + tmp > new_bo->size) | 1071 | if (rec->offset_Y + tmp > new_bo->base.size) |
1042 | return -EINVAL; | 1072 | return -EINVAL; |
1043 | 1073 | ||
1044 | tmp = rec->stride_UV * (rec->src_height / uv_vscale); | 1074 | tmp = rec->stride_UV * (rec->src_height / uv_vscale); |
1045 | if (rec->offset_U + tmp > new_bo->size || | 1075 | if (rec->offset_U + tmp > new_bo->base.size || |
1046 | rec->offset_V + tmp > new_bo->size) | 1076 | rec->offset_V + tmp > new_bo->base.size) |
1047 | return -EINVAL; | 1077 | return -EINVAL; |
1048 | break; | 1078 | break; |
1049 | } | 1079 | } |
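The size checks now read new_bo->base.size through the embedded GEM object, but the arithmetic is unchanged: each plane must satisfy offset + stride * height <= object size. A standalone version of the check, done in 64 bits here to sidestep any u32 overflow (a sketch, not the driver's code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A plane fits iff offset + stride*height <= size; widened to
     * 64 bits so the multiply cannot wrap. */
    static int plane_fits(uint32_t offset, uint32_t stride,
                          uint32_t height, uint64_t size)
    {
        return offset + (uint64_t)stride * height <= size;
    }

    int main(void)
    {
        /* 720x576 packed 2-byte-per-pixel plane in a 1 MiB object */
        assert(plane_fits(0, 720 * 2, 576, 1 << 20));
        /* the same plane at a 512 KiB offset no longer fits */
        assert(!plane_fits(512 << 10, 720 * 2, 576, 1 << 20));
        printf("ok\n");
        return 0;
    }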
@@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1086 | struct intel_overlay *overlay; | 1116 | struct intel_overlay *overlay; |
1087 | struct drm_mode_object *drmmode_obj; | 1117 | struct drm_mode_object *drmmode_obj; |
1088 | struct intel_crtc *crtc; | 1118 | struct intel_crtc *crtc; |
1089 | struct drm_gem_object *new_bo; | 1119 | struct drm_i915_gem_object *new_bo; |
1090 | struct put_image_params *params; | 1120 | struct put_image_params *params; |
1091 | int ret; | 1121 | int ret; |
1092 | 1122 | ||
@@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1125 | } | 1155 | } |
1126 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); | 1156 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); |
1127 | 1157 | ||
1128 | new_bo = drm_gem_object_lookup(dev, file_priv, | 1158 | new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, |
1129 | put_image_rec->bo_handle); | 1159 | put_image_rec->bo_handle)); |
1130 | if (!new_bo) { | 1160 | if (!new_bo) { |
1131 | ret = -ENOENT; | 1161 | ret = -ENOENT; |
1132 | goto out_free; | 1162 | goto out_free; |
@@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1135 | mutex_lock(&dev->mode_config.mutex); | 1165 | mutex_lock(&dev->mode_config.mutex); |
1136 | mutex_lock(&dev->struct_mutex); | 1166 | mutex_lock(&dev->struct_mutex); |
1137 | 1167 | ||
1168 | if (new_bo->tiling_mode) { | ||
1169 | DRM_ERROR("buffer used for overlay image can not be tiled\n"); | ||
1170 | ret = -EINVAL; | ||
1171 | goto out_unlock; | ||
1172 | } | ||
1173 | |||
1138 | ret = intel_overlay_recover_from_interrupt(overlay, true); | 1174 | ret = intel_overlay_recover_from_interrupt(overlay, true); |
1139 | if (ret != 0) | 1175 | if (ret != 0) |
1140 | goto out_unlock; | 1176 | goto out_unlock; |
@@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1217 | out_unlock: | 1253 | out_unlock: |
1218 | mutex_unlock(&dev->struct_mutex); | 1254 | mutex_unlock(&dev->struct_mutex); |
1219 | mutex_unlock(&dev->mode_config.mutex); | 1255 | mutex_unlock(&dev->mode_config.mutex); |
1220 | drm_gem_object_unreference_unlocked(new_bo); | 1256 | drm_gem_object_unreference_unlocked(&new_bo->base); |
1221 | out_free: | 1257 | out_free: |
1222 | kfree(params); | 1258 | kfree(params); |
1223 | 1259 | ||
@@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1370 | { | 1406 | { |
1371 | drm_i915_private_t *dev_priv = dev->dev_private; | 1407 | drm_i915_private_t *dev_priv = dev->dev_private; |
1372 | struct intel_overlay *overlay; | 1408 | struct intel_overlay *overlay; |
1373 | struct drm_gem_object *reg_bo; | 1409 | struct drm_i915_gem_object *reg_bo; |
1374 | struct overlay_registers *regs; | 1410 | struct overlay_registers *regs; |
1375 | int ret; | 1411 | int ret; |
1376 | 1412 | ||
@@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1385 | reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); | 1421 | reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); |
1386 | if (!reg_bo) | 1422 | if (!reg_bo) |
1387 | goto out_free; | 1423 | goto out_free; |
1388 | overlay->reg_bo = to_intel_bo(reg_bo); | 1424 | overlay->reg_bo = reg_bo; |
1389 | 1425 | ||
1390 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { | 1426 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { |
1391 | ret = i915_gem_attach_phys_object(dev, reg_bo, | 1427 | ret = i915_gem_attach_phys_object(dev, reg_bo, |
@@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1395 | DRM_ERROR("failed to attach phys overlay regs\n"); | 1431 | DRM_ERROR("failed to attach phys overlay regs\n"); |
1396 | goto out_free_bo; | 1432 | goto out_free_bo; |
1397 | } | 1433 | } |
1398 | overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; | 1434 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; |
1399 | } else { | 1435 | } else { |
1400 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); | 1436 | ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); |
1401 | if (ret) { | 1437 | if (ret) { |
1402 | DRM_ERROR("failed to pin overlay register bo\n"); | 1438 | DRM_ERROR("failed to pin overlay register bo\n"); |
1403 | goto out_free_bo; | 1439 | goto out_free_bo; |
1404 | } | 1440 | } |
1405 | overlay->flip_addr = overlay->reg_bo->gtt_offset; | 1441 | overlay->flip_addr = reg_bo->gtt_offset; |
1406 | 1442 | ||
1407 | ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); | 1443 | ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); |
1408 | if (ret) { | 1444 | if (ret) { |
@@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1434 | out_unpin_bo: | 1470 | out_unpin_bo: |
1435 | i915_gem_object_unpin(reg_bo); | 1471 | i915_gem_object_unpin(reg_bo); |
1436 | out_free_bo: | 1472 | out_free_bo: |
1437 | drm_gem_object_unreference(reg_bo); | 1473 | drm_gem_object_unreference(®_bo->base); |
1438 | out_free: | 1474 | out_free: |
1439 | kfree(overlay); | 1475 | kfree(overlay); |
1440 | return; | 1476 | return; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 92ff8f385278..c65992df458d 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev) | |||
125 | return 0; | 125 | return 0; |
126 | } | 126 | } |
127 | 127 | ||
128 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | ||
129 | { | ||
130 | u32 val; | ||
131 | |||
132 | /* Restore the CTL value if it was lost, e.g. after a GPU reset */ | ||
133 | |||
134 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | ||
135 | val = I915_READ(BLC_PWM_PCH_CTL2); | ||
136 | if (dev_priv->saveBLC_PWM_CTL2 == 0) { | ||
137 | dev_priv->saveBLC_PWM_CTL2 = val; | ||
138 | } else if (val == 0) { | ||
139 | I915_WRITE(BLC_PWM_PCH_CTL2, | ||
140 | dev_priv->saveBLC_PWM_CTL); | ||
141 | val = dev_priv->saveBLC_PWM_CTL; | ||
142 | } | ||
143 | } else { | ||
144 | val = I915_READ(BLC_PWM_CTL); | ||
145 | if (dev_priv->saveBLC_PWM_CTL == 0) { | ||
146 | dev_priv->saveBLC_PWM_CTL = val; | ||
147 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | ||
148 | } else if (val == 0) { | ||
149 | I915_WRITE(BLC_PWM_CTL, | ||
150 | dev_priv->saveBLC_PWM_CTL); | ||
151 | I915_WRITE(BLC_PWM_CTL2, | ||
152 | dev_priv->saveBLC_PWM_CTL2); | ||
153 | val = dev_priv->saveBLC_PWM_CTL; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | return val; | ||
158 | } | ||
159 | |||
128 | u32 intel_panel_get_max_backlight(struct drm_device *dev) | 160 | u32 intel_panel_get_max_backlight(struct drm_device *dev) |
129 | { | 161 | { |
130 | struct drm_i915_private *dev_priv = dev->dev_private; | 162 | struct drm_i915_private *dev_priv = dev->dev_private; |
131 | u32 max; | 163 | u32 max; |
132 | 164 | ||
165 | max = i915_read_blc_pwm_ctl(dev_priv); | ||
166 | if (max == 0) { | ||
167 | /* XXX add code here to query mode clock or hardware clock | ||
168 | * and program max PWM appropriately. | ||
169 | */ | ||
170 | printk_once(KERN_WARNING "fixme: max PWM is zero.\n"); | ||
171 | return 1; | ||
172 | } | ||
173 | |||
133 | if (HAS_PCH_SPLIT(dev)) { | 174 | if (HAS_PCH_SPLIT(dev)) { |
134 | max = I915_READ(BLC_PWM_PCH_CTL2) >> 16; | 175 | max >>= 16; |
135 | } else { | 176 | } else { |
136 | max = I915_READ(BLC_PWM_CTL); | ||
137 | if (IS_PINEVIEW(dev)) { | 177 | if (IS_PINEVIEW(dev)) { |
138 | max >>= 17; | 178 | max >>= 17; |
139 | } else { | 179 | } else { |
@@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) | |||
146 | max *= 0xff; | 186 | max *= 0xff; |
147 | } | 187 | } |
148 | 188 | ||
149 | if (max == 0) { | ||
150 | /* XXX add code here to query mode clock or hardware clock | ||
151 | * and program max PWM appropriately. | ||
152 | */ | ||
153 | DRM_ERROR("fixme: max PWM is zero.\n"); | ||
154 | max = 1; | ||
155 | } | ||
156 | |||
157 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); | 189 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); |
158 | return max; | 190 | return max; |
159 | } | 191 | } |
@@ -218,3 +250,34 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
218 | tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; | 250 | tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; |
219 | I915_WRITE(BLC_PWM_CTL, tmp | level); | 251 | I915_WRITE(BLC_PWM_CTL, tmp | level); |
220 | } | 252 | } |
253 | |||
254 | void intel_panel_disable_backlight(struct drm_device *dev) | ||
255 | { | ||
256 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
257 | |||
258 | if (dev_priv->backlight_enabled) { | ||
259 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
260 | dev_priv->backlight_enabled = false; | ||
261 | } | ||
262 | |||
263 | intel_panel_set_backlight(dev, 0); | ||
264 | } | ||
265 | |||
266 | void intel_panel_enable_backlight(struct drm_device *dev) | ||
267 | { | ||
268 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
269 | |||
270 | if (dev_priv->backlight_level == 0) | ||
271 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | ||
272 | |||
273 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
274 | dev_priv->backlight_enabled = true; | ||
275 | } | ||
276 | |||
277 | void intel_panel_setup_backlight(struct drm_device *dev) | ||
278 | { | ||
279 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
280 | |||
281 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
282 | dev_priv->backlight_enabled = dev_priv->backlight_level != 0; | ||
283 | } | ||
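i915_read_blc_pwm_ctl() implements a save-once/restore-if-zeroed scheme: the first non-zero read of the PWM control register is cached, and a later read of zero, typically after a GPU reset has cleared the register, is answered from the cache. (One oddity worth noting: the PCH branch restores dev_priv->saveBLC_PWM_CTL into BLC_PWM_PCH_CTL2, which looks like it was meant to be saveBLC_PWM_CTL2.) The scheme itself, standalone:

    #include <assert.h>
    #include <stdio.h>

    /* First non-zero read populates *saved; a later zero read
     * (register lost, e.g. GPU reset) is answered from the cache. */
    static unsigned read_pwm_ctl(unsigned *saved, unsigned hw)
    {
        if (*saved == 0)
            *saved = hw;
        else if (hw == 0)
            hw = *saved;
        return hw;
    }

    int main(void)
    {
        unsigned saved = 0;
        assert(read_pwm_ctl(&saved, 0x12340000) == 0x12340000); /* cache */
        assert(read_pwm_ctl(&saved, 0) == 0x12340000);          /* restore */
        printf("ok\n");
        return 0;
    }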
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b83306f9244b..6218fa97aa1e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -34,6 +34,14 @@ | |||
34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
35 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
36 | 36 | ||
37 | static inline int ring_space(struct intel_ring_buffer *ring) | ||
38 | { | ||
39 | int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); | ||
40 | if (space < 0) | ||
41 | space += ring->size; | ||
42 | return space; | ||
43 | } | ||
44 | |||
37 | static u32 i915_gem_get_seqno(struct drm_device *dev) | 45 | static u32 i915_gem_get_seqno(struct drm_device *dev) |
38 | { | 46 | { |
39 | drm_i915_private_t *dev_priv = dev->dev_private; | 47 | drm_i915_private_t *dev_priv = dev->dev_private; |
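The new ring_space() helper centralises circular-buffer arithmetic that was previously open-coded in init_ring_common() and the wait paths: free space is head minus (tail + 8), wrapped by the ring size, with 8 bytes of slack so the tail can never advance right onto the head. The same arithmetic exercised standalone:

    #include <assert.h>
    #include <stdio.h>

    /* Free bytes between head and tail of a circular ring, keeping
     * 8 bytes of slack, exactly as the new helper computes it. */
    static int ring_space(int head, int tail, int size)
    {
        int space = head - (tail + 8);
        if (space < 0)
            space += size;
        return space;
    }

    int main(void)
    {
        /* empty ring (head == tail): all but the slack is free */
        assert(ring_space(0, 0, 4096) == 4096 - 8);
        /* tail 16 bytes behind head: 8 bytes usable */
        assert(ring_space(64, 48, 4096) == 8);
        printf("ok\n");
        return 0;
    }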
@@ -48,14 +56,15 @@ static u32 i915_gem_get_seqno(struct drm_device *dev) | |||
48 | return seqno; | 56 | return seqno; |
49 | } | 57 | } |
50 | 58 | ||
51 | static void | 59 | static int |
52 | render_ring_flush(struct drm_device *dev, | 60 | render_ring_flush(struct intel_ring_buffer *ring, |
53 | struct intel_ring_buffer *ring, | ||
54 | u32 invalidate_domains, | 61 | u32 invalidate_domains, |
55 | u32 flush_domains) | 62 | u32 flush_domains) |
56 | { | 63 | { |
64 | struct drm_device *dev = ring->dev; | ||
57 | drm_i915_private_t *dev_priv = dev->dev_private; | 65 | drm_i915_private_t *dev_priv = dev->dev_private; |
58 | u32 cmd; | 66 | u32 cmd; |
67 | int ret; | ||
59 | 68 | ||
60 | #if WATCH_EXEC | 69 | #if WATCH_EXEC |
61 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | 70 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, |
@@ -109,79 +118,87 @@ render_ring_flush(struct drm_device *dev, | |||
109 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) | 118 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) |
110 | cmd |= MI_EXE_FLUSH; | 119 | cmd |= MI_EXE_FLUSH; |
111 | 120 | ||
121 | if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && | ||
122 | (IS_G4X(dev) || IS_GEN5(dev))) | ||
123 | cmd |= MI_INVALIDATE_ISP; | ||
124 | |||
112 | #if WATCH_EXEC | 125 | #if WATCH_EXEC |
113 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | 126 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); |
114 | #endif | 127 | #endif |
115 | intel_ring_begin(dev, ring, 2); | 128 | ret = intel_ring_begin(ring, 2); |
116 | intel_ring_emit(dev, ring, cmd); | 129 | if (ret) |
117 | intel_ring_emit(dev, ring, MI_NOOP); | 130 | return ret; |
118 | intel_ring_advance(dev, ring); | 131 | |
132 | intel_ring_emit(ring, cmd); | ||
133 | intel_ring_emit(ring, MI_NOOP); | ||
134 | intel_ring_advance(ring); | ||
119 | } | 135 | } |
136 | |||
137 | return 0; | ||
120 | } | 138 | } |
121 | 139 | ||
122 | static void ring_write_tail(struct drm_device *dev, | 140 | static void ring_write_tail(struct intel_ring_buffer *ring, |
123 | struct intel_ring_buffer *ring, | ||
124 | u32 value) | 141 | u32 value) |
125 | { | 142 | { |
126 | drm_i915_private_t *dev_priv = dev->dev_private; | 143 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
127 | I915_WRITE_TAIL(ring, value); | 144 | I915_WRITE_TAIL(ring, value); |
128 | } | 145 | } |
129 | 146 | ||
130 | u32 intel_ring_get_active_head(struct drm_device *dev, | 147 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) |
131 | struct intel_ring_buffer *ring) | ||
132 | { | 148 | { |
133 | drm_i915_private_t *dev_priv = dev->dev_private; | 149 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
134 | u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? | 150 | u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? |
135 | RING_ACTHD(ring->mmio_base) : ACTHD; | 151 | RING_ACTHD(ring->mmio_base) : ACTHD; |
136 | 152 | ||
137 | return I915_READ(acthd_reg); | 153 | return I915_READ(acthd_reg); |
138 | } | 154 | } |
139 | 155 | ||
140 | static int init_ring_common(struct drm_device *dev, | 156 | static int init_ring_common(struct intel_ring_buffer *ring) |
141 | struct intel_ring_buffer *ring) | ||
142 | { | 157 | { |
158 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | ||
159 | struct drm_i915_gem_object *obj = ring->obj; | ||
143 | u32 head; | 160 | u32 head; |
144 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
145 | struct drm_i915_gem_object *obj_priv; | ||
146 | obj_priv = to_intel_bo(ring->gem_object); | ||
147 | 161 | ||
148 | /* Stop the ring if it's running. */ | 162 | /* Stop the ring if it's running. */ |
149 | I915_WRITE_CTL(ring, 0); | 163 | I915_WRITE_CTL(ring, 0); |
150 | I915_WRITE_HEAD(ring, 0); | 164 | I915_WRITE_HEAD(ring, 0); |
151 | ring->write_tail(dev, ring, 0); | 165 | ring->write_tail(ring, 0); |
152 | 166 | ||
153 | /* Initialize the ring. */ | 167 | /* Initialize the ring. */ |
154 | I915_WRITE_START(ring, obj_priv->gtt_offset); | 168 | I915_WRITE_START(ring, obj->gtt_offset); |
155 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 169 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
156 | 170 | ||
157 | /* G45 ring initialization fails to reset head to zero */ | 171 | /* G45 ring initialization fails to reset head to zero */ |
158 | if (head != 0) { | 172 | if (head != 0) { |
159 | DRM_ERROR("%s head not reset to zero " | 173 | DRM_DEBUG_KMS("%s head not reset to zero " |
160 | "ctl %08x head %08x tail %08x start %08x\n", | 174 | "ctl %08x head %08x tail %08x start %08x\n", |
161 | ring->name, | 175 | ring->name, |
162 | I915_READ_CTL(ring), | 176 | I915_READ_CTL(ring), |
163 | I915_READ_HEAD(ring), | 177 | I915_READ_HEAD(ring), |
164 | I915_READ_TAIL(ring), | 178 | I915_READ_TAIL(ring), |
165 | I915_READ_START(ring)); | 179 | I915_READ_START(ring)); |
166 | 180 | ||
167 | I915_WRITE_HEAD(ring, 0); | 181 | I915_WRITE_HEAD(ring, 0); |
168 | 182 | ||
169 | DRM_ERROR("%s head forced to zero " | 183 | if (I915_READ_HEAD(ring) & HEAD_ADDR) { |
170 | "ctl %08x head %08x tail %08x start %08x\n", | 184 | DRM_ERROR("failed to set %s head to zero " |
171 | ring->name, | 185 | "ctl %08x head %08x tail %08x start %08x\n", |
172 | I915_READ_CTL(ring), | 186 | ring->name, |
173 | I915_READ_HEAD(ring), | 187 | I915_READ_CTL(ring), |
174 | I915_READ_TAIL(ring), | 188 | I915_READ_HEAD(ring), |
175 | I915_READ_START(ring)); | 189 | I915_READ_TAIL(ring), |
190 | I915_READ_START(ring)); | ||
191 | } | ||
176 | } | 192 | } |
177 | 193 | ||
178 | I915_WRITE_CTL(ring, | 194 | I915_WRITE_CTL(ring, |
179 | ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | 195 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) |
180 | | RING_REPORT_64K | RING_VALID); | 196 | | RING_REPORT_64K | RING_VALID); |
181 | 197 | ||
182 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | ||
183 | /* If the head is still not zero, the ring is dead */ | 198 | /* If the head is still not zero, the ring is dead */ |
184 | if (head != 0) { | 199 | if ((I915_READ_CTL(ring) & RING_VALID) == 0 || |
200 | I915_READ_START(ring) != obj->gtt_offset || | ||
201 | (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { | ||
185 | DRM_ERROR("%s initialization failed " | 202 | DRM_ERROR("%s initialization failed " |
186 | "ctl %08x head %08x tail %08x start %08x\n", | 203 | "ctl %08x head %08x tail %08x start %08x\n", |
187 | ring->name, | 204 | ring->name, |
@@ -192,344 +209,569 @@ static int init_ring_common(struct drm_device *dev, | |||
192 | return -EIO; | 209 | return -EIO; |
193 | } | 210 | } |
194 | 211 | ||
195 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 212 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) |
196 | i915_kernel_lost_context(dev); | 213 | i915_kernel_lost_context(ring->dev); |
197 | else { | 214 | else { |
198 | ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; | 215 | ring->head = I915_READ_HEAD(ring); |
199 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | 216 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
200 | ring->space = ring->head - (ring->tail + 8); | 217 | ring->space = ring_space(ring); |
201 | if (ring->space < 0) | ||
202 | ring->space += ring->size; | ||
203 | } | 218 | } |
219 | |||
204 | return 0; | 220 | return 0; |
205 | } | 221 | } |
206 | 222 | ||
207 | static int init_render_ring(struct drm_device *dev, | 223 | /* |
208 | struct intel_ring_buffer *ring) | 224 | * 965+ support PIPE_CONTROL commands, which provide finer grained control |
225 | * over cache flushing. | ||
226 | */ | ||
227 | struct pipe_control { | ||
228 | struct drm_i915_gem_object *obj; | ||
229 | volatile u32 *cpu_page; | ||
230 | u32 gtt_offset; | ||
231 | }; | ||
232 | |||
233 | static int | ||
234 | init_pipe_control(struct intel_ring_buffer *ring) | ||
209 | { | 235 | { |
210 | drm_i915_private_t *dev_priv = dev->dev_private; | 236 | struct pipe_control *pc; |
211 | int ret = init_ring_common(dev, ring); | 237 | struct drm_i915_gem_object *obj; |
212 | int mode; | 238 | int ret; |
239 | |||
240 | if (ring->private) | ||
241 | return 0; | ||
242 | |||
243 | pc = kmalloc(sizeof(*pc), GFP_KERNEL); | ||
244 | if (!pc) | ||
245 | return -ENOMEM; | ||
246 | |||
247 | obj = i915_gem_alloc_object(ring->dev, 4096); | ||
248 | if (obj == NULL) { | ||
249 | DRM_ERROR("Failed to allocate seqno page\n"); | ||
250 | ret = -ENOMEM; | ||
251 | goto err; | ||
252 | } | ||
253 | obj->agp_type = AGP_USER_CACHED_MEMORY; | ||
254 | |||
255 | ret = i915_gem_object_pin(obj, 4096, true); | ||
256 | if (ret) | ||
257 | goto err_unref; | ||
258 | |||
259 | pc->gtt_offset = obj->gtt_offset; | ||
260 | pc->cpu_page = kmap(obj->pages[0]); | ||
261 | if (pc->cpu_page == NULL) | ||
262 | goto err_unpin; | ||
263 | |||
264 | pc->obj = obj; | ||
265 | ring->private = pc; | ||
266 | return 0; | ||
267 | |||
268 | err_unpin: | ||
269 | i915_gem_object_unpin(obj); | ||
270 | err_unref: | ||
271 | drm_gem_object_unreference(&obj->base); | ||
272 | err: | ||
273 | kfree(pc); | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | static void | ||
278 | cleanup_pipe_control(struct intel_ring_buffer *ring) | ||
279 | { | ||
280 | struct pipe_control *pc = ring->private; | ||
281 | struct drm_i915_gem_object *obj; | ||
282 | |||
283 | if (!ring->private) | ||
284 | return; | ||
285 | |||
286 | obj = pc->obj; | ||
287 | kunmap(obj->pages[0]); | ||
288 | i915_gem_object_unpin(obj); | ||
289 | drm_gem_object_unreference(&obj->base); | ||
290 | |||
291 | kfree(pc); | ||
292 | ring->private = NULL; | ||
293 | } | ||
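init_pipe_control() uses the conventional goto-ladder unwind: each acquired resource adds a label, and a failure jumps to the label that releases everything obtained so far, in reverse order (kmap failure unpins, pin failure unreferences, allocation failure frees the container). The shape of that idiom standalone, with heap allocations standing in for the pin/map steps (illustrative sketch):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors the err_unpin/err_unref/err ladder above: unwind in
     * reverse order of acquisition, freeing only what was obtained. */
    static int init_pc(int fail_at, void **out)
    {
        int ret = -ENOMEM;
        void *pc = malloc(16);
        if (!pc)
            return -ENOMEM;

        void *obj = (fail_at == 1) ? NULL : malloc(4096);
        if (!obj)
            goto err;

        void *map = (fail_at == 2) ? NULL : malloc(4096);
        if (!map)
            goto err_unref;

        *out = pc;
        free(map); /* demo only: a real mapping would be kept */
        free(obj);
        return 0;

    err_unref:
        free(obj);
    err:
        free(pc);
        return ret;
    }

    int main(void)
    {
        void *pc = NULL;
        printf("simulated map failure -> %d\n", init_pc(2, &pc));
        if (init_pc(0, &pc) == 0) {
            printf("ok\n");
            free(pc);
        }
        return 0;
    }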
294 | |||
295 | static int init_render_ring(struct intel_ring_buffer *ring) | ||
296 | { | ||
297 | struct drm_device *dev = ring->dev; | ||
298 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
299 | int ret = init_ring_common(ring); | ||
213 | 300 | ||
214 | if (INTEL_INFO(dev)->gen > 3) { | 301 | if (INTEL_INFO(dev)->gen > 3) { |
215 | mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; | 302 | int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; |
216 | if (IS_GEN6(dev)) | 303 | if (IS_GEN6(dev)) |
217 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; | 304 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; |
218 | I915_WRITE(MI_MODE, mode); | 305 | I915_WRITE(MI_MODE, mode); |
219 | } | 306 | } |
307 | |||
308 | if (INTEL_INFO(dev)->gen >= 6) { | ||
309 | } else if (IS_GEN5(dev)) { | ||
310 | ret = init_pipe_control(ring); | ||
311 | if (ret) | ||
312 | return ret; | ||
313 | } | ||
314 | |||
220 | return ret; | 315 | return ret; |
221 | } | 316 | } |
222 | 317 | ||
223 | #define PIPE_CONTROL_FLUSH(addr) \ | 318 | static void render_ring_cleanup(struct intel_ring_buffer *ring) |
319 | { | ||
320 | if (!ring->private) | ||
321 | return; | ||
322 | |||
323 | cleanup_pipe_control(ring); | ||
324 | } | ||
325 | |||
326 | static void | ||
327 | update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) | ||
328 | { | ||
329 | struct drm_device *dev = ring->dev; | ||
330 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
331 | int id; | ||
332 | |||
333 | /* | ||
334 | * cs -> 1 = vcs, 0 = bcs | ||
335 | * vcs -> 1 = bcs, 0 = cs, | ||
336 | * bcs -> 1 = cs, 0 = vcs. | ||
337 | */ | ||
338 | id = ring - dev_priv->ring; | ||
339 | id += 2 - i; | ||
340 | id %= 3; | ||
341 | |||
342 | intel_ring_emit(ring, | ||
343 | MI_SEMAPHORE_MBOX | | ||
344 | MI_SEMAPHORE_REGISTER | | ||
345 | MI_SEMAPHORE_UPDATE); | ||
346 | intel_ring_emit(ring, seqno); | ||
347 | intel_ring_emit(ring, | ||
348 | RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i); | ||
349 | } | ||
350 | |||
351 | static int | ||
352 | gen6_add_request(struct intel_ring_buffer *ring, | ||
353 | u32 *result) | ||
354 | { | ||
355 | u32 seqno; | ||
356 | int ret; | ||
357 | |||
358 | ret = intel_ring_begin(ring, 10); | ||
359 | if (ret) | ||
360 | return ret; | ||
361 | |||
362 | seqno = i915_gem_get_seqno(ring->dev); | ||
363 | update_semaphore(ring, 0, seqno); | ||
364 | update_semaphore(ring, 1, seqno); | ||
365 | |||
366 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | ||
367 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
368 | intel_ring_emit(ring, seqno); | ||
369 | intel_ring_emit(ring, MI_USER_INTERRUPT); | ||
370 | intel_ring_advance(ring); | ||
371 | |||
372 | *result = seqno; | ||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | int | ||
377 | intel_ring_sync(struct intel_ring_buffer *ring, | ||
378 | struct intel_ring_buffer *to, | ||
379 | u32 seqno) | ||
380 | { | ||
381 | int ret; | ||
382 | |||
383 | ret = intel_ring_begin(ring, 4); | ||
384 | if (ret) | ||
385 | return ret; | ||
386 | |||
387 | intel_ring_emit(ring, | ||
388 | MI_SEMAPHORE_MBOX | | ||
389 | MI_SEMAPHORE_REGISTER | | ||
390 | intel_ring_sync_index(ring, to) << 17 | | ||
391 | MI_SEMAPHORE_COMPARE); | ||
392 | intel_ring_emit(ring, seqno); | ||
393 | intel_ring_emit(ring, 0); | ||
394 | intel_ring_emit(ring, MI_NOOP); | ||
395 | intel_ring_advance(ring); | ||
396 | |||
397 | return 0; | ||
398 | } | ||
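update_semaphore() selects the mailbox of each of the other two rings with modular arithmetic: for a ring at index id0 in {cs=0, vcs=1, bcs=2}, the target for slot i is (id0 + 2 - i) % 3, which reproduces exactly the mapping spelled out in the comment above. Verified standalone:

    #include <assert.h>
    #include <stdio.h>

    /* (id0 + 2 - i) % 3: the other two rings, as in update_semaphore().
     * Indices: 0 = cs (render), 1 = vcs (video), 2 = bcs (blit). */
    static int other_ring(int id0, int i)
    {
        return (id0 + 2 - i) % 3;
    }

    int main(void)
    {
        /* cs -> 1 = vcs, 0 = bcs */
        assert(other_ring(0, 1) == 1 && other_ring(0, 0) == 2);
        /* vcs -> 1 = bcs, 0 = cs */
        assert(other_ring(1, 1) == 2 && other_ring(1, 0) == 0);
        /* bcs -> 1 = cs, 0 = vcs */
        assert(other_ring(2, 1) == 0 && other_ring(2, 0) == 1);
        printf("ok\n");
        return 0;
    }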
399 | |||
400 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ | ||
224 | do { \ | 401 | do { \ |
225 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | 402 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ |
226 | PIPE_CONTROL_DEPTH_STALL | 2); \ | 403 | PIPE_CONTROL_DEPTH_STALL | 2); \ |
227 | OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ | 404 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ |
228 | OUT_RING(0); \ | 405 | intel_ring_emit(ring__, 0); \ |
229 | OUT_RING(0); \ | 406 | intel_ring_emit(ring__, 0); \ |
230 | } while (0) | 407 | } while (0) |
231 | 408 | ||
232 | /** | 409 | static int |
233 | * Creates a new sequence number, emitting a write of it to the status page | 410 | pc_render_add_request(struct intel_ring_buffer *ring, |
234 | * plus an interrupt, which will trigger i915_user_interrupt_handler. | 411 | u32 *result) |
235 | * | ||
236 | * Must be called with struct_lock held. | ||
237 | * | ||
238 | * Returned sequence numbers are nonzero on success. | ||
239 | */ | ||
240 | static u32 | ||
241 | render_ring_add_request(struct drm_device *dev, | ||
242 | struct intel_ring_buffer *ring, | ||
243 | u32 flush_domains) | ||
244 | { | 412 | { |
245 | drm_i915_private_t *dev_priv = dev->dev_private; | 413 | struct drm_device *dev = ring->dev; |
246 | u32 seqno; | 414 | u32 seqno = i915_gem_get_seqno(dev); |
415 | struct pipe_control *pc = ring->private; | ||
416 | u32 scratch_addr = pc->gtt_offset + 128; | ||
417 | int ret; | ||
247 | 418 | ||
248 | seqno = i915_gem_get_seqno(dev); | 419 | /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently |
249 | 420 | * incoherent with writes to memory, i.e. completely fubar, | |
250 | if (IS_GEN6(dev)) { | 421 | * so we need to use PIPE_NOTIFY instead. |
251 | BEGIN_LP_RING(6); | 422 | * |
252 | OUT_RING(GFX_OP_PIPE_CONTROL | 3); | 423 | * However, we also need to workaround the qword write |
253 | OUT_RING(PIPE_CONTROL_QW_WRITE | | 424 | * incoherence by flushing the 6 PIPE_NOTIFY buffers out to |
254 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | | 425 | * memory before requesting an interrupt. |
255 | PIPE_CONTROL_NOTIFY); | 426 | */ |
256 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | 427 | ret = intel_ring_begin(ring, 32); |
257 | OUT_RING(seqno); | 428 | if (ret) |
258 | OUT_RING(0); | 429 | return ret; |
259 | OUT_RING(0); | 430 | |
260 | ADVANCE_LP_RING(); | 431 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | |
261 | } else if (HAS_PIPE_CONTROL(dev)) { | 432 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); |
262 | u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; | 433 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
434 | intel_ring_emit(ring, seqno); | ||
435 | intel_ring_emit(ring, 0); | ||
436 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
437 | scratch_addr += 128; /* write to separate cachelines */ | ||
438 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
439 | scratch_addr += 128; | ||
440 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
441 | scratch_addr += 128; | ||
442 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
443 | scratch_addr += 128; | ||
444 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
445 | scratch_addr += 128; | ||
446 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | ||
447 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
448 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
449 | PIPE_CONTROL_NOTIFY); | ||
450 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | ||
451 | intel_ring_emit(ring, seqno); | ||
452 | intel_ring_emit(ring, 0); | ||
453 | intel_ring_advance(ring); | ||
454 | |||
455 | *result = seqno; | ||
456 | return 0; | ||
457 | } | ||
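[Both the 32-dword reservation and the 128-byte strides in pc_render_add_request() check out: each PIPE_CONTROL_FLUSH() expands to 4 dwords, and the scratch writes land at pc->gtt_offset + 128, +256, ... +768, so every qword write sits in its own cacheline within the 4 KiB pipe-control page:

    initial PIPE_CONTROL (WC + TC flush)        4 dwords
    6 x PIPE_CONTROL_FLUSH()                   24 dwords
    final PIPE_CONTROL (... | NOTIFY)           4 dwords
    total                                      32 dwords == intel_ring_begin(ring, 32)
]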
263 | 458 | ||
264 | /* | 459 | static int |
265 | * Workaround qword write incoherence by flushing the | 460 | render_ring_add_request(struct intel_ring_buffer *ring, |
266 | * PIPE_NOTIFY buffers out to memory before requesting | 461 | u32 *result) |
267 | * an interrupt. | 462 | { |
268 | */ | 463 | struct drm_device *dev = ring->dev; |
269 | BEGIN_LP_RING(32); | 464 | u32 seqno = i915_gem_get_seqno(dev); |
270 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | 465 | int ret; |
271 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | ||
272 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
273 | OUT_RING(seqno); | ||
274 | OUT_RING(0); | ||
275 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
276 | scratch_addr += 128; /* write to separate cachelines */ | ||
277 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
278 | scratch_addr += 128; | ||
279 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
280 | scratch_addr += 128; | ||
281 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
282 | scratch_addr += 128; | ||
283 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
284 | scratch_addr += 128; | ||
285 | PIPE_CONTROL_FLUSH(scratch_addr); | ||
286 | OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | ||
287 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | ||
288 | PIPE_CONTROL_NOTIFY); | ||
289 | OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
290 | OUT_RING(seqno); | ||
291 | OUT_RING(0); | ||
292 | ADVANCE_LP_RING(); | ||
293 | } else { | ||
294 | BEGIN_LP_RING(4); | ||
295 | OUT_RING(MI_STORE_DWORD_INDEX); | ||
296 | OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
297 | OUT_RING(seqno); | ||
298 | 466 | ||
299 | OUT_RING(MI_USER_INTERRUPT); | 467 | ret = intel_ring_begin(ring, 4); |
300 | ADVANCE_LP_RING(); | 468 | if (ret) |
301 | } | 469 | return ret; |
302 | return seqno; | 470 | |
471 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | ||
472 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
473 | intel_ring_emit(ring, seqno); | ||
474 | intel_ring_emit(ring, MI_USER_INTERRUPT); | ||
475 | intel_ring_advance(ring); | ||
476 | |||
477 | *result = seqno; | ||
478 | return 0; | ||
303 | } | 479 | } |
304 | 480 | ||
305 | static u32 | 481 | static u32 |
306 | render_ring_get_seqno(struct drm_device *dev, | 482 | ring_get_seqno(struct intel_ring_buffer *ring) |
307 | struct intel_ring_buffer *ring) | ||
308 | { | 483 | { |
309 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 484 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
310 | if (HAS_PIPE_CONTROL(dev)) | 485 | } |
311 | return ((volatile u32 *)(dev_priv->seqno_page))[0]; | 486 | |
312 | else | 487 | static u32 |
313 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 488 | pc_render_get_seqno(struct intel_ring_buffer *ring) |
489 | { | ||
490 | struct pipe_control *pc = ring->private; | ||
491 | return pc->cpu_page[0]; | ||
492 | } | ||
493 | |||
494 | static void | ||
495 | ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
496 | { | ||
497 | dev_priv->gt_irq_mask &= ~mask; | ||
498 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
499 | POSTING_READ(GTIMR); | ||
314 | } | 500 | } |
315 | 501 | ||
316 | static void | 502 | static void |
317 | render_ring_get_user_irq(struct drm_device *dev, | 503 | ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask) |
318 | struct intel_ring_buffer *ring) | 504 | { |
505 | dev_priv->gt_irq_mask |= mask; | ||
506 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
507 | POSTING_READ(GTIMR); | ||
508 | } | ||
509 | |||
510 | static void | ||
511 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
512 | { | ||
513 | dev_priv->irq_mask &= ~mask; | ||
514 | I915_WRITE(IMR, dev_priv->irq_mask); | ||
515 | POSTING_READ(IMR); | ||
516 | } | ||
517 | |||
518 | static void | ||
519 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) | ||
520 | { | ||
521 | dev_priv->irq_mask |= mask; | ||
522 | I915_WRITE(IMR, dev_priv->irq_mask); | ||
523 | POSTING_READ(IMR); | ||
524 | } | ||
525 | |||
526 | static bool | ||
527 | render_ring_get_irq(struct intel_ring_buffer *ring) | ||
319 | { | 528 | { |
320 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 529 | struct drm_device *dev = ring->dev; |
321 | unsigned long irqflags; | 530 | drm_i915_private_t *dev_priv = dev->dev_private; |
322 | 531 | ||
323 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 532 | if (!dev->irq_enabled) |
324 | if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { | 533 | return false; |
534 | |||
535 | spin_lock(&ring->irq_lock); | ||
536 | if (ring->irq_refcount++ == 0) { | ||
325 | if (HAS_PCH_SPLIT(dev)) | 537 | if (HAS_PCH_SPLIT(dev)) |
326 | ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); | 538 | ironlake_enable_irq(dev_priv, |
539 | GT_PIPE_NOTIFY | GT_USER_INTERRUPT); | ||
327 | else | 540 | else |
328 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); | 541 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); |
329 | } | 542 | } |
330 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 543 | spin_unlock(&ring->irq_lock); |
544 | |||
545 | return true; | ||
331 | } | 546 | } |
332 | 547 | ||
333 | static void | 548 | static void |
334 | render_ring_put_user_irq(struct drm_device *dev, | 549 | render_ring_put_irq(struct intel_ring_buffer *ring) |
335 | struct intel_ring_buffer *ring) | ||
336 | { | 550 | { |
337 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 551 | struct drm_device *dev = ring->dev; |
338 | unsigned long irqflags; | 552 | drm_i915_private_t *dev_priv = dev->dev_private; |
339 | 553 | ||
340 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 554 | spin_lock(&ring->irq_lock); |
341 | BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); | 555 | if (--ring->irq_refcount == 0) { |
342 | if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { | ||
343 | if (HAS_PCH_SPLIT(dev)) | 556 | if (HAS_PCH_SPLIT(dev)) |
344 | ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); | 557 | ironlake_disable_irq(dev_priv, |
558 | GT_USER_INTERRUPT | | ||
559 | GT_PIPE_NOTIFY); | ||
345 | else | 560 | else |
346 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); | 561 | i915_disable_irq(dev_priv, I915_USER_INTERRUPT); |
347 | } | 562 | } |
348 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 563 | spin_unlock(&ring->irq_lock); |
349 | } | 564 | } |
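[Both get/put pairs above follow the same refcounting contract: the hardware mask is only touched on the 0 -> 1 and 1 -> 0 transitions, so nested holders cost nothing. A minimal userspace model of the contract (assumed names, locking elided):

    #include <assert.h>

    /* Model of the irq_refcount pattern: the hardware interrupt is
     * enabled on 0 -> 1 and disabled on 1 -> 0, nothing in between. */
    struct fake_ring { int irq_refcount; int hw_irq_enabled; };

    static void fake_get_irq(struct fake_ring *r)
    {
            if (r->irq_refcount++ == 0)
                    r->hw_irq_enabled = 1;  /* ironlake_enable_irq() */
    }

    static void fake_put_irq(struct fake_ring *r)
    {
            if (--r->irq_refcount == 0)
                    r->hw_irq_enabled = 0;  /* ironlake_disable_irq() */
    }

    int main(void)
    {
            struct fake_ring r = { 0, 0 };

            fake_get_irq(&r);       /* 0 -> 1: enables */
            fake_get_irq(&r);       /* nested: no hardware access */
            fake_put_irq(&r);       /* still held */
            assert(r.hw_irq_enabled);
            fake_put_irq(&r);       /* 1 -> 0: disables */
            assert(!r.hw_irq_enabled);
            return 0;
    }
]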
350 | 565 | ||
351 | void intel_ring_setup_status_page(struct drm_device *dev, | 566 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring) |
352 | struct intel_ring_buffer *ring) | ||
353 | { | 567 | { |
354 | drm_i915_private_t *dev_priv = dev->dev_private; | 568 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
355 | if (IS_GEN6(dev)) { | 569 | u32 mmio = IS_GEN6(ring->dev) ? |
356 | I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), | 570 | RING_HWS_PGA_GEN6(ring->mmio_base) : |
357 | ring->status_page.gfx_addr); | 571 | RING_HWS_PGA(ring->mmio_base); |
358 | I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */ | 572 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); |
359 | } else { | 573 | POSTING_READ(mmio); |
360 | I915_WRITE(RING_HWS_PGA(ring->mmio_base), | ||
361 | ring->status_page.gfx_addr); | ||
362 | I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */ | ||
363 | } | ||
364 | |||
365 | } | 574 | } |
366 | 575 | ||
367 | static void | 576 | static int |
368 | bsd_ring_flush(struct drm_device *dev, | 577 | bsd_ring_flush(struct intel_ring_buffer *ring, |
369 | struct intel_ring_buffer *ring, | 578 | u32 invalidate_domains, |
370 | u32 invalidate_domains, | 579 | u32 flush_domains) |
371 | u32 flush_domains) | ||
372 | { | 580 | { |
373 | intel_ring_begin(dev, ring, 2); | 581 | int ret; |
374 | intel_ring_emit(dev, ring, MI_FLUSH); | ||
375 | intel_ring_emit(dev, ring, MI_NOOP); | ||
376 | intel_ring_advance(dev, ring); | ||
377 | } | ||
378 | 582 | ||
379 | static int init_bsd_ring(struct drm_device *dev, | 583 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) |
380 | struct intel_ring_buffer *ring) | 584 | return 0; |
381 | { | 585 | |
382 | return init_ring_common(dev, ring); | 586 | ret = intel_ring_begin(ring, 2); |
587 | if (ret) | ||
588 | return ret; | ||
589 | |||
590 | intel_ring_emit(ring, MI_FLUSH); | ||
591 | intel_ring_emit(ring, MI_NOOP); | ||
592 | intel_ring_advance(ring); | ||
593 | return 0; | ||
383 | } | 594 | } |
384 | 595 | ||
385 | static u32 | 596 | static int |
386 | ring_add_request(struct drm_device *dev, | 597 | ring_add_request(struct intel_ring_buffer *ring, |
387 | struct intel_ring_buffer *ring, | 598 | u32 *result) |
388 | u32 flush_domains) | ||
389 | { | 599 | { |
390 | u32 seqno; | 600 | u32 seqno; |
601 | int ret; | ||
391 | 602 | ||
392 | seqno = i915_gem_get_seqno(dev); | 603 | ret = intel_ring_begin(ring, 4); |
604 | if (ret) | ||
605 | return ret; | ||
393 | 606 | ||
394 | intel_ring_begin(dev, ring, 4); | 607 | seqno = i915_gem_get_seqno(ring->dev); |
395 | intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); | 608 | |
396 | intel_ring_emit(dev, ring, | 609 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
397 | I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 610 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
398 | intel_ring_emit(dev, ring, seqno); | 611 | intel_ring_emit(ring, seqno); |
399 | intel_ring_emit(dev, ring, MI_USER_INTERRUPT); | 612 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
400 | intel_ring_advance(dev, ring); | 613 | intel_ring_advance(ring); |
401 | 614 | ||
402 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); | 615 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); |
616 | *result = seqno; | ||
617 | return 0; | ||
618 | } | ||
403 | 619 | ||
404 | return seqno; | 620 | static bool |
621 | ring_get_irq(struct intel_ring_buffer *ring, u32 flag) | ||
622 | { | ||
623 | struct drm_device *dev = ring->dev; | ||
624 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
625 | |||
626 | if (!dev->irq_enabled) | ||
627 | return false; | ||
628 | |||
629 | spin_lock(&ring->irq_lock); | ||
630 | if (ring->irq_refcount++ == 0) | ||
631 | ironlake_enable_irq(dev_priv, flag); | ||
632 | spin_unlock(&ring->irq_lock); | ||
633 | |||
634 | return true; | ||
405 | } | 635 | } |
406 | 636 | ||
407 | static void | 637 | static void |
408 | bsd_ring_get_user_irq(struct drm_device *dev, | 638 | ring_put_irq(struct intel_ring_buffer *ring, u32 flag) |
409 | struct intel_ring_buffer *ring) | ||
410 | { | 639 | { |
411 | /* do nothing */ | 640 | struct drm_device *dev = ring->dev; |
641 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
642 | |||
643 | spin_lock(&ring->irq_lock); | ||
644 | if (--ring->irq_refcount == 0) | ||
645 | ironlake_disable_irq(dev_priv, flag); | ||
646 | spin_unlock(&ring->irq_lock); | ||
412 | } | 647 | } |
648 | |||
649 | static bool | ||
650 | gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) | ||
651 | { | ||
652 | struct drm_device *dev = ring->dev; | ||
653 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
654 | |||
655 | if (!dev->irq_enabled) | ||
656 | return false; | ||
657 | |||
658 | spin_lock(&ring->irq_lock); | ||
659 | if (ring->irq_refcount++ == 0) { | ||
660 | ring->irq_mask &= ~rflag; | ||
661 | I915_WRITE_IMR(ring, ring->irq_mask); | ||
662 | ironlake_enable_irq(dev_priv, gflag); | ||
663 | } | ||
664 | spin_unlock(&ring->irq_lock); | ||
665 | |||
666 | return true; | ||
667 | } | ||
668 | |||
413 | static void | 669 | static void |
414 | bsd_ring_put_user_irq(struct drm_device *dev, | 670 | gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) |
415 | struct intel_ring_buffer *ring) | ||
416 | { | 671 | { |
417 | /* do nothing */ | 672 | struct drm_device *dev = ring->dev; |
673 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
674 | |||
675 | spin_lock(&ring->irq_lock); | ||
676 | if (--ring->irq_refcount == 0) { | ||
677 | ring->irq_mask |= rflag; | ||
678 | I915_WRITE_IMR(ring, ring->irq_mask); | ||
679 | ironlake_disable_irq(dev_priv, gflag); | ||
680 | } | ||
681 | spin_unlock(&ring->irq_lock); | ||
418 | } | 682 | } |
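[On gen6 the unmasking happens at two levels: the new per-ring IMR (ring->irq_mask, written through I915_WRITE_IMR) and the existing global GT mask; presumably both must be open for the interrupt to be delivered. The (gflag, rflag) pairs wired up later in this diff are:

    ring      gflag (GT-level)              rflag (ring-level)
    render    GT_USER_INTERRUPT             GEN6_RENDER_USER_INTERRUPT
    bsd       GT_GEN6_BSD_USER_INTERRUPT    GEN6_BSD_USER_INTERRUPT
    blt       GT_BLT_USER_INTERRUPT         GEN6_BLITTER_USER_INTERRUPT
]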
419 | 683 | ||
420 | static u32 | 684 | static bool |
421 | ring_status_page_get_seqno(struct drm_device *dev, | 685 | bsd_ring_get_irq(struct intel_ring_buffer *ring) |
422 | struct intel_ring_buffer *ring) | ||
423 | { | 686 | { |
424 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 687 | return ring_get_irq(ring, GT_BSD_USER_INTERRUPT); |
688 | } | ||
689 | static void | ||
690 | bsd_ring_put_irq(struct intel_ring_buffer *ring) | ||
691 | { | ||
692 | ring_put_irq(ring, GT_BSD_USER_INTERRUPT); | ||
425 | } | 693 | } |
426 | 694 | ||
427 | static int | 695 | static int |
428 | ring_dispatch_gem_execbuffer(struct drm_device *dev, | 696 | ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) |
429 | struct intel_ring_buffer *ring, | ||
430 | struct drm_i915_gem_execbuffer2 *exec, | ||
431 | struct drm_clip_rect *cliprects, | ||
432 | uint64_t exec_offset) | ||
433 | { | 697 | { |
434 | uint32_t exec_start; | 698 | int ret; |
435 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 699 | |
436 | intel_ring_begin(dev, ring, 2); | 700 | ret = intel_ring_begin(ring, 2); |
437 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | | 701 | if (ret) |
438 | (2 << 6) | MI_BATCH_NON_SECURE_I965); | 702 | return ret; |
439 | intel_ring_emit(dev, ring, exec_start); | 703 | |
440 | intel_ring_advance(dev, ring); | 704 | intel_ring_emit(ring, |
705 | MI_BATCH_BUFFER_START | (2 << 6) | | ||
706 | MI_BATCH_NON_SECURE_I965); | ||
707 | intel_ring_emit(ring, offset); | ||
708 | intel_ring_advance(ring); | ||
709 | |||
441 | return 0; | 710 | return 0; |
442 | } | 711 | } |
443 | 712 | ||
444 | static int | 713 | static int |
445 | render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 714 | render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
446 | struct intel_ring_buffer *ring, | 715 | u32 offset, u32 len) |
447 | struct drm_i915_gem_execbuffer2 *exec, | ||
448 | struct drm_clip_rect *cliprects, | ||
449 | uint64_t exec_offset) | ||
450 | { | 716 | { |
717 | struct drm_device *dev = ring->dev; | ||
451 | drm_i915_private_t *dev_priv = dev->dev_private; | 718 | drm_i915_private_t *dev_priv = dev->dev_private; |
452 | int nbox = exec->num_cliprects; | 719 | int ret; |
453 | int i = 0, count; | ||
454 | uint32_t exec_start, exec_len; | ||
455 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
456 | exec_len = (uint32_t) exec->batch_len; | ||
457 | 720 | ||
458 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); | 721 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); |
459 | 722 | ||
460 | count = nbox ? nbox : 1; | 723 | if (IS_I830(dev) || IS_845G(dev)) { |
724 | ret = intel_ring_begin(ring, 4); | ||
725 | if (ret) | ||
726 | return ret; | ||
461 | 727 | ||
462 | for (i = 0; i < count; i++) { | 728 | intel_ring_emit(ring, MI_BATCH_BUFFER); |
463 | if (i < nbox) { | 729 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
464 | int ret = i915_emit_box(dev, cliprects, i, | 730 | intel_ring_emit(ring, offset + len - 8); |
465 | exec->DR1, exec->DR4); | 731 | intel_ring_emit(ring, 0); |
466 | if (ret) | 732 | } else { |
467 | return ret; | 733 | ret = intel_ring_begin(ring, 2); |
468 | } | 734 | if (ret) |
735 | return ret; | ||
469 | 736 | ||
470 | if (IS_I830(dev) || IS_845G(dev)) { | 737 | if (INTEL_INFO(dev)->gen >= 4) { |
471 | intel_ring_begin(dev, ring, 4); | 738 | intel_ring_emit(ring, |
472 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER); | 739 | MI_BATCH_BUFFER_START | (2 << 6) | |
473 | intel_ring_emit(dev, ring, | 740 | MI_BATCH_NON_SECURE_I965); |
474 | exec_start | MI_BATCH_NON_SECURE); | 741 | intel_ring_emit(ring, offset); |
475 | intel_ring_emit(dev, ring, exec_start + exec_len - 4); | ||
476 | intel_ring_emit(dev, ring, 0); | ||
477 | } else { | 742 | } else { |
478 | intel_ring_begin(dev, ring, 2); | 743 | intel_ring_emit(ring, |
479 | if (INTEL_INFO(dev)->gen >= 4) { | 744 | MI_BATCH_BUFFER_START | (2 << 6)); |
480 | intel_ring_emit(dev, ring, | 745 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
481 | MI_BATCH_BUFFER_START | (2 << 6) | ||
482 | | MI_BATCH_NON_SECURE_I965); | ||
483 | intel_ring_emit(dev, ring, exec_start); | ||
484 | } else { | ||
485 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | ||
486 | | (2 << 6)); | ||
487 | intel_ring_emit(dev, ring, exec_start | | ||
488 | MI_BATCH_NON_SECURE); | ||
489 | } | ||
490 | } | 746 | } |
491 | intel_ring_advance(dev, ring); | ||
492 | } | ||
493 | |||
494 | if (IS_G4X(dev) || IS_GEN5(dev)) { | ||
495 | intel_ring_begin(dev, ring, 2); | ||
496 | intel_ring_emit(dev, ring, MI_FLUSH | | ||
497 | MI_NO_WRITE_FLUSH | | ||
498 | MI_INVALIDATE_ISP ); | ||
499 | intel_ring_emit(dev, ring, MI_NOOP); | ||
500 | intel_ring_advance(dev, ring); | ||
501 | } | 747 | } |
502 | /* XXX breadcrumb */ | 748 | intel_ring_advance(ring); |
503 | 749 | ||
504 | return 0; | 750 | return 0; |
505 | } | 751 | } |
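[The rewritten render dispatch drops the per-cliprect loop (cliprect handling is presumably left to the caller after this series) and keeps three encodings, one per hardware class:

    i830/845:  MI_BATCH_BUFFER, offset | MI_BATCH_NON_SECURE,
               offset + len - 8, 0                               (bounded batch, 4 dwords)
    gen4+:     MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965, offset
    pre-gen4:  MI_BATCH_BUFFER_START | (2 << 6), offset | MI_BATCH_NON_SECURE
]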
506 | 752 | ||
507 | static void cleanup_status_page(struct drm_device *dev, | 753 | static void cleanup_status_page(struct intel_ring_buffer *ring) |
508 | struct intel_ring_buffer *ring) | ||
509 | { | 754 | { |
510 | drm_i915_private_t *dev_priv = dev->dev_private; | 755 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
511 | struct drm_gem_object *obj; | 756 | struct drm_i915_gem_object *obj; |
512 | struct drm_i915_gem_object *obj_priv; | ||
513 | 757 | ||
514 | obj = ring->status_page.obj; | 758 | obj = ring->status_page.obj; |
515 | if (obj == NULL) | 759 | if (obj == NULL) |
516 | return; | 760 | return; |
517 | obj_priv = to_intel_bo(obj); | ||
518 | 761 | ||
519 | kunmap(obj_priv->pages[0]); | 762 | kunmap(obj->pages[0]); |
520 | i915_gem_object_unpin(obj); | 763 | i915_gem_object_unpin(obj); |
521 | drm_gem_object_unreference(obj); | 764 | drm_gem_object_unreference(&obj->base); |
522 | ring->status_page.obj = NULL; | 765 | ring->status_page.obj = NULL; |
523 | 766 | ||
524 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 767 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
525 | } | 768 | } |
526 | 769 | ||
527 | static int init_status_page(struct drm_device *dev, | 770 | static int init_status_page(struct intel_ring_buffer *ring) |
528 | struct intel_ring_buffer *ring) | ||
529 | { | 771 | { |
772 | struct drm_device *dev = ring->dev; | ||
530 | drm_i915_private_t *dev_priv = dev->dev_private; | 773 | drm_i915_private_t *dev_priv = dev->dev_private; |
531 | struct drm_gem_object *obj; | 774 | struct drm_i915_gem_object *obj; |
532 | struct drm_i915_gem_object *obj_priv; | ||
533 | int ret; | 775 | int ret; |
534 | 776 | ||
535 | obj = i915_gem_alloc_object(dev, 4096); | 777 | obj = i915_gem_alloc_object(dev, 4096); |
@@ -538,16 +780,15 @@ static int init_status_page(struct drm_device *dev, | |||
538 | ret = -ENOMEM; | 780 | ret = -ENOMEM; |
539 | goto err; | 781 | goto err; |
540 | } | 782 | } |
541 | obj_priv = to_intel_bo(obj); | 783 | obj->agp_type = AGP_USER_CACHED_MEMORY; |
542 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | ||
543 | 784 | ||
544 | ret = i915_gem_object_pin(obj, 4096); | 785 | ret = i915_gem_object_pin(obj, 4096, true); |
545 | if (ret != 0) { | 786 | if (ret != 0) { |
546 | goto err_unref; | 787 | goto err_unref; |
547 | } | 788 | } |
548 | 789 | ||
549 | ring->status_page.gfx_addr = obj_priv->gtt_offset; | 790 | ring->status_page.gfx_addr = obj->gtt_offset; |
550 | ring->status_page.page_addr = kmap(obj_priv->pages[0]); | 791 | ring->status_page.page_addr = kmap(obj->pages[0]); |
551 | if (ring->status_page.page_addr == NULL) { | 792 | if (ring->status_page.page_addr == NULL) { |
552 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 793 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
553 | goto err_unpin; | 794 | goto err_unpin; |
@@ -555,7 +796,7 @@ static int init_status_page(struct drm_device *dev, | |||
555 | ring->status_page.obj = obj; | 796 | ring->status_page.obj = obj; |
556 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 797 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
557 | 798 | ||
558 | intel_ring_setup_status_page(dev, ring); | 799 | intel_ring_setup_status_page(ring); |
559 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", | 800 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", |
560 | ring->name, ring->status_page.gfx_addr); | 801 | ring->name, ring->status_page.gfx_addr); |
561 | 802 | ||
@@ -564,7 +805,7 @@ static int init_status_page(struct drm_device *dev, | |||
564 | err_unpin: | 805 | err_unpin: |
565 | i915_gem_object_unpin(obj); | 806 | i915_gem_object_unpin(obj); |
566 | err_unref: | 807 | err_unref: |
567 | drm_gem_object_unreference(obj); | 808 | drm_gem_object_unreference(&obj->base); |
568 | err: | 809 | err: |
569 | return ret; | 810 | return ret; |
570 | } | 811 | } |
@@ -572,9 +813,7 @@ err: | |||
572 | int intel_init_ring_buffer(struct drm_device *dev, | 813 | int intel_init_ring_buffer(struct drm_device *dev, |
573 | struct intel_ring_buffer *ring) | 814 | struct intel_ring_buffer *ring) |
574 | { | 815 | { |
575 | struct drm_i915_private *dev_priv = dev->dev_private; | 816 | struct drm_i915_gem_object *obj; |
576 | struct drm_i915_gem_object *obj_priv; | ||
577 | struct drm_gem_object *obj; | ||
578 | int ret; | 817 | int ret; |
579 | 818 | ||
580 | ring->dev = dev; | 819 | ring->dev = dev; |
@@ -582,8 +821,11 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
582 | INIT_LIST_HEAD(&ring->request_list); | 821 | INIT_LIST_HEAD(&ring->request_list); |
583 | INIT_LIST_HEAD(&ring->gpu_write_list); | 822 | INIT_LIST_HEAD(&ring->gpu_write_list); |
584 | 823 | ||
824 | spin_lock_init(&ring->irq_lock); | ||
825 | ring->irq_mask = ~0; | ||
826 | |||
585 | if (I915_NEED_GFX_HWS(dev)) { | 827 | if (I915_NEED_GFX_HWS(dev)) { |
586 | ret = init_status_page(dev, ring); | 828 | ret = init_status_page(ring); |
587 | if (ret) | 829 | if (ret) |
588 | return ret; | 830 | return ret; |
589 | } | 831 | } |
@@ -595,15 +837,14 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
595 | goto err_hws; | 837 | goto err_hws; |
596 | } | 838 | } |
597 | 839 | ||
598 | ring->gem_object = obj; | 840 | ring->obj = obj; |
599 | 841 | ||
600 | ret = i915_gem_object_pin(obj, PAGE_SIZE); | 842 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); |
601 | if (ret) | 843 | if (ret) |
602 | goto err_unref; | 844 | goto err_unref; |
603 | 845 | ||
604 | obj_priv = to_intel_bo(obj); | ||
605 | ring->map.size = ring->size; | 846 | ring->map.size = ring->size; |
606 | ring->map.offset = dev->agp->base + obj_priv->gtt_offset; | 847 | ring->map.offset = dev->agp->base + obj->gtt_offset; |
607 | ring->map.type = 0; | 848 | ring->map.type = 0; |
608 | ring->map.flags = 0; | 849 | ring->map.flags = 0; |
609 | ring->map.mtrr = 0; | 850 | ring->map.mtrr = 0; |
@@ -616,60 +857,64 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
616 | } | 857 | } |
617 | 858 | ||
618 | ring->virtual_start = ring->map.handle; | 859 | ring->virtual_start = ring->map.handle; |
619 | ret = ring->init(dev, ring); | 860 | ret = ring->init(ring); |
620 | if (ret) | 861 | if (ret) |
621 | goto err_unmap; | 862 | goto err_unmap; |
622 | 863 | ||
623 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 864 | /* Workaround an erratum on the i830 which causes a hang if |
624 | i915_kernel_lost_context(dev); | 865 | * the TAIL pointer points to within the last 2 cachelines |
625 | else { | 866 | * of the buffer. |
626 | ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; | 867 | */ |
627 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | 868 | ring->effective_size = ring->size; |
628 | ring->space = ring->head - (ring->tail + 8); | 869 | if (IS_I830(ring->dev)) |
629 | if (ring->space < 0) | 870 | ring->effective_size -= 128; |
630 | ring->space += ring->size; | 871 | |
631 | } | 872 | return 0; |
632 | return ret; | ||
633 | 873 | ||
634 | err_unmap: | 874 | err_unmap: |
635 | drm_core_ioremapfree(&ring->map, dev); | 875 | drm_core_ioremapfree(&ring->map, dev); |
636 | err_unpin: | 876 | err_unpin: |
637 | i915_gem_object_unpin(obj); | 877 | i915_gem_object_unpin(obj); |
638 | err_unref: | 878 | err_unref: |
639 | drm_gem_object_unreference(obj); | 879 | drm_gem_object_unreference(&obj->base); |
640 | ring->gem_object = NULL; | 880 | ring->obj = NULL; |
641 | err_hws: | 881 | err_hws: |
642 | cleanup_status_page(dev, ring); | 882 | cleanup_status_page(ring); |
643 | return ret; | 883 | return ret; |
644 | } | 884 | } |
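[The i830 erratum reservation is easy to sanity-check: with 4 KiB pages the rings here are 32 * PAGE_SIZE = 131072 bytes, and on i830 the last 128 bytes — two 64-byte cachelines, per the comment — are simply never made available to TAIL:

    ring->size           = 32 * PAGE_SIZE = 131072
    ring->effective_size = 131072 - 128   = 130944   (i830 only)
]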
645 | 885 | ||
646 | void intel_cleanup_ring_buffer(struct drm_device *dev, | 886 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) |
647 | struct intel_ring_buffer *ring) | ||
648 | { | 887 | { |
649 | if (ring->gem_object == NULL) | 888 | struct drm_i915_private *dev_priv; |
889 | int ret; | ||
890 | |||
891 | if (ring->obj == NULL) | ||
650 | return; | 892 | return; |
651 | 893 | ||
652 | drm_core_ioremapfree(&ring->map, dev); | 894 | /* Disable the ring buffer. The ring must be idle at this point */ |
895 | dev_priv = ring->dev->dev_private; | ||
896 | ret = intel_wait_ring_buffer(ring, ring->size - 8); | ||
897 | I915_WRITE_CTL(ring, 0); | ||
898 | |||
899 | drm_core_ioremapfree(&ring->map, ring->dev); | ||
653 | 900 | ||
654 | i915_gem_object_unpin(ring->gem_object); | 901 | i915_gem_object_unpin(ring->obj); |
655 | drm_gem_object_unreference(ring->gem_object); | 902 | drm_gem_object_unreference(&ring->obj->base); |
656 | ring->gem_object = NULL; | 903 | ring->obj = NULL; |
657 | 904 | ||
658 | if (ring->cleanup) | 905 | if (ring->cleanup) |
659 | ring->cleanup(ring); | 906 | ring->cleanup(ring); |
660 | 907 | ||
661 | cleanup_status_page(dev, ring); | 908 | cleanup_status_page(ring); |
662 | } | 909 | } |
663 | 910 | ||
664 | static int intel_wrap_ring_buffer(struct drm_device *dev, | 911 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
665 | struct intel_ring_buffer *ring) | ||
666 | { | 912 | { |
667 | unsigned int *virt; | 913 | unsigned int *virt; |
668 | int rem; | 914 | int rem = ring->size - ring->tail; |
669 | rem = ring->size - ring->tail; | ||
670 | 915 | ||
671 | if (ring->space < rem) { | 916 | if (ring->space < rem) { |
672 | int ret = intel_wait_ring_buffer(dev, ring, rem); | 917 | int ret = intel_wait_ring_buffer(ring, rem); |
673 | if (ret) | 918 | if (ret) |
674 | return ret; | 919 | return ret; |
675 | } | 920 | } |
@@ -682,24 +927,25 @@ static int intel_wrap_ring_buffer(struct drm_device *dev, | |||
682 | } | 927 | } |
683 | 928 | ||
684 | ring->tail = 0; | 929 | ring->tail = 0; |
685 | ring->space = ring->head - 8; | 930 | ring->space = ring_space(ring); |
686 | 931 | ||
687 | return 0; | 932 | return 0; |
688 | } | 933 | } |
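[ring_space() itself is not part of this hunk; judging from the open-coded computation it replaces in intel_wait_ring_buffer() below, it presumably reduces to the following (standalone model; the 8 bytes of slack keep tail from ever catching head exactly):

    #include <assert.h>

    /* Assumed equivalent of ring_space(): free bytes between tail and
     * head, modulo the buffer size, minus 8 bytes of slack. */
    static int model_ring_space(int head, int tail, int size)
    {
            int space = head - (tail + 8);
            if (space < 0)
                    space += size;
            return space;
    }

    int main(void)
    {
            assert(model_ring_space(4096, 0, 131072) == 4088);
            assert(model_ring_space(0, 4096, 131072) == 126968);
            return 0;
    }
]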
689 | 934 | ||
690 | int intel_wait_ring_buffer(struct drm_device *dev, | 935 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
691 | struct intel_ring_buffer *ring, int n) | ||
692 | { | 936 | { |
937 | struct drm_device *dev = ring->dev; | ||
938 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
693 | unsigned long end; | 939 | unsigned long end; |
694 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
695 | u32 head; | 940 | u32 head; |
696 | 941 | ||
942 | /* If the reported head position has wrapped or hasn't advanced, | ||
943 | * fallback to the slow and accurate path. | ||
944 | */ | ||
697 | head = intel_read_status_page(ring, 4); | 945 | head = intel_read_status_page(ring, 4); |
698 | if (head) { | 946 | if (head > ring->head) { |
699 | ring->head = head & HEAD_ADDR; | 947 | ring->head = head; |
700 | ring->space = ring->head - (ring->tail + 8); | 948 | ring->space = ring_space(ring); |
701 | if (ring->space < 0) | ||
702 | ring->space += ring->size; | ||
703 | if (ring->space >= n) | 949 | if (ring->space >= n) |
704 | return 0; | 950 | return 0; |
705 | } | 951 | } |
@@ -707,12 +953,10 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
707 | trace_i915_ring_wait_begin (dev); | 953 | trace_i915_ring_wait_begin (dev); |
708 | end = jiffies + 3 * HZ; | 954 | end = jiffies + 3 * HZ; |
709 | do { | 955 | do { |
710 | ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; | 956 | ring->head = I915_READ_HEAD(ring); |
711 | ring->space = ring->head - (ring->tail + 8); | 957 | ring->space = ring_space(ring); |
712 | if (ring->space < 0) | ||
713 | ring->space += ring->size; | ||
714 | if (ring->space >= n) { | 958 | if (ring->space >= n) { |
715 | trace_i915_ring_wait_end (dev); | 959 | trace_i915_ring_wait_end(dev); |
716 | return 0; | 960 | return 0; |
717 | } | 961 | } |
718 | 962 | ||
@@ -723,29 +967,39 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
723 | } | 967 | } |
724 | 968 | ||
725 | msleep(1); | 969 | msleep(1); |
970 | if (atomic_read(&dev_priv->mm.wedged)) | ||
971 | return -EAGAIN; | ||
726 | } while (!time_after(jiffies, end)); | 972 | } while (!time_after(jiffies, end)); |
727 | trace_i915_ring_wait_end (dev); | 973 | trace_i915_ring_wait_end (dev); |
728 | return -EBUSY; | 974 | return -EBUSY; |
729 | } | 975 | } |
730 | 976 | ||
731 | void intel_ring_begin(struct drm_device *dev, | 977 | int intel_ring_begin(struct intel_ring_buffer *ring, |
732 | struct intel_ring_buffer *ring, | 978 | int num_dwords) |
733 | int num_dwords) | ||
734 | { | 979 | { |
735 | int n = 4*num_dwords; | 980 | int n = 4*num_dwords; |
736 | if (unlikely(ring->tail + n > ring->size)) | 981 | int ret; |
737 | intel_wrap_ring_buffer(dev, ring); | 982 | |
738 | if (unlikely(ring->space < n)) | 983 | if (unlikely(ring->tail + n > ring->effective_size)) { |
739 | intel_wait_ring_buffer(dev, ring, n); | 984 | ret = intel_wrap_ring_buffer(ring); |
985 | if (unlikely(ret)) | ||
986 | return ret; | ||
987 | } | ||
988 | |||
989 | if (unlikely(ring->space < n)) { | ||
990 | ret = intel_wait_ring_buffer(ring, n); | ||
991 | if (unlikely(ret)) | ||
992 | return ret; | ||
993 | } | ||
740 | 994 | ||
741 | ring->space -= n; | 995 | ring->space -= n; |
996 | return 0; | ||
742 | } | 997 | } |
743 | 998 | ||
744 | void intel_ring_advance(struct drm_device *dev, | 999 | void intel_ring_advance(struct intel_ring_buffer *ring) |
745 | struct intel_ring_buffer *ring) | ||
746 | { | 1000 | { |
747 | ring->tail &= ring->size - 1; | 1001 | ring->tail &= ring->size - 1; |
748 | ring->write_tail(dev, ring, ring->tail); | 1002 | ring->write_tail(ring, ring->tail); |
749 | } | 1003 | } |
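[The mask in intel_ring_advance() relies on ring->size being a power of two (32 * PAGE_SIZE = 0x20000 for the rings here), which turns the wrap into a single AND:

    tail = 0x20010;  tail & (0x20000 - 1) = 0x00010
]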
750 | 1004 | ||
751 | static const struct intel_ring_buffer render_ring = { | 1005 | static const struct intel_ring_buffer render_ring = { |
@@ -757,10 +1011,11 @@ static const struct intel_ring_buffer render_ring = { | |||
757 | .write_tail = ring_write_tail, | 1011 | .write_tail = ring_write_tail, |
758 | .flush = render_ring_flush, | 1012 | .flush = render_ring_flush, |
759 | .add_request = render_ring_add_request, | 1013 | .add_request = render_ring_add_request, |
760 | .get_seqno = render_ring_get_seqno, | 1014 | .get_seqno = ring_get_seqno, |
761 | .user_irq_get = render_ring_get_user_irq, | 1015 | .irq_get = render_ring_get_irq, |
762 | .user_irq_put = render_ring_put_user_irq, | 1016 | .irq_put = render_ring_put_irq, |
763 | .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, | 1017 | .dispatch_execbuffer = render_ring_dispatch_execbuffer, |
1018 | .cleanup = render_ring_cleanup, | ||
764 | }; | 1019 | }; |
765 | 1020 | ||
766 | /* ring buffer for bit-stream decoder */ | 1021 | /* ring buffer for bit-stream decoder */ |
@@ -770,22 +1025,21 @@ static const struct intel_ring_buffer bsd_ring = { | |||
770 | .id = RING_BSD, | 1025 | .id = RING_BSD, |
771 | .mmio_base = BSD_RING_BASE, | 1026 | .mmio_base = BSD_RING_BASE, |
772 | .size = 32 * PAGE_SIZE, | 1027 | .size = 32 * PAGE_SIZE, |
773 | .init = init_bsd_ring, | 1028 | .init = init_ring_common, |
774 | .write_tail = ring_write_tail, | 1029 | .write_tail = ring_write_tail, |
775 | .flush = bsd_ring_flush, | 1030 | .flush = bsd_ring_flush, |
776 | .add_request = ring_add_request, | 1031 | .add_request = ring_add_request, |
777 | .get_seqno = ring_status_page_get_seqno, | 1032 | .get_seqno = ring_get_seqno, |
778 | .user_irq_get = bsd_ring_get_user_irq, | 1033 | .irq_get = bsd_ring_get_irq, |
779 | .user_irq_put = bsd_ring_put_user_irq, | 1034 | .irq_put = bsd_ring_put_irq, |
780 | .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, | 1035 | .dispatch_execbuffer = ring_dispatch_execbuffer, |
781 | }; | 1036 | }; |
782 | 1037 | ||
783 | 1038 | ||
784 | static void gen6_bsd_ring_write_tail(struct drm_device *dev, | 1039 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, |
785 | struct intel_ring_buffer *ring, | ||
786 | u32 value) | 1040 | u32 value) |
787 | { | 1041 | { |
788 | drm_i915_private_t *dev_priv = dev->dev_private; | 1042 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
789 | 1043 | ||
790 | /* Every tail move must follow the sequence below */ | 1044 | /* Every tail move must follow the sequence below */ |
791 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | 1045 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, |
@@ -804,69 +1058,109 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev, | |||
804 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); | 1058 | GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); |
805 | } | 1059 | } |
806 | 1060 | ||
807 | static void gen6_ring_flush(struct drm_device *dev, | 1061 | static int gen6_ring_flush(struct intel_ring_buffer *ring, |
808 | struct intel_ring_buffer *ring, | 1062 | u32 invalidate_domains, |
809 | u32 invalidate_domains, | 1063 | u32 flush_domains) |
810 | u32 flush_domains) | ||
811 | { | 1064 | { |
812 | intel_ring_begin(dev, ring, 4); | 1065 | int ret; |
813 | intel_ring_emit(dev, ring, MI_FLUSH_DW); | 1066 | |
814 | intel_ring_emit(dev, ring, 0); | 1067 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) |
815 | intel_ring_emit(dev, ring, 0); | 1068 | return 0; |
816 | intel_ring_emit(dev, ring, 0); | 1069 | |
817 | intel_ring_advance(dev, ring); | 1070 | ret = intel_ring_begin(ring, 4); |
1071 | if (ret) | ||
1072 | return ret; | ||
1073 | |||
1074 | intel_ring_emit(ring, MI_FLUSH_DW); | ||
1075 | intel_ring_emit(ring, 0); | ||
1076 | intel_ring_emit(ring, 0); | ||
1077 | intel_ring_emit(ring, 0); | ||
1078 | intel_ring_advance(ring); | ||
1079 | return 0; | ||
818 | } | 1080 | } |
819 | 1081 | ||
820 | static int | 1082 | static int |
821 | gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, | 1083 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
822 | struct intel_ring_buffer *ring, | 1084 | u32 offset, u32 len) |
823 | struct drm_i915_gem_execbuffer2 *exec, | ||
824 | struct drm_clip_rect *cliprects, | ||
825 | uint64_t exec_offset) | ||
826 | { | 1085 | { |
827 | uint32_t exec_start; | 1086 | int ret; |
828 | 1087 | ||
829 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 1088 | ret = intel_ring_begin(ring, 2); |
1089 | if (ret) | ||
1090 | return ret; | ||
830 | 1091 | ||
831 | intel_ring_begin(dev, ring, 2); | 1092 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); |
832 | intel_ring_emit(dev, ring, | ||
833 | MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); | ||
834 | /* bit0-7 is the length on GEN6+ */ | 1093 | /* bit0-7 is the length on GEN6+ */ |
835 | intel_ring_emit(dev, ring, exec_start); | 1094 | intel_ring_emit(ring, offset); |
836 | intel_ring_advance(dev, ring); | 1095 | intel_ring_advance(ring); |
837 | 1096 | ||
838 | return 0; | 1097 | return 0; |
839 | } | 1098 | } |
840 | 1099 | ||
1100 | static bool | ||
1101 | gen6_render_ring_get_irq(struct intel_ring_buffer *ring) | ||
1102 | { | ||
1103 | return gen6_ring_get_irq(ring, | ||
1104 | GT_USER_INTERRUPT, | ||
1105 | GEN6_RENDER_USER_INTERRUPT); | ||
1106 | } | ||
1107 | |||
1108 | static void | ||
1109 | gen6_render_ring_put_irq(struct intel_ring_buffer *ring) | ||
1110 | { | ||
1111 | return gen6_ring_put_irq(ring, | ||
1112 | GT_USER_INTERRUPT, | ||
1113 | GEN6_RENDER_USER_INTERRUPT); | ||
1114 | } | ||
1115 | |||
1116 | static bool | ||
1117 | gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) | ||
1118 | { | ||
1119 | return gen6_ring_get_irq(ring, | ||
1120 | GT_GEN6_BSD_USER_INTERRUPT, | ||
1121 | GEN6_BSD_USER_INTERRUPT); | ||
1122 | } | ||
1123 | |||
1124 | static void | ||
1125 | gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) | ||
1126 | { | ||
1127 | return gen6_ring_put_irq(ring, | ||
1128 | GT_GEN6_BSD_USER_INTERRUPT, | ||
1129 | GEN6_BSD_USER_INTERRUPT); | ||
1130 | } | ||
1131 | |||
841 | /* ring buffer for Video Codec for Gen6+ */ | 1132 | /* ring buffer for Video Codec for Gen6+ */ |
842 | static const struct intel_ring_buffer gen6_bsd_ring = { | 1133 | static const struct intel_ring_buffer gen6_bsd_ring = { |
843 | .name = "gen6 bsd ring", | 1134 | .name = "gen6 bsd ring", |
844 | .id = RING_BSD, | 1135 | .id = RING_BSD, |
845 | .mmio_base = GEN6_BSD_RING_BASE, | 1136 | .mmio_base = GEN6_BSD_RING_BASE, |
846 | .size = 32 * PAGE_SIZE, | 1137 | .size = 32 * PAGE_SIZE, |
847 | .init = init_bsd_ring, | 1138 | .init = init_ring_common, |
848 | .write_tail = gen6_bsd_ring_write_tail, | 1139 | .write_tail = gen6_bsd_ring_write_tail, |
849 | .flush = gen6_ring_flush, | 1140 | .flush = gen6_ring_flush, |
850 | .add_request = ring_add_request, | 1141 | .add_request = gen6_add_request, |
851 | .get_seqno = ring_status_page_get_seqno, | 1142 | .get_seqno = ring_get_seqno, |
852 | .user_irq_get = bsd_ring_get_user_irq, | 1143 | .irq_get = gen6_bsd_ring_get_irq, |
853 | .user_irq_put = bsd_ring_put_user_irq, | 1144 | .irq_put = gen6_bsd_ring_put_irq, |
854 | .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, | 1145 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
855 | }; | 1146 | }; |
856 | 1147 | ||
857 | /* Blitter support (SandyBridge+) */ | 1148 | /* Blitter support (SandyBridge+) */ |
858 | 1149 | ||
859 | static void | 1150 | static bool |
860 | blt_ring_get_user_irq(struct drm_device *dev, | 1151 | blt_ring_get_irq(struct intel_ring_buffer *ring) |
861 | struct intel_ring_buffer *ring) | ||
862 | { | 1152 | { |
863 | /* do nothing */ | 1153 | return gen6_ring_get_irq(ring, |
1154 | GT_BLT_USER_INTERRUPT, | ||
1155 | GEN6_BLITTER_USER_INTERRUPT); | ||
864 | } | 1156 | } |
1157 | |||
865 | static void | 1158 | static void |
866 | blt_ring_put_user_irq(struct drm_device *dev, | 1159 | blt_ring_put_irq(struct intel_ring_buffer *ring) |
867 | struct intel_ring_buffer *ring) | ||
868 | { | 1160 | { |
869 | /* do nothing */ | 1161 | gen6_ring_put_irq(ring, |
1162 | GT_BLT_USER_INTERRUPT, | ||
1163 | GEN6_BLITTER_USER_INTERRUPT); | ||
870 | } | 1164 | } |
871 | 1165 | ||
872 | 1166 | ||
@@ -884,32 +1178,31 @@ to_blt_workaround(struct intel_ring_buffer *ring) | |||
884 | return ring->private; | 1178 | return ring->private; |
885 | } | 1179 | } |
886 | 1180 | ||
887 | static int blt_ring_init(struct drm_device *dev, | 1181 | static int blt_ring_init(struct intel_ring_buffer *ring) |
888 | struct intel_ring_buffer *ring) | ||
889 | { | 1182 | { |
890 | if (NEED_BLT_WORKAROUND(dev)) { | 1183 | if (NEED_BLT_WORKAROUND(ring->dev)) { |
891 | struct drm_i915_gem_object *obj; | 1184 | struct drm_i915_gem_object *obj; |
892 | u32 __iomem *ptr; | 1185 | u32 *ptr; |
893 | int ret; | 1186 | int ret; |
894 | 1187 | ||
895 | obj = to_intel_bo(i915_gem_alloc_object(dev, 4096)); | 1188 | obj = i915_gem_alloc_object(ring->dev, 4096); |
896 | if (obj == NULL) | 1189 | if (obj == NULL) |
897 | return -ENOMEM; | 1190 | return -ENOMEM; |
898 | 1191 | ||
899 | ret = i915_gem_object_pin(&obj->base, 4096); | 1192 | ret = i915_gem_object_pin(obj, 4096, true); |
900 | if (ret) { | 1193 | if (ret) { |
901 | drm_gem_object_unreference(&obj->base); | 1194 | drm_gem_object_unreference(&obj->base); |
902 | return ret; | 1195 | return ret; |
903 | } | 1196 | } |
904 | 1197 | ||
905 | ptr = kmap(obj->pages[0]); | 1198 | ptr = kmap(obj->pages[0]); |
906 | iowrite32(MI_BATCH_BUFFER_END, ptr); | 1199 | *ptr++ = MI_BATCH_BUFFER_END; |
907 | iowrite32(MI_NOOP, ptr+1); | 1200 | *ptr++ = MI_NOOP; |
908 | kunmap(obj->pages[0]); | 1201 | kunmap(obj->pages[0]); |
909 | 1202 | ||
910 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); | 1203 | ret = i915_gem_object_set_to_gtt_domain(obj, false); |
911 | if (ret) { | 1204 | if (ret) { |
912 | i915_gem_object_unpin(&obj->base); | 1205 | i915_gem_object_unpin(obj); |
913 | drm_gem_object_unreference(&obj->base); | 1206 | drm_gem_object_unreference(&obj->base); |
914 | return ret; | 1207 | return ret; |
915 | } | 1208 | } |
@@ -917,51 +1210,44 @@ static int blt_ring_init(struct drm_device *dev, | |||
917 | ring->private = obj; | 1210 | ring->private = obj; |
918 | } | 1211 | } |
919 | 1212 | ||
920 | return init_ring_common(dev, ring); | 1213 | return init_ring_common(ring); |
921 | } | 1214 | } |
922 | 1215 | ||
923 | static void blt_ring_begin(struct drm_device *dev, | 1216 | static int blt_ring_begin(struct intel_ring_buffer *ring, |
924 | struct intel_ring_buffer *ring, | ||
925 | int num_dwords) | 1217 | int num_dwords) |
926 | { | 1218 | { |
927 | if (ring->private) { | 1219 | if (ring->private) { |
928 | intel_ring_begin(dev, ring, num_dwords+2); | 1220 | int ret = intel_ring_begin(ring, num_dwords+2); |
929 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START); | 1221 | if (ret) |
930 | intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset); | 1222 | return ret; |
1223 | |||
1224 | intel_ring_emit(ring, MI_BATCH_BUFFER_START); | ||
1225 | intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset); | ||
1226 | |||
1227 | return 0; | ||
931 | } else | 1228 | } else |
932 | intel_ring_begin(dev, ring, 4); | 1229 | return intel_ring_begin(ring, 4); |
933 | } | 1230 | } |
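[When the workaround object exists, every emit sequence on the blitter is silently prefixed with a 2-dword MI_BATCH_BUFFER_START pointing at the pinned page that blt_ring_init() filled with MI_BATCH_BUFFER_END / MI_NOOP — hence the num_dwords + 2. For the flush below:

    blt_ring_flush() asks for 4
      -> intel_ring_begin(ring, 4 + 2)
      -> MI_BATCH_BUFFER_START, workaround gtt_offset    (2 dwords)
      -> MI_FLUSH_DW, 0, 0, 0                            (caller's 4)
]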
934 | 1231 | ||
935 | static void blt_ring_flush(struct drm_device *dev, | 1232 | static int blt_ring_flush(struct intel_ring_buffer *ring, |
936 | struct intel_ring_buffer *ring, | ||
937 | u32 invalidate_domains, | 1233 | u32 invalidate_domains, |
938 | u32 flush_domains) | 1234 | u32 flush_domains) |
939 | { | 1235 | { |
940 | blt_ring_begin(dev, ring, 4); | 1236 | int ret; |
941 | intel_ring_emit(dev, ring, MI_FLUSH_DW); | ||
942 | intel_ring_emit(dev, ring, 0); | ||
943 | intel_ring_emit(dev, ring, 0); | ||
944 | intel_ring_emit(dev, ring, 0); | ||
945 | intel_ring_advance(dev, ring); | ||
946 | } | ||
947 | 1237 | ||
948 | static u32 | 1238 | if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) |
949 | blt_ring_add_request(struct drm_device *dev, | 1239 | return 0; |
950 | struct intel_ring_buffer *ring, | ||
951 | u32 flush_domains) | ||
952 | { | ||
953 | u32 seqno = i915_gem_get_seqno(dev); | ||
954 | 1240 | ||
955 | blt_ring_begin(dev, ring, 4); | 1241 | ret = blt_ring_begin(ring, 4); |
956 | intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); | 1242 | if (ret) |
957 | intel_ring_emit(dev, ring, | 1243 | return ret; |
958 | I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
959 | intel_ring_emit(dev, ring, seqno); | ||
960 | intel_ring_emit(dev, ring, MI_USER_INTERRUPT); | ||
961 | intel_ring_advance(dev, ring); | ||
962 | 1244 | ||
963 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); | 1245 | intel_ring_emit(ring, MI_FLUSH_DW); |
964 | return seqno; | 1246 | intel_ring_emit(ring, 0); |
1247 | intel_ring_emit(ring, 0); | ||
1248 | intel_ring_emit(ring, 0); | ||
1249 | intel_ring_advance(ring); | ||
1250 | return 0; | ||
965 | } | 1251 | } |
966 | 1252 | ||
967 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) | 1253 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) |
@@ -982,47 +1268,98 @@ static const struct intel_ring_buffer gen6_blt_ring = { | |||
982 | .init = blt_ring_init, | 1268 | .init = blt_ring_init, |
983 | .write_tail = ring_write_tail, | 1269 | .write_tail = ring_write_tail, |
984 | .flush = blt_ring_flush, | 1270 | .flush = blt_ring_flush, |
985 | .add_request = blt_ring_add_request, | 1271 | .add_request = gen6_add_request, |
986 | .get_seqno = ring_status_page_get_seqno, | 1272 | .get_seqno = ring_get_seqno, |
987 | .user_irq_get = blt_ring_get_user_irq, | 1273 | .irq_get = blt_ring_get_irq, |
988 | .user_irq_put = blt_ring_put_user_irq, | 1274 | .irq_put = blt_ring_put_irq, |
989 | .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, | 1275 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
990 | .cleanup = blt_ring_cleanup, | 1276 | .cleanup = blt_ring_cleanup, |
991 | }; | 1277 | }; |
992 | 1278 | ||
993 | int intel_init_render_ring_buffer(struct drm_device *dev) | 1279 | int intel_init_render_ring_buffer(struct drm_device *dev) |
994 | { | 1280 | { |
995 | drm_i915_private_t *dev_priv = dev->dev_private; | 1281 | drm_i915_private_t *dev_priv = dev->dev_private; |
996 | 1282 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | |
997 | dev_priv->render_ring = render_ring; | 1283 | |
1284 | *ring = render_ring; | ||
1285 | if (INTEL_INFO(dev)->gen >= 6) { | ||
1286 | ring->add_request = gen6_add_request; | ||
1287 | ring->irq_get = gen6_render_ring_get_irq; | ||
1288 | ring->irq_put = gen6_render_ring_put_irq; | ||
1289 | } else if (IS_GEN5(dev)) { | ||
1290 | ring->add_request = pc_render_add_request; | ||
1291 | ring->get_seqno = pc_render_get_seqno; | ||
1292 | } | ||
998 | 1293 | ||
999 | if (!I915_NEED_GFX_HWS(dev)) { | 1294 | if (!I915_NEED_GFX_HWS(dev)) { |
1000 | dev_priv->render_ring.status_page.page_addr | 1295 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; |
1001 | = dev_priv->status_page_dmah->vaddr; | 1296 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
1002 | memset(dev_priv->render_ring.status_page.page_addr, | 1297 | } |
1003 | 0, PAGE_SIZE); | 1298 | |
1299 | return intel_init_ring_buffer(dev, ring); | ||
1300 | } | ||
1301 | |||
1302 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | ||
1303 | { | ||
1304 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1305 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | ||
1306 | |||
1307 | *ring = render_ring; | ||
1308 | if (INTEL_INFO(dev)->gen >= 6) { | ||
1309 | ring->add_request = gen6_add_request; | ||
1310 | ring->irq_get = gen6_render_ring_get_irq; | ||
1311 | ring->irq_put = gen6_render_ring_put_irq; | ||
1312 | } else if (IS_GEN5(dev)) { | ||
1313 | ring->add_request = pc_render_add_request; | ||
1314 | ring->get_seqno = pc_render_get_seqno; | ||
1004 | } | 1315 | } |
1005 | 1316 | ||
1006 | return intel_init_ring_buffer(dev, &dev_priv->render_ring); | 1317 | ring->dev = dev; |
1318 | INIT_LIST_HEAD(&ring->active_list); | ||
1319 | INIT_LIST_HEAD(&ring->request_list); | ||
1320 | INIT_LIST_HEAD(&ring->gpu_write_list); | ||
1321 | |||
1322 | ring->size = size; | ||
1323 | ring->effective_size = ring->size; | ||
1324 | if (IS_I830(ring->dev)) | ||
1325 | ring->effective_size -= 128; | ||
1326 | |||
1327 | ring->map.offset = start; | ||
1328 | ring->map.size = size; | ||
1329 | ring->map.type = 0; | ||
1330 | ring->map.flags = 0; | ||
1331 | ring->map.mtrr = 0; | ||
1332 | |||
1333 | drm_core_ioremap_wc(&ring->map, dev); | ||
1334 | if (ring->map.handle == NULL) { | ||
1335 | DRM_ERROR("can not ioremap virtual address for" | ||
1336 | " ring buffer\n"); | ||
1337 | return -ENOMEM; | ||
1338 | } | ||
1339 | |||
1340 | ring->virtual_start = (void __force __iomem *)ring->map.handle; | ||
1341 | return 0; | ||
1007 | } | 1342 | } |
1008 | 1343 | ||
1009 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | 1344 | int intel_init_bsd_ring_buffer(struct drm_device *dev) |
1010 | { | 1345 | { |
1011 | drm_i915_private_t *dev_priv = dev->dev_private; | 1346 | drm_i915_private_t *dev_priv = dev->dev_private; |
1347 | struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; | ||
1012 | 1348 | ||
1013 | if (IS_GEN6(dev)) | 1349 | if (IS_GEN6(dev)) |
1014 | dev_priv->bsd_ring = gen6_bsd_ring; | 1350 | *ring = gen6_bsd_ring; |
1015 | else | 1351 | else |
1016 | dev_priv->bsd_ring = bsd_ring; | 1352 | *ring = bsd_ring; |
1017 | 1353 | ||
1018 | return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); | 1354 | return intel_init_ring_buffer(dev, ring); |
1019 | } | 1355 | } |
1020 | 1356 | ||
1021 | int intel_init_blt_ring_buffer(struct drm_device *dev) | 1357 | int intel_init_blt_ring_buffer(struct drm_device *dev) |
1022 | { | 1358 | { |
1023 | drm_i915_private_t *dev_priv = dev->dev_private; | 1359 | drm_i915_private_t *dev_priv = dev->dev_private; |
1360 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | ||
1024 | 1361 | ||
1025 | dev_priv->blt_ring = gen6_blt_ring; | 1362 | *ring = gen6_blt_ring; |
1026 | 1363 | ||
1027 | return intel_init_ring_buffer(dev, &dev_priv->blt_ring); | 1364 | return intel_init_ring_buffer(dev, ring); |
1028 | } | 1365 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3126c2681983..6d6fde85a636 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -1,22 +1,40 @@ | |||
1 | #ifndef _INTEL_RINGBUFFER_H_ | 1 | #ifndef _INTEL_RINGBUFFER_H_ |
2 | #define _INTEL_RINGBUFFER_H_ | 2 | #define _INTEL_RINGBUFFER_H_ |
3 | 3 | ||
4 | enum { | ||
5 | RCS = 0x0, | ||
6 | VCS, | ||
7 | BCS, | ||
8 | I915_NUM_RINGS, | ||
9 | }; | ||
10 | |||
4 | struct intel_hw_status_page { | 11 | struct intel_hw_status_page { |
5 | void *page_addr; | 12 | u32 __iomem *page_addr; |
6 | unsigned int gfx_addr; | 13 | unsigned int gfx_addr; |
7 | struct drm_gem_object *obj; | 14 | struct drm_i915_gem_object *obj; |
8 | }; | 15 | }; |
9 | 16 | ||
10 | #define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base)) | 17 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) |
11 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) | 18 | |
12 | #define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base)) | 19 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) |
13 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) | 20 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) |
14 | #define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base)) | 21 | |
15 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) | 22 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) |
16 | #define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base)) | 23 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) |
17 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) | 24 | |
25 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) | ||
26 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | ||
27 | |||
28 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) | ||
29 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | ||
30 | |||
31 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | ||
32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) | ||
33 | |||
34 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) | ||
35 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) | ||
36 | #define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base)) | ||
18 | 37 | ||
19 | struct drm_i915_gem_execbuffer2; | ||
20 | struct intel_ring_buffer { | 38 | struct intel_ring_buffer { |
21 | const char *name; | 39 | const char *name; |
22 | enum intel_ring_id { | 40 | enum intel_ring_id { |
@@ -25,44 +43,38 @@ struct intel_ring_buffer { | |||
25 | RING_BLT = 0x4, | 43 | RING_BLT = 0x4, |
26 | } id; | 44 | } id; |
27 | u32 mmio_base; | 45 | u32 mmio_base; |
28 | unsigned long size; | ||
29 | void *virtual_start; | 46 | void *virtual_start; |
30 | struct drm_device *dev; | 47 | struct drm_device *dev; |
31 | struct drm_gem_object *gem_object; | 48 | struct drm_i915_gem_object *obj; |
32 | 49 | ||
33 | unsigned int head; | 50 | u32 head; |
34 | unsigned int tail; | 51 | u32 tail; |
35 | int space; | 52 | int space; |
53 | int size; | ||
54 | int effective_size; | ||
36 | struct intel_hw_status_page status_page; | 55 | struct intel_hw_status_page status_page; |
37 | 56 | ||
38 | u32 irq_gem_seqno; /* last seq seem at irq time */ | 57 | spinlock_t irq_lock; |
39 | u32 waiting_gem_seqno; | 58 | u32 irq_refcount; |
40 | int user_irq_refcount; | 59 | u32 irq_mask; |
41 | void (*user_irq_get)(struct drm_device *dev, | 60 | u32 irq_seqno; /* last seq seem at irq time */ |
42 | struct intel_ring_buffer *ring); | 61 | u32 waiting_seqno; |
43 | void (*user_irq_put)(struct drm_device *dev, | 62 | u32 sync_seqno[I915_NUM_RINGS-1]; |
44 | struct intel_ring_buffer *ring); | 63 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); |
64 | void (*irq_put)(struct intel_ring_buffer *ring); | ||
45 | 65 | ||
46 | int (*init)(struct drm_device *dev, | 66 | int (*init)(struct intel_ring_buffer *ring); |
47 | struct intel_ring_buffer *ring); | ||
48 | 67 | ||
49 | void (*write_tail)(struct drm_device *dev, | 68 | void (*write_tail)(struct intel_ring_buffer *ring, |
50 | struct intel_ring_buffer *ring, | ||
51 | u32 value); | 69 | u32 value); |
52 | void (*flush)(struct drm_device *dev, | 70 | int __must_check (*flush)(struct intel_ring_buffer *ring, |
53 | struct intel_ring_buffer *ring, | 71 | u32 invalidate_domains, |
54 | u32 invalidate_domains, | 72 | u32 flush_domains); |
55 | u32 flush_domains); | 73 | int (*add_request)(struct intel_ring_buffer *ring, |
56 | u32 (*add_request)(struct drm_device *dev, | 74 | u32 *seqno); |
57 | struct intel_ring_buffer *ring, | 75 | u32 (*get_seqno)(struct intel_ring_buffer *ring); |
58 | u32 flush_domains); | 76 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
59 | u32 (*get_seqno)(struct drm_device *dev, | 77 | u32 offset, u32 length); |
60 | struct intel_ring_buffer *ring); | ||
61 | int (*dispatch_gem_execbuffer)(struct drm_device *dev, | ||
62 | struct intel_ring_buffer *ring, | ||
63 | struct drm_i915_gem_execbuffer2 *exec, | ||
64 | struct drm_clip_rect *cliprects, | ||
65 | uint64_t exec_offset); | ||
66 | void (*cleanup)(struct intel_ring_buffer *ring); | 78 | void (*cleanup)(struct intel_ring_buffer *ring); |
67 | 79 | ||
68 | /** | 80 | /** |
@@ -95,7 +107,7 @@ struct intel_ring_buffer { | |||
95 | /** | 107 | /** |
96 | * Do we have some not yet emitted requests outstanding? | 108 | * Do we have some not yet emitted requests outstanding? |
97 | */ | 109 | */ |
98 | bool outstanding_lazy_request; | 110 | u32 outstanding_lazy_request; |
99 | 111 | ||
100 | wait_queue_head_t irq_queue; | 112 | wait_queue_head_t irq_queue; |
101 | drm_local_map_t map; | 113 | drm_local_map_t map; |
@@ -104,44 +116,57 @@ struct intel_ring_buffer { | |||
104 | }; | 116 | }; |
105 | 117 | ||
106 | static inline u32 | 118 | static inline u32 |
119 | intel_ring_sync_index(struct intel_ring_buffer *ring, | ||
120 | struct intel_ring_buffer *other) | ||
121 | { | ||
122 | int idx; | ||
123 | |||
124 | /* | ||
125 | * cs -> 0 = vcs, 1 = bcs | ||
126 | * vcs -> 0 = bcs, 1 = cs, | ||
127 | * bcs -> 0 = cs, 1 = vcs. | ||
128 | */ | ||
129 | |||
130 | idx = (other - ring) - 1; | ||
131 | if (idx < 0) | ||
132 | idx += I915_NUM_RINGS; | ||
133 | |||
134 | return idx; | ||
135 | } | ||
136 | |||
137 | static inline u32 | ||
107 | intel_read_status_page(struct intel_ring_buffer *ring, | 138 | intel_read_status_page(struct intel_ring_buffer *ring, |
108 | int reg) | 139 | int reg) |
109 | { | 140 | { |
110 | u32 *regs = ring->status_page.page_addr; | 141 | return ioread32(ring->status_page.page_addr + reg); |
111 | return regs[reg]; | ||
112 | } | 142 | } |
113 | 143 | ||
114 | int intel_init_ring_buffer(struct drm_device *dev, | 144 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); |
115 | struct intel_ring_buffer *ring); | 145 | int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); |
116 | void intel_cleanup_ring_buffer(struct drm_device *dev, | 146 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); |
117 | struct intel_ring_buffer *ring); | 147 | |
118 | int intel_wait_ring_buffer(struct drm_device *dev, | 148 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, |
119 | struct intel_ring_buffer *ring, int n); | 149 | u32 data) |
120 | void intel_ring_begin(struct drm_device *dev, | ||
121 | struct intel_ring_buffer *ring, int n); | ||
122 | |||
123 | static inline void intel_ring_emit(struct drm_device *dev, | ||
124 | struct intel_ring_buffer *ring, | ||
125 | unsigned int data) | ||
126 | { | 150 | { |
127 | unsigned int *virt = ring->virtual_start + ring->tail; | 151 | iowrite32(data, ring->virtual_start + ring->tail); |
128 | *virt = data; | ||
129 | ring->tail += 4; | 152 | ring->tail += 4; |
130 | } | 153 | } |
131 | 154 | ||
132 | void intel_ring_advance(struct drm_device *dev, | 155 | void intel_ring_advance(struct intel_ring_buffer *ring); |
133 | struct intel_ring_buffer *ring); | ||
134 | 156 | ||
135 | u32 intel_ring_get_seqno(struct drm_device *dev, | 157 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
136 | struct intel_ring_buffer *ring); | 158 | int intel_ring_sync(struct intel_ring_buffer *ring, |
159 | struct intel_ring_buffer *to, | ||
160 | u32 seqno); | ||
137 | 161 | ||
138 | int intel_init_render_ring_buffer(struct drm_device *dev); | 162 | int intel_init_render_ring_buffer(struct drm_device *dev); |
139 | int intel_init_bsd_ring_buffer(struct drm_device *dev); | 163 | int intel_init_bsd_ring_buffer(struct drm_device *dev); |
140 | int intel_init_blt_ring_buffer(struct drm_device *dev); | 164 | int intel_init_blt_ring_buffer(struct drm_device *dev); |
141 | 165 | ||
142 | u32 intel_ring_get_active_head(struct drm_device *dev, | 166 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
143 | struct intel_ring_buffer *ring); | 167 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); |
144 | void intel_ring_setup_status_page(struct drm_device *dev, | 168 | |
145 | struct intel_ring_buffer *ring); | 169 | /* DRI warts */ |
170 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); | ||
146 | 171 | ||
147 | #endif /* _INTEL_RINGBUFFER_H_ */ | 172 | #endif /* _INTEL_RINGBUFFER_H_ */ |
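[Annotation] The new intel_ring_sync_index() above turns each ring's view of its peers into a dense 0..I915_NUM_RINGS-2 index using nothing but pointer arithmetic on the contiguous ring array, which is why sync_seqno[] needs only I915_NUM_RINGS-1 slots. A minimal user-space sketch of that arithmetic, with array indices standing in for the ring pointers (assumes three rings laid out as cs, vcs, bcs, as the comment documents):

    #include <stdio.h>

    #define NUM_RINGS 3

    /* same computation as intel_ring_sync_index() */
    static int sync_index(int ring, int other)
    {
            int idx = (other - ring) - 1;
            if (idx < 0)
                    idx += NUM_RINGS;
            return idx;
    }

    int main(void)
    {
            const char *name[NUM_RINGS] = { "cs", "vcs", "bcs" };
            int i, j;

            /* reproduces the documented mapping:
             * cs -> 0 = vcs, 1 = bcs; vcs -> 0 = bcs, 1 = cs; ... */
            for (i = 0; i < NUM_RINGS; i++)
                    for (j = 0; j < NUM_RINGS; j++)
                            if (i != j)
                                    printf("%s -> %d = %s\n",
                                           name[i], sync_index(i, j), name[j]);
            return 0;
    }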
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index de158b76bcd5..318f398e6b2e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -107,7 +107,8 @@ struct intel_sdvo { | |||
107 | * This is set if we treat the device as HDMI, instead of DVI. | 107 | * This is set if we treat the device as HDMI, instead of DVI. |
108 | */ | 108 | */ |
109 | bool is_hdmi; | 109 | bool is_hdmi; |
110 | bool has_audio; | 110 | bool has_hdmi_monitor; |
111 | bool has_hdmi_audio; | ||
111 | 112 | ||
112 | /** | 113 | /** |
113 | * This is set if we detect output of sdvo device as LVDS and | 114 | * This is set if we detect output of sdvo device as LVDS and |
@@ -472,20 +473,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, | |||
472 | return false; | 473 | return false; |
473 | } | 474 | } |
474 | 475 | ||
475 | i = 3; | ||
476 | while (status == SDVO_CMD_STATUS_PENDING && i--) { | ||
477 | if (!intel_sdvo_read_byte(intel_sdvo, | ||
478 | SDVO_I2C_CMD_STATUS, | ||
479 | &status)) | ||
480 | return false; | ||
481 | } | ||
482 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
483 | DRM_DEBUG_KMS("command returns response %s [%d]\n", | ||
484 | status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???", | ||
485 | status); | ||
486 | return false; | ||
487 | } | ||
488 | |||
489 | return true; | 476 | return true; |
490 | } | 477 | } |
491 | 478 | ||
@@ -496,6 +483,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
496 | u8 status; | 483 | u8 status; |
497 | int i; | 484 | int i; |
498 | 485 | ||
486 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); | ||
487 | |||
499 | /* | 488 | /* |
500 | * The documentation states that all commands will be | 489 | * The documentation states that all commands will be |
501 | * processed within 15µs, and that we need only poll | 490 | * processed within 15µs, and that we need only poll |
@@ -504,14 +493,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
504 | * | 493 | * |
505 | * Check 5 times in case the hardware failed to read the docs. | 494 | * Check 5 times in case the hardware failed to read the docs. |
506 | */ | 495 | */ |
507 | do { | 496 | if (!intel_sdvo_read_byte(intel_sdvo, |
497 | SDVO_I2C_CMD_STATUS, | ||
498 | &status)) | ||
499 | goto log_fail; | ||
500 | |||
501 | while (status == SDVO_CMD_STATUS_PENDING && retry--) { | ||
502 | udelay(15); | ||
508 | if (!intel_sdvo_read_byte(intel_sdvo, | 503 | if (!intel_sdvo_read_byte(intel_sdvo, |
509 | SDVO_I2C_CMD_STATUS, | 504 | SDVO_I2C_CMD_STATUS, |
510 | &status)) | 505 | &status)) |
511 | return false; | 506 | goto log_fail; |
512 | } while (status == SDVO_CMD_STATUS_PENDING && --retry); | 507 | } |
513 | 508 | ||
514 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); | ||
515 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) | 509 | if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) |
516 | DRM_LOG_KMS("(%s)", cmd_status_names[status]); | 510 | DRM_LOG_KMS("(%s)", cmd_status_names[status]); |
517 | else | 511 | else |
@@ -532,7 +526,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, | |||
532 | return true; | 526 | return true; |
533 | 527 | ||
534 | log_fail: | 528 | log_fail: |
535 | DRM_LOG_KMS("\n"); | 529 | DRM_LOG_KMS("... failed\n"); |
536 | return false; | 530 | return false; |
537 | } | 531 | } |
538 | 532 | ||
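[Annotation] The reworked poll above reads the status register once before entering the loop, so a command that completed within the documented 15µs never pays a delay, and an i2c failure aborts immediately instead of being retried. A hedged stand-alone sketch of that shape (the constant's value is an assumption mirroring SDVO_CMD_STATUS_PENDING; the driver uses udelay(15) where the comment marks it):

    #include <stdbool.h>

    #define CMD_STATUS_PENDING 4    /* assumed value for the sketch */

    static bool wait_for_status(bool (*read_status)(unsigned char *),
                                unsigned char *status)
    {
            int retry = 5;          /* "check 5 times in case the hardware
                                     * failed to read the docs" */

            if (!read_status(status))
                    return false;   /* i2c error: fail, don't retry */

            while (*status == CMD_STATUS_PENDING && retry--) {
                    /* udelay(15) here in the driver */
                    if (!read_status(status))
                            return false;
            }
            return true;            /* caller inspects *status */
    }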
@@ -549,6 +543,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
549 | static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, | 543 | static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, |
550 | u8 ddc_bus) | 544 | u8 ddc_bus) |
551 | { | 545 | { |
546 | /* This must be the immediately preceding write before the i2c xfer */ | ||
552 | return intel_sdvo_write_cmd(intel_sdvo, | 547 | return intel_sdvo_write_cmd(intel_sdvo, |
553 | SDVO_CMD_SET_CONTROL_BUS_SWITCH, | 548 | SDVO_CMD_SET_CONTROL_BUS_SWITCH, |
554 | &ddc_bus, 1); | 549 | &ddc_bus, 1); |
@@ -556,7 +551,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, | |||
556 | 551 | ||
557 | static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) | 552 | static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) |
558 | { | 553 | { |
559 | return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); | 554 | if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) |
555 | return false; | ||
556 | |||
557 | return intel_sdvo_read_response(intel_sdvo, NULL, 0); | ||
560 | } | 558 | } |
561 | 559 | ||
562 | static bool | 560 | static bool |
@@ -858,18 +856,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) | |||
858 | 856 | ||
859 | intel_dip_infoframe_csum(&avi_if); | 857 | intel_dip_infoframe_csum(&avi_if); |
860 | 858 | ||
861 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, | 859 | if (!intel_sdvo_set_value(intel_sdvo, |
860 | SDVO_CMD_SET_HBUF_INDEX, | ||
862 | set_buf_index, 2)) | 861 | set_buf_index, 2)) |
863 | return false; | 862 | return false; |
864 | 863 | ||
865 | for (i = 0; i < sizeof(avi_if); i += 8) { | 864 | for (i = 0; i < sizeof(avi_if); i += 8) { |
866 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, | 865 | if (!intel_sdvo_set_value(intel_sdvo, |
866 | SDVO_CMD_SET_HBUF_DATA, | ||
867 | data, 8)) | 867 | data, 8)) |
868 | return false; | 868 | return false; |
869 | data++; | 869 | data++; |
870 | } | 870 | } |
871 | 871 | ||
872 | return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, | 872 | return intel_sdvo_set_value(intel_sdvo, |
873 | SDVO_CMD_SET_HBUF_TXRATE, | ||
873 | &tx_rate, 1); | 874 | &tx_rate, 1); |
874 | } | 875 | } |
875 | 876 | ||
@@ -1023,9 +1024,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1023 | if (!intel_sdvo_set_target_input(intel_sdvo)) | 1024 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
1024 | return; | 1025 | return; |
1025 | 1026 | ||
1026 | if (intel_sdvo->is_hdmi && | 1027 | if (intel_sdvo->has_hdmi_monitor) { |
1027 | !intel_sdvo_set_avi_infoframe(intel_sdvo)) | 1028 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); |
1028 | return; | 1029 | intel_sdvo_set_colorimetry(intel_sdvo, |
1030 | SDVO_COLORIMETRY_RGB256); | ||
1031 | intel_sdvo_set_avi_infoframe(intel_sdvo); | ||
1032 | } else | ||
1033 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); | ||
1029 | 1034 | ||
1030 | if (intel_sdvo->is_tv && | 1035 | if (intel_sdvo->is_tv && |
1031 | !intel_sdvo_set_tv_format(intel_sdvo)) | 1036 | !intel_sdvo_set_tv_format(intel_sdvo)) |
@@ -1044,7 +1049,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1044 | 1049 | ||
1045 | /* Set the SDVO control regs. */ | 1050 | /* Set the SDVO control regs. */ |
1046 | if (INTEL_INFO(dev)->gen >= 4) { | 1051 | if (INTEL_INFO(dev)->gen >= 4) { |
1047 | sdvox = SDVO_BORDER_ENABLE; | 1052 | sdvox = 0; |
1053 | if (INTEL_INFO(dev)->gen < 5) | ||
1054 | sdvox |= SDVO_BORDER_ENABLE; | ||
1048 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 1055 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
1049 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; | 1056 | sdvox |= SDVO_VSYNC_ACTIVE_HIGH; |
1050 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 1057 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
@@ -1063,7 +1070,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1063 | } | 1070 | } |
1064 | if (intel_crtc->pipe == 1) | 1071 | if (intel_crtc->pipe == 1) |
1065 | sdvox |= SDVO_PIPE_B_SELECT; | 1072 | sdvox |= SDVO_PIPE_B_SELECT; |
1066 | if (intel_sdvo->has_audio) | 1073 | if (intel_sdvo->has_hdmi_audio) |
1067 | sdvox |= SDVO_AUDIO_ENABLE; | 1074 | sdvox |= SDVO_AUDIO_ENABLE; |
1068 | 1075 | ||
1069 | if (INTEL_INFO(dev)->gen >= 4) { | 1076 | if (INTEL_INFO(dev)->gen >= 4) { |
@@ -1074,7 +1081,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1074 | sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; | 1081 | sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; |
1075 | } | 1082 | } |
1076 | 1083 | ||
1077 | if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL) | 1084 | if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && |
1085 | INTEL_INFO(dev)->gen < 5) | ||
1078 | sdvox |= SDVO_STALL_SELECT; | 1086 | sdvox |= SDVO_STALL_SELECT; |
1079 | intel_sdvo_write_sdvox(intel_sdvo, sdvox); | 1087 | intel_sdvo_write_sdvox(intel_sdvo, sdvox); |
1080 | } | 1088 | } |
@@ -1295,55 +1303,14 @@ intel_sdvo_get_edid(struct drm_connector *connector) | |||
1295 | return drm_get_edid(connector, &sdvo->ddc); | 1303 | return drm_get_edid(connector, &sdvo->ddc); |
1296 | } | 1304 | } |
1297 | 1305 | ||
1298 | static struct drm_connector * | ||
1299 | intel_find_analog_connector(struct drm_device *dev) | ||
1300 | { | ||
1301 | struct drm_connector *connector; | ||
1302 | struct intel_sdvo *encoder; | ||
1303 | |||
1304 | list_for_each_entry(encoder, | ||
1305 | &dev->mode_config.encoder_list, | ||
1306 | base.base.head) { | ||
1307 | if (encoder->base.type == INTEL_OUTPUT_ANALOG) { | ||
1308 | list_for_each_entry(connector, | ||
1309 | &dev->mode_config.connector_list, | ||
1310 | head) { | ||
1311 | if (&encoder->base == | ||
1312 | intel_attached_encoder(connector)) | ||
1313 | return connector; | ||
1314 | } | ||
1315 | } | ||
1316 | } | ||
1317 | |||
1318 | return NULL; | ||
1319 | } | ||
1320 | |||
1321 | static int | ||
1322 | intel_analog_is_connected(struct drm_device *dev) | ||
1323 | { | ||
1324 | struct drm_connector *analog_connector; | ||
1325 | |||
1326 | analog_connector = intel_find_analog_connector(dev); | ||
1327 | if (!analog_connector) | ||
1328 | return false; | ||
1329 | |||
1330 | if (analog_connector->funcs->detect(analog_connector, false) == | ||
1331 | connector_status_disconnected) | ||
1332 | return false; | ||
1333 | |||
1334 | return true; | ||
1335 | } | ||
1336 | |||
1337 | /* Mac mini hack -- use the same DDC as the analog connector */ | 1306 | /* Mac mini hack -- use the same DDC as the analog connector */ |
1338 | static struct edid * | 1307 | static struct edid * |
1339 | intel_sdvo_get_analog_edid(struct drm_connector *connector) | 1308 | intel_sdvo_get_analog_edid(struct drm_connector *connector) |
1340 | { | 1309 | { |
1341 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1310 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1342 | 1311 | ||
1343 | if (!intel_analog_is_connected(connector->dev)) | 1312 | return drm_get_edid(connector, |
1344 | return NULL; | 1313 | &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); |
1345 | |||
1346 | return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); | ||
1347 | } | 1314 | } |
1348 | 1315 | ||
1349 | enum drm_connector_status | 1316 | enum drm_connector_status |
@@ -1388,8 +1355,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1388 | /* DDC bus is shared, match EDID to connector type */ | 1355 | /* DDC bus is shared, match EDID to connector type */ |
1389 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 1356 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
1390 | status = connector_status_connected; | 1357 | status = connector_status_connected; |
1391 | intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); | 1358 | if (intel_sdvo->is_hdmi) { |
1392 | intel_sdvo->has_audio = drm_detect_monitor_audio(edid); | 1359 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); |
1360 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); | ||
1361 | } | ||
1393 | } | 1362 | } |
1394 | connector->display_info.raw_edid = NULL; | 1363 | connector->display_info.raw_edid = NULL; |
1395 | kfree(edid); | 1364 | kfree(edid); |
@@ -1398,7 +1367,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1398 | if (status == connector_status_connected) { | 1367 | if (status == connector_status_connected) { |
1399 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | 1368 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1400 | if (intel_sdvo_connector->force_audio) | 1369 | if (intel_sdvo_connector->force_audio) |
1401 | intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; | 1370 | intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0; |
1402 | } | 1371 | } |
1403 | 1372 | ||
1404 | return status; | 1373 | return status; |
@@ -1415,10 +1384,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1415 | if (!intel_sdvo_write_cmd(intel_sdvo, | 1384 | if (!intel_sdvo_write_cmd(intel_sdvo, |
1416 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) | 1385 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) |
1417 | return connector_status_unknown; | 1386 | return connector_status_unknown; |
1418 | if (intel_sdvo->is_tv) { | 1387 | |
1419 | /* add 30ms delay when the output type is SDVO-TV */ | 1388 | /* add 30ms delay when the output type might be TV */ |
1389 | if (intel_sdvo->caps.output_flags & | ||
1390 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) | ||
1420 | mdelay(30); | 1391 | mdelay(30); |
1421 | } | 1392 | |
1422 | if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) | 1393 | if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) |
1423 | return connector_status_unknown; | 1394 | return connector_status_unknown; |
1424 | 1395 | ||
@@ -1431,6 +1402,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1431 | 1402 | ||
1432 | intel_sdvo->attached_output = response; | 1403 | intel_sdvo->attached_output = response; |
1433 | 1404 | ||
1405 | intel_sdvo->has_hdmi_monitor = false; | ||
1406 | intel_sdvo->has_hdmi_audio = false; | ||
1407 | |||
1434 | if ((intel_sdvo_connector->output_flag & response) == 0) | 1408 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1435 | ret = connector_status_disconnected; | 1409 | ret = connector_status_disconnected; |
1436 | else if (response & SDVO_TMDS_MASK) | 1410 | else if (response & SDVO_TMDS_MASK) |
@@ -1472,8 +1446,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1472 | edid = intel_sdvo_get_analog_edid(connector); | 1446 | edid = intel_sdvo_get_analog_edid(connector); |
1473 | 1447 | ||
1474 | if (edid != NULL) { | 1448 | if (edid != NULL) { |
1475 | drm_mode_connector_update_edid_property(connector, edid); | 1449 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
1476 | drm_add_edid_modes(connector, edid); | 1450 | drm_mode_connector_update_edid_property(connector, edid); |
1451 | drm_add_edid_modes(connector, edid); | ||
1452 | } | ||
1477 | connector->display_info.raw_edid = NULL; | 1453 | connector->display_info.raw_edid = NULL; |
1478 | kfree(edid); | 1454 | kfree(edid); |
1479 | } | 1455 | } |
@@ -1484,7 +1460,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1484 | * Note! This is in reply order (see loop in get_tv_modes). | 1460 | * Note! This is in reply order (see loop in get_tv_modes). |
1485 | * XXX: all 60Hz refresh? | 1461 | * XXX: all 60Hz refresh? |
1486 | */ | 1462 | */ |
1487 | struct drm_display_mode sdvo_tv_modes[] = { | 1463 | static const struct drm_display_mode sdvo_tv_modes[] = { |
1488 | { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, | 1464 | { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, |
1489 | 416, 0, 200, 201, 232, 233, 0, | 1465 | 416, 0, 200, 201, 232, 233, 0, |
1490 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | 1466 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
@@ -1713,12 +1689,12 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1713 | 1689 | ||
1714 | intel_sdvo_connector->force_audio = val; | 1690 | intel_sdvo_connector->force_audio = val; |
1715 | 1691 | ||
1716 | if (val > 0 && intel_sdvo->has_audio) | 1692 | if (val > 0 && intel_sdvo->has_hdmi_audio) |
1717 | return 0; | 1693 | return 0; |
1718 | if (val < 0 && !intel_sdvo->has_audio) | 1694 | if (val < 0 && !intel_sdvo->has_hdmi_audio) |
1719 | return 0; | 1695 | return 0; |
1720 | 1696 | ||
1721 | intel_sdvo->has_audio = val > 0; | 1697 | intel_sdvo->has_hdmi_audio = val > 0; |
1722 | goto done; | 1698 | goto done; |
1723 | } | 1699 | } |
1724 | 1700 | ||
@@ -1942,28 +1918,18 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, | |||
1942 | speed = mapping->i2c_speed; | 1918 | speed = mapping->i2c_speed; |
1943 | } | 1919 | } |
1944 | 1920 | ||
1945 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; | 1921 | if (pin < GMBUS_NUM_PORTS) { |
1946 | intel_gmbus_set_speed(sdvo->i2c, speed); | 1922 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; |
1947 | intel_gmbus_force_bit(sdvo->i2c, true); | 1923 | intel_gmbus_set_speed(sdvo->i2c, speed); |
1924 | intel_gmbus_force_bit(sdvo->i2c, true); | ||
1925 | } else | ||
1926 | sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; | ||
1948 | } | 1927 | } |
1949 | 1928 | ||
1950 | static bool | 1929 | static bool |
1951 | intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) | 1930 | intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) |
1952 | { | 1931 | { |
1953 | int is_hdmi; | 1932 | return intel_sdvo_check_supp_encode(intel_sdvo); |
1954 | |||
1955 | if (!intel_sdvo_check_supp_encode(intel_sdvo)) | ||
1956 | return false; | ||
1957 | |||
1958 | if (!intel_sdvo_set_target_output(intel_sdvo, | ||
1959 | device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1)) | ||
1960 | return false; | ||
1961 | |||
1962 | is_hdmi = 0; | ||
1963 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1)) | ||
1964 | return false; | ||
1965 | |||
1966 | return !!is_hdmi; | ||
1967 | } | 1933 | } |
1968 | 1934 | ||
1969 | static u8 | 1935 | static u8 |
@@ -2065,10 +2031,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2065 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2031 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2066 | 2032 | ||
2067 | if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { | 2033 | if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { |
2068 | /* enable hdmi encoding mode if supported */ | ||
2069 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); | ||
2070 | intel_sdvo_set_colorimetry(intel_sdvo, | ||
2071 | SDVO_COLORIMETRY_RGB256); | ||
2072 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2034 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
2073 | intel_sdvo->is_hdmi = true; | 2035 | intel_sdvo->is_hdmi = true; |
2074 | } | 2036 | } |
@@ -2076,8 +2038,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2076 | (1 << INTEL_ANALOG_CLONE_BIT)); | 2038 | (1 << INTEL_ANALOG_CLONE_BIT)); |
2077 | 2039 | ||
2078 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); | 2040 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2079 | 2041 | if (intel_sdvo->is_hdmi) | |
2080 | intel_sdvo_add_hdmi_properties(intel_sdvo_connector); | 2042 | intel_sdvo_add_hdmi_properties(intel_sdvo_connector); |
2081 | 2043 | ||
2082 | return true; | 2044 | return true; |
2083 | } | 2045 | } |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 2f7681989316..93206e4eaa6f 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1245 | int type; | 1245 | int type; |
1246 | 1246 | ||
1247 | /* Disable TV interrupts around load detect or we'll recurse */ | 1247 | /* Disable TV interrupts around load detect or we'll recurse */ |
1248 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1248 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1249 | i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | | 1249 | i915_disable_pipestat(dev_priv, 0, |
1250 | PIPE_HOTPLUG_INTERRUPT_ENABLE | | ||
1250 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | 1251 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); |
1251 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1252 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1252 | 1253 | ||
1253 | save_tv_dac = tv_dac = I915_READ(TV_DAC); | 1254 | save_tv_dac = tv_dac = I915_READ(TV_DAC); |
1254 | save_tv_ctl = tv_ctl = I915_READ(TV_CTL); | 1255 | save_tv_ctl = tv_ctl = I915_READ(TV_CTL); |
@@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv) | |||
1301 | I915_WRITE(TV_CTL, save_tv_ctl); | 1302 | I915_WRITE(TV_CTL, save_tv_ctl); |
1302 | 1303 | ||
1303 | /* Restore interrupt config */ | 1304 | /* Restore interrupt config */ |
1304 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1305 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1305 | i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | | 1306 | i915_enable_pipestat(dev_priv, 0, |
1307 | PIPE_HOTPLUG_INTERRUPT_ENABLE | | ||
1306 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); | 1308 | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); |
1307 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1309 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1308 | 1310 | ||
1309 | return type; | 1311 | return type; |
1310 | } | 1312 | } |
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index 08868ac3048a..1e1eb1d7e971 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c | |||
@@ -703,7 +703,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev, | |||
703 | static int mga_do_dma_bootstrap(struct drm_device *dev, | 703 | static int mga_do_dma_bootstrap(struct drm_device *dev, |
704 | drm_mga_dma_bootstrap_t *dma_bs) | 704 | drm_mga_dma_bootstrap_t *dma_bs) |
705 | { | 705 | { |
706 | const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); | 706 | const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev); |
707 | int err; | 707 | int err; |
708 | drm_mga_private_t *const dev_priv = | 708 | drm_mga_private_t *const dev_priv = |
709 | (drm_mga_private_t *) dev->dev_private; | 709 | (drm_mga_private_t *) dev->dev_private; |
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 0aaf5f67a436..42d31874edf2 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c | |||
@@ -75,10 +75,6 @@ static struct drm_driver driver = { | |||
75 | #endif | 75 | #endif |
76 | .llseek = noop_llseek, | 76 | .llseek = noop_llseek, |
77 | }, | 77 | }, |
78 | .pci_driver = { | ||
79 | .name = DRIVER_NAME, | ||
80 | .id_table = pciidlist, | ||
81 | }, | ||
82 | 78 | ||
83 | .name = DRIVER_NAME, | 79 | .name = DRIVER_NAME, |
84 | .desc = DRIVER_DESC, | 80 | .desc = DRIVER_DESC, |
@@ -88,15 +84,20 @@ static struct drm_driver driver = { | |||
88 | .patchlevel = DRIVER_PATCHLEVEL, | 84 | .patchlevel = DRIVER_PATCHLEVEL, |
89 | }; | 85 | }; |
90 | 86 | ||
87 | static struct pci_driver mga_pci_driver = { | ||
88 | .name = DRIVER_NAME, | ||
89 | .id_table = pciidlist, | ||
90 | }; | ||
91 | |||
91 | static int __init mga_init(void) | 92 | static int __init mga_init(void) |
92 | { | 93 | { |
93 | driver.num_ioctls = mga_max_ioctl; | 94 | driver.num_ioctls = mga_max_ioctl; |
94 | return drm_init(&driver); | 95 | return drm_pci_init(&driver, &mga_pci_driver); |
95 | } | 96 | } |
96 | 97 | ||
97 | static void __exit mga_exit(void) | 98 | static void __exit mga_exit(void) |
98 | { | 99 | { |
99 | drm_exit(&driver); | 100 | drm_pci_exit(&driver, &mga_pci_driver); |
100 | } | 101 | } |
101 | 102 | ||
102 | module_init(mga_init); | 103 | module_init(mga_init); |
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 21d6c29c2d21..de70959b9ed5 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig | |||
@@ -8,7 +8,7 @@ config DRM_NOUVEAU | |||
8 | select FB_CFB_COPYAREA | 8 | select FB_CFB_COPYAREA |
9 | select FB_CFB_IMAGEBLIT | 9 | select FB_CFB_IMAGEBLIT |
10 | select FB | 10 | select FB |
11 | select FRAMEBUFFER_CONSOLE if !EMBEDDED | 11 | select FRAMEBUFFER_CONSOLE if !EXPERT |
12 | select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT | 12 | select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT |
13 | select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT | 13 | select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT |
14 | help | 14 | help |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 119152606e4c..a54238058dc5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
@@ -130,10 +130,15 @@ static int nouveau_dsm_init(void) | |||
130 | 130 | ||
131 | static int nouveau_dsm_get_client_id(struct pci_dev *pdev) | 131 | static int nouveau_dsm_get_client_id(struct pci_dev *pdev) |
132 | { | 132 | { |
133 | if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) | 133 | /* easy option one - intel vendor ID means Integrated */ |
134 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | ||
134 | return VGA_SWITCHEROO_IGD; | 135 | return VGA_SWITCHEROO_IGD; |
135 | else | 136 | |
136 | return VGA_SWITCHEROO_DIS; | 137 | /* is this device on Bus 0? - this may need improving */ |
138 | if (pdev->bus->number == 0) | ||
139 | return VGA_SWITCHEROO_IGD; | ||
140 | |||
141 | return VGA_SWITCHEROO_DIS; | ||
137 | } | 142 | } |
138 | 143 | ||
139 | static struct vga_switcheroo_handler nouveau_dsm_handler = { | 144 | static struct vga_switcheroo_handler nouveau_dsm_handler = { |
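[Annotation] The rewritten nouveau_dsm_get_client_id() above drops the ACPI-handle comparison in favour of two cheap heuristics: an Intel vendor ID always means the integrated GPU, and failing that, a device on bus 0 is taken as integrated. A self-contained sketch of that decision (the enum values are stand-ins for VGA_SWITCHEROO_IGD/DIS):

    #include <stdio.h>

    #define PCI_VENDOR_ID_INTEL 0x8086

    enum { IGD, DIS };      /* stand-ins for VGA_SWITCHEROO_{IGD,DIS} */

    static int client_id(unsigned short vendor, unsigned char bus)
    {
            if (vendor == PCI_VENDOR_ID_INTEL)
                    return IGD;     /* "easy option one" */
            if (bus == 0)
                    return IGD;     /* bus-0 heuristic, flagged "may need improving" */
            return DIS;
    }

    int main(void)
    {
            /* e.g. a discrete NVIDIA GPU on bus 1 */
            printf("%s\n", client_id(0x10de, 1) == DIS ? "DIS" : "IGD");
            return 0;
    }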
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index b14c81110575..d3a9c6e02477 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
@@ -59,7 +59,7 @@ static int nv40_set_intensity(struct backlight_device *bd) | |||
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | static struct backlight_ops nv40_bl_ops = { | 62 | static const struct backlight_ops nv40_bl_ops = { |
63 | .options = BL_CORE_SUSPENDRESUME, | 63 | .options = BL_CORE_SUSPENDRESUME, |
64 | .get_brightness = nv40_get_intensity, | 64 | .get_brightness = nv40_get_intensity, |
65 | .update_status = nv40_set_intensity, | 65 | .update_status = nv40_set_intensity, |
@@ -82,7 +82,7 @@ static int nv50_set_intensity(struct backlight_device *bd) | |||
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | 84 | ||
85 | static struct backlight_ops nv50_bl_ops = { | 85 | static const struct backlight_ops nv50_bl_ops = { |
86 | .options = BL_CORE_SUSPENDRESUME, | 86 | .options = BL_CORE_SUSPENDRESUME, |
87 | .get_brightness = nv50_get_intensity, | 87 | .get_brightness = nv50_get_intensity, |
88 | .update_status = nv50_set_intensity, | 88 | .update_status = nv50_set_intensity, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 6faf3cfc74b9..6bdab891c64e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -1927,7 +1927,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
1927 | * offset (8 bit): opcode | 1927 | * offset (8 bit): opcode |
1928 | * offset + 1 (16 bit): time | 1928 | * offset + 1 (16 bit): time |
1929 | * | 1929 | * |
1930 | * Sleep for "time" miliseconds. | 1930 | * Sleep for "time" milliseconds. |
1931 | */ | 1931 | */ |
1932 | 1932 | ||
1933 | unsigned time = ROM16(bios->data[offset + 1]); | 1933 | unsigned time = ROM16(bios->data[offset + 1]); |
@@ -1935,7 +1935,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
1935 | if (!iexec->execute) | 1935 | if (!iexec->execute) |
1936 | return 3; | 1936 | return 3; |
1937 | 1937 | ||
1938 | BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X miliseconds\n", | 1938 | BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n", |
1939 | offset, time); | 1939 | offset, time); |
1940 | 1940 | ||
1941 | msleep(time); | 1941 | msleep(time); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index bb170570938b..155ebdcbf06f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -171,6 +171,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
171 | if (pm_state.event == PM_EVENT_PRETHAW) | 171 | if (pm_state.event == PM_EVENT_PRETHAW) |
172 | return 0; | 172 | return 0; |
173 | 173 | ||
174 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
175 | return 0; | ||
176 | |||
174 | NV_INFO(dev, "Disabling fbcon acceleration...\n"); | 177 | NV_INFO(dev, "Disabling fbcon acceleration...\n"); |
175 | nouveau_fbcon_save_disable_accel(dev); | 178 | nouveau_fbcon_save_disable_accel(dev); |
176 | 179 | ||
@@ -231,9 +234,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
231 | pci_set_power_state(pdev, PCI_D3hot); | 234 | pci_set_power_state(pdev, PCI_D3hot); |
232 | } | 235 | } |
233 | 236 | ||
234 | acquire_console_sem(); | 237 | console_lock(); |
235 | nouveau_fbcon_set_suspend(dev, 1); | 238 | nouveau_fbcon_set_suspend(dev, 1); |
236 | release_console_sem(); | 239 | console_unlock(); |
237 | nouveau_fbcon_restore_accel(dev); | 240 | nouveau_fbcon_restore_accel(dev); |
238 | return 0; | 241 | return 0; |
239 | 242 | ||
@@ -254,6 +257,9 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
254 | struct drm_crtc *crtc; | 257 | struct drm_crtc *crtc; |
255 | int ret, i; | 258 | int ret, i; |
256 | 259 | ||
260 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
261 | return 0; | ||
262 | |||
257 | nouveau_fbcon_save_disable_accel(dev); | 263 | nouveau_fbcon_save_disable_accel(dev); |
258 | 264 | ||
259 | NV_INFO(dev, "We're back, enabling device...\n"); | 265 | NV_INFO(dev, "We're back, enabling device...\n"); |
@@ -353,9 +359,9 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
353 | nv_crtc->lut.depth = 0; | 359 | nv_crtc->lut.depth = 0; |
354 | } | 360 | } |
355 | 361 | ||
356 | acquire_console_sem(); | 362 | console_lock(); |
357 | nouveau_fbcon_set_suspend(dev, 0); | 363 | nouveau_fbcon_set_suspend(dev, 0); |
358 | release_console_sem(); | 364 | console_unlock(); |
359 | 365 | ||
360 | nouveau_fbcon_zfill_all(dev); | 366 | nouveau_fbcon_zfill_all(dev); |
361 | 367 | ||
@@ -402,14 +408,6 @@ static struct drm_driver driver = { | |||
402 | #endif | 408 | #endif |
403 | .llseek = noop_llseek, | 409 | .llseek = noop_llseek, |
404 | }, | 410 | }, |
405 | .pci_driver = { | ||
406 | .name = DRIVER_NAME, | ||
407 | .id_table = pciidlist, | ||
408 | .probe = nouveau_pci_probe, | ||
409 | .remove = nouveau_pci_remove, | ||
410 | .suspend = nouveau_pci_suspend, | ||
411 | .resume = nouveau_pci_resume | ||
412 | }, | ||
413 | 411 | ||
414 | .gem_init_object = nouveau_gem_object_new, | 412 | .gem_init_object = nouveau_gem_object_new, |
415 | .gem_free_object = nouveau_gem_object_del, | 413 | .gem_free_object = nouveau_gem_object_del, |
@@ -426,6 +424,15 @@ static struct drm_driver driver = { | |||
426 | .patchlevel = DRIVER_PATCHLEVEL, | 424 | .patchlevel = DRIVER_PATCHLEVEL, |
427 | }; | 425 | }; |
428 | 426 | ||
427 | static struct pci_driver nouveau_pci_driver = { | ||
428 | .name = DRIVER_NAME, | ||
429 | .id_table = pciidlist, | ||
430 | .probe = nouveau_pci_probe, | ||
431 | .remove = nouveau_pci_remove, | ||
432 | .suspend = nouveau_pci_suspend, | ||
433 | .resume = nouveau_pci_resume | ||
434 | }; | ||
435 | |||
429 | static int __init nouveau_init(void) | 436 | static int __init nouveau_init(void) |
430 | { | 437 | { |
431 | driver.num_ioctls = nouveau_max_ioctl; | 438 | driver.num_ioctls = nouveau_max_ioctl; |
@@ -443,7 +450,7 @@ static int __init nouveau_init(void) | |||
443 | return 0; | 450 | return 0; |
444 | 451 | ||
445 | nouveau_register_dsm_handler(); | 452 | nouveau_register_dsm_handler(); |
446 | return drm_init(&driver); | 453 | return drm_pci_init(&driver, &nouveau_pci_driver); |
447 | } | 454 | } |
448 | 455 | ||
449 | static void __exit nouveau_exit(void) | 456 | static void __exit nouveau_exit(void) |
@@ -451,7 +458,7 @@ static void __exit nouveau_exit(void) | |||
451 | if (!nouveau_modeset) | 458 | if (!nouveau_modeset) |
452 | return; | 459 | return; |
453 | 460 | ||
454 | drm_exit(&driver); | 461 | drm_pci_exit(&driver, &nouveau_pci_driver); |
455 | nouveau_unregister_dsm_handler(); | 462 | nouveau_unregister_dsm_handler(); |
456 | } | 463 | } |
457 | 464 | ||
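[Annotation] Both PM callbacks above gain the same early-out: once vga_switcheroo has powered the card off, a regular system suspend or resume must not touch it again, otherwise the hardware would be saved or restored twice. In sketch form (the '...' elides the unchanged body):

    /* shape of the guard added to nouveau_pci_suspend()/resume() */
    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
            return 0;       /* card already off via switcheroo: no-op */
    /* ... normal suspend/resume path ... */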
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index ca707d18d06e..9821fcacc3d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -754,6 +754,8 @@ struct drm_nouveau_private { | |||
754 | 754 | ||
755 | struct nouveau_fbdev *nfbdev; | 755 | struct nouveau_fbdev *nfbdev; |
756 | struct apertures_struct *apertures; | 756 | struct apertures_struct *apertures; |
757 | |||
758 | bool powered_down; | ||
757 | }; | 759 | }; |
758 | 760 | ||
759 | static inline struct drm_nouveau_private * | 761 | static inline struct drm_nouveau_private * |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index f988362c112e..60769d2f9a66 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -362,10 +362,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, | |||
362 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); | 362 | drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); |
363 | drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); | 363 | drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); |
364 | 364 | ||
365 | /* FIXME: we really shouldn't expose mmio space at all */ | ||
366 | info->fix.mmio_start = pci_resource_start(pdev, 1); | ||
367 | info->fix.mmio_len = pci_resource_len(pdev, 1); | ||
368 | |||
369 | /* Set aperture base/size for vesafb takeover */ | 365 | /* Set aperture base/size for vesafb takeover */ |
370 | info->apertures = dev_priv->apertures; | 366 | info->apertures = dev_priv->apertures; |
371 | if (!info->apertures) { | 367 | if (!info->apertures) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 26347b7cd872..123969dd4f56 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -480,7 +480,7 @@ nouveau_mem_gart_init(struct drm_device *dev) | |||
480 | dev_priv->gart_info.type = NOUVEAU_GART_NONE; | 480 | dev_priv->gart_info.type = NOUVEAU_GART_NONE; |
481 | 481 | ||
482 | #if !defined(__powerpc__) && !defined(__ia64__) | 482 | #if !defined(__powerpc__) && !defined(__ia64__) |
483 | if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) { | 483 | if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) { |
484 | ret = nouveau_mem_init_agp(dev); | 484 | ret = nouveau_mem_init_agp(dev); |
485 | if (ret) | 485 | if (ret) |
486 | NV_ERROR(dev, "Error initialising AGP: %d\n", ret); | 486 | NV_ERROR(dev, "Error initialising AGP: %d\n", ret); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 30b6544467ca..03adfe4c7665 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -909,7 +909,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | |||
909 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); | 909 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); |
910 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); | 910 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); |
911 | 911 | ||
912 | if (chan->ramin_heap.free_stack.next) | 912 | if (drm_mm_initialized(&chan->ramin_heap)) |
913 | drm_mm_takedown(&chan->ramin_heap); | 913 | drm_mm_takedown(&chan->ramin_heap); |
914 | nouveau_gpuobj_ref(NULL, &chan->ramin); | 914 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
915 | } | 915 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 9a250eb53098..07b115184b87 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be { | |||
12 | struct drm_device *dev; | 12 | struct drm_device *dev; |
13 | 13 | ||
14 | dma_addr_t *pages; | 14 | dma_addr_t *pages; |
15 | bool *ttm_alloced; | ||
15 | unsigned nr_pages; | 16 | unsigned nr_pages; |
16 | 17 | ||
17 | u64 offset; | 18 | u64 offset; |
@@ -20,7 +21,8 @@ struct nouveau_sgdma_be { | |||
20 | 21 | ||
21 | static int | 22 | static int |
22 | nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, | 23 | nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, |
23 | struct page **pages, struct page *dummy_read_page) | 24 | struct page **pages, struct page *dummy_read_page, |
25 | dma_addr_t *dma_addrs) | ||
24 | { | 26 | { |
25 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; | 27 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; |
26 | struct drm_device *dev = nvbe->dev; | 28 | struct drm_device *dev = nvbe->dev; |
@@ -34,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, | |||
34 | if (!nvbe->pages) | 36 | if (!nvbe->pages) |
35 | return -ENOMEM; | 37 | return -ENOMEM; |
36 | 38 | ||
39 | nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); | ||
40 | if (!nvbe->ttm_alloced) | ||
41 | return -ENOMEM; | ||
42 | |||
37 | nvbe->nr_pages = 0; | 43 | nvbe->nr_pages = 0; |
38 | while (num_pages--) { | 44 | while (num_pages--) { |
39 | nvbe->pages[nvbe->nr_pages] = | 45 | if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { |
40 | pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0, | 46 | nvbe->pages[nvbe->nr_pages] = |
47 | dma_addrs[nvbe->nr_pages]; | ||
48 | nvbe->ttm_alloced[nvbe->nr_pages] = true; | ||
49 | } else { | ||
50 | nvbe->pages[nvbe->nr_pages] = | ||
51 | pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0, | ||
41 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 52 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
42 | if (pci_dma_mapping_error(dev->pdev, | 53 | if (pci_dma_mapping_error(dev->pdev, |
43 | nvbe->pages[nvbe->nr_pages])) { | 54 | nvbe->pages[nvbe->nr_pages])) { |
44 | be->func->clear(be); | 55 | be->func->clear(be); |
45 | return -EFAULT; | 56 | return -EFAULT; |
57 | } | ||
46 | } | 58 | } |
47 | 59 | ||
48 | nvbe->nr_pages++; | 60 | nvbe->nr_pages++; |
@@ -65,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be) | |||
65 | be->func->unbind(be); | 77 | be->func->unbind(be); |
66 | 78 | ||
67 | while (nvbe->nr_pages--) { | 79 | while (nvbe->nr_pages--) { |
68 | pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], | 80 | if (!nvbe->ttm_alloced[nvbe->nr_pages]) |
81 | pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], | ||
69 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 82 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
70 | } | 83 | } |
71 | kfree(nvbe->pages); | 84 | kfree(nvbe->pages); |
85 | kfree(nvbe->ttm_alloced); | ||
72 | nvbe->pages = NULL; | 86 | nvbe->pages = NULL; |
87 | nvbe->ttm_alloced = NULL; | ||
73 | nvbe->nr_pages = 0; | 88 | nvbe->nr_pages = 0; |
74 | } | 89 | } |
75 | } | 90 | } |
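[Annotation] populate() can now be handed pre-mapped DMA addresses; the new ttm_alloced[] array records which pages those were so that clear() skips pci_unmap_page() for them and only unmaps pages the backend mapped itself. A user-space toy of that bookkeeping (DMA_ERROR_CODE's value is arch-specific in the kernel; 0 is assumed here purely for the demo):

    #include <stdbool.h>
    #include <stdio.h>

    #define DMA_ERROR_CODE 0UL      /* assumed sentinel for the demo */

    int main(void)
    {
            /* pages 0 and 2 arrive pre-mapped, 1 and 3 do not */
            unsigned long dma_addrs[4] = { 0x1000, DMA_ERROR_CODE,
                                           0x3000, DMA_ERROR_CODE };
            bool ttm_alloced[4];
            int i;

            for (i = 0; i < 4; i++) {
                    ttm_alloced[i] = (dma_addrs[i] != DMA_ERROR_CODE);
                    printf("page %d: %s\n", i,
                           ttm_alloced[i] ? "take dma_addrs[i], skip unmap"
                                          : "pci_map_page(), unmap in clear()");
            }
            return 0;
    }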
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index d5b17b6ccd3a..2148d01354da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -596,15 +596,25 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev, | |||
596 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 596 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
597 | if (state == VGA_SWITCHEROO_ON) { | 597 | if (state == VGA_SWITCHEROO_ON) { |
598 | printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); | 598 | printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); |
599 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
599 | nouveau_pci_resume(pdev); | 600 | nouveau_pci_resume(pdev); |
600 | drm_kms_helper_poll_enable(dev); | 601 | drm_kms_helper_poll_enable(dev); |
602 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | ||
601 | } else { | 603 | } else { |
602 | printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); | 604 | printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); |
605 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
603 | drm_kms_helper_poll_disable(dev); | 606 | drm_kms_helper_poll_disable(dev); |
604 | nouveau_pci_suspend(pdev, pmm); | 607 | nouveau_pci_suspend(pdev, pmm); |
608 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | ||
605 | } | 609 | } |
606 | } | 610 | } |
607 | 611 | ||
612 | static void nouveau_switcheroo_reprobe(struct pci_dev *pdev) | ||
613 | { | ||
614 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
615 | nouveau_fbcon_output_poll_changed(dev); | ||
616 | } | ||
617 | |||
608 | static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) | 618 | static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) |
609 | { | 619 | { |
610 | struct drm_device *dev = pci_get_drvdata(pdev); | 620 | struct drm_device *dev = pci_get_drvdata(pdev); |
@@ -625,6 +635,7 @@ nouveau_card_init(struct drm_device *dev) | |||
625 | 635 | ||
626 | vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); | 636 | vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); |
627 | vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, | 637 | vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, |
638 | nouveau_switcheroo_reprobe, | ||
628 | nouveau_switcheroo_can_switch); | 639 | nouveau_switcheroo_can_switch); |
629 | 640 | ||
630 | /* Initialise internal driver API hooks */ | 641 | /* Initialise internal driver API hooks */ |
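[Annotation] nouveau_switcheroo_set_state() now brackets each transition with DRM_SWITCH_POWER_CHANGING before calling into the PCI suspend/resume paths; that is what lets those paths tell a real switch (CHANGING) apart from an attempt to suspend an already-off card (OFF) and bail out early. The transition bracket, condensed from the hunk above:

    dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
    if (state == VGA_SWITCHEROO_ON) {
            nouveau_pci_resume(pdev);       /* guard sees CHANGING, runs */
            drm_kms_helper_poll_enable(dev);
            dev->switch_power_state = DRM_SWITCH_POWER_ON;
    } else {
            drm_kms_helper_poll_disable(dev);
            nouveau_pci_suspend(pdev, pmm);
            dev->switch_power_state = DRM_SWITCH_POWER_OFF;
    }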
@@ -1054,6 +1065,7 @@ err_out: | |||
1054 | 1065 | ||
1055 | void nouveau_lastclose(struct drm_device *dev) | 1066 | void nouveau_lastclose(struct drm_device *dev) |
1056 | { | 1067 | { |
1068 | vga_switcheroo_process_delayed_switch(); | ||
1057 | } | 1069 | } |
1058 | 1070 | ||
1059 | int nouveau_unload(struct drm_device *dev) | 1071 | int nouveau_unload(struct drm_device *dev) |
@@ -1091,9 +1103,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
1091 | getparam->value = dev->pci_device; | 1103 | getparam->value = dev->pci_device; |
1092 | break; | 1104 | break; |
1093 | case NOUVEAU_GETPARAM_BUS_TYPE: | 1105 | case NOUVEAU_GETPARAM_BUS_TYPE: |
1094 | if (drm_device_is_agp(dev)) | 1106 | if (drm_pci_device_is_agp(dev)) |
1095 | getparam->value = NV_AGP; | 1107 | getparam->value = NV_AGP; |
1096 | else if (drm_device_is_pcie(dev)) | 1108 | else if (drm_pci_device_is_pcie(dev)) |
1097 | getparam->value = NV_PCIE; | 1109 | getparam->value = NV_PCIE; |
1098 | else | 1110 | else |
1099 | getparam->value = NV_PCI; | 1111 | getparam->value = NV_PCI; |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 28119fd19d03..3900cebba560 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
@@ -197,10 +197,12 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder, | |||
197 | struct drm_connector *connector) | 197 | struct drm_connector *connector) |
198 | { | 198 | { |
199 | struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); | 199 | struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); |
200 | struct drm_display_mode *mode, *tv_mode; | 200 | const struct drm_display_mode *tv_mode; |
201 | int n = 0; | 201 | int n = 0; |
202 | 202 | ||
203 | for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { | 203 | for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { |
204 | struct drm_display_mode *mode; | ||
205 | |||
204 | mode = drm_mode_duplicate(encoder->dev, tv_mode); | 206 | mode = drm_mode_duplicate(encoder->dev, tv_mode); |
205 | 207 | ||
206 | mode->clock = tv_norm->tv_enc_mode.vrefresh * | 208 | mode->clock = tv_norm->tv_enc_mode.vrefresh * |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h index 6bf03840f9eb..622e72221682 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.h +++ b/drivers/gpu/drm/nouveau/nv17_tv.h | |||
@@ -112,7 +112,7 @@ extern struct nv17_tv_norm_params { | |||
112 | } nv17_tv_norms[NUM_TV_NORMS]; | 112 | } nv17_tv_norms[NUM_TV_NORMS]; |
113 | #define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm]) | 113 | #define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm]) |
114 | 114 | ||
115 | extern struct drm_display_mode nv17_tv_modes[]; | 115 | extern const struct drm_display_mode nv17_tv_modes[]; |
116 | 116 | ||
117 | static inline int interpolate(int y0, int y1, int y2, int x) | 117 | static inline int interpolate(int y0, int y1, int y2, int x) |
118 | { | 118 | { |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c index 9d3893c50a41..4d1d29f60307 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c +++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c | |||
@@ -438,7 +438,7 @@ void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state) | |||
438 | 438 | ||
439 | /* Timings similar to the ones the blob sets */ | 439 | /* Timings similar to the ones the blob sets */ |
440 | 440 | ||
441 | struct drm_display_mode nv17_tv_modes[] = { | 441 | const struct drm_display_mode nv17_tv_modes[] = { |
442 | { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0, | 442 | { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0, |
443 | 320, 344, 392, 560, 0, 200, 200, 202, 220, 0, | 443 | 320, 344, 392, 560, 0, 200, 200, 202, 220, 0, |
444 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 444 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index ea0041810ae3..300285ae8e9e 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -56,7 +56,7 @@ nv50_channel_del(struct nouveau_channel **pchan) | |||
56 | nouveau_gpuobj_ref(NULL, &chan->ramfc); | 56 | nouveau_gpuobj_ref(NULL, &chan->ramfc); |
57 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); | 57 | nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); |
58 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); | 58 | nouveau_gpuobj_ref(NULL, &chan->vm_pd); |
59 | if (chan->ramin_heap.free_stack.next) | 59 | if (drm_mm_initialized(&chan->ramin_heap)) |
60 | drm_mm_takedown(&chan->ramin_heap); | 60 | drm_mm_takedown(&chan->ramin_heap); |
61 | nouveau_gpuobj_ref(NULL, &chan->ramin); | 61 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
62 | kfree(chan); | 62 | kfree(chan); |
@@ -259,7 +259,7 @@ nv50_instmem_takedown(struct drm_device *dev) | |||
259 | nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); | 259 | nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); |
260 | nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); | 260 | nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); |
261 | 261 | ||
262 | if (dev_priv->ramin_heap.free_stack.next) | 262 | if (drm_mm_initialized(&dev_priv->ramin_heap)) |
263 | drm_mm_takedown(&dev_priv->ramin_heap); | 263 | drm_mm_takedown(&dev_priv->ramin_heap); |
264 | 264 | ||
265 | dev_priv->engine.instmem.priv = NULL; | 265 | dev_priv->engine.instmem.priv = NULL; |
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index c09091749054..82357d2df1f4 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c | |||
@@ -67,7 +67,7 @@ nvc0_channel_del(struct nouveau_channel **pchan) | |||
67 | return; | 67 | return; |
68 | 68 | ||
69 | nouveau_vm_ref(NULL, &chan->vm, NULL); | 69 | nouveau_vm_ref(NULL, &chan->vm, NULL); |
70 | if (chan->ramin_heap.free_stack.next) | 70 | if (drm_mm_initialized(&chan->ramin_heap)) |
71 | drm_mm_takedown(&chan->ramin_heap); | 71 | drm_mm_takedown(&chan->ramin_heap); |
72 | nouveau_gpuobj_ref(NULL, &chan->ramin); | 72 | nouveau_gpuobj_ref(NULL, &chan->ramin); |
73 | kfree(chan); | 73 | kfree(chan); |
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 18c3c71e41b1..b9e8efd2b754 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c | |||
@@ -71,10 +71,7 @@ static struct drm_driver driver = { | |||
71 | #endif | 71 | #endif |
72 | .llseek = noop_llseek, | 72 | .llseek = noop_llseek, |
73 | }, | 73 | }, |
74 | .pci_driver = { | 74 | |
75 | .name = DRIVER_NAME, | ||
76 | .id_table = pciidlist, | ||
77 | }, | ||
78 | 75 | ||
79 | .name = DRIVER_NAME, | 76 | .name = DRIVER_NAME, |
80 | .desc = DRIVER_DESC, | 77 | .desc = DRIVER_DESC, |
@@ -89,16 +86,21 @@ int r128_driver_load(struct drm_device *dev, unsigned long flags) | |||
89 | return drm_vblank_init(dev, 1); | 86 | return drm_vblank_init(dev, 1); |
90 | } | 87 | } |
91 | 88 | ||
89 | static struct pci_driver r128_pci_driver = { | ||
90 | .name = DRIVER_NAME, | ||
91 | .id_table = pciidlist, | ||
92 | }; | ||
93 | |||
92 | static int __init r128_init(void) | 94 | static int __init r128_init(void) |
93 | { | 95 | { |
94 | driver.num_ioctls = r128_max_ioctl; | 96 | driver.num_ioctls = r128_max_ioctl; |
95 | 97 | ||
96 | return drm_init(&driver); | 98 | return drm_pci_init(&driver, &r128_pci_driver); |
97 | } | 99 | } |
98 | 100 | ||
99 | static void __exit r128_exit(void) | 101 | static void __exit r128_exit(void) |
100 | { | 102 | { |
101 | drm_exit(&driver); | 103 | drm_pci_exit(&driver, &r128_pci_driver); |
102 | } | 104 | } |
103 | 105 | ||
104 | module_init(r128_init); | 106 | module_init(r128_init); |
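[Annotation] mga, r128 and nouveau all receive the same mechanical conversion in this merge: the pci_driver is lifted out of struct drm_driver into a standalone object and handed to the new drm_pci_init()/drm_pci_exit() entry points in place of drm_init()/drm_exit(). The generic shape, as a sketch with "foo" as a placeholder driver name:

    static struct pci_driver foo_pci_driver = {
            .name     = DRIVER_NAME,
            .id_table = pciidlist,
            /* probe/remove/suspend/resume hooks go here when the
             * driver has them, as nouveau's does */
    };

    static int __init foo_init(void)
    {
            driver.num_ioctls = foo_max_ioctl;
            return drm_pci_init(&driver, &foo_pci_driver);
    }

    static void __exit foo_exit(void)
    {
            drm_pci_exit(&driver, &foo_pci_driver);
    }

    module_init(foo_init);
    module_exit(foo_exit);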
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index e97e6f842699..e47eecfc2df4 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -66,7 +66,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
68 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ | 68 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ |
69 | radeon_trace_points.o | 69 | radeon_trace_points.o ni.o |
70 | 70 | ||
71 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 71 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
72 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 72 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 8e421f644a54..258fa5e7a2d9 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
112 | base += 3; | 112 | base += 3; |
113 | break; | 113 | break; |
114 | case ATOM_IIO_WRITE: | 114 | case ATOM_IIO_WRITE: |
115 | (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); | ||
115 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); | 116 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); |
116 | base += 3; | 117 | base += 3; |
117 | break; | 118 | break; |
@@ -733,16 +734,16 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) | |||
733 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) | 734 | static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) |
734 | { | 735 | { |
735 | uint8_t attr = U8((*ptr)++); | 736 | uint8_t attr = U8((*ptr)++); |
736 | uint32_t dst, src1, src2, saved; | 737 | uint32_t dst, mask, src, saved; |
737 | int dptr = *ptr; | 738 | int dptr = *ptr; |
738 | SDEBUG(" dst: "); | 739 | SDEBUG(" dst: "); |
739 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 740 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
740 | SDEBUG(" src1: "); | 741 | mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); |
741 | src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); | 742 | SDEBUG(" mask: 0x%08x", mask); |
742 | SDEBUG(" src2: "); | 743 | SDEBUG(" src: "); |
743 | src2 = atom_get_src(ctx, attr, ptr); | 744 | src = atom_get_src(ctx, attr, ptr); |
744 | dst &= src1; | 745 | dst &= mask; |
745 | dst |= src2; | 746 | dst |= src; |
746 | SDEBUG(" dst: "); | 747 | SDEBUG(" dst: "); |
747 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | 748 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); |
748 | } | 749 | } |
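The rename in atom_op_mask above makes the opcode's read-modify-write intent explicit: keep the destination bits selected by the mask, then OR in the source. The same operation as a pure function, with a worked value (helper name is illustrative):

#include <stdint.h>

/* keep the dst bits selected by 'mask', then OR in 'src' */
static uint32_t atom_mask_op(uint32_t dst, uint32_t mask, uint32_t src)
{
	return (dst & mask) | src;
}

/* e.g. atom_mask_op(0xAABBCCDD, 0xFFFF0000, 0x00001234) == 0xAABB1234 */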
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 58a0cd02c0a2..04b269d14a59 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
@@ -1629,7 +1629,7 @@ typedef struct _GET_ENGINE_CLOCK_PARAMETERS | |||
1629 | typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS | 1629 | typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS |
1630 | { | 1630 | { |
1631 | USHORT usPrescale; //Ratio between Engine clock and I2C clock | 1631 | USHORT usPrescale; //Ratio between Engine clock and I2C clock |
1632 | USHORT usVRAMAddress; //Adress in Frame Buffer where to place raw EDID | 1632 | USHORT usVRAMAddress; //Address in Frame Buffer where to place raw EDID |
1633 | USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status | 1633 | USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status |
1634 | //When use input: lower byte as 'byte to read':currently limited to 128byte or 1byte | 1634 | //When use input: lower byte as 'byte to read':currently limited to 128byte or 1byte |
1635 | UCHAR ucSlaveAddr; //Read from which slave | 1635 | UCHAR ucSlaveAddr; //Read from which slave |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index df2b6f2b35f8..d56f08d3cbdc 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -253,7 +253,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
253 | case DRM_MODE_DPMS_SUSPEND: | 253 | case DRM_MODE_DPMS_SUSPEND: |
254 | case DRM_MODE_DPMS_OFF: | 254 | case DRM_MODE_DPMS_OFF: |
255 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | 255 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); |
256 | atombios_blank_crtc(crtc, ATOM_ENABLE); | 256 | if (radeon_crtc->enabled) |
257 | atombios_blank_crtc(crtc, ATOM_ENABLE); | ||
257 | if (ASIC_IS_DCE3(rdev)) | 258 | if (ASIC_IS_DCE3(rdev)) |
258 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); | 259 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); |
259 | atombios_enable_crtc(crtc, ATOM_DISABLE); | 260 | atombios_enable_crtc(crtc, ATOM_DISABLE); |
@@ -402,6 +403,7 @@ union atom_enable_ss { | |||
402 | ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; | 403 | ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; |
403 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; | 404 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; |
404 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; | 405 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; |
406 | ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3; | ||
405 | }; | 407 | }; |
406 | 408 | ||
407 | static void atombios_crtc_program_ss(struct drm_crtc *crtc, | 409 | static void atombios_crtc_program_ss(struct drm_crtc *crtc, |
@@ -416,7 +418,30 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, | |||
416 | 418 | ||
417 | memset(&args, 0, sizeof(args)); | 419 | memset(&args, 0, sizeof(args)); |
418 | 420 | ||
419 | if (ASIC_IS_DCE4(rdev)) { | 421 | if (ASIC_IS_DCE5(rdev)) { |
422 | args.v3.usSpreadSpectrumAmountFrac = 0; | ||
423 | args.v3.ucSpreadSpectrumType = ss->type; | ||
424 | switch (pll_id) { | ||
425 | case ATOM_PPLL1: | ||
426 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; | ||
427 | args.v3.usSpreadSpectrumAmount = ss->amount; | ||
428 | args.v3.usSpreadSpectrumStep = ss->step; | ||
429 | break; | ||
430 | case ATOM_PPLL2: | ||
431 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; | ||
432 | args.v3.usSpreadSpectrumAmount = ss->amount; | ||
433 | args.v3.usSpreadSpectrumStep = ss->step; | ||
434 | break; | ||
435 | case ATOM_DCPLL: | ||
436 | args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; | ||
437 | args.v3.usSpreadSpectrumAmount = 0; | ||
438 | args.v3.usSpreadSpectrumStep = 0; | ||
439 | break; | ||
440 | case ATOM_PPLL_INVALID: | ||
441 | return; | ||
442 | } | ||
443 | args.v2.ucEnable = enable; | ||
444 | } else if (ASIC_IS_DCE4(rdev)) { | ||
420 | args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | 445 | args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); |
421 | args.v2.ucSpreadSpectrumType = ss->type; | 446 | args.v2.ucSpreadSpectrumType = ss->type; |
422 | switch (pll_id) { | 447 | switch (pll_id) { |
@@ -530,7 +555,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
530 | dp_clock = dig_connector->dp_clock; | 555 | dp_clock = dig_connector->dp_clock; |
531 | } | 556 | } |
532 | } | 557 | } |
533 | 558 | /* this might work properly with the new pll algo */ | |
559 | #if 0 /* doesn't work properly on some laptops */ | ||
534 | /* use recommended ref_div for ss */ | 560 | /* use recommended ref_div for ss */ |
535 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 561 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
536 | if (ss_enabled) { | 562 | if (ss_enabled) { |
@@ -540,13 +566,18 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
540 | } | 566 | } |
541 | } | 567 | } |
542 | } | 568 | } |
543 | 569 | #endif | |
544 | if (ASIC_IS_AVIVO(rdev)) { | 570 | if (ASIC_IS_AVIVO(rdev)) { |
545 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 571 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
546 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 572 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
547 | adjusted_clock = mode->clock * 2; | 573 | adjusted_clock = mode->clock * 2; |
548 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | 574 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
549 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; | 575 | pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; |
576 | /* rv515 needs more testing with this option */ | ||
577 | if (rdev->family != CHIP_RV515) { | ||
578 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
579 | pll->flags |= RADEON_PLL_IS_LCD; | ||
580 | } | ||
550 | } else { | 581 | } else { |
551 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 582 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
552 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | 583 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
@@ -581,14 +612,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
581 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); | 612 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); |
582 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; | 613 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; |
583 | args.v1.ucEncodeMode = encoder_mode; | 614 | args.v1.ucEncodeMode = encoder_mode; |
584 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 615 | if (ss_enabled) |
585 | if (ss_enabled) | ||
586 | args.v1.ucConfig |= | ||
587 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | ||
588 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | ||
589 | args.v1.ucConfig |= | 616 | args.v1.ucConfig |= |
590 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | 617 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; |
591 | } | ||
592 | 618 | ||
593 | atom_execute_table(rdev->mode_info.atom_context, | 619 | atom_execute_table(rdev->mode_info.atom_context, |
594 | index, (uint32_t *)&args); | 620 | index, (uint32_t *)&args); |
@@ -599,12 +625,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
599 | args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; | 625 | args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; |
600 | args.v3.sInput.ucEncodeMode = encoder_mode; | 626 | args.v3.sInput.ucEncodeMode = encoder_mode; |
601 | args.v3.sInput.ucDispPllConfig = 0; | 627 | args.v3.sInput.ucDispPllConfig = 0; |
628 | if (ss_enabled) | ||
629 | args.v3.sInput.ucDispPllConfig |= | ||
630 | DISPPLL_CONFIG_SS_ENABLE; | ||
602 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 631 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
603 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 632 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
604 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 633 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
605 | if (ss_enabled) | ||
606 | args.v3.sInput.ucDispPllConfig |= | ||
607 | DISPPLL_CONFIG_SS_ENABLE; | ||
608 | args.v3.sInput.ucDispPllConfig |= | 634 | args.v3.sInput.ucDispPllConfig |= |
609 | DISPPLL_CONFIG_COHERENT_MODE; | 635 | DISPPLL_CONFIG_COHERENT_MODE; |
610 | /* 16200 or 27000 */ | 636 | /* 16200 or 27000 */ |
@@ -624,18 +650,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
624 | } | 650 | } |
625 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 651 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
626 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 652 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
627 | if (ss_enabled) | ||
628 | args.v3.sInput.ucDispPllConfig |= | ||
629 | DISPPLL_CONFIG_SS_ENABLE; | ||
630 | args.v3.sInput.ucDispPllConfig |= | 653 | args.v3.sInput.ucDispPllConfig |= |
631 | DISPPLL_CONFIG_COHERENT_MODE; | 654 | DISPPLL_CONFIG_COHERENT_MODE; |
632 | /* 16200 or 27000 */ | 655 | /* 16200 or 27000 */ |
633 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | 656 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); |
634 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | 657 | } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) { |
635 | if (ss_enabled) | ||
636 | args.v3.sInput.ucDispPllConfig |= | ||
637 | DISPPLL_CONFIG_SS_ENABLE; | ||
638 | } else { | ||
639 | if (mode->clock > 165000) | 658 | if (mode->clock > 165000) |
640 | args.v3.sInput.ucDispPllConfig |= | 659 | args.v3.sInput.ucDispPllConfig |= |
641 | DISPPLL_CONFIG_DUAL_LINK; | 660 | DISPPLL_CONFIG_DUAL_LINK; |
@@ -672,9 +691,14 @@ union set_pixel_clock { | |||
672 | PIXEL_CLOCK_PARAMETERS_V2 v2; | 691 | PIXEL_CLOCK_PARAMETERS_V2 v2; |
673 | PIXEL_CLOCK_PARAMETERS_V3 v3; | 692 | PIXEL_CLOCK_PARAMETERS_V3 v3; |
674 | PIXEL_CLOCK_PARAMETERS_V5 v5; | 693 | PIXEL_CLOCK_PARAMETERS_V5 v5; |
694 | PIXEL_CLOCK_PARAMETERS_V6 v6; | ||
675 | }; | 695 | }; |
676 | 696 | ||
677 | static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) | 697 | /* on DCE5, make sure the voltage is high enough to support the |
698 | * required disp clk. | ||
699 | */ | ||
700 | static void atombios_crtc_set_dcpll(struct drm_crtc *crtc, | ||
701 | u32 dispclk) | ||
678 | { | 702 | { |
679 | struct drm_device *dev = crtc->dev; | 703 | struct drm_device *dev = crtc->dev; |
680 | struct radeon_device *rdev = dev->dev_private; | 704 | struct radeon_device *rdev = dev->dev_private; |
@@ -697,9 +721,16 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) | |||
697 | * SetPixelClock provides the dividers | 721 | * SetPixelClock provides the dividers |
698 | */ | 722 | */ |
699 | args.v5.ucCRTC = ATOM_CRTC_INVALID; | 723 | args.v5.ucCRTC = ATOM_CRTC_INVALID; |
700 | args.v5.usPixelClock = rdev->clock.default_dispclk; | 724 | args.v5.usPixelClock = dispclk; |
701 | args.v5.ucPpll = ATOM_DCPLL; | 725 | args.v5.ucPpll = ATOM_DCPLL; |
702 | break; | 726 | break; |
727 | case 6: | ||
728 | /* if the default dcpll clock is specified, | ||
729 | * SetPixelClock provides the dividers | ||
730 | */ | ||
731 | args.v6.ulDispEngClkFreq = dispclk; | ||
732 | args.v6.ucPpll = ATOM_DCPLL; | ||
733 | break; | ||
703 | default: | 734 | default: |
704 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | 735 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); |
705 | return; | 736 | return; |
@@ -783,6 +814,18 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, | |||
783 | args.v5.ucEncoderMode = encoder_mode; | 814 | args.v5.ucEncoderMode = encoder_mode; |
784 | args.v5.ucPpll = pll_id; | 815 | args.v5.ucPpll = pll_id; |
785 | break; | 816 | break; |
817 | case 6: | ||
818 | args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id; | ||
819 | args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10); | ||
820 | args.v6.ucRefDiv = ref_div; | ||
821 | args.v6.usFbDiv = cpu_to_le16(fb_div); | ||
822 | args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); | ||
823 | args.v6.ucPostDiv = post_div; | ||
824 | args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ | ||
825 | args.v6.ucTransmitterID = encoder_id; | ||
826 | args.v6.ucEncoderMode = encoder_mode; | ||
827 | args.v6.ucPpll = pll_id; | ||
828 | break; | ||
786 | default: | 829 | default: |
787 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | 830 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); |
788 | return; | 831 | return; |
@@ -914,8 +957,16 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
914 | /* adjust pixel clock as needed */ | 957 | /* adjust pixel clock as needed */ |
915 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); | 958 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); |
916 | 959 | ||
917 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | 960 | /* rv515 seems happier with the old algo */ |
918 | &ref_div, &post_div); | 961 | if (rdev->family == CHIP_RV515) |
962 | radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
963 | &ref_div, &post_div); | ||
964 | else if (ASIC_IS_AVIVO(rdev)) | ||
965 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
966 | &ref_div, &post_div); | ||
967 | else | ||
968 | radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | ||
969 | &ref_div, &post_div); | ||
919 | 970 | ||
920 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); | 971 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); |
921 | 972 | ||
@@ -957,6 +1008,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
957 | struct radeon_bo *rbo; | 1008 | struct radeon_bo *rbo; |
958 | uint64_t fb_location; | 1009 | uint64_t fb_location; |
959 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 1010 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
1011 | u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); | ||
960 | int r; | 1012 | int r; |
961 | 1013 | ||
962 | /* no fb bound */ | 1014 | /* no fb bound */ |
@@ -978,7 +1030,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
978 | * just update base pointers | 1030 | * just update base pointers |
979 | */ | 1031 | */ |
980 | obj = radeon_fb->obj; | 1032 | obj = radeon_fb->obj; |
981 | rbo = obj->driver_private; | 1033 | rbo = gem_to_radeon_bo(obj); |
982 | r = radeon_bo_reserve(rbo, false); | 1034 | r = radeon_bo_reserve(rbo, false); |
983 | if (unlikely(r != 0)) | 1035 | if (unlikely(r != 0)) |
984 | return r; | 1036 | return r; |
@@ -1008,11 +1060,17 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
1008 | case 16: | 1060 | case 16: |
1009 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | | 1061 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | |
1010 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); | 1062 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); |
1063 | #ifdef __BIG_ENDIAN | ||
1064 | fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); | ||
1065 | #endif | ||
1011 | break; | 1066 | break; |
1012 | case 24: | 1067 | case 24: |
1013 | case 32: | 1068 | case 32: |
1014 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | | 1069 | fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | |
1015 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); | 1070 | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); |
1071 | #ifdef __BIG_ENDIAN | ||
1072 | fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); | ||
1073 | #endif | ||
1016 | break; | 1074 | break; |
1017 | default: | 1075 | default: |
1018 | DRM_ERROR("Unsupported screen depth %d\n", | 1076 | DRM_ERROR("Unsupported screen depth %d\n", |
@@ -1057,6 +1115,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
1057 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, | 1115 | WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
1058 | (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); | 1116 | (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); |
1059 | WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); | 1117 | WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); |
1118 | WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); | ||
1060 | 1119 | ||
1061 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); | 1120 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); |
1062 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); | 1121 | WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); |
@@ -1086,7 +1145,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, | |||
1086 | 1145 | ||
1087 | if (!atomic && fb && fb != crtc->fb) { | 1146 | if (!atomic && fb && fb != crtc->fb) { |
1088 | radeon_fb = to_radeon_framebuffer(fb); | 1147 | radeon_fb = to_radeon_framebuffer(fb); |
1089 | rbo = radeon_fb->obj->driver_private; | 1148 | rbo = gem_to_radeon_bo(radeon_fb->obj); |
1090 | r = radeon_bo_reserve(rbo, false); | 1149 | r = radeon_bo_reserve(rbo, false); |
1091 | if (unlikely(r != 0)) | 1150 | if (unlikely(r != 0)) |
1092 | return r; | 1151 | return r; |
@@ -1113,6 +1172,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1113 | struct drm_framebuffer *target_fb; | 1172 | struct drm_framebuffer *target_fb; |
1114 | uint64_t fb_location; | 1173 | uint64_t fb_location; |
1115 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 1174 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
1175 | u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; | ||
1116 | int r; | 1176 | int r; |
1117 | 1177 | ||
1118 | /* no fb bound */ | 1178 | /* no fb bound */ |
@@ -1131,7 +1191,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1131 | } | 1191 | } |
1132 | 1192 | ||
1133 | obj = radeon_fb->obj; | 1193 | obj = radeon_fb->obj; |
1134 | rbo = obj->driver_private; | 1194 | rbo = gem_to_radeon_bo(obj); |
1135 | r = radeon_bo_reserve(rbo, false); | 1195 | r = radeon_bo_reserve(rbo, false); |
1136 | if (unlikely(r != 0)) | 1196 | if (unlikely(r != 0)) |
1137 | return r; | 1197 | return r; |
@@ -1166,12 +1226,18 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1166 | fb_format = | 1226 | fb_format = |
1167 | AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | | 1227 | AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | |
1168 | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; | 1228 | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; |
1229 | #ifdef __BIG_ENDIAN | ||
1230 | fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; | ||
1231 | #endif | ||
1169 | break; | 1232 | break; |
1170 | case 24: | 1233 | case 24: |
1171 | case 32: | 1234 | case 32: |
1172 | fb_format = | 1235 | fb_format = |
1173 | AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | | 1236 | AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | |
1174 | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; | 1237 | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; |
1238 | #ifdef __BIG_ENDIAN | ||
1239 | fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT; | ||
1240 | #endif | ||
1175 | break; | 1241 | break; |
1176 | default: | 1242 | default: |
1177 | DRM_ERROR("Unsupported screen depth %d\n", | 1243 | DRM_ERROR("Unsupported screen depth %d\n", |
@@ -1211,6 +1277,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1211 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + | 1277 | WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + |
1212 | radeon_crtc->crtc_offset, (u32) fb_location); | 1278 | radeon_crtc->crtc_offset, (u32) fb_location); |
1213 | WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); | 1279 | WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); |
1280 | if (rdev->family >= CHIP_R600) | ||
1281 | WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); | ||
1214 | 1282 | ||
1215 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); | 1283 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); |
1216 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); | 1284 | WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); |
@@ -1240,7 +1308,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, | |||
1240 | 1308 | ||
1241 | if (!atomic && fb && fb != crtc->fb) { | 1309 | if (!atomic && fb && fb != crtc->fb) { |
1242 | radeon_fb = to_radeon_framebuffer(fb); | 1310 | radeon_fb = to_radeon_framebuffer(fb); |
1243 | rbo = radeon_fb->obj->driver_private; | 1311 | rbo = gem_to_radeon_bo(radeon_fb->obj); |
1244 | r = radeon_bo_reserve(rbo, false); | 1312 | r = radeon_bo_reserve(rbo, false); |
1245 | if (unlikely(r != 0)) | 1313 | if (unlikely(r != 0)) |
1246 | return r; | 1314 | return r; |
@@ -1376,7 +1444,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1376 | rdev->clock.default_dispclk); | 1444 | rdev->clock.default_dispclk); |
1377 | if (ss_enabled) | 1445 | if (ss_enabled) |
1378 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); | 1446 | atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); |
1379 | atombios_crtc_set_dcpll(crtc); | 1447 | /* XXX: DCE5, make sure voltage, dispclk is high enough */ |
1448 | atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk); | ||
1380 | if (ss_enabled) | 1449 | if (ss_enabled) |
1381 | atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); | 1450 | atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); |
1382 | } | 1451 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 4e7778d44b8d..695de9a38506 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -187,9 +187,9 @@ static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | |||
187 | int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | 187 | int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) |
188 | { | 188 | { |
189 | int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); | 189 | int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); |
190 | int bw = dp_lanes_for_mode_clock(dpcd, mode_clock); | 190 | int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock); |
191 | 191 | ||
192 | if ((lanes == 0) || (bw == 0)) | 192 | if ((lanes == 0) || (dp_clock == 0)) |
193 | return MODE_CLOCK_HIGH; | 193 | return MODE_CLOCK_HIGH; |
194 | 194 | ||
195 | return MODE_OK; | 195 | return MODE_OK; |
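The one-line fix above matters because both operands of the check were accidentally the lane count, so the link clock was never consulted. For reference, the two quantities feed the usual 8b/10b payload bound, where each lane carries one data byte per link-clock symbol; a hedged sketch with a worked number (helper name is illustrative):

/* highest pixel clock a DP link can carry, all clocks in kHz */
static int dp_max_pix_clock_khz(int link_clock_khz, int lanes, int bpp)
{
	return (link_clock_khz * lanes * 8) / bpp;
}

/* e.g. 270000 kHz link x 4 lanes at 24 bpp -> modes up to 360000 kHz */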
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f7d7477daffb..ffdc8332b76e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -39,6 +39,7 @@ | |||
39 | 39 | ||
40 | static void evergreen_gpu_init(struct radeon_device *rdev); | 40 | static void evergreen_gpu_init(struct radeon_device *rdev); |
41 | void evergreen_fini(struct radeon_device *rdev); | 41 | void evergreen_fini(struct radeon_device *rdev); |
42 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | ||
42 | 43 | ||
43 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) | 44 | void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) |
44 | { | 45 | { |
@@ -96,26 +97,29 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
96 | } | 97 | } |
97 | 98 | ||
98 | /* get temperature in millidegrees */ | 99 | /* get temperature in millidegrees */ |
99 | u32 evergreen_get_temp(struct radeon_device *rdev) | 100 | int evergreen_get_temp(struct radeon_device *rdev) |
100 | { | 101 | { |
101 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> | 102 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> |
102 | ASIC_T_SHIFT; | 103 | ASIC_T_SHIFT; |
103 | u32 actual_temp = 0; | 104 | u32 actual_temp = 0; |
104 | 105 | ||
105 | if ((temp >> 10) & 1) | 106 | if (temp & 0x400) |
106 | actual_temp = 0; | 107 | actual_temp = -256; |
107 | else if ((temp >> 9) & 1) | 108 | else if (temp & 0x200) |
108 | actual_temp = 255; | 109 | actual_temp = 255; |
109 | else | 110 | else if (temp & 0x100) { |
110 | actual_temp = (temp >> 1) & 0xff; | 111 | actual_temp = temp & 0x1ff; |
112 | actual_temp |= ~0x1ff; | ||
113 | } else | ||
114 | actual_temp = temp & 0xff; | ||
111 | 115 | ||
112 | return actual_temp * 1000; | 116 | return (actual_temp * 1000) / 2; |
113 | } | 117 | } |
114 | 118 | ||
115 | u32 sumo_get_temp(struct radeon_device *rdev) | 119 | int sumo_get_temp(struct radeon_device *rdev) |
116 | { | 120 | { |
117 | u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; | 121 | u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; |
118 | u32 actual_temp = (temp >> 1) & 0xff; | 122 | int actual_temp = temp - 49; |
119 | 123 | ||
120 | return actual_temp * 1000; | 124 | return actual_temp * 1000; |
121 | } | 125 | } |
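The reworked evergreen_get_temp treats ASIC_T as a signed reading in half-degree steps instead of an unsigned byte, which is why the return type becomes int and the result is halved. A self-contained model of the new decode (function name is illustrative):

#include <stdint.h>

static int decode_asic_temp(uint32_t temp)
{
	int actual_temp;

	if (temp & 0x400)		/* deep negative clamp */
		actual_temp = -256;
	else if (temp & 0x200)		/* positive clamp */
		actual_temp = 255;
	else if (temp & 0x100) {	/* negative: sign-extend 9 bits */
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;	/* half-degrees -> millidegrees */
}

/* e.g. decode_asic_temp(0x5a) == 45000, i.e. raw 90 is 45.0 degrees C */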
@@ -400,16 +404,28 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, | |||
400 | case 0: | 404 | case 0: |
401 | case 4: | 405 | case 4: |
402 | default: | 406 | default: |
403 | return 3840 * 2; | 407 | if (ASIC_IS_DCE5(rdev)) |
408 | return 4096 * 2; | ||
409 | else | ||
410 | return 3840 * 2; | ||
404 | case 1: | 411 | case 1: |
405 | case 5: | 412 | case 5: |
406 | return 5760 * 2; | 413 | if (ASIC_IS_DCE5(rdev)) |
414 | return 6144 * 2; | ||
415 | else | ||
416 | return 5760 * 2; | ||
407 | case 2: | 417 | case 2: |
408 | case 6: | 418 | case 6: |
409 | return 7680 * 2; | 419 | if (ASIC_IS_DCE5(rdev)) |
420 | return 8192 * 2; | ||
421 | else | ||
422 | return 7680 * 2; | ||
410 | case 3: | 423 | case 3: |
411 | case 7: | 424 | case 7: |
412 | return 1920 * 2; | 425 | if (ASIC_IS_DCE5(rdev)) |
426 | return 2048 * 2; | ||
427 | else | ||
428 | return 1920 * 2; | ||
413 | } | 429 | } |
414 | } | 430 | } |
415 | 431 | ||
@@ -811,6 +827,8 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
811 | unsigned i; | 827 | unsigned i; |
812 | u32 tmp; | 828 | u32 tmp; |
813 | 829 | ||
830 | WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | ||
831 | |||
814 | WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); | 832 | WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); |
815 | for (i = 0; i < rdev->usec_timeout; i++) { | 833 | for (i = 0; i < rdev->usec_timeout; i++) { |
816 | /* read MC_STATUS */ | 834 | /* read MC_STATUS */ |
@@ -1144,7 +1162,7 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
1144 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 1162 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
1145 | WREG32(MC_VM_FB_LOCATION, tmp); | 1163 | WREG32(MC_VM_FB_LOCATION, tmp); |
1146 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 1164 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
1147 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 1165 | WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); |
1148 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); | 1166 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
1149 | if (rdev->flags & RADEON_IS_AGP) { | 1167 | if (rdev->flags & RADEON_IS_AGP) { |
1150 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); | 1168 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
@@ -1167,6 +1185,18 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
1167 | /* | 1185 | /* |
1168 | * CP. | 1186 | * CP. |
1169 | */ | 1187 | */ |
1188 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | ||
1189 | { | ||
1190 | /* set to DX10/11 mode */ | ||
1191 | radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); | ||
1192 | radeon_ring_write(rdev, 1); | ||
1193 | /* FIXME: implement */ | ||
1194 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||
1195 | radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); | ||
1196 | radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); | ||
1197 | radeon_ring_write(rdev, ib->length_dw); | ||
1198 | } | ||
1199 | |||
1170 | 1200 | ||
1171 | static int evergreen_cp_load_microcode(struct radeon_device *rdev) | 1201 | static int evergreen_cp_load_microcode(struct radeon_device *rdev) |
1172 | { | 1202 | { |
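The new evergreen_ring_ib_execute above splits the indirect buffer's 64-bit GPU address across two ring dwords: a low word forced to 4-byte alignment and a high byte carrying address bits 39:32. A tiny sketch with a worked value (helper name is illustrative):

#include <stdint.h>

static void split_ib_addr(uint64_t gpu_addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)gpu_addr & 0xFFFFFFFCu;		/* dword-aligned low word */
	*hi = (uint32_t)(gpu_addr >> 32) & 0xFF;	/* address bits 39:32 */
}

/* e.g. 0x0000001234567890 -> lo = 0x34567890, hi = 0x12 */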
@@ -1218,7 +1248,7 @@ static int evergreen_cp_start(struct radeon_device *rdev) | |||
1218 | cp_me = 0xff; | 1248 | cp_me = 0xff; |
1219 | WREG32(CP_ME_CNTL, cp_me); | 1249 | WREG32(CP_ME_CNTL, cp_me); |
1220 | 1250 | ||
1221 | r = radeon_ring_lock(rdev, evergreen_default_size + 15); | 1251 | r = radeon_ring_lock(rdev, evergreen_default_size + 19); |
1222 | if (r) { | 1252 | if (r) { |
1223 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | 1253 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); |
1224 | return r; | 1254 | return r; |
@@ -1251,6 +1281,11 @@ static int evergreen_cp_start(struct radeon_device *rdev) | |||
1251 | radeon_ring_write(rdev, 0xffffffff); | 1281 | radeon_ring_write(rdev, 0xffffffff); |
1252 | radeon_ring_write(rdev, 0xffffffff); | 1282 | radeon_ring_write(rdev, 0xffffffff); |
1253 | 1283 | ||
1284 | radeon_ring_write(rdev, 0xc0026900); | ||
1285 | radeon_ring_write(rdev, 0x00000316); | ||
1286 | radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | ||
1287 | radeon_ring_write(rdev, 0x00000010); /* */ | ||
1288 | |||
1254 | radeon_ring_unlock_commit(rdev); | 1289 | radeon_ring_unlock_commit(rdev); |
1255 | 1290 | ||
1256 | return 0; | 1291 | return 0; |
@@ -1369,11 +1404,14 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
1369 | case CHIP_CEDAR: | 1404 | case CHIP_CEDAR: |
1370 | case CHIP_REDWOOD: | 1405 | case CHIP_REDWOOD: |
1371 | case CHIP_PALM: | 1406 | case CHIP_PALM: |
1407 | case CHIP_TURKS: | ||
1408 | case CHIP_CAICOS: | ||
1372 | force_no_swizzle = false; | 1409 | force_no_swizzle = false; |
1373 | break; | 1410 | break; |
1374 | case CHIP_CYPRESS: | 1411 | case CHIP_CYPRESS: |
1375 | case CHIP_HEMLOCK: | 1412 | case CHIP_HEMLOCK: |
1376 | case CHIP_JUNIPER: | 1413 | case CHIP_JUNIPER: |
1414 | case CHIP_BARTS: | ||
1377 | default: | 1415 | default: |
1378 | force_no_swizzle = true; | 1416 | force_no_swizzle = true; |
1379 | break; | 1417 | break; |
@@ -1487,6 +1525,7 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev) | |||
1487 | switch (rdev->family) { | 1525 | switch (rdev->family) { |
1488 | case CHIP_HEMLOCK: | 1526 | case CHIP_HEMLOCK: |
1489 | case CHIP_CYPRESS: | 1527 | case CHIP_CYPRESS: |
1528 | case CHIP_BARTS: | ||
1490 | tcp_chan_steer_lo = 0x54763210; | 1529 | tcp_chan_steer_lo = 0x54763210; |
1491 | tcp_chan_steer_hi = 0x0000ba98; | 1530 | tcp_chan_steer_hi = 0x0000ba98; |
1492 | break; | 1531 | break; |
@@ -1494,6 +1533,8 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev) | |||
1494 | case CHIP_REDWOOD: | 1533 | case CHIP_REDWOOD: |
1495 | case CHIP_CEDAR: | 1534 | case CHIP_CEDAR: |
1496 | case CHIP_PALM: | 1535 | case CHIP_PALM: |
1536 | case CHIP_TURKS: | ||
1537 | case CHIP_CAICOS: | ||
1497 | default: | 1538 | default: |
1498 | tcp_chan_steer_lo = 0x76543210; | 1539 | tcp_chan_steer_lo = 0x76543210; |
1499 | tcp_chan_steer_hi = 0x0000ba98; | 1540 | tcp_chan_steer_hi = 0x0000ba98; |
@@ -1637,6 +1678,69 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1637 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1678 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1638 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1679 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1639 | break; | 1680 | break; |
1681 | case CHIP_BARTS: | ||
1682 | rdev->config.evergreen.num_ses = 2; | ||
1683 | rdev->config.evergreen.max_pipes = 4; | ||
1684 | rdev->config.evergreen.max_tile_pipes = 8; | ||
1685 | rdev->config.evergreen.max_simds = 7; | ||
1686 | rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; | ||
1687 | rdev->config.evergreen.max_gprs = 256; | ||
1688 | rdev->config.evergreen.max_threads = 248; | ||
1689 | rdev->config.evergreen.max_gs_threads = 32; | ||
1690 | rdev->config.evergreen.max_stack_entries = 512; | ||
1691 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1692 | rdev->config.evergreen.sx_max_export_size = 256; | ||
1693 | rdev->config.evergreen.sx_max_export_pos_size = 64; | ||
1694 | rdev->config.evergreen.sx_max_export_smx_size = 192; | ||
1695 | rdev->config.evergreen.max_hw_contexts = 8; | ||
1696 | rdev->config.evergreen.sq_num_cf_insts = 2; | ||
1697 | |||
1698 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | ||
1699 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1700 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1701 | break; | ||
1702 | case CHIP_TURKS: | ||
1703 | rdev->config.evergreen.num_ses = 1; | ||
1704 | rdev->config.evergreen.max_pipes = 4; | ||
1705 | rdev->config.evergreen.max_tile_pipes = 4; | ||
1706 | rdev->config.evergreen.max_simds = 6; | ||
1707 | rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; | ||
1708 | rdev->config.evergreen.max_gprs = 256; | ||
1709 | rdev->config.evergreen.max_threads = 248; | ||
1710 | rdev->config.evergreen.max_gs_threads = 32; | ||
1711 | rdev->config.evergreen.max_stack_entries = 256; | ||
1712 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1713 | rdev->config.evergreen.sx_max_export_size = 256; | ||
1714 | rdev->config.evergreen.sx_max_export_pos_size = 64; | ||
1715 | rdev->config.evergreen.sx_max_export_smx_size = 192; | ||
1716 | rdev->config.evergreen.max_hw_contexts = 8; | ||
1717 | rdev->config.evergreen.sq_num_cf_insts = 2; | ||
1718 | |||
1719 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | ||
1720 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1721 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1722 | break; | ||
1723 | case CHIP_CAICOS: | ||
1724 | rdev->config.evergreen.num_ses = 1; | ||
1725 | rdev->config.evergreen.max_pipes = 4; | ||
1726 | rdev->config.evergreen.max_tile_pipes = 2; | ||
1727 | rdev->config.evergreen.max_simds = 2; | ||
1728 | rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; | ||
1729 | rdev->config.evergreen.max_gprs = 256; | ||
1730 | rdev->config.evergreen.max_threads = 192; | ||
1731 | rdev->config.evergreen.max_gs_threads = 16; | ||
1732 | rdev->config.evergreen.max_stack_entries = 256; | ||
1733 | rdev->config.evergreen.sx_num_of_sets = 4; | ||
1734 | rdev->config.evergreen.sx_max_export_size = 128; | ||
1735 | rdev->config.evergreen.sx_max_export_pos_size = 32; | ||
1736 | rdev->config.evergreen.sx_max_export_smx_size = 96; | ||
1737 | rdev->config.evergreen.max_hw_contexts = 4; | ||
1738 | rdev->config.evergreen.sq_num_cf_insts = 1; | ||
1739 | |||
1740 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | ||
1741 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | ||
1742 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | ||
1743 | break; | ||
1640 | } | 1744 | } |
1641 | 1745 | ||
1642 | /* Initialize HDP */ | 1746 | /* Initialize HDP */ |
@@ -1778,6 +1882,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1778 | switch (rdev->family) { | 1882 | switch (rdev->family) { |
1779 | case CHIP_CYPRESS: | 1883 | case CHIP_CYPRESS: |
1780 | case CHIP_HEMLOCK: | 1884 | case CHIP_HEMLOCK: |
1885 | case CHIP_BARTS: | ||
1781 | gb_backend_map = 0x66442200; | 1886 | gb_backend_map = 0x66442200; |
1782 | break; | 1887 | break; |
1783 | case CHIP_JUNIPER: | 1888 | case CHIP_JUNIPER: |
@@ -1916,6 +2021,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1916 | switch (rdev->family) { | 2021 | switch (rdev->family) { |
1917 | case CHIP_CEDAR: | 2022 | case CHIP_CEDAR: |
1918 | case CHIP_PALM: | 2023 | case CHIP_PALM: |
2024 | case CHIP_CAICOS: | ||
1919 | /* no vertex cache */ | 2025 | /* no vertex cache */ |
1920 | sq_config &= ~VC_ENABLE; | 2026 | sq_config &= ~VC_ENABLE; |
1921 | break; | 2027 | break; |
@@ -1975,6 +2081,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1975 | switch (rdev->family) { | 2081 | switch (rdev->family) { |
1976 | case CHIP_CEDAR: | 2082 | case CHIP_CEDAR: |
1977 | case CHIP_PALM: | 2083 | case CHIP_PALM: |
2084 | case CHIP_CAICOS: | ||
1978 | vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); | 2085 | vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); |
1979 | break; | 2086 | break; |
1980 | default: | 2087 | default: |
@@ -1985,6 +2092,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1985 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); | 2092 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); |
1986 | 2093 | ||
1987 | WREG32(VGT_GS_VERTEX_REUSE, 16); | 2094 | WREG32(VGT_GS_VERTEX_REUSE, 16); |
2095 | WREG32(PA_SU_LINE_STIPPLE_VALUE, 0); | ||
1988 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | 2096 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); |
1989 | 2097 | ||
1990 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); | 2098 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); |
@@ -2083,16 +2191,40 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
2083 | 2191 | ||
2084 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev) | 2192 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev) |
2085 | { | 2193 | { |
2086 | /* FIXME: implement for evergreen */ | 2194 | u32 srbm_status; |
2087 | return false; | 2195 | u32 grbm_status; |
2196 | u32 grbm_status_se0, grbm_status_se1; | ||
2197 | struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup; | ||
2198 | int r; | ||
2199 | |||
2200 | srbm_status = RREG32(SRBM_STATUS); | ||
2201 | grbm_status = RREG32(GRBM_STATUS); | ||
2202 | grbm_status_se0 = RREG32(GRBM_STATUS_SE0); | ||
2203 | grbm_status_se1 = RREG32(GRBM_STATUS_SE1); | ||
2204 | if (!(grbm_status & GUI_ACTIVE)) { | ||
2205 | r100_gpu_lockup_update(lockup, &rdev->cp); | ||
2206 | return false; | ||
2207 | } | ||
2208 | /* force CP activities */ | ||
2209 | r = radeon_ring_lock(rdev, 2); | ||
2210 | if (!r) { | ||
2211 | /* PACKET2 NOP */ | ||
2212 | radeon_ring_write(rdev, 0x80000000); | ||
2213 | radeon_ring_write(rdev, 0x80000000); | ||
2214 | radeon_ring_unlock_commit(rdev); | ||
2215 | } | ||
2216 | rdev->cp.rptr = RREG32(CP_RB_RPTR); | ||
2217 | return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); | ||
2088 | } | 2218 | } |
2089 | 2219 | ||
2090 | static int evergreen_gpu_soft_reset(struct radeon_device *rdev) | 2220 | static int evergreen_gpu_soft_reset(struct radeon_device *rdev) |
2091 | { | 2221 | { |
2092 | struct evergreen_mc_save save; | 2222 | struct evergreen_mc_save save; |
2093 | u32 srbm_reset = 0; | ||
2094 | u32 grbm_reset = 0; | 2223 | u32 grbm_reset = 0; |
2095 | 2224 | ||
2225 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
2226 | return 0; | ||
2227 | |||
2096 | dev_info(rdev->dev, "GPU softreset \n"); | 2228 | dev_info(rdev->dev, "GPU softreset \n"); |
2097 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", | 2229 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
2098 | RREG32(GRBM_STATUS)); | 2230 | RREG32(GRBM_STATUS)); |
@@ -2129,16 +2261,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) | |||
2129 | udelay(50); | 2261 | udelay(50); |
2130 | WREG32(GRBM_SOFT_RESET, 0); | 2262 | WREG32(GRBM_SOFT_RESET, 0); |
2131 | (void)RREG32(GRBM_SOFT_RESET); | 2263 | (void)RREG32(GRBM_SOFT_RESET); |
2132 | |||
2133 | /* reset all the system blocks */ | ||
2134 | srbm_reset = SRBM_SOFT_RESET_ALL_MASK; | ||
2135 | |||
2136 | dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); | ||
2137 | WREG32(SRBM_SOFT_RESET, srbm_reset); | ||
2138 | (void)RREG32(SRBM_SOFT_RESET); | ||
2139 | udelay(50); | ||
2140 | WREG32(SRBM_SOFT_RESET, 0); | ||
2141 | (void)RREG32(SRBM_SOFT_RESET); | ||
2142 | /* Wait a little for things to settle down */ | 2264 | /* Wait a little for things to settle down */ |
2143 | udelay(50); | 2265 | udelay(50); |
2144 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", | 2266 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
@@ -2149,10 +2271,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) | |||
2149 | RREG32(GRBM_STATUS_SE1)); | 2271 | RREG32(GRBM_STATUS_SE1)); |
2150 | dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", | 2272 | dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", |
2151 | RREG32(SRBM_STATUS)); | 2273 | RREG32(SRBM_STATUS)); |
2152 | /* After reset we need to reinit the asic as GPU often endup in an | ||
2153 | * incoherent state. | ||
2154 | */ | ||
2155 | atom_asic_init(rdev->mode_info.atom_context); | ||
2156 | evergreen_mc_resume(rdev, &save); | 2274 | evergreen_mc_resume(rdev, &save); |
2157 | return 0; | 2275 | return 0; |
2158 | } | 2276 | } |
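The evergreen_gpu_is_lockup rewrite above replaces the stub with the heuristic the r100 helpers implement: an idle graphics block is never hung; a busy one gets the ring kicked with two NOPs, and the hang verdict rests on whether the CP read pointer advances. A toy model of that idea, ignoring the real helper's timekeeping (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct lockup_state {
	uint32_t last_rptr;	/* ring read pointer at the previous check */
};

static bool cp_is_lockup(struct lockup_state *s, bool gui_active, uint32_t rptr)
{
	if (!gui_active || rptr != s->last_rptr) {
		s->last_rptr = rptr;	/* idle, or the CP made progress */
		return false;
	}
	return true;			/* busy and stuck at the same rptr */
}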
@@ -2747,7 +2865,7 @@ restart_ih: | |||
2747 | if (wptr != rdev->ih.wptr) | 2865 | if (wptr != rdev->ih.wptr) |
2748 | goto restart_ih; | 2866 | goto restart_ih; |
2749 | if (queue_hotplug) | 2867 | if (queue_hotplug) |
2750 | queue_work(rdev->wq, &rdev->hotplug_work); | 2868 | schedule_work(&rdev->hotplug_work); |
2751 | rdev->ih.rptr = rptr; | 2869 | rdev->ih.rptr = rptr; |
2752 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | 2870 | WREG32(IH_RB_RPTR, rdev->ih.rptr); |
2753 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | 2871 | spin_unlock_irqrestore(&rdev->ih.lock, flags); |
@@ -2758,12 +2876,31 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2758 | { | 2876 | { |
2759 | int r; | 2877 | int r; |
2760 | 2878 | ||
2761 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 2879 | /* enable pcie gen2 link */ |
2762 | r = r600_init_microcode(rdev); | 2880 | if (!ASIC_IS_DCE5(rdev)) |
2881 | evergreen_pcie_gen2_enable(rdev); | ||
2882 | |||
2883 | if (ASIC_IS_DCE5(rdev)) { | ||
2884 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | ||
2885 | r = ni_init_microcode(rdev); | ||
2886 | if (r) { | ||
2887 | DRM_ERROR("Failed to load firmware!\n"); | ||
2888 | return r; | ||
2889 | } | ||
2890 | } | ||
2891 | r = btc_mc_load_microcode(rdev); | ||
2763 | if (r) { | 2892 | if (r) { |
2764 | DRM_ERROR("Failed to load firmware!\n"); | 2893 | DRM_ERROR("Failed to load MC firmware!\n"); |
2765 | return r; | 2894 | return r; |
2766 | } | 2895 | } |
2896 | } else { | ||
2897 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
2898 | r = r600_init_microcode(rdev); | ||
2899 | if (r) { | ||
2900 | DRM_ERROR("Failed to load firmware!\n"); | ||
2901 | return r; | ||
2902 | } | ||
2903 | } | ||
2767 | } | 2904 | } |
2768 | 2905 | ||
2769 | evergreen_mc_program(rdev); | 2906 | evergreen_mc_program(rdev); |
@@ -2782,6 +2919,11 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2782 | rdev->asic->copy = NULL; | 2919 | rdev->asic->copy = NULL; |
2783 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | 2920 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
2784 | } | 2921 | } |
2922 | /* XXX: ontario has problems blitting to gart at the moment */ | ||
2923 | if (rdev->family == CHIP_PALM) { | ||
2924 | rdev->asic->copy = NULL; | ||
2925 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
2926 | } | ||
2785 | 2927 | ||
2786 | /* allocate wb buffer */ | 2928 | /* allocate wb buffer */ |
2787 | r = radeon_wb_init(rdev); | 2929 | r = radeon_wb_init(rdev); |
@@ -2814,6 +2956,11 @@ int evergreen_resume(struct radeon_device *rdev) | |||
2814 | { | 2956 | { |
2815 | int r; | 2957 | int r; |
2816 | 2958 | ||
2959 | /* reset the asic, the gfx blocks are often in a bad state | ||
2960 | * after the driver is unloaded or after a resume | ||
2961 | */ | ||
2962 | if (radeon_asic_reset(rdev)) | ||
2963 | dev_warn(rdev->dev, "GPU reset failed !\n"); | ||
2817 | /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, | 2964 | /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, |
2818 | * posting will perform necessary task to bring back GPU into good | 2965 | * posting will perform necessary task to bring back GPU into good |
2819 | * shape. | 2966 | * shape. |
@@ -2879,31 +3026,6 @@ int evergreen_copy_blit(struct radeon_device *rdev, | |||
2879 | return 0; | 3026 | return 0; |
2880 | } | 3027 | } |
2881 | 3028 | ||
2882 | static bool evergreen_card_posted(struct radeon_device *rdev) | ||
2883 | { | ||
2884 | u32 reg; | ||
2885 | |||
2886 | /* first check CRTCs */ | ||
2887 | if (rdev->flags & RADEON_IS_IGP) | ||
2888 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | ||
2889 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
2890 | else | ||
2891 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | ||
2892 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | | ||
2893 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | | ||
2894 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | | ||
2895 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | | ||
2896 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
2897 | if (reg & EVERGREEN_CRTC_MASTER_EN) | ||
2898 | return true; | ||
2899 | |||
2900 | /* then check MEM_SIZE, in case the crtcs are off */ | ||
2901 | if (RREG32(CONFIG_MEMSIZE)) | ||
2902 | return true; | ||
2903 | |||
2904 | return false; | ||
2905 | } | ||
2906 | |||
2907 | /* Plan is to move initialization in that function and use | 3029 | /* Plan is to move initialization in that function and use |
2908 | * helper function so that radeon_device_init pretty much | 3030 | * helper function so that radeon_device_init pretty much |
2909 | * do nothing more than calling asic specific function. This | 3031 | * do nothing more than calling asic specific function. This |
@@ -2934,8 +3056,13 @@ int evergreen_init(struct radeon_device *rdev) | |||
2934 | r = radeon_atombios_init(rdev); | 3056 | r = radeon_atombios_init(rdev); |
2935 | if (r) | 3057 | if (r) |
2936 | return r; | 3058 | return r; |
3059 | /* reset the asic, the gfx blocks are often in a bad state | ||
3060 | * after the driver is unloaded or after a resume | ||
3061 | */ | ||
3062 | if (radeon_asic_reset(rdev)) | ||
3063 | dev_warn(rdev->dev, "GPU reset failed !\n"); | ||
2937 | /* Post card if necessary */ | 3064 | /* Post card if necessary */ |
2938 | if (!evergreen_card_posted(rdev)) { | 3065 | if (!radeon_card_posted(rdev)) { |
2939 | if (!rdev->bios) { | 3066 | if (!rdev->bios) { |
2940 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | 3067 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
2941 | return -EINVAL; | 3068 | return -EINVAL; |
@@ -3025,3 +3152,55 @@ void evergreen_fini(struct radeon_device *rdev) | |||
3025 | rdev->bios = NULL; | 3152 | rdev->bios = NULL; |
3026 | radeon_dummy_page_fini(rdev); | 3153 | radeon_dummy_page_fini(rdev); |
3027 | } | 3154 | } |
3155 | |||
3156 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) | ||
3157 | { | ||
3158 | u32 link_width_cntl, speed_cntl; | ||
3159 | |||
3160 | if (radeon_pcie_gen2 == 0) | ||
3161 | return; | ||
3162 | |||
3163 | if (rdev->flags & RADEON_IS_IGP) | ||
3164 | return; | ||
3165 | |||
3166 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
3167 | return; | ||
3168 | |||
3169 | /* x2 cards have a special sequence */ | ||
3170 | if (ASIC_IS_X2(rdev)) | ||
3171 | return; | ||
3172 | |||
3173 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3174 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || | ||
3175 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { | ||
3176 | |||
3177 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3178 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
3179 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3180 | |||
3181 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3182 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; | ||
3183 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3184 | |||
3185 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3186 | speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; | ||
3187 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3188 | |||
3189 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3190 | speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; | ||
3191 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3192 | |||
3193 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3194 | speed_cntl |= LC_GEN2_EN_STRAP; | ||
3195 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3196 | |||
3197 | } else { | ||
3198 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3199 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ | ||
3200 | if (1) | ||
3201 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
3202 | else | ||
3203 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
3204 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3205 | } | ||
3206 | } | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2ccd1f0545fe..2ed930e02f3a 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -148,7 +148,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
148 | radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); | 148 | radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); |
149 | 149 | ||
150 | if ((rdev->family == CHIP_CEDAR) || | 150 | if ((rdev->family == CHIP_CEDAR) || |
151 | (rdev->family == CHIP_PALM)) | 151 | (rdev->family == CHIP_PALM) || |
152 | (rdev->family == CHIP_CAICOS)) | ||
152 | cp_set_surface_sync(rdev, | 153 | cp_set_surface_sync(rdev, |
153 | PACKET3_TC_ACTION_ENA, 48, gpu_addr); | 154 | PACKET3_TC_ACTION_ENA, 48, gpu_addr); |
154 | else | 155 | else |
@@ -231,7 +232,7 @@ draw_auto(struct radeon_device *rdev) | |||
231 | 232 | ||
232 | } | 233 | } |
233 | 234 | ||
234 | /* emits 30 */ | 235 | /* emits 36 */ |
235 | static void | 236 | static void |
236 | set_default_state(struct radeon_device *rdev) | 237 | set_default_state(struct radeon_device *rdev) |
237 | { | 238 | { |
@@ -244,6 +245,8 @@ set_default_state(struct radeon_device *rdev) | |||
244 | int num_hs_threads, num_ls_threads; | 245 | int num_hs_threads, num_ls_threads; |
245 | int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; | 246 | int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; |
246 | int num_hs_stack_entries, num_ls_stack_entries; | 247 | int num_hs_stack_entries, num_ls_stack_entries; |
248 | u64 gpu_addr; | ||
249 | int dwords; | ||
247 | 250 | ||
248 | switch (rdev->family) { | 251 | switch (rdev->family) { |
249 | case CHIP_CEDAR: | 252 | case CHIP_CEDAR: |
@@ -353,10 +356,74 @@ set_default_state(struct radeon_device *rdev) | |||
353 | num_hs_stack_entries = 42; | 356 | num_hs_stack_entries = 42; |
354 | num_ls_stack_entries = 42; | 357 | num_ls_stack_entries = 42; |
355 | break; | 358 | break; |
359 | case CHIP_BARTS: | ||
360 | num_ps_gprs = 93; | ||
361 | num_vs_gprs = 46; | ||
362 | num_temp_gprs = 4; | ||
363 | num_gs_gprs = 31; | ||
364 | num_es_gprs = 31; | ||
365 | num_hs_gprs = 23; | ||
366 | num_ls_gprs = 23; | ||
367 | num_ps_threads = 128; | ||
368 | num_vs_threads = 20; | ||
369 | num_gs_threads = 20; | ||
370 | num_es_threads = 20; | ||
371 | num_hs_threads = 20; | ||
372 | num_ls_threads = 20; | ||
373 | num_ps_stack_entries = 85; | ||
374 | num_vs_stack_entries = 85; | ||
375 | num_gs_stack_entries = 85; | ||
376 | num_es_stack_entries = 85; | ||
377 | num_hs_stack_entries = 85; | ||
378 | num_ls_stack_entries = 85; | ||
379 | break; | ||
380 | case CHIP_TURKS: | ||
381 | num_ps_gprs = 93; | ||
382 | num_vs_gprs = 46; | ||
383 | num_temp_gprs = 4; | ||
384 | num_gs_gprs = 31; | ||
385 | num_es_gprs = 31; | ||
386 | num_hs_gprs = 23; | ||
387 | num_ls_gprs = 23; | ||
388 | num_ps_threads = 128; | ||
389 | num_vs_threads = 20; | ||
390 | num_gs_threads = 20; | ||
391 | num_es_threads = 20; | ||
392 | num_hs_threads = 20; | ||
393 | num_ls_threads = 20; | ||
394 | num_ps_stack_entries = 42; | ||
395 | num_vs_stack_entries = 42; | ||
396 | num_gs_stack_entries = 42; | ||
397 | num_es_stack_entries = 42; | ||
398 | num_hs_stack_entries = 42; | ||
399 | num_ls_stack_entries = 42; | ||
400 | break; | ||
401 | case CHIP_CAICOS: | ||
402 | num_ps_gprs = 93; | ||
403 | num_vs_gprs = 46; | ||
404 | num_temp_gprs = 4; | ||
405 | num_gs_gprs = 31; | ||
406 | num_es_gprs = 31; | ||
407 | num_hs_gprs = 23; | ||
408 | num_ls_gprs = 23; | ||
409 | num_ps_threads = 128; | ||
410 | num_vs_threads = 10; | ||
411 | num_gs_threads = 10; | ||
412 | num_es_threads = 10; | ||
413 | num_hs_threads = 10; | ||
414 | num_ls_threads = 10; | ||
415 | num_ps_stack_entries = 42; | ||
416 | num_vs_stack_entries = 42; | ||
417 | num_gs_stack_entries = 42; | ||
418 | num_es_stack_entries = 42; | ||
419 | num_hs_stack_entries = 42; | ||
420 | num_ls_stack_entries = 42; | ||
421 | break; | ||
356 | } | 422 | } |
357 | 423 | ||
358 | if ((rdev->family == CHIP_CEDAR) || | 424 | if ((rdev->family == CHIP_CEDAR) || |
359 | (rdev->family == CHIP_PALM)) | 425 | (rdev->family == CHIP_PALM) || |
426 | (rdev->family == CHIP_CAICOS)) | ||
360 | sq_config = 0; | 427 | sq_config = 0; |
361 | else | 428 | else |
362 | sq_config = VC_ENABLE; | 429 | sq_config = VC_ENABLE; |
@@ -432,6 +499,18 @@ set_default_state(struct radeon_device *rdev) | |||
432 | radeon_ring_write(rdev, 0x00000000); | 499 | radeon_ring_write(rdev, 0x00000000); |
433 | radeon_ring_write(rdev, 0x00000000); | 500 | radeon_ring_write(rdev, 0x00000000); |
434 | 501 | ||
502 | /* set to DX10/11 mode */ | ||
503 | radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0)); | ||
504 | radeon_ring_write(rdev, 1); | ||
505 | |||
506 | /* emit an IB pointing at default state */ | ||
507 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); | ||
508 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; | ||
509 | radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||
510 | radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); | ||
511 | radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); | ||
512 | radeon_ring_write(rdev, dwords); | ||
513 | |||
435 | } | 514 | } |
436 | 515 | ||
437 | static inline uint32_t i2f(uint32_t input) | 516 | static inline uint32_t i2f(uint32_t input) |
@@ -462,8 +541,10 @@ static inline uint32_t i2f(uint32_t input) | |||
462 | int evergreen_blit_init(struct radeon_device *rdev) | 541 | int evergreen_blit_init(struct radeon_device *rdev) |
463 | { | 542 | { |
464 | u32 obj_size; | 543 | u32 obj_size; |
465 | int r; | 544 | int r, dwords; |
466 | void *ptr; | 545 | void *ptr; |
546 | u32 packet2s[16]; | ||
547 | int num_packet2s = 0; | ||
467 | 548 | ||
468 | /* pin copy shader into vram if already initialized */ | 549 | /* pin copy shader into vram if already initialized */ |
469 | if (rdev->r600_blit.shader_obj) | 550 | if (rdev->r600_blit.shader_obj) |
@@ -471,8 +552,17 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
471 | 552 | ||
472 | mutex_init(&rdev->r600_blit.mutex); | 553 | mutex_init(&rdev->r600_blit.mutex); |
473 | rdev->r600_blit.state_offset = 0; | 554 | rdev->r600_blit.state_offset = 0; |
474 | rdev->r600_blit.state_len = 0; | 555 | |
475 | obj_size = 0; | 556 | rdev->r600_blit.state_len = evergreen_default_size; |
557 | |||
558 | dwords = rdev->r600_blit.state_len; | ||
559 | while (dwords & 0xf) { | ||
560 | packet2s[num_packet2s++] = PACKET2(0); | ||
561 | dwords++; | ||
562 | } | ||
563 | |||
564 | obj_size = dwords * 4; | ||
565 | obj_size = ALIGN(obj_size, 256); | ||
476 | 566 | ||
477 | rdev->r600_blit.vs_offset = obj_size; | 567 | rdev->r600_blit.vs_offset = obj_size; |
478 | obj_size += evergreen_vs_size * 4; | 568 | obj_size += evergreen_vs_size * 4; |
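The alignment loop above is worth calling out: the CP fetches the default state through an IB, so the state block is padded to a whole multiple of 16 dwords with single-dword type-2 NOPs. A standalone sketch of the same padding logic (assumes the caller's buffer can hold up to 15 filler dwords):

	/* Sketch: pad a dword count up to the next 16-dword boundary with
	 * PACKET2(0) fillers, which the CP skips over.  Returns the number
	 * of filler dwords appended. */
	static int pad_state_to_16(u32 state_len, u32 *packet2s)
	{
		int num_packet2s = 0;
		u32 dwords = state_len;

		while (dwords & 0xf) {
			packet2s[num_packet2s++] = PACKET2(0);
			dwords++;
		}
		return num_packet2s;
	}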
@@ -482,7 +572,7 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
482 | obj_size += evergreen_ps_size * 4; | 572 | obj_size += evergreen_ps_size * 4; |
483 | obj_size = ALIGN(obj_size, 256); | 573 | obj_size = ALIGN(obj_size, 256); |
484 | 574 | ||
485 | r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | 575 | r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
486 | &rdev->r600_blit.shader_obj); | 576 | &rdev->r600_blit.shader_obj); |
487 | if (r) { | 577 | if (r) { |
488 | DRM_ERROR("evergreen failed to allocate shader\n"); | 578 | DRM_ERROR("evergreen failed to allocate shader\n"); |
@@ -502,6 +592,12 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
502 | return r; | 592 | return r; |
503 | } | 593 | } |
504 | 594 | ||
595 | memcpy_toio(ptr + rdev->r600_blit.state_offset, | ||
596 | evergreen_default_state, rdev->r600_blit.state_len * 4); | ||
597 | |||
598 | if (num_packet2s) | ||
599 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | ||
600 | packet2s, num_packet2s * 4); | ||
505 | memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); | 601 | memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); |
506 | memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); | 602 | memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); |
507 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | 603 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
@@ -587,7 +683,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
587 | /* calculate number of loops correctly */ | 683 | /* calculate number of loops correctly */ |
588 | ring_size = num_loops * dwords_per_loop; | 684 | ring_size = num_loops * dwords_per_loop; |
589 | /* set default + shaders */ | 685 | /* set default + shaders */ |
590 | ring_size += 46; /* shaders + def state */ | 686 | ring_size += 52; /* shaders + def state */ |
591 | ring_size += 10; /* fence emit for VB IB */ | 687 | ring_size += 10; /* fence emit for VB IB */ |
592 | ring_size += 5; /* done copy */ | 688 | ring_size += 5; /* done copy */ |
593 | ring_size += 10; /* fence emit for done copy */ | 689 | ring_size += 10; /* fence emit for done copy */ |
@@ -595,7 +691,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
595 | if (r) | 691 | if (r) |
596 | return r; | 692 | return r; |
597 | 693 | ||
598 | set_default_state(rdev); /* 30 */ | 694 | set_default_state(rdev); /* 36 */ |
599 | set_shaders(rdev); /* 16 */ | 695 | set_shaders(rdev); /* 16 */ |
600 | return 0; | 696 | return 0; |
601 | } | 697 | } |
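Worked accounting for the two ring-size constants bumped above, assuming PACKET3(op, n) occupies n + 2 dwords (header plus n + 1 payload):

	/*
	 * PACKET3_MODE_CONTROL, count 0     -> 2 dwords
	 * PACKET3_INDIRECT_BUFFER, count 2  -> 4 dwords
	 *
	 * set_default_state() thus grows by 6 dwords, from 30 to 36, and
	 * the "shaders + def state" reservation from 30 + 16 = 46 to
	 * 36 + 16 = 52.
	 */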
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 5b869ce86917..afec1aca2a73 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -176,6 +176,7 @@ | |||
176 | #define HDP_NONSURFACE_BASE 0x2C04 | 176 | #define HDP_NONSURFACE_BASE 0x2C04 |
177 | #define HDP_NONSURFACE_INFO 0x2C08 | 177 | #define HDP_NONSURFACE_INFO 0x2C08 |
178 | #define HDP_NONSURFACE_SIZE 0x2C0C | 178 | #define HDP_NONSURFACE_SIZE 0x2C0C |
179 | #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 | ||
179 | #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 | 180 | #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 |
180 | #define HDP_TILING_CONFIG 0x2F3C | 181 | #define HDP_TILING_CONFIG 0x2F3C |
181 | 182 | ||
@@ -239,6 +240,7 @@ | |||
239 | #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) | 240 | #define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) |
240 | #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) | 241 | #define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) |
241 | #define PA_SC_LINE_STIPPLE 0x28A0C | 242 | #define PA_SC_LINE_STIPPLE 0x28A0C |
243 | #define PA_SU_LINE_STIPPLE_VALUE 0x8A60 | ||
242 | #define PA_SC_LINE_STIPPLE_STATE 0x8B10 | 244 | #define PA_SC_LINE_STIPPLE_STATE 0x8B10 |
243 | 245 | ||
244 | #define SCRATCH_REG0 0x8500 | 246 | #define SCRATCH_REG0 0x8500 |
@@ -580,6 +582,44 @@ | |||
580 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) | 582 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) |
581 | # define DC_HPDx_EN (1 << 28) | 583 | # define DC_HPDx_EN (1 << 28) |
582 | 584 | ||
585 | /* PCIE link registers */ | ||
586 | #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ | ||
587 | #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ | ||
588 | # define LC_LINK_WIDTH_SHIFT 0 | ||
589 | # define LC_LINK_WIDTH_MASK 0x7 | ||
590 | # define LC_LINK_WIDTH_X0 0 | ||
591 | # define LC_LINK_WIDTH_X1 1 | ||
592 | # define LC_LINK_WIDTH_X2 2 | ||
593 | # define LC_LINK_WIDTH_X4 3 | ||
594 | # define LC_LINK_WIDTH_X8 4 | ||
595 | # define LC_LINK_WIDTH_X16 6 | ||
596 | # define LC_LINK_WIDTH_RD_SHIFT 4 | ||
597 | # define LC_LINK_WIDTH_RD_MASK 0x70 | ||
598 | # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) | ||
599 | # define LC_RECONFIG_NOW (1 << 8) | ||
600 | # define LC_RENEGOTIATION_SUPPORT (1 << 9) | ||
601 | # define LC_RENEGOTIATE_EN (1 << 10) | ||
602 | # define LC_SHORT_RECONFIG_EN (1 << 11) | ||
603 | # define LC_UPCONFIGURE_SUPPORT (1 << 12) | ||
604 | # define LC_UPCONFIGURE_DIS (1 << 13) | ||
605 | #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ | ||
606 | # define LC_GEN2_EN_STRAP (1 << 0) | ||
607 | # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) | ||
608 | # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) | ||
609 | # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) | ||
610 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) | ||
611 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 | ||
612 | # define LC_CURRENT_DATA_RATE (1 << 11) | ||
613 | # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) | ||
614 | # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) | ||
615 | # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) | ||
616 | # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) | ||
617 | #define MM_CFGREGS_CNTL 0x544c | ||
618 | # define MM_WR_TO_CFG_EN (1 << 3) | ||
619 | #define LINK_CNTL2 0x88 /* F0 */ | ||
620 | # define TARGET_LINK_SPEED_MASK (0xf << 0) | ||
621 | # define SELECTABLE_DEEMPHASIS (1 << 6) | ||
622 | |||
583 | /* | 623 | /* |
584 | * PM4 | 624 | * PM4 |
585 | */ | 625 | */ |
@@ -609,10 +649,11 @@ | |||
609 | #define PACKET3_NOP 0x10 | 649 | #define PACKET3_NOP 0x10 |
610 | #define PACKET3_SET_BASE 0x11 | 650 | #define PACKET3_SET_BASE 0x11 |
611 | #define PACKET3_CLEAR_STATE 0x12 | 651 | #define PACKET3_CLEAR_STATE 0x12 |
612 | #define PACKET3_INDIRECT_BUFFER_SIZE 0x13 | 652 | #define PACKET3_INDEX_BUFFER_SIZE 0x13 |
613 | #define PACKET3_DISPATCH_DIRECT 0x15 | 653 | #define PACKET3_DISPATCH_DIRECT 0x15 |
614 | #define PACKET3_DISPATCH_INDIRECT 0x16 | 654 | #define PACKET3_DISPATCH_INDIRECT 0x16 |
615 | #define PACKET3_INDIRECT_BUFFER_END 0x17 | 655 | #define PACKET3_INDIRECT_BUFFER_END 0x17 |
656 | #define PACKET3_MODE_CONTROL 0x18 | ||
616 | #define PACKET3_SET_PREDICATION 0x20 | 657 | #define PACKET3_SET_PREDICATION 0x20 |
617 | #define PACKET3_REG_RMW 0x21 | 658 | #define PACKET3_REG_RMW 0x21 |
618 | #define PACKET3_COND_EXEC 0x22 | 659 | #define PACKET3_COND_EXEC 0x22 |
@@ -650,14 +691,14 @@ | |||
650 | # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) | 691 | # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) |
651 | # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) | 692 | # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) |
652 | # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) | 693 | # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) |
653 | # define PACKET3_CB11_DEST_BASE_ENA (1 << 17) | 694 | # define PACKET3_CB11_DEST_BASE_ENA (1 << 18) |
654 | # define PACKET3_FULL_CACHE_ENA (1 << 20) | 695 | # define PACKET3_FULL_CACHE_ENA (1 << 20) |
655 | # define PACKET3_TC_ACTION_ENA (1 << 23) | 696 | # define PACKET3_TC_ACTION_ENA (1 << 23) |
656 | # define PACKET3_VC_ACTION_ENA (1 << 24) | 697 | # define PACKET3_VC_ACTION_ENA (1 << 24) |
657 | # define PACKET3_CB_ACTION_ENA (1 << 25) | 698 | # define PACKET3_CB_ACTION_ENA (1 << 25) |
658 | # define PACKET3_DB_ACTION_ENA (1 << 26) | 699 | # define PACKET3_DB_ACTION_ENA (1 << 26) |
659 | # define PACKET3_SH_ACTION_ENA (1 << 27) | 700 | # define PACKET3_SH_ACTION_ENA (1 << 27) |
660 | # define PACKET3_SMX_ACTION_ENA (1 << 28) | 701 | # define PACKET3_SX_ACTION_ENA (1 << 28) |
661 | #define PACKET3_ME_INITIALIZE 0x44 | 702 | #define PACKET3_ME_INITIALIZE 0x44 |
662 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) | 703 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) |
663 | #define PACKET3_COND_WRITE 0x45 | 704 | #define PACKET3_COND_WRITE 0x45 |
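As a usage note for the new link-width fields: the RD field reports the width the hardware actually negotiated, in the same LC_LINK_WIDTH_X* encoding as the request field in bits 2:0. A small sketch of decoding it (hypothetical helper, not in the patch):

	/* Sketch: extract the negotiated link width from
	 * PCIE_LC_LINK_WIDTH_CNTL; the result is one of the
	 * LC_LINK_WIDTH_X* encodings (x0/x1/x2/x4 map to 0..3, x8 to 4,
	 * x16 to 6). */
	static u32 lc_link_width_rd(u32 link_width_cntl)
	{
		return (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >>
			LC_LINK_WIDTH_RD_SHIFT;
	}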
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c new file mode 100644 index 000000000000..5e0bef80ad7f --- /dev/null +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
24 | #include <linux/firmware.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include "drmP.h" | ||
28 | #include "radeon.h" | ||
29 | #include "radeon_asic.h" | ||
30 | #include "radeon_drm.h" | ||
31 | #include "nid.h" | ||
32 | #include "atom.h" | ||
33 | #include "ni_reg.h" | ||
34 | |||
35 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | ||
36 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | ||
37 | #define EVERGREEN_RLC_UCODE_SIZE 768 | ||
38 | #define BTC_MC_UCODE_SIZE 6024 | ||
39 | |||
40 | /* Firmware Names */ | ||
41 | MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); | ||
42 | MODULE_FIRMWARE("radeon/BARTS_me.bin"); | ||
43 | MODULE_FIRMWARE("radeon/BARTS_mc.bin"); | ||
44 | MODULE_FIRMWARE("radeon/BTC_rlc.bin"); | ||
45 | MODULE_FIRMWARE("radeon/TURKS_pfp.bin"); | ||
46 | MODULE_FIRMWARE("radeon/TURKS_me.bin"); | ||
47 | MODULE_FIRMWARE("radeon/TURKS_mc.bin"); | ||
48 | MODULE_FIRMWARE("radeon/CAICOS_pfp.bin"); | ||
49 | MODULE_FIRMWARE("radeon/CAICOS_me.bin"); | ||
50 | MODULE_FIRMWARE("radeon/CAICOS_mc.bin"); | ||
51 | |||
52 | #define BTC_IO_MC_REGS_SIZE 29 | ||
53 | |||
54 | static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { | ||
55 | {0x00000077, 0xff010100}, | ||
56 | {0x00000078, 0x00000000}, | ||
57 | {0x00000079, 0x00001434}, | ||
58 | {0x0000007a, 0xcc08ec08}, | ||
59 | {0x0000007b, 0x00040000}, | ||
60 | {0x0000007c, 0x000080c0}, | ||
61 | {0x0000007d, 0x09000000}, | ||
62 | {0x0000007e, 0x00210404}, | ||
63 | {0x00000081, 0x08a8e800}, | ||
64 | {0x00000082, 0x00030444}, | ||
65 | {0x00000083, 0x00000000}, | ||
66 | {0x00000085, 0x00000001}, | ||
67 | {0x00000086, 0x00000002}, | ||
68 | {0x00000087, 0x48490000}, | ||
69 | {0x00000088, 0x20244647}, | ||
70 | {0x00000089, 0x00000005}, | ||
71 | {0x0000008b, 0x66030000}, | ||
72 | {0x0000008c, 0x00006603}, | ||
73 | {0x0000008d, 0x00000100}, | ||
74 | {0x0000008f, 0x00001c0a}, | ||
75 | {0x00000090, 0xff000001}, | ||
76 | {0x00000094, 0x00101101}, | ||
77 | {0x00000095, 0x00000fff}, | ||
78 | {0x00000096, 0x00116fff}, | ||
79 | {0x00000097, 0x60010000}, | ||
80 | {0x00000098, 0x10010000}, | ||
81 | {0x00000099, 0x00006000}, | ||
82 | {0x0000009a, 0x00001000}, | ||
83 | {0x0000009f, 0x00946a00} | ||
84 | }; | ||
85 | |||
86 | static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { | ||
87 | {0x00000077, 0xff010100}, | ||
88 | {0x00000078, 0x00000000}, | ||
89 | {0x00000079, 0x00001434}, | ||
90 | {0x0000007a, 0xcc08ec08}, | ||
91 | {0x0000007b, 0x00040000}, | ||
92 | {0x0000007c, 0x000080c0}, | ||
93 | {0x0000007d, 0x09000000}, | ||
94 | {0x0000007e, 0x00210404}, | ||
95 | {0x00000081, 0x08a8e800}, | ||
96 | {0x00000082, 0x00030444}, | ||
97 | {0x00000083, 0x00000000}, | ||
98 | {0x00000085, 0x00000001}, | ||
99 | {0x00000086, 0x00000002}, | ||
100 | {0x00000087, 0x48490000}, | ||
101 | {0x00000088, 0x20244647}, | ||
102 | {0x00000089, 0x00000005}, | ||
103 | {0x0000008b, 0x66030000}, | ||
104 | {0x0000008c, 0x00006603}, | ||
105 | {0x0000008d, 0x00000100}, | ||
106 | {0x0000008f, 0x00001c0a}, | ||
107 | {0x00000090, 0xff000001}, | ||
108 | {0x00000094, 0x00101101}, | ||
109 | {0x00000095, 0x00000fff}, | ||
110 | {0x00000096, 0x00116fff}, | ||
111 | {0x00000097, 0x60010000}, | ||
112 | {0x00000098, 0x10010000}, | ||
113 | {0x00000099, 0x00006000}, | ||
114 | {0x0000009a, 0x00001000}, | ||
115 | {0x0000009f, 0x00936a00} | ||
116 | }; | ||
117 | |||
118 | static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { | ||
119 | {0x00000077, 0xff010100}, | ||
120 | {0x00000078, 0x00000000}, | ||
121 | {0x00000079, 0x00001434}, | ||
122 | {0x0000007a, 0xcc08ec08}, | ||
123 | {0x0000007b, 0x00040000}, | ||
124 | {0x0000007c, 0x000080c0}, | ||
125 | {0x0000007d, 0x09000000}, | ||
126 | {0x0000007e, 0x00210404}, | ||
127 | {0x00000081, 0x08a8e800}, | ||
128 | {0x00000082, 0x00030444}, | ||
129 | {0x00000083, 0x00000000}, | ||
130 | {0x00000085, 0x00000001}, | ||
131 | {0x00000086, 0x00000002}, | ||
132 | {0x00000087, 0x48490000}, | ||
133 | {0x00000088, 0x20244647}, | ||
134 | {0x00000089, 0x00000005}, | ||
135 | {0x0000008b, 0x66030000}, | ||
136 | {0x0000008c, 0x00006603}, | ||
137 | {0x0000008d, 0x00000100}, | ||
138 | {0x0000008f, 0x00001c0a}, | ||
139 | {0x00000090, 0xff000001}, | ||
140 | {0x00000094, 0x00101101}, | ||
141 | {0x00000095, 0x00000fff}, | ||
142 | {0x00000096, 0x00116fff}, | ||
143 | {0x00000097, 0x60010000}, | ||
144 | {0x00000098, 0x10010000}, | ||
145 | {0x00000099, 0x00006000}, | ||
146 | {0x0000009a, 0x00001000}, | ||
147 | {0x0000009f, 0x00916a00} | ||
148 | }; | ||
149 | |||
150 | int btc_mc_load_microcode(struct radeon_device *rdev) | ||
151 | { | ||
152 | const __be32 *fw_data; | ||
153 | u32 mem_type, running, blackout = 0; | ||
154 | u32 *io_mc_regs; | ||
155 | int i; | ||
156 | |||
157 | if (!rdev->mc_fw) | ||
158 | return -EINVAL; | ||
159 | |||
160 | switch (rdev->family) { | ||
161 | case CHIP_BARTS: | ||
162 | io_mc_regs = (u32 *)&barts_io_mc_regs; | ||
163 | break; | ||
164 | case CHIP_TURKS: | ||
165 | io_mc_regs = (u32 *)&turks_io_mc_regs; | ||
166 | break; | ||
167 | case CHIP_CAICOS: | ||
168 | default: | ||
169 | io_mc_regs = (u32 *)&caicos_io_mc_regs; | ||
170 | break; | ||
171 | } | ||
172 | |||
173 | mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT; | ||
174 | running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; | ||
175 | |||
176 | if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) { | ||
177 | if (running) { | ||
178 | blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); | ||
179 | WREG32(MC_SHARED_BLACKOUT_CNTL, 1); | ||
180 | } | ||
181 | |||
182 | /* reset the engine and set to writable */ | ||
183 | WREG32(MC_SEQ_SUP_CNTL, 0x00000008); | ||
184 | WREG32(MC_SEQ_SUP_CNTL, 0x00000010); | ||
185 | |||
186 | /* load mc io regs */ | ||
187 | for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) { | ||
188 | WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); | ||
189 | WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); | ||
190 | } | ||
191 | /* load the MC ucode */ | ||
192 | fw_data = (const __be32 *)rdev->mc_fw->data; | ||
193 | for (i = 0; i < BTC_MC_UCODE_SIZE; i++) | ||
194 | WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); | ||
195 | |||
196 | /* put the engine back into the active state */ | ||
197 | WREG32(MC_SEQ_SUP_CNTL, 0x00000008); | ||
198 | WREG32(MC_SEQ_SUP_CNTL, 0x00000004); | ||
199 | WREG32(MC_SEQ_SUP_CNTL, 0x00000001); | ||
200 | |||
201 | /* wait for training to complete */ | ||
202 | while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)) | ||
203 | udelay(10); | ||
204 | |||
205 | if (running) | ||
206 | WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); | ||
207 | } | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | int ni_init_microcode(struct radeon_device *rdev) | ||
213 | { | ||
214 | struct platform_device *pdev; | ||
215 | const char *chip_name; | ||
216 | const char *rlc_chip_name; | ||
217 | size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; | ||
218 | char fw_name[30]; | ||
219 | int err; | ||
220 | |||
221 | DRM_DEBUG("\n"); | ||
222 | |||
223 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | ||
224 | err = IS_ERR(pdev); | ||
225 | if (err) { | ||
226 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | |||
230 | switch (rdev->family) { | ||
231 | case CHIP_BARTS: | ||
232 | chip_name = "BARTS"; | ||
233 | rlc_chip_name = "BTC"; | ||
234 | break; | ||
235 | case CHIP_TURKS: | ||
236 | chip_name = "TURKS"; | ||
237 | rlc_chip_name = "BTC"; | ||
238 | break; | ||
239 | case CHIP_CAICOS: | ||
240 | chip_name = "CAICOS"; | ||
241 | rlc_chip_name = "BTC"; | ||
242 | break; | ||
243 | default: BUG(); | ||
244 | } | ||
245 | |||
246 | pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; | ||
247 | me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; | ||
248 | rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; | ||
249 | mc_req_size = BTC_MC_UCODE_SIZE * 4; | ||
250 | |||
251 | DRM_INFO("Loading %s Microcode\n", chip_name); | ||
252 | |||
253 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | ||
254 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | ||
255 | if (err) | ||
256 | goto out; | ||
257 | if (rdev->pfp_fw->size != pfp_req_size) { | ||
258 | printk(KERN_ERR | ||
259 | "ni_cp: Bogus length %zu in firmware \"%s\"\n", | ||
260 | rdev->pfp_fw->size, fw_name); | ||
261 | err = -EINVAL; | ||
262 | goto out; | ||
263 | } | ||
264 | |||
265 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | ||
266 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | ||
267 | if (err) | ||
268 | goto out; | ||
269 | if (rdev->me_fw->size != me_req_size) { | ||
270 | printk(KERN_ERR | ||
271 | "ni_cp: Bogus length %zu in firmware \"%s\"\n", | ||
272 | rdev->me_fw->size, fw_name); | ||
273 | err = -EINVAL; | ||
274 | } | ||
275 | |||
276 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | ||
277 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | ||
278 | if (err) | ||
279 | goto out; | ||
280 | if (rdev->rlc_fw->size != rlc_req_size) { | ||
281 | printk(KERN_ERR | ||
282 | "ni_rlc: Bogus length %zu in firmware \"%s\"\n", | ||
283 | rdev->rlc_fw->size, fw_name); | ||
284 | err = -EINVAL; | ||
285 | } | ||
286 | |||
287 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | ||
288 | err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); | ||
289 | if (err) | ||
290 | goto out; | ||
291 | if (rdev->mc_fw->size != mc_req_size) { | ||
292 | printk(KERN_ERR | ||
293 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", | ||
294 | rdev->mc_fw->size, fw_name); | ||
295 | err = -EINVAL; | ||
296 | } | ||
297 | out: | ||
298 | platform_device_unregister(pdev); | ||
299 | |||
300 | if (err) { | ||
301 | if (err != -EINVAL) | ||
302 | printk(KERN_ERR | ||
303 | "ni_cp: Failed to load firmware \"%s\"\n", | ||
304 | fw_name); | ||
305 | release_firmware(rdev->pfp_fw); | ||
306 | rdev->pfp_fw = NULL; | ||
307 | release_firmware(rdev->me_fw); | ||
308 | rdev->me_fw = NULL; | ||
309 | release_firmware(rdev->rlc_fw); | ||
310 | rdev->rlc_fw = NULL; | ||
311 | release_firmware(rdev->mc_fw); | ||
312 | rdev->mc_fw = NULL; | ||
313 | } | ||
314 | return err; | ||
315 | } | ||
316 | |||
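ni_init_microcode() repeats one request-and-validate pattern per image: fetch the blob, then reject it when its size does not match the expected ucode size for the family. A condensed sketch of that pattern (ni_fetch_fw is a hypothetical helper; assumes <linux/firmware.h> is included, as in ni.c above):

	/* Sketch: request one firmware image and validate its length. */
	static int ni_fetch_fw(struct device *dev, const char *fw_name,
			       const struct firmware **fw, size_t req_size)
	{
		int err = request_firmware(fw, fw_name, dev);

		if (err)
			return err;
		if ((*fw)->size != req_size) {
			printk(KERN_ERR "ni_cp: Bogus length %zu in firmware \"%s\"\n",
			       (*fw)->size, fw_name);
			return -EINVAL;
		}
		return 0;
	}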
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h new file mode 100644 index 000000000000..5db7b7d6feb0 --- /dev/null +++ b/drivers/gpu/drm/radeon/ni_reg.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
24 | #ifndef __NI_REG_H__ | ||
25 | #define __NI_REG_H__ | ||
26 | |||
27 | /* northern islands - DCE5 */ | ||
28 | |||
29 | #define NI_INPUT_GAMMA_CONTROL 0x6840 | ||
30 | # define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0) | ||
31 | # define NI_INPUT_GAMMA_USE_LUT 0 | ||
32 | # define NI_INPUT_GAMMA_BYPASS 1 | ||
33 | # define NI_INPUT_GAMMA_SRGB_24 2 | ||
34 | # define NI_INPUT_GAMMA_XVYCC_222 3 | ||
35 | # define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4) | ||
36 | |||
37 | #define NI_PRESCALE_GRPH_CONTROL 0x68b4 | ||
38 | # define NI_GRPH_PRESCALE_BYPASS (1 << 4) | ||
39 | |||
40 | #define NI_PRESCALE_OVL_CONTROL 0x68c4 | ||
41 | # define NI_OVL_PRESCALE_BYPASS (1 << 4) | ||
42 | |||
43 | #define NI_INPUT_CSC_CONTROL 0x68d4 | ||
44 | # define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0) | ||
45 | # define NI_INPUT_CSC_BYPASS 0 | ||
46 | # define NI_INPUT_CSC_PROG_COEFF 1 | ||
47 | # define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2 | ||
48 | # define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4) | ||
49 | |||
50 | #define NI_OUTPUT_CSC_CONTROL 0x68f0 | ||
51 | # define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0) | ||
52 | # define NI_OUTPUT_CSC_BYPASS 0 | ||
53 | # define NI_OUTPUT_CSC_TV_RGB 1 | ||
54 | # define NI_OUTPUT_CSC_YCBCR_601 2 | ||
55 | # define NI_OUTPUT_CSC_YCBCR_709 3 | ||
56 | # define NI_OUTPUT_CSC_PROG_COEFF 4 | ||
57 | # define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5 | ||
58 | # define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4) | ||
59 | |||
60 | #define NI_DEGAMMA_CONTROL 0x6960 | ||
61 | # define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0) | ||
62 | # define NI_DEGAMMA_BYPASS 0 | ||
63 | # define NI_DEGAMMA_SRGB_24 1 | ||
64 | # define NI_DEGAMMA_XVYCC_222 2 | ||
65 | # define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4) | ||
66 | # define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8) | ||
67 | # define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12) | ||
68 | |||
69 | #define NI_GAMUT_REMAP_CONTROL 0x6964 | ||
70 | # define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0) | ||
71 | # define NI_GAMUT_REMAP_BYPASS 0 | ||
72 | # define NI_GAMUT_REMAP_PROG_COEFF 1 | ||
73 | # define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2 | ||
74 | # define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3 | ||
75 | # define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4) | ||
76 | |||
77 | #define NI_REGAMMA_CONTROL 0x6a80 | ||
78 | # define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0) | ||
79 | # define NI_REGAMMA_BYPASS 0 | ||
80 | # define NI_REGAMMA_SRGB_24 1 | ||
81 | # define NI_REGAMMA_XVYCC_222 2 | ||
82 | # define NI_REGAMMA_PROG_A 3 | ||
83 | # define NI_REGAMMA_PROG_B 4 | ||
84 | # define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4) | ||
85 | |||
86 | #endif | ||
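The DCE5 macros above compose register values by masking a mode into a per-surface field. For example, bypassing input gamma on both the graphics and overlay surfaces could look like the sketch below (illustrative only; crtc_offset is an assumed per-CRTC register offset, not defined in this header):

	static void ni_bypass_input_gamma(struct radeon_device *rdev,
					  u32 crtc_offset)
	{
		u32 tmp = NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_BYPASS) |
			  NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_BYPASS);

		WREG32(NI_INPUT_GAMMA_CONTROL + crtc_offset, tmp);
	}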
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h new file mode 100644 index 000000000000..f7b445390e02 --- /dev/null +++ b/drivers/gpu/drm/radeon/nid.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
24 | #ifndef NI_H | ||
25 | #define NI_H | ||
26 | |||
27 | #define MC_SHARED_BLACKOUT_CNTL 0x20ac | ||
28 | #define MC_SEQ_SUP_CNTL 0x28c8 | ||
29 | #define RUN_MASK (1 << 0) | ||
30 | #define MC_SEQ_SUP_PGM 0x28cc | ||
31 | #define MC_IO_PAD_CNTL_D0 0x29d0 | ||
32 | #define MEM_FALL_OUT_CMD (1 << 8) | ||
33 | #define MC_SEQ_MISC0 0x2a00 | ||
34 | #define MC_SEQ_MISC0_GDDR5_SHIFT 28 | ||
35 | #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 | ||
36 | #define MC_SEQ_MISC0_GDDR5_VALUE 5 | ||
37 | #define MC_SEQ_IO_DEBUG_INDEX 0x2a44 | ||
38 | #define MC_SEQ_IO_DEBUG_DATA 0x2a48 | ||
39 | |||
40 | #endif | ||
41 | |||
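These MC sequencer definitions are consumed by btc_mc_load_microcode() in ni.c above; in particular, the memory-type field of MC_SEQ_MISC0 must read 5 before the GDDR5 ucode is loaded. A one-function sketch of that check (illustrative only):

	static bool ni_mc_is_gddr5(struct radeon_device *rdev)
	{
		u32 mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >>
				MC_SEQ_MISC0_GDDR5_SHIFT;

		return mem_type == MC_SEQ_MISC0_GDDR5_VALUE;
	}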
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 300b4a64d8fe..5f15820efe12 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -682,7 +682,7 @@ int r100_irq_process(struct radeon_device *rdev) | |||
682 | /* reset gui idle ack. the status bit is broken */ | 682 | /* reset gui idle ack. the status bit is broken */ |
683 | rdev->irq.gui_idle_acked = false; | 683 | rdev->irq.gui_idle_acked = false; |
684 | if (queue_hotplug) | 684 | if (queue_hotplug) |
685 | queue_work(rdev->wq, &rdev->hotplug_work); | 685 | schedule_work(&rdev->hotplug_work); |
686 | if (rdev->msi_enabled) { | 686 | if (rdev->msi_enabled) { |
687 | switch (rdev->family) { | 687 | switch (rdev->family) { |
688 | case CHIP_RS400: | 688 | case CHIP_RS400: |
@@ -1031,8 +1031,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1031 | WREG32(RADEON_CP_CSQ_MODE, | 1031 | WREG32(RADEON_CP_CSQ_MODE, |
1032 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 1032 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
1033 | REG_SET(RADEON_INDIRECT1_START, indirect1_start)); | 1033 | REG_SET(RADEON_INDIRECT1_START, indirect1_start)); |
1034 | WREG32(0x718, 0); | 1034 | WREG32(RADEON_CP_RB_WPTR_DELAY, 0); |
1035 | WREG32(0x744, 0x00004D4D); | 1035 | WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); |
1036 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); | 1036 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); |
1037 | radeon_ring_start(rdev); | 1037 | radeon_ring_start(rdev); |
1038 | r = radeon_ring_test(rdev); | 1038 | r = radeon_ring_test(rdev); |
@@ -2086,12 +2086,13 @@ int r100_asic_reset(struct radeon_device *rdev) | |||
2086 | { | 2086 | { |
2087 | struct r100_mc_save save; | 2087 | struct r100_mc_save save; |
2088 | u32 status, tmp; | 2088 | u32 status, tmp; |
2089 | int ret = 0; | ||
2089 | 2090 | ||
2090 | r100_mc_stop(rdev, &save); | ||
2091 | status = RREG32(R_000E40_RBBM_STATUS); | 2091 | status = RREG32(R_000E40_RBBM_STATUS); |
2092 | if (!G_000E40_GUI_ACTIVE(status)) { | 2092 | if (!G_000E40_GUI_ACTIVE(status)) { |
2093 | return 0; | 2093 | return 0; |
2094 | } | 2094 | } |
2095 | r100_mc_stop(rdev, &save); | ||
2095 | status = RREG32(R_000E40_RBBM_STATUS); | 2096 | status = RREG32(R_000E40_RBBM_STATUS); |
2096 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); | 2097 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
2097 | /* stop CP */ | 2098 | /* stop CP */ |
@@ -2131,11 +2132,11 @@ int r100_asic_reset(struct radeon_device *rdev) | |||
2131 | G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { | 2132 | G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { |
2132 | dev_err(rdev->dev, "failed to reset GPU\n"); | 2133 | dev_err(rdev->dev, "failed to reset GPU\n"); |
2133 | rdev->gpu_lockup = true; | 2134 | rdev->gpu_lockup = true; |
2134 | return -1; | 2135 | ret = -1; |
2135 | } | 2136 | } else |
2137 | dev_info(rdev->dev, "GPU reset succeeded\n"); | ||
2136 | r100_mc_resume(rdev, &save); | 2138 | r100_mc_resume(rdev, &save); |
2137 | dev_info(rdev->dev, "GPU reset succeed\n"); | 2139 | return ret; |
2138 | return 0; | ||
2139 | } | 2140 | } |
2140 | 2141 | ||
2141 | void r100_set_common_regs(struct radeon_device *rdev) | 2142 | void r100_set_common_regs(struct radeon_device *rdev) |
@@ -2346,10 +2347,10 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state) | |||
2346 | 2347 | ||
2347 | temp = RREG32(RADEON_CONFIG_CNTL); | 2348 | temp = RREG32(RADEON_CONFIG_CNTL); |
2348 | if (state == false) { | 2349 | if (state == false) { |
2349 | temp &= ~(1<<8); | 2350 | temp &= ~RADEON_CFG_VGA_RAM_EN; |
2350 | temp |= (1<<9); | 2351 | temp |= RADEON_CFG_VGA_IO_DIS; |
2351 | } else { | 2352 | } else { |
2352 | temp &= ~(1<<9); | 2353 | temp &= ~RADEON_CFG_VGA_IO_DIS; |
2353 | } | 2354 | } |
2354 | WREG32(RADEON_CONFIG_CNTL, temp); | 2355 | WREG32(RADEON_CONFIG_CNTL, temp); |
2355 | } | 2356 | } |
@@ -3521,7 +3522,7 @@ int r100_ring_test(struct radeon_device *rdev) | |||
3521 | if (i < rdev->usec_timeout) { | 3522 | if (i < rdev->usec_timeout) { |
3522 | DRM_INFO("ring test succeeded in %d usecs\n", i); | 3523 | DRM_INFO("ring test succeeded in %d usecs\n", i); |
3523 | } else { | 3524 | } else { |
3524 | DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", | 3525 | DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n", |
3525 | scratch, tmp); | 3526 | scratch, tmp); |
3526 | r = -EINVAL; | 3527 | r = -EINVAL; |
3527 | } | 3528 | } |
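The r100_asic_reset() rework above changes the control flow in two ways: the MC is only stopped once a reset is actually needed, and it is resumed on both the success and failure paths. A simplified sketch of the resulting flow (the single busy macro stands in for the full set of status checks; the reset sequence itself is elided):

	static int reset_flow_sketch(struct radeon_device *rdev)
	{
		struct r100_mc_save save;
		u32 status;
		int ret = 0;

		status = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_GUI_ACTIVE(status))
			return 0;		/* GPU idle: nothing to reset */
		r100_mc_stop(rdev, &save);	/* stop MC only when resetting */
		/* ... soft-reset sequence elided ... */
		if (G_000E40_TAM_BUSY(status))	/* stand-in for the busy checks */
			ret = -1;
		r100_mc_resume(rdev, &save);	/* resume on success and failure */
		return ret;
	}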
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index b121b6c678d4..eab91760fae0 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h | |||
@@ -551,7 +551,7 @@ | |||
551 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) | 551 | #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) |
552 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) | 552 | #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) |
553 | #define C_000360_CUR2_LOCK 0x7FFFFFFF | 553 | #define C_000360_CUR2_LOCK 0x7FFFFFFF |
554 | #define R_0003C2_GENMO_WT 0x0003C0 | 554 | #define R_0003C2_GENMO_WT 0x0003C2 |
555 | #define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) | 555 | #define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) |
556 | #define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) | 556 | #define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) |
557 | #define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE | 557 | #define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index cde1d3480d93..55fe5ba7def3 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -69,6 +69,9 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
69 | mb(); | 69 | mb(); |
70 | } | 70 | } |
71 | 71 | ||
72 | #define R300_PTE_WRITEABLE (1 << 2) | ||
73 | #define R300_PTE_READABLE (1 << 3) | ||
74 | |||
72 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 75 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
73 | { | 76 | { |
74 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 77 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
@@ -78,7 +81,7 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
78 | } | 81 | } |
79 | addr = (lower_32_bits(addr) >> 8) | | 82 | addr = (lower_32_bits(addr) >> 8) | |
80 | ((upper_32_bits(addr) & 0xff) << 24) | | 83 | ((upper_32_bits(addr) & 0xff) << 24) | |
81 | 0xc; | 84 | R300_PTE_WRITEABLE | R300_PTE_READABLE; |
82 | /* on x86 we want this to be CPU endian; on powerpc | 85 | /* on x86 we want this to be CPU endian; on powerpc
83 | * without HW swappers, it'll get swapped on the way | 86 | * without HW swappers, it'll get swapped on the way
84 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | 87 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */
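For clarity, the PTE layout the hunk above now spells by name: address bits 31:8 land in PTE bits 23:0, address bits 39:32 in PTE bits 31:24, and the low nibble carries the access flags. A standalone sketch:

	/* Sketch: pack a 40-bit system address into a 32-bit RV370 GART
	 * PTE with read/write permission bits set. */
	static u32 rv370_make_pte(u64 addr)
	{
		return (lower_32_bits(addr) >> 8) |
		       ((upper_32_bits(addr) & 0xff) << 24) |
		       R300_PTE_WRITEABLE | R300_PTE_READABLE;
	}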
@@ -135,7 +138,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
135 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); | 138 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); |
136 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); | 139 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); |
137 | /* Clear error */ | 140 | /* Clear error */ |
138 | WREG32_PCIE(0x18, 0); | 141 | WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0); |
139 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); | 142 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
140 | tmp |= RADEON_PCIE_TX_GART_EN; | 143 | tmp |= RADEON_PCIE_TX_GART_EN; |
141 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; | 144 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
@@ -405,12 +408,13 @@ int r300_asic_reset(struct radeon_device *rdev) | |||
405 | { | 408 | { |
406 | struct r100_mc_save save; | 409 | struct r100_mc_save save; |
407 | u32 status, tmp; | 410 | u32 status, tmp; |
411 | int ret = 0; | ||
408 | 412 | ||
409 | r100_mc_stop(rdev, &save); | ||
410 | status = RREG32(R_000E40_RBBM_STATUS); | 413 | status = RREG32(R_000E40_RBBM_STATUS); |
411 | if (!G_000E40_GUI_ACTIVE(status)) { | 414 | if (!G_000E40_GUI_ACTIVE(status)) { |
412 | return 0; | 415 | return 0; |
413 | } | 416 | } |
417 | r100_mc_stop(rdev, &save); | ||
414 | status = RREG32(R_000E40_RBBM_STATUS); | 418 | status = RREG32(R_000E40_RBBM_STATUS); |
415 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); | 419 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
416 | /* stop CP */ | 420 | /* stop CP */ |
@@ -451,11 +455,11 @@ int r300_asic_reset(struct radeon_device *rdev) | |||
451 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { | 455 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { |
452 | dev_err(rdev->dev, "failed to reset GPU\n"); | 456 | dev_err(rdev->dev, "failed to reset GPU\n"); |
453 | rdev->gpu_lockup = true; | 457 | rdev->gpu_lockup = true; |
454 | return -1; | 458 | ret = -1; |
455 | } | 459 | } else |
460 | dev_info(rdev->dev, "GPU reset succeeded\n"); | ||
456 | r100_mc_resume(rdev, &save); | 461 | r100_mc_resume(rdev, &save); |
457 | dev_info(rdev->dev, "GPU reset succeed\n"); | 462 | return ret; |
458 | return 0; | ||
459 | } | 463 | } |
460 | 464 | ||
461 | /* | 465 | /* |
@@ -558,10 +562,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev) | |||
558 | 562 | ||
559 | /* FIXME wait for idle */ | 563 | /* FIXME wait for idle */ |
560 | 564 | ||
561 | if (rdev->family < CHIP_R600) | 565 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
562 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | ||
563 | else | ||
564 | link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | ||
565 | 566 | ||
566 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { | 567 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { |
567 | case RADEON_PCIE_LC_LINK_WIDTH_X0: | 568 | case RADEON_PCIE_LC_LINK_WIDTH_X0: |
@@ -745,6 +746,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
745 | break; | 746 | break; |
746 | case 0x4E00: | 747 | case 0x4E00: |
747 | /* RB3D_CCTL */ | 748 | /* RB3D_CCTL */ |
749 | if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */ | ||
750 | p->rdev->cmask_filp != p->filp) { | ||
751 | DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n"); | ||
752 | return -EINVAL; | ||
753 | } | ||
748 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; | 754 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
749 | break; | 755 | break; |
750 | case 0x4E38: | 756 | case 0x4E38: |
@@ -787,6 +793,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
787 | case 15: | 793 | case 15: |
788 | track->cb[i].cpp = 2; | 794 | track->cb[i].cpp = 2; |
789 | break; | 795 | break; |
796 | case 5: | ||
797 | if (p->rdev->family < CHIP_RV515) { | ||
798 | DRM_ERROR("Invalid color buffer format (%d)!\n", | ||
799 | ((idx_value >> 21) & 0xF)); | ||
800 | return -EINVAL; | ||
801 | } | ||
802 | /* Pass through. */ | ||
790 | case 6: | 803 | case 6: |
791 | track->cb[i].cpp = 4; | 804 | track->cb[i].cpp = 4; |
792 | break; | 805 | break; |
@@ -1199,6 +1212,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p, | |||
1199 | if (p->rdev->hyperz_filp != p->filp) | 1212 | if (p->rdev->hyperz_filp != p->filp) |
1200 | return -EINVAL; | 1213 | return -EINVAL; |
1201 | break; | 1214 | break; |
1215 | case PACKET3_3D_CLEAR_CMASK: | ||
1216 | if (p->rdev->cmask_filp != p->filp) | ||
1217 | return -EINVAL; | ||
1218 | break; | ||
1202 | case PACKET3_NOP: | 1219 | case PACKET3_NOP: |
1203 | break; | 1220 | break; |
1204 | default: | 1221 | default: |
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index 0c036c60d9df..1f519a5ffb8c 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h | |||
@@ -54,6 +54,7 @@ | |||
54 | #define PACKET3_3D_DRAW_IMMD_2 0x35 | 54 | #define PACKET3_3D_DRAW_IMMD_2 0x35 |
55 | #define PACKET3_3D_DRAW_INDX_2 0x36 | 55 | #define PACKET3_3D_DRAW_INDX_2 0x36 |
56 | #define PACKET3_3D_CLEAR_HIZ 0x37 | 56 | #define PACKET3_3D_CLEAR_HIZ 0x37 |
57 | #define PACKET3_3D_CLEAR_CMASK 0x38 | ||
57 | #define PACKET3_BITBLT_MULTI 0x9B | 58 | #define PACKET3_BITBLT_MULTI 0x9B |
58 | 59 | ||
59 | #define PACKET0(reg, n) (CP_PACKET0 | \ | 60 | #define PACKET0(reg, n) (CP_PACKET0 | \ |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index c387346f93a9..0b59ed7c7d2c 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -96,7 +96,7 @@ void r420_pipes_init(struct radeon_device *rdev) | |||
96 | "programming pipes. Bad things might happen.\n"); | 96 | "programming pipes. Bad things might happen.\n"); |
97 | } | 97 | } |
98 | /* get max number of pipes */ | 98 | /* get max number of pipes */ |
99 | gb_pipe_select = RREG32(0x402C); | 99 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
100 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; | 100 | num_pipes = ((gb_pipe_select >> 12) & 3) + 1; |
101 | 101 | ||
102 | /* SE chips have 1 pipe */ | 102 | /* SE chips have 1 pipe */ |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 3c8677f9e385..2ce80d976568 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -79,8 +79,8 @@ static void r520_gpu_init(struct radeon_device *rdev) | |||
79 | WREG32(0x4128, 0xFF); | 79 | WREG32(0x4128, 0xFF); |
80 | } | 80 | } |
81 | r420_pipes_init(rdev); | 81 | r420_pipes_init(rdev); |
82 | gb_pipe_select = RREG32(0x402C); | 82 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
83 | tmp = RREG32(0x170C); | 83 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
84 | pipe_select_current = (tmp >> 2) & 3; | 84 | pipe_select_current = (tmp >> 2) & 3; |
85 | tmp = (1 << pipe_select_current) | | 85 | tmp = (1 << pipe_select_current) | |
86 | (((gb_pipe_select >> 8) & 0xF) << 4); | 86 | (((gb_pipe_select >> 8) & 0xF) << 4); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 53bfe3afb0fa..be780a6b9b1d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -94,14 +94,19 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); | |||
94 | void r600_gpu_init(struct radeon_device *rdev); | 94 | void r600_gpu_init(struct radeon_device *rdev); |
95 | void r600_fini(struct radeon_device *rdev); | 95 | void r600_fini(struct radeon_device *rdev); |
96 | void r600_irq_disable(struct radeon_device *rdev); | 96 | void r600_irq_disable(struct radeon_device *rdev); |
97 | static void r600_pcie_gen2_enable(struct radeon_device *rdev); | ||
97 | 98 | ||
98 | /* get temperature in millidegrees */ | 99 | /* get temperature in millidegrees */ |
99 | u32 rv6xx_get_temp(struct radeon_device *rdev) | 100 | int rv6xx_get_temp(struct radeon_device *rdev) |
100 | { | 101 | { |
101 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> | 102 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> |
102 | ASIC_T_SHIFT; | 103 | ASIC_T_SHIFT; |
104 | int actual_temp = temp & 0xff; | ||
103 | 105 | ||
104 | return temp * 1000; | 106 | if (temp & 0x100) |
107 | actual_temp -= 256; | ||
108 | |||
109 | return actual_temp * 1000; | ||
105 | } | 110 | } |
106 | 111 | ||
107 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) | 112 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) |
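The rv6xx_get_temp() fix above treats the thermal-status field as a 9-bit two's-complement value, so negative readings now decode correctly. A standalone sketch with a worked example:

	/* Sketch: sign-extend a 9-bit temperature field.  E.g. a raw value
	 * of 0x1F0 decodes to 0xF0 - 0x100 = -16 C, i.e. -16000
	 * millidegrees. */
	static int decode_temp9(u32 raw)
	{
		int actual_temp = raw & 0xff;

		if (raw & 0x100)	/* bit 8 is the sign bit */
			actual_temp -= 256;
		return actual_temp * 1000;	/* millidegrees C */
	}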
@@ -881,12 +886,15 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
881 | u32 tmp; | 886 | u32 tmp; |
882 | 887 | ||
883 | /* flush hdp cache so updates hit vram */ | 888 | /* flush hdp cache so updates hit vram */ |
884 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { | 889 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
890 | !(rdev->flags & RADEON_IS_AGP)) { | ||
885 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 891 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
886 | u32 tmp; | 892 | u32 tmp; |
887 | 893 | ||
888 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 894 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
889 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 895 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL |
896 | * This seems to cause problems on some AGP cards. Just use the old | ||
897 | * method for them. | ||
890 | */ | 898 | */ |
891 | WREG32(HDP_DEBUG1, 0); | 899 | WREG32(HDP_DEBUG1, 0); |
892 | tmp = readl((void __iomem *)ptr); | 900 | tmp = readl((void __iomem *)ptr); |
@@ -1198,8 +1206,10 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc | |||
1198 | mc->vram_end, mc->real_vram_size >> 20); | 1206 | mc->vram_end, mc->real_vram_size >> 20); |
1199 | } else { | 1207 | } else { |
1200 | u64 base = 0; | 1208 | u64 base = 0; |
1201 | if (rdev->flags & RADEON_IS_IGP) | 1209 | if (rdev->flags & RADEON_IS_IGP) { |
1202 | base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; | 1210 | base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; |
1211 | base <<= 24; | ||
1212 | } | ||
1203 | radeon_vram_location(rdev, &rdev->mc, base); | 1213 | radeon_vram_location(rdev, &rdev->mc, base); |
1204 | rdev->mc.gtt_base_align = 0; | 1214 | rdev->mc.gtt_base_align = 0; |
1205 | radeon_gtt_location(rdev, mc); | 1215 | radeon_gtt_location(rdev, mc); |
@@ -1281,6 +1291,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
1281 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); | 1291 | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); |
1282 | u32 tmp; | 1292 | u32 tmp; |
1283 | 1293 | ||
1294 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) | ||
1295 | return 0; | ||
1296 | |||
1284 | dev_info(rdev->dev, "GPU softreset \n"); | 1297 | dev_info(rdev->dev, "GPU softreset \n"); |
1285 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", | 1298 | dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
1286 | RREG32(R_008010_GRBM_STATUS)); | 1299 | RREG32(R_008010_GRBM_STATUS)); |
@@ -1340,13 +1353,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev) | |||
1340 | u32 srbm_status; | 1353 | u32 srbm_status; |
1341 | u32 grbm_status; | 1354 | u32 grbm_status; |
1342 | u32 grbm_status2; | 1355 | u32 grbm_status2; |
1356 | struct r100_gpu_lockup *lockup; | ||
1343 | int r; | 1357 | int r; |
1344 | 1358 | ||
1359 | if (rdev->family >= CHIP_RV770) | ||
1360 | lockup = &rdev->config.rv770.lockup; | ||
1361 | else | ||
1362 | lockup = &rdev->config.r600.lockup; | ||
1363 | |||
1345 | srbm_status = RREG32(R_000E50_SRBM_STATUS); | 1364 | srbm_status = RREG32(R_000E50_SRBM_STATUS); |
1346 | grbm_status = RREG32(R_008010_GRBM_STATUS); | 1365 | grbm_status = RREG32(R_008010_GRBM_STATUS); |
1347 | grbm_status2 = RREG32(R_008014_GRBM_STATUS2); | 1366 | grbm_status2 = RREG32(R_008014_GRBM_STATUS2); |
1348 | if (!G_008010_GUI_ACTIVE(grbm_status)) { | 1367 | if (!G_008010_GUI_ACTIVE(grbm_status)) { |
1349 | r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp); | 1368 | r100_gpu_lockup_update(lockup, &rdev->cp); |
1350 | return false; | 1369 | return false; |
1351 | } | 1370 | } |
1352 | /* force CP activities */ | 1371 | /* force CP activities */ |
@@ -1358,7 +1377,7 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev) | |||
1358 | radeon_ring_unlock_commit(rdev); | 1377 | radeon_ring_unlock_commit(rdev); |
1359 | } | 1378 | } |
1360 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); | 1379 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); |
1361 | return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp); | 1380 | return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); |
1362 | } | 1381 | } |
1363 | 1382 | ||
1364 | int r600_asic_reset(struct radeon_device *rdev) | 1383 | int r600_asic_reset(struct radeon_device *rdev) |
@@ -2346,28 +2365,13 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg) | |||
2346 | /* FIXME: implement */ | 2365 | /* FIXME: implement */ |
2347 | } | 2366 | } |
2348 | 2367 | ||
2349 | |||
2350 | bool r600_card_posted(struct radeon_device *rdev) | ||
2351 | { | ||
2352 | uint32_t reg; | ||
2353 | |||
2354 | /* first check CRTCs */ | ||
2355 | reg = RREG32(D1CRTC_CONTROL) | | ||
2356 | RREG32(D2CRTC_CONTROL); | ||
2357 | if (reg & CRTC_EN) | ||
2358 | return true; | ||
2359 | |||
2360 | /* then check MEM_SIZE, in case the crtcs are off */ | ||
2361 | if (RREG32(CONFIG_MEMSIZE)) | ||
2362 | return true; | ||
2363 | |||
2364 | return false; | ||
2365 | } | ||
2366 | |||
2367 | int r600_startup(struct radeon_device *rdev) | 2368 | int r600_startup(struct radeon_device *rdev) |
2368 | { | 2369 | { |
2369 | int r; | 2370 | int r; |
2370 | 2371 | ||
2372 | /* enable pcie gen2 link */ | ||
2373 | r600_pcie_gen2_enable(rdev); | ||
2374 | |||
2371 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 2375 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
2372 | r = r600_init_microcode(rdev); | 2376 | r = r600_init_microcode(rdev); |
2373 | if (r) { | 2377 | if (r) { |
@@ -2521,7 +2525,7 @@ int r600_init(struct radeon_device *rdev) | |||
2521 | if (r) | 2525 | if (r) |
2522 | return r; | 2526 | return r; |
2523 | /* Post card if necessary */ | 2527 | /* Post card if necessary */ |
2524 | if (!r600_card_posted(rdev)) { | 2528 | if (!radeon_card_posted(rdev)) { |
2525 | if (!rdev->bios) { | 2529 | if (!rdev->bios) { |
2526 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | 2530 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
2527 | return -EINVAL; | 2531 | return -EINVAL; |
@@ -2724,7 +2728,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev) | |||
2724 | 2728 | ||
2725 | /* Allocate ring buffer */ | 2729 | /* Allocate ring buffer */ |
2726 | if (rdev->ih.ring_obj == NULL) { | 2730 | if (rdev->ih.ring_obj == NULL) { |
2727 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, | 2731 | r = radeon_bo_create(rdev, rdev->ih.ring_size, |
2728 | PAGE_SIZE, true, | 2732 | PAGE_SIZE, true, |
2729 | RADEON_GEM_DOMAIN_GTT, | 2733 | RADEON_GEM_DOMAIN_GTT, |
2730 | &rdev->ih.ring_obj); | 2734 | &rdev->ih.ring_obj); |
@@ -3431,7 +3435,7 @@ restart_ih: | |||
3431 | if (wptr != rdev->ih.wptr) | 3435 | if (wptr != rdev->ih.wptr) |
3432 | goto restart_ih; | 3436 | goto restart_ih; |
3433 | if (queue_hotplug) | 3437 | if (queue_hotplug) |
3434 | queue_work(rdev->wq, &rdev->hotplug_work); | 3438 | schedule_work(&rdev->hotplug_work); |
3435 | rdev->ih.rptr = rptr; | 3439 | rdev->ih.rptr = rptr; |
3436 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | 3440 | WREG32(IH_RB_RPTR, rdev->ih.rptr); |
3437 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | 3441 | spin_unlock_irqrestore(&rdev->ih.lock, flags); |
@@ -3506,10 +3510,12 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) | |||
3506 | void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | 3510 | void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) |
3507 | { | 3511 | { |
3508 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 3512 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
3509 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL | 3513 | * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. |
3514 | * This seems to cause problems on some AGP cards. Just use the old | ||
3515 | * method for them. | ||
3510 | */ | 3516 | */ |
3511 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && | 3517 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
3512 | rdev->vram_scratch.ptr) { | 3518 | rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { |
3513 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; | 3519 | void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
3514 | u32 tmp; | 3520 | u32 tmp; |
3515 | 3521 | ||
@@ -3518,3 +3524,222 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) | |||
3518 | } else | 3524 | } else |
3519 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | 3525 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); |
3520 | } | 3526 | } |
3527 | |||
3528 | void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) | ||
3529 | { | ||
3530 | u32 link_width_cntl, mask, target_reg; | ||
3531 | |||
3532 | if (rdev->flags & RADEON_IS_IGP) | ||
3533 | return; | ||
3534 | |||
3535 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
3536 | return; | ||
3537 | |||
3538 | /* x2 cards have a special sequence */ | ||
3539 | if (ASIC_IS_X2(rdev)) | ||
3540 | return; | ||
3541 | |||
3542 | /* FIXME wait for idle */ | ||
3543 | |||
3544 | switch (lanes) { | ||
3545 | case 0: | ||
3546 | mask = RADEON_PCIE_LC_LINK_WIDTH_X0; | ||
3547 | break; | ||
3548 | case 1: | ||
3549 | mask = RADEON_PCIE_LC_LINK_WIDTH_X1; | ||
3550 | break; | ||
3551 | case 2: | ||
3552 | mask = RADEON_PCIE_LC_LINK_WIDTH_X2; | ||
3553 | break; | ||
3554 | case 4: | ||
3555 | mask = RADEON_PCIE_LC_LINK_WIDTH_X4; | ||
3556 | break; | ||
3557 | case 8: | ||
3558 | mask = RADEON_PCIE_LC_LINK_WIDTH_X8; | ||
3559 | break; | ||
3560 | case 12: | ||
3561 | mask = RADEON_PCIE_LC_LINK_WIDTH_X12; | ||
3562 | break; | ||
3563 | case 16: | ||
3564 | default: | ||
3565 | mask = RADEON_PCIE_LC_LINK_WIDTH_X16; | ||
3566 | break; | ||
3567 | } | ||
3568 | |||
3569 | link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | ||
3570 | |||
3571 | if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == | ||
3572 | (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) | ||
3573 | return; | ||
3574 | |||
3575 | if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS) | ||
3576 | return; | ||
3577 | |||
3578 | link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | | ||
3579 | RADEON_PCIE_LC_RECONFIG_NOW | | ||
3580 | R600_PCIE_LC_RENEGOTIATE_EN | | ||
3581 | R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); | ||
3582 | link_width_cntl |= mask; | ||
3583 | |||
3584 | WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3585 | |||
3586 | /* some northbridges can renegotiate the link rather than requiring | ||
3587 | * a complete re-config. | ||
3588 | * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.) | ||
3589 | */ | ||
3590 | if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT) | ||
3591 | link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT; | ||
3592 | else | ||
3593 | link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE; | ||
3594 | |||
3595 | WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl | | ||
3596 | RADEON_PCIE_LC_RECONFIG_NOW)); | ||
3597 | |||
3598 | if (rdev->family >= CHIP_RV770) | ||
3599 | target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX; | ||
3600 | else | ||
3601 | target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX; | ||
3602 | |||
3603 | /* wait for lane set to complete */ | ||
3604 | link_width_cntl = RREG32(target_reg); | ||
3605 | while (link_width_cntl == 0xffffffff) | ||
3606 | link_width_cntl = RREG32(target_reg); | ||
3607 | |||
3608 | } | ||
3609 | |||
3610 | int r600_get_pcie_lanes(struct radeon_device *rdev) | ||
3611 | { | ||
3612 | u32 link_width_cntl; | ||
3613 | |||
3614 | if (rdev->flags & RADEON_IS_IGP) | ||
3615 | return 0; | ||
3616 | |||
3617 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
3618 | return 0; | ||
3619 | |||
3620 | /* x2 cards have a special sequence */ | ||
3621 | if (ASIC_IS_X2(rdev)) | ||
3622 | return 0; | ||
3623 | |||
3624 | /* FIXME wait for idle */ | ||
3625 | |||
3626 | link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); | ||
3627 | |||
3628 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { | ||
3629 | case RADEON_PCIE_LC_LINK_WIDTH_X0: | ||
3630 | return 0; | ||
3631 | case RADEON_PCIE_LC_LINK_WIDTH_X1: | ||
3632 | return 1; | ||
3633 | case RADEON_PCIE_LC_LINK_WIDTH_X2: | ||
3634 | return 2; | ||
3635 | case RADEON_PCIE_LC_LINK_WIDTH_X4: | ||
3636 | return 4; | ||
3637 | case RADEON_PCIE_LC_LINK_WIDTH_X8: | ||
3638 | return 8; | ||
3639 | case RADEON_PCIE_LC_LINK_WIDTH_X16: | ||
3640 | default: | ||
3641 | return 16; | ||
3642 | } | ||
3643 | } | ||
3644 | |||
3645 | static void r600_pcie_gen2_enable(struct radeon_device *rdev) | ||
3646 | { | ||
3647 | u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; | ||
3648 | u16 link_cntl2; | ||
3649 | |||
3650 | if (radeon_pcie_gen2 == 0) | ||
3651 | return; | ||
3652 | |||
3653 | if (rdev->flags & RADEON_IS_IGP) | ||
3654 | return; | ||
3655 | |||
3656 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
3657 | return; | ||
3658 | |||
3659 | /* x2 cards have a special sequence */ | ||
3660 | if (ASIC_IS_X2(rdev)) | ||
3661 | return; | ||
3662 | |||
3663 | /* only RV6xx+ chips are supported */ | ||
3664 | if (rdev->family <= CHIP_R600) | ||
3665 | return; | ||
3666 | |||
3667 | /* 55 nm r6xx asics */ | ||
3668 | if ((rdev->family == CHIP_RV670) || | ||
3669 | (rdev->family == CHIP_RV620) || | ||
3670 | (rdev->family == CHIP_RV635)) { | ||
3671 | /* advertise upconfig capability */ | ||
3672 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3673 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
3674 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3675 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3676 | if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { | ||
3677 | lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; | ||
3678 | link_width_cntl &= ~(LC_LINK_WIDTH_MASK | | ||
3679 | LC_RECONFIG_ARC_MISSING_ESCAPE); | ||
3680 | link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; | ||
3681 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3682 | } else { | ||
3683 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
3684 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3685 | } | ||
3686 | } | ||
3687 | |||
3688 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3689 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && | ||
3690 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { | ||
3691 | |||
3692 | /* 55 nm r6xx asics */ | ||
3693 | if ((rdev->family == CHIP_RV670) || | ||
3694 | (rdev->family == CHIP_RV620) || | ||
3695 | (rdev->family == CHIP_RV635)) { | ||
3696 | WREG32(MM_CFGREGS_CNTL, 0x8); | ||
3697 | link_cntl2 = RREG32(0x4088); | ||
3698 | WREG32(MM_CFGREGS_CNTL, 0); | ||
3699 | /* not supported yet */ | ||
3700 | if (link_cntl2 & SELECTABLE_DEEMPHASIS) | ||
3701 | return; | ||
3702 | } | ||
3703 | |||
3704 | speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK; | ||
3705 | speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT); | ||
3706 | speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; | ||
3707 | speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; | ||
3708 | speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; | ||
3709 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3710 | |||
3711 | tmp = RREG32(0x541c); | ||
3712 | WREG32(0x541c, tmp | 0x8); | ||
3713 | WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); | ||
3714 | link_cntl2 = RREG16(0x4088); | ||
3715 | link_cntl2 &= ~TARGET_LINK_SPEED_MASK; | ||
3716 | link_cntl2 |= 0x2; | ||
3717 | WREG16(0x4088, link_cntl2); | ||
3718 | WREG32(MM_CFGREGS_CNTL, 0); | ||
3719 | |||
3720 | if ((rdev->family == CHIP_RV670) || | ||
3721 | (rdev->family == CHIP_RV620) || | ||
3722 | (rdev->family == CHIP_RV635)) { | ||
3723 | training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL); | ||
3724 | training_cntl &= ~LC_POINT_7_PLUS_EN; | ||
3725 | WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl); | ||
3726 | } else { | ||
3727 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3728 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; | ||
3729 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3730 | } | ||
3731 | |||
3732 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
3733 | speed_cntl |= LC_GEN2_EN_STRAP; | ||
3734 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
3735 | |||
3736 | } else { | ||
3737 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
3738 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ | ||
3739 | if (1) | ||
3740 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
3741 | else | ||
3742 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
3743 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
3744 | } | ||
3745 | } | ||
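r600_pcie_gen2_enable() repeats the same read-modify-write pattern on
the PCIE_P registers several times. As an illustration only (pcie_p_rmw
is a hypothetical helper, not part of the driver), each clear/set pair
could be written as:

	static inline void pcie_p_rmw(struct radeon_device *rdev, u32 reg,
				      u32 clr, u32 set)
	{
		u32 v = RREG32_PCIE_P(reg);	/* read the current value */

		v &= ~clr;			/* clear the requested bits */
		v |= set;			/* set the requested bits */
		WREG32_PCIE_P(reg, v);
	}

For example, the speed-control update above would collapse to one
pcie_p_rmw(rdev, PCIE_LC_SPEED_CNTL, ...) call, with the three mask
clears and the two bit sets passed as the clr and set arguments.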

diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index b5443fe1c1d1..846fae576399 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "radeon.h" | 27 | #include "radeon.h" |
28 | #include "radeon_reg.h" | 28 | #include "radeon_reg.h" |
29 | #include "radeon_asic.h" | ||
29 | #include "atom.h" | 30 | #include "atom.h" |
30 | 31 | ||
31 | #define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */ | 32 | #define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 86e5aa07f0db..16e211a614d7 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev) | |||
501 | obj_size += r6xx_ps_size * 4; | 501 | obj_size += r6xx_ps_size * 4; |
502 | obj_size = ALIGN(obj_size, 256); | 502 | obj_size = ALIGN(obj_size, 256); |
503 | 503 | ||
504 | r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | 504 | r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
505 | &rdev->r600_blit.shader_obj); | 505 | &rdev->r600_blit.shader_obj); |
506 | if (r) { | 506 | if (r) { |
507 | DRM_ERROR("r600 failed to allocate shader\n"); | 507 | DRM_ERROR("r600 failed to allocate shader\n"); |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 9bebac1ec006..4706294f0ae0 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -71,75 +71,164 @@ struct r600_cs_track { | |||
71 | u64 db_bo_mc; | 71 | u64 db_bo_mc; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc } | ||
75 | #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc } | ||
76 | #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 } | ||
77 | #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc } | ||
78 | #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 } | ||
79 | #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc } | ||
80 | #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 } | ||
81 | #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc } | ||
82 | |||
83 | struct gpu_formats { | ||
84 | unsigned blockwidth; | ||
85 | unsigned blockheight; | ||
86 | unsigned blocksize; | ||
87 | unsigned valid_color; | ||
88 | }; | ||
89 | |||
90 | static const struct gpu_formats color_formats_table[] = { | ||
91 | /* 8-bit */ | ||
92 | FMT_8_BIT(V_038004_COLOR_8, 1), | ||
93 | FMT_8_BIT(V_038004_COLOR_4_4, 1), | ||
94 | FMT_8_BIT(V_038004_COLOR_3_3_2, 1), | ||
95 | FMT_8_BIT(V_038004_FMT_1, 0), | ||
96 | |||
97 | /* 16-bit */ | ||
98 | FMT_16_BIT(V_038004_COLOR_16, 1), | ||
99 | FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1), | ||
100 | FMT_16_BIT(V_038004_COLOR_8_8, 1), | ||
101 | FMT_16_BIT(V_038004_COLOR_5_6_5, 1), | ||
102 | FMT_16_BIT(V_038004_COLOR_6_5_5, 1), | ||
103 | FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1), | ||
104 | FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1), | ||
105 | FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1), | ||
106 | |||
107 | /* 24-bit */ | ||
108 | FMT_24_BIT(V_038004_FMT_8_8_8), | ||
109 | |||
110 | /* 32-bit */ | ||
111 | FMT_32_BIT(V_038004_COLOR_32, 1), | ||
112 | FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), | ||
113 | FMT_32_BIT(V_038004_COLOR_16_16, 1), | ||
114 | FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1), | ||
115 | FMT_32_BIT(V_038004_COLOR_8_24, 1), | ||
116 | FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1), | ||
117 | FMT_32_BIT(V_038004_COLOR_24_8, 1), | ||
118 | FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1), | ||
119 | FMT_32_BIT(V_038004_COLOR_10_11_11, 1), | ||
120 | FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1), | ||
121 | FMT_32_BIT(V_038004_COLOR_11_11_10, 1), | ||
122 | FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1), | ||
123 | FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1), | ||
124 | FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1), | ||
125 | FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1), | ||
126 | FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0), | ||
127 | FMT_32_BIT(V_038004_FMT_32_AS_8, 0), | ||
128 | FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0), | ||
129 | |||
130 | /* 48-bit */ | ||
131 | FMT_48_BIT(V_038004_FMT_16_16_16), | ||
132 | FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT), | ||
133 | |||
134 | /* 64-bit */ | ||
135 | FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1), | ||
136 | FMT_64_BIT(V_038004_COLOR_32_32, 1), | ||
137 | FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1), | ||
138 | FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1), | ||
139 | FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1), | ||
140 | |||
141 | FMT_96_BIT(V_038004_FMT_32_32_32), | ||
142 | FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT), | ||
143 | |||
144 | /* 128-bit */ | ||
145 | FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1), | ||
146 | FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1), | ||
147 | |||
148 | [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 }, | ||
149 | [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 }, | ||
150 | |||
151 | /* block compressed formats */ | ||
152 | [V_038004_FMT_BC1] = { 4, 4, 8, 0 }, | ||
153 | [V_038004_FMT_BC2] = { 4, 4, 16, 0 }, | ||
154 | [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, | ||
155 | [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, | ||
156 | [V_038004_FMT_BC5] = { 4, 4, 16, 0 }, | ||
157 | |||
158 | }; | ||
159 | |||
160 | static inline bool fmt_is_valid_color(u32 format) | ||
161 | { | ||
162 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
163 | return false; | ||
164 | |||
165 | if (color_formats_table[format].valid_color) | ||
166 | return true; | ||
167 | |||
168 | return false; | ||
169 | } | ||
170 | |||
171 | static inline bool fmt_is_valid_texture(u32 format) | ||
172 | { | ||
173 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
174 | return false; | ||
175 | |||
176 | if (color_formats_table[format].blockwidth > 0) | ||
177 | return true; | ||
178 | |||
179 | return false; | ||
180 | } | ||
181 | |||
182 | static inline int fmt_get_blocksize(u32 format) | ||
183 | { | ||
184 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
185 | return 0; | ||
186 | |||
187 | return color_formats_table[format].blocksize; | ||
188 | } | ||
189 | |||
190 | static inline int fmt_get_nblocksx(u32 format, u32 w) | ||
191 | { | ||
192 | unsigned bw; | ||
193 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
194 | return 0; | ||
195 | |||
196 | bw = color_formats_table[format].blockwidth; | ||
197 | if (bw == 0) | ||
198 | return 0; | ||
199 | |||
200 | return (w + bw - 1) / bw; | ||
201 | } | ||
202 | |||
203 | static inline int fmt_get_nblocksy(u32 format, u32 h) | ||
204 | { | ||
205 | unsigned bh; | ||
206 | if (format >= ARRAY_SIZE(color_formats_table)) | ||
207 | return 0; | ||
208 | |||
209 | bh = color_formats_table[format].blockheight; | ||
210 | if (bh == 0) | ||
211 | return 0; | ||
212 | |||
213 | return (h + bh - 1) / bh; | ||
214 | } | ||
215 | |||
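To see the block math in action, consider a 256x256 BC1 texture: BC1
uses 4x4 blocks of 8 bytes, so the helpers above give 64 blocks in each
dimension and a level-0 size of 64 * 64 * 8 = 32768 bytes. An
illustrative check (not part of the patch):

	unsigned nbx = fmt_get_nblocksx(V_038004_FMT_BC1, 256); /* (256 + 3) / 4 = 64 */
	unsigned nby = fmt_get_nblocksy(V_038004_FMT_BC1, 256); /* (256 + 3) / 4 = 64 */
	unsigned sz  = nbx * nby * fmt_get_blocksize(V_038004_FMT_BC1); /* 32768 */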
74 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) | 216 | static inline int r600_bpe_from_format(u32 *bpe, u32 format) |
75 | { | 217 | { |
76 | switch (format) { | 218 | unsigned res; |
77 | case V_038004_COLOR_8: | 219 | if (format >= ARRAY_SIZE(color_formats_table)) |
78 | case V_038004_COLOR_4_4: | 220 | goto fail; |
79 | case V_038004_COLOR_3_3_2: | 221 | |
80 | case V_038004_FMT_1: | 222 | res = color_formats_table[format].blocksize; |
81 | *bpe = 1; | 223 | if (res == 0) |
82 | break; | 224 | goto fail; |
83 | case V_038004_COLOR_16: | 225 | |
84 | case V_038004_COLOR_16_FLOAT: | 226 | *bpe = res; |
85 | case V_038004_COLOR_8_8: | ||
86 | case V_038004_COLOR_5_6_5: | ||
87 | case V_038004_COLOR_6_5_5: | ||
88 | case V_038004_COLOR_1_5_5_5: | ||
89 | case V_038004_COLOR_4_4_4_4: | ||
90 | case V_038004_COLOR_5_5_5_1: | ||
91 | *bpe = 2; | ||
92 | break; | ||
93 | case V_038004_FMT_8_8_8: | ||
94 | *bpe = 3; | ||
95 | break; | ||
96 | case V_038004_COLOR_32: | ||
97 | case V_038004_COLOR_32_FLOAT: | ||
98 | case V_038004_COLOR_16_16: | ||
99 | case V_038004_COLOR_16_16_FLOAT: | ||
100 | case V_038004_COLOR_8_24: | ||
101 | case V_038004_COLOR_8_24_FLOAT: | ||
102 | case V_038004_COLOR_24_8: | ||
103 | case V_038004_COLOR_24_8_FLOAT: | ||
104 | case V_038004_COLOR_10_11_11: | ||
105 | case V_038004_COLOR_10_11_11_FLOAT: | ||
106 | case V_038004_COLOR_11_11_10: | ||
107 | case V_038004_COLOR_11_11_10_FLOAT: | ||
108 | case V_038004_COLOR_2_10_10_10: | ||
109 | case V_038004_COLOR_8_8_8_8: | ||
110 | case V_038004_COLOR_10_10_10_2: | ||
111 | case V_038004_FMT_5_9_9_9_SHAREDEXP: | ||
112 | case V_038004_FMT_32_AS_8: | ||
113 | case V_038004_FMT_32_AS_8_8: | ||
114 | *bpe = 4; | ||
115 | break; | ||
116 | case V_038004_COLOR_X24_8_32_FLOAT: | ||
117 | case V_038004_COLOR_32_32: | ||
118 | case V_038004_COLOR_32_32_FLOAT: | ||
119 | case V_038004_COLOR_16_16_16_16: | ||
120 | case V_038004_COLOR_16_16_16_16_FLOAT: | ||
121 | *bpe = 8; | ||
122 | break; | ||
123 | case V_038004_FMT_16_16_16: | ||
124 | case V_038004_FMT_16_16_16_FLOAT: | ||
125 | *bpe = 6; | ||
126 | break; | ||
127 | case V_038004_FMT_32_32_32: | ||
128 | case V_038004_FMT_32_32_32_FLOAT: | ||
129 | *bpe = 12; | ||
130 | break; | ||
131 | case V_038004_COLOR_32_32_32_32: | ||
132 | case V_038004_COLOR_32_32_32_32_FLOAT: | ||
133 | *bpe = 16; | ||
134 | break; | ||
135 | case V_038004_FMT_GB_GR: | ||
136 | case V_038004_FMT_BG_RG: | ||
137 | case V_038004_COLOR_INVALID: | ||
138 | default: | ||
139 | *bpe = 16; | ||
140 | return -EINVAL; | ||
141 | } | ||
142 | return 0; | 227 | return 0; |
228 | |||
229 | fail: | ||
230 | *bpe = 16; | ||
231 | return -EINVAL; | ||
143 | } | 232 | } |
144 | 233 | ||
145 | struct array_mode_checker { | 234 | struct array_mode_checker { |
@@ -148,7 +237,7 @@ struct array_mode_checker { | |||
148 | u32 nbanks; | 237 | u32 nbanks; |
149 | u32 npipes; | 238 | u32 npipes; |
150 | u32 nsamples; | 239 | u32 nsamples; |
151 | u32 bpe; | 240 | u32 blocksize; |
152 | }; | 241 | }; |
153 | 242 | ||
154 | /* returns alignment in pixels for pitch/height/depth and bytes for base */ | 243 | /* returns alignment in pixels for pitch/height/depth and bytes for base */ |
@@ -162,7 +251,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value | |||
162 | u32 tile_height = 8; | 251 | u32 tile_height = 8; |
163 | u32 macro_tile_width = values->nbanks; | 252 | u32 macro_tile_width = values->nbanks; |
164 | u32 macro_tile_height = values->npipes; | 253 | u32 macro_tile_height = values->npipes; |
165 | u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples; | 254 | u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples; |
166 | u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; | 255 | u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; |
167 | 256 | ||
168 | switch (values->array_mode) { | 257 | switch (values->array_mode) { |
@@ -174,7 +263,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value | |||
174 | *base_align = 1; | 263 | *base_align = 1; |
175 | break; | 264 | break; |
176 | case ARRAY_LINEAR_ALIGNED: | 265 | case ARRAY_LINEAR_ALIGNED: |
177 | *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe)); | 266 | *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); |
178 | *height_align = tile_height; | 267 | *height_align = tile_height; |
179 | *depth_align = 1; | 268 | *depth_align = 1; |
180 | *base_align = values->group_size; | 269 | *base_align = values->group_size; |
@@ -182,7 +271,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value | |||
182 | case ARRAY_1D_TILED_THIN1: | 271 | case ARRAY_1D_TILED_THIN1: |
183 | *pitch_align = max((u32)tile_width, | 272 | *pitch_align = max((u32)tile_width, |
184 | (u32)(values->group_size / | 273 | (u32)(values->group_size / |
185 | (tile_height * values->bpe * values->nsamples))); | 274 | (tile_height * values->blocksize * values->nsamples))); |
186 | *height_align = tile_height; | 275 | *height_align = tile_height; |
187 | *depth_align = 1; | 276 | *depth_align = 1; |
188 | *base_align = values->group_size; | 277 | *base_align = values->group_size; |
@@ -190,12 +279,12 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value | |||
190 | case ARRAY_2D_TILED_THIN1: | 279 | case ARRAY_2D_TILED_THIN1: |
191 | *pitch_align = max((u32)macro_tile_width, | 280 | *pitch_align = max((u32)macro_tile_width, |
192 | (u32)(((values->group_size / tile_height) / | 281 | (u32)(((values->group_size / tile_height) / |
193 | (values->bpe * values->nsamples)) * | 282 | (values->blocksize * values->nsamples)) * |
194 | values->nbanks)) * tile_width; | 283 | values->nbanks)) * tile_width; |
195 | *height_align = macro_tile_height * tile_height; | 284 | *height_align = macro_tile_height * tile_height; |
196 | *depth_align = 1; | 285 | *depth_align = 1; |
197 | *base_align = max(macro_tile_bytes, | 286 | *base_align = max(macro_tile_bytes, |
198 | (*pitch_align) * values->bpe * (*height_align) * values->nsamples); | 287 | (*pitch_align) * values->blocksize * (*height_align) * values->nsamples); |
199 | break; | 288 | break; |
200 | default: | 289 | default: |
201 | return -EINVAL; | 290 | return -EINVAL; |
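A worked example of the 2D-tiled case, using illustrative numbers
rather than values from the patch: with group_size = 256, blocksize = 4,
nsamples = 1, nbanks = 8 and npipes = 2, the alignments come out as:

	/*
	 * tile_bytes       = 8 * 8 * 4 * 1 = 256
	 * macro_tile_bytes = 8 * 2 * 256   = 4096
	 * pitch_align      = max(8, ((256 / 8) / (4 * 1)) * 8) * 8 = 64 * 8 = 512
	 * height_align     = 2 * 8 = 16
	 * base_align       = max(4096, 512 * 4 * 16 * 1) = 32768
	 */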
@@ -234,21 +323,22 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
234 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | 323 | static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) |
235 | { | 324 | { |
236 | struct r600_cs_track *track = p->track; | 325 | struct r600_cs_track *track = p->track; |
237 | u32 bpe = 0, slice_tile_max, size, tmp; | 326 | u32 slice_tile_max, size, tmp; |
238 | u32 height, height_align, pitch, pitch_align, depth_align; | 327 | u32 height, height_align, pitch, pitch_align, depth_align; |
239 | u64 base_offset, base_align; | 328 | u64 base_offset, base_align; |
240 | struct array_mode_checker array_check; | 329 | struct array_mode_checker array_check; |
241 | volatile u32 *ib = p->ib->ptr; | 330 | volatile u32 *ib = p->ib->ptr; |
242 | unsigned array_mode; | 331 | unsigned array_mode; |
243 | 332 | u32 format; | |
244 | if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { | 333 | if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { |
245 | dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); | 334 | dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); |
246 | return -EINVAL; | 335 | return -EINVAL; |
247 | } | 336 | } |
248 | size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; | 337 | size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; |
249 | if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) { | 338 | format = G_0280A0_FORMAT(track->cb_color_info[i]); |
339 | if (!fmt_is_valid_color(format)) { | ||
250 | dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", | 340 | dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", |
251 | __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]), | 341 | __func__, __LINE__, format, |
252 | i, track->cb_color_info[i]); | 342 | i, track->cb_color_info[i]); |
253 | return -EINVAL; | 343 | return -EINVAL; |
254 | } | 344 | } |
@@ -267,7 +357,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
267 | array_check.nbanks = track->nbanks; | 357 | array_check.nbanks = track->nbanks; |
268 | array_check.npipes = track->npipes; | 358 | array_check.npipes = track->npipes; |
269 | array_check.nsamples = track->nsamples; | 359 | array_check.nsamples = track->nsamples; |
270 | array_check.bpe = bpe; | 360 | array_check.blocksize = fmt_get_blocksize(format); |
271 | if (r600_get_array_mode_alignment(&array_check, | 361 | if (r600_get_array_mode_alignment(&array_check, |
272 | &pitch_align, &height_align, &depth_align, &base_align)) { | 362 | &pitch_align, &height_align, &depth_align, &base_align)) { |
273 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, | 363 | dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, |
@@ -310,16 +400,15 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
310 | } | 400 | } |
311 | 401 | ||
312 | /* check offset */ | 402 | /* check offset */ |
313 | tmp = height * pitch * bpe; | 403 | tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format); |
314 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | 404 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { |
315 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { | 405 | if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { |
316 | /* the initial DDX does bad things with the CB size occasionally */ | 406 | /* the initial DDX does bad things with the CB size occasionally */ |
317 | /* it rounds up height too far for slice tile max but the BO is smaller */ | 407 | /* it rounds up height too far for slice tile max but the BO is smaller */ |
318 | tmp = (height - 7) * pitch * bpe; | 408 | /* r600c,g also seem to flush at bad times in some apps resulting in |
319 | if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { | 409 | * bogus values here. So for linear just allow anything to avoid breaking |
320 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); | 410 | * broken userspace. |
321 | return -EINVAL; | 411 | */ |
322 | } | ||
323 | } else { | 412 | } else { |
324 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); | 413 | dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); |
325 | return -EINVAL; | 414 | return -EINVAL; |
@@ -433,7 +522,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
433 | array_check.nbanks = track->nbanks; | 522 | array_check.nbanks = track->nbanks; |
434 | array_check.npipes = track->npipes; | 523 | array_check.npipes = track->npipes; |
435 | array_check.nsamples = track->nsamples; | 524 | array_check.nsamples = track->nsamples; |
436 | array_check.bpe = bpe; | 525 | array_check.blocksize = bpe; |
437 | if (r600_get_array_mode_alignment(&array_check, | 526 | if (r600_get_array_mode_alignment(&array_check, |
438 | &pitch_align, &height_align, &depth_align, &base_align)) { | 527 | &pitch_align, &height_align, &depth_align, &base_align)) { |
439 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, | 528 | dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, |
@@ -1108,39 +1197,61 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx | |||
1108 | return 0; | 1197 | return 0; |
1109 | } | 1198 | } |
1110 | 1199 | ||
1111 | static inline unsigned minify(unsigned size, unsigned levels) | 1200 | static inline unsigned mip_minify(unsigned size, unsigned level) |
1112 | { | 1201 | { |
1113 | size = size >> levels; | 1202 | unsigned val; |
1114 | if (size < 1) | 1203 | |
1115 | size = 1; | 1204 | val = max(1U, size >> level); |
1116 | return size; | 1205 | if (level > 0) |
1206 | val = roundup_pow_of_two(val); | ||
1207 | return val; | ||
1117 | } | 1208 | } |
1118 | 1209 | ||
1119 | static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels, | 1210 | static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, |
1120 | unsigned w0, unsigned h0, unsigned d0, unsigned bpe, | 1211 | unsigned w0, unsigned h0, unsigned d0, unsigned format, |
1121 | unsigned pitch_align, | 1212 | unsigned block_align, unsigned height_align, unsigned base_align, |
1122 | unsigned *l0_size, unsigned *mipmap_size) | 1213 | unsigned *l0_size, unsigned *mipmap_size) |
1123 | { | 1214 | { |
1124 | unsigned offset, i, level, face; | 1215 | unsigned offset, i, level; |
1125 | unsigned width, height, depth, rowstride, size; | 1216 | unsigned width, height, depth, size; |
1126 | 1217 | unsigned blocksize; | |
1127 | w0 = minify(w0, 0); | 1218 | unsigned nbx, nby; |
1128 | h0 = minify(h0, 0); | 1219 | unsigned nlevels = llevel - blevel + 1; |
1129 | d0 = minify(d0, 0); | 1220 | |
1221 | *l0_size = -1; | ||
1222 | blocksize = fmt_get_blocksize(format); | ||
1223 | |||
1224 | w0 = mip_minify(w0, 0); | ||
1225 | h0 = mip_minify(h0, 0); | ||
1226 | d0 = mip_minify(d0, 0); | ||
1130 | for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { | 1227 | for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { |
1131 | width = minify(w0, i); | 1228 | width = mip_minify(w0, i); |
1132 | height = minify(h0, i); | 1229 | nbx = fmt_get_nblocksx(format, width); |
1133 | depth = minify(d0, i); | 1230 | |
1134 | for(face = 0; face < nfaces; face++) { | 1231 | nbx = round_up(nbx, block_align); |
1135 | rowstride = ALIGN((width * bpe), pitch_align); | 1232 | |
1136 | size = height * rowstride * depth; | 1233 | height = mip_minify(h0, i); |
1137 | offset += size; | 1234 | nby = fmt_get_nblocksy(format, height); |
1138 | offset = (offset + 0x1f) & ~0x1f; | 1235 | nby = round_up(nby, height_align); |
1139 | } | 1236 | |
1237 | depth = mip_minify(d0, i); | ||
1238 | |||
1239 | size = nbx * nby * blocksize; | ||
1240 | if (nfaces) | ||
1241 | size *= nfaces; | ||
1242 | else | ||
1243 | size *= depth; | ||
1244 | |||
1245 | if (i == 0) | ||
1246 | *l0_size = size; | ||
1247 | |||
1248 | if (i == 0 || i == 1) | ||
1249 | offset = round_up(offset, base_align); | ||
1250 | |||
1251 | offset += size; | ||
1140 | } | 1252 | } |
1141 | *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0; | ||
1142 | *mipmap_size = offset; | 1253 | *mipmap_size = offset; |
1143 | if (!nlevels) | 1254 | if (llevel == 0) |
1144 | *mipmap_size = *l0_size; | 1255 | *mipmap_size = *l0_size; |
1145 | if (!blevel) | 1256 | if (!blevel) |
1146 | *mipmap_size -= *l0_size; | 1257 | *mipmap_size -= *l0_size; |
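The round-up-to-power-of-two in mip_minify() only applies above the
base level, which matters for NPOT textures. Illustrative values for a
100-pixel-wide texture:

	mip_minify(100, 0);	/* = 100: the base level keeps its NPOT size */
	mip_minify(100, 1);	/* = 64: max(1, 100 >> 1) = 50, rounded up */
	mip_minify(100, 2);	/* = 32: max(1, 100 >> 2) = 25, rounded up */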
@@ -1164,11 +1275,13 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1164 | u32 tiling_flags) | 1275 | u32 tiling_flags) |
1165 | { | 1276 | { |
1166 | struct r600_cs_track *track = p->track; | 1277 | struct r600_cs_track *track = p->track; |
1167 | u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; | 1278 | u32 nfaces, llevel, blevel, w0, h0, d0; |
1168 | u32 word0, word1, l0_size, mipmap_size; | 1279 | u32 word0, word1, l0_size, mipmap_size; |
1169 | u32 height_align, pitch, pitch_align, depth_align; | 1280 | u32 height_align, pitch, pitch_align, depth_align; |
1281 | u32 array = 0, barray, larray; | ||
1170 | u64 base_align; | 1282 | u64 base_align; |
1171 | struct array_mode_checker array_check; | 1283 | struct array_mode_checker array_check; |
1284 | u32 format; | ||
1172 | 1285 | ||
1173 | /* on legacy kernel we don't perform advanced check */ | 1286 | /* on legacy kernel we don't perform advanced check */ |
1174 | if (p->rdev == NULL) | 1287 | if (p->rdev == NULL) |
@@ -1194,19 +1307,25 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1194 | case V_038000_SQ_TEX_DIM_3D: | 1307 | case V_038000_SQ_TEX_DIM_3D: |
1195 | break; | 1308 | break; |
1196 | case V_038000_SQ_TEX_DIM_CUBEMAP: | 1309 | case V_038000_SQ_TEX_DIM_CUBEMAP: |
1197 | nfaces = 6; | 1310 | if (p->family >= CHIP_RV770) |
1311 | nfaces = 8; | ||
1312 | else | ||
1313 | nfaces = 6; | ||
1198 | break; | 1314 | break; |
1199 | case V_038000_SQ_TEX_DIM_1D_ARRAY: | 1315 | case V_038000_SQ_TEX_DIM_1D_ARRAY: |
1200 | case V_038000_SQ_TEX_DIM_2D_ARRAY: | 1316 | case V_038000_SQ_TEX_DIM_2D_ARRAY: |
1317 | array = 1; | ||
1318 | break; | ||
1201 | case V_038000_SQ_TEX_DIM_2D_MSAA: | 1319 | case V_038000_SQ_TEX_DIM_2D_MSAA: |
1202 | case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: | 1320 | case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: |
1203 | default: | 1321 | default: |
1204 | dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); | 1322 | dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); |
1205 | return -EINVAL; | 1323 | return -EINVAL; |
1206 | } | 1324 | } |
1207 | if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) { | 1325 | format = G_038004_DATA_FORMAT(word1); |
1326 | if (!fmt_is_valid_texture(format)) { | ||
1208 | dev_warn(p->dev, "%s:%d texture invalid format %d\n", | 1327 | dev_warn(p->dev, "%s:%d texture invalid format %d\n", |
1209 | __func__, __LINE__, G_038004_DATA_FORMAT(word1)); | 1328 | __func__, __LINE__, format); |
1210 | return -EINVAL; | 1329 | return -EINVAL; |
1211 | } | 1330 | } |
1212 | 1331 | ||
@@ -1217,7 +1336,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1217 | array_check.nbanks = track->nbanks; | 1336 | array_check.nbanks = track->nbanks; |
1218 | array_check.npipes = track->npipes; | 1337 | array_check.npipes = track->npipes; |
1219 | array_check.nsamples = 1; | 1338 | array_check.nsamples = 1; |
1220 | array_check.bpe = bpe; | 1339 | array_check.blocksize = fmt_get_blocksize(format); |
1221 | if (r600_get_array_mode_alignment(&array_check, | 1340 | if (r600_get_array_mode_alignment(&array_check, |
1222 | &pitch_align, &height_align, &depth_align, &base_align)) { | 1341 | &pitch_align, &height_align, &depth_align, &base_align)) { |
1223 | dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", | 1342 | dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", |
@@ -1246,22 +1365,29 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i | |||
1246 | word0 = radeon_get_ib_value(p, idx + 4); | 1365 | word0 = radeon_get_ib_value(p, idx + 4); |
1247 | word1 = radeon_get_ib_value(p, idx + 5); | 1366 | word1 = radeon_get_ib_value(p, idx + 5); |
1248 | blevel = G_038010_BASE_LEVEL(word0); | 1367 | blevel = G_038010_BASE_LEVEL(word0); |
1249 | nlevels = G_038014_LAST_LEVEL(word1); | 1368 | llevel = G_038014_LAST_LEVEL(word1); |
1250 | r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, | 1369 | if (array == 1) { |
1251 | (pitch_align * bpe), | 1370 | barray = G_038014_BASE_ARRAY(word1); |
1371 | larray = G_038014_LAST_ARRAY(word1); | ||
1372 | |||
1373 | nfaces = larray - barray + 1; | ||
1374 | } | ||
1375 | r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format, | ||
1376 | pitch_align, height_align, base_align, | ||
1252 | &l0_size, &mipmap_size); | 1377 | &l0_size, &mipmap_size); |
1253 | /* using get ib will give us the offset into the texture bo */ | 1378 | /* using get ib will give us the offset into the texture bo */ |
1254 | word0 = radeon_get_ib_value(p, idx + 2) << 8; | 1379 | word0 = radeon_get_ib_value(p, idx + 2) << 8; |
1255 | if ((l0_size + word0) > radeon_bo_size(texture)) { | 1380 | if ((l0_size + word0) > radeon_bo_size(texture)) { |
1256 | dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n", | 1381 | dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n", |
1257 | w0, h0, bpe, word0, l0_size, radeon_bo_size(texture)); | 1382 | w0, h0, format, word0, l0_size, radeon_bo_size(texture)); |
1383 | dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); | ||
1258 | return -EINVAL; | 1384 | return -EINVAL; |
1259 | } | 1385 | } |
1260 | /* using get ib will give us the offset into the mipmap bo */ | 1386 | /* using get ib will give us the offset into the mipmap bo */ |
1261 | word0 = radeon_get_ib_value(p, idx + 3) << 8; | 1387 | word0 = radeon_get_ib_value(p, idx + 3) << 8; |
1262 | if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { | 1388 | if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { |
1263 | /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", | 1389 | /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", |
1264 | w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/ | 1390 | w0, h0, format, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/ |
1265 | } | 1391 | } |
1266 | return 0; | 1392 | return 0; |
1267 | } | 1393 | } |
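For the array-texture path added above, the face count comes from the
BASE_ARRAY/LAST_ARRAY fields rather than from depth. With illustrative
field values:

	barray = 0;
	larray = 5;
	nfaces = larray - barray + 1;	/* 6 slices */

r600_texture_size() then multiplies each level's nbx * nby * blocksize
by 6 instead of by the minified depth.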
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index e6a58ed48dcf..50db6d62eec2 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "drmP.h" | 26 | #include "drmP.h" |
27 | #include "radeon_drm.h" | 27 | #include "radeon_drm.h" |
28 | #include "radeon.h" | 28 | #include "radeon.h" |
29 | #include "radeon_asic.h" | ||
29 | #include "atom.h" | 30 | #include "atom.h" |
30 | 31 | ||
31 | /* | 32 | /* |
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index d84612ae47e0..f869897c7456 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h | |||
@@ -81,11 +81,16 @@ | |||
81 | #define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 | 81 | #define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 |
82 | #define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 | 82 | #define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 |
83 | 83 | ||
84 | 84 | #define R600_D1GRPH_SWAP_CONTROL 0x610C | |
85 | # define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0) | ||
86 | # define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0) | ||
87 | # define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0) | ||
88 | # define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0) | ||
85 | 89 | ||
86 | #define R600_HDP_NONSURFACE_BASE 0x2c04 | 90 | #define R600_HDP_NONSURFACE_BASE 0x2c04 |
87 | 91 | ||
88 | #define R600_BUS_CNTL 0x5420 | 92 | #define R600_BUS_CNTL 0x5420 |
93 | # define R600_BIOS_ROM_DIS (1 << 1) | ||
89 | #define R600_CONFIG_CNTL 0x5424 | 94 | #define R600_CONFIG_CNTL 0x5424 |
90 | #define R600_CONFIG_MEMSIZE 0x5428 | 95 | #define R600_CONFIG_MEMSIZE 0x5428 |
91 | #define R600_CONFIG_F0_BASE 0x542C | 96 | #define R600_CONFIG_F0_BASE 0x542C |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index c89cfa8e0c05..d1f598663da7 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -737,6 +737,45 @@ | |||
737 | # define DxGRPH_PFLIP_INT_MASK (1 << 0) | 737 | # define DxGRPH_PFLIP_INT_MASK (1 << 0) |
738 | # define DxGRPH_PFLIP_INT_TYPE (1 << 8) | 738 | # define DxGRPH_PFLIP_INT_TYPE (1 << 8) |
739 | 739 | ||
740 | /* PCIE link stuff */ | ||
741 | #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ | ||
742 | # define LC_POINT_7_PLUS_EN (1 << 6) | ||
743 | #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ | ||
744 | # define LC_LINK_WIDTH_SHIFT 0 | ||
745 | # define LC_LINK_WIDTH_MASK 0x7 | ||
746 | # define LC_LINK_WIDTH_X0 0 | ||
747 | # define LC_LINK_WIDTH_X1 1 | ||
748 | # define LC_LINK_WIDTH_X2 2 | ||
749 | # define LC_LINK_WIDTH_X4 3 | ||
750 | # define LC_LINK_WIDTH_X8 4 | ||
751 | # define LC_LINK_WIDTH_X16 6 | ||
752 | # define LC_LINK_WIDTH_RD_SHIFT 4 | ||
753 | # define LC_LINK_WIDTH_RD_MASK 0x70 | ||
754 | # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) | ||
755 | # define LC_RECONFIG_NOW (1 << 8) | ||
756 | # define LC_RENEGOTIATION_SUPPORT (1 << 9) | ||
757 | # define LC_RENEGOTIATE_EN (1 << 10) | ||
758 | # define LC_SHORT_RECONFIG_EN (1 << 11) | ||
759 | # define LC_UPCONFIGURE_SUPPORT (1 << 12) | ||
760 | # define LC_UPCONFIGURE_DIS (1 << 13) | ||
761 | #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ | ||
762 | # define LC_GEN2_EN_STRAP (1 << 0) | ||
763 | # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) | ||
764 | # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) | ||
765 | # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) | ||
766 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) | ||
767 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 8 | ||
768 | # define LC_CURRENT_DATA_RATE (1 << 11) | ||
769 | # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) | ||
770 | # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) | ||
771 | # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) | ||
772 | # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) | ||
773 | #define MM_CFGREGS_CNTL 0x544c | ||
774 | # define MM_WR_TO_CFG_EN (1 << 3) | ||
775 | #define LINK_CNTL2 0x88 /* F0 */ | ||
776 | # define TARGET_LINK_SPEED_MASK (0xf << 0) | ||
777 | # define SELECTABLE_DEEMPHASIS (1 << 6) | ||
778 | |||
740 | /* | 779 | /* |
741 | * PM4 | 780 | * PM4 |
742 | */ | 781 | */ |
@@ -1264,6 +1303,11 @@ | |||
1264 | #define V_038004_FMT_16_16_16_FLOAT 0x0000002E | 1303 | #define V_038004_FMT_16_16_16_FLOAT 0x0000002E |
1265 | #define V_038004_FMT_32_32_32 0x0000002F | 1304 | #define V_038004_FMT_32_32_32 0x0000002F |
1266 | #define V_038004_FMT_32_32_32_FLOAT 0x00000030 | 1305 | #define V_038004_FMT_32_32_32_FLOAT 0x00000030 |
1306 | #define V_038004_FMT_BC1 0x00000031 | ||
1307 | #define V_038004_FMT_BC2 0x00000032 | ||
1308 | #define V_038004_FMT_BC3 0x00000033 | ||
1309 | #define V_038004_FMT_BC4 0x00000034 | ||
1310 | #define V_038004_FMT_BC5 0x00000035 | ||
1267 | #define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 | 1311 | #define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 |
1268 | #define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) | 1312 | #define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) |
1269 | #define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) | 1313 | #define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) |
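The S_/G_ macro pairs in this header follow the usual pack/extract
convention, so a field value round-trips through them. A small sketch
with an illustrative value:

	u32 word4 = S_038010_FORMAT_COMP_X(2);		/* pack: (2 & 0x3) << 0 */
	u32 comp  = G_038010_FORMAT_COMP_X(word4);	/* extract: comp == 2 */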
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 431d4186ddf0..55fefe763965 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -92,6 +92,7 @@ extern int radeon_tv; | |||
92 | extern int radeon_audio; | 92 | extern int radeon_audio; |
93 | extern int radeon_disp_priority; | 93 | extern int radeon_disp_priority; |
94 | extern int radeon_hw_i2c; | 94 | extern int radeon_hw_i2c; |
95 | extern int radeon_pcie_gen2; | ||
95 | 96 | ||
96 | /* | 97 | /* |
97 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 98 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
@@ -178,10 +179,10 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev); | |||
178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 179 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
179 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | 180 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); |
180 | void rs690_pm_info(struct radeon_device *rdev); | 181 | void rs690_pm_info(struct radeon_device *rdev); |
181 | extern u32 rv6xx_get_temp(struct radeon_device *rdev); | 182 | extern int rv6xx_get_temp(struct radeon_device *rdev); |
182 | extern u32 rv770_get_temp(struct radeon_device *rdev); | 183 | extern int rv770_get_temp(struct radeon_device *rdev); |
183 | extern u32 evergreen_get_temp(struct radeon_device *rdev); | 184 | extern int evergreen_get_temp(struct radeon_device *rdev); |
184 | extern u32 sumo_get_temp(struct radeon_device *rdev); | 185 | extern int sumo_get_temp(struct radeon_device *rdev); |
185 | 186 | ||
186 | /* | 187 | /* |
187 | * Fences. | 188 | * Fences. |
@@ -257,8 +258,9 @@ struct radeon_bo { | |||
257 | int surface_reg; | 258 | int surface_reg; |
258 | /* Constant after initialization */ | 259 | /* Constant after initialization */ |
259 | struct radeon_device *rdev; | 260 | struct radeon_device *rdev; |
260 | struct drm_gem_object *gobj; | 261 | struct drm_gem_object gem_base; |
261 | }; | 262 | }; |
263 | #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) | ||
262 | 264 | ||
263 | struct radeon_bo_list { | 265 | struct radeon_bo_list { |
264 | struct ttm_validate_buffer tv; | 266 | struct ttm_validate_buffer tv; |
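With drm_gem_object embedded as gem_base, converting a GEM pointer back
to its radeon_bo becomes a container_of() instead of a chase through
the old ->gobj pointer. A minimal sketch (example_put is hypothetical;
radeon_bo_unref() is used purely for illustration and assumes the
caller held a reference):

	static void example_put(struct drm_gem_object *gobj)
	{
		/* gem_to_radeon_bo() recovers the bo that embeds gem_base */
		struct radeon_bo *rbo = gem_to_radeon_bo(gobj);

		radeon_bo_unref(&rbo);
	}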
@@ -287,6 +289,15 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | |||
287 | uint64_t *gpu_addr); | 289 | uint64_t *gpu_addr); |
288 | void radeon_gem_object_unpin(struct drm_gem_object *obj); | 290 | void radeon_gem_object_unpin(struct drm_gem_object *obj); |
289 | 291 | ||
292 | int radeon_mode_dumb_create(struct drm_file *file_priv, | ||
293 | struct drm_device *dev, | ||
294 | struct drm_mode_create_dumb *args); | ||
295 | int radeon_mode_dumb_mmap(struct drm_file *filp, | ||
296 | struct drm_device *dev, | ||
297 | uint32_t handle, uint64_t *offset_p); | ||
298 | int radeon_mode_dumb_destroy(struct drm_file *file_priv, | ||
299 | struct drm_device *dev, | ||
300 | uint32_t handle); | ||
290 | 301 | ||
291 | /* | 302 | /* |
292 | * GART structures, functions & helpers | 303 | * GART structures, functions & helpers |
@@ -318,6 +329,7 @@ struct radeon_gart { | |||
318 | union radeon_gart_table table; | 329 | union radeon_gart_table table; |
319 | struct page **pages; | 330 | struct page **pages; |
320 | dma_addr_t *pages_addr; | 331 | dma_addr_t *pages_addr; |
332 | bool *ttm_alloced; | ||
321 | bool ready; | 333 | bool ready; |
322 | }; | 334 | }; |
323 | 335 | ||
@@ -330,7 +342,8 @@ void radeon_gart_fini(struct radeon_device *rdev); | |||
330 | void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | 342 | void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, |
331 | int pages); | 343 | int pages); |
332 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | 344 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
333 | int pages, struct page **pagelist); | 345 | int pages, struct page **pagelist, |
346 | dma_addr_t *dma_addr); | ||
334 | 347 | ||
335 | 348 | ||
336 | /* | 349 | /* |
@@ -739,6 +752,7 @@ enum radeon_int_thermal_type { | |||
739 | THERMAL_TYPE_RV770, | 752 | THERMAL_TYPE_RV770, |
740 | THERMAL_TYPE_EVERGREEN, | 753 | THERMAL_TYPE_EVERGREEN, |
741 | THERMAL_TYPE_SUMO, | 754 | THERMAL_TYPE_SUMO, |
755 | THERMAL_TYPE_NI, | ||
742 | }; | 756 | }; |
743 | 757 | ||
744 | struct radeon_voltage { | 758 | struct radeon_voltage { |
@@ -810,8 +824,7 @@ struct radeon_pm { | |||
810 | fixed20_12 sclk; | 824 | fixed20_12 sclk; |
811 | fixed20_12 mclk; | 825 | fixed20_12 mclk; |
812 | fixed20_12 needed_bandwidth; | 826 | fixed20_12 needed_bandwidth; |
813 | /* XXX: use a define for num power modes */ | 827 | struct radeon_power_state *power_state; |
814 | struct radeon_power_state power_state[8]; | ||
815 | /* number of valid power states */ | 828 | /* number of valid power states */ |
816 | int num_power_states; | 829 | int num_power_states; |
817 | int current_power_state_index; | 830 | int current_power_state_index; |
@@ -822,6 +835,9 @@ struct radeon_pm { | |||
822 | u32 current_sclk; | 835 | u32 current_sclk; |
823 | u32 current_mclk; | 836 | u32 current_mclk; |
824 | u32 current_vddc; | 837 | u32 current_vddc; |
838 | u32 default_sclk; | ||
839 | u32 default_mclk; | ||
840 | u32 default_vddc; | ||
825 | struct radeon_i2c_chan *i2c_bus; | 841 | struct radeon_i2c_chan *i2c_bus; |
826 | /* selected pm method */ | 842 | /* selected pm method */ |
827 | enum radeon_pm_method pm_method; | 843 | enum radeon_pm_method pm_method; |
@@ -1031,6 +1047,7 @@ struct evergreen_asic { | |||
1031 | unsigned tiling_npipes; | 1047 | unsigned tiling_npipes; |
1032 | unsigned tiling_group_size; | 1048 | unsigned tiling_group_size; |
1033 | unsigned tile_config; | 1049 | unsigned tile_config; |
1050 | struct r100_gpu_lockup lockup; | ||
1034 | }; | 1051 | }; |
1035 | 1052 | ||
1036 | union radeon_asic_config { | 1053 | union radeon_asic_config { |
@@ -1147,11 +1164,11 @@ struct radeon_device { | |||
1147 | const struct firmware *me_fw; /* all family ME firmware */ | 1164 | const struct firmware *me_fw; /* all family ME firmware */ |
1148 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ | 1165 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
1149 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | 1166 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ |
1167 | const struct firmware *mc_fw; /* NI MC firmware */ | ||
1150 | struct r600_blit r600_blit; | 1168 | struct r600_blit r600_blit; |
1151 | struct r700_vram_scratch vram_scratch; | 1169 | struct r700_vram_scratch vram_scratch; |
1152 | int msi_enabled; /* msi enabled */ | 1170 | int msi_enabled; /* msi enabled */ |
1153 | struct r600_ih ih; /* r6/700 interrupt ring */ | 1171 | struct r600_ih ih; /* r6/700 interrupt ring */ |
1154 | struct workqueue_struct *wq; | ||
1155 | struct work_struct hotplug_work; | 1172 | struct work_struct hotplug_work; |
1156 | int num_crtc; /* number of crtcs */ | 1173 | int num_crtc; /* number of crtcs */ |
1157 | struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ | 1174 | struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ |
@@ -1166,10 +1183,10 @@ struct radeon_device { | |||
1166 | uint8_t audio_status_bits; | 1183 | uint8_t audio_status_bits; |
1167 | uint8_t audio_category_code; | 1184 | uint8_t audio_category_code; |
1168 | 1185 | ||
1169 | bool powered_down; | ||
1170 | struct notifier_block acpi_nb; | 1186 | struct notifier_block acpi_nb; |
1171 | /* only one userspace can use Hyperz features at a time */ | 1187 | /* only one userspace can use Hyperz features or CMASK at a time */ |
1172 | struct drm_file *hyperz_filp; | 1188 | struct drm_file *hyperz_filp; |
1189 | struct drm_file *cmask_filp; | ||
1173 | /* i2c buses */ | 1190 | /* i2c buses */ |
1174 | struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; | 1191 | struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; |
1175 | }; | 1192 | }; |
@@ -1181,19 +1198,6 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1181 | void radeon_device_fini(struct radeon_device *rdev); | 1198 | void radeon_device_fini(struct radeon_device *rdev); |
1182 | int radeon_gpu_wait_for_idle(struct radeon_device *rdev); | 1199 | int radeon_gpu_wait_for_idle(struct radeon_device *rdev); |
1183 | 1200 | ||
1184 | /* r600 blit */ | ||
1185 | int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
1186 | void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
1187 | void r600_kms_blit_copy(struct radeon_device *rdev, | ||
1188 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
1189 | int size_bytes); | ||
1190 | /* evergreen blit */ | ||
1191 | int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
1192 | void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
1193 | void evergreen_kms_blit_copy(struct radeon_device *rdev, | ||
1194 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
1195 | int size_bytes); | ||
1196 | |||
1197 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | 1201 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
1198 | { | 1202 | { |
1199 | if (reg < rdev->rmmio_size) | 1203 | if (reg < rdev->rmmio_size) |
@@ -1244,6 +1248,8 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
1244 | */ | 1248 | */ |
1245 | #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) | 1249 | #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) |
1246 | #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) | 1250 | #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) |
1251 | #define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg)) | ||
1252 | #define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg)) | ||
1247 | #define RREG32(reg) r100_mm_rreg(rdev, (reg)) | 1253 | #define RREG32(reg) r100_mm_rreg(rdev, (reg)) |
1248 | #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) | 1254 | #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) |
1249 | #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) | 1255 | #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) |
@@ -1317,6 +1323,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
1317 | (rdev->family == CHIP_RV410) || \ | 1323 | (rdev->family == CHIP_RV410) || \ |
1318 | (rdev->family == CHIP_RS400) || \ | 1324 | (rdev->family == CHIP_RS400) || \ |
1319 | (rdev->family == CHIP_RS480)) | 1325 | (rdev->family == CHIP_RS480)) |
1326 | #define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \ | ||
1327 | (rdev->ddev->pdev->device == 0x9443) || \ | ||
1328 | (rdev->ddev->pdev->device == 0x944B) || \ | ||
1329 | (rdev->ddev->pdev->device == 0x9506) || \ | ||
1330 | (rdev->ddev->pdev->device == 0x9509) || \ | ||
1331 | (rdev->ddev->pdev->device == 0x950F) || \ | ||
1332 | (rdev->ddev->pdev->device == 0x689C) || \ | ||
1333 | (rdev->ddev->pdev->device == 0x689D)) | ||
1320 | #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) | 1334 | #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) |
1321 | #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ | 1335 | #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ |
1322 | (rdev->family == CHIP_RS690) || \ | 1336 | (rdev->family == CHIP_RS690) || \ |
@@ -1325,7 +1339,9 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
1325 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) | 1339 | #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) |
1326 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) | 1340 | #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) |
1327 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) | 1341 | #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) |
1328 | #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM)) | 1342 | #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \ |
1343 | (rdev->flags & RADEON_IS_IGP)) | ||
1344 | #define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS)) | ||
1329 | 1345 | ||
1330 | /* | 1346 | /* |
1331 | * BIOS helpers. | 1347 | * BIOS helpers. |
@@ -1432,118 +1448,15 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc | |||
1432 | extern int radeon_resume_kms(struct drm_device *dev); | 1448 | extern int radeon_resume_kms(struct drm_device *dev); |
1433 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); | 1449 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); |
1434 | 1450 | ||
1435 | /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ | 1451 | /* |
1436 | extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp); | 1452 | * r600 functions used by radeon_encoder.c |
1437 | extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp); | 1453 | */ |
1438 | |||
1439 | /* rv200,rv250,rv280 */ | ||
1440 | extern void r200_set_safe_registers(struct radeon_device *rdev); | ||
1441 | |||
1442 | /* r300,r350,rv350,rv370,rv380 */ | ||
1443 | extern void r300_set_reg_safe(struct radeon_device *rdev); | ||
1444 | extern void r300_mc_program(struct radeon_device *rdev); | ||
1445 | extern void r300_mc_init(struct radeon_device *rdev); | ||
1446 | extern void r300_clock_startup(struct radeon_device *rdev); | ||
1447 | extern int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
1448 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); | ||
1449 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); | ||
1450 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | ||
1451 | extern void rv370_pcie_gart_disable(struct radeon_device *rdev); | ||
1452 | |||
1453 | /* r420,r423,rv410 */ | ||
1454 | extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); | ||
1455 | extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); | ||
1456 | extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); | ||
1457 | extern void r420_pipes_init(struct radeon_device *rdev); | ||
1458 | |||
1459 | /* rv515 */ | ||
1460 | struct rv515_mc_save { | ||
1461 | u32 d1vga_control; | ||
1462 | u32 d2vga_control; | ||
1463 | u32 vga_render_control; | ||
1464 | u32 vga_hdp_control; | ||
1465 | u32 d1crtc_control; | ||
1466 | u32 d2crtc_control; | ||
1467 | }; | ||
1468 | extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); | ||
1469 | extern void rv515_vga_render_disable(struct radeon_device *rdev); | ||
1470 | extern void rv515_set_safe_registers(struct radeon_device *rdev); | ||
1471 | extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
1472 | extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
1473 | extern void rv515_clock_startup(struct radeon_device *rdev); | ||
1474 | extern void rv515_debugfs(struct radeon_device *rdev); | ||
1475 | extern int rv515_suspend(struct radeon_device *rdev); | ||
1476 | |||
1477 | /* rs400 */ | ||
1478 | extern int rs400_gart_init(struct radeon_device *rdev); | ||
1479 | extern int rs400_gart_enable(struct radeon_device *rdev); | ||
1480 | extern void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
1481 | extern void rs400_gart_disable(struct radeon_device *rdev); | ||
1482 | extern void rs400_gart_fini(struct radeon_device *rdev); | ||
1483 | |||
1484 | /* rs600 */ | ||
1485 | extern void rs600_set_safe_registers(struct radeon_device *rdev); | ||
1486 | extern int rs600_irq_set(struct radeon_device *rdev); | ||
1487 | extern void rs600_irq_disable(struct radeon_device *rdev); | ||
1488 | |||
1489 | /* rs690, rs740 */ | ||
1490 | extern void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
1491 | struct drm_display_mode *mode1, | ||
1492 | struct drm_display_mode *mode2); | ||
1493 | |||
1494 | /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ | ||
1495 | extern bool r600_card_posted(struct radeon_device *rdev); | ||
1496 | extern void r600_cp_stop(struct radeon_device *rdev); | ||
1497 | extern int r600_cp_start(struct radeon_device *rdev); | ||
1498 | extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
1499 | extern int r600_cp_resume(struct radeon_device *rdev); | ||
1500 | extern void r600_cp_fini(struct radeon_device *rdev); | ||
1501 | extern int r600_count_pipe_bits(uint32_t val); | ||
1502 | extern int r600_mc_wait_for_idle(struct radeon_device *rdev); | ||
1503 | extern int r600_pcie_gart_init(struct radeon_device *rdev); | ||
1504 | extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); | ||
1505 | extern int r600_ib_test(struct radeon_device *rdev); | ||
1506 | extern int r600_ring_test(struct radeon_device *rdev); | ||
1507 | extern void r600_scratch_init(struct radeon_device *rdev); | ||
1508 | extern int r600_blit_init(struct radeon_device *rdev); | ||
1509 | extern void r600_blit_fini(struct radeon_device *rdev); | ||
1510 | extern int r600_init_microcode(struct radeon_device *rdev); | ||
1511 | extern int r600_asic_reset(struct radeon_device *rdev); | ||
1512 | /* r600 irq */ | ||
1513 | extern int r600_irq_init(struct radeon_device *rdev); | ||
1514 | extern void r600_irq_fini(struct radeon_device *rdev); | ||
1515 | extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
1516 | extern int r600_irq_set(struct radeon_device *rdev); | ||
1517 | extern void r600_irq_suspend(struct radeon_device *rdev); | ||
1518 | extern void r600_disable_interrupts(struct radeon_device *rdev); | ||
1519 | extern void r600_rlc_stop(struct radeon_device *rdev); | ||
1520 | /* r600 audio */ | ||
1521 | extern int r600_audio_init(struct radeon_device *rdev); | ||
1522 | extern int r600_audio_tmds_index(struct drm_encoder *encoder); | ||
1523 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); | ||
1524 | extern int r600_audio_channels(struct radeon_device *rdev); | ||
1525 | extern int r600_audio_bits_per_sample(struct radeon_device *rdev); | ||
1526 | extern int r600_audio_rate(struct radeon_device *rdev); | ||
1527 | extern uint8_t r600_audio_status_bits(struct radeon_device *rdev); | ||
1528 | extern uint8_t r600_audio_category_code(struct radeon_device *rdev); | ||
1529 | extern void r600_audio_schedule_polling(struct radeon_device *rdev); | ||
1530 | extern void r600_audio_enable_polling(struct drm_encoder *encoder); | ||
1531 | extern void r600_audio_disable_polling(struct drm_encoder *encoder); | ||
1532 | extern void r600_audio_fini(struct radeon_device *rdev); | ||
1533 | extern void r600_hdmi_init(struct drm_encoder *encoder); | ||
1534 | extern void r600_hdmi_enable(struct drm_encoder *encoder); | 1454 | extern void r600_hdmi_enable(struct drm_encoder *encoder); |
1535 | extern void r600_hdmi_disable(struct drm_encoder *encoder); | 1455 | extern void r600_hdmi_disable(struct drm_encoder *encoder); |
1536 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); | 1456 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
1537 | extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); | ||
1538 | extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); | ||
1539 | 1457 | ||
1540 | extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | 1458 | extern int ni_init_microcode(struct radeon_device *rdev); |
1541 | extern void r700_cp_stop(struct radeon_device *rdev); | 1459 | extern int btc_mc_load_microcode(struct radeon_device *rdev); |
1542 | extern void r700_cp_fini(struct radeon_device *rdev); | ||
1543 | extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); | ||
1544 | extern int evergreen_irq_set(struct radeon_device *rdev); | ||
1545 | extern int evergreen_blit_init(struct radeon_device *rdev); | ||
1546 | extern void evergreen_blit_fini(struct radeon_device *rdev); | ||
1547 | 1460 | ||
1548 | /* radeon_acpi.c */ | 1461 | /* radeon_acpi.c */ |
1549 | #if defined(CONFIG_ACPI) | 1462 | #if defined(CONFIG_ACPI) |
@@ -1552,14 +1465,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev); | |||
1552 | static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } | 1465 | static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } |
1553 | #endif | 1466 | #endif |
1554 | 1467 | ||
1555 | /* evergreen */ | ||
1556 | struct evergreen_mc_save { | ||
1557 | u32 vga_control[6]; | ||
1558 | u32 vga_render_control; | ||
1559 | u32 vga_hdp_control; | ||
1560 | u32 crtc_control[6]; | ||
1561 | }; | ||
1562 | |||
1563 | #include "radeon_object.h" | 1468 | #include "radeon_object.h" |
1564 | 1469 | ||
1565 | #endif | 1470 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 3d73fe484f42..e75d63b8e21d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) | |||
94 | rdev->mc_rreg = &rs600_mc_rreg; | 94 | rdev->mc_rreg = &rs600_mc_rreg; |
95 | rdev->mc_wreg = &rs600_mc_wreg; | 95 | rdev->mc_wreg = &rs600_mc_wreg; |
96 | } | 96 | } |
97 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { | 97 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) { |
98 | rdev->pciep_rreg = &r600_pciep_rreg; | 98 | rdev->pciep_rreg = &r600_pciep_rreg; |
99 | rdev->pciep_wreg = &r600_pciep_wreg; | 99 | rdev->pciep_wreg = &r600_pciep_wreg; |
100 | } | 100 | } |
@@ -631,8 +631,8 @@ static struct radeon_asic r600_asic = { | |||
631 | .set_engine_clock = &radeon_atom_set_engine_clock, | 631 | .set_engine_clock = &radeon_atom_set_engine_clock, |
632 | .get_memory_clock = &radeon_atom_get_memory_clock, | 632 | .get_memory_clock = &radeon_atom_get_memory_clock, |
633 | .set_memory_clock = &radeon_atom_set_memory_clock, | 633 | .set_memory_clock = &radeon_atom_set_memory_clock, |
634 | .get_pcie_lanes = &rv370_get_pcie_lanes, | 634 | .get_pcie_lanes = &r600_get_pcie_lanes, |
635 | .set_pcie_lanes = NULL, | 635 | .set_pcie_lanes = &r600_set_pcie_lanes, |
636 | .set_clock_gating = NULL, | 636 | .set_clock_gating = NULL, |
637 | .set_surface_reg = r600_set_surface_reg, | 637 | .set_surface_reg = r600_set_surface_reg, |
638 | .clear_surface_reg = r600_clear_surface_reg, | 638 | .clear_surface_reg = r600_clear_surface_reg, |
@@ -725,8 +725,8 @@ static struct radeon_asic rv770_asic = { | |||
725 | .set_engine_clock = &radeon_atom_set_engine_clock, | 725 | .set_engine_clock = &radeon_atom_set_engine_clock, |
726 | .get_memory_clock = &radeon_atom_get_memory_clock, | 726 | .get_memory_clock = &radeon_atom_get_memory_clock, |
727 | .set_memory_clock = &radeon_atom_set_memory_clock, | 727 | .set_memory_clock = &radeon_atom_set_memory_clock, |
728 | .get_pcie_lanes = &rv370_get_pcie_lanes, | 728 | .get_pcie_lanes = &r600_get_pcie_lanes, |
729 | .set_pcie_lanes = NULL, | 729 | .set_pcie_lanes = &r600_set_pcie_lanes, |
730 | .set_clock_gating = &radeon_atom_set_clock_gating, | 730 | .set_clock_gating = &radeon_atom_set_clock_gating, |
731 | .set_surface_reg = r600_set_surface_reg, | 731 | .set_surface_reg = r600_set_surface_reg, |
732 | .clear_surface_reg = r600_clear_surface_reg, | 732 | .clear_surface_reg = r600_clear_surface_reg, |
@@ -759,7 +759,7 @@ static struct radeon_asic evergreen_asic = { | |||
759 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 759 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, |
760 | .gart_set_page = &rs600_gart_set_page, | 760 | .gart_set_page = &rs600_gart_set_page, |
761 | .ring_test = &r600_ring_test, | 761 | .ring_test = &r600_ring_test, |
762 | .ring_ib_execute = &r600_ring_ib_execute, | 762 | .ring_ib_execute = &evergreen_ring_ib_execute, |
763 | .irq_set = &evergreen_irq_set, | 763 | .irq_set = &evergreen_irq_set, |
764 | .irq_process = &evergreen_irq_process, | 764 | .irq_process = &evergreen_irq_process, |
765 | .get_vblank_counter = &evergreen_get_vblank_counter, | 765 | .get_vblank_counter = &evergreen_get_vblank_counter, |
@@ -772,8 +772,8 @@ static struct radeon_asic evergreen_asic = { | |||
772 | .set_engine_clock = &radeon_atom_set_engine_clock, | 772 | .set_engine_clock = &radeon_atom_set_engine_clock, |
773 | .get_memory_clock = &radeon_atom_get_memory_clock, | 773 | .get_memory_clock = &radeon_atom_get_memory_clock, |
774 | .set_memory_clock = &radeon_atom_set_memory_clock, | 774 | .set_memory_clock = &radeon_atom_set_memory_clock, |
775 | .get_pcie_lanes = NULL, | 775 | .get_pcie_lanes = &r600_get_pcie_lanes, |
776 | .set_pcie_lanes = NULL, | 776 | .set_pcie_lanes = &r600_set_pcie_lanes, |
777 | .set_clock_gating = NULL, | 777 | .set_clock_gating = NULL, |
778 | .set_surface_reg = r600_set_surface_reg, | 778 | .set_surface_reg = r600_set_surface_reg, |
779 | .clear_surface_reg = r600_clear_surface_reg, | 779 | .clear_surface_reg = r600_clear_surface_reg, |
@@ -805,7 +805,7 @@ static struct radeon_asic sumo_asic = { | |||
805 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | 805 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, |
806 | .gart_set_page = &rs600_gart_set_page, | 806 | .gart_set_page = &rs600_gart_set_page, |
807 | .ring_test = &r600_ring_test, | 807 | .ring_test = &r600_ring_test, |
808 | .ring_ib_execute = &r600_ring_ib_execute, | 808 | .ring_ib_execute = &evergreen_ring_ib_execute, |
809 | .irq_set = &evergreen_irq_set, | 809 | .irq_set = &evergreen_irq_set, |
810 | .irq_process = &evergreen_irq_process, | 810 | .irq_process = &evergreen_irq_process, |
811 | .get_vblank_counter = &evergreen_get_vblank_counter, | 811 | .get_vblank_counter = &evergreen_get_vblank_counter, |
@@ -836,6 +836,52 @@ static struct radeon_asic sumo_asic = { | |||
836 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 836 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
837 | }; | 837 | }; |
838 | 838 | ||
839 | static struct radeon_asic btc_asic = { | ||
840 | .init = &evergreen_init, | ||
841 | .fini = &evergreen_fini, | ||
842 | .suspend = &evergreen_suspend, | ||
843 | .resume = &evergreen_resume, | ||
844 | .cp_commit = &r600_cp_commit, | ||
845 | .gpu_is_lockup = &evergreen_gpu_is_lockup, | ||
846 | .asic_reset = &evergreen_asic_reset, | ||
847 | .vga_set_state = &r600_vga_set_state, | ||
848 | .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, | ||
849 | .gart_set_page = &rs600_gart_set_page, | ||
850 | .ring_test = &r600_ring_test, | ||
851 | .ring_ib_execute = &evergreen_ring_ib_execute, | ||
852 | .irq_set = &evergreen_irq_set, | ||
853 | .irq_process = &evergreen_irq_process, | ||
854 | .get_vblank_counter = &evergreen_get_vblank_counter, | ||
855 | .fence_ring_emit = &r600_fence_ring_emit, | ||
856 | .cs_parse = &evergreen_cs_parse, | ||
857 | .copy_blit = &evergreen_copy_blit, | ||
858 | .copy_dma = &evergreen_copy_blit, | ||
859 | .copy = &evergreen_copy_blit, | ||
860 | .get_engine_clock = &radeon_atom_get_engine_clock, | ||
861 | .set_engine_clock = &radeon_atom_set_engine_clock, | ||
862 | .get_memory_clock = &radeon_atom_get_memory_clock, | ||
863 | .set_memory_clock = &radeon_atom_set_memory_clock, | ||
864 | .get_pcie_lanes = NULL, | ||
865 | .set_pcie_lanes = NULL, | ||
866 | .set_clock_gating = NULL, | ||
867 | .set_surface_reg = r600_set_surface_reg, | ||
868 | .clear_surface_reg = r600_clear_surface_reg, | ||
869 | .bandwidth_update = &evergreen_bandwidth_update, | ||
870 | .hpd_init = &evergreen_hpd_init, | ||
871 | .hpd_fini = &evergreen_hpd_fini, | ||
872 | .hpd_sense = &evergreen_hpd_sense, | ||
873 | .hpd_set_polarity = &evergreen_hpd_set_polarity, | ||
874 | .gui_idle = &r600_gui_idle, | ||
875 | .pm_misc = &evergreen_pm_misc, | ||
876 | .pm_prepare = &evergreen_pm_prepare, | ||
877 | .pm_finish = &evergreen_pm_finish, | ||
878 | .pm_init_profile = &r600_pm_init_profile, | ||
879 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | ||
880 | .pre_page_flip = &evergreen_pre_page_flip, | ||
881 | .page_flip = &evergreen_page_flip, | ||
882 | .post_page_flip = &evergreen_post_page_flip, | ||
883 | }; | ||
884 | |||
839 | int radeon_asic_init(struct radeon_device *rdev) | 885 | int radeon_asic_init(struct radeon_device *rdev) |
840 | { | 886 | { |
841 | radeon_register_accessor_init(rdev); | 887 | radeon_register_accessor_init(rdev); |
@@ -923,6 +969,11 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
923 | case CHIP_PALM: | 969 | case CHIP_PALM: |
924 | rdev->asic = &sumo_asic; | 970 | rdev->asic = &sumo_asic; |
925 | break; | 971 | break; |
972 | case CHIP_BARTS: | ||
973 | case CHIP_TURKS: | ||
974 | case CHIP_CAICOS: | ||
975 | rdev->asic = &btc_asic; | ||
976 | break; | ||
926 | default: | 977 | default: |
927 | /* FIXME: not supported yet */ | 978 | /* FIXME: not supported yet */ |
928 | return -EINVAL; | 979 | return -EINVAL; |
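Note: the new btc_asic table above is nearly identical to evergreen_asic; the Northern Islands support amounts to reusing the Evergreen entry points, switching the IB-execute hook to evergreen_ring_ib_execute, leaving the pcie-lane hooks NULL, and wiring CHIP_BARTS/TURKS/CAICOS into the init switch. A minimal sketch of how the driver consumes such a table, assuming the usual NULL-means-unsupported convention for optional hooks (the wrapper name is illustrative):

/* Dispatch sketch: optional hooks are NULL-checked before use;
 * radeon_asic_get_pcie_lanes() is an assumed wrapper name. */
static int radeon_asic_get_pcie_lanes(struct radeon_device *rdev)
{
	if (rdev->asic->get_pcie_lanes)
		return rdev->asic->get_pcie_lanes(rdev);
	return 0;	/* e.g. btc_asic leaves this hook NULL */
}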
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 4970eda1bd41..1c7317e3aa8c 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev); | |||
57 | void r100_fini(struct radeon_device *rdev); | 57 | void r100_fini(struct radeon_device *rdev); |
58 | int r100_suspend(struct radeon_device *rdev); | 58 | int r100_suspend(struct radeon_device *rdev); |
59 | int r100_resume(struct radeon_device *rdev); | 59 | int r100_resume(struct radeon_device *rdev); |
60 | uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); | ||
61 | void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | ||
62 | void r100_vga_set_state(struct radeon_device *rdev, bool state); | 60 | void r100_vga_set_state(struct radeon_device *rdev, bool state); |
63 | bool r100_gpu_is_lockup(struct radeon_device *rdev); | 61 | bool r100_gpu_is_lockup(struct radeon_device *rdev); |
64 | int r100_asic_reset(struct radeon_device *rdev); | 62 | int r100_asic_reset(struct radeon_device *rdev); |
@@ -102,6 +100,11 @@ int r100_pci_gart_enable(struct radeon_device *rdev); | |||
102 | void r100_pci_gart_disable(struct radeon_device *rdev); | 100 | void r100_pci_gart_disable(struct radeon_device *rdev); |
103 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); | 101 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); |
104 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | 102 | int r100_gui_wait_for_idle(struct radeon_device *rdev); |
103 | void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, | ||
104 | struct radeon_cp *cp); | ||
105 | bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, | ||
106 | struct r100_gpu_lockup *lockup, | ||
107 | struct radeon_cp *cp); | ||
105 | void r100_ib_fini(struct radeon_device *rdev); | 108 | void r100_ib_fini(struct radeon_device *rdev); |
106 | int r100_ib_init(struct radeon_device *rdev); | 109 | int r100_ib_init(struct radeon_device *rdev); |
107 | void r100_irq_disable(struct radeon_device *rdev); | 110 | void r100_irq_disable(struct radeon_device *rdev); |
@@ -138,10 +141,11 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); | |||
138 | * r200,rv250,rs300,rv280 | 141 | * r200,rv250,rs300,rv280 |
139 | */ | 142 | */ |
140 | extern int r200_copy_dma(struct radeon_device *rdev, | 143 | extern int r200_copy_dma(struct radeon_device *rdev, |
141 | uint64_t src_offset, | 144 | uint64_t src_offset, |
142 | uint64_t dst_offset, | 145 | uint64_t dst_offset, |
143 | unsigned num_pages, | 146 | unsigned num_pages, |
144 | struct radeon_fence *fence); | 147 | struct radeon_fence *fence); |
148 | void r200_set_safe_registers(struct radeon_device *rdev); | ||
145 | 149 | ||
146 | /* | 150 | /* |
147 | * r300,r350,rv350,rv380 | 151 | * r300,r350,rv350,rv380 |
@@ -158,10 +162,17 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, | |||
158 | extern int r300_cs_parse(struct radeon_cs_parser *p); | 162 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
159 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); | 163 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
160 | extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 164 | extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
161 | extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | ||
162 | extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | ||
163 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 165 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
164 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); | 166 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); |
167 | extern void r300_set_reg_safe(struct radeon_device *rdev); | ||
168 | extern void r300_mc_program(struct radeon_device *rdev); | ||
169 | extern void r300_mc_init(struct radeon_device *rdev); | ||
170 | extern void r300_clock_startup(struct radeon_device *rdev); | ||
171 | extern int r300_mc_wait_for_idle(struct radeon_device *rdev); | ||
172 | extern int rv370_pcie_gart_init(struct radeon_device *rdev); | ||
173 | extern void rv370_pcie_gart_fini(struct radeon_device *rdev); | ||
174 | extern int rv370_pcie_gart_enable(struct radeon_device *rdev); | ||
175 | extern void rv370_pcie_gart_disable(struct radeon_device *rdev); | ||
165 | 176 | ||
166 | /* | 177 | /* |
167 | * r420,r423,rv410 | 178 | * r420,r423,rv410 |
@@ -171,6 +182,10 @@ extern void r420_fini(struct radeon_device *rdev); | |||
171 | extern int r420_suspend(struct radeon_device *rdev); | 182 | extern int r420_suspend(struct radeon_device *rdev); |
172 | extern int r420_resume(struct radeon_device *rdev); | 183 | extern int r420_resume(struct radeon_device *rdev); |
173 | extern void r420_pm_init_profile(struct radeon_device *rdev); | 184 | extern void r420_pm_init_profile(struct radeon_device *rdev); |
185 | extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); | ||
186 | extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); | ||
187 | extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); | ||
188 | extern void r420_pipes_init(struct radeon_device *rdev); | ||
174 | 189 | ||
175 | /* | 190 | /* |
176 | * rs400,rs480 | 191 | * rs400,rs480 |
@@ -183,6 +198,11 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev); | |||
183 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 198 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
184 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 199 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
185 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 200 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
201 | int rs400_gart_init(struct radeon_device *rdev); | ||
202 | int rs400_gart_enable(struct radeon_device *rdev); | ||
203 | void rs400_gart_adjust_size(struct radeon_device *rdev); | ||
204 | void rs400_gart_disable(struct radeon_device *rdev); | ||
205 | void rs400_gart_fini(struct radeon_device *rdev); | ||
186 | 206 | ||
187 | /* | 207 | /* |
188 | * rs600. | 208 | * rs600. |
@@ -194,6 +214,7 @@ extern int rs600_suspend(struct radeon_device *rdev); | |||
194 | extern int rs600_resume(struct radeon_device *rdev); | 214 | extern int rs600_resume(struct radeon_device *rdev); |
195 | int rs600_irq_set(struct radeon_device *rdev); | 215 | int rs600_irq_set(struct radeon_device *rdev); |
196 | int rs600_irq_process(struct radeon_device *rdev); | 216 | int rs600_irq_process(struct radeon_device *rdev); |
217 | void rs600_irq_disable(struct radeon_device *rdev); | ||
197 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | 218 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
198 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 219 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
199 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 220 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
@@ -211,6 +232,8 @@ extern void rs600_pm_finish(struct radeon_device *rdev); | |||
211 | extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); | 232 | extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); |
212 | extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | 233 | extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); |
213 | extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); | 234 | extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); |
235 | void rs600_set_safe_registers(struct radeon_device *rdev); | ||
236 | |||
214 | 237 | ||
215 | /* | 238 | /* |
216 | * rs690,rs740 | 239 | * rs690,rs740 |
@@ -222,20 +245,37 @@ int rs690_suspend(struct radeon_device *rdev); | |||
222 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 245 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
223 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 246 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
224 | void rs690_bandwidth_update(struct radeon_device *rdev); | 247 | void rs690_bandwidth_update(struct radeon_device *rdev); |
248 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
249 | struct drm_display_mode *mode1, | ||
250 | struct drm_display_mode *mode2); | ||
225 | 251 | ||
226 | /* | 252 | /* |
227 | * rv515 | 253 | * rv515 |
228 | */ | 254 | */ |
255 | struct rv515_mc_save { | ||
256 | u32 d1vga_control; | ||
257 | u32 d2vga_control; | ||
258 | u32 vga_render_control; | ||
259 | u32 vga_hdp_control; | ||
260 | u32 d1crtc_control; | ||
261 | u32 d2crtc_control; | ||
262 | }; | ||
229 | int rv515_init(struct radeon_device *rdev); | 263 | int rv515_init(struct radeon_device *rdev); |
230 | void rv515_fini(struct radeon_device *rdev); | 264 | void rv515_fini(struct radeon_device *rdev); |
231 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 265 | uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
232 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 266 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
233 | void rv515_ring_start(struct radeon_device *rdev); | 267 | void rv515_ring_start(struct radeon_device *rdev); |
234 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | ||
235 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | ||
236 | void rv515_bandwidth_update(struct radeon_device *rdev); | 268 | void rv515_bandwidth_update(struct radeon_device *rdev); |
237 | int rv515_resume(struct radeon_device *rdev); | 269 | int rv515_resume(struct radeon_device *rdev); |
238 | int rv515_suspend(struct radeon_device *rdev); | 270 | int rv515_suspend(struct radeon_device *rdev); |
271 | void rv515_bandwidth_avivo_update(struct radeon_device *rdev); | ||
272 | void rv515_vga_render_disable(struct radeon_device *rdev); | ||
273 | void rv515_set_safe_registers(struct radeon_device *rdev); | ||
274 | void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
275 | void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); | ||
276 | void rv515_clock_startup(struct radeon_device *rdev); | ||
277 | void rv515_debugfs(struct radeon_device *rdev); | ||
278 | |||
239 | 279 | ||
240 | /* | 280 | /* |
241 | * r520,rv530,rv560,rv570,r580 | 281 | * r520,rv530,rv560,rv570,r580 |
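Note: rv515_mc_save, moved here from radeon.h, captures the D1/D2 VGA and CRTC control registers so the displays can be blanked while the memory controller is reprogrammed and restored afterwards. A usage sketch based only on the mc_stop/mc_resume declarations above; the reprogramming step in between is elided:

/* Save/blank, reprogram, restore: the pattern this struct exists for. */
struct rv515_mc_save save;

rv515_mc_stop(rdev, &save);	/* blank CRTCs/VGA, quiesce MC clients */
/* ... move the VRAM/GTT apertures here ... */
rv515_mc_resume(rdev, &save);	/* restore the saved display state */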
@@ -260,14 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
260 | int r600_cs_parse(struct radeon_cs_parser *p); | 300 | int r600_cs_parse(struct radeon_cs_parser *p); |
261 | void r600_fence_ring_emit(struct radeon_device *rdev, | 301 | void r600_fence_ring_emit(struct radeon_device *rdev, |
262 | struct radeon_fence *fence); | 302 | struct radeon_fence *fence); |
263 | int r600_irq_process(struct radeon_device *rdev); | ||
264 | int r600_irq_set(struct radeon_device *rdev); | ||
265 | bool r600_gpu_is_lockup(struct radeon_device *rdev); | 303 | bool r600_gpu_is_lockup(struct radeon_device *rdev); |
266 | int r600_asic_reset(struct radeon_device *rdev); | 304 | int r600_asic_reset(struct radeon_device *rdev); |
267 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | 305 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
268 | uint32_t tiling_flags, uint32_t pitch, | 306 | uint32_t tiling_flags, uint32_t pitch, |
269 | uint32_t offset, uint32_t obj_size); | 307 | uint32_t offset, uint32_t obj_size); |
270 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 308 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
309 | int r600_ib_test(struct radeon_device *rdev); | ||
271 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 310 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
272 | int r600_ring_test(struct radeon_device *rdev); | 311 | int r600_ring_test(struct radeon_device *rdev); |
273 | int r600_copy_blit(struct radeon_device *rdev, | 312 | int r600_copy_blit(struct radeon_device *rdev, |
@@ -284,6 +323,52 @@ extern void r600_pm_misc(struct radeon_device *rdev); | |||
284 | extern void r600_pm_init_profile(struct radeon_device *rdev); | 323 | extern void r600_pm_init_profile(struct radeon_device *rdev); |
285 | extern void rs780_pm_init_profile(struct radeon_device *rdev); | 324 | extern void rs780_pm_init_profile(struct radeon_device *rdev); |
286 | extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); | 325 | extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); |
326 | extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes); | ||
327 | extern int r600_get_pcie_lanes(struct radeon_device *rdev); | ||
328 | bool r600_card_posted(struct radeon_device *rdev); | ||
329 | void r600_cp_stop(struct radeon_device *rdev); | ||
330 | int r600_cp_start(struct radeon_device *rdev); | ||
331 | void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
332 | int r600_cp_resume(struct radeon_device *rdev); | ||
333 | void r600_cp_fini(struct radeon_device *rdev); | ||
334 | int r600_count_pipe_bits(uint32_t val); | ||
335 | int r600_mc_wait_for_idle(struct radeon_device *rdev); | ||
336 | int r600_pcie_gart_init(struct radeon_device *rdev); | ||
337 | void r600_scratch_init(struct radeon_device *rdev); | ||
338 | int r600_blit_init(struct radeon_device *rdev); | ||
339 | void r600_blit_fini(struct radeon_device *rdev); | ||
340 | int r600_init_microcode(struct radeon_device *rdev); | ||
341 | /* r600 irq */ | ||
342 | int r600_irq_process(struct radeon_device *rdev); | ||
343 | int r600_irq_init(struct radeon_device *rdev); | ||
344 | void r600_irq_fini(struct radeon_device *rdev); | ||
345 | void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
346 | int r600_irq_set(struct radeon_device *rdev); | ||
347 | void r600_irq_suspend(struct radeon_device *rdev); | ||
348 | void r600_disable_interrupts(struct radeon_device *rdev); | ||
349 | void r600_rlc_stop(struct radeon_device *rdev); | ||
350 | /* r600 audio */ | ||
351 | int r600_audio_init(struct radeon_device *rdev); | ||
352 | int r600_audio_tmds_index(struct drm_encoder *encoder); | ||
353 | void r600_audio_set_clock(struct drm_encoder *encoder, int clock); | ||
354 | int r600_audio_channels(struct radeon_device *rdev); | ||
355 | int r600_audio_bits_per_sample(struct radeon_device *rdev); | ||
356 | int r600_audio_rate(struct radeon_device *rdev); | ||
357 | uint8_t r600_audio_status_bits(struct radeon_device *rdev); | ||
358 | uint8_t r600_audio_category_code(struct radeon_device *rdev); | ||
359 | void r600_audio_schedule_polling(struct radeon_device *rdev); | ||
360 | void r600_audio_enable_polling(struct drm_encoder *encoder); | ||
361 | void r600_audio_disable_polling(struct drm_encoder *encoder); | ||
362 | void r600_audio_fini(struct radeon_device *rdev); | ||
363 | void r600_hdmi_init(struct drm_encoder *encoder); | ||
364 | int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); | ||
365 | void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); | ||
366 | /* r600 blit */ | ||
367 | int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
368 | void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
369 | void r600_kms_blit_copy(struct radeon_device *rdev, | ||
370 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
371 | int size_bytes); | ||
287 | 372 | ||
288 | /* | 373 | /* |
289 | * rv770,rv730,rv710,rv740 | 374 | * rv770,rv730,rv710,rv740 |
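Note: the freshly exported r600 blit trio implies a prepare/copy/done protocol around the CP ring. A hedged call-order sketch; size_bytes, the GPU addresses, and the fence are caller-supplied, and error handling is simplified:

/* Expected call order for the r600 blit API declared above. */
int r = r600_blit_prepare_copy(rdev, size_bytes);	/* ring + shader state */
if (r)
	return r;
r600_kms_blit_copy(rdev, src_gpu_addr, dst_gpu_addr, size_bytes);
r600_blit_done_copy(rdev, fence);	/* fences the emitted blit */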
@@ -292,12 +377,21 @@ int rv770_init(struct radeon_device *rdev); | |||
292 | void rv770_fini(struct radeon_device *rdev); | 377 | void rv770_fini(struct radeon_device *rdev); |
293 | int rv770_suspend(struct radeon_device *rdev); | 378 | int rv770_suspend(struct radeon_device *rdev); |
294 | int rv770_resume(struct radeon_device *rdev); | 379 | int rv770_resume(struct radeon_device *rdev); |
295 | extern void rv770_pm_misc(struct radeon_device *rdev); | 380 | void rv770_pm_misc(struct radeon_device *rdev); |
296 | extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | 381 | u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); |
382 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | ||
383 | void r700_cp_stop(struct radeon_device *rdev); | ||
384 | void r700_cp_fini(struct radeon_device *rdev); | ||
297 | 385 | ||
298 | /* | 386 | /* |
299 | * evergreen | 387 | * evergreen |
300 | */ | 388 | */ |
389 | struct evergreen_mc_save { | ||
390 | u32 vga_control[6]; | ||
391 | u32 vga_render_control; | ||
392 | u32 vga_hdp_control; | ||
393 | u32 crtc_control[6]; | ||
394 | }; | ||
301 | void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); | 395 | void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); |
302 | int evergreen_init(struct radeon_device *rdev); | 396 | int evergreen_init(struct radeon_device *rdev); |
303 | void evergreen_fini(struct radeon_device *rdev); | 397 | void evergreen_fini(struct radeon_device *rdev); |
@@ -306,6 +400,7 @@ int evergreen_resume(struct radeon_device *rdev); | |||
306 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev); | 400 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev); |
307 | int evergreen_asic_reset(struct radeon_device *rdev); | 401 | int evergreen_asic_reset(struct radeon_device *rdev); |
308 | void evergreen_bandwidth_update(struct radeon_device *rdev); | 402 | void evergreen_bandwidth_update(struct radeon_device *rdev); |
403 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | ||
309 | int evergreen_copy_blit(struct radeon_device *rdev, | 404 | int evergreen_copy_blit(struct radeon_device *rdev, |
310 | uint64_t src_offset, uint64_t dst_offset, | 405 | uint64_t src_offset, uint64_t dst_offset, |
311 | unsigned num_pages, struct radeon_fence *fence); | 406 | unsigned num_pages, struct radeon_fence *fence); |
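Note: evergreen_mc_save, also moved here from radeon.h, is the DCE4 counterpart of rv515_mc_save, widened to six-entry arrays because Evergreen exposes six display controllers. A sketch of the save half this struct implies; the register lookup tables and register names here are assumptions, not taken from this diff:

/* Illustrative DCE4 save loop; vga_control_regs[], crtc_control_regs[],
 * VGA_RENDER_CONTROL and VGA_HDP_CONTROL are assumed names. */
static void evergreen_mc_save_regs(struct radeon_device *rdev,
				   struct evergreen_mc_save *save)
{
	int i;

	for (i = 0; i < 6; i++) {
		save->vga_control[i] = RREG32(vga_control_regs[i]);
		save->crtc_control[i] = RREG32(crtc_control_regs[i]);
	}
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
}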
@@ -324,5 +419,15 @@ extern void evergreen_pm_finish(struct radeon_device *rdev); | |||
324 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); | 419 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); |
325 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | 420 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); |
326 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); | 421 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); |
422 | void evergreen_disable_interrupt_state(struct radeon_device *rdev); | ||
423 | int evergreen_blit_init(struct radeon_device *rdev); | ||
424 | void evergreen_blit_fini(struct radeon_device *rdev); | ||
425 | /* evergreen blit */ | ||
426 | int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); | ||
427 | void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); | ||
428 | void evergreen_kms_blit_copy(struct radeon_device *rdev, | ||
429 | u64 src_gpu_addr, u64 dst_gpu_addr, | ||
430 | int size_bytes); | ||
431 | |||
327 | 432 | ||
328 | #endif | 433 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index ac882639b3ed..5c1cc7ad9a15 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -37,7 +37,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, | |||
37 | extern void radeon_link_encoder_connector(struct drm_device *dev); | 37 | extern void radeon_link_encoder_connector(struct drm_device *dev); |
38 | extern void | 38 | extern void |
39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, | 39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, |
40 | uint32_t supported_device); | 40 | uint32_t supported_device, u16 caps); |
41 | 41 | ||
42 | /* from radeon_connector.c */ | 42 | /* from radeon_connector.c */ |
43 | extern void | 43 | extern void |
@@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev | |||
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | /* some DCE3 boards have bad data for this entry */ | ||
102 | if (ASIC_IS_DCE3(rdev)) { | ||
103 | if ((i == 4) && | ||
104 | (gpio->usClkMaskRegisterIndex == 0x1fda) && | ||
105 | (gpio->sucI2cId.ucAccess == 0x94)) | ||
106 | gpio->sucI2cId.ucAccess = 0x14; | ||
107 | } | ||
108 | |||
101 | if (gpio->sucI2cId.ucAccess == id) { | 109 | if (gpio->sucI2cId.ucAccess == id) { |
102 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 110 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
103 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 111 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
@@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) | |||
174 | } | 182 | } |
175 | } | 183 | } |
176 | 184 | ||
185 | /* some DCE3 boards have bad data for this entry */ | ||
186 | if (ASIC_IS_DCE3(rdev)) { | ||
187 | if ((i == 4) && | ||
188 | (gpio->usClkMaskRegisterIndex == 0x1fda) && | ||
189 | (gpio->sucI2cId.ucAccess == 0x94)) | ||
190 | gpio->sucI2cId.ucAccess = 0x14; | ||
191 | } | ||
192 | |||
177 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; | 193 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
178 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; | 194 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
179 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; | 195 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; |
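Note: this is the second copy of the same three-condition fixup (the first is in the single-bus lookup above), and the two copies must stay in sync. It could be hoisted into a small helper along these lines; the helper name is hypothetical, and ATOM_GPIO_I2C_ASSIGMENT is the table entry type as (mis)spelled in atombios.h:

/* Hypothetical shared helper for the DCE3 bad-table fixup shown twice
 * in this file; mutates the table entry in place. */
static void radeon_i2c_fixup_dce3(struct radeon_device *rdev, int index,
				  ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
	if (ASIC_IS_DCE3(rdev) && index == 4 &&
	    gpio->usClkMaskRegisterIndex == 0x1fda &&
	    gpio->sucI2cId.ucAccess == 0x94)
		gpio->sucI2cId.ucAccess = 0x14;
}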
@@ -297,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
297 | uint16_t *line_mux, | 313 | uint16_t *line_mux, |
298 | struct radeon_hpd *hpd) | 314 | struct radeon_hpd *hpd) |
299 | { | 315 | { |
300 | struct radeon_device *rdev = dev->dev_private; | ||
301 | 316 | ||
302 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ | 317 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ |
303 | if ((dev->pdev->device == 0x791e) && | 318 | if ((dev->pdev->device == 0x791e) && |
@@ -372,6 +387,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
372 | *line_mux = 0x90; | 387 | *line_mux = 0x90; |
373 | } | 388 | } |
374 | 389 | ||
390 | /* mac rv630, rv730, others */ | ||
391 | if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && | ||
392 | (*connector_type == DRM_MODE_CONNECTOR_DVII)) { | ||
393 | *connector_type = DRM_MODE_CONNECTOR_9PinDIN; | ||
394 | *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; | ||
395 | } | ||
396 | |||
375 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ | 397 | /* ASUS HD 3600 XT board lists the DVI port as HDMI */ |
376 | if ((dev->pdev->device == 0x9598) && | 398 | if ((dev->pdev->device == 0x9598) && |
377 | (dev->pdev->subsystem_vendor == 0x1043) && | 399 | (dev->pdev->subsystem_vendor == 0x1043) && |
@@ -409,21 +431,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
409 | } | 431 | } |
410 | } | 432 | } |
411 | 433 | ||
412 | /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */ | 434 | /* Acer laptop (Acer TravelMate 5730G) has an HDMI port |
435 | * on the laptop and a DVI port on the docking station and | ||
436 | * both share the same encoder, hpd pin, and ddc line. | ||
437 | * So while the bios table is technically correct, | ||
438 | * we drop the DVI port here since xrandr has no concept of | ||
439 | * encoders and will try and drive both connectors | ||
440 | * with different crtcs which isn't possible on the hardware | ||
441 | * side and leaves no crtcs for LVDS or VGA. | ||
442 | */ | ||
413 | if ((dev->pdev->device == 0x95c4) && | 443 | if ((dev->pdev->device == 0x95c4) && |
414 | (dev->pdev->subsystem_vendor == 0x1025) && | 444 | (dev->pdev->subsystem_vendor == 0x1025) && |
415 | (dev->pdev->subsystem_device == 0x013c)) { | 445 | (dev->pdev->subsystem_device == 0x013c)) { |
416 | struct radeon_gpio_rec gpio; | ||
417 | |||
418 | if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && | 446 | if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && |
419 | (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { | 447 | (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { |
420 | gpio = radeon_lookup_gpio(rdev, 6); | 448 | /* actually it's a DVI-D port not DVI-I */ |
421 | *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); | ||
422 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 449 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
423 | } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && | 450 | return false; |
424 | (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { | ||
425 | gpio = radeon_lookup_gpio(rdev, 7); | ||
426 | *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); | ||
427 | } | 451 | } |
428 | } | 452 | } |
429 | 453 | ||
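Note: unlike the old handling, which remapped hpd pins, this version rejects the docking-station DVI outright: a false return from radeon_atom_apply_quirks() tells the caller to skip the connector entirely. Caller-side sketch, assuming the usual loop over BIOS connector entries (argument names abbreviated from the caller):

/* A false return from the quirk hook drops the connector before it
 * is ever registered with DRM. */
if (!radeon_atom_apply_quirks(dev, supported_device, &connector_type,
			      &ddc_bus, &line_mux, &hpd))
	continue;	/* quirked away, e.g. the TravelMate dock DVI */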
@@ -509,6 +533,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
509 | u16 size, data_offset; | 533 | u16 size, data_offset; |
510 | u8 frev, crev; | 534 | u8 frev, crev; |
511 | ATOM_CONNECTOR_OBJECT_TABLE *con_obj; | 535 | ATOM_CONNECTOR_OBJECT_TABLE *con_obj; |
536 | ATOM_ENCODER_OBJECT_TABLE *enc_obj; | ||
512 | ATOM_OBJECT_TABLE *router_obj; | 537 | ATOM_OBJECT_TABLE *router_obj; |
513 | ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; | 538 | ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; |
514 | ATOM_OBJECT_HEADER *obj_header; | 539 | ATOM_OBJECT_HEADER *obj_header; |
@@ -533,6 +558,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
533 | con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) | 558 | con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) |
534 | (ctx->bios + data_offset + | 559 | (ctx->bios + data_offset + |
535 | le16_to_cpu(obj_header->usConnectorObjectTableOffset)); | 560 | le16_to_cpu(obj_header->usConnectorObjectTableOffset)); |
561 | enc_obj = (ATOM_ENCODER_OBJECT_TABLE *) | ||
562 | (ctx->bios + data_offset + | ||
563 | le16_to_cpu(obj_header->usEncoderObjectTableOffset)); | ||
536 | router_obj = (ATOM_OBJECT_TABLE *) | 564 | router_obj = (ATOM_OBJECT_TABLE *) |
537 | (ctx->bios + data_offset + | 565 | (ctx->bios + data_offset + |
538 | le16_to_cpu(obj_header->usRouterObjectTableOffset)); | 566 | le16_to_cpu(obj_header->usRouterObjectTableOffset)); |
@@ -638,14 +666,35 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
638 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | 666 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; |
639 | 667 | ||
640 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { | 668 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { |
641 | u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]); | 669 | for (k = 0; k < enc_obj->ucNumberOfObjects; k++) { |
642 | 670 | u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID); | |
643 | radeon_add_atom_encoder(dev, | 671 | if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) { |
644 | encoder_obj, | 672 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) |
645 | le16_to_cpu | 673 | (ctx->bios + data_offset + |
646 | (path-> | 674 | le16_to_cpu(enc_obj->asObjects[k].usRecordOffset)); |
647 | usDeviceTag)); | 675 | ATOM_ENCODER_CAP_RECORD *cap_record; |
676 | u16 caps = 0; | ||
648 | 677 | ||
678 | while (record->ucRecordType > 0 && | ||
679 | record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { | ||
680 | switch (record->ucRecordType) { | ||
681 | case ATOM_ENCODER_CAP_RECORD_TYPE: | ||
682 | cap_record =(ATOM_ENCODER_CAP_RECORD *) | ||
683 | record; | ||
684 | caps = le16_to_cpu(cap_record->usEncoderCap); | ||
685 | break; | ||
686 | } | ||
687 | record = (ATOM_COMMON_RECORD_HEADER *) | ||
688 | ((char *)record + record->ucRecordSize); | ||
689 | } | ||
690 | radeon_add_atom_encoder(dev, | ||
691 | encoder_obj, | ||
692 | le16_to_cpu | ||
693 | (path-> | ||
694 | usDeviceTag), | ||
695 | caps); | ||
696 | } | ||
697 | } | ||
649 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { | 698 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { |
650 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { | 699 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { |
651 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); | 700 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); |
@@ -979,7 +1028,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
979 | radeon_get_encoder_enum(dev, | 1028 | radeon_get_encoder_enum(dev, |
980 | (1 << i), | 1029 | (1 << i), |
981 | dac), | 1030 | dac), |
982 | (1 << i)); | 1031 | (1 << i), |
1032 | 0); | ||
983 | else | 1033 | else |
984 | radeon_add_legacy_encoder(dev, | 1034 | radeon_add_legacy_encoder(dev, |
985 | radeon_get_encoder_enum(dev, | 1035 | radeon_get_encoder_enum(dev, |
@@ -1058,6 +1108,7 @@ union firmware_info { | |||
1058 | ATOM_FIRMWARE_INFO_V1_3 info_13; | 1108 | ATOM_FIRMWARE_INFO_V1_3 info_13; |
1059 | ATOM_FIRMWARE_INFO_V1_4 info_14; | 1109 | ATOM_FIRMWARE_INFO_V1_4 info_14; |
1060 | ATOM_FIRMWARE_INFO_V2_1 info_21; | 1110 | ATOM_FIRMWARE_INFO_V2_1 info_21; |
1111 | ATOM_FIRMWARE_INFO_V2_2 info_22; | ||
1061 | }; | 1112 | }; |
1062 | 1113 | ||
1063 | bool radeon_atom_get_clock_info(struct drm_device *dev) | 1114 | bool radeon_atom_get_clock_info(struct drm_device *dev) |
@@ -1112,16 +1163,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1112 | p1pll->pll_out_min = 64800; | 1163 | p1pll->pll_out_min = 64800; |
1113 | else | 1164 | else |
1114 | p1pll->pll_out_min = 20000; | 1165 | p1pll->pll_out_min = 20000; |
1115 | } else if (p1pll->pll_out_min > 64800) { | ||
1116 | /* Limiting the pll output range is a good thing generally as | ||
1117 | * it limits the number of possible pll combinations for a given | ||
1118 | * frequency presumably to the ones that work best on each card. | ||
1119 | * However, certain duallink DVI monitors seem to like | ||
1120 | * pll combinations that would be limited by this at least on | ||
1121 | * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per | ||
1122 | * family. | ||
1123 | */ | ||
1124 | p1pll->pll_out_min = 64800; | ||
1125 | } | 1166 | } |
1126 | 1167 | ||
1127 | p1pll->pll_in_min = | 1168 | p1pll->pll_in_min = |
@@ -1132,8 +1173,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1132 | *p2pll = *p1pll; | 1173 | *p2pll = *p1pll; |
1133 | 1174 | ||
1134 | /* system clock */ | 1175 | /* system clock */ |
1135 | spll->reference_freq = | 1176 | if (ASIC_IS_DCE4(rdev)) |
1136 | le16_to_cpu(firmware_info->info.usReferenceClock); | 1177 | spll->reference_freq = |
1178 | le16_to_cpu(firmware_info->info_21.usCoreReferenceClock); | ||
1179 | else | ||
1180 | spll->reference_freq = | ||
1181 | le16_to_cpu(firmware_info->info.usReferenceClock); | ||
1137 | spll->reference_div = 0; | 1182 | spll->reference_div = 0; |
1138 | 1183 | ||
1139 | spll->pll_out_min = | 1184 | spll->pll_out_min = |
@@ -1155,8 +1200,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1155 | le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); | 1200 | le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); |
1156 | 1201 | ||
1157 | /* memory clock */ | 1202 | /* memory clock */ |
1158 | mpll->reference_freq = | 1203 | if (ASIC_IS_DCE4(rdev)) |
1159 | le16_to_cpu(firmware_info->info.usReferenceClock); | 1204 | mpll->reference_freq = |
1205 | le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock); | ||
1206 | else | ||
1207 | mpll->reference_freq = | ||
1208 | le16_to_cpu(firmware_info->info.usReferenceClock); | ||
1160 | mpll->reference_div = 0; | 1209 | mpll->reference_div = 0; |
1161 | 1210 | ||
1162 | mpll->pll_out_min = | 1211 | mpll->pll_out_min = |
@@ -1185,8 +1234,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
1185 | if (ASIC_IS_DCE4(rdev)) { | 1234 | if (ASIC_IS_DCE4(rdev)) { |
1186 | rdev->clock.default_dispclk = | 1235 | rdev->clock.default_dispclk = |
1187 | le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); | 1236 | le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); |
1188 | if (rdev->clock.default_dispclk == 0) | 1237 | if (rdev->clock.default_dispclk == 0) { |
1189 | rdev->clock.default_dispclk = 60000; /* 600 MHz */ | 1238 | if (ASIC_IS_DCE5(rdev))
1239 | rdev->clock.default_dispclk = 54000; /* 540 MHz */ | ||
1240 | else | ||
1241 | rdev->clock.default_dispclk = 60000; /* 600 MHz */ | ||
1242 | } | ||
1190 | rdev->clock.dp_extclk = | 1243 | rdev->clock.dp_extclk = |
1191 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); | 1244 | le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); |
1192 | } | 1245 | } |
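Note on units for the fallbacks above: these firmware clock fields are in 10 kHz units, so the DCE5 default of 54000 is 540 MHz and the older 60000 is 600 MHz, exactly as the comments say. A one-line conversion for consumers of these fields:

/* Firmware clock fields are in 10 kHz units. */
u32 dispclk_khz = rdev->clock.default_dispclk * 10;	/* 54000 -> 540000 kHz */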
@@ -1500,6 +1553,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1500 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | 1553 | if (misc & ATOM_DOUBLE_CLOCK_MODE) |
1501 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; | 1554 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; |
1502 | 1555 | ||
1556 | lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize; | ||
1557 | lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize; | ||
1558 | |||
1503 | /* set crtc values */ | 1559 | /* set crtc values */ |
1504 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | 1560 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); |
1505 | 1561 | ||
@@ -1512,6 +1568,59 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1512 | else | 1568 | else |
1513 | lvds->linkb = false; | 1569 | lvds->linkb = false; |
1514 | 1570 | ||
1571 | /* parse the lcd record table */ | ||
1572 | if (lvds_info->info.usModePatchTableOffset) { | ||
1573 | ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; | ||
1574 | ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; | ||
1575 | bool bad_record = false; | ||
1576 | u8 *record = (u8 *)(mode_info->atom_context->bios + | ||
1577 | data_offset + | ||
1578 | lvds_info->info.usModePatchTableOffset); | ||
1579 | while (*record != ATOM_RECORD_END_TYPE) { | ||
1580 | switch (*record) { | ||
1581 | case LCD_MODE_PATCH_RECORD_MODE_TYPE: | ||
1582 | record += sizeof(ATOM_PATCH_RECORD_MODE); | ||
1583 | break; | ||
1584 | case LCD_RTS_RECORD_TYPE: | ||
1585 | record += sizeof(ATOM_LCD_RTS_RECORD); | ||
1586 | break; | ||
1587 | case LCD_CAP_RECORD_TYPE: | ||
1588 | record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); | ||
1589 | break; | ||
1590 | case LCD_FAKE_EDID_PATCH_RECORD_TYPE: | ||
1591 | fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; | ||
1592 | if (fake_edid_record->ucFakeEDIDLength) { | ||
1593 | struct edid *edid; | ||
1594 | int edid_size = | ||
1595 | max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); | ||
1596 | edid = kmalloc(edid_size, GFP_KERNEL); | ||
1597 | if (edid) { | ||
1598 | memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], | ||
1599 | fake_edid_record->ucFakeEDIDLength); | ||
1600 | |||
1601 | if (drm_edid_is_valid(edid)) | ||
1602 | rdev->mode_info.bios_hardcoded_edid = edid; | ||
1603 | else | ||
1604 | kfree(edid); | ||
1605 | } | ||
1606 | } | ||
1607 | record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); | ||
1608 | break; | ||
1609 | case LCD_PANEL_RESOLUTION_RECORD_TYPE: | ||
1610 | panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; | ||
1611 | lvds->native_mode.width_mm = panel_res_record->usHSize; | ||
1612 | lvds->native_mode.height_mm = panel_res_record->usVSize; | ||
1613 | record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); | ||
1614 | break; | ||
1615 | default: | ||
1616 | DRM_ERROR("Bad LCD record %d\n", *record); | ||
1617 | bad_record = true; | ||
1618 | break; | ||
1619 | } | ||
1620 | if (bad_record) | ||
1621 | break; | ||
1622 | } | ||
1623 | } | ||
1515 | } | 1624 | } |
1516 | return lvds; | 1625 | return lvds; |
1517 | } | 1626 | } |
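Note: a fake EDID from the LCD record table is cached in rdev->mode_info.bios_hardcoded_edid only after it passes drm_edid_is_valid(), so later consumers can trust the blob. Sketch of such a consumer handing out a private copy; the helper name is an assumption, and a real version would track the stored size rather than assuming a single EDID block:

/* Hypothetical consumer of the cached BIOS EDID; the caller owns the
 * returned copy, NULL means no valid hardcoded EDID was found. */
struct edid *radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
	if (!rdev->mode_info.bios_hardcoded_edid)
		return NULL;
	return kmemdup(rdev->mode_info.bios_hardcoded_edid,
		       EDID_LENGTH, GFP_KERNEL);
}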
@@ -1768,6 +1877,7 @@ static const char *pp_lib_thermal_controller_names[] = { | |||
1768 | "Evergreen", | 1877 | "Evergreen", |
1769 | "emc2103", | 1878 | "emc2103", |
1770 | "Sumo", | 1879 | "Sumo", |
1880 | "Northern Islands", | ||
1771 | }; | 1881 | }; |
1772 | 1882 | ||
1773 | union power_info { | 1883 | union power_info { |
@@ -1867,6 +1977,9 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | |||
1867 | num_modes = power_info->info.ucNumOfPowerModeEntries; | 1977 | num_modes = power_info->info.ucNumOfPowerModeEntries; |
1868 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) | 1978 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) |
1869 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; | 1979 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; |
1980 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); | ||
1981 | if (!rdev->pm.power_state) | ||
1982 | return state_index; | ||
1870 | /* last mode is usually default, array is low to high */ | 1983 | /* last mode is usually default, array is low to high */ |
1871 | for (i = 0; i < num_modes; i++) { | 1984 | for (i = 0; i < num_modes; i++) { |
1872 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 1985 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
@@ -2031,6 +2144,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r | |||
2031 | (controller->ucFanParameters & | 2144 | (controller->ucFanParameters & |
2032 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | 2145 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); |
2033 | rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO; | 2146 | rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO; |
2147 | } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { | ||
2148 | DRM_INFO("Internal thermal controller %s fan control\n", | ||
2149 | (controller->ucFanParameters & | ||
2150 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2151 | rdev->pm.int_thermal_type = THERMAL_TYPE_NI; | ||
2034 | } else if ((controller->ucType == | 2152 | } else if ((controller->ucType == |
2035 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || | 2153 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || |
2036 | (controller->ucType == | 2154 | (controller->ucType == |
@@ -2120,15 +2238,22 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde | |||
2120 | rdev->pm.default_power_state_index = state_index; | 2238 | rdev->pm.default_power_state_index = state_index; |
2121 | rdev->pm.power_state[state_index].default_clock_mode = | 2239 | rdev->pm.power_state[state_index].default_clock_mode = |
2122 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; | 2240 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; |
2123 | /* patch the table values with the default sclk/mclk from firmware info */ | 2241 | if (ASIC_IS_DCE5(rdev)) {
2124 | for (j = 0; j < mode_index; j++) { | 2242 | /* NI chips post without MC ucode, so default clocks are strobe mode only */ |
2125 | rdev->pm.power_state[state_index].clock_info[j].mclk = | 2243 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; |
2126 | rdev->clock.default_mclk; | 2244 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; |
2127 | rdev->pm.power_state[state_index].clock_info[j].sclk = | 2245 | rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; |
2128 | rdev->clock.default_sclk; | 2246 | } else { |
2129 | if (vddc) | 2247 | /* patch the table values with the default sclk/mclk from firmware info */
2130 | rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = | 2248 | for (j = 0; j < mode_index; j++) { |
2131 | vddc; | 2249 | rdev->pm.power_state[state_index].clock_info[j].mclk = |
2250 | rdev->clock.default_mclk; | ||
2251 | rdev->pm.power_state[state_index].clock_info[j].sclk = | ||
2252 | rdev->clock.default_sclk; | ||
2253 | if (vddc) | ||
2254 | rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = | ||
2255 | vddc; | ||
2256 | } | ||
2132 | } | 2257 | } |
2133 | } | 2258 | } |
2134 | } | 2259 | } |
@@ -2206,6 +2331,10 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) | |||
2206 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | 2331 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
2207 | 2332 | ||
2208 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); | 2333 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); |
2334 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * | ||
2335 | power_info->pplib.ucNumStates, GFP_KERNEL); | ||
2336 | if (!rdev->pm.power_state) | ||
2337 | return state_index; | ||
2209 | /* first mode is usually default, followed by low to high */ | 2338 | /* first mode is usually default, followed by low to high */ |
2210 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { | 2339 | for (i = 0; i < power_info->pplib.ucNumStates; i++) { |
2211 | mode_index = 0; | 2340 | mode_index = 0; |
@@ -2286,6 +2415,10 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) | |||
2286 | non_clock_info_array = (struct NonClockInfoArray *) | 2415 | non_clock_info_array = (struct NonClockInfoArray *) |
2287 | (mode_info->atom_context->bios + data_offset + | 2416 | (mode_info->atom_context->bios + data_offset + |
2288 | power_info->pplib.usNonClockInfoArrayOffset); | 2417 | power_info->pplib.usNonClockInfoArrayOffset); |
2418 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * | ||
2419 | state_array->ucNumEntries, GFP_KERNEL); | ||
2420 | if (!rdev->pm.power_state) | ||
2421 | return state_index; | ||
2289 | for (i = 0; i < state_array->ucNumEntries; i++) { | 2422 | for (i = 0; i < state_array->ucNumEntries; i++) { |
2290 | mode_index = 0; | 2423 | mode_index = 0; |
2291 | power_state = (union pplib_power_state *)&state_array->states[i]; | 2424 | power_state = (union pplib_power_state *)&state_array->states[i]; |
@@ -2359,19 +2492,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
2359 | break; | 2492 | break; |
2360 | } | 2493 | } |
2361 | } else { | 2494 | } else { |
2362 | /* add the default mode */ | 2495 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); |
2363 | rdev->pm.power_state[state_index].type = | 2496 | if (rdev->pm.power_state) { |
2364 | POWER_STATE_TYPE_DEFAULT; | 2497 | /* add the default mode */ |
2365 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2498 | rdev->pm.power_state[state_index].type = |
2366 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; | 2499 | POWER_STATE_TYPE_DEFAULT; |
2367 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | 2500 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
2368 | rdev->pm.power_state[state_index].default_clock_mode = | 2501 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; |
2369 | &rdev->pm.power_state[state_index].clock_info[0]; | 2502 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; |
2370 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2503 | rdev->pm.power_state[state_index].default_clock_mode = |
2371 | rdev->pm.power_state[state_index].pcie_lanes = 16; | 2504 | &rdev->pm.power_state[state_index].clock_info[0]; |
2372 | rdev->pm.default_power_state_index = state_index; | 2505 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
2373 | rdev->pm.power_state[state_index].flags = 0; | 2506 | rdev->pm.power_state[state_index].pcie_lanes = 16; |
2374 | state_index++; | 2507 | rdev->pm.default_power_state_index = state_index; |
2508 | rdev->pm.power_state[state_index].flags = 0; | ||
2509 | state_index++; | ||
2510 | } | ||
2375 | } | 2511 | } |
2376 | 2512 | ||
2377 | rdev->pm.num_power_states = state_index; | 2513 | rdev->pm.num_power_states = state_index; |
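Note: each power-table parser now allocates rdev->pm.power_state itself, sized from its own table, and returns the current state_index (still zero) on allocation failure, so num_power_states never claims states that were not actually stored. The recurring guard, condensed; num_states stands in for each parser's entry count:

/* Recurring allocation guard across the parsers above. */
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_states,
			       GFP_KERNEL);
if (!rdev->pm.power_state)
	return state_index;	/* 0: pm falls back to default clocks */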
@@ -2487,7 +2623,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) | |||
2487 | bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; | 2623 | bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; |
2488 | 2624 | ||
2489 | /* tell the bios not to handle mode switching */ | 2625 | /* tell the bios not to handle mode switching */ |
2490 | bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); | 2626 | bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; |
2491 | 2627 | ||
2492 | if (rdev->family >= CHIP_R600) { | 2628 | if (rdev->family >= CHIP_R600) { |
2493 | WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); | 2629 | WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); |
@@ -2538,10 +2674,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) | |||
2538 | else | 2674 | else |
2539 | bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); | 2675 | bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); |
2540 | 2676 | ||
2541 | if (lock) | 2677 | if (lock) { |
2542 | bios_6_scratch |= ATOM_S6_CRITICAL_STATE; | 2678 | bios_6_scratch |= ATOM_S6_CRITICAL_STATE; |
2543 | else | 2679 | bios_6_scratch &= ~ATOM_S6_ACC_MODE; |
2680 | } else { | ||
2544 | bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; | 2681 | bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; |
2682 | bios_6_scratch |= ATOM_S6_ACC_MODE; | ||
2683 | } | ||
2545 | 2684 | ||
2546 | if (rdev->family >= CHIP_R600) | 2685 | if (rdev->family >= CHIP_R600) |
2547 | WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); | 2686 | WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); |
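Note: ATOM_S6_ACC_MODE moves out of the one-time init above and into the lock path: it is cleared while an output is locked and set again on unlock, so the BIOS only sees the driver as owning display switching once a mode-set has actually completed. Bracketing sketch using the function shown here:

/* ACC_MODE handshake around a mode-set. */
radeon_atom_output_lock(encoder, true);	/* CRITICAL_STATE set, ACC_MODE cleared */
/* ... reprogram the encoder/CRTC ... */
radeon_atom_output_lock(encoder, false);	/* ACC_MODE set: driver active */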
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index c558685cc637..10191d9372d8 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
41 | 41 | ||
42 | size = bsize; | 42 | size = bsize; |
43 | n = 1024; | 43 | n = 1024; |
44 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj); | 44 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); |
45 | if (r) { | 45 | if (r) { |
46 | goto out_cleanup; | 46 | goto out_cleanup; |
47 | } | 47 | } |
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
53 | if (r) { | 53 | if (r) { |
54 | goto out_cleanup; | 54 | goto out_cleanup; |
55 | } | 55 | } |
56 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj); | 56 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj); |
57 | if (r) { | 57 | if (r) { |
58 | goto out_cleanup; | 58 | goto out_cleanup; |
59 | } | 59 | } |
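Note: both benchmark call sites simply drop the second argument; radeon_bo_create() no longer takes a gem object and creates the BO directly in the requested domain. The new call shape, with illustrative size and domain values:

/* Updated radeon_bo_create() call shape; values are illustrative. */
struct radeon_bo *bo;
int r;

r = radeon_bo_create(rdev, 1024 * 1024, PAGE_SIZE, true,
		     RADEON_GEM_DOMAIN_VRAM, &bo);
if (r)
	return r;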
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 654787ec43f4..1aba85cad1a8 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -130,6 +130,46 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) | |||
130 | } | 130 | } |
131 | return true; | 131 | return true; |
132 | } | 132 | } |
133 | |||
134 | static bool ni_read_disabled_bios(struct radeon_device *rdev) | ||
135 | { | ||
136 | u32 bus_cntl; | ||
137 | u32 d1vga_control; | ||
138 | u32 d2vga_control; | ||
139 | u32 vga_render_control; | ||
140 | u32 rom_cntl; | ||
141 | bool r; | ||
142 | |||
143 | bus_cntl = RREG32(R600_BUS_CNTL); | ||
144 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | ||
145 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | ||
146 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | ||
147 | rom_cntl = RREG32(R600_ROM_CNTL); | ||
148 | |||
149 | /* enable the rom */ | ||
150 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); | ||
151 | /* Disable VGA mode */ | ||
152 | WREG32(AVIVO_D1VGA_CONTROL, | ||
153 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | ||
154 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | ||
155 | WREG32(AVIVO_D2VGA_CONTROL, | ||
156 | (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | ||
157 | AVIVO_DVGA_CONTROL_TIMING_SELECT))); | ||
158 | WREG32(AVIVO_VGA_RENDER_CONTROL, | ||
159 | (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); | ||
160 | WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); | ||
161 | |||
162 | r = radeon_read_bios(rdev); | ||
163 | |||
164 | /* restore regs */ | ||
165 | WREG32(R600_BUS_CNTL, bus_cntl); | ||
166 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | ||
167 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | ||
168 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | ||
169 | WREG32(R600_ROM_CNTL, rom_cntl); | ||
170 | return r; | ||
171 | } | ||
172 | |||
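[Annotation] The new ni_read_disabled_bios() follows the same shape as the r600/r700 variants below: snapshot every register it will touch, force the ROM visible and VGA out of the way, read the image, then restore everything. A generic sketch of the pattern (REG and ENABLE are placeholders):

    u32 saved = RREG32(REG);         /* 1. save            */
    WREG32(REG, saved | ENABLE);     /* 2. reprogram       */
    ok = radeon_read_bios(rdev);     /* 3. do the read     */
    WREG32(REG, saved);              /* 4. restore         */

Unlike the pre-NI paths it never touches VIPH_CONTROL (presumably the VIP host port no longer exists on these parts) and it uses R600_SCK_OVERWRITE in ROM_CNTL rather than reprogramming the SPLL.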
133 | static bool r700_read_disabled_bios(struct radeon_device *rdev) | 173 | static bool r700_read_disabled_bios(struct radeon_device *rdev) |
134 | { | 174 | { |
135 | uint32_t viph_control; | 175 | uint32_t viph_control; |
@@ -143,7 +183,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) | |||
143 | bool r; | 183 | bool r; |
144 | 184 | ||
145 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 185 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
146 | bus_cntl = RREG32(RADEON_BUS_CNTL); | 186 | bus_cntl = RREG32(R600_BUS_CNTL); |
147 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 187 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
148 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 188 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
149 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 189 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
@@ -152,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) | |||
152 | /* disable VIP */ | 192 | /* disable VIP */ |
153 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 193 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
154 | /* enable the rom */ | 194 | /* enable the rom */ |
155 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); | 195 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); |
156 | /* Disable VGA mode */ | 196 | /* Disable VGA mode */ |
157 | WREG32(AVIVO_D1VGA_CONTROL, | 197 | WREG32(AVIVO_D1VGA_CONTROL, |
158 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 198 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
@@ -191,7 +231,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) | |||
191 | cg_spll_status = RREG32(R600_CG_SPLL_STATUS); | 231 | cg_spll_status = RREG32(R600_CG_SPLL_STATUS); |
192 | } | 232 | } |
193 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 233 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
194 | WREG32(RADEON_BUS_CNTL, bus_cntl); | 234 | WREG32(R600_BUS_CNTL, bus_cntl); |
195 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 235 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
196 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 236 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
197 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 237 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
@@ -216,7 +256,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) | |||
216 | bool r; | 256 | bool r; |
217 | 257 | ||
218 | viph_control = RREG32(RADEON_VIPH_CONTROL); | 258 | viph_control = RREG32(RADEON_VIPH_CONTROL); |
219 | bus_cntl = RREG32(RADEON_BUS_CNTL); | 259 | bus_cntl = RREG32(R600_BUS_CNTL); |
220 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); | 260 | d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); |
221 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); | 261 | d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); |
222 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); | 262 | vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); |
@@ -231,7 +271,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) | |||
231 | /* disable VIP */ | 271 | /* disable VIP */ |
232 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); | 272 | WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); |
233 | /* enable the rom */ | 273 | /* enable the rom */ |
234 | WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); | 274 | WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); |
235 | /* Disable VGA mode */ | 275 | /* Disable VGA mode */ |
236 | WREG32(AVIVO_D1VGA_CONTROL, | 276 | WREG32(AVIVO_D1VGA_CONTROL, |
237 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | | 277 | (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | |
@@ -262,7 +302,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) | |||
262 | 302 | ||
263 | /* restore regs */ | 303 | /* restore regs */ |
264 | WREG32(RADEON_VIPH_CONTROL, viph_control); | 304 | WREG32(RADEON_VIPH_CONTROL, viph_control); |
265 | WREG32(RADEON_BUS_CNTL, bus_cntl); | 305 | WREG32(R600_BUS_CNTL, bus_cntl); |
266 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); | 306 | WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); |
267 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); | 307 | WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); |
268 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); | 308 | WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); |
@@ -415,6 +455,8 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev) | |||
415 | { | 455 | { |
416 | if (rdev->flags & RADEON_IS_IGP) | 456 | if (rdev->flags & RADEON_IS_IGP) |
417 | return igp_read_bios_from_vram(rdev); | 457 | return igp_read_bios_from_vram(rdev); |
458 | else if (rdev->family >= CHIP_BARTS) | ||
459 | return ni_read_disabled_bios(rdev); | ||
418 | else if (rdev->family >= CHIP_RV770) | 460 | else if (rdev->family >= CHIP_RV770) |
419 | return r700_read_disabled_bios(rdev); | 461 | return r700_read_disabled_bios(rdev); |
420 | else if (rdev->family >= CHIP_R600) | 462 | else if (rdev->family >= CHIP_R600) |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 3bddea5b5295..d27ef74590cd 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -471,8 +471,9 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | |||
471 | return true; | 471 | return true; |
472 | } | 472 | } |
473 | 473 | ||
474 | /* this is used for atom LCDs as well */ | ||
474 | struct edid * | 475 | struct edid * |
475 | radeon_combios_get_hardcoded_edid(struct radeon_device *rdev) | 476 | radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) |
476 | { | 477 | { |
477 | if (rdev->mode_info.bios_hardcoded_edid) | 478 | if (rdev->mode_info.bios_hardcoded_edid) |
478 | return rdev->mode_info.bios_hardcoded_edid; | 479 | return rdev->mode_info.bios_hardcoded_edid; |
@@ -729,7 +730,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) | |||
729 | clk = RBIOS8(offset + 3 + (i * 5) + 3); | 730 | clk = RBIOS8(offset + 3 + (i * 5) + 3); |
730 | data = RBIOS8(offset + 3 + (i * 5) + 4); | 731 | data = RBIOS8(offset + 3 + (i * 5) + 4); |
731 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, | 732 | i2c = combios_setup_i2c_bus(rdev, DDC_MONID, |
732 | clk, data); | 733 | (1 << clk), (1 << data)); |
733 | rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); | 734 | rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); |
734 | break; | 735 | break; |
735 | } | 736 | } |
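[Annotation] The GPIOPAD change above reads like a units fix: the combios table stores GPIO pin numbers, while combios_setup_i2c_bus() evidently expects register bitmasks, so the raw bytes are now shifted into masks before the call. In short:

    u8  clk_pin  = RBIOS8(offset + 3 + (i * 5) + 3);  /* pin index from the BIOS table */
    u32 clk_mask = 1u << clk_pin;                     /* what the i2c rec actually wants */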
@@ -2441,6 +2442,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2441 | 2442 | ||
2442 | rdev->pm.default_power_state_index = -1; | 2443 | rdev->pm.default_power_state_index = -1; |
2443 | 2444 | ||
2445 | /* allocate 2 power states */ | ||
2446 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); | ||
2447 | if (!rdev->pm.power_state) { | ||
2448 | rdev->pm.default_power_state_index = state_index; | ||
2449 | rdev->pm.num_power_states = 0; | ||
2450 | |||
2451 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | ||
2452 | rdev->pm.current_clock_mode_index = 0; | ||
2453 | return; | ||
2454 | } | ||
2455 | |||
2444 | if (rdev->flags & RADEON_IS_MOBILITY) { | 2456 | if (rdev->flags & RADEON_IS_MOBILITY) { |
2445 | offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); | 2457 | offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); |
2446 | if (offset) { | 2458 | if (offset) { |
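[Annotation] radeon_combios_get_power_modes() now allocates its two power states up front and bails out cleanly when the allocation fails: zero power states, default/current indices pinned, so power management is simply disabled rather than dereferencing a NULL array later. As an aside, kcalloc() would express the same allocation with implicit overflow checking; a sketch:

    rdev->pm.power_state = kcalloc(2, sizeof(struct radeon_power_state),
                                   GFP_KERNEL);
    if (!rdev->pm.power_state)
            return;   /* num_power_states stays 0: PM is off, not broken */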
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 3bef9f6d66fd..22b7e3dc0eca 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -472,6 +472,9 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) | |||
472 | if (mode) { | 472 | if (mode) { |
473 | ret = 1; | 473 | ret = 1; |
474 | drm_mode_probed_add(connector, mode); | 474 | drm_mode_probed_add(connector, mode); |
475 | /* add the width/height from vbios tables if available */ | ||
476 | connector->display_info.width_mm = mode->width_mm; | ||
477 | connector->display_info.height_mm = mode->height_mm; | ||
475 | /* add scaled modes */ | 478 | /* add scaled modes */ |
476 | radeon_add_common_modes(encoder, connector); | 479 | radeon_add_common_modes(encoder, connector); |
477 | } | 480 | } |
@@ -1175,6 +1178,8 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1175 | /* no HPD on analog connectors */ | 1178 | /* no HPD on analog connectors */ |
1176 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1179 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1177 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 1180 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1181 | connector->interlace_allowed = true; | ||
1182 | connector->doublescan_allowed = true; | ||
1178 | break; | 1183 | break; |
1179 | case DRM_MODE_CONNECTOR_DVIA: | 1184 | case DRM_MODE_CONNECTOR_DVIA: |
1180 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1185 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
@@ -1190,6 +1195,8 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1190 | 1); | 1195 | 1); |
1191 | /* no HPD on analog connectors */ | 1196 | /* no HPD on analog connectors */ |
1192 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1197 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1198 | connector->interlace_allowed = true; | ||
1199 | connector->doublescan_allowed = true; | ||
1193 | break; | 1200 | break; |
1194 | case DRM_MODE_CONNECTOR_DVII: | 1201 | case DRM_MODE_CONNECTOR_DVII: |
1195 | case DRM_MODE_CONNECTOR_DVID: | 1202 | case DRM_MODE_CONNECTOR_DVID: |
@@ -1212,7 +1219,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1212 | if (ASIC_IS_AVIVO(rdev)) { | 1219 | if (ASIC_IS_AVIVO(rdev)) { |
1213 | drm_connector_attach_property(&radeon_connector->base, | 1220 | drm_connector_attach_property(&radeon_connector->base, |
1214 | rdev->mode_info.underscan_property, | 1221 | rdev->mode_info.underscan_property, |
1215 | UNDERSCAN_AUTO); | 1222 | UNDERSCAN_OFF); |
1216 | drm_connector_attach_property(&radeon_connector->base, | 1223 | drm_connector_attach_property(&radeon_connector->base, |
1217 | rdev->mode_info.underscan_hborder_property, | 1224 | rdev->mode_info.underscan_hborder_property, |
1218 | 0); | 1225 | 0); |
@@ -1226,6 +1233,11 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1226 | rdev->mode_info.load_detect_property, | 1233 | rdev->mode_info.load_detect_property, |
1227 | 1); | 1234 | 1); |
1228 | } | 1235 | } |
1236 | connector->interlace_allowed = true; | ||
1237 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | ||
1238 | connector->doublescan_allowed = true; | ||
1239 | else | ||
1240 | connector->doublescan_allowed = false; | ||
1229 | break; | 1241 | break; |
1230 | case DRM_MODE_CONNECTOR_HDMIA: | 1242 | case DRM_MODE_CONNECTOR_HDMIA: |
1231 | case DRM_MODE_CONNECTOR_HDMIB: | 1243 | case DRM_MODE_CONNECTOR_HDMIB: |
@@ -1247,7 +1259,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1247 | if (ASIC_IS_AVIVO(rdev)) { | 1259 | if (ASIC_IS_AVIVO(rdev)) { |
1248 | drm_connector_attach_property(&radeon_connector->base, | 1260 | drm_connector_attach_property(&radeon_connector->base, |
1249 | rdev->mode_info.underscan_property, | 1261 | rdev->mode_info.underscan_property, |
1250 | UNDERSCAN_AUTO); | 1262 | UNDERSCAN_OFF); |
1251 | drm_connector_attach_property(&radeon_connector->base, | 1263 | drm_connector_attach_property(&radeon_connector->base, |
1252 | rdev->mode_info.underscan_hborder_property, | 1264 | rdev->mode_info.underscan_hborder_property, |
1253 | 0); | 1265 | 0); |
@@ -1256,6 +1268,11 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1256 | 0); | 1268 | 0); |
1257 | } | 1269 | } |
1258 | subpixel_order = SubPixelHorizontalRGB; | 1270 | subpixel_order = SubPixelHorizontalRGB; |
1271 | connector->interlace_allowed = true; | ||
1272 | if (connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
1273 | connector->doublescan_allowed = true; | ||
1274 | else | ||
1275 | connector->doublescan_allowed = false; | ||
1259 | break; | 1276 | break; |
1260 | case DRM_MODE_CONNECTOR_DisplayPort: | 1277 | case DRM_MODE_CONNECTOR_DisplayPort: |
1261 | case DRM_MODE_CONNECTOR_eDP: | 1278 | case DRM_MODE_CONNECTOR_eDP: |
@@ -1285,7 +1302,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1285 | if (ASIC_IS_AVIVO(rdev)) { | 1302 | if (ASIC_IS_AVIVO(rdev)) { |
1286 | drm_connector_attach_property(&radeon_connector->base, | 1303 | drm_connector_attach_property(&radeon_connector->base, |
1287 | rdev->mode_info.underscan_property, | 1304 | rdev->mode_info.underscan_property, |
1288 | UNDERSCAN_AUTO); | 1305 | UNDERSCAN_OFF); |
1289 | drm_connector_attach_property(&radeon_connector->base, | 1306 | drm_connector_attach_property(&radeon_connector->base, |
1290 | rdev->mode_info.underscan_hborder_property, | 1307 | rdev->mode_info.underscan_hborder_property, |
1291 | 0); | 1308 | 0); |
@@ -1293,6 +1310,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1293 | rdev->mode_info.underscan_vborder_property, | 1310 | rdev->mode_info.underscan_vborder_property, |
1294 | 0); | 1311 | 0); |
1295 | } | 1312 | } |
1313 | connector->interlace_allowed = true; | ||
1314 | /* in theory with a DP to VGA converter... */ | ||
1315 | connector->doublescan_allowed = false; | ||
1296 | break; | 1316 | break; |
1297 | case DRM_MODE_CONNECTOR_SVIDEO: | 1317 | case DRM_MODE_CONNECTOR_SVIDEO: |
1298 | case DRM_MODE_CONNECTOR_Composite: | 1318 | case DRM_MODE_CONNECTOR_Composite: |
@@ -1308,6 +1328,8 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1308 | radeon_atombios_get_tv_info(rdev)); | 1328 | radeon_atombios_get_tv_info(rdev)); |
1309 | /* no HPD on analog connectors */ | 1329 | /* no HPD on analog connectors */ |
1310 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1330 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1331 | connector->interlace_allowed = false; | ||
1332 | connector->doublescan_allowed = false; | ||
1311 | break; | 1333 | break; |
1312 | case DRM_MODE_CONNECTOR_LVDS: | 1334 | case DRM_MODE_CONNECTOR_LVDS: |
1313 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1335 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
@@ -1326,6 +1348,8 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1326 | dev->mode_config.scaling_mode_property, | 1348 | dev->mode_config.scaling_mode_property, |
1327 | DRM_MODE_SCALE_FULLSCREEN); | 1349 | DRM_MODE_SCALE_FULLSCREEN); |
1328 | subpixel_order = SubPixelHorizontalRGB; | 1350 | subpixel_order = SubPixelHorizontalRGB; |
1351 | connector->interlace_allowed = false; | ||
1352 | connector->doublescan_allowed = false; | ||
1329 | break; | 1353 | break; |
1330 | } | 1354 | } |
1331 | 1355 | ||
@@ -1403,6 +1427,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1403 | /* no HPD on analog connectors */ | 1427 | /* no HPD on analog connectors */ |
1404 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1428 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1405 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 1429 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1430 | connector->interlace_allowed = true; | ||
1431 | connector->doublescan_allowed = true; | ||
1406 | break; | 1432 | break; |
1407 | case DRM_MODE_CONNECTOR_DVIA: | 1433 | case DRM_MODE_CONNECTOR_DVIA: |
1408 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1434 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
@@ -1418,6 +1444,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1418 | 1); | 1444 | 1); |
1419 | /* no HPD on analog connectors */ | 1445 | /* no HPD on analog connectors */ |
1420 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1446 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1447 | connector->interlace_allowed = true; | ||
1448 | connector->doublescan_allowed = true; | ||
1421 | break; | 1449 | break; |
1422 | case DRM_MODE_CONNECTOR_DVII: | 1450 | case DRM_MODE_CONNECTOR_DVII: |
1423 | case DRM_MODE_CONNECTOR_DVID: | 1451 | case DRM_MODE_CONNECTOR_DVID: |
@@ -1435,6 +1463,11 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1435 | 1); | 1463 | 1); |
1436 | } | 1464 | } |
1437 | subpixel_order = SubPixelHorizontalRGB; | 1465 | subpixel_order = SubPixelHorizontalRGB; |
1466 | connector->interlace_allowed = true; | ||
1467 | if (connector_type == DRM_MODE_CONNECTOR_DVII) | ||
1468 | connector->doublescan_allowed = true; | ||
1469 | else | ||
1470 | connector->doublescan_allowed = false; | ||
1438 | break; | 1471 | break; |
1439 | case DRM_MODE_CONNECTOR_SVIDEO: | 1472 | case DRM_MODE_CONNECTOR_SVIDEO: |
1440 | case DRM_MODE_CONNECTOR_Composite: | 1473 | case DRM_MODE_CONNECTOR_Composite: |
@@ -1457,6 +1490,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1457 | radeon_combios_get_tv_info(rdev)); | 1490 | radeon_combios_get_tv_info(rdev)); |
1458 | /* no HPD on analog connectors */ | 1491 | /* no HPD on analog connectors */ |
1459 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; | 1492 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1493 | connector->interlace_allowed = false; | ||
1494 | connector->doublescan_allowed = false; | ||
1460 | break; | 1495 | break; |
1461 | case DRM_MODE_CONNECTOR_LVDS: | 1496 | case DRM_MODE_CONNECTOR_LVDS: |
1462 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 1497 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
@@ -1470,6 +1505,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1470 | dev->mode_config.scaling_mode_property, | 1505 | dev->mode_config.scaling_mode_property, |
1471 | DRM_MODE_SCALE_FULLSCREEN); | 1506 | DRM_MODE_SCALE_FULLSCREEN); |
1472 | subpixel_order = SubPixelHorizontalRGB; | 1507 | subpixel_order = SubPixelHorizontalRGB; |
1508 | connector->interlace_allowed = false; | ||
1509 | connector->doublescan_allowed = false; | ||
1473 | break; | 1510 | break; |
1474 | } | 1511 | } |
1475 | 1512 | ||
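[Annotation] Taken together, the connector hunks above encode one policy, duplicated across the ATOM and legacy paths: interlaced modes are allowed on everything except TV-out and panels, and doublescan only where an analog path exists (VGA, DVI-A, DVI-I, HDMI-B); DP keeps doublescan off even though a DP-to-VGA converter could in theory carry it. The AVIVO underscan default also moves from UNDERSCAN_AUTO to UNDERSCAN_OFF. A hypothetical helper capturing the new flags (not in the patch; both functions open-code this per case):

    static void radeon_set_scan_flags(struct drm_connector *c, int type)
    {
            switch (type) {
            case DRM_MODE_CONNECTOR_VGA:
            case DRM_MODE_CONNECTOR_DVIA:
            case DRM_MODE_CONNECTOR_DVII:
            case DRM_MODE_CONNECTOR_HDMIB:
                    c->interlace_allowed = true;   /* analog path present */
                    c->doublescan_allowed = true;
                    break;
            case DRM_MODE_CONNECTOR_DVID:
            case DRM_MODE_CONNECTOR_HDMIA:
            case DRM_MODE_CONNECTOR_DisplayPort:
            case DRM_MODE_CONNECTOR_eDP:
                    c->interlace_allowed = true;   /* digital only: no doublescan */
                    c->doublescan_allowed = false;
                    break;
            default:                               /* TV-out, LVDS */
                    c->interlace_allowed = false;
                    c->doublescan_allowed = false;
            }
    }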
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index eb6b9eed7349..3d599e33b9cc 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c | |||
@@ -2113,9 +2113,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) | |||
2113 | break; | 2113 | break; |
2114 | } | 2114 | } |
2115 | 2115 | ||
2116 | if (drm_device_is_agp(dev)) | 2116 | if (drm_pci_device_is_agp(dev)) |
2117 | dev_priv->flags |= RADEON_IS_AGP; | 2117 | dev_priv->flags |= RADEON_IS_AGP; |
2118 | else if (drm_device_is_pcie(dev)) | 2118 | else if (drm_pci_device_is_pcie(dev)) |
2119 | dev_priv->flags |= RADEON_IS_PCIE; | 2119 | dev_priv->flags |= RADEON_IS_PCIE; |
2120 | else | 2120 | else |
2121 | dev_priv->flags |= RADEON_IS_PCI; | 2121 | dev_priv->flags |= RADEON_IS_PCI; |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 35b5eb8fbe2a..8c1916941871 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -75,7 +75,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
75 | return -ENOENT; | 75 | return -ENOENT; |
76 | } | 76 | } |
77 | p->relocs_ptr[i] = &p->relocs[i]; | 77 | p->relocs_ptr[i] = &p->relocs[i]; |
78 | p->relocs[i].robj = p->relocs[i].gobj->driver_private; | 78 | p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); |
79 | p->relocs[i].lobj.bo = p->relocs[i].robj; | 79 | p->relocs[i].lobj.bo = p->relocs[i].robj; |
80 | p->relocs[i].lobj.wdomain = r->write_domain; | 80 | p->relocs[i].lobj.wdomain = r->write_domain; |
81 | p->relocs[i].lobj.rdomain = r->read_domains; | 81 | p->relocs[i].lobj.rdomain = r->read_domains; |
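[Annotation] obj->driver_private is replaced here, and again in the device and display hunks below, by a typed accessor. Assuming the usual embedding trick, gem_to_radeon_bo() is most likely a container_of() wrapper along these lines (the field name is a guess):

    static inline struct radeon_bo *gem_to_radeon_bo(struct drm_gem_object *gobj)
    {
            return container_of(gobj, struct radeon_bo, gem_base);
    }

which lets the compiler type-check what used to be an untyped void * hop.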
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 3952cf3d0ee9..7c0a3f26ab5e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -82,6 +82,9 @@ static const char radeon_family_name[][16] = { | |||
82 | "CYPRESS", | 82 | "CYPRESS", |
83 | "HEMLOCK", | 83 | "HEMLOCK", |
84 | "PALM", | 84 | "PALM", |
85 | "BARTS", | ||
86 | "TURKS", | ||
87 | "CAICOS", | ||
85 | "LAST", | 88 | "LAST", |
86 | }; | 89 | }; |
87 | 90 | ||
@@ -181,7 +184,7 @@ int radeon_wb_init(struct radeon_device *rdev) | |||
181 | int r; | 184 | int r; |
182 | 185 | ||
183 | if (rdev->wb.wb_obj == NULL) { | 186 | if (rdev->wb.wb_obj == NULL) { |
184 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, | 187 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, |
185 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); | 188 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); |
186 | if (r) { | 189 | if (r) { |
187 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); | 190 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); |
@@ -225,6 +228,11 @@ int radeon_wb_init(struct radeon_device *rdev) | |||
225 | rdev->wb.use_event = true; | 228 | rdev->wb.use_event = true; |
226 | } | 229 | } |
227 | } | 230 | } |
231 | /* always use writeback/events on NI */ | ||
232 | if (ASIC_IS_DCE5(rdev)) { | ||
233 | rdev->wb.enabled = true; | ||
234 | rdev->wb.use_event = true; | ||
235 | } | ||
228 | 236 | ||
229 | dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); | 237 | dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); |
230 | 238 | ||
@@ -287,7 +295,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 | |||
287 | mc->mc_vram_size = mc->aper_size; | 295 | mc->mc_vram_size = mc->aper_size; |
288 | } | 296 | } |
289 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | 297 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
290 | dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", | 298 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", |
291 | mc->mc_vram_size >> 20, mc->vram_start, | 299 | mc->mc_vram_size >> 20, mc->vram_start, |
292 | mc->vram_end, mc->real_vram_size >> 20); | 300 | mc->vram_end, mc->real_vram_size >> 20); |
293 | } | 301 | } |
@@ -324,7 +332,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
324 | mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; | 332 | mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; |
325 | } | 333 | } |
326 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; | 334 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; |
327 | dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", | 335 | dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", |
328 | mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); | 336 | mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); |
329 | } | 337 | } |
330 | 338 | ||
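[Annotation] The VRAM/GTT banner change from %08llX to %016llX is cosmetic but worth having: printf field widths are minimums, so an address above 4 GB printed in full before, it just broke column alignment. For example:

    u64 start = 0x100000000ULL;                    /* 4 GB */
    printk("0x%08llX\n",  start);   /* "0x100000000"        - spills past the 8-digit field */
    printk("0x%016llX\n", start);   /* "0x0000000100000000" - full 64-bit width */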
@@ -642,20 +650,20 @@ void radeon_check_arguments(struct radeon_device *rdev) | |||
642 | static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | 650 | static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) |
643 | { | 651 | { |
644 | struct drm_device *dev = pci_get_drvdata(pdev); | 652 | struct drm_device *dev = pci_get_drvdata(pdev); |
645 | struct radeon_device *rdev = dev->dev_private; | ||
646 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 653 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
647 | if (state == VGA_SWITCHEROO_ON) { | 654 | if (state == VGA_SWITCHEROO_ON) { |
648 | printk(KERN_INFO "radeon: switched on\n"); | 655 | printk(KERN_INFO "radeon: switched on\n"); |
649 | /* don't suspend or resume card normally */ | 656 | /* don't suspend or resume card normally */ |
650 | rdev->powered_down = false; | 657 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
651 | radeon_resume_kms(dev); | 658 | radeon_resume_kms(dev); |
659 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | ||
652 | drm_kms_helper_poll_enable(dev); | 660 | drm_kms_helper_poll_enable(dev); |
653 | } else { | 661 | } else { |
654 | printk(KERN_INFO "radeon: switched off\n"); | 662 | printk(KERN_INFO "radeon: switched off\n"); |
655 | drm_kms_helper_poll_disable(dev); | 663 | drm_kms_helper_poll_disable(dev); |
664 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | ||
656 | radeon_suspend_kms(dev, pmm); | 665 | radeon_suspend_kms(dev, pmm); |
657 | /* don't suspend or resume card normally */ | 666 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; |
658 | rdev->powered_down = true; | ||
659 | } | 667 | } |
660 | } | 668 | } |
661 | 669 | ||
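[Annotation] The switcheroo handler drops the driver-private rdev->powered_down flag in favour of the DRM core's dev->switch_power_state, passing through DRM_SWITCH_POWER_CHANGING while the transition runs so the real suspend/resume work is not short-circuited. The suspend/resume hunks below complete the move by early-returning on the core state:

    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
            return 0;   /* card is switched away; normal PM must not touch it */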
@@ -710,11 +718,6 @@ int radeon_device_init(struct radeon_device *rdev, | |||
710 | init_waitqueue_head(&rdev->irq.vblank_queue); | 718 | init_waitqueue_head(&rdev->irq.vblank_queue); |
711 | init_waitqueue_head(&rdev->irq.idle_queue); | 719 | init_waitqueue_head(&rdev->irq.idle_queue); |
712 | 720 | ||
713 | /* setup workqueue */ | ||
714 | rdev->wq = create_workqueue("radeon"); | ||
715 | if (rdev->wq == NULL) | ||
716 | return -ENOMEM; | ||
717 | |||
718 | /* Set asic functions */ | 721 | /* Set asic functions */ |
719 | r = radeon_asic_init(rdev); | 722 | r = radeon_asic_init(rdev); |
720 | if (r) | 723 | if (r) |
@@ -779,6 +782,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
779 | vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); | 782 | vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); |
780 | vga_switcheroo_register_client(rdev->pdev, | 783 | vga_switcheroo_register_client(rdev->pdev, |
781 | radeon_switcheroo_set_state, | 784 | radeon_switcheroo_set_state, |
785 | NULL, | ||
782 | radeon_switcheroo_can_switch); | 786 | radeon_switcheroo_can_switch); |
783 | 787 | ||
784 | r = radeon_init(rdev); | 788 | r = radeon_init(rdev); |
@@ -812,7 +816,6 @@ void radeon_device_fini(struct radeon_device *rdev) | |||
812 | /* evict vram memory */ | 816 | /* evict vram memory */ |
813 | radeon_bo_evict_vram(rdev); | 817 | radeon_bo_evict_vram(rdev); |
814 | radeon_fini(rdev); | 818 | radeon_fini(rdev); |
815 | destroy_workqueue(rdev->wq); | ||
816 | vga_switcheroo_unregister_client(rdev->pdev); | 819 | vga_switcheroo_unregister_client(rdev->pdev); |
817 | vga_client_register(rdev->pdev, NULL, NULL, NULL); | 820 | vga_client_register(rdev->pdev, NULL, NULL, NULL); |
818 | if (rdev->rio_mem) | 821 | if (rdev->rio_mem) |
@@ -841,7 +844,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
841 | } | 844 | } |
842 | rdev = dev->dev_private; | 845 | rdev = dev->dev_private; |
843 | 846 | ||
844 | if (rdev->powered_down) | 847 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
845 | return 0; | 848 | return 0; |
846 | 849 | ||
847 | /* turn off display hw */ | 850 | /* turn off display hw */ |
@@ -857,7 +860,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
857 | if (rfb == NULL || rfb->obj == NULL) { | 860 | if (rfb == NULL || rfb->obj == NULL) { |
858 | continue; | 861 | continue; |
859 | } | 862 | } |
860 | robj = rfb->obj->driver_private; | 863 | robj = gem_to_radeon_bo(rfb->obj); |
861 | /* don't unpin kernel fb objects */ | 864 | /* don't unpin kernel fb objects */ |
862 | if (!radeon_fbdev_robj_is_fb(rdev, robj)) { | 865 | if (!radeon_fbdev_robj_is_fb(rdev, robj)) { |
863 | r = radeon_bo_reserve(robj, false); | 866 | r = radeon_bo_reserve(robj, false); |
@@ -888,9 +891,9 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
888 | pci_disable_device(dev->pdev); | 891 | pci_disable_device(dev->pdev); |
889 | pci_set_power_state(dev->pdev, PCI_D3hot); | 892 | pci_set_power_state(dev->pdev, PCI_D3hot); |
890 | } | 893 | } |
891 | acquire_console_sem(); | 894 | console_lock(); |
892 | radeon_fbdev_set_suspend(rdev, 1); | 895 | radeon_fbdev_set_suspend(rdev, 1); |
893 | release_console_sem(); | 896 | console_unlock(); |
894 | return 0; | 897 | return 0; |
895 | } | 898 | } |
896 | 899 | ||
@@ -899,14 +902,14 @@ int radeon_resume_kms(struct drm_device *dev) | |||
899 | struct drm_connector *connector; | 902 | struct drm_connector *connector; |
900 | struct radeon_device *rdev = dev->dev_private; | 903 | struct radeon_device *rdev = dev->dev_private; |
901 | 904 | ||
902 | if (rdev->powered_down) | 905 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
903 | return 0; | 906 | return 0; |
904 | 907 | ||
905 | acquire_console_sem(); | 908 | console_lock(); |
906 | pci_set_power_state(dev->pdev, PCI_D0); | 909 | pci_set_power_state(dev->pdev, PCI_D0); |
907 | pci_restore_state(dev->pdev); | 910 | pci_restore_state(dev->pdev); |
908 | if (pci_enable_device(dev->pdev)) { | 911 | if (pci_enable_device(dev->pdev)) { |
909 | release_console_sem(); | 912 | console_unlock(); |
910 | return -1; | 913 | return -1; |
911 | } | 914 | } |
912 | pci_set_master(dev->pdev); | 915 | pci_set_master(dev->pdev); |
@@ -916,18 +919,17 @@ int radeon_resume_kms(struct drm_device *dev) | |||
916 | radeon_pm_resume(rdev); | 919 | radeon_pm_resume(rdev); |
917 | radeon_restore_bios_scratch_regs(rdev); | 920 | radeon_restore_bios_scratch_regs(rdev); |
918 | 921 | ||
919 | /* turn on display hw */ | ||
920 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
921 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
922 | } | ||
923 | |||
924 | radeon_fbdev_set_suspend(rdev, 0); | 922 | radeon_fbdev_set_suspend(rdev, 0); |
925 | release_console_sem(); | 923 | console_unlock(); |
926 | 924 | ||
927 | /* reset hpd state */ | 925 | /* reset hpd state */ |
928 | radeon_hpd_init(rdev); | 926 | radeon_hpd_init(rdev); |
929 | /* blat the mode back in */ | 927 | /* blat the mode back in */ |
930 | drm_helper_resume_force_mode(dev); | 928 | drm_helper_resume_force_mode(dev); |
929 | /* turn on display hw */ | ||
930 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
931 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
932 | } | ||
931 | return 0; | 933 | return 0; |
932 | } | 934 | } |
933 | 935 | ||
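[Annotation] Two independent cleanups land in this path: acquire_console_sem()/release_console_sem() become console_lock()/console_unlock() (a kernel-wide rename of the console locking API, same semantics), and the DPMS-on loop moves from before mode restore to after drm_helper_resume_force_mode(), so outputs are lit only once their CRTCs carry a valid mode again. The resulting resume order, roughly:

    /* pci re-enable -> radeon_resume(rdev) -> restore scratch regs
     *   -> fbdev unsuspend, console_unlock()
     *   -> radeon_hpd_init()                  : reset hotplug state
     *   -> drm_helper_resume_force_mode(dev)  : reprogram the CRTCs
     *   -> DPMS_ON each connector             : light the outputs last
     */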
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7b17e639ab32..4409975a363c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -68,7 +68,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) | |||
68 | WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); | 68 | WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); |
69 | } | 69 | } |
70 | 70 | ||
71 | static void evergreen_crtc_load_lut(struct drm_crtc *crtc) | 71 | static void dce4_crtc_load_lut(struct drm_crtc *crtc) |
72 | { | 72 | { |
73 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 73 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
74 | struct drm_device *dev = crtc->dev; | 74 | struct drm_device *dev = crtc->dev; |
@@ -98,6 +98,66 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc) | |||
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | static void dce5_crtc_load_lut(struct drm_crtc *crtc) | ||
102 | { | ||
103 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
104 | struct drm_device *dev = crtc->dev; | ||
105 | struct radeon_device *rdev = dev->dev_private; | ||
106 | int i; | ||
107 | |||
108 | DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); | ||
109 | |||
110 | WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, | ||
111 | (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | | ||
112 | NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); | ||
113 | WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset, | ||
114 | NI_GRPH_PRESCALE_BYPASS); | ||
115 | WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset, | ||
116 | NI_OVL_PRESCALE_BYPASS); | ||
117 | WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset, | ||
118 | (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) | | ||
119 | NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT))); | ||
120 | |||
121 | WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); | ||
122 | |||
123 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); | ||
124 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); | ||
125 | WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); | ||
126 | |||
127 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); | ||
128 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); | ||
129 | WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); | ||
130 | |||
131 | WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); | ||
132 | WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); | ||
133 | |||
134 | WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); | ||
135 | for (i = 0; i < 256; i++) { | ||
136 | WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, | ||
137 | (radeon_crtc->lut_r[i] << 20) | | ||
138 | (radeon_crtc->lut_g[i] << 10) | | ||
139 | (radeon_crtc->lut_b[i] << 0)); | ||
140 | } | ||
141 | |||
142 | WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset, | ||
143 | (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | | ||
144 | NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | | ||
145 | NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | | ||
146 | NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS))); | ||
147 | WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset, | ||
148 | (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) | | ||
149 | NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS))); | ||
150 | WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset, | ||
151 | (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | | ||
152 | NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); | ||
153 | WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset, | ||
154 | (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) | | ||
155 | NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); | ||
156 | /* XXX match this to the depth of the crtc fmt block, move to modeset? */ | ||
157 | WREG32(0x6940 + radeon_crtc->crtc_offset, 0); | ||
158 | |||
159 | } | ||
160 | |||
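[Annotation] dce5_crtc_load_lut() is essentially the DCE4 routine plus explicit bypasses for the colour-pipeline blocks that are new on NI: input/output CSC, prescale, degamma/regamma and gamut remap are all forced off so the 256-entry legacy LUT behaves as on older parts. Each LUT write packs three 10-bit channels into one 30-bit word:

    /* [29:20] red, [19:10] green, [9:0] blue */
    u32 word = (lut_r[i] << 20) | (lut_g[i] << 10) | (lut_b[i] << 0);
    WREG32(EVERGREEN_DC_LUT_30_COLOR + crtc_offset, word);

The bare WREG32(0x6940 + crtc_offset, 0) is flagged by the author's own XXX comment as a format-depth setting that probably belongs in the modeset path.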
101 | static void legacy_crtc_load_lut(struct drm_crtc *crtc) | 161 | static void legacy_crtc_load_lut(struct drm_crtc *crtc) |
102 | { | 162 | { |
103 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 163 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
@@ -130,8 +190,10 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) | |||
130 | if (!crtc->enabled) | 190 | if (!crtc->enabled) |
131 | return; | 191 | return; |
132 | 192 | ||
133 | if (ASIC_IS_DCE4(rdev)) | 193 | if (ASIC_IS_DCE5(rdev)) |
134 | evergreen_crtc_load_lut(crtc); | 194 | dce5_crtc_load_lut(crtc); |
195 | else if (ASIC_IS_DCE4(rdev)) | ||
196 | dce4_crtc_load_lut(crtc); | ||
135 | else if (ASIC_IS_AVIVO(rdev)) | 197 | else if (ASIC_IS_AVIVO(rdev)) |
136 | avivo_crtc_load_lut(crtc); | 198 | avivo_crtc_load_lut(crtc); |
137 | else | 199 | else |
@@ -309,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, | |||
309 | new_radeon_fb = to_radeon_framebuffer(fb); | 371 | new_radeon_fb = to_radeon_framebuffer(fb); |
310 | /* schedule unpin of the old buffer */ | 372 | /* schedule unpin of the old buffer */ |
311 | obj = old_radeon_fb->obj; | 373 | obj = old_radeon_fb->obj; |
312 | rbo = obj->driver_private; | 374 | rbo = gem_to_radeon_bo(obj); |
313 | work->old_rbo = rbo; | 375 | work->old_rbo = rbo; |
314 | INIT_WORK(&work->work, radeon_unpin_work_func); | 376 | INIT_WORK(&work->work, radeon_unpin_work_func); |
315 | 377 | ||
@@ -329,7 +391,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, | |||
329 | 391 | ||
330 | /* pin the new buffer */ | 392 | /* pin the new buffer */ |
331 | obj = new_radeon_fb->obj; | 393 | obj = new_radeon_fb->obj; |
332 | rbo = obj->driver_private; | 394 | rbo = gem_to_radeon_bo(obj); |
333 | 395 | ||
334 | DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", | 396 | DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", |
335 | work->old_rbo, rbo); | 397 | work->old_rbo, rbo); |
@@ -679,9 +741,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
679 | if (!radeon_connector->edid) { | 741 | if (!radeon_connector->edid) { |
680 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 742 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
681 | } | 743 | } |
682 | /* some servers provide a hardcoded edid in rom for KVMs */ | 744 | |
683 | if (!radeon_connector->edid) | 745 | if (!radeon_connector->edid) { |
684 | radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev); | 746 | if (rdev->is_atom_bios) { |
747 | /* some laptops provide a hardcoded edid in rom for LCDs */ | ||
748 | if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) || | ||
749 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP))) | ||
750 | radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); | ||
751 | } else | ||
752 | /* some servers provide a hardcoded edid in rom for KVMs */ | ||
753 | radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); | ||
754 | } | ||
685 | if (radeon_connector->edid) { | 755 | if (radeon_connector->edid) { |
686 | drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); | 756 | drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); |
687 | ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); | 757 | ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); |
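[Annotation] The hardcoded-EDID fallback is now BIOS-type aware: combios systems keep the old behaviour (some servers embed an EDID for their KVMs), while ATOM systems use it only for internal panels (LVDS/eDP). A sketch of the resulting probe order, where wants_hardcoded() is an invented name for the condition shown above:

    edid = drm_get_edid(connector, ddc);                 /* 1. real DDC probe     */
    if (!edid && wants_hardcoded(rdev, connector))
            edid = radeon_bios_get_hardcoded_edid(rdev); /* 2. BIOS-embedded copy */

The rename from radeon_combios_get_hardcoded_edid() in radeon_combios.c matches: the helper is no longer combios-only.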
@@ -710,6 +780,115 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
710 | return ret; | 780 | return ret; |
711 | } | 781 | } |
712 | 782 | ||
783 | /* avivo */ | ||
784 | static void avivo_get_fb_div(struct radeon_pll *pll, | ||
785 | u32 target_clock, | ||
786 | u32 post_div, | ||
787 | u32 ref_div, | ||
788 | u32 *fb_div, | ||
789 | u32 *frac_fb_div) | ||
790 | { | ||
791 | u32 tmp = post_div * ref_div; | ||
792 | |||
793 | tmp *= target_clock; | ||
794 | *fb_div = tmp / pll->reference_freq; | ||
795 | *frac_fb_div = tmp % pll->reference_freq; | ||
796 | } | ||
797 | |||
798 | static u32 avivo_get_post_div(struct radeon_pll *pll, | ||
799 | u32 target_clock) | ||
800 | { | ||
801 | u32 vco, post_div, tmp; | ||
802 | |||
803 | if (pll->flags & RADEON_PLL_USE_POST_DIV) | ||
804 | return pll->post_div; | ||
805 | |||
806 | if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { | ||
807 | if (pll->flags & RADEON_PLL_IS_LCD) | ||
808 | vco = pll->lcd_pll_out_min; | ||
809 | else | ||
810 | vco = pll->pll_out_min; | ||
811 | } else { | ||
812 | if (pll->flags & RADEON_PLL_IS_LCD) | ||
813 | vco = pll->lcd_pll_out_max; | ||
814 | else | ||
815 | vco = pll->pll_out_max; | ||
816 | } | ||
817 | |||
818 | post_div = vco / target_clock; | ||
819 | tmp = vco % target_clock; | ||
820 | |||
821 | if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { | ||
822 | if (tmp) | ||
823 | post_div++; | ||
824 | } else { | ||
825 | if (!tmp) | ||
826 | post_div--; | ||
827 | } | ||
828 | |||
829 | return post_div; | ||
830 | } | ||
831 | |||
832 | #define MAX_TOLERANCE 10 | ||
833 | |||
834 | void radeon_compute_pll_avivo(struct radeon_pll *pll, | ||
835 | u32 freq, | ||
836 | u32 *dot_clock_p, | ||
837 | u32 *fb_div_p, | ||
838 | u32 *frac_fb_div_p, | ||
839 | u32 *ref_div_p, | ||
840 | u32 *post_div_p) | ||
841 | { | ||
842 | u32 target_clock = freq / 10; | ||
843 | u32 post_div = avivo_get_post_div(pll, target_clock); | ||
844 | u32 ref_div = pll->min_ref_div; | ||
845 | u32 fb_div = 0, frac_fb_div = 0, tmp; | ||
846 | |||
847 | if (pll->flags & RADEON_PLL_USE_REF_DIV) | ||
848 | ref_div = pll->reference_div; | ||
849 | |||
850 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | ||
851 | avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div); | ||
852 | frac_fb_div = (100 * frac_fb_div) / pll->reference_freq; | ||
853 | if (frac_fb_div >= 5) { | ||
854 | frac_fb_div -= 5; | ||
855 | frac_fb_div = frac_fb_div / 10; | ||
856 | frac_fb_div++; | ||
857 | } | ||
858 | if (frac_fb_div >= 10) { | ||
859 | fb_div++; | ||
860 | frac_fb_div = 0; | ||
861 | } | ||
862 | } else { | ||
863 | while (ref_div <= pll->max_ref_div) { | ||
864 | avivo_get_fb_div(pll, target_clock, post_div, ref_div, | ||
865 | &fb_div, &frac_fb_div); | ||
866 | if (frac_fb_div >= (pll->reference_freq / 2)) | ||
867 | fb_div++; | ||
868 | frac_fb_div = 0; | ||
869 | tmp = (pll->reference_freq * fb_div) / (post_div * ref_div); | ||
870 | tmp = (tmp * 10000) / target_clock; | ||
871 | |||
872 | if (tmp > (10000 + MAX_TOLERANCE)) | ||
873 | ref_div++; | ||
874 | else if (tmp >= (10000 - MAX_TOLERANCE)) | ||
875 | break; | ||
876 | else | ||
877 | ref_div++; | ||
878 | } | ||
879 | } | ||
880 | |||
881 | *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) / | ||
882 | (ref_div * post_div * 10); | ||
883 | *fb_div_p = fb_div; | ||
884 | *frac_fb_div_p = frac_fb_div; | ||
885 | *ref_div_p = ref_div; | ||
886 | *post_div_p = post_div; | ||
887 | DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n", | ||
888 | *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div); | ||
889 | } | ||
890 | |||
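[Annotation] radeon_compute_pll_avivo() works to the usual relation dot_clock = ref_freq * (fb_div + frac_fb_div/10) / (ref_div * post_div), with clocks in 10 kHz units. A worked example under assumed inputs (ref_freq = 2700, i.e. a 27 MHz reference; ref_div = 1; post_div = 2; 165 MHz target, so target_clock = 16500):

    /* tmp       = post_div * ref_div * target_clock = 2 * 1 * 16500 = 33000
     * fb_div    = 33000 / 2700 = 12, remainder 600
     * frac      = 100 * 600 / 2700 = 22  -> rounded per the >=5 branch to 2 tenths
     * dot_clock = (2700*12*10 + 2700*2) / (1*2*10) = 16470   (164.7 MHz)
     */

The non-fractional branch instead walks ref_div upward until the achieved clock lands within MAX_TOLERANCE of the target on a 10000 scale, i.e. within 0.1%.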
891 | /* pre-avivo */ | ||
713 | static inline uint32_t radeon_div(uint64_t n, uint32_t d) | 892 | static inline uint32_t radeon_div(uint64_t n, uint32_t d) |
714 | { | 893 | { |
715 | uint64_t mod; | 894 | uint64_t mod; |
@@ -720,13 +899,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) | |||
720 | return n; | 899 | return n; |
721 | } | 900 | } |
722 | 901 | ||
723 | void radeon_compute_pll(struct radeon_pll *pll, | 902 | void radeon_compute_pll_legacy(struct radeon_pll *pll, |
724 | uint64_t freq, | 903 | uint64_t freq, |
725 | uint32_t *dot_clock_p, | 904 | uint32_t *dot_clock_p, |
726 | uint32_t *fb_div_p, | 905 | uint32_t *fb_div_p, |
727 | uint32_t *frac_fb_div_p, | 906 | uint32_t *frac_fb_div_p, |
728 | uint32_t *ref_div_p, | 907 | uint32_t *ref_div_p, |
729 | uint32_t *post_div_p) | 908 | uint32_t *post_div_p) |
730 | { | 909 | { |
731 | uint32_t min_ref_div = pll->min_ref_div; | 910 | uint32_t min_ref_div = pll->min_ref_div; |
732 | uint32_t max_ref_div = pll->max_ref_div; | 911 | uint32_t max_ref_div = pll->max_ref_div; |
@@ -756,6 +935,9 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
756 | pll_out_max = pll->pll_out_max; | 935 | pll_out_max = pll->pll_out_max; |
757 | } | 936 | } |
758 | 937 | ||
938 | if (pll_out_min > 64800) | ||
939 | pll_out_min = 64800; | ||
940 | |||
759 | if (pll->flags & RADEON_PLL_USE_REF_DIV) | 941 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
760 | min_ref_div = max_ref_div = pll->reference_div; | 942 | min_ref_div = max_ref_div = pll->reference_div; |
761 | else { | 943 | else { |
@@ -779,7 +961,7 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
779 | max_fractional_feed_div = pll->max_frac_feedback_div; | 961 | max_fractional_feed_div = pll->max_frac_feedback_div; |
780 | } | 962 | } |
781 | 963 | ||
782 | for (post_div = max_post_div; post_div >= min_post_div; --post_div) { | 964 | for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { |
783 | uint32_t ref_div; | 965 | uint32_t ref_div; |
784 | 966 | ||
785 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | 967 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
@@ -895,6 +1077,10 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
895 | *frac_fb_div_p = best_frac_feedback_div; | 1077 | *frac_fb_div_p = best_frac_feedback_div; |
896 | *ref_div_p = best_ref_div; | 1078 | *ref_div_p = best_ref_div; |
897 | *post_div_p = best_post_div; | 1079 | *post_div_p = best_post_div; |
1080 | DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n", | ||
1081 | freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, | ||
1082 | best_ref_div, best_post_div); | ||
1083 | |||
898 | } | 1084 | } |
899 | 1085 | ||
900 | static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | 1086 | static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -1111,7 +1297,10 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
1111 | 1297 | ||
1112 | rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; | 1298 | rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; |
1113 | 1299 | ||
1114 | if (ASIC_IS_AVIVO(rdev)) { | 1300 | if (ASIC_IS_DCE5(rdev)) { |
1301 | rdev->ddev->mode_config.max_width = 16384; | ||
1302 | rdev->ddev->mode_config.max_height = 16384; | ||
1303 | } else if (ASIC_IS_AVIVO(rdev)) { | ||
1115 | rdev->ddev->mode_config.max_width = 8192; | 1304 | rdev->ddev->mode_config.max_width = 8192; |
1116 | rdev->ddev->mode_config.max_height = 8192; | 1305 | rdev->ddev->mode_config.max_height = 8192; |
1117 | } else { | 1306 | } else { |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index a92d2a5cea90..8a0df3d2a4c3 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -48,7 +48,7 @@ | |||
48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen | 48 | * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen |
49 | * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) | 49 | * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) |
50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs | 50 | * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs |
51 | * 2.8.0 - pageflip support | 51 | * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query |
52 | */ | 52 | */ |
53 | #define KMS_DRIVER_MAJOR 2 | 53 | #define KMS_DRIVER_MAJOR 2 |
54 | #define KMS_DRIVER_MINOR 8 | 54 | #define KMS_DRIVER_MINOR 8 |
@@ -84,6 +84,16 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | |||
84 | extern struct drm_ioctl_desc radeon_ioctls_kms[]; | 84 | extern struct drm_ioctl_desc radeon_ioctls_kms[]; |
85 | extern int radeon_max_kms_ioctl; | 85 | extern int radeon_max_kms_ioctl; |
86 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma); | 86 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma); |
87 | int radeon_mode_dumb_mmap(struct drm_file *filp, | ||
88 | struct drm_device *dev, | ||
89 | uint32_t handle, uint64_t *offset_p); | ||
90 | int radeon_mode_dumb_create(struct drm_file *file_priv, | ||
91 | struct drm_device *dev, | ||
92 | struct drm_mode_create_dumb *args); | ||
93 | int radeon_mode_dumb_destroy(struct drm_file *file_priv, | ||
94 | struct drm_device *dev, | ||
95 | uint32_t handle); | ||
96 | |||
87 | #if defined(CONFIG_DEBUG_FS) | 97 | #if defined(CONFIG_DEBUG_FS) |
88 | int radeon_debugfs_init(struct drm_minor *minor); | 98 | int radeon_debugfs_init(struct drm_minor *minor); |
89 | void radeon_debugfs_cleanup(struct drm_minor *minor); | 99 | void radeon_debugfs_cleanup(struct drm_minor *minor); |
@@ -104,6 +114,7 @@ int radeon_tv = 1; | |||
104 | int radeon_audio = 1; | 114 | int radeon_audio = 1; |
105 | int radeon_disp_priority = 0; | 115 | int radeon_disp_priority = 0; |
106 | int radeon_hw_i2c = 0; | 116 | int radeon_hw_i2c = 0; |
117 | int radeon_pcie_gen2 = 0; | ||
107 | 118 | ||
108 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 119 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
109 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 120 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
@@ -147,6 +158,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444); | |||
147 | MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); | 158 | MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); |
148 | module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); | 159 | module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); |
149 | 160 | ||
161 | MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); | ||
162 | module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); | ||
163 | |||
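[Annotation] radeon_pcie_gen2 is a new opt-in knob: PCIE Gen2 link training stays off unless requested. The 0444 permissions make it read-only after load, so it is set at module load time (e.g. radeon.pcie_gen2=1 on the kernel command line) and can be inspected under /sys/module/radeon/parameters/pcie_gen2, but not flipped at runtime.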
150 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) | 164 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) |
151 | { | 165 | { |
152 | drm_radeon_private_t *dev_priv = dev->dev_private; | 166 | drm_radeon_private_t *dev_priv = dev->dev_private; |
@@ -224,11 +238,6 @@ static struct drm_driver driver_old = { | |||
224 | .llseek = noop_llseek, | 238 | .llseek = noop_llseek, |
225 | }, | 239 | }, |
226 | 240 | ||
227 | .pci_driver = { | ||
228 | .name = DRIVER_NAME, | ||
229 | .id_table = pciidlist, | ||
230 | }, | ||
231 | |||
232 | .name = DRIVER_NAME, | 241 | .name = DRIVER_NAME, |
233 | .desc = DRIVER_DESC, | 242 | .desc = DRIVER_DESC, |
234 | .date = DRIVER_DATE, | 243 | .date = DRIVER_DATE, |
@@ -239,9 +248,28 @@ static struct drm_driver driver_old = { | |||
239 | 248 | ||
240 | static struct drm_driver kms_driver; | 249 | static struct drm_driver kms_driver; |
241 | 250 | ||
251 | static void radeon_kick_out_firmware_fb(struct pci_dev *pdev) | ||
252 | { | ||
253 | struct apertures_struct *ap; | ||
254 | bool primary = false; | ||
255 | |||
256 | ap = alloc_apertures(1); | ||
257 | ap->ranges[0].base = pci_resource_start(pdev, 0); | ||
258 | ap->ranges[0].size = pci_resource_len(pdev, 0); | ||
259 | |||
260 | #ifdef CONFIG_X86 | ||
261 | primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | ||
262 | #endif | ||
263 | remove_conflicting_framebuffers(ap, "radeondrmfb", primary); | ||
264 | kfree(ap); | ||
265 | } | ||
266 | |||
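[Annotation] radeon_kick_out_firmware_fb() evicts any generic framebuffer driver (offb, vesafb, efifb) squatting on the card's BAR 0 before KMS binds, via the fbdev aperture API; on x86 a shadowed ROM marks the boot-primary device, which also removes primary-device framebuffers that do not claim the aperture. One robustness nit: alloc_apertures() can return NULL and is not checked here; a defensive sketch:

    ap = alloc_apertures(1);
    if (!ap)
            return;     /* out of memory: skip the eviction rather than oops */
    ap->ranges[0].base = pci_resource_start(pdev, 0);
    ap->ranges[0].size = pci_resource_len(pdev, 0);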
242 | static int __devinit | 267 | static int __devinit |
243 | radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 268 | radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
244 | { | 269 | { |
270 | /* Get rid of things like offb */ | ||
271 | radeon_kick_out_firmware_fb(pdev); | ||
272 | |||
245 | return drm_get_pci_dev(pdev, ent, &kms_driver); | 273 | return drm_get_pci_dev(pdev, ent, &kms_driver); |
246 | } | 274 | } |
247 | 275 | ||
@@ -299,6 +327,9 @@ static struct drm_driver kms_driver = { | |||
299 | .gem_init_object = radeon_gem_object_init, | 327 | .gem_init_object = radeon_gem_object_init, |
300 | .gem_free_object = radeon_gem_object_free, | 328 | .gem_free_object = radeon_gem_object_free, |
301 | .dma_ioctl = radeon_dma_ioctl_kms, | 329 | .dma_ioctl = radeon_dma_ioctl_kms, |
330 | .dumb_create = radeon_mode_dumb_create, | ||
331 | .dumb_map_offset = radeon_mode_dumb_mmap, | ||
332 | .dumb_destroy = radeon_mode_dumb_destroy, | ||
302 | .fops = { | 333 | .fops = { |
303 | .owner = THIS_MODULE, | 334 | .owner = THIS_MODULE, |
304 | .open = drm_open, | 335 | .open = drm_open, |
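[Annotation] The three .dumb_* hooks wire radeon into the KMS "dumb buffer" interface: a driver-agnostic way for userspace with no acceleration stack (boot splash, simple fbdev replacements) to allocate, map and free a scanout buffer. The userspace side, roughly:

    struct drm_mode_create_dumb creq = { .width = 1024, .height = 768, .bpp = 32 };
    ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);   /* kernel fills handle/pitch/size */

    struct drm_mode_map_dumb mreq = { .handle = creq.handle };
    ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);      /* kernel fills mreq.offset */
    void *fb = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, mreq.offset);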
@@ -313,15 +344,6 @@ static struct drm_driver kms_driver = { | |||
313 | #endif | 344 | #endif |
314 | }, | 345 | }, |
315 | 346 | ||
316 | .pci_driver = { | ||
317 | .name = DRIVER_NAME, | ||
318 | .id_table = pciidlist, | ||
319 | .probe = radeon_pci_probe, | ||
320 | .remove = radeon_pci_remove, | ||
321 | .suspend = radeon_pci_suspend, | ||
322 | .resume = radeon_pci_resume, | ||
323 | }, | ||
324 | |||
325 | .name = DRIVER_NAME, | 347 | .name = DRIVER_NAME, |
326 | .desc = DRIVER_DESC, | 348 | .desc = DRIVER_DESC, |
327 | .date = DRIVER_DATE, | 349 | .date = DRIVER_DATE, |
@@ -331,15 +353,32 @@ static struct drm_driver kms_driver = { | |||
331 | }; | 353 | }; |
332 | 354 | ||
333 | static struct drm_driver *driver; | 355 | static struct drm_driver *driver; |
356 | static struct pci_driver *pdriver; | ||
357 | |||
358 | static struct pci_driver radeon_pci_driver = { | ||
359 | .name = DRIVER_NAME, | ||
360 | .id_table = pciidlist, | ||
361 | }; | ||
362 | |||
363 | static struct pci_driver radeon_kms_pci_driver = { | ||
364 | .name = DRIVER_NAME, | ||
365 | .id_table = pciidlist, | ||
366 | .probe = radeon_pci_probe, | ||
367 | .remove = radeon_pci_remove, | ||
368 | .suspend = radeon_pci_suspend, | ||
369 | .resume = radeon_pci_resume, | ||
370 | }; | ||
334 | 371 | ||
335 | static int __init radeon_init(void) | 372 | static int __init radeon_init(void) |
336 | { | 373 | { |
337 | driver = &driver_old; | 374 | driver = &driver_old; |
375 | pdriver = &radeon_pci_driver; | ||
338 | driver->num_ioctls = radeon_max_ioctl; | 376 | driver->num_ioctls = radeon_max_ioctl; |
339 | #ifdef CONFIG_VGA_CONSOLE | 377 | #ifdef CONFIG_VGA_CONSOLE |
340 | if (vgacon_text_force() && radeon_modeset == -1) { | 378 | if (vgacon_text_force() && radeon_modeset == -1) { |
341 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); | 379 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); |
342 | driver = &driver_old; | 380 | driver = &driver_old; |
381 | pdriver = &radeon_pci_driver; | ||
343 | driver->driver_features &= ~DRIVER_MODESET; | 382 | driver->driver_features &= ~DRIVER_MODESET; |
344 | radeon_modeset = 0; | 383 | radeon_modeset = 0; |
345 | } | 384 | } |
@@ -357,18 +396,19 @@ static int __init radeon_init(void) | |||
357 | if (radeon_modeset == 1) { | 396 | if (radeon_modeset == 1) { |
358 | DRM_INFO("radeon kernel modesetting enabled.\n"); | 397 | DRM_INFO("radeon kernel modesetting enabled.\n"); |
359 | driver = &kms_driver; | 398 | driver = &kms_driver; |
399 | pdriver = &radeon_kms_pci_driver; | ||
360 | driver->driver_features |= DRIVER_MODESET; | 400 | driver->driver_features |= DRIVER_MODESET; |
361 | driver->num_ioctls = radeon_max_kms_ioctl; | 401 | driver->num_ioctls = radeon_max_kms_ioctl; |
362 | radeon_register_atpx_handler(); | 402 | radeon_register_atpx_handler(); |
363 | } | 403 | } |
364 | /* if the vga console setting is enabled still | 404 | /* if the vga console setting is enabled still |
365 | * let modprobe override it */ | 405 | * let modprobe override it */ |
366 | return drm_init(driver); | 406 | return drm_pci_init(driver, pdriver); |
367 | } | 407 | } |
368 | 408 | ||
369 | static void __exit radeon_exit(void) | 409 | static void __exit radeon_exit(void) |
370 | { | 410 | { |
371 | drm_exit(driver); | 411 | drm_pci_exit(driver, pdriver); |
372 | radeon_unregister_atpx_handler(); | 412 | radeon_unregister_atpx_handler(); |
373 | } | 413 | } |
374 | 414 | ||
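[Annotation] The registration rework tracks a DRM core change: struct pci_driver no longer lives inside struct drm_driver, so the UMS and KMS drivers each get a standalone pci_driver, radeon_init() selects the matching pair, and drm_init()/drm_exit() become drm_pci_init()/drm_pci_exit(), presumably to let the core grow non-PCI bus bindings. Note the KMS probe now runs radeon_kick_out_firmware_fb() before drm_get_pci_dev(), so firmware framebuffers are gone before the device is even registered.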
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index e4e64a80b58d..d4a542247618 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
641 | switch (connector->connector_type) { | 641 | switch (connector->connector_type) { |
642 | case DRM_MODE_CONNECTOR_DVII: | 642 | case DRM_MODE_CONNECTOR_DVII: |
643 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | 643 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ |
644 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | 644 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
645 | /* fix me */ | 645 | /* fix me */ |
646 | if (ASIC_IS_DCE4(rdev)) | 646 | if (ASIC_IS_DCE4(rdev)) |
647 | return ATOM_ENCODER_MODE_DVI; | 647 | return ATOM_ENCODER_MODE_DVI; |
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
655 | case DRM_MODE_CONNECTOR_DVID: | 655 | case DRM_MODE_CONNECTOR_DVID: |
656 | case DRM_MODE_CONNECTOR_HDMIA: | 656 | case DRM_MODE_CONNECTOR_HDMIA: |
657 | default: | 657 | default: |
658 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | 658 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
659 | /* fix me */ | 659 | /* fix me */ |
660 | if (ASIC_IS_DCE4(rdev)) | 660 | if (ASIC_IS_DCE4(rdev)) |
661 | return ATOM_ENCODER_MODE_DVI; | 661 | return ATOM_ENCODER_MODE_DVI; |
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
673 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 673 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
674 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | 674 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) |
675 | return ATOM_ENCODER_MODE_DP; | 675 | return ATOM_ENCODER_MODE_DP; |
676 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | 676 | else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { |
677 | /* fix me */ | 677 | /* fix me */ |
678 | if (ASIC_IS_DCE4(rdev)) | 678 | if (ASIC_IS_DCE4(rdev)) |
679 | return ATOM_ENCODER_MODE_DVI; | 679 | return ATOM_ENCODER_MODE_DVI; |
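All three connector cases above switch from drm_detect_hdmi_monitor() to drm_detect_monitor_audio() gated by the radeon_audio module option: the HDMI encoder mode, and with it the audio/infoframe path, is only chosen when the sink's EDID actually advertises audio and the user has enabled it; otherwise the display is driven as plain DVI. A compilable sketch of that decision, with made-up mode constants standing in for the ATOM_ENCODER_MODE_* values:

#include <stdio.h>
#include <stdbool.h>

enum { MODE_DVI, MODE_HDMI }; /* stand-ins for ATOM_ENCODER_MODE_* */

/* Mirrors the new policy: HDMI only when the EDID declares audio
 * support AND the radeon.audio module parameter is enabled. */
static int pick_encoder_mode(bool edid_has_audio, bool radeon_audio)
{
    return (edid_has_audio && radeon_audio) ? MODE_HDMI : MODE_DVI;
}

int main(void)
{
    for (int a = 0; a <= 1; a++)
        for (int p = 0; p <= 1; p++)
            printf("edid_audio=%d radeon.audio=%d -> %s\n", a, p,
                   pick_encoder_mode(a, p) == MODE_HDMI ? "HDMI" : "DVI");
    return 0;
}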
@@ -712,7 +712,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
712 | * - 2 DIG encoder blocks. | 712 | * - 2 DIG encoder blocks. |
713 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | 713 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B |
714 | * | 714 | * |
715 | * DCE 4.0 | 715 | * DCE 4.0/5.0 |
716 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). | 716 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). |
717 | * Supports up to 6 digital outputs | 717 | * Supports up to 6 digital outputs |
718 | * - 6 DIG encoder blocks. | 718 | * - 6 DIG encoder blocks. |
@@ -743,6 +743,7 @@ union dig_encoder_control { | |||
743 | DIG_ENCODER_CONTROL_PS_ALLOCATION v1; | 743 | DIG_ENCODER_CONTROL_PS_ALLOCATION v1; |
744 | DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; | 744 | DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; |
745 | DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; | 745 | DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; |
746 | DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; | ||
746 | }; | 747 | }; |
747 | 748 | ||
748 | void | 749 | void |
@@ -758,6 +759,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
758 | uint8_t frev, crev; | 759 | uint8_t frev, crev; |
759 | int dp_clock = 0; | 760 | int dp_clock = 0; |
760 | int dp_lane_count = 0; | 761 | int dp_lane_count = 0; |
762 | int hpd_id = RADEON_HPD_NONE; | ||
761 | 763 | ||
762 | if (connector) { | 764 | if (connector) { |
763 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 765 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
@@ -766,6 +768,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
766 | 768 | ||
767 | dp_clock = dig_connector->dp_clock; | 769 | dp_clock = dig_connector->dp_clock; |
768 | dp_lane_count = dig_connector->dp_lane_count; | 770 | dp_lane_count = dig_connector->dp_lane_count; |
771 | hpd_id = radeon_connector->hpd.hpd; | ||
769 | } | 772 | } |
770 | 773 | ||
771 | /* no dig encoder assigned */ | 774 | /* no dig encoder assigned */ |
@@ -790,19 +793,36 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
790 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 793 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
791 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); | 794 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); |
792 | 795 | ||
793 | if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | 796 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || |
794 | if (dp_clock == 270000) | 797 | (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) |
795 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
796 | args.v1.ucLaneNum = dp_lane_count; | 798 | args.v1.ucLaneNum = dp_lane_count; |
797 | } else if (radeon_encoder->pixel_clock > 165000) | 799 | else if (radeon_encoder->pixel_clock > 165000) |
798 | args.v1.ucLaneNum = 8; | 800 | args.v1.ucLaneNum = 8; |
799 | else | 801 | else |
800 | args.v1.ucLaneNum = 4; | 802 | args.v1.ucLaneNum = 4; |
801 | 803 | ||
802 | if (ASIC_IS_DCE4(rdev)) { | 804 | if (ASIC_IS_DCE5(rdev)) { |
805 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || | ||
806 | (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) { | ||
807 | if (dp_clock == 270000) | ||
808 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; | ||
809 | else if (dp_clock == 540000) | ||
810 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; | ||
811 | } | ||
812 | args.v4.acConfig.ucDigSel = dig->dig_encoder; | ||
813 | args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
814 | if (hpd_id == RADEON_HPD_NONE) | ||
815 | args.v4.ucHPD_ID = 0; | ||
816 | else | ||
817 | args.v4.ucHPD_ID = hpd_id + 1; | ||
818 | } else if (ASIC_IS_DCE4(rdev)) { | ||
819 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) | ||
820 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; | ||
803 | args.v3.acConfig.ucDigSel = dig->dig_encoder; | 821 | args.v3.acConfig.ucDigSel = dig->dig_encoder; |
804 | args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; | 822 | args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; |
805 | } else { | 823 | } else { |
824 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) | ||
825 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
806 | switch (radeon_encoder->encoder_id) { | 826 | switch (radeon_encoder->encoder_id) { |
807 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 827 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
808 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; | 828 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; |
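The new DCE5 branch above uses the v4 parameter layout: the DP link-rate flags move into v4-specific config bits (adding a 5.40 GHz rate alongside 2.70 GHz), and the hot-plug detect pin is passed as a one-based ID, with 0 reserved to mean "no HPD pin". A standalone sketch of those two encodings; the flag values below are invented for illustration, not the real ATOM bit definitions.

#include <stdio.h>

#define HPD_NONE (-1)           /* stand-in for RADEON_HPD_NONE */
#define DPLINKRATE_2_70GHZ 0x01 /* illustrative bits, not the ATOM values */
#define DPLINKRATE_5_40GHZ 0x02

/* ATOM's v4 table wants a one-based HPD ID; 0 is reserved for "none". */
static unsigned char encode_hpd_id(int hpd)
{
    return (hpd == HPD_NONE) ? 0 : (unsigned char)(hpd + 1);
}

static unsigned char dp_rate_flags(int dp_clock_khz)
{
    if (dp_clock_khz == 270000) return DPLINKRATE_2_70GHZ;
    if (dp_clock_khz == 540000) return DPLINKRATE_5_40GHZ;
    return 0; /* 1.62 GHz needs no flag */
}

int main(void)
{
    printf("hpd pin 2 -> id %u, no pin -> id %u\n",
           encode_hpd_id(2), encode_hpd_id(HPD_NONE));
    printf("540 MHz DP clock -> flags 0x%x\n", dp_rate_flags(540000));
    return 0;
}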
@@ -829,6 +849,7 @@ union dig_transmitter_control { | |||
829 | DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; | 849 | DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; |
830 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; | 850 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; |
831 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; | 851 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; |
852 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; | ||
832 | }; | 853 | }; |
833 | 854 | ||
834 | void | 855 | void |
@@ -910,15 +931,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
910 | else | 931 | else |
911 | args.v3.ucLaneNum = 4; | 932 | args.v3.ucLaneNum = 4; |
912 | 933 | ||
913 | if (ASIC_IS_DCE41(rdev)) { | 934 | if (dig->linkb) { |
914 | args.v3.acConfig.ucEncoderSel = dig->dig_encoder; | 935 | args.v3.acConfig.ucLinkSel = 1; |
915 | if (dig->linkb) | 936 | args.v3.acConfig.ucEncoderSel = 1; |
916 | args.v3.acConfig.ucLinkSel = 1; | ||
917 | } else { | ||
918 | if (dig->linkb) { | ||
919 | args.v3.acConfig.ucLinkSel = 1; | ||
920 | args.v3.acConfig.ucEncoderSel = 1; | ||
921 | } | ||
922 | } | 937 | } |
923 | 938 | ||
924 | /* Select the PLL for the PHY | 939 | /* Select the PLL for the PHY |
@@ -929,10 +944,18 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
929 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | 944 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
930 | pll_id = radeon_crtc->pll_id; | 945 | pll_id = radeon_crtc->pll_id; |
931 | } | 946 | } |
932 | if (is_dp && rdev->clock.dp_extclk) | 947 | |
933 | args.v3.acConfig.ucRefClkSource = 2; /* external src */ | 948 | if (ASIC_IS_DCE5(rdev)) { |
934 | else | 949 | if (is_dp && rdev->clock.dp_extclk) |
935 | args.v3.acConfig.ucRefClkSource = pll_id; | 950 | args.v4.acConfig.ucRefClkSource = 3; /* external src */ |
951 | else | ||
952 | args.v4.acConfig.ucRefClkSource = pll_id; | ||
953 | } else { | ||
954 | if (is_dp && rdev->clock.dp_extclk) | ||
955 | args.v3.acConfig.ucRefClkSource = 2; /* external src */ | ||
956 | else | ||
957 | args.v3.acConfig.ucRefClkSource = pll_id; | ||
958 | } | ||
936 | 959 | ||
937 | switch (radeon_encoder->encoder_id) { | 960 | switch (radeon_encoder->encoder_id) { |
938 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 961 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
@@ -1040,7 +1063,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action) | |||
1040 | if (!ASIC_IS_DCE4(rdev)) | 1063 | if (!ASIC_IS_DCE4(rdev)) |
1041 | return; | 1064 | return; |
1042 | 1065 | ||
1043 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) || | 1066 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && |
1044 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) | 1067 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) |
1045 | return; | 1068 | return; |
1046 | 1069 | ||
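The one-character change above is a real logic fix: with ||, the guard "action != POWER_ON || action != POWER_OFF" is true for every possible action, since any value differs from at least one of the two constants, so atombios_set_edp_panel_power() returned early unconditionally and never powered the eDP panel. With && the guard only rejects actions that are neither POWER_ON nor POWER_OFF. A compilable demonstration (enum values are illustrative):

#include <stdio.h>

enum { POWER_ON = 1, POWER_OFF = 2, LCD_BLON = 3 }; /* illustrative values */

static int rejected_old(int a) { return a != POWER_ON || a != POWER_OFF; }
static int rejected_new(int a) { return a != POWER_ON && a != POWER_OFF; }

int main(void)
{
    int actions[] = { POWER_ON, POWER_OFF, LCD_BLON };
    for (int i = 0; i < 3; i++)
        printf("action %d: old guard rejects=%d, fixed guard rejects=%d\n",
               actions[i], rejected_old(actions[i]), rejected_new(actions[i]));
    /* The old guard rejects all three; the fix accepts ON and OFF. */
    return 0;
}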
@@ -1204,6 +1227,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1204 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; | 1227 | DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; |
1205 | int index = 0; | 1228 | int index = 0; |
1206 | bool is_dig = false; | 1229 | bool is_dig = false; |
1230 | bool is_dce5_dac = false; | ||
1231 | bool is_dce5_dvo = false; | ||
1207 | 1232 | ||
1208 | memset(&args, 0, sizeof(args)); | 1233 | memset(&args, 0, sizeof(args)); |
1209 | 1234 | ||
@@ -1226,7 +1251,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1226 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | 1251 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); |
1227 | break; | 1252 | break; |
1228 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 1253 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
1229 | if (ASIC_IS_DCE3(rdev)) | 1254 | if (ASIC_IS_DCE5(rdev)) |
1255 | is_dce5_dvo = true; | ||
1256 | else if (ASIC_IS_DCE3(rdev)) | ||
1230 | is_dig = true; | 1257 | is_dig = true; |
1231 | else | 1258 | else |
1232 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); | 1259 | index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); |
@@ -1242,12 +1269,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1242 | break; | 1269 | break; |
1243 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | 1270 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
1244 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1271 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
1245 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | 1272 | if (ASIC_IS_DCE5(rdev)) |
1246 | index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); | 1273 | is_dce5_dac = true; |
1247 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | 1274 | else { |
1248 | index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); | 1275 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
1249 | else | 1276 | index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); |
1250 | index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); | 1277 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
1278 | index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); | ||
1279 | else | ||
1280 | index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); | ||
1281 | } | ||
1251 | break; | 1282 | break; |
1252 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | 1283 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
1253 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | 1284 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
@@ -1306,6 +1337,28 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1306 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); | 1337 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); |
1307 | break; | 1338 | break; |
1308 | } | 1339 | } |
1340 | } else if (is_dce5_dac) { | ||
1341 | switch (mode) { | ||
1342 | case DRM_MODE_DPMS_ON: | ||
1343 | atombios_dac_setup(encoder, ATOM_ENABLE); | ||
1344 | break; | ||
1345 | case DRM_MODE_DPMS_STANDBY: | ||
1346 | case DRM_MODE_DPMS_SUSPEND: | ||
1347 | case DRM_MODE_DPMS_OFF: | ||
1348 | atombios_dac_setup(encoder, ATOM_DISABLE); | ||
1349 | break; | ||
1350 | } | ||
1351 | } else if (is_dce5_dvo) { | ||
1352 | switch (mode) { | ||
1353 | case DRM_MODE_DPMS_ON: | ||
1354 | atombios_dvo_setup(encoder, ATOM_ENABLE); | ||
1355 | break; | ||
1356 | case DRM_MODE_DPMS_STANDBY: | ||
1357 | case DRM_MODE_DPMS_SUSPEND: | ||
1358 | case DRM_MODE_DPMS_OFF: | ||
1359 | atombios_dvo_setup(encoder, ATOM_DISABLE); | ||
1360 | break; | ||
1361 | } | ||
1309 | } else { | 1362 | } else { |
1310 | switch (mode) { | 1363 | switch (mode) { |
1311 | case DRM_MODE_DPMS_ON: | 1364 | case DRM_MODE_DPMS_ON: |
@@ -1335,7 +1388,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1335 | switch (mode) { | 1388 | switch (mode) { |
1336 | case DRM_MODE_DPMS_ON: | 1389 | case DRM_MODE_DPMS_ON: |
1337 | default: | 1390 | default: |
1338 | if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) | 1391 | if (ASIC_IS_DCE41(rdev)) |
1339 | action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; | 1392 | action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; |
1340 | else | 1393 | else |
1341 | action = ATOM_ENABLE; | 1394 | action = ATOM_ENABLE; |
@@ -1343,7 +1396,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1343 | case DRM_MODE_DPMS_STANDBY: | 1396 | case DRM_MODE_DPMS_STANDBY: |
1344 | case DRM_MODE_DPMS_SUSPEND: | 1397 | case DRM_MODE_DPMS_SUSPEND: |
1345 | case DRM_MODE_DPMS_OFF: | 1398 | case DRM_MODE_DPMS_OFF: |
1346 | if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) | 1399 | if (ASIC_IS_DCE41(rdev)) |
1347 | action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT; | 1400 | action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT; |
1348 | else | 1401 | else |
1349 | action = ATOM_DISABLE; | 1402 | action = ATOM_DISABLE; |
@@ -1535,32 +1588,35 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
1535 | struct radeon_encoder_atom_dig *dig; | 1588 | struct radeon_encoder_atom_dig *dig; |
1536 | uint32_t dig_enc_in_use = 0; | 1589 | uint32_t dig_enc_in_use = 0; |
1537 | 1590 | ||
1538 | /* on DCE41 an encoder can drive any phy, so just use the crtc id */ | 1591 | /* DCE4/5 */ |
1539 | if (ASIC_IS_DCE41(rdev)) { | ||
1540 | return radeon_crtc->crtc_id; | ||
1541 | } | ||
1542 | |||
1543 | if (ASIC_IS_DCE4(rdev)) { | 1592 | if (ASIC_IS_DCE4(rdev)) { |
1544 | dig = radeon_encoder->enc_priv; | 1593 | dig = radeon_encoder->enc_priv; |
1545 | switch (radeon_encoder->encoder_id) { | 1594 | if (ASIC_IS_DCE41(rdev)) { |
1546 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
1547 | if (dig->linkb) | 1595 | if (dig->linkb) |
1548 | return 1; | 1596 | return 1; |
1549 | else | 1597 | else |
1550 | return 0; | 1598 | return 0; |
1551 | break; | 1599 | } else { |
1552 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 1600 | switch (radeon_encoder->encoder_id) { |
1553 | if (dig->linkb) | 1601 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1554 | return 3; | 1602 | if (dig->linkb) |
1555 | else | 1603 | return 1; |
1556 | return 2; | 1604 | else |
1557 | break; | 1605 | return 0; |
1558 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 1606 | break; |
1559 | if (dig->linkb) | 1607 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
1560 | return 5; | 1608 | if (dig->linkb) |
1561 | else | 1609 | return 3; |
1562 | return 4; | 1610 | else |
1563 | break; | 1611 | return 2; |
1612 | break; | ||
1613 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
1614 | if (dig->linkb) | ||
1615 | return 5; | ||
1616 | else | ||
1617 | return 4; | ||
1618 | break; | ||
1619 | } | ||
1564 | } | 1620 | } |
1565 | } | 1621 | } |
1566 | 1622 | ||
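After the restructure above, DCE4.1 (Fusion) picks a DIG encoder purely from the link, A maps to 0 and B to 1, because any of its encoders can drive any PHY, while full DCE4 keeps the fixed mapping from UNIPHY block to encoder pair: block n on link A selects encoder 2n, link B selects 2n+1. A standalone sketch of both mappings; the plain uniphy index here stands in for the ENCODER_OBJECT_ID_INTERNAL_UNIPHY* cases.

#include <stdio.h>
#include <stdbool.h>

/* DCE4.1: encoders float, so only the link matters. */
static int pick_dig_dce41(bool linkb) { return linkb ? 1 : 0; }

/* Full DCE4: UNIPHYn link A -> encoder 2n, link B -> encoder 2n+1. */
static int pick_dig_dce4(int uniphy, bool linkb) { return uniphy * 2 + (linkb ? 1 : 0); }

int main(void)
{
    for (int u = 0; u < 3; u++)
        for (int b = 0; b < 2; b++)
            printf("UNIPHY%d link %c -> DIG%d (DCE4), DIG%d (DCE4.1)\n",
                   u, b ? 'B' : 'A', pick_dig_dce4(u, b), pick_dig_dce41(b));
    return 0;
}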
@@ -1667,7 +1723,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1667 | } | 1723 | } |
1668 | 1724 | ||
1669 | if (ext_encoder) { | 1725 | if (ext_encoder) { |
1670 | if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) { | 1726 | if (ASIC_IS_DCE41(rdev)) { |
1671 | atombios_external_encoder_setup(encoder, ext_encoder, | 1727 | atombios_external_encoder_setup(encoder, ext_encoder, |
1672 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); | 1728 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); |
1673 | atombios_external_encoder_setup(encoder, ext_encoder, | 1729 | atombios_external_encoder_setup(encoder, ext_encoder, |
@@ -1990,7 +2046,10 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | |||
1990 | } | 2046 | } |
1991 | 2047 | ||
1992 | void | 2048 | void |
1993 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) | 2049 | radeon_add_atom_encoder(struct drm_device *dev, |
2050 | uint32_t encoder_enum, | ||
2051 | uint32_t supported_device, | ||
2052 | u16 caps) | ||
1994 | { | 2053 | { |
1995 | struct radeon_device *rdev = dev->dev_private; | 2054 | struct radeon_device *rdev = dev->dev_private; |
1996 | struct drm_encoder *encoder; | 2055 | struct drm_encoder *encoder; |
@@ -2033,6 +2092,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t | |||
2033 | radeon_encoder->rmx_type = RMX_OFF; | 2092 | radeon_encoder->rmx_type = RMX_OFF; |
2034 | radeon_encoder->underscan_type = UNDERSCAN_OFF; | 2093 | radeon_encoder->underscan_type = UNDERSCAN_OFF; |
2035 | radeon_encoder->is_ext_encoder = false; | 2094 | radeon_encoder->is_ext_encoder = false; |
2095 | radeon_encoder->caps = caps; | ||
2036 | 2096 | ||
2037 | switch (radeon_encoder->encoder_id) { | 2097 | switch (radeon_encoder->encoder_id) { |
2038 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 2098 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 4c222d5437d1..1ca55eb09ad3 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
@@ -81,6 +81,9 @@ enum radeon_family { | |||
81 | CHIP_CYPRESS, | 81 | CHIP_CYPRESS, |
82 | CHIP_HEMLOCK, | 82 | CHIP_HEMLOCK, |
83 | CHIP_PALM, | 83 | CHIP_PALM, |
84 | CHIP_BARTS, | ||
85 | CHIP_TURKS, | ||
86 | CHIP_CAICOS, | ||
84 | CHIP_LAST, | 87 | CHIP_LAST, |
85 | }; | 88 | }; |
86 | 89 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index efa211898fe6..28431e78ab56 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -64,7 +64,7 @@ static struct fb_ops radeonfb_ops = { | |||
64 | }; | 64 | }; |
65 | 65 | ||
66 | 66 | ||
67 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) | 67 | int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
68 | { | 68 | { |
69 | int aligned = width; | 69 | int aligned = width; |
70 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; | 70 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
@@ -90,7 +90,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo | |||
90 | 90 | ||
91 | static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) | 91 | static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) |
92 | { | 92 | { |
93 | struct radeon_bo *rbo = gobj->driver_private; | 93 | struct radeon_bo *rbo = gem_to_radeon_bo(gobj); |
94 | int ret; | 94 | int ret; |
95 | 95 | ||
96 | ret = radeon_bo_reserve(rbo, false); | 96 | ret = radeon_bo_reserve(rbo, false); |
@@ -128,7 +128,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
128 | aligned_size); | 128 | aligned_size); |
129 | return -ENOMEM; | 129 | return -ENOMEM; |
130 | } | 130 | } |
131 | rbo = gobj->driver_private; | 131 | rbo = gem_to_radeon_bo(gobj); |
132 | 132 | ||
133 | if (fb_tiled) | 133 | if (fb_tiled) |
134 | tiling_flags = RADEON_TILING_MACRO; | 134 | tiling_flags = RADEON_TILING_MACRO; |
@@ -202,7 +202,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, | |||
202 | mode_cmd.depth = sizes->surface_depth; | 202 | mode_cmd.depth = sizes->surface_depth; |
203 | 203 | ||
204 | ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); | 204 | ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); |
205 | rbo = gobj->driver_private; | 205 | rbo = gem_to_radeon_bo(gobj); |
206 | 206 | ||
207 | /* okay we have an object now allocate the framebuffer */ | 207 | /* okay we have an object now allocate the framebuffer */ |
208 | info = framebuffer_alloc(0, device); | 208 | info = framebuffer_alloc(0, device); |
@@ -245,10 +245,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, | |||
245 | goto out_unref; | 245 | goto out_unref; |
246 | } | 246 | } |
247 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; | 247 | info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; |
248 | info->apertures->ranges[0].size = rdev->mc.real_vram_size; | 248 | info->apertures->ranges[0].size = rdev->mc.aper_size; |
249 | 249 | ||
250 | info->fix.mmio_start = 0; | ||
251 | info->fix.mmio_len = 0; | ||
252 | info->pixmap.size = 64*1024; | 250 | info->pixmap.size = 64*1024; |
253 | info->pixmap.buf_align = 8; | 251 | info->pixmap.buf_align = 8; |
254 | info->pixmap.access_align = 32; | 252 | info->pixmap.access_align = 32; |
@@ -405,14 +403,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev) | |||
405 | struct radeon_bo *robj; | 403 | struct radeon_bo *robj; |
406 | int size = 0; | 404 | int size = 0; |
407 | 405 | ||
408 | robj = rdev->mode_info.rfbdev->rfb.obj->driver_private; | 406 | robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); |
409 | size += radeon_bo_size(robj); | 407 | size += radeon_bo_size(robj); |
410 | return size; | 408 | return size; |
411 | } | 409 | } |
412 | 410 | ||
413 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) | 411 | bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) |
414 | { | 412 | { |
415 | if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private) | 413 | if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) |
416 | return true; | 414 | return true; |
417 | return false; | 415 | return false; |
418 | } | 416 | } |
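The recurring gem_to_radeon_bo() conversions in this file and the ones below reflect a structural change: the GEM object is no longer attached through gobj->driver_private but embedded in struct radeon_bo as gem_base, and gem_to_radeon_bo() recovers the containing BO with container_of(). A standalone illustration of the pattern, with the struct layouts drastically simplified:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_gem_object { size_t size; };

struct radeon_bo {
    int surface_reg;
    struct drm_gem_object gem_base; /* embedded, replaces driver_private */
};

static struct radeon_bo *gem_to_radeon_bo(struct drm_gem_object *gobj)
{
    return container_of(gobj, struct radeon_bo, gem_base);
}

int main(void)
{
    struct radeon_bo bo = { .surface_reg = -1, .gem_base = { .size = 4096 } };
    struct drm_gem_object *gobj = &bo.gem_base;   /* what GEM code hands around */
    printf("recovered bo %p from gobj %p (surface_reg=%d)\n",
           (void *)gem_to_radeon_bo(gobj), (void *)gobj,
           gem_to_radeon_bo(gobj)->surface_reg);
    return 0;
}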
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 65016117d95f..f0534ef2f331 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) | |||
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.vram.robj == NULL) { | 80 | if (rdev->gart.table.vram.robj == NULL) { |
81 | r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, | 81 | r = radeon_bo_create(rdev, rdev->gart.table_size, |
82 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | 82 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
83 | &rdev->gart.table.vram.robj); | 83 | &rdev->gart.table.vram.robj); |
84 | if (r) { | 84 | if (r) { |
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
149 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 149 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
150 | for (i = 0; i < pages; i++, p++) { | 150 | for (i = 0; i < pages; i++, p++) { |
151 | if (rdev->gart.pages[p]) { | 151 | if (rdev->gart.pages[p]) { |
152 | pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], | 152 | if (!rdev->gart.ttm_alloced[p]) |
153 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 153 | pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], |
154 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
154 | rdev->gart.pages[p] = NULL; | 155 | rdev->gart.pages[p] = NULL; |
155 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; | 156 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; |
156 | page_base = rdev->gart.pages_addr[p]; | 157 | page_base = rdev->gart.pages_addr[p]; |
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
165 | } | 166 | } |
166 | 167 | ||
167 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | 168 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
168 | int pages, struct page **pagelist) | 169 | int pages, struct page **pagelist, dma_addr_t *dma_addr) |
169 | { | 170 | { |
170 | unsigned t; | 171 | unsigned t; |
171 | unsigned p; | 172 | unsigned p; |
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
180 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 181 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
181 | 182 | ||
182 | for (i = 0; i < pages; i++, p++) { | 183 | for (i = 0; i < pages; i++, p++) { |
183 | /* we need to support large memory configurations */ | 184 | /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 |
184 | /* assume that unbind have already been call on the range */ | 185 | * is requested. */ |
185 | rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i], | 186 | if (dma_addr[i] != DMA_ERROR_CODE) { |
187 | rdev->gart.ttm_alloced[p] = true; | ||
188 | rdev->gart.pages_addr[p] = dma_addr[i]; | ||
189 | } else { | ||
190 | /* we need to support large memory configurations */ | ||
191 | /* assume that unbind has already been called on the range */ | ||
192 | rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i], | ||
186 | 0, PAGE_SIZE, | 193 | 0, PAGE_SIZE, |
187 | PCI_DMA_BIDIRECTIONAL); | 194 | PCI_DMA_BIDIRECTIONAL); |
188 | if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) { | 195 | if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) { |
189 | /* FIXME: failed to map page (return -ENOMEM?) */ | 196 | /* FIXME: failed to map page (return -ENOMEM?) */ |
190 | radeon_gart_unbind(rdev, offset, pages); | 197 | radeon_gart_unbind(rdev, offset, pages); |
191 | return -ENOMEM; | 198 | return -ENOMEM; |
199 | } | ||
192 | } | 200 | } |
193 | rdev->gart.pages[p] = pagelist[i]; | 201 | rdev->gart.pages[p] = pagelist[i]; |
194 | page_base = rdev->gart.pages_addr[p]; | 202 | page_base = rdev->gart.pages_addr[p]; |
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
251 | radeon_gart_fini(rdev); | 259 | radeon_gart_fini(rdev); |
252 | return -ENOMEM; | 260 | return -ENOMEM; |
253 | } | 261 | } |
262 | rdev->gart.ttm_alloced = kzalloc(sizeof(bool) * | ||
263 | rdev->gart.num_cpu_pages, GFP_KERNEL); | ||
264 | if (rdev->gart.ttm_alloced == NULL) { | ||
265 | radeon_gart_fini(rdev); | ||
266 | return -ENOMEM; | ||
267 | } | ||
254 | /* set GART entry to point to the dummy page by default */ | 268 | /* set GART entry to point to the dummy page by default */ |
255 | for (i = 0; i < rdev->gart.num_cpu_pages; i++) { | 269 | for (i = 0; i < rdev->gart.num_cpu_pages; i++) { |
256 | rdev->gart.pages_addr[i] = rdev->dummy_page.addr; | 270 | rdev->gart.pages_addr[i] = rdev->dummy_page.addr; |
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev) | |||
267 | rdev->gart.ready = false; | 281 | rdev->gart.ready = false; |
268 | kfree(rdev->gart.pages); | 282 | kfree(rdev->gart.pages); |
269 | kfree(rdev->gart.pages_addr); | 283 | kfree(rdev->gart.pages_addr); |
284 | kfree(rdev->gart.ttm_alloced); | ||
270 | rdev->gart.pages = NULL; | 285 | rdev->gart.pages = NULL; |
271 | rdev->gart.pages_addr = NULL; | 286 | rdev->gart.pages_addr = NULL; |
287 | rdev->gart.ttm_alloced = NULL; | ||
272 | } | 288 | } |
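radeon_gart_bind() now receives a dma_addr array from TTM: pages that TTM has already DMA-mapped (marked by an address other than DMA_ERROR_CODE) are adopted as-is and flagged in the new ttm_alloced[] array, so that radeon_gart_unbind() knows not to pci_unmap_page() a mapping it does not own. A simplified sketch of that per-page decision, with the mapping call stubbed out and a sentinel value standing in for the arch-specific DMA_ERROR_CODE:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define DMA_ERROR_CODE ((uint64_t)0) /* sentinel; the real value is arch-specific */

static uint64_t fake_pci_map_page(int page) { return 0x80000000ull + page * 4096; }

/* One GART slot per CPU page: either adopt TTM's mapping or create our own. */
static void bind_page(int p, uint64_t dma_addr,
                      uint64_t *pages_addr, bool *ttm_alloced)
{
    if (dma_addr != DMA_ERROR_CODE) {
        ttm_alloced[p] = true;        /* TTM owns this mapping */
        pages_addr[p] = dma_addr;
    } else {
        ttm_alloced[p] = false;       /* we map it, so unbind must unmap it */
        pages_addr[p] = fake_pci_map_page(p);
    }
}

int main(void)
{
    uint64_t pages_addr[2]; bool ttm_alloced[2];
    bind_page(0, 0xdead0000ull, pages_addr, ttm_alloced);  /* pre-mapped by TTM */
    bind_page(1, DMA_ERROR_CODE, pages_addr, ttm_alloced); /* mapped here */
    for (int p = 0; p < 2; p++)
        printf("slot %d: addr=0x%llx ttm_alloced=%d\n", p,
               (unsigned long long)pages_addr[p], ttm_alloced[p]);
    return 0;
}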
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index df95eb83dac6..a419b67d8401 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -32,21 +32,18 @@ | |||
32 | 32 | ||
33 | int radeon_gem_object_init(struct drm_gem_object *obj) | 33 | int radeon_gem_object_init(struct drm_gem_object *obj) |
34 | { | 34 | { |
35 | /* we do nothing here */ | 35 | BUG(); |
36 | |||
36 | return 0; | 37 | return 0; |
37 | } | 38 | } |
38 | 39 | ||
39 | void radeon_gem_object_free(struct drm_gem_object *gobj) | 40 | void radeon_gem_object_free(struct drm_gem_object *gobj) |
40 | { | 41 | { |
41 | struct radeon_bo *robj = gobj->driver_private; | 42 | struct radeon_bo *robj = gem_to_radeon_bo(gobj); |
42 | 43 | ||
43 | gobj->driver_private = NULL; | ||
44 | if (robj) { | 44 | if (robj) { |
45 | radeon_bo_unref(&robj); | 45 | radeon_bo_unref(&robj); |
46 | } | 46 | } |
47 | |||
48 | drm_gem_object_release(gobj); | ||
49 | kfree(gobj); | ||
50 | } | 47 | } |
51 | 48 | ||
52 | int radeon_gem_object_create(struct radeon_device *rdev, int size, | 49 | int radeon_gem_object_create(struct radeon_device *rdev, int size, |
@@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
54 | bool discardable, bool kernel, | 51 | bool discardable, bool kernel, |
55 | struct drm_gem_object **obj) | 52 | struct drm_gem_object **obj) |
56 | { | 53 | { |
57 | struct drm_gem_object *gobj; | ||
58 | struct radeon_bo *robj; | 54 | struct radeon_bo *robj; |
59 | int r; | 55 | int r; |
60 | 56 | ||
61 | *obj = NULL; | 57 | *obj = NULL; |
62 | gobj = drm_gem_object_alloc(rdev->ddev, size); | ||
63 | if (!gobj) { | ||
64 | return -ENOMEM; | ||
65 | } | ||
66 | /* At least align on page size */ | 58 | /* At least align on page size */ |
67 | if (alignment < PAGE_SIZE) { | 59 | if (alignment < PAGE_SIZE) { |
68 | alignment = PAGE_SIZE; | 60 | alignment = PAGE_SIZE; |
69 | } | 61 | } |
70 | r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj); | 62 | r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj); |
71 | if (r) { | 63 | if (r) { |
72 | if (r != -ERESTARTSYS) | 64 | if (r != -ERESTARTSYS) |
73 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", | 65 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", |
74 | size, initial_domain, alignment, r); | 66 | size, initial_domain, alignment, r); |
75 | drm_gem_object_unreference_unlocked(gobj); | ||
76 | return r; | 67 | return r; |
77 | } | 68 | } |
78 | gobj->driver_private = robj; | 69 | *obj = &robj->gem_base; |
79 | *obj = gobj; | 70 | |
71 | mutex_lock(&rdev->gem.mutex); | ||
72 | list_add_tail(&robj->list, &rdev->gem.objects); | ||
73 | mutex_unlock(&rdev->gem.mutex); | ||
74 | |||
80 | return 0; | 75 | return 0; |
81 | } | 76 | } |
82 | 77 | ||
83 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | 78 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
84 | uint64_t *gpu_addr) | 79 | uint64_t *gpu_addr) |
85 | { | 80 | { |
86 | struct radeon_bo *robj = obj->driver_private; | 81 | struct radeon_bo *robj = gem_to_radeon_bo(obj); |
87 | int r; | 82 | int r; |
88 | 83 | ||
89 | r = radeon_bo_reserve(robj, false); | 84 | r = radeon_bo_reserve(robj, false); |
@@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | |||
96 | 91 | ||
97 | void radeon_gem_object_unpin(struct drm_gem_object *obj) | 92 | void radeon_gem_object_unpin(struct drm_gem_object *obj) |
98 | { | 93 | { |
99 | struct radeon_bo *robj = obj->driver_private; | 94 | struct radeon_bo *robj = gem_to_radeon_bo(obj); |
100 | int r; | 95 | int r; |
101 | 96 | ||
102 | r = radeon_bo_reserve(robj, false); | 97 | r = radeon_bo_reserve(robj, false); |
@@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj, | |||
114 | int r; | 109 | int r; |
115 | 110 | ||
116 | /* FIXME: reimplement */ | 111 | /* FIXME: reimplement */ |
117 | robj = gobj->driver_private; | 112 | robj = gem_to_radeon_bo(gobj); |
118 | /* work out where to validate the buffer to */ | 113 | /* work out where to validate the buffer to */ |
119 | domain = wdomain; | 114 | domain = wdomain; |
120 | if (!domain) { | 115 | if (!domain) { |
@@ -228,7 +223,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
228 | if (gobj == NULL) { | 223 | if (gobj == NULL) { |
229 | return -ENOENT; | 224 | return -ENOENT; |
230 | } | 225 | } |
231 | robj = gobj->driver_private; | 226 | robj = gem_to_radeon_bo(gobj); |
232 | 227 | ||
233 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); | 228 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); |
234 | 229 | ||
@@ -236,23 +231,31 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
236 | return r; | 231 | return r; |
237 | } | 232 | } |
238 | 233 | ||
239 | int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, | 234 | int radeon_mode_dumb_mmap(struct drm_file *filp, |
240 | struct drm_file *filp) | 235 | struct drm_device *dev, |
236 | uint32_t handle, uint64_t *offset_p) | ||
241 | { | 237 | { |
242 | struct drm_radeon_gem_mmap *args = data; | ||
243 | struct drm_gem_object *gobj; | 238 | struct drm_gem_object *gobj; |
244 | struct radeon_bo *robj; | 239 | struct radeon_bo *robj; |
245 | 240 | ||
246 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 241 | gobj = drm_gem_object_lookup(dev, filp, handle); |
247 | if (gobj == NULL) { | 242 | if (gobj == NULL) { |
248 | return -ENOENT; | 243 | return -ENOENT; |
249 | } | 244 | } |
250 | robj = gobj->driver_private; | 245 | robj = gem_to_radeon_bo(gobj); |
251 | args->addr_ptr = radeon_bo_mmap_offset(robj); | 246 | *offset_p = radeon_bo_mmap_offset(robj); |
252 | drm_gem_object_unreference_unlocked(gobj); | 247 | drm_gem_object_unreference_unlocked(gobj); |
253 | return 0; | 248 | return 0; |
254 | } | 249 | } |
255 | 250 | ||
251 | int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, | ||
252 | struct drm_file *filp) | ||
253 | { | ||
254 | struct drm_radeon_gem_mmap *args = data; | ||
255 | |||
256 | return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); | ||
257 | } | ||
258 | |||
256 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | 259 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
257 | struct drm_file *filp) | 260 | struct drm_file *filp) |
258 | { | 261 | { |
@@ -266,7 +269,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
266 | if (gobj == NULL) { | 269 | if (gobj == NULL) { |
267 | return -ENOENT; | 270 | return -ENOENT; |
268 | } | 271 | } |
269 | robj = gobj->driver_private; | 272 | robj = gem_to_radeon_bo(gobj); |
270 | r = radeon_bo_wait(robj, &cur_placement, true); | 273 | r = radeon_bo_wait(robj, &cur_placement, true); |
271 | switch (cur_placement) { | 274 | switch (cur_placement) { |
272 | case TTM_PL_VRAM: | 275 | case TTM_PL_VRAM: |
@@ -296,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
296 | if (gobj == NULL) { | 299 | if (gobj == NULL) { |
297 | return -ENOENT; | 300 | return -ENOENT; |
298 | } | 301 | } |
299 | robj = gobj->driver_private; | 302 | robj = gem_to_radeon_bo(gobj); |
300 | r = radeon_bo_wait(robj, NULL, false); | 303 | r = radeon_bo_wait(robj, NULL, false); |
301 | /* callback hw specific functions if any */ | 304 | /* callback hw specific functions if any */ |
302 | if (robj->rdev->asic->ioctl_wait_idle) | 305 | if (robj->rdev->asic->ioctl_wait_idle) |
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
317 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 320 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
318 | if (gobj == NULL) | 321 | if (gobj == NULL) |
319 | return -ENOENT; | 322 | return -ENOENT; |
320 | robj = gobj->driver_private; | 323 | robj = gem_to_radeon_bo(gobj); |
321 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); | 324 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
322 | drm_gem_object_unreference_unlocked(gobj); | 325 | drm_gem_object_unreference_unlocked(gobj); |
323 | return r; | 326 | return r; |
@@ -335,7 +338,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | |||
335 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 338 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
336 | if (gobj == NULL) | 339 | if (gobj == NULL) |
337 | return -ENOENT; | 340 | return -ENOENT; |
338 | rbo = gobj->driver_private; | 341 | rbo = gem_to_radeon_bo(gobj); |
339 | r = radeon_bo_reserve(rbo, false); | 342 | r = radeon_bo_reserve(rbo, false); |
340 | if (unlikely(r != 0)) | 343 | if (unlikely(r != 0)) |
341 | goto out; | 344 | goto out; |
@@ -345,3 +348,38 @@ out: | |||
345 | drm_gem_object_unreference_unlocked(gobj); | 348 | drm_gem_object_unreference_unlocked(gobj); |
346 | return r; | 349 | return r; |
347 | } | 350 | } |
351 | |||
352 | int radeon_mode_dumb_create(struct drm_file *file_priv, | ||
353 | struct drm_device *dev, | ||
354 | struct drm_mode_create_dumb *args) | ||
355 | { | ||
356 | struct radeon_device *rdev = dev->dev_private; | ||
357 | struct drm_gem_object *gobj; | ||
358 | int r; | ||
359 | |||
360 | args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); | ||
361 | args->size = args->pitch * args->height; | ||
362 | args->size = ALIGN(args->size, PAGE_SIZE); | ||
363 | |||
364 | r = radeon_gem_object_create(rdev, args->size, 0, | ||
365 | RADEON_GEM_DOMAIN_VRAM, | ||
366 | false, ttm_bo_type_device, | ||
367 | &gobj); | ||
368 | if (r) | ||
369 | return -ENOMEM; | ||
370 | |||
371 | r = drm_gem_handle_create(file_priv, gobj, &args->handle); | ||
372 | if (r) { | ||
373 | drm_gem_object_unreference_unlocked(gobj); | ||
374 | return r; | ||
375 | } | ||
376 | drm_gem_object_handle_unreference_unlocked(gobj); | ||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | int radeon_mode_dumb_destroy(struct drm_file *file_priv, | ||
381 | struct drm_device *dev, | ||
382 | uint32_t handle) | ||
383 | { | ||
384 | return drm_gem_handle_delete(file_priv, handle); | ||
385 | } | ||
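The new radeon_mode_dumb_create() derives the scanout pitch from the now-exported radeon_align_pitch() and rounds the buffer size up to a whole page before allocating the VRAM BO. A worked standalone example of that arithmetic, assuming a 64-pixel pitch alignment and a 4096-byte page; both are simplifications of what radeon_align_pitch() really checks per ASIC.

#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Simplified stand-in for radeon_align_pitch(): align width to 64 pixels. */
static unsigned align_pitch_pixels(unsigned width) { return ALIGN(width, 64u); }

int main(void)
{
    unsigned width = 1366, height = 768, bpp = 32;
    unsigned pitch = align_pitch_pixels(width) * ((bpp + 1) / 8); /* bytes per row */
    unsigned size  = ALIGN(pitch * height, PAGE_SIZE);
    /* 1366 aligns up to 1408 pixels -> 5632-byte pitch -> 4325376-byte BO */
    printf("pitch=%u bytes, size=%u bytes\n", pitch, size);
    return 0;
}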
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index c6861bb751ad..9ec830c77af0 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -64,8 +64,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | |||
64 | struct radeon_device *rdev = dev->dev_private; | 64 | struct radeon_device *rdev = dev->dev_private; |
65 | unsigned i; | 65 | unsigned i; |
66 | 66 | ||
67 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | ||
68 | |||
69 | /* Disable *all* interrupts */ | 67 | /* Disable *all* interrupts */ |
70 | rdev->irq.sw_int = false; | 68 | rdev->irq.sw_int = false; |
71 | rdev->irq.gui_idle = false; | 69 | rdev->irq.gui_idle = false; |
@@ -112,9 +110,14 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
112 | 110 | ||
113 | int radeon_irq_kms_init(struct radeon_device *rdev) | 111 | int radeon_irq_kms_init(struct radeon_device *rdev) |
114 | { | 112 | { |
113 | int i; | ||
115 | int r = 0; | 114 | int r = 0; |
116 | 115 | ||
116 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | ||
117 | |||
117 | spin_lock_init(&rdev->irq.sw_lock); | 118 | spin_lock_init(&rdev->irq.sw_lock); |
119 | for (i = 0; i < rdev->num_crtc; i++) | ||
120 | spin_lock_init(&rdev->irq.pflip_lock[i]); | ||
118 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); | 121 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); |
119 | if (r) { | 122 | if (r) { |
120 | return r; | 123 | return r; |
@@ -152,6 +155,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) | |||
152 | if (rdev->msi_enabled) | 155 | if (rdev->msi_enabled) |
153 | pci_disable_msi(rdev->pdev); | 156 | pci_disable_msi(rdev->pdev); |
154 | } | 157 | } |
158 | flush_work_sync(&rdev->hotplug_work); | ||
155 | } | 159 | } |
156 | 160 | ||
157 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) | 161 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) |
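Moving INIT_WORK() from irq_preinstall into radeon_irq_kms_init() means the hotplug work item exists before any interrupt can queue it (and is not re-initialized while potentially queued), and the new flush_work_sync() in _fini makes teardown wait for an in-flight run instead of freeing state underneath it. A small runnable sketch of that ordering; the function bodies are stubs and only the call sequence reflects the kernel code.

#include <stdio.h>

/* Stub lifecycle mirroring the change above. */
static void init_work(void)        { puts("INIT_WORK(hotplug_work)"); }
static void install_handler(void)  { puts("request_irq()"); }
static void remove_handler(void)   { puts("free_irq()"); }
static void flush_work_sync_(void) { puts("flush_work_sync(hotplug_work)"); }

int main(void)
{
    /* radeon_irq_kms_init(): the work exists before any IRQ can queue it */
    init_work();
    install_handler();
    /* radeon_irq_kms_fini(): stop new queueing, then wait out a queued run */
    remove_handler();
    flush_work_sync_();
    return 0;
}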
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4bf423ca4c12..4057ebf5195d 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -58,9 +58,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
58 | dev->dev_private = (void *)rdev; | 58 | dev->dev_private = (void *)rdev; |
59 | 59 | ||
60 | /* update BUS flag */ | 60 | /* update BUS flag */ |
61 | if (drm_device_is_agp(dev)) { | 61 | if (drm_pci_device_is_agp(dev)) { |
62 | flags |= RADEON_IS_AGP; | 62 | flags |= RADEON_IS_AGP; |
63 | } else if (drm_device_is_pcie(dev)) { | 63 | } else if (drm_pci_device_is_pcie(dev)) { |
64 | flags |= RADEON_IS_PCIE; | 64 | flags |= RADEON_IS_PCIE; |
65 | } else { | 65 | } else { |
66 | flags |= RADEON_IS_PCI; | 66 | flags |= RADEON_IS_PCI; |
@@ -96,9 +96,27 @@ out: | |||
96 | return r; | 96 | return r; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void radeon_set_filp_rights(struct drm_device *dev, | ||
100 | struct drm_file **owner, | ||
101 | struct drm_file *applier, | ||
102 | uint32_t *value) | ||
103 | { | ||
104 | mutex_lock(&dev->struct_mutex); | ||
105 | if (*value == 1) { | ||
106 | /* wants rights */ | ||
107 | if (!*owner) | ||
108 | *owner = applier; | ||
109 | } else if (*value == 0) { | ||
110 | /* revokes rights */ | ||
111 | if (*owner == applier) | ||
112 | *owner = NULL; | ||
113 | } | ||
114 | *value = *owner == applier ? 1 : 0; | ||
115 | mutex_unlock(&dev->struct_mutex); | ||
116 | } | ||
99 | 117 | ||
100 | /* | 118 | /* |
101 | * Userspace get informations ioctl | 119 | * Userspace get information ioctl |
102 | */ | 120 | */ |
103 | int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 121 | int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
104 | { | 122 | { |
@@ -173,18 +191,19 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
173 | DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); | 191 | DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); |
174 | return -EINVAL; | 192 | return -EINVAL; |
175 | } | 193 | } |
176 | mutex_lock(&dev->struct_mutex); | 194 | radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value); |
177 | if (value == 1) { | 195 | break; |
178 | /* wants hyper-z */ | 196 | case RADEON_INFO_WANT_CMASK: |
179 | if (!rdev->hyperz_filp) | 197 | /* The same logic as Hyper-Z. */ |
180 | rdev->hyperz_filp = filp; | 198 | if (value >= 2) { |
181 | } else if (value == 0) { | 199 | DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value); |
182 | /* revokes hyper-z */ | 200 | return -EINVAL; |
183 | if (rdev->hyperz_filp == filp) | ||
184 | rdev->hyperz_filp = NULL; | ||
185 | } | 201 | } |
186 | value = rdev->hyperz_filp == filp ? 1 : 0; | 202 | radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); |
187 | mutex_unlock(&dev->struct_mutex); | 203 | break; |
204 | case RADEON_INFO_CLOCK_CRYSTAL_FREQ: | ||
205 | /* return clock value in KHz */ | ||
206 | value = rdev->clock.spll.reference_freq * 10; | ||
188 | break; | 207 | break; |
189 | default: | 208 | default: |
190 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); | 209 | DRM_DEBUG_KMS("Invalid request %d\n", info->request); |
@@ -203,10 +222,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
203 | */ | 222 | */ |
204 | int radeon_driver_firstopen_kms(struct drm_device *dev) | 223 | int radeon_driver_firstopen_kms(struct drm_device *dev) |
205 | { | 224 | { |
206 | struct radeon_device *rdev = dev->dev_private; | ||
207 | |||
208 | if (rdev->powered_down) | ||
209 | return -EINVAL; | ||
210 | return 0; | 225 | return 0; |
211 | } | 226 | } |
212 | 227 | ||
@@ -232,6 +247,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev, | |||
232 | struct radeon_device *rdev = dev->dev_private; | 247 | struct radeon_device *rdev = dev->dev_private; |
233 | if (rdev->hyperz_filp == file_priv) | 248 | if (rdev->hyperz_filp == file_priv) |
234 | rdev->hyperz_filp = NULL; | 249 | rdev->hyperz_filp = NULL; |
250 | if (rdev->cmask_filp == file_priv) | ||
251 | rdev->cmask_filp = NULL; | ||
235 | } | 252 | } |
236 | 253 | ||
237 | /* | 254 | /* |
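The Hyper-Z grant/revoke logic is factored into radeon_set_filp_rights() so the new CMASK access control can share it: value 1 requests exclusive ownership (granted only if currently unowned), value 0 revokes it (only effective for the owner), and value is rewritten on the way out to report whether the caller now holds the right. A standalone version of the same state machine, minus the struct_mutex locking, with plain pointers standing in for struct drm_file*:

#include <stdio.h>
#include <stdint.h>

/* Same grant/revoke logic as radeon_set_filp_rights(), minus the locking. */
static void set_filp_rights(void **owner, void *applier, uint32_t *value)
{
    if (*value == 1) {            /* wants rights */
        if (!*owner)
            *owner = applier;
    } else if (*value == 0) {     /* revokes rights */
        if (*owner == applier)
            *owner = NULL;
    }
    *value = (*owner == applier) ? 1 : 0;
}

int main(void)
{
    void *owner = NULL;
    int a, b; /* two clients */
    uint32_t v;

    v = 1; set_filp_rights(&owner, &a, &v); printf("A requests: got=%u\n", v);
    v = 1; set_filp_rights(&owner, &b, &v); printf("B requests: got=%u\n", v); /* denied */
    v = 0; set_filp_rights(&owner, &a, &v); printf("A revokes:  got=%u\n", v);
    v = 1; set_filp_rights(&owner, &b, &v); printf("B requests: got=%u\n", v); /* granted */
    return 0;
}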
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index ace2e6384d40..9ae599eb2e6d 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -415,7 +415,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, | |||
415 | 415 | ||
416 | /* Pin framebuffer & get tiling information */ | 416 | /* Pin framebuffer & get tiling information */ |
417 | obj = radeon_fb->obj; | 417 | obj = radeon_fb->obj; |
418 | rbo = obj->driver_private; | 418 | rbo = gem_to_radeon_bo(obj); |
419 | r = radeon_bo_reserve(rbo, false); | 419 | r = radeon_bo_reserve(rbo, false); |
420 | if (unlikely(r != 0)) | 420 | if (unlikely(r != 0)) |
421 | return r; | 421 | return r; |
@@ -520,7 +520,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, | |||
520 | 520 | ||
521 | if (!atomic && fb && fb != crtc->fb) { | 521 | if (!atomic && fb && fb != crtc->fb) { |
522 | radeon_fb = to_radeon_framebuffer(fb); | 522 | radeon_fb = to_radeon_framebuffer(fb); |
523 | rbo = radeon_fb->obj->driver_private; | 523 | rbo = gem_to_radeon_bo(radeon_fb->obj); |
524 | r = radeon_bo_reserve(rbo, false); | 524 | r = radeon_bo_reserve(rbo, false); |
525 | if (unlikely(r != 0)) | 525 | if (unlikely(r != 0)) |
526 | return r; | 526 | return r; |
@@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
778 | DRM_DEBUG_KMS("\n"); | 778 | DRM_DEBUG_KMS("\n"); |
779 | 779 | ||
780 | if (!use_bios_divs) { | 780 | if (!use_bios_divs) { |
781 | radeon_compute_pll(pll, mode->clock, | 781 | radeon_compute_pll_legacy(pll, mode->clock, |
782 | &freq, &feedback_div, &frac_fb_div, | 782 | &freq, &feedback_div, &frac_fb_div, |
783 | &reference_div, &post_divider); | 783 | &reference_div, &post_divider); |
784 | 784 | ||
785 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { | 785 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { |
786 | if (post_div->divider == post_divider) | 786 | if (post_div->divider == post_divider) |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index f406f02bf14e..c3f23f6ff60e 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -149,6 +149,7 @@ struct radeon_tmds_pll { | |||
149 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | 149 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) |
150 | #define RADEON_PLL_USE_POST_DIV (1 << 12) | 150 | #define RADEON_PLL_USE_POST_DIV (1 << 12) |
151 | #define RADEON_PLL_IS_LCD (1 << 13) | 151 | #define RADEON_PLL_IS_LCD (1 << 13) |
152 | #define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14) | ||
152 | 153 | ||
153 | struct radeon_pll { | 154 | struct radeon_pll { |
154 | /* reference frequency */ | 155 | /* reference frequency */ |
@@ -379,6 +380,7 @@ struct radeon_encoder { | |||
379 | int hdmi_audio_workaround; | 380 | int hdmi_audio_workaround; |
380 | int hdmi_buffer_status; | 381 | int hdmi_buffer_status; |
381 | bool is_ext_encoder; | 382 | bool is_ext_encoder; |
383 | u16 caps; | ||
382 | }; | 384 | }; |
383 | 385 | ||
384 | struct radeon_connector_atom_dig { | 386 | struct radeon_connector_atom_dig { |
@@ -509,13 +511,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
509 | struct radeon_atom_ss *ss, | 511 | struct radeon_atom_ss *ss, |
510 | int id, u32 clock); | 512 | int id, u32 clock); |
511 | 513 | ||
512 | extern void radeon_compute_pll(struct radeon_pll *pll, | 514 | extern void radeon_compute_pll_legacy(struct radeon_pll *pll, |
513 | uint64_t freq, | 515 | uint64_t freq, |
514 | uint32_t *dot_clock_p, | 516 | uint32_t *dot_clock_p, |
515 | uint32_t *fb_div_p, | 517 | uint32_t *fb_div_p, |
516 | uint32_t *frac_fb_div_p, | 518 | uint32_t *frac_fb_div_p, |
517 | uint32_t *ref_div_p, | 519 | uint32_t *ref_div_p, |
518 | uint32_t *post_div_p); | 520 | uint32_t *post_div_p); |
521 | |||
522 | extern void radeon_compute_pll_avivo(struct radeon_pll *pll, | ||
523 | u32 freq, | ||
524 | u32 *dot_clock_p, | ||
525 | u32 *fb_div_p, | ||
526 | u32 *frac_fb_div_p, | ||
527 | u32 *ref_div_p, | ||
528 | u32 *post_div_p); | ||
519 | 529 | ||
520 | extern void radeon_setup_encoder_clones(struct drm_device *dev); | 530 | extern void radeon_setup_encoder_clones(struct drm_device *dev); |
521 | 531 | ||
@@ -566,7 +576,7 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | |||
566 | 576 | ||
567 | extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); | 577 | extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); |
568 | extern struct edid * | 578 | extern struct edid * |
569 | radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); | 579 | radeon_bios_get_hardcoded_edid(struct radeon_device *rdev); |
570 | extern bool radeon_atom_get_clock_info(struct drm_device *dev); | 580 | extern bool radeon_atom_get_clock_info(struct drm_device *dev); |
571 | extern bool radeon_combios_get_clock_info(struct drm_device *dev); | 581 | extern bool radeon_combios_get_clock_info(struct drm_device *dev); |
572 | extern struct radeon_encoder_atom_dig * | 582 | extern struct radeon_encoder_atom_dig * |
@@ -665,4 +675,5 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev); | |||
665 | 675 | ||
666 | void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); | 676 | void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); |
667 | 677 | ||
678 | int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled); | ||
668 | #endif | 679 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8bdf0ba2983a..8758d02cca1a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) | |||
55 | list_del_init(&bo->list); | 55 | list_del_init(&bo->list); |
56 | mutex_unlock(&bo->rdev->gem.mutex); | 56 | mutex_unlock(&bo->rdev->gem.mutex); |
57 | radeon_bo_clear_surface_reg(bo); | 57 | radeon_bo_clear_surface_reg(bo); |
58 | drm_gem_object_release(&bo->gem_base); | ||
58 | kfree(bo); | 59 | kfree(bo); |
59 | } | 60 | } |
60 | 61 | ||
@@ -70,7 +71,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
70 | u32 c = 0; | 71 | u32 c = 0; |
71 | 72 | ||
72 | rbo->placement.fpfn = 0; | 73 | rbo->placement.fpfn = 0; |
73 | rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT; | 74 | rbo->placement.lpfn = 0; |
74 | rbo->placement.placement = rbo->placements; | 75 | rbo->placement.placement = rbo->placements; |
75 | rbo->placement.busy_placement = rbo->placements; | 76 | rbo->placement.busy_placement = rbo->placements; |
76 | if (domain & RADEON_GEM_DOMAIN_VRAM) | 77 | if (domain & RADEON_GEM_DOMAIN_VRAM) |
@@ -86,15 +87,18 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
86 | rbo->placement.num_busy_placement = c; | 87 | rbo->placement.num_busy_placement = c; |
87 | } | 88 | } |
88 | 89 | ||
89 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | 90 | int radeon_bo_create(struct radeon_device *rdev, |
90 | unsigned long size, int byte_align, bool kernel, u32 domain, | 91 | unsigned long size, int byte_align, bool kernel, u32 domain, |
91 | struct radeon_bo **bo_ptr) | 92 | struct radeon_bo **bo_ptr) |
92 | { | 93 | { |
93 | struct radeon_bo *bo; | 94 | struct radeon_bo *bo; |
94 | enum ttm_bo_type type; | 95 | enum ttm_bo_type type; |
95 | int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; | 96 | unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; |
97 | unsigned long max_size = 0; | ||
96 | int r; | 98 | int r; |
97 | 99 | ||
100 | size = ALIGN(size, PAGE_SIZE); | ||
101 | |||
98 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { | 102 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { |
99 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; | 103 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; |
100 | } | 104 | } |
@@ -105,12 +109,25 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
105 | } | 109 | } |
106 | *bo_ptr = NULL; | 110 | *bo_ptr = NULL; |
107 | 111 | ||
112 | /* maximum bo size is the minimum of visible vram and gtt size */ | ||
113 | max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); | ||
114 | if ((page_align << PAGE_SHIFT) >= max_size) { | ||
115 | printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n", | ||
116 | __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20); | ||
117 | return -ENOMEM; | ||
118 | } | ||
119 | |||
108 | retry: | 120 | retry: |
109 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); | 121 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
110 | if (bo == NULL) | 122 | if (bo == NULL) |
111 | return -ENOMEM; | 123 | return -ENOMEM; |
124 | r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size); | ||
125 | if (unlikely(r)) { | ||
126 | kfree(bo); | ||
127 | return r; | ||
128 | } | ||
112 | bo->rdev = rdev; | 129 | bo->rdev = rdev; |
113 | bo->gobj = gobj; | 130 | bo->gem_base.driver_private = NULL; |
114 | bo->surface_reg = -1; | 131 | bo->surface_reg = -1; |
115 | INIT_LIST_HEAD(&bo->list); | 132 | INIT_LIST_HEAD(&bo->list); |
116 | radeon_ttm_placement_from_domain(bo, domain); | 133 | radeon_ttm_placement_from_domain(bo, domain); |
@@ -133,12 +150,9 @@ retry: | |||
133 | return r; | 150 | return r; |
134 | } | 151 | } |
135 | *bo_ptr = bo; | 152 | *bo_ptr = bo; |
136 | if (gobj) { | 153 | |
137 | mutex_lock(&bo->rdev->gem.mutex); | ||
138 | list_add_tail(&bo->list, &rdev->gem.objects); | ||
139 | mutex_unlock(&bo->rdev->gem.mutex); | ||
140 | } | ||
141 | trace_radeon_bo_create(bo); | 154 | trace_radeon_bo_create(bo); |
155 | |||
142 | return 0; | 156 | return 0; |
143 | } | 157 | } |
144 | 158 | ||
@@ -251,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev) | |||
251 | void radeon_bo_force_delete(struct radeon_device *rdev) | 265 | void radeon_bo_force_delete(struct radeon_device *rdev) |
252 | { | 266 | { |
253 | struct radeon_bo *bo, *n; | 267 | struct radeon_bo *bo, *n; |
254 | struct drm_gem_object *gobj; | ||
255 | 268 | ||
256 | if (list_empty(&rdev->gem.objects)) { | 269 | if (list_empty(&rdev->gem.objects)) { |
257 | return; | 270 | return; |
@@ -259,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev) | |||
259 | dev_err(rdev->dev, "Userspace still has active objects !\n"); | 272 | dev_err(rdev->dev, "Userspace still has active objects !\n"); |
260 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { | 273 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { |
261 | mutex_lock(&rdev->ddev->struct_mutex); | 274 | mutex_lock(&rdev->ddev->struct_mutex); |
262 | gobj = bo->gobj; | ||
263 | dev_err(rdev->dev, "%p %p %lu %lu force free\n", | 275 | dev_err(rdev->dev, "%p %p %lu %lu force free\n", |
264 | gobj, bo, (unsigned long)gobj->size, | 276 | &bo->gem_base, bo, (unsigned long)bo->gem_base.size, |
265 | *((unsigned long *)&gobj->refcount)); | 277 | *((unsigned long *)&bo->gem_base.refcount)); |
266 | mutex_lock(&bo->rdev->gem.mutex); | 278 | mutex_lock(&bo->rdev->gem.mutex); |
267 | list_del_init(&bo->list); | 279 | list_del_init(&bo->list); |
268 | mutex_unlock(&bo->rdev->gem.mutex); | 280 | mutex_unlock(&bo->rdev->gem.mutex); |
269 | radeon_bo_unref(&bo); | 281 | radeon_bo_unref(&bo); |
270 | gobj->driver_private = NULL; | 282 | drm_gem_object_unreference(&bo->gem_base); |
271 | drm_gem_object_unreference(gobj); | ||
272 | mutex_unlock(&rdev->ddev->struct_mutex); | 283 | mutex_unlock(&rdev->ddev->struct_mutex); |
273 | } | 284 | } |
274 | } | 285 | } |
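radeon_bo_create() now refuses allocations that could never be placed: a BO must fit in either visible VRAM or the GTT, so the cap is the smaller of the two, checked against the page-aligned request before anything is allocated. A standalone sketch of that guard; the sizes in main() are invented for the example.

#include <stdio.h>

#define PAGE_SHIFT 12
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirrors the new guard in radeon_bo_create(): reject BOs bigger than
 * min(visible VRAM, GTT), since neither domain could ever hold them. */
static int check_bo_size(unsigned long page_align,
                         unsigned long visible_vram, unsigned long gtt_size)
{
    unsigned long max_size = MIN(visible_vram, gtt_size);
    if ((page_align << PAGE_SHIFT) >= max_size) {
        printf("alloc size %luM bigger than %luM limit\n",
               page_align >> (20 - PAGE_SHIFT), max_size >> 20);
        return -1; /* -ENOMEM in the kernel */
    }
    return 0;
}

int main(void)
{
    unsigned long visible_vram = 256ul << 20, gtt = 512ul << 20;
    check_bo_size((64ul << 20) >> PAGE_SHIFT, visible_vram, gtt);   /* ok */
    check_bo_size((300ul << 20) >> PAGE_SHIFT, visible_vram, gtt);  /* rejected */
    return 0;
}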
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 22d4c237dea5..7f8e778dba46 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, | |||
137 | } | 137 | } |
138 | 138 | ||
139 | extern int radeon_bo_create(struct radeon_device *rdev, | 139 | extern int radeon_bo_create(struct radeon_device *rdev, |
140 | struct drm_gem_object *gobj, unsigned long size, | 140 | unsigned long size, int byte_align, |
141 | int byte_align, | 141 | bool kernel, u32 domain, |
142 | bool kernel, u32 domain, | 142 | struct radeon_bo **bo_ptr); |
143 | struct radeon_bo **bo_ptr); | ||
144 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); | 143 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); |
145 | extern void radeon_bo_kunmap(struct radeon_bo *bo); | 144 | extern void radeon_bo_kunmap(struct radeon_bo *bo); |
146 | extern void radeon_bo_unref(struct radeon_bo **bo); | 145 | extern void radeon_bo_unref(struct radeon_bo **bo); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 4de7776bd1c5..2aed03bde4b2 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -167,13 +167,13 @@ static void radeon_set_power_state(struct radeon_device *rdev) | |||
167 | if (radeon_gui_idle(rdev)) { | 167 | if (radeon_gui_idle(rdev)) { |
168 | sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. | 168 | sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
169 | clock_info[rdev->pm.requested_clock_mode_index].sclk; | 169 | clock_info[rdev->pm.requested_clock_mode_index].sclk; |
170 | if (sclk > rdev->clock.default_sclk) | 170 | if (sclk > rdev->pm.default_sclk) |
171 | sclk = rdev->clock.default_sclk; | 171 | sclk = rdev->pm.default_sclk; |
172 | 172 | ||
173 | mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. | 173 | mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. |
174 | clock_info[rdev->pm.requested_clock_mode_index].mclk; | 174 | clock_info[rdev->pm.requested_clock_mode_index].mclk; |
175 | if (mclk > rdev->clock.default_mclk) | 175 | if (mclk > rdev->pm.default_mclk) |
176 | mclk = rdev->clock.default_mclk; | 176 | mclk = rdev->pm.default_mclk; |
177 | 177 | ||
178 | /* upvolt before raising clocks, downvolt after lowering clocks */ | 178 | /* upvolt before raising clocks, downvolt after lowering clocks */ |
179 | if (sclk < rdev->pm.current_sclk) | 179 | if (sclk < rdev->pm.current_sclk) |
@@ -405,20 +405,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
405 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 405 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
406 | mutex_unlock(&rdev->pm.mutex); | 406 | mutex_unlock(&rdev->pm.mutex); |
407 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { | 407 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { |
408 | bool flush_wq = false; | ||
409 | |||
410 | mutex_lock(&rdev->pm.mutex); | 408 | mutex_lock(&rdev->pm.mutex); |
411 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | ||
412 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
413 | flush_wq = true; | ||
414 | } | ||
415 | /* disable dynpm */ | 409 | /* disable dynpm */ |
416 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 410 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
417 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 411 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
418 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 412 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
419 | mutex_unlock(&rdev->pm.mutex); | 413 | mutex_unlock(&rdev->pm.mutex); |
420 | if (flush_wq) | 414 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
421 | flush_workqueue(rdev->wq); | ||
422 | } else { | 415 | } else { |
423 | DRM_ERROR("invalid power method!\n"); | 416 | DRM_ERROR("invalid power method!\n"); |
424 | goto fail; | 417 | goto fail; |
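[annotation] The flush_wq dance deleted here existed because cancel_delayed_work() does not wait for a handler that is already running, so callers had to remember a flag and flush radeon's private workqueue afterwards. cancel_delayed_work_sync() cancels the timer and waits for any executing instance in one call. A sketch of the before/after, using the names from the hunk:

/* Before: cancel, then flush the private workqueue to catch a
 * handler that was already executing. */
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
/* ... */
flush_workqueue(rdev->wq);

/* After: cancel and wait in one call.  It must not be issued while
 * holding pm.mutex (the idle handler takes that lock), which is why
 * every converted call sits after mutex_unlock(). */
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);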
@@ -437,7 +430,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
437 | { | 430 | { |
438 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 431 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); |
439 | struct radeon_device *rdev = ddev->dev_private; | 432 | struct radeon_device *rdev = ddev->dev_private; |
440 | u32 temp; | 433 | int temp; |
441 | 434 | ||
442 | switch (rdev->pm.int_thermal_type) { | 435 | switch (rdev->pm.int_thermal_type) { |
443 | case THERMAL_TYPE_RV6XX: | 436 | case THERMAL_TYPE_RV6XX: |
@@ -447,6 +440,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
447 | temp = rv770_get_temp(rdev); | 440 | temp = rv770_get_temp(rdev); |
448 | break; | 441 | break; |
449 | case THERMAL_TYPE_EVERGREEN: | 442 | case THERMAL_TYPE_EVERGREEN: |
443 | case THERMAL_TYPE_NI: | ||
450 | temp = evergreen_get_temp(rdev); | 444 | temp = evergreen_get_temp(rdev); |
451 | break; | 445 | break; |
452 | case THERMAL_TYPE_SUMO: | 446 | case THERMAL_TYPE_SUMO: |
@@ -524,34 +518,39 @@ static void radeon_hwmon_fini(struct radeon_device *rdev) | |||
524 | 518 | ||
525 | void radeon_pm_suspend(struct radeon_device *rdev) | 519 | void radeon_pm_suspend(struct radeon_device *rdev) |
526 | { | 520 | { |
527 | bool flush_wq = false; | ||
528 | |||
529 | mutex_lock(&rdev->pm.mutex); | 521 | mutex_lock(&rdev->pm.mutex); |
530 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 522 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
531 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
532 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) | 523 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) |
533 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; | 524 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; |
534 | flush_wq = true; | ||
535 | } | 525 | } |
536 | mutex_unlock(&rdev->pm.mutex); | 526 | mutex_unlock(&rdev->pm.mutex); |
537 | if (flush_wq) | 527 | |
538 | flush_workqueue(rdev->wq); | 528 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
539 | } | 529 | } |
540 | 530 | ||
541 | void radeon_pm_resume(struct radeon_device *rdev) | 531 | void radeon_pm_resume(struct radeon_device *rdev) |
542 | { | 532 | { |
533 | /* set up the default clocks if the MC ucode is loaded */ | ||
534 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { | ||
535 | if (rdev->pm.default_vddc) | ||
536 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); | ||
537 | if (rdev->pm.default_sclk) | ||
538 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | ||
539 | if (rdev->pm.default_mclk) | ||
540 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
541 | } | ||
543 | /* asic init will reset the default power state */ | 542 | /* asic init will reset the default power state */ |
544 | mutex_lock(&rdev->pm.mutex); | 543 | mutex_lock(&rdev->pm.mutex); |
545 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | 544 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; |
546 | rdev->pm.current_clock_mode_index = 0; | 545 | rdev->pm.current_clock_mode_index = 0; |
547 | rdev->pm.current_sclk = rdev->clock.default_sclk; | 546 | rdev->pm.current_sclk = rdev->pm.default_sclk; |
548 | rdev->pm.current_mclk = rdev->clock.default_mclk; | 547 | rdev->pm.current_mclk = rdev->pm.default_mclk; |
549 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 548 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; |
550 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | 549 | if (rdev->pm.pm_method == PM_METHOD_DYNPM |
551 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | 550 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { |
552 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 551 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
553 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 552 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
554 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 553 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
555 | } | 554 | } |
556 | mutex_unlock(&rdev->pm.mutex); | 555 | mutex_unlock(&rdev->pm.mutex); |
557 | radeon_pm_compute_clocks(rdev); | 556 | radeon_pm_compute_clocks(rdev); |
@@ -568,6 +567,8 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
568 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 567 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
569 | rdev->pm.dynpm_can_upclock = true; | 568 | rdev->pm.dynpm_can_upclock = true; |
570 | rdev->pm.dynpm_can_downclock = true; | 569 | rdev->pm.dynpm_can_downclock = true; |
570 | rdev->pm.default_sclk = rdev->clock.default_sclk; | ||
571 | rdev->pm.default_mclk = rdev->clock.default_mclk; | ||
571 | rdev->pm.current_sclk = rdev->clock.default_sclk; | 572 | rdev->pm.current_sclk = rdev->clock.default_sclk; |
572 | rdev->pm.current_mclk = rdev->clock.default_mclk; | 573 | rdev->pm.current_mclk = rdev->clock.default_mclk; |
573 | rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; | 574 | rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; |
@@ -579,12 +580,24 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
579 | radeon_combios_get_power_modes(rdev); | 580 | radeon_combios_get_power_modes(rdev); |
580 | radeon_pm_print_states(rdev); | 581 | radeon_pm_print_states(rdev); |
581 | radeon_pm_init_profile(rdev); | 582 | radeon_pm_init_profile(rdev); |
583 | /* set up the default clocks if the MC ucode is loaded */ | ||
584 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { | ||
585 | if (rdev->pm.default_vddc) | ||
586 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); | ||
587 | if (rdev->pm.default_sclk) | ||
588 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | ||
589 | if (rdev->pm.default_mclk) | ||
590 | radeon_set_memory_clock(rdev, rdev->pm.default_mclk); | ||
591 | } | ||
582 | } | 592 | } |
583 | 593 | ||
584 | /* set up the internal thermal sensor if applicable */ | 594 | /* set up the internal thermal sensor if applicable */ |
585 | ret = radeon_hwmon_init(rdev); | 595 | ret = radeon_hwmon_init(rdev); |
586 | if (ret) | 596 | if (ret) |
587 | return ret; | 597 | return ret; |
598 | |||
599 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | ||
600 | |||
588 | if (rdev->pm.num_power_states > 1) { | 601 | if (rdev->pm.num_power_states > 1) { |
589 | /* where's the best place to put these? */ | 602 | /* where's the best place to put these? */ |
590 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | 603 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); |
@@ -598,8 +611,6 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
598 | rdev->acpi_nb.notifier_call = radeon_acpi_event; | 611 | rdev->acpi_nb.notifier_call = radeon_acpi_event; |
599 | register_acpi_notifier(&rdev->acpi_nb); | 612 | register_acpi_notifier(&rdev->acpi_nb); |
600 | #endif | 613 | #endif |
601 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | ||
602 | |||
603 | if (radeon_debugfs_pm_init(rdev)) { | 614 | if (radeon_debugfs_pm_init(rdev)) { |
604 | DRM_ERROR("Failed to register debugfs file for PM!\n"); | 615 | DRM_ERROR("Failed to register debugfs file for PM!\n"); |
605 | } | 616 | } |
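[annotation] Note the ordering fix split across these two pm_init hunks: INIT_DELAYED_WORK() moves from after the sysfs/ACPI registration to before it. Once dev_attr_power_method is visible, userspace can trigger radeon_set_pm_method(), which now ends in cancel_delayed_work_sync() on dynpm_idle_work; running that against an uninitialized work item would be a race. The rule, with the names from the hunks:

/* Initialize everything a callback can touch... */
INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

/* ...before the callback becomes reachable from userspace. */
ret = device_create_file(rdev->dev, &dev_attr_power_method);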
@@ -613,25 +624,20 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
613 | void radeon_pm_fini(struct radeon_device *rdev) | 624 | void radeon_pm_fini(struct radeon_device *rdev) |
614 | { | 625 | { |
615 | if (rdev->pm.num_power_states > 1) { | 626 | if (rdev->pm.num_power_states > 1) { |
616 | bool flush_wq = false; | ||
617 | |||
618 | mutex_lock(&rdev->pm.mutex); | 627 | mutex_lock(&rdev->pm.mutex); |
619 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 628 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
620 | rdev->pm.profile = PM_PROFILE_DEFAULT; | 629 | rdev->pm.profile = PM_PROFILE_DEFAULT; |
621 | radeon_pm_update_profile(rdev); | 630 | radeon_pm_update_profile(rdev); |
622 | radeon_pm_set_clocks(rdev); | 631 | radeon_pm_set_clocks(rdev); |
623 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 632 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
624 | /* cancel work */ | ||
625 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
626 | flush_wq = true; | ||
627 | /* reset default clocks */ | 633 | /* reset default clocks */ |
628 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 634 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
629 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 635 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
630 | radeon_pm_set_clocks(rdev); | 636 | radeon_pm_set_clocks(rdev); |
631 | } | 637 | } |
632 | mutex_unlock(&rdev->pm.mutex); | 638 | mutex_unlock(&rdev->pm.mutex); |
633 | if (flush_wq) | 639 | |
634 | flush_workqueue(rdev->wq); | 640 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
635 | 641 | ||
636 | device_remove_file(rdev->dev, &dev_attr_power_profile); | 642 | device_remove_file(rdev->dev, &dev_attr_power_profile); |
637 | device_remove_file(rdev->dev, &dev_attr_power_method); | 643 | device_remove_file(rdev->dev, &dev_attr_power_method); |
@@ -640,6 +646,9 @@ void radeon_pm_fini(struct radeon_device *rdev) | |||
640 | #endif | 646 | #endif |
641 | } | 647 | } |
642 | 648 | ||
649 | if (rdev->pm.power_state) | ||
650 | kfree(rdev->pm.power_state); | ||
651 | |||
643 | radeon_hwmon_fini(rdev); | 652 | radeon_hwmon_fini(rdev); |
644 | } | 653 | } |
645 | 654 | ||
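[annotation] radeon_pm_fini() now also frees the power-state table, which the BIOS power-state parsers allocate on the heap. One nit: kfree(NULL) is a no-op by definition, so the guard is redundant and the added lines could simply read:

	kfree(rdev->pm.power_state);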
@@ -690,12 +699,12 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
690 | radeon_pm_get_dynpm_state(rdev); | 699 | radeon_pm_get_dynpm_state(rdev); |
691 | radeon_pm_set_clocks(rdev); | 700 | radeon_pm_set_clocks(rdev); |
692 | 701 | ||
693 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 702 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
694 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 703 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
695 | } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { | 704 | } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { |
696 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 705 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
697 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 706 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
698 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 707 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
699 | DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); | 708 | DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); |
700 | } | 709 | } |
701 | } else { /* count == 0 */ | 710 | } else { /* count == 0 */ |
@@ -800,8 +809,8 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) | |||
800 | radeon_pm_set_clocks(rdev); | 809 | radeon_pm_set_clocks(rdev); |
801 | } | 810 | } |
802 | 811 | ||
803 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 812 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
804 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 813 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
805 | } | 814 | } |
806 | mutex_unlock(&rdev->pm.mutex); | 815 | mutex_unlock(&rdev->pm.mutex); |
807 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 816 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
@@ -818,9 +827,9 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
818 | struct drm_device *dev = node->minor->dev; | 827 | struct drm_device *dev = node->minor->dev; |
819 | struct radeon_device *rdev = dev->dev_private; | 828 | struct radeon_device *rdev = dev->dev_private; |
820 | 829 | ||
821 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); | 830 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); |
822 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); | 831 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
823 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); | 832 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); |
824 | if (rdev->asic->get_memory_clock) | 833 | if (rdev->asic->get_memory_clock) |
825 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); | 834 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); |
826 | if (rdev->pm.current_vddc) | 835 | if (rdev->pm.current_vddc) |
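[annotation] The remaining radeon_pm.c churn is one mechanical substitution: every queue_delayed_work(rdev->wq, ...) becomes schedule_delayed_work(...), moving the dynpm idle work from radeon's private per-device workqueue onto the shared system workqueue (the rs600.c hunk below does the same for hotplug work). This is what makes the cancel_delayed_work_sync() conversion safe and complete, since there is no private queue left that would need flushing. The re-arm idiom now reads:

/* Re-arm on the system workqueue; was queue_delayed_work(rdev->wq, ...). */
schedule_delayed_work(&rdev->pm.dynpm_idle_work,
		      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));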
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 0a310b7f71c3..ec93a75369e6 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -55,6 +55,7 @@ | |||
55 | #include "r500_reg.h" | 55 | #include "r500_reg.h" |
56 | #include "r600_reg.h" | 56 | #include "r600_reg.h" |
57 | #include "evergreen_reg.h" | 57 | #include "evergreen_reg.h" |
58 | #include "ni_reg.h" | ||
58 | 59 | ||
59 | #define RADEON_MC_AGP_LOCATION 0x014c | 60 | #define RADEON_MC_AGP_LOCATION 0x014c |
60 | #define RADEON_MC_AGP_START_MASK 0x0000FFFF | 61 | #define RADEON_MC_AGP_START_MASK 0x0000FFFF |
@@ -320,6 +321,15 @@ | |||
320 | # define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8) | 321 | # define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8) |
321 | # define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9) | 322 | # define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9) |
322 | # define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10) | 323 | # define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10) |
324 | # define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) | ||
325 | # define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9) | ||
326 | # define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10) | ||
327 | # define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11) | ||
328 | # define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12) | ||
329 | # define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13) | ||
330 | |||
331 | #define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c | ||
332 | #define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c | ||
323 | 333 | ||
324 | #define RADEON_CACHE_CNTL 0x1724 | 334 | #define RADEON_CACHE_CNTL 0x1724 |
325 | #define RADEON_CACHE_LINE 0x0f0c /* PCI */ | 335 | #define RADEON_CACHE_LINE 0x0f0c /* PCI */ |
@@ -365,6 +375,8 @@ | |||
365 | #define RADEON_CONFIG_APER_SIZE 0x0108 | 375 | #define RADEON_CONFIG_APER_SIZE 0x0108 |
366 | #define RADEON_CONFIG_BONDS 0x00e8 | 376 | #define RADEON_CONFIG_BONDS 0x00e8 |
367 | #define RADEON_CONFIG_CNTL 0x00e0 | 377 | #define RADEON_CONFIG_CNTL 0x00e0 |
378 | # define RADEON_CFG_VGA_RAM_EN (1 << 8) | ||
379 | # define RADEON_CFG_VGA_IO_DIS (1 << 9) | ||
368 | # define RADEON_CFG_ATI_REV_A11 (0 << 16) | 380 | # define RADEON_CFG_ATI_REV_A11 (0 << 16) |
369 | # define RADEON_CFG_ATI_REV_A12 (1 << 16) | 381 | # define RADEON_CFG_ATI_REV_A12 (1 << 16) |
370 | # define RADEON_CFG_ATI_REV_A13 (2 << 16) | 382 | # define RADEON_CFG_ATI_REV_A13 (2 << 16) |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 06e79822a2bf..992d99d13fc5 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
175 | return 0; | 175 | return 0; |
176 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); | 176 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); |
177 | /* Allocate 1M object buffer */ | 177 | /* Allocate 1M object buffer */ |
178 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, | 178 | r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024, |
179 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, | 179 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, |
180 | &rdev->ib_pool.robj); | 180 | &rdev->ib_pool.robj); |
181 | if (r) { | 181 | if (r) { |
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
332 | rdev->cp.ring_size = ring_size; | 332 | rdev->cp.ring_size = ring_size; |
333 | /* Allocate ring buffer */ | 333 | /* Allocate ring buffer */ |
334 | if (rdev->cp.ring_obj == NULL) { | 334 | if (rdev->cp.ring_obj == NULL) { |
335 | r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true, | 335 | r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true, |
336 | RADEON_GEM_DOMAIN_GTT, | 336 | RADEON_GEM_DOMAIN_GTT, |
337 | &rdev->cp.ring_obj); | 337 | &rdev->cp.ring_obj); |
338 | if (r) { | 338 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 5b44f652145c..dee4a0c1b4b2 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
52 | goto out_cleanup; | 52 | goto out_cleanup; |
53 | } | 53 | } |
54 | 54 | ||
55 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | 55 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
56 | &vram_obj); | 56 | &vram_obj); |
57 | if (r) { | 57 | if (r) { |
58 | DRM_ERROR("Failed to create VRAM object\n"); | 58 | DRM_ERROR("Failed to create VRAM object\n"); |
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
71 | void **gtt_start, **gtt_end; | 71 | void **gtt_start, **gtt_end; |
72 | void **vram_start, **vram_end; | 72 | void **vram_start, **vram_end; |
73 | 73 | ||
74 | r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, | 74 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, |
75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); | 75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); |
76 | if (r) { | 76 | if (r) { |
77 | DRM_ERROR("Failed to create GTT object %d\n", i); | 77 | DRM_ERROR("Failed to create GTT object %d\n", i); |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1272e4b6a1d4..df5734d0c4af 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
529 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 529 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
530 | return r; | 530 | return r; |
531 | } | 531 | } |
532 | r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true, | 532 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, |
533 | RADEON_GEM_DOMAIN_VRAM, | 533 | RADEON_GEM_DOMAIN_VRAM, |
534 | &rdev->stollen_vga_memory); | 534 | &rdev->stollen_vga_memory); |
535 | if (r) { | 535 | if (r) { |
@@ -647,6 +647,7 @@ struct radeon_ttm_backend { | |||
647 | unsigned long num_pages; | 647 | unsigned long num_pages; |
648 | struct page **pages; | 648 | struct page **pages; |
649 | struct page *dummy_read_page; | 649 | struct page *dummy_read_page; |
650 | dma_addr_t *dma_addrs; | ||
650 | bool populated; | 651 | bool populated; |
651 | bool bound; | 652 | bool bound; |
652 | unsigned offset; | 653 | unsigned offset; |
@@ -655,12 +656,14 @@ struct radeon_ttm_backend { | |||
655 | static int radeon_ttm_backend_populate(struct ttm_backend *backend, | 656 | static int radeon_ttm_backend_populate(struct ttm_backend *backend, |
656 | unsigned long num_pages, | 657 | unsigned long num_pages, |
657 | struct page **pages, | 658 | struct page **pages, |
658 | struct page *dummy_read_page) | 659 | struct page *dummy_read_page, |
660 | dma_addr_t *dma_addrs) | ||
659 | { | 661 | { |
660 | struct radeon_ttm_backend *gtt; | 662 | struct radeon_ttm_backend *gtt; |
661 | 663 | ||
662 | gtt = container_of(backend, struct radeon_ttm_backend, backend); | 664 | gtt = container_of(backend, struct radeon_ttm_backend, backend); |
663 | gtt->pages = pages; | 665 | gtt->pages = pages; |
666 | gtt->dma_addrs = dma_addrs; | ||
664 | gtt->num_pages = num_pages; | 667 | gtt->num_pages = num_pages; |
665 | gtt->dummy_read_page = dummy_read_page; | 668 | gtt->dummy_read_page = dummy_read_page; |
666 | gtt->populated = true; | 669 | gtt->populated = true; |
@@ -673,6 +676,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend) | |||
673 | 676 | ||
674 | gtt = container_of(backend, struct radeon_ttm_backend, backend); | 677 | gtt = container_of(backend, struct radeon_ttm_backend, backend); |
675 | gtt->pages = NULL; | 678 | gtt->pages = NULL; |
679 | gtt->dma_addrs = NULL; | ||
676 | gtt->num_pages = 0; | 680 | gtt->num_pages = 0; |
677 | gtt->dummy_read_page = NULL; | 681 | gtt->dummy_read_page = NULL; |
678 | gtt->populated = false; | 682 | gtt->populated = false; |
@@ -693,7 +697,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend, | |||
693 | gtt->num_pages, bo_mem, backend); | 697 | gtt->num_pages, bo_mem, backend); |
694 | } | 698 | } |
695 | r = radeon_gart_bind(gtt->rdev, gtt->offset, | 699 | r = radeon_gart_bind(gtt->rdev, gtt->offset, |
696 | gtt->num_pages, gtt->pages); | 700 | gtt->num_pages, gtt->pages, gtt->dma_addrs); |
697 | if (r) { | 701 | if (r) { |
698 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", | 702 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", |
699 | gtt->num_pages, gtt->offset); | 703 | gtt->num_pages, gtt->offset); |
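[annotation] The dma_addrs pointer threaded through the radeon TTM backend here pairs with the ttm_page_alloc.c change further down: when TTM obtains pages from dma_alloc_coherent(), it records one bus address per page, and the backend forwards the array to radeon_gart_bind() so the GART can be programmed without mapping the pages again. The binder itself lives in radeon_gart.c and is not part of this diff; a hypothetical sketch of the consumer side, assuming it prefers a pre-mapped address when one is supplied:

/* Hypothetical: how a binder might consume the array. */
for (i = 0; i < pages; i++) {
	dma_addr_t bus_addr;

	if (dma_addr && dma_addr[i])
		bus_addr = dma_addr[i];	/* coherent allocation, already mapped */
	else
		bus_addr = pci_map_page(rdev->pdev, pagelist[i], 0,
					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	radeon_gart_set_page(rdev, offset + i, bus_addr);
}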
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index ac40fd39d787..9177f9191837 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -439,7 +439,7 @@ evergreen 0x9400 | |||
439 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X | 439 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X |
440 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y | 440 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y |
441 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z | 441 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z |
442 | 0x000286F8 GDS_ADDR_SIZE | 442 | 0x00028724 GDS_ADDR_SIZE |
443 | 0x00028780 CB_BLEND0_CONTROL | 443 | 0x00028780 CB_BLEND0_CONTROL |
444 | 0x00028784 CB_BLEND1_CONTROL | 444 | 0x00028784 CB_BLEND1_CONTROL |
445 | 0x00028788 CB_BLEND2_CONTROL | 445 | 0x00028788 CB_BLEND2_CONTROL |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index b3f9f1d92005..ef422bbacfc1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 | |||
@@ -304,6 +304,22 @@ rv515 0x6d40 | |||
304 | 0x4630 US_CODE_ADDR | 304 | 0x4630 US_CODE_ADDR |
305 | 0x4634 US_CODE_RANGE | 305 | 0x4634 US_CODE_RANGE |
306 | 0x4638 US_CODE_OFFSET | 306 | 0x4638 US_CODE_OFFSET |
307 | 0x4640 US_FORMAT0_0 | ||
308 | 0x4644 US_FORMAT0_1 | ||
309 | 0x4648 US_FORMAT0_2 | ||
310 | 0x464C US_FORMAT0_3 | ||
311 | 0x4650 US_FORMAT0_4 | ||
312 | 0x4654 US_FORMAT0_5 | ||
313 | 0x4658 US_FORMAT0_6 | ||
314 | 0x465C US_FORMAT0_7 | ||
315 | 0x4660 US_FORMAT0_8 | ||
316 | 0x4664 US_FORMAT0_9 | ||
317 | 0x4668 US_FORMAT0_10 | ||
318 | 0x466C US_FORMAT0_11 | ||
319 | 0x4670 US_FORMAT0_12 | ||
320 | 0x4674 US_FORMAT0_13 | ||
321 | 0x4678 US_FORMAT0_14 | ||
322 | 0x467C US_FORMAT0_15 | ||
307 | 0x46A4 US_OUT_FMT_0 | 323 | 0x46A4 US_OUT_FMT_0 |
308 | 0x46A8 US_OUT_FMT_1 | 324 | 0x46A8 US_OUT_FMT_1 |
309 | 0x46AC US_OUT_FMT_2 | 325 | 0x46AC US_OUT_FMT_2 |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 5512e4e5e636..c76283d9eb3d 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -203,6 +203,9 @@ void rs400_gart_fini(struct radeon_device *rdev) | |||
203 | radeon_gart_table_ram_free(rdev); | 203 | radeon_gart_table_ram_free(rdev); |
204 | } | 204 | } |
205 | 205 | ||
206 | #define RS400_PTE_WRITEABLE (1 << 2) | ||
207 | #define RS400_PTE_READABLE (1 << 3) | ||
208 | |||
206 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 209 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
207 | { | 210 | { |
208 | uint32_t entry; | 211 | uint32_t entry; |
@@ -213,7 +216,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
213 | 216 | ||
214 | entry = (lower_32_bits(addr) & PAGE_MASK) | | 217 | entry = (lower_32_bits(addr) & PAGE_MASK) | |
215 | ((upper_32_bits(addr) & 0xff) << 4) | | 218 | ((upper_32_bits(addr) & 0xff) << 4) | |
216 | 0xc; | 219 | RS400_PTE_WRITEABLE | RS400_PTE_READABLE; |
217 | entry = cpu_to_le32(entry); | 220 | entry = cpu_to_le32(entry); |
218 | rdev->gart.table.ram.ptr[i] = entry; | 221 | rdev->gart.table.ram.ptr[i] = entry; |
219 | return 0; | 222 | return 0; |
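[annotation] Replacing the bare 0xc is cosmetic but finally documents the RS400 GART PTE layout: bit 2 grants write access, bit 3 read access, and their union is exactly the old literal:

#define RS400_PTE_WRITEABLE (1 << 2)	/* 0x4 */
#define RS400_PTE_READABLE  (1 << 3)	/* 0x8 */
/* RS400_PTE_WRITEABLE | RS400_PTE_READABLE == 0xc, the old magic value */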
@@ -226,8 +229,8 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) | |||
226 | 229 | ||
227 | for (i = 0; i < rdev->usec_timeout; i++) { | 230 | for (i = 0; i < rdev->usec_timeout; i++) { |
228 | /* read MC_STATUS */ | 231 | /* read MC_STATUS */ |
229 | tmp = RREG32(0x0150); | 232 | tmp = RREG32(RADEON_MC_STATUS); |
230 | if (tmp & (1 << 2)) { | 233 | if (tmp & RADEON_MC_IDLE) { |
231 | return 0; | 234 | return 0; |
232 | } | 235 | } |
233 | DRM_UDELAY(1); | 236 | DRM_UDELAY(1); |
@@ -241,7 +244,7 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
241 | r420_pipes_init(rdev); | 244 | r420_pipes_init(rdev); |
242 | if (rs400_mc_wait_for_idle(rdev)) { | 245 | if (rs400_mc_wait_for_idle(rdev)) { |
243 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " | 246 | printk(KERN_WARNING "rs400: Failed to wait MC idle while " |
244 | "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); | 247 | "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS)); |
245 | } | 248 | } |
246 | } | 249 | } |
247 | 250 | ||
@@ -300,9 +303,9 @@ static int rs400_debugfs_gart_info(struct seq_file *m, void *data) | |||
300 | seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp); | 303 | seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp); |
301 | tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION); | 304 | tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION); |
302 | seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp); | 305 | seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp); |
303 | tmp = RREG32_MC(0x100); | 306 | tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION); |
304 | seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp); | 307 | seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp); |
305 | tmp = RREG32(0x134); | 308 | tmp = RREG32(RS690_HDP_FB_LOCATION); |
306 | seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp); | 309 | seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp); |
307 | } else { | 310 | } else { |
308 | tmp = RREG32(RADEON_AGP_BASE); | 311 | tmp = RREG32(RADEON_AGP_BASE); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9a85b1614c86..5afe294ed51f 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -339,16 +339,16 @@ void rs600_bm_disable(struct radeon_device *rdev) | |||
339 | 339 | ||
340 | int rs600_asic_reset(struct radeon_device *rdev) | 340 | int rs600_asic_reset(struct radeon_device *rdev) |
341 | { | 341 | { |
342 | u32 status, tmp; | ||
343 | |||
344 | struct rv515_mc_save save; | 342 | struct rv515_mc_save save; |
343 | u32 status, tmp; | ||
344 | int ret = 0; | ||
345 | 345 | ||
346 | /* Stops all mc clients */ | ||
347 | rv515_mc_stop(rdev, &save); | ||
348 | status = RREG32(R_000E40_RBBM_STATUS); | 346 | status = RREG32(R_000E40_RBBM_STATUS); |
349 | if (!G_000E40_GUI_ACTIVE(status)) { | 347 | if (!G_000E40_GUI_ACTIVE(status)) { |
350 | return 0; | 348 | return 0; |
351 | } | 349 | } |
350 | /* Stops all mc clients */ | ||
351 | rv515_mc_stop(rdev, &save); | ||
352 | status = RREG32(R_000E40_RBBM_STATUS); | 352 | status = RREG32(R_000E40_RBBM_STATUS); |
353 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); | 353 | dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); |
354 | /* stop CP */ | 354 | /* stop CP */ |
@@ -392,11 +392,11 @@ int rs600_asic_reset(struct radeon_device *rdev) | |||
392 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { | 392 | if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { |
393 | dev_err(rdev->dev, "failed to reset GPU\n"); | 393 | dev_err(rdev->dev, "failed to reset GPU\n"); |
394 | rdev->gpu_lockup = true; | 394 | rdev->gpu_lockup = true; |
395 | return -1; | 395 | ret = -1; |
396 | } | 396 | } else |
397 | dev_info(rdev->dev, "GPU reset succeed\n"); | ||
397 | rv515_mc_resume(rdev, &save); | 398 | rv515_mc_resume(rdev, &save); |
398 | dev_info(rdev->dev, "GPU reset succeed\n"); | 399 | return ret; |
399 | return 0; | ||
400 | } | 400 | } |
401 | 401 | ||
402 | /* | 402 | /* |
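[annotation] The rs600_asic_reset() rework fixes two things at once: the GUI-idle early return now happens before rv515_mc_stop(), so an already-idle GPU is no longer left with its memory-controller clients stopped, and the failure path funnels through a single exit so rv515_mc_resume() runs whether or not the reset succeeded. The shape of the fix, with still_busy standing in for the GA/VAP busy test:

int ret = 0;

status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status))
	return 0;		/* nothing to reset; MC was never stopped */

rv515_mc_stop(rdev, &save);
/* ... reset sequence ... */
if (still_busy)
	ret = -1;		/* record failure, but fall through: */
rv515_mc_resume(rdev, &save);	/* always undo rv515_mc_stop() */
return ret;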
@@ -692,7 +692,7 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
692 | /* reset gui idle ack. the status bit is broken */ | 692 | /* reset gui idle ack. the status bit is broken */ |
693 | rdev->irq.gui_idle_acked = false; | 693 | rdev->irq.gui_idle_acked = false; |
694 | if (queue_hotplug) | 694 | if (queue_hotplug) |
695 | queue_work(rdev->wq, &rdev->hotplug_work); | 695 | schedule_work(&rdev->hotplug_work); |
696 | if (rdev->msi_enabled) { | 696 | if (rdev->msi_enabled) { |
697 | switch (rdev->family) { | 697 | switch (rdev->family) { |
698 | case CHIP_RS600: | 698 | case CHIP_RS600: |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 5d569f41f4ae..64b57af93714 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -69,13 +69,13 @@ void rv515_ring_start(struct radeon_device *rdev) | |||
69 | ISYNC_CPSCRATCH_IDLEGUI); | 69 | ISYNC_CPSCRATCH_IDLEGUI); |
70 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); | 70 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); |
71 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); | 71 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
72 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); | 72 | radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0)); |
73 | radeon_ring_write(rdev, 1 << 31); | 73 | radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG); |
74 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); | 74 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); |
75 | radeon_ring_write(rdev, 0); | 75 | radeon_ring_write(rdev, 0); |
76 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); | 76 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); |
77 | radeon_ring_write(rdev, 0); | 77 | radeon_ring_write(rdev, 0); |
78 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); | 78 | radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0)); |
79 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); | 79 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); |
80 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); | 80 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); |
81 | radeon_ring_write(rdev, 0); | 81 | radeon_ring_write(rdev, 0); |
@@ -153,8 +153,8 @@ void rv515_gpu_init(struct radeon_device *rdev) | |||
153 | } | 153 | } |
154 | rv515_vga_render_disable(rdev); | 154 | rv515_vga_render_disable(rdev); |
155 | r420_pipes_init(rdev); | 155 | r420_pipes_init(rdev); |
156 | gb_pipe_select = RREG32(0x402C); | 156 | gb_pipe_select = RREG32(R400_GB_PIPE_SELECT); |
157 | tmp = RREG32(0x170C); | 157 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
158 | pipe_select_current = (tmp >> 2) & 3; | 158 | pipe_select_current = (tmp >> 2) & 3; |
159 | tmp = (1 << pipe_select_current) | | 159 | tmp = (1 << pipe_select_current) | |
160 | (((gb_pipe_select >> 8) & 0xF) << 4); | 160 | (((gb_pipe_select >> 8) & 0xF) << 4); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 645aa1fd7611..3a95999d2fef 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | static void rv770_gpu_init(struct radeon_device *rdev); | 42 | static void rv770_gpu_init(struct radeon_device *rdev); |
43 | void rv770_fini(struct radeon_device *rdev); | 43 | void rv770_fini(struct radeon_device *rdev); |
44 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev); | ||
44 | 45 | ||
45 | u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | 46 | u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) |
46 | { | 47 | { |
@@ -77,18 +78,23 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) | |||
77 | } | 78 | } |
78 | 79 | ||
79 | /* get temperature in millidegrees */ | 80 | /* get temperature in millidegrees */ |
80 | u32 rv770_get_temp(struct radeon_device *rdev) | 81 | int rv770_get_temp(struct radeon_device *rdev) |
81 | { | 82 | { |
82 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> | 83 | u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> |
83 | ASIC_T_SHIFT; | 84 | ASIC_T_SHIFT; |
84 | u32 actual_temp = 0; | 85 | int actual_temp; |
85 | 86 | ||
86 | if ((temp >> 9) & 1) | 87 | if (temp & 0x400) |
87 | actual_temp = 0; | 88 | actual_temp = -256; |
88 | else | 89 | else if (temp & 0x200) |
89 | actual_temp = (temp >> 1) & 0xff; | 90 | actual_temp = 255; |
90 | 91 | else if (temp & 0x100) { | |
91 | return actual_temp * 1000; | 92 | actual_temp = temp & 0x1ff; |
93 | actual_temp |= ~0x1ff; | ||
94 | } else | ||
95 | actual_temp = temp & 0xff; | ||
96 | |||
97 | return (actual_temp * 1000) / 2; | ||
92 | } | 98 | } |
93 | 99 | ||
94 | void rv770_pm_misc(struct radeon_device *rdev) | 100 | void rv770_pm_misc(struct radeon_device *rdev) |
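[annotation] rv770_get_temp() used to clamp every reading into 0-255 and return a u32, which is why its return type, and the temp variable in the radeon_pm.c hwmon hunk earlier, flip to int: the sensor field is a signed quantity in half-degree units. In the new decode, bit 10 of the raw field yields -256 and bit 9 yields 255 (presumably under- and overflow markers), bit 8 triggers sign extension of the low nine bits, and the result is scaled to millidegrees. The sign-extension branch, worked through with an illustrative value:

/* raw temp field 0x1F0: bit 8 set, so sign-extend the low 9 bits */
int actual_temp = 0x1F0 & 0x1ff;	/* 496 */
actual_temp |= ~0x1ff;			/* two's complement -> -16 */
/* millidegrees: (-16 * 1000) / 2 == -8000, i.e. -8.0 degrees C */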
@@ -993,7 +999,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev) | |||
993 | u64 gpu_addr; | 999 | u64 gpu_addr; |
994 | 1000 | ||
995 | if (rdev->vram_scratch.robj == NULL) { | 1001 | if (rdev->vram_scratch.robj == NULL) { |
996 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | 1002 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, |
997 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | 1003 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
998 | &rdev->vram_scratch.robj); | 1004 | &rdev->vram_scratch.robj); |
999 | if (r) { | 1005 | if (r) { |
@@ -1124,6 +1130,9 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1124 | { | 1130 | { |
1125 | int r; | 1131 | int r; |
1126 | 1132 | ||
1133 | /* enable pcie gen2 link */ | ||
1134 | rv770_pcie_gen2_enable(rdev); | ||
1135 | |||
1127 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 1136 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
1128 | r = r600_init_microcode(rdev); | 1137 | r = r600_init_microcode(rdev); |
1129 | if (r) { | 1138 | if (r) { |
@@ -1264,7 +1273,7 @@ int rv770_init(struct radeon_device *rdev) | |||
1264 | if (r) | 1273 | if (r) |
1265 | return r; | 1274 | return r; |
1266 | /* Post card if necessary */ | 1275 | /* Post card if necessary */ |
1267 | if (!r600_card_posted(rdev)) { | 1276 | if (!radeon_card_posted(rdev)) { |
1268 | if (!rdev->bios) { | 1277 | if (!rdev->bios) { |
1269 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | 1278 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); |
1270 | return -EINVAL; | 1279 | return -EINVAL; |
@@ -1362,3 +1371,78 @@ void rv770_fini(struct radeon_device *rdev) | |||
1362 | rdev->bios = NULL; | 1371 | rdev->bios = NULL; |
1363 | radeon_dummy_page_fini(rdev); | 1372 | radeon_dummy_page_fini(rdev); |
1364 | } | 1373 | } |
1374 | |||
1375 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev) | ||
1376 | { | ||
1377 | u32 link_width_cntl, lanes, speed_cntl, tmp; | ||
1378 | u16 link_cntl2; | ||
1379 | |||
1380 | if (radeon_pcie_gen2 == 0) | ||
1381 | return; | ||
1382 | |||
1383 | if (rdev->flags & RADEON_IS_IGP) | ||
1384 | return; | ||
1385 | |||
1386 | if (!(rdev->flags & RADEON_IS_PCIE)) | ||
1387 | return; | ||
1388 | |||
1389 | /* x2 cards have a special sequence */ | ||
1390 | if (ASIC_IS_X2(rdev)) | ||
1391 | return; | ||
1392 | |||
1393 | /* advertise upconfig capability */ | ||
1394 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
1395 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
1396 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
1397 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
1398 | if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { | ||
1399 | lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; | ||
1400 | link_width_cntl &= ~(LC_LINK_WIDTH_MASK | | ||
1401 | LC_RECONFIG_ARC_MISSING_ESCAPE); | ||
1402 | link_width_cntl |= lanes | LC_RECONFIG_NOW | | ||
1403 | LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT; | ||
1404 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
1405 | } else { | ||
1406 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
1407 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
1408 | } | ||
1409 | |||
1410 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1411 | if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && | ||
1412 | (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { | ||
1413 | |||
1414 | tmp = RREG32(0x541c); | ||
1415 | WREG32(0x541c, tmp | 0x8); | ||
1416 | WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); | ||
1417 | link_cntl2 = RREG16(0x4088); | ||
1418 | link_cntl2 &= ~TARGET_LINK_SPEED_MASK; | ||
1419 | link_cntl2 |= 0x2; | ||
1420 | WREG16(0x4088, link_cntl2); | ||
1421 | WREG32(MM_CFGREGS_CNTL, 0); | ||
1422 | |||
1423 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1424 | speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; | ||
1425 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
1426 | |||
1427 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1428 | speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; | ||
1429 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
1430 | |||
1431 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1432 | speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; | ||
1433 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
1434 | |||
1435 | speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); | ||
1436 | speed_cntl |= LC_GEN2_EN_STRAP; | ||
1437 | WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); | ||
1438 | |||
1439 | } else { | ||
1440 | link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); | ||
1441 | /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ | ||
1442 | if (1) | ||
1443 | link_width_cntl |= LC_UPCONFIGURE_DIS; | ||
1444 | else | ||
1445 | link_width_cntl &= ~LC_UPCONFIGURE_DIS; | ||
1446 | WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); | ||
1447 | } | ||
1448 | } | ||
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index fc77e1e1a179..abc8cf5a3672 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -360,4 +360,42 @@ | |||
360 | #define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c | 360 | #define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c |
361 | #define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c | 361 | #define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c |
362 | 362 | ||
363 | /* PCIE link stuff */ | ||
364 | #define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ | ||
365 | #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ | ||
366 | # define LC_LINK_WIDTH_SHIFT 0 | ||
367 | # define LC_LINK_WIDTH_MASK 0x7 | ||
368 | # define LC_LINK_WIDTH_X0 0 | ||
369 | # define LC_LINK_WIDTH_X1 1 | ||
370 | # define LC_LINK_WIDTH_X2 2 | ||
371 | # define LC_LINK_WIDTH_X4 3 | ||
372 | # define LC_LINK_WIDTH_X8 4 | ||
373 | # define LC_LINK_WIDTH_X16 6 | ||
374 | # define LC_LINK_WIDTH_RD_SHIFT 4 | ||
375 | # define LC_LINK_WIDTH_RD_MASK 0x70 | ||
376 | # define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) | ||
377 | # define LC_RECONFIG_NOW (1 << 8) | ||
378 | # define LC_RENEGOTIATION_SUPPORT (1 << 9) | ||
379 | # define LC_RENEGOTIATE_EN (1 << 10) | ||
380 | # define LC_SHORT_RECONFIG_EN (1 << 11) | ||
381 | # define LC_UPCONFIGURE_SUPPORT (1 << 12) | ||
382 | # define LC_UPCONFIGURE_DIS (1 << 13) | ||
383 | #define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ | ||
384 | # define LC_GEN2_EN_STRAP (1 << 0) | ||
385 | # define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) | ||
386 | # define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) | ||
387 | # define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) | ||
388 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) | ||
389 | # define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 | ||
390 | # define LC_CURRENT_DATA_RATE (1 << 11) | ||
391 | # define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) | ||
392 | # define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) | ||
393 | # define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) | ||
394 | # define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) | ||
395 | #define MM_CFGREGS_CNTL 0x544c | ||
396 | # define MM_WR_TO_CFG_EN (1 << 3) | ||
397 | #define LINK_CNTL2 0x88 /* F0 */ | ||
398 | # define TARGET_LINK_SPEED_MASK (0xf << 0) | ||
399 | # define SELECTABLE_DEEMPHASIS (1 << 6) | ||
400 | |||
363 | #endif | 401 | #endif |
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index fa64d25d4248..6464490b240b 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c | |||
@@ -55,11 +55,6 @@ static struct drm_driver driver = { | |||
55 | .llseek = noop_llseek, | 55 | .llseek = noop_llseek, |
56 | }, | 56 | }, |
57 | 57 | ||
58 | .pci_driver = { | ||
59 | .name = DRIVER_NAME, | ||
60 | .id_table = pciidlist, | ||
61 | }, | ||
62 | |||
63 | .name = DRIVER_NAME, | 58 | .name = DRIVER_NAME, |
64 | .desc = DRIVER_DESC, | 59 | .desc = DRIVER_DESC, |
65 | .date = DRIVER_DATE, | 60 | .date = DRIVER_DATE, |
@@ -68,15 +63,20 @@ static struct drm_driver driver = { | |||
68 | .patchlevel = DRIVER_PATCHLEVEL, | 63 | .patchlevel = DRIVER_PATCHLEVEL, |
69 | }; | 64 | }; |
70 | 65 | ||
66 | static struct pci_driver savage_pci_driver = { | ||
67 | .name = DRIVER_NAME, | ||
68 | .id_table = pciidlist, | ||
69 | }; | ||
70 | |||
71 | static int __init savage_init(void) | 71 | static int __init savage_init(void) |
72 | { | 72 | { |
73 | driver.num_ioctls = savage_max_ioctl; | 73 | driver.num_ioctls = savage_max_ioctl; |
74 | return drm_init(&driver); | 74 | return drm_pci_init(&driver, &savage_pci_driver); |
75 | } | 75 | } |
76 | 76 | ||
77 | static void __exit savage_exit(void) | 77 | static void __exit savage_exit(void) |
78 | { | 78 | { |
79 | drm_exit(&driver); | 79 | drm_pci_exit(&driver, &savage_pci_driver); |
80 | } | 80 | } |
81 | 81 | ||
82 | module_init(savage_init); | 82 | module_init(savage_init); |
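[annotation] This savage hunk is one instance of a tree-wide API change carried by this merge: struct drm_driver loses its embedded pci_driver, each driver declares a standalone struct pci_driver, and registration goes through the new drm_pci_init()/drm_pci_exit() pair instead of drm_init()/drm_exit(). The sis, tdfx, via and vmwgfx hunks below repeat the same transformation, which decouples bus registration from the DRM core (presumably clearing the way for non-PCI DRM devices). The pattern, with foo standing in for a driver name:

static struct pci_driver foo_pci_driver = {
	.name     = DRIVER_NAME,
	.id_table = pciidlist,
};

static int __init foo_init(void)
{
	return drm_pci_init(&driver, &foo_pci_driver);	/* was drm_init(&driver) */
}

static void __exit foo_exit(void)
{
	drm_pci_exit(&driver, &foo_pci_driver);		/* was drm_exit(&driver) */
}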
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 4caf5d01cfd3..46d5be6e97e5 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c | |||
@@ -82,10 +82,6 @@ static struct drm_driver driver = { | |||
82 | .fasync = drm_fasync, | 82 | .fasync = drm_fasync, |
83 | .llseek = noop_llseek, | 83 | .llseek = noop_llseek, |
84 | }, | 84 | }, |
85 | .pci_driver = { | ||
86 | .name = DRIVER_NAME, | ||
87 | .id_table = pciidlist, | ||
88 | }, | ||
89 | 85 | ||
90 | .name = DRIVER_NAME, | 86 | .name = DRIVER_NAME, |
91 | .desc = DRIVER_DESC, | 87 | .desc = DRIVER_DESC, |
@@ -95,15 +91,20 @@ static struct drm_driver driver = { | |||
95 | .patchlevel = DRIVER_PATCHLEVEL, | 91 | .patchlevel = DRIVER_PATCHLEVEL, |
96 | }; | 92 | }; |
97 | 93 | ||
94 | static struct pci_driver sis_pci_driver = { | ||
95 | .name = DRIVER_NAME, | ||
96 | .id_table = pciidlist, | ||
97 | }; | ||
98 | |||
98 | static int __init sis_init(void) | 99 | static int __init sis_init(void) |
99 | { | 100 | { |
100 | driver.num_ioctls = sis_max_ioctl; | 101 | driver.num_ioctls = sis_max_ioctl; |
101 | return drm_init(&driver); | 102 | return drm_pci_init(&driver, &sis_pci_driver); |
102 | } | 103 | } |
103 | 104 | ||
104 | static void __exit sis_exit(void) | 105 | static void __exit sis_exit(void) |
105 | { | 106 | { |
106 | drm_exit(&driver); | 107 | drm_pci_exit(&driver, &sis_pci_driver); |
107 | } | 108 | } |
108 | 109 | ||
109 | module_init(sis_init); | 110 | module_init(sis_init); |
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index b70fa91d761a..8bf98810a8d6 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c | |||
@@ -52,10 +52,6 @@ static struct drm_driver driver = { | |||
52 | .fasync = drm_fasync, | 52 | .fasync = drm_fasync, |
53 | .llseek = noop_llseek, | 53 | .llseek = noop_llseek, |
54 | }, | 54 | }, |
55 | .pci_driver = { | ||
56 | .name = DRIVER_NAME, | ||
57 | .id_table = pciidlist, | ||
58 | }, | ||
59 | 55 | ||
60 | .name = DRIVER_NAME, | 56 | .name = DRIVER_NAME, |
61 | .desc = DRIVER_DESC, | 57 | .desc = DRIVER_DESC, |
@@ -65,14 +61,19 @@ static struct drm_driver driver = { | |||
65 | .patchlevel = DRIVER_PATCHLEVEL, | 61 | .patchlevel = DRIVER_PATCHLEVEL, |
66 | }; | 62 | }; |
67 | 63 | ||
64 | static struct pci_driver tdfx_pci_driver = { | ||
65 | .name = DRIVER_NAME, | ||
66 | .id_table = pciidlist, | ||
67 | }; | ||
68 | |||
68 | static int __init tdfx_init(void) | 69 | static int __init tdfx_init(void) |
69 | { | 70 | { |
70 | return drm_init(&driver); | 71 | return drm_pci_init(&driver, &tdfx_pci_driver); |
71 | } | 72 | } |
72 | 73 | ||
73 | static void __exit tdfx_exit(void) | 74 | static void __exit tdfx_exit(void) |
74 | { | 75 | { |
75 | drm_exit(&driver); | 76 | drm_pci_exit(&driver, &tdfx_pci_driver); |
76 | } | 77 | } |
77 | 78 | ||
78 | module_init(tdfx_init); | 79 | module_init(tdfx_init); |
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index f999e36f30b4..1c4a72f681c1 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c | |||
@@ -47,7 +47,8 @@ struct ttm_agp_backend { | |||
47 | 47 | ||
48 | static int ttm_agp_populate(struct ttm_backend *backend, | 48 | static int ttm_agp_populate(struct ttm_backend *backend, |
49 | unsigned long num_pages, struct page **pages, | 49 | unsigned long num_pages, struct page **pages, |
50 | struct page *dummy_read_page) | 50 | struct page *dummy_read_page, |
51 | dma_addr_t *dma_addrs) | ||
51 | { | 52 | { |
52 | struct ttm_agp_backend *agp_be = | 53 | struct ttm_agp_backend *agp_be = |
53 | container_of(backend, struct ttm_agp_backend, backend); | 54 | container_of(backend, struct ttm_agp_backend, backend); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cf2ec562550e..0b6a55ac2f87 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -406,11 +406,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
406 | } | 406 | } |
407 | 407 | ||
408 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { | 408 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { |
409 | if (bdev->driver->move_notify) | ||
410 | bdev->driver->move_notify(bo, mem); | ||
409 | bo->mem = *mem; | 411 | bo->mem = *mem; |
410 | mem->mm_node = NULL; | 412 | mem->mm_node = NULL; |
411 | goto moved; | 413 | goto moved; |
412 | } | 414 | } |
413 | |||
414 | } | 415 | } |
415 | 416 | ||
416 | if (bdev->driver->move_notify) | 417 | if (bdev->driver->move_notify) |
@@ -1500,8 +1501,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) | |||
1500 | list_del(&bdev->device_list); | 1501 | list_del(&bdev->device_list); |
1501 | mutex_unlock(&glob->device_list_mutex); | 1502 | mutex_unlock(&glob->device_list_mutex); |
1502 | 1503 | ||
1503 | if (!cancel_delayed_work(&bdev->wq)) | 1504 | cancel_delayed_work_sync(&bdev->wq); |
1504 | flush_scheduled_work(); | ||
1505 | 1505 | ||
1506 | while (ttm_bo_delayed_delete(bdev, true)) | 1506 | while (ttm_bo_delayed_delete(bdev, true)) |
1507 | ; | 1507 | ; |
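[annotation] Two independent fixes in ttm_bo.c: the release path gets the same cancel_delayed_work_sync() conversion as radeon_pm.c, and ttm_bo_handle_move_mem() now fires the driver's move_notify() hook on the short-circuited move into TTM_PL_SYSTEM, which previously bypassed it and could leave driver translation-table state stale. A driver-side sketch of what the hook is typically for; foo_gart_unbind is a hypothetical helper:

static void foo_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *new_mem)
{
	/* new_mem describes the destination; tear down any GART/GTT
	 * binding so nothing stale survives a hop through SYSTEM. */
	foo_gart_unbind(bo);
}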
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index b1e02fffd3cc..737a2a2e46a5 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/mm.h> | 38 | #include <linux/mm.h> |
39 | #include <linux/seq_file.h> /* for seq_printf */ | 39 | #include <linux/seq_file.h> /* for seq_printf */ |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/dma-mapping.h> | ||
41 | 42 | ||
42 | #include <asm/atomic.h> | 43 | #include <asm/atomic.h> |
43 | 44 | ||
@@ -662,7 +663,8 @@ out: | |||
662 | * cached pages. | 663 | * cached pages. |
663 | */ | 664 | */ |
664 | int ttm_get_pages(struct list_head *pages, int flags, | 665 | int ttm_get_pages(struct list_head *pages, int flags, |
665 | enum ttm_caching_state cstate, unsigned count) | 666 | enum ttm_caching_state cstate, unsigned count, |
667 | dma_addr_t *dma_address) | ||
666 | { | 668 | { |
667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 669 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
668 | struct page *p = NULL; | 670 | struct page *p = NULL; |
@@ -681,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
681 | gfp_flags |= GFP_HIGHUSER; | 683 | gfp_flags |= GFP_HIGHUSER; |
682 | 684 | ||
683 | for (r = 0; r < count; ++r) { | 685 | for (r = 0; r < count; ++r) { |
684 | p = alloc_page(gfp_flags); | 686 | if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { |
687 | void *addr; | ||
688 | addr = dma_alloc_coherent(NULL, PAGE_SIZE, | ||
689 | &dma_address[r], | ||
690 | gfp_flags); | ||
691 | if (addr == NULL) | ||
692 | return -ENOMEM; | ||
693 | p = virt_to_page(addr); | ||
694 | } else | ||
695 | p = alloc_page(gfp_flags); | ||
685 | if (!p) { | 696 | if (!p) { |
686 | 697 | ||
687 | printk(KERN_ERR TTM_PFX | 698 | printk(KERN_ERR TTM_PFX |
688 | "Unable to allocate page."); | 699 | "Unable to allocate page."); |
689 | return -ENOMEM; | 700 | return -ENOMEM; |
690 | } | 701 | } |
691 | |||
692 | list_add(&p->lru, pages); | 702 | list_add(&p->lru, pages); |
693 | } | 703 | } |
694 | return 0; | 704 | return 0; |
@@ -720,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
720 | printk(KERN_ERR TTM_PFX | 730 | printk(KERN_ERR TTM_PFX |
721 | "Failed to allocate extra pages " | 731 | "Failed to allocate extra pages " |
722 | "for large request."); | 732 | "for large request."); |
723 | ttm_put_pages(pages, 0, flags, cstate); | 733 | ttm_put_pages(pages, 0, flags, cstate, NULL); |
724 | return r; | 734 | return r; |
725 | } | 735 | } |
726 | } | 736 | } |
@@ -731,17 +741,29 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
731 | 741 | ||
732 | /* Put all pages in pages list to correct pool to wait for reuse */ | 742 | /* Put all pages in pages list to correct pool to wait for reuse */ |
733 | void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, | 743 | void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, |
734 | enum ttm_caching_state cstate) | 744 | enum ttm_caching_state cstate, dma_addr_t *dma_address) |
735 | { | 745 | { |
736 | unsigned long irq_flags; | 746 | unsigned long irq_flags; |
737 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 747 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
738 | struct page *p, *tmp; | 748 | struct page *p, *tmp; |
749 | unsigned r; | ||
739 | 750 | ||
740 | if (pool == NULL) { | 751 | if (pool == NULL) { |
741 | /* No pool for this memory type so free the pages */ | 752 | /* No pool for this memory type so free the pages */ |
742 | 753 | ||
754 | r = page_count-1; | ||
743 | list_for_each_entry_safe(p, tmp, pages, lru) { | 755 | list_for_each_entry_safe(p, tmp, pages, lru) { |
744 | __free_page(p); | 756 | if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { |
757 | void *addr = page_address(p); | ||
758 | WARN_ON(!addr || !dma_address[r]); | ||
759 | if (addr) | ||
760 | dma_free_coherent(NULL, PAGE_SIZE, | ||
761 | addr, | ||
762 | dma_address[r]); | ||
763 | dma_address[r] = 0; | ||
764 | } else | ||
765 | __free_page(p); | ||
766 | r--; | ||
745 | } | 767 | } |
746 | /* Make the pages list empty */ | 768 | /* Make the pages list empty */ |
747 | INIT_LIST_HEAD(pages); | 769 | INIT_LIST_HEAD(pages); |
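[annotation] This allocator change is what feeds the dma_address plumbing threaded through the backends above: with TTM_PAGE_FLAG_DMA32 set and a dma_address array supplied, pages come from dma_alloc_coherent() rather than alloc_page(), so the caller receives a bus address guaranteed reachable by 32-bit devices. Two details worth flagging in this version: the calls pass a NULL struct device, so the architecture's default DMA ops apply, and the free path indexes dma_address from page_count-1 downward, relying on list_add() having built the list in reverse allocation order. The allocation branch, restated:

if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
	void *addr = dma_alloc_coherent(NULL, PAGE_SIZE,
					&dma_address[r], gfp_flags);
	if (addr == NULL)
		return -ENOMEM;
	p = virt_to_page(addr);		/* coherent memory still has a struct page */
} else
	p = alloc_page(gfp_flags);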
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index af789dc869b9..86d5b1745a45 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -49,12 +49,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm); | |||
49 | static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) | 49 | static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) |
50 | { | 50 | { |
51 | ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); | 51 | ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); |
52 | ttm->dma_address = drm_calloc_large(ttm->num_pages, | ||
53 | sizeof(*ttm->dma_address)); | ||
52 | } | 54 | } |
53 | 55 | ||
54 | static void ttm_tt_free_page_directory(struct ttm_tt *ttm) | 56 | static void ttm_tt_free_page_directory(struct ttm_tt *ttm) |
55 | { | 57 | { |
56 | drm_free_large(ttm->pages); | 58 | drm_free_large(ttm->pages); |
57 | ttm->pages = NULL; | 59 | ttm->pages = NULL; |
60 | drm_free_large(ttm->dma_address); | ||
61 | ttm->dma_address = NULL; | ||
58 | } | 62 | } |
59 | 63 | ||
60 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) | 64 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) |
@@ -105,7 +109,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) | |||
105 | 109 | ||
106 | INIT_LIST_HEAD(&h); | 110 | INIT_LIST_HEAD(&h); |
107 | 111 | ||
108 | ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1); | 112 | ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1, |
113 | &ttm->dma_address[index]); | ||
109 | 114 | ||
110 | if (ret != 0) | 115 | if (ret != 0) |
111 | return NULL; | 116 | return NULL; |
@@ -164,7 +169,7 @@ int ttm_tt_populate(struct ttm_tt *ttm) | |||
164 | } | 169 | } |
165 | 170 | ||
166 | be->func->populate(be, ttm->num_pages, ttm->pages, | 171 | be->func->populate(be, ttm->num_pages, ttm->pages, |
167 | ttm->dummy_read_page); | 172 | ttm->dummy_read_page, ttm->dma_address); |
168 | ttm->state = tt_unbound; | 173 | ttm->state = tt_unbound; |
169 | return 0; | 174 | return 0; |
170 | } | 175 | } |
@@ -298,7 +303,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) | |||
298 | count++; | 303 | count++; |
299 | } | 304 | } |
300 | } | 305 | } |
301 | ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state); | 306 | ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state, |
307 | ttm->dma_address); | ||
302 | ttm->state = tt_unpopulated; | 308 | ttm->state = tt_unpopulated; |
303 | ttm->first_himem_page = ttm->num_pages; | 309 | ttm->first_himem_page = ttm->num_pages; |
304 | ttm->last_lomem_page = -1; | 310 | ttm->last_lomem_page = -1; |
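ttm_tt_alloc_page_directory() now keeps two parallel directories, with dma_address[i] shadowing pages[i] for the lifetime of the ttm_tt. A sketch of the same pattern with an explicit allocation-failure check folded in (the kernel code above leaves that to its callers; example_tt is illustrative):

        struct example_tt {
                unsigned long num_pages;
                struct page **pages;
                dma_addr_t *dma_address;
        };

        static int example_alloc_page_directory(struct example_tt *tt)
        {
                tt->pages = drm_calloc_large(tt->num_pages, sizeof(*tt->pages));
                tt->dma_address = drm_calloc_large(tt->num_pages,
                                                   sizeof(*tt->dma_address));
                if (!tt->pages || !tt->dma_address) {
                        /* drm_free_large() tolerates NULL, so a partial
                         * failure needs no special casing */
                        drm_free_large(tt->pages);
                        drm_free_large(tt->dma_address);
                        tt->pages = NULL;
                        tt->dma_address = NULL;
                        return -ENOMEM;
                }
                return 0;
        }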
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index e1ff4e7a6eb0..920a55214bcf 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c | |||
@@ -62,10 +62,6 @@ static struct drm_driver driver = { | |||
62 | .fasync = drm_fasync, | 62 | .fasync = drm_fasync, |
63 | .llseek = noop_llseek, | 63 | .llseek = noop_llseek, |
64 | }, | 64 | }, |
65 | .pci_driver = { | ||
66 | .name = DRIVER_NAME, | ||
67 | .id_table = pciidlist, | ||
68 | }, | ||
69 | 65 | ||
70 | .name = DRIVER_NAME, | 66 | .name = DRIVER_NAME, |
71 | .desc = DRIVER_DESC, | 67 | .desc = DRIVER_DESC, |
@@ -75,16 +71,21 @@ static struct drm_driver driver = { | |||
75 | .patchlevel = DRIVER_PATCHLEVEL, | 71 | .patchlevel = DRIVER_PATCHLEVEL, |
76 | }; | 72 | }; |
77 | 73 | ||
74 | static struct pci_driver via_pci_driver = { | ||
75 | .name = DRIVER_NAME, | ||
76 | .id_table = pciidlist, | ||
77 | }; | ||
78 | |||
78 | static int __init via_init(void) | 79 | static int __init via_init(void) |
79 | { | 80 | { |
80 | driver.num_ioctls = via_max_ioctl; | 81 | driver.num_ioctls = via_max_ioctl; |
81 | via_init_command_verifier(); | 82 | via_init_command_verifier(); |
82 | return drm_init(&driver); | 83 | return drm_pci_init(&driver, &via_pci_driver); |
83 | } | 84 | } |
84 | 85 | ||
85 | static void __exit via_exit(void) | 86 | static void __exit via_exit(void) |
86 | { | 87 | { |
87 | drm_exit(&driver); | 88 | drm_pci_exit(&driver, &via_pci_driver); |
88 | } | 89 | } |
89 | 90 | ||
90 | module_init(via_init); | 91 | module_init(via_init); |
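The via conversion shows the new registration split: struct pci_driver moves out of struct drm_driver, and the pair is handed to drm_pci_init()/drm_pci_exit() together. The generic shape, for a driver that also wants probe/remove hooks (the example_* names and the PCI IDs are placeholders, not from the tree):

        static const struct pci_device_id example_pciidlist[] = {
                { PCI_DEVICE(0x1234, 0x5678) }, /* dummy vendor/device */
                { }
        };

        static struct drm_driver example_driver = {
                .name = "example",
                /* fops, ioctls, ... as before; no .pci_driver member */
        };

        static int example_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
        {
                /* hand the device to the DRM core, as vmwgfx does below */
                return drm_get_pci_dev(pdev, ent, &example_driver);
        }

        static struct pci_driver example_pci_driver = {
                .name = "example",
                .id_table = example_pciidlist,
                .probe = example_probe,
        };

        static int __init example_init(void)
        {
                return drm_pci_init(&example_driver, &example_pci_driver);
        }

        static void __exit example_exit(void)
        {
                drm_pci_exit(&example_driver, &example_pci_driver);
        }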
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 80bc37b274e7..87e43e0733bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
@@ -102,7 +102,8 @@ struct vmw_ttm_backend { | |||
102 | 102 | ||
103 | static int vmw_ttm_populate(struct ttm_backend *backend, | 103 | static int vmw_ttm_populate(struct ttm_backend *backend, |
104 | unsigned long num_pages, struct page **pages, | 104 | unsigned long num_pages, struct page **pages, |
105 | struct page *dummy_read_page) | 105 | struct page *dummy_read_page, |
106 | dma_addr_t *dma_addrs) | ||
106 | { | 107 | { |
107 | struct vmw_ttm_backend *vmw_be = | 108 | struct vmw_ttm_backend *vmw_be = |
108 | container_of(backend, struct vmw_ttm_backend, backend); | 109 | container_of(backend, struct vmw_ttm_backend, backend); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 10ca97ee0206..96949b93d920 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -909,15 +909,6 @@ static struct drm_driver driver = { | |||
909 | #endif | 909 | #endif |
910 | .llseek = noop_llseek, | 910 | .llseek = noop_llseek, |
911 | }, | 911 | }, |
912 | .pci_driver = { | ||
913 | .name = VMWGFX_DRIVER_NAME, | ||
914 | .id_table = vmw_pci_id_list, | ||
915 | .probe = vmw_probe, | ||
916 | .remove = vmw_remove, | ||
917 | .driver = { | ||
918 | .pm = &vmw_pm_ops | ||
919 | } | ||
920 | }, | ||
921 | .name = VMWGFX_DRIVER_NAME, | 912 | .name = VMWGFX_DRIVER_NAME, |
922 | .desc = VMWGFX_DRIVER_DESC, | 913 | .desc = VMWGFX_DRIVER_DESC, |
923 | .date = VMWGFX_DRIVER_DATE, | 914 | .date = VMWGFX_DRIVER_DATE, |
@@ -926,6 +917,16 @@ static struct drm_driver driver = { | |||
926 | .patchlevel = VMWGFX_DRIVER_PATCHLEVEL | 917 | .patchlevel = VMWGFX_DRIVER_PATCHLEVEL |
927 | }; | 918 | }; |
928 | 919 | ||
920 | static struct pci_driver vmw_pci_driver = { | ||
921 | .name = VMWGFX_DRIVER_NAME, | ||
922 | .id_table = vmw_pci_id_list, | ||
923 | .probe = vmw_probe, | ||
924 | .remove = vmw_remove, | ||
925 | .driver = { | ||
926 | .pm = &vmw_pm_ops | ||
927 | } | ||
928 | }; | ||
929 | |||
929 | static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 930 | static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
930 | { | 931 | { |
931 | return drm_get_pci_dev(pdev, ent, &driver); | 932 | return drm_get_pci_dev(pdev, ent, &driver); |
@@ -934,7 +935,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
934 | static int __init vmwgfx_init(void) | 935 | static int __init vmwgfx_init(void) |
935 | { | 936 | { |
936 | int ret; | 937 | int ret; |
937 | ret = drm_init(&driver); | 938 | ret = drm_pci_init(&driver, &vmw_pci_driver); |
938 | if (ret) | 939 | if (ret) |
939 | DRM_ERROR("Failed initializing DRM.\n"); | 940 | DRM_ERROR("Failed initializing DRM.\n"); |
940 | return ret; | 941 | return ret; |
@@ -942,7 +943,7 @@ static int __init vmwgfx_init(void) | |||
942 | 943 | ||
943 | static void __exit vmwgfx_exit(void) | 944 | static void __exit vmwgfx_exit(void) |
944 | { | 945 | { |
945 | drm_exit(&driver); | 946 | drm_pci_exit(&driver, &vmw_pci_driver); |
946 | } | 947 | } |
947 | 948 | ||
948 | module_init(vmwgfx_init); | 949 | module_init(vmwgfx_init); |
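Because the pci_driver is now a plain PCI driver, vmwgfx keeps its power-management ops on the standard .driver.pm hook. A sketch of how dev_pm_ops slots in, with example_* callbacks standing in for the real vmw ones:

        static int example_pm_suspend(struct device *dev)
        {
                /* quiesce the device before the system sleeps */
                return 0;
        }

        static int example_pm_resume(struct device *dev)
        {
                /* restore device state on wakeup */
                return 0;
        }

        static const struct dev_pm_ops example_pm_ops = {
                .suspend = example_pm_suspend,
                .resume  = example_pm_resume,
        };

        /* wired in just like vmw_pm_ops above:
         *      .driver = { .pm = &example_pm_ops },
         */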
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 41d9a5b73c03..bfab60c938ac 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -480,9 +480,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
480 | info->fix.smem_start = 0; | 480 | info->fix.smem_start = 0; |
481 | info->fix.smem_len = fb_size; | 481 | info->fix.smem_len = fb_size; |
482 | 482 | ||
483 | info->fix.mmio_start = 0; | ||
484 | info->fix.mmio_len = 0; | ||
485 | |||
486 | info->pseudo_palette = par->pseudo_palette; | 483 | info->pseudo_palette = par->pseudo_palette; |
487 | info->screen_base = par->vmalloc; | 484 | info->screen_base = par->vmalloc; |
488 | info->screen_size = fb_size; | 485 | info->screen_size = fb_size; |
@@ -659,7 +656,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv) | |||
659 | par->dirty.active = false; | 656 | par->dirty.active = false; |
660 | spin_unlock_irqrestore(&par->dirty.lock, flags); | 657 | spin_unlock_irqrestore(&par->dirty.lock, flags); |
661 | 658 | ||
662 | flush_scheduled_work(); | 659 | flush_delayed_work_sync(&info->deferred_work); |
663 | 660 | ||
664 | par->bo_ptr = NULL; | 661 | par->bo_ptr = NULL; |
665 | ttm_bo_kunmap(&par->map); | 662 | ttm_bo_kunmap(&par->map); |
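flush_scheduled_work() drains every item on the shared system workqueue, which over-serializes and risks deadlock if the caller holds a lock some unrelated work item wants. The replacement waits only for the fbdev deferred-I/O item. A minimal sketch of the targeted flush, assuming a delayed work item like info->deferred_work:

        #include <linux/workqueue.h>

        static void example_deferred_io(struct work_struct *work)
        {
                /* push accumulated dirty regions to the device */
        }

        static DECLARE_DELAYED_WORK(example_work, example_deferred_io);

        static void example_stop(void)
        {
                /* waits for example_work alone (whether running or still
                 * pending on its timer) rather than draining the whole
                 * system workqueue */
                flush_delayed_work_sync(&example_work);
        }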
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 29113c9b26a8..b3a2cd5118d7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -345,7 +345,7 @@ static enum drm_connector_status | |||
345 | return connector_status_disconnected; | 345 | return connector_status_disconnected; |
346 | } | 346 | } |
347 | 347 | ||
348 | static struct drm_display_mode vmw_ldu_connector_builtin[] = { | 348 | static const struct drm_display_mode vmw_ldu_connector_builtin[] = { |
349 | /* 640x480@60Hz */ | 349 | /* 640x480@60Hz */ |
350 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | 350 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, |
351 | 752, 800, 0, 480, 489, 492, 525, 0, | 351 | 752, 800, 0, 480, 489, 492, 525, 0, |
@@ -429,7 +429,6 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | |||
429 | struct drm_device *dev = connector->dev; | 429 | struct drm_device *dev = connector->dev; |
430 | struct vmw_private *dev_priv = vmw_priv(dev); | 430 | struct vmw_private *dev_priv = vmw_priv(dev); |
431 | struct drm_display_mode *mode = NULL; | 431 | struct drm_display_mode *mode = NULL; |
432 | struct drm_display_mode *bmode; | ||
433 | struct drm_display_mode prefmode = { DRM_MODE("preferred", | 432 | struct drm_display_mode prefmode = { DRM_MODE("preferred", |
434 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | 433 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, |
435 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 434 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
@@ -459,6 +458,8 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | |||
459 | } | 458 | } |
460 | 459 | ||
461 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { | 460 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { |
461 | const struct drm_display_mode *bmode; | ||
462 | |||
462 | bmode = &vmw_ldu_connector_builtin[i]; | 463 | bmode = &vmw_ldu_connector_builtin[i]; |
463 | if (bmode->hdisplay > max_width || | 464 | if (bmode->hdisplay > max_width || |
464 | bmode->vdisplay > max_height) | 465 | bmode->vdisplay > max_height) |
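The ldu change is pure const-correctness: the builtin mode table moves to rodata and the iterator pointer narrows to match. The idiom, shown with the table's own first entry (the flags here assume the standard 640x480@60 timing and are not visible in the hunk):

        static const struct drm_display_mode example_builtin[] = {
                { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
                           752, 800, 0, 480, 489, 492, 525, 0,
                           DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
                { },    /* zero-filled terminator: the .type != 0 loop stops here */
        };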
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig index 0e1edd7311ff..70e60a4bb678 100644 --- a/drivers/gpu/stub/Kconfig +++ b/drivers/gpu/stub/Kconfig | |||
@@ -1,12 +1,13 @@ | |||
1 | config STUB_POULSBO | 1 | config STUB_POULSBO |
2 | tristate "Intel GMA500 Stub Driver" | 2 | tristate "Intel GMA500 Stub Driver" |
3 | depends on PCI | 3 | depends on PCI |
4 | depends on NET # for THERMAL | ||
4 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled | 5 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled |
5 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 6 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
6 | select VIDEO_OUTPUT_CONTROL if ACPI | ||
7 | select BACKLIGHT_CLASS_DEVICE if ACPI | 7 | select BACKLIGHT_CLASS_DEVICE if ACPI |
8 | select INPUT if ACPI | 8 | select INPUT if ACPI |
9 | select ACPI_VIDEO if ACPI | 9 | select ACPI_VIDEO if ACPI |
10 | select THERMAL if ACPI | ||
10 | help | 11 | help |
11 | Choose this option if you have a system that has Intel GMA500 | 12 | Choose this option if you have a system that has Intel GMA500 |
12 | (Poulsbo) integrated graphics. If M is selected, the module will | 13 | (Poulsbo) integrated graphics. If M is selected, the module will |
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index 8d0e31a22027..96c83a9a76bb 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig | |||
@@ -1,5 +1,5 @@ | |||
1 | config VGA_ARB | 1 | config VGA_ARB |
2 | bool "VGA Arbitration" if EMBEDDED | 2 | bool "VGA Arbitration" if EXPERT |
3 | default y | 3 | default y |
4 | depends on PCI | 4 | depends on PCI |
5 | help | 5 | help |
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index c8768f38511e..e01cacba685f 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c | |||
@@ -33,6 +33,7 @@ struct vga_switcheroo_client { | |||
33 | struct fb_info *fb_info; | 33 | struct fb_info *fb_info; |
34 | int pwr_state; | 34 | int pwr_state; |
35 | void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state); | 35 | void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state); |
36 | void (*reprobe)(struct pci_dev *pdev); | ||
36 | bool (*can_switch)(struct pci_dev *pdev); | 37 | bool (*can_switch)(struct pci_dev *pdev); |
37 | int id; | 38 | int id; |
38 | bool active; | 39 | bool active; |
@@ -103,6 +104,7 @@ static void vga_switcheroo_enable(void) | |||
103 | 104 | ||
104 | int vga_switcheroo_register_client(struct pci_dev *pdev, | 105 | int vga_switcheroo_register_client(struct pci_dev *pdev, |
105 | void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state), | 106 | void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state), |
107 | void (*reprobe)(struct pci_dev *pdev), | ||
106 | bool (*can_switch)(struct pci_dev *pdev)) | 108 | bool (*can_switch)(struct pci_dev *pdev)) |
107 | { | 109 | { |
108 | int index; | 110 | int index; |
@@ -117,6 +119,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev, | |||
117 | vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON; | 119 | vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON; |
118 | vgasr_priv.clients[index].pdev = pdev; | 120 | vgasr_priv.clients[index].pdev = pdev; |
119 | vgasr_priv.clients[index].set_gpu_state = set_gpu_state; | 121 | vgasr_priv.clients[index].set_gpu_state = set_gpu_state; |
122 | vgasr_priv.clients[index].reprobe = reprobe; | ||
120 | vgasr_priv.clients[index].can_switch = can_switch; | 123 | vgasr_priv.clients[index].can_switch = can_switch; |
121 | vgasr_priv.clients[index].id = -1; | 124 | vgasr_priv.clients[index].id = -1; |
122 | if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) | 125 | if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) |
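Clients now pass a third callback at registration; it runs after a switch so the driver can rediscover outputs on the newly muxed GPU. A sketch against the extended signature above (the example_* callbacks are placeholders):

        static void example_set_gpu_state(struct pci_dev *pdev,
                                          enum vga_switcheroo_state state)
        {
                /* power the GPU up or down */
        }

        static void example_reprobe(struct pci_dev *pdev)
        {
                /* re-read EDID/connector state now that the mux points here */
        }

        static bool example_can_switch(struct pci_dev *pdev)
        {
                return true;    /* e.g. no userspace holding the device open */
        }

        static int example_register(struct pci_dev *pdev)
        {
                return vga_switcheroo_register_client(pdev,
                                                      example_set_gpu_state,
                                                      example_reprobe,
                                                      example_can_switch);
        }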
@@ -174,7 +177,8 @@ static int vga_switcheroo_show(struct seq_file *m, void *v) | |||
174 | int i; | 177 | int i; |
175 | mutex_lock(&vgasr_mutex); | 178 | mutex_lock(&vgasr_mutex); |
176 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { | 179 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { |
177 | seq_printf(m, "%d:%c:%s:%s\n", i, | 180 | seq_printf(m, "%d:%s:%c:%s:%s\n", i, |
181 | vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD", | ||
178 | vgasr_priv.clients[i].active ? '+' : ' ', | 182 | vgasr_priv.clients[i].active ? '+' : ' ', |
179 | vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off", | 183 | vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off", |
180 | pci_name(vgasr_priv.clients[i].pdev)); | 184 | pci_name(vgasr_priv.clients[i].pdev)); |
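With the added %s, each debugfs line now names the client type as well; a readout would look like 0:IGD:+:Pwr:0000:00:02.0, i.e. index, IGD or DIS, active marker, power state, PCI name.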
@@ -190,9 +194,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file) | |||
190 | 194 | ||
191 | static int vga_switchon(struct vga_switcheroo_client *client) | 195 | static int vga_switchon(struct vga_switcheroo_client *client) |
192 | { | 196 | { |
193 | int ret; | 197 | if (vgasr_priv.handler->power_state) |
194 | 198 | vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON); | |
195 | ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON); | ||
196 | /* call the driver callback to turn on device */ | 199 | /* call the driver callback to turn on device */ |
197 | client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON); | 200 | client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON); |
198 | client->pwr_state = VGA_SWITCHEROO_ON; | 201 | client->pwr_state = VGA_SWITCHEROO_ON; |
@@ -203,12 +206,14 @@ static int vga_switchoff(struct vga_switcheroo_client *client) | |||
203 | { | 206 | { |
204 | /* call the driver callback to turn off device */ | 207 | /* call the driver callback to turn off device */ |
205 | client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF); | 208 | client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF); |
206 | vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF); | 209 | if (vgasr_priv.handler->power_state) |
210 | vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF); | ||
207 | client->pwr_state = VGA_SWITCHEROO_OFF; | 211 | client->pwr_state = VGA_SWITCHEROO_OFF; |
208 | return 0; | 212 | return 0; |
209 | } | 213 | } |
210 | 214 | ||
211 | static int vga_switchto(struct vga_switcheroo_client *new_client) | 215 | /* stage one happens before delay */ |
216 | static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) | ||
212 | { | 217 | { |
213 | int ret; | 218 | int ret; |
214 | int i; | 219 | int i; |
@@ -235,10 +240,28 @@ static int vga_switchto(struct vga_switcheroo_client *new_client) | |||
235 | vga_switchon(new_client); | 240 | vga_switchon(new_client); |
236 | 241 | ||
237 | /* swap shadow resource to denote boot VGA device has changed so X starts on new device */ | 242 | /* swap shadow resource to denote boot VGA device has changed so X starts on new device */ |
238 | active->active = false; | ||
239 | |||
240 | active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW; | 243 | active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW; |
241 | new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; | 244 | new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; |
245 | return 0; | ||
246 | } | ||
247 | |||
248 | /* post delay */ | ||
249 | static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) | ||
250 | { | ||
251 | int ret; | ||
252 | int i; | ||
253 | struct vga_switcheroo_client *active = NULL; | ||
254 | |||
255 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { | ||
256 | if (vgasr_priv.clients[i].active == true) { | ||
257 | active = &vgasr_priv.clients[i]; | ||
258 | break; | ||
259 | } | ||
260 | } | ||
261 | if (!active) | ||
262 | return 0; | ||
263 | |||
264 | active->active = false; | ||
242 | 265 | ||
243 | if (new_client->fb_info) { | 266 | if (new_client->fb_info) { |
244 | struct fb_event event; | 267 | struct fb_event event; |
@@ -250,6 +273,9 @@ static int vga_switchto(struct vga_switcheroo_client *new_client) | |||
250 | if (ret) | 273 | if (ret) |
251 | return ret; | 274 | return ret; |
252 | 275 | ||
276 | if (new_client->reprobe) | ||
277 | new_client->reprobe(new_client->pdev); | ||
278 | |||
253 | if (active->pwr_state == VGA_SWITCHEROO_ON) | 279 | if (active->pwr_state == VGA_SWITCHEROO_ON) |
254 | vga_switchoff(active); | 280 | vga_switchoff(active); |
255 | 281 | ||
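Net effect of the split: stage 1 (power the new client on, move the shadow-ROM flag) can always run immediately, while stage 2 (retire the old client, kick the framebuffer notifier, call the new reprobe hook, power the old GPU off) runs right away only when every client reports it can switch; otherwise it is deferred and replayed from vga_switcheroo_process_delayed_switch(), as the later hunks show. The handler's power_state callback also becomes optional, so mux-only platforms can leave it NULL.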
@@ -265,6 +291,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
265 | const char *pdev_name; | 291 | const char *pdev_name; |
266 | int i, ret; | 292 | int i, ret; |
267 | bool delay = false, can_switch; | 293 | bool delay = false, can_switch; |
294 | bool just_mux = false; | ||
268 | int client_id = -1; | 295 | int client_id = -1; |
269 | struct vga_switcheroo_client *client = NULL; | 296 | struct vga_switcheroo_client *client = NULL; |
270 | 297 | ||
@@ -319,6 +346,15 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
319 | if (strncmp(usercmd, "DIS", 3) == 0) | 346 | if (strncmp(usercmd, "DIS", 3) == 0) |
320 | client_id = VGA_SWITCHEROO_DIS; | 347 | client_id = VGA_SWITCHEROO_DIS; |
321 | 348 | ||
349 | if (strncmp(usercmd, "MIGD", 4) == 0) { | ||
350 | just_mux = true; | ||
351 | client_id = VGA_SWITCHEROO_IGD; | ||
352 | } | ||
353 | if (strncmp(usercmd, "MDIS", 4) == 0) { | ||
354 | just_mux = true; | ||
355 | client_id = VGA_SWITCHEROO_DIS; | ||
356 | } | ||
357 | |||
322 | if (client_id == -1) | 358 | if (client_id == -1) |
323 | goto out; | 359 | goto out; |
324 | 360 | ||
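MIGD and MDIS are mux-only variants of IGD and DIS: they call handler->switchto() for the requested client but skip the driver handoff entirely (no power transitions, no framebuffer switch, no reprobe). Assuming debugfs is mounted in the usual place, echo MIGD > /sys/kernel/debug/vgaswitcheroo/switch points the mux at the integrated GPU while both drivers keep running as they were.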
@@ -330,6 +366,12 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
330 | } | 366 | } |
331 | 367 | ||
332 | vgasr_priv.delayed_switch_active = false; | 368 | vgasr_priv.delayed_switch_active = false; |
369 | |||
370 | if (just_mux) { | ||
371 | ret = vgasr_priv.handler->switchto(client_id); | ||
372 | goto out; | ||
373 | } | ||
374 | |||
333 | /* okay we want a switch - test if devices are willing to switch */ | 375 | /* okay we want a switch - test if devices are willing to switch */ |
334 | can_switch = true; | 376 | can_switch = true; |
335 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { | 377 | for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { |
@@ -345,18 +387,22 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
345 | 387 | ||
346 | if (can_switch == true) { | 388 | if (can_switch == true) { |
347 | pdev_name = pci_name(client->pdev); | 389 | pdev_name = pci_name(client->pdev); |
348 | ret = vga_switchto(client); | 390 | ret = vga_switchto_stage1(client); |
349 | if (ret) | 391 | if (ret) |
350 | printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret); | 392 | printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret); |
393 | |||
394 | ret = vga_switchto_stage2(client); | ||
395 | if (ret) | ||
396 | printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret); | ||
397 | |||
351 | } else { | 398 | } else { |
352 | printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id); | 399 | printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id); |
353 | vgasr_priv.delayed_switch_active = true; | 400 | vgasr_priv.delayed_switch_active = true; |
354 | vgasr_priv.delayed_client_id = client_id; | 401 | vgasr_priv.delayed_client_id = client_id; |
355 | 402 | ||
356 | /* we should at least power up the card to | 403 | ret = vga_switchto_stage1(client); |
357 | make the switch faster */ | 404 | if (ret) |
358 | if (client->pwr_state == VGA_SWITCHEROO_OFF) | 405 | printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret); |
359 | vga_switchon(client); | ||
360 | } | 406 | } |
361 | 407 | ||
362 | out: | 408 | out: |
@@ -438,9 +484,9 @@ int vga_switcheroo_process_delayed_switch(void) | |||
438 | goto err; | 484 | goto err; |
439 | 485 | ||
440 | pdev_name = pci_name(client->pdev); | 486 | pdev_name = pci_name(client->pdev); |
441 | ret = vga_switchto(client); | 487 | ret = vga_switchto_stage2(client); |
442 | if (ret) | 488 | if (ret) |
443 | printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret); | 489 | printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret); |
444 | 490 | ||
445 | vgasr_priv.delayed_switch_active = false; | 491 | vgasr_priv.delayed_switch_active = false; |
446 | err = 0; | 492 | err = 0; |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index c380c65da417..ace2b1623b21 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie, | |||
636 | void (*irq_set_state)(void *cookie, bool state), | 636 | void (*irq_set_state)(void *cookie, bool state), |
637 | unsigned int (*set_vga_decode)(void *cookie, bool decode)) | 637 | unsigned int (*set_vga_decode)(void *cookie, bool decode)) |
638 | { | 638 | { |
639 | int ret = -1; | 639 | int ret = -ENODEV; |
640 | struct vga_device *vgadev; | 640 | struct vga_device *vgadev; |
641 | unsigned long flags; | 641 | unsigned long flags; |
642 | 642 | ||
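Returning -1 here was indistinguishable from -EPERM (EPERM is 1), so a caller that propagated the value upward would report the wrong error; -ENODEV correctly says the arbiter has no record of the device.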