Diffstat (limited to 'drivers/gpu')
32 files changed, 267 insertions, 178 deletions
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 09e11a5d921a..fd9d0af4d536 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
| @@ -206,7 +206,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, | |||
| 206 | size_t size; | 206 | size_t size; |
| 207 | int ret; | 207 | int ret; |
| 208 | 208 | ||
| 209 | DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", | 209 | DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", |
| 210 | sizes->surface_width, sizes->surface_height, | 210 | sizes->surface_width, sizes->surface_height, |
| 211 | sizes->surface_bpp); | 211 | sizes->surface_bpp); |
| 212 | 212 | ||
| @@ -220,7 +220,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper, | |||
| 220 | 220 | ||
| 221 | size = mode_cmd.pitches[0] * mode_cmd.height; | 221 | size = mode_cmd.pitches[0] * mode_cmd.height; |
| 222 | obj = drm_gem_cma_create(dev, size); | 222 | obj = drm_gem_cma_create(dev, size); |
| 223 | if (!obj) | 223 | if (IS_ERR(obj)) |
| 224 | return -ENOMEM; | 224 | return -ENOMEM; |
| 225 | 225 | ||
| 226 | fbi = framebuffer_alloc(0, dev->dev); | 226 | fbi = framebuffer_alloc(0, dev->dev); |
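The drm_fb_cma_helper fix above matters because drm_gem_cma_create() reports failure through an ERR_PTR-encoded pointer rather than NULL, so the old "if (!obj)" test could never fire. A minimal userspace sketch of that convention; the helpers mirror include/linux/err.h only as far as needed to compile, and fake_create() is a hypothetical stand-in for the real allocator:

/*
 * Minimal userspace sketch of the ERR_PTR convention assumed by the
 * drm_gem_cma_create() change above: the allocator returns an encoded
 * error pointer on failure, never NULL, so "if (!obj)" cannot catch it.
 * The helpers below mimic include/linux/err.h just enough to compile.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical allocator standing in for drm_gem_cma_create(). */
static void *fake_create(int fail)
{
	static int object;

	if (fail)
		return ERR_PTR(-ENOMEM);
	return &object;
}

int main(void)
{
	void *obj = fake_create(1);

	if (!obj)
		printf("NULL check: never reached for ERR_PTR returns\n");
	if (IS_ERR(obj))
		printf("IS_ERR check: caught error %ld\n", PTR_ERR(obj));
	return 0;
}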
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 441ebc1bdbef..d4b20ceda3fb 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
| @@ -205,8 +205,6 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data) | |||
| 205 | struct drm_gem_object *obj = ptr; | 205 | struct drm_gem_object *obj = ptr; |
| 206 | struct seq_file *m = data; | 206 | struct seq_file *m = data; |
| 207 | 207 | ||
| 208 | seq_printf(m, "name %d size %zd\n", obj->name, obj->size); | ||
| 209 | |||
| 210 | seq_printf(m, "%6d %8zd %7d %8d\n", | 208 | seq_printf(m, "%6d %8zd %7d %8d\n", |
| 211 | obj->name, obj->size, | 209 | obj->name, obj->size, |
| 212 | atomic_read(&obj->handle_count), | 210 | atomic_read(&obj->handle_count), |
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index aaeb6f8d69ce..b8a282ea8751 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
| @@ -64,7 +64,6 @@ int drm_get_platform_dev(struct platform_device *platdev, | |||
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 66 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 67 | dev_set_drvdata(&platdev->dev, dev); | ||
| 68 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); | 67 | ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); |
| 69 | if (ret) | 68 | if (ret) |
| 70 | goto err_g1; | 69 | goto err_g1; |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index aac4e5e1a5b9..6770ee6084b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
| @@ -118,6 +118,13 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); | |||
| 118 | MODULE_PARM_DESC(i915_enable_ppgtt, | 118 | MODULE_PARM_DESC(i915_enable_ppgtt, |
| 119 | "Enable PPGTT (default: true)"); | 119 | "Enable PPGTT (default: true)"); |
| 120 | 120 | ||
| 121 | unsigned int i915_preliminary_hw_support __read_mostly = 0; | ||
| 122 | module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); | ||
| 123 | MODULE_PARM_DESC(preliminary_hw_support, | ||
| 124 | "Enable preliminary hardware support. " | ||
| 125 | "Enable Haswell and ValleyView Support. " | ||
| 126 | "(default: false)"); | ||
| 127 | |||
| 121 | static struct drm_driver driver; | 128 | static struct drm_driver driver; |
| 122 | extern int intel_agp_enabled; | 129 | extern int intel_agp_enabled; |
| 123 | 130 | ||
| @@ -826,6 +833,12 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 826 | struct intel_device_info *intel_info = | 833 | struct intel_device_info *intel_info = |
| 827 | (struct intel_device_info *) ent->driver_data; | 834 | (struct intel_device_info *) ent->driver_data; |
| 828 | 835 | ||
| 836 | if (intel_info->is_haswell || intel_info->is_valleyview) | ||
| 837 | if(!i915_preliminary_hw_support) { | ||
| 838 | DRM_ERROR("Preliminary hardware support disabled\n"); | ||
| 839 | return -ENODEV; | ||
| 840 | } | ||
| 841 | |||
| 829 | /* Only bind to function 0 of the device. Early generations | 842 | /* Only bind to function 0 of the device. Early generations |
| 830 | * used function 1 as a placeholder for multi-head. This causes | 843 | * used function 1 as a placeholder for multi-head. This causes |
| 831 | * us confusion instead, especially on the systems where both | 844 | * us confusion instead, especially on the systems where both |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b84f7861e438..f511fa2f4168 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
| @@ -1217,6 +1217,7 @@ extern int i915_enable_rc6 __read_mostly; | |||
| 1217 | extern int i915_enable_fbc __read_mostly; | 1217 | extern int i915_enable_fbc __read_mostly; |
| 1218 | extern bool i915_enable_hangcheck __read_mostly; | 1218 | extern bool i915_enable_hangcheck __read_mostly; |
| 1219 | extern int i915_enable_ppgtt __read_mostly; | 1219 | extern int i915_enable_ppgtt __read_mostly; |
| 1220 | extern unsigned int i915_preliminary_hw_support __read_mostly; | ||
| 1220 | 1221 | ||
| 1221 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 1222 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
| 1222 | extern int i915_resume(struct drm_device *dev); | 1223 | extern int i915_resume(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d33d02d13c96..107f09befe92 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
| @@ -1407,8 +1407,10 @@ out: | |||
| 1407 | return VM_FAULT_NOPAGE; | 1407 | return VM_FAULT_NOPAGE; |
| 1408 | case -ENOMEM: | 1408 | case -ENOMEM: |
| 1409 | return VM_FAULT_OOM; | 1409 | return VM_FAULT_OOM; |
| 1410 | case -ENOSPC: | ||
| 1411 | return VM_FAULT_SIGBUS; | ||
| 1410 | default: | 1412 | default: |
| 1411 | WARN_ON_ONCE(ret); | 1413 | WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); |
| 1412 | return VM_FAULT_SIGBUS; | 1414 | return VM_FAULT_SIGBUS; |
| 1413 | } | 1415 | } |
| 1414 | } | 1416 | } |
| @@ -1822,10 +1824,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | |||
| 1822 | sg_set_page(sg, page, PAGE_SIZE, 0); | 1824 | sg_set_page(sg, page, PAGE_SIZE, 0); |
| 1823 | } | 1825 | } |
| 1824 | 1826 | ||
| 1827 | obj->pages = st; | ||
| 1828 | |||
| 1825 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 1829 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
| 1826 | i915_gem_object_do_bit_17_swizzle(obj); | 1830 | i915_gem_object_do_bit_17_swizzle(obj); |
| 1827 | 1831 | ||
| 1828 | obj->pages = st; | ||
| 1829 | return 0; | 1832 | return 0; |
| 1830 | 1833 | ||
| 1831 | err_pages: | 1834 | err_pages: |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 893f30164b7e..f78061af7045 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
| @@ -219,20 +219,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
| 219 | intel_encoder_to_crt(to_intel_encoder(encoder)); | 219 | intel_encoder_to_crt(to_intel_encoder(encoder)); |
| 220 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 220 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 221 | struct drm_i915_private *dev_priv = dev->dev_private; | 221 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 222 | int dpll_md_reg; | 222 | u32 adpa; |
| 223 | u32 adpa, dpll_md; | ||
| 224 | |||
| 225 | dpll_md_reg = DPLL_MD(intel_crtc->pipe); | ||
| 226 | |||
| 227 | /* | ||
| 228 | * Disable separate mode multiplier used when cloning SDVO to CRT | ||
| 229 | * XXX this needs to be adjusted when we really are cloning | ||
| 230 | */ | ||
| 231 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | ||
| 232 | dpll_md = I915_READ(dpll_md_reg); | ||
| 233 | I915_WRITE(dpll_md_reg, | ||
| 234 | dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); | ||
| 235 | } | ||
| 236 | 223 | ||
| 237 | adpa = ADPA_HOTPLUG_BITS; | 224 | adpa = ADPA_HOTPLUG_BITS; |
| 238 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 225 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 682bd3729baf..461a637f1ef7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
| @@ -7892,6 +7892,34 @@ struct intel_quirk { | |||
| 7892 | void (*hook)(struct drm_device *dev); | 7892 | void (*hook)(struct drm_device *dev); |
| 7893 | }; | 7893 | }; |
| 7894 | 7894 | ||
| 7895 | /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ | ||
| 7896 | struct intel_dmi_quirk { | ||
| 7897 | void (*hook)(struct drm_device *dev); | ||
| 7898 | const struct dmi_system_id (*dmi_id_list)[]; | ||
| 7899 | }; | ||
| 7900 | |||
| 7901 | static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) | ||
| 7902 | { | ||
| 7903 | DRM_INFO("Backlight polarity reversed on %s\n", id->ident); | ||
| 7904 | return 1; | ||
| 7905 | } | ||
| 7906 | |||
| 7907 | static const struct intel_dmi_quirk intel_dmi_quirks[] = { | ||
| 7908 | { | ||
| 7909 | .dmi_id_list = &(const struct dmi_system_id[]) { | ||
| 7910 | { | ||
| 7911 | .callback = intel_dmi_reverse_brightness, | ||
| 7912 | .ident = "NCR Corporation", | ||
| 7913 | .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), | ||
| 7914 | DMI_MATCH(DMI_PRODUCT_NAME, ""), | ||
| 7915 | }, | ||
| 7916 | }, | ||
| 7917 | { } /* terminating entry */ | ||
| 7918 | }, | ||
| 7919 | .hook = quirk_invert_brightness, | ||
| 7920 | }, | ||
| 7921 | }; | ||
| 7922 | |||
| 7895 | static struct intel_quirk intel_quirks[] = { | 7923 | static struct intel_quirk intel_quirks[] = { |
| 7896 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | 7924 | /* HP Mini needs pipe A force quirk (LP: #322104) */ |
| 7897 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, | 7925 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
| @@ -7931,6 +7959,10 @@ static void intel_init_quirks(struct drm_device *dev) | |||
| 7931 | q->subsystem_device == PCI_ANY_ID)) | 7959 | q->subsystem_device == PCI_ANY_ID)) |
| 7932 | q->hook(dev); | 7960 | q->hook(dev); |
| 7933 | } | 7961 | } |
| 7962 | for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { | ||
| 7963 | if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) | ||
| 7964 | intel_dmi_quirks[i].hook(dev); | ||
| 7965 | } | ||
| 7934 | } | 7966 | } |
| 7935 | 7967 | ||
| 7936 | /* Disable the VGA plane that we never use */ | 7968 | /* Disable the VGA plane that we never use */ |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1b727a5c9ee5..368ed8ef1600 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
| @@ -1797,7 +1797,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
| 1797 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1797 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
| 1798 | break; | 1798 | break; |
| 1799 | if (i == intel_dp->lane_count && voltage_tries == 5) { | 1799 | if (i == intel_dp->lane_count && voltage_tries == 5) { |
| 1800 | if (++loop_tries == 5) { | 1800 | ++loop_tries; |
| 1801 | if (loop_tries == 5) { | ||
| 1801 | DRM_DEBUG_KMS("too many full retries, give up\n"); | 1802 | DRM_DEBUG_KMS("too many full retries, give up\n"); |
| 1802 | break; | 1803 | break; |
| 1803 | } | 1804 | } |
| @@ -1807,11 +1808,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
| 1807 | } | 1808 | } |
| 1808 | 1809 | ||
| 1809 | /* Check to see if we've tried the same voltage 5 times */ | 1810 | /* Check to see if we've tried the same voltage 5 times */ |
| 1810 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { | 1811 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
| 1811 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
| 1812 | voltage_tries = 0; | ||
| 1813 | } else | ||
| 1814 | ++voltage_tries; | 1812 | ++voltage_tries; |
| 1813 | if (voltage_tries == 5) { | ||
| 1814 | DRM_DEBUG_KMS("too many voltage retries, give up\n"); | ||
| 1815 | break; | ||
| 1816 | } | ||
| 1817 | } else | ||
| 1818 | voltage_tries = 0; | ||
| 1819 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
| 1815 | 1820 | ||
| 1816 | /* Compute new intel_dp->train_set as requested by target */ | 1821 | /* Compute new intel_dp->train_set as requested by target */ |
| 1817 | intel_get_adjust_train(intel_dp, link_status); | 1822 | intel_get_adjust_train(intel_dp, link_status); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e3166df55daa..edba93b3474b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
| @@ -777,6 +777,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
| 777 | DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), | 777 | DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), |
| 778 | }, | 778 | }, |
| 779 | }, | 779 | }, |
| 780 | { | ||
| 781 | .callback = intel_no_lvds_dmi_callback, | ||
| 782 | .ident = "Supermicro X7SPA-H", | ||
| 783 | .matches = { | ||
| 784 | DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), | ||
| 785 | DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), | ||
| 786 | }, | ||
| 787 | }, | ||
| 780 | 788 | ||
| 781 | { } /* terminating entry */ | 789 | { } /* terminating entry */ |
| 782 | }; | 790 | }; |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0007a4d9bf6e..c01d97db0061 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
| @@ -139,6 +139,11 @@ struct intel_sdvo { | |||
| 139 | 139 | ||
| 140 | /* DDC bus used by this SDVO encoder */ | 140 | /* DDC bus used by this SDVO encoder */ |
| 141 | uint8_t ddc_bus; | 141 | uint8_t ddc_bus; |
| 142 | |||
| 143 | /* | ||
| 144 | * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd | ||
| 145 | */ | ||
| 146 | uint8_t dtd_sdvo_flags; | ||
| 142 | }; | 147 | }; |
| 143 | 148 | ||
| 144 | struct intel_sdvo_connector { | 149 | struct intel_sdvo_connector { |
| @@ -984,6 +989,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, | |||
| 984 | return false; | 989 | return false; |
| 985 | 990 | ||
| 986 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 991 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
| 992 | intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags; | ||
| 987 | 993 | ||
| 988 | return true; | 994 | return true; |
| 989 | } | 995 | } |
| @@ -1092,6 +1098,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
| 1092 | * adjusted_mode. | 1098 | * adjusted_mode. |
| 1093 | */ | 1099 | */ |
| 1094 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | 1100 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); |
| 1101 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) | ||
| 1102 | input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; | ||
| 1095 | if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) | 1103 | if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) |
| 1096 | DRM_INFO("Setting input timings on %s failed\n", | 1104 | DRM_INFO("Setting input timings on %s failed\n", |
| 1097 | SDVO_NAME(intel_sdvo)); | 1105 | SDVO_NAME(intel_sdvo)); |
| @@ -2277,10 +2285,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) | |||
| 2277 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; | 2285 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; |
| 2278 | } | 2286 | } |
| 2279 | 2287 | ||
| 2280 | /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling, | 2288 | /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */ |
| 2281 | * as opposed to native LVDS, where we upscale with the panel-fitter | 2289 | intel_sdvo->base.cloneable = false; |
| 2282 | * (and hence only the native LVDS resolution could be cloned). */ | ||
| 2283 | intel_sdvo->base.cloneable = true; | ||
| 2284 | 2290 | ||
| 2285 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); | 2291 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
| 2286 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) | 2292 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) |
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 1f34549aff18..70586fde69cf 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
| @@ -39,6 +39,11 @@ nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj) | |||
| 39 | nv_wo32(gpuobj, i, 0x00000000); | 39 | nv_wo32(gpuobj, i, 0x00000000); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | if (gpuobj->node) { | ||
| 43 | nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap, | ||
| 44 | &gpuobj->node); | ||
| 45 | } | ||
| 46 | |||
| 42 | if (gpuobj->heap.block_size) | 47 | if (gpuobj->heap.block_size) |
| 43 | nouveau_mm_fini(&gpuobj->heap); | 48 | nouveau_mm_fini(&gpuobj->heap); |
| 44 | 49 | ||
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index bfddf87926dd..4d6206448670 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
| @@ -236,7 +236,7 @@ nouveau_mm_fini(struct nouveau_mm *mm) | |||
| 236 | int nodes = 0; | 236 | int nodes = 0; |
| 237 | 237 | ||
| 238 | list_for_each_entry(node, &mm->nodes, nl_entry) { | 238 | list_for_each_entry(node, &mm->nodes, nl_entry) { |
| 239 | if (nodes++ == mm->heap_nodes) | 239 | if (WARN_ON(nodes++ == mm->heap_nodes)) |
| 240 | return -EBUSY; | 240 | return -EBUSY; |
| 241 | } | 241 | } |
| 242 | 242 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index dcb5c2befc92..70ca7d5a1aa1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
| @@ -72,7 +72,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios) | |||
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | data = of_get_property(dn, "NVDA,BMP", &size); | 74 | data = of_get_property(dn, "NVDA,BMP", &size); |
| 75 | if (data) { | 75 | if (data && size) { |
| 76 | bios->size = size; | 76 | bios->size = size; |
| 77 | bios->data = kmalloc(bios->size, GFP_KERNEL); | 77 | bios->data = kmalloc(bios->size, GFP_KERNEL); |
| 78 | if (bios->data) | 78 | if (bios->data) |
| @@ -104,6 +104,9 @@ nouveau_bios_shadow_pramin(struct nouveau_bios *bios) | |||
| 104 | goto out; | 104 | goto out; |
| 105 | 105 | ||
| 106 | bios->size = nv_rd08(bios, 0x700002) * 512; | 106 | bios->size = nv_rd08(bios, 0x700002) * 512; |
| 107 | if (!bios->size) | ||
| 108 | goto out; | ||
| 109 | |||
| 107 | bios->data = kmalloc(bios->size, GFP_KERNEL); | 110 | bios->data = kmalloc(bios->size, GFP_KERNEL); |
| 108 | if (bios->data) { | 111 | if (bios->data) { |
| 109 | for (i = 0; i < bios->size; i++) | 112 | for (i = 0; i < bios->size; i++) |
| @@ -155,6 +158,9 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) | |||
| 155 | 158 | ||
| 156 | /* read entire bios image to system memory */ | 159 | /* read entire bios image to system memory */ |
| 157 | bios->size = nv_rd08(bios, 0x300002) * 512; | 160 | bios->size = nv_rd08(bios, 0x300002) * 512; |
| 161 | if (!bios->size) | ||
| 162 | goto out; | ||
| 163 | |||
| 158 | bios->data = kmalloc(bios->size, GFP_KERNEL); | 164 | bios->data = kmalloc(bios->size, GFP_KERNEL); |
| 159 | if (bios->data) { | 165 | if (bios->data) { |
| 160 | for (i = 0; i < bios->size; i++) | 166 | for (i = 0; i < bios->size; i++) |
| @@ -186,14 +192,22 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios) | |||
| 186 | { | 192 | { |
| 187 | struct pci_dev *pdev = nv_device(bios)->pdev; | 193 | struct pci_dev *pdev = nv_device(bios)->pdev; |
| 188 | int ret, cnt, i; | 194 | int ret, cnt, i; |
| 189 | u8 data[3]; | ||
| 190 | 195 | ||
| 191 | if (!nouveau_acpi_rom_supported(pdev)) | 196 | if (!nouveau_acpi_rom_supported(pdev)) { |
| 197 | bios->data = NULL; | ||
| 192 | return; | 198 | return; |
| 199 | } | ||
| 193 | 200 | ||
| 194 | bios->size = 0; | 201 | bios->size = 0; |
| 195 | if (nouveau_acpi_get_bios_chunk(data, 0, 3) == 3) | 202 | bios->data = kmalloc(4096, GFP_KERNEL); |
| 196 | bios->size = data[2] * 512; | 203 | if (bios->data) { |
| 204 | if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096) | ||
| 205 | bios->size = bios->data[2] * 512; | ||
| 206 | kfree(bios->data); | ||
| 207 | } | ||
| 208 | |||
| 209 | if (!bios->size) | ||
| 210 | return; | ||
| 197 | 211 | ||
| 198 | bios->data = kmalloc(bios->size, GFP_KERNEL); | 212 | bios->data = kmalloc(bios->size, GFP_KERNEL); |
| 199 | for (i = 0; bios->data && i < bios->size; i += cnt) { | 213 | for (i = 0; bios->data && i < bios->size; i += cnt) { |
| @@ -229,12 +243,14 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios) | |||
| 229 | static int | 243 | static int |
| 230 | nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) | 244 | nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) |
| 231 | { | 245 | { |
| 232 | if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) { | 246 | if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 || |
| 247 | bios->data[1] != 0xAA) { | ||
| 233 | nv_info(bios, "... signature not found\n"); | 248 | nv_info(bios, "... signature not found\n"); |
| 234 | return 0; | 249 | return 0; |
| 235 | } | 250 | } |
| 236 | 251 | ||
| 237 | if (nvbios_checksum(bios->data, bios->data[2] * 512)) { | 252 | if (nvbios_checksum(bios->data, |
| 253 | min_t(u32, bios->data[2] * 512, bios->size))) { | ||
| 238 | nv_info(bios, "... checksum invalid\n"); | 254 | nv_info(bios, "... checksum invalid\n"); |
| 239 | /* if a ro image is somewhat bad, it's probably all rubbish */ | 255 | /* if a ro image is somewhat bad, it's probably all rubbish */ |
| 240 | return writeable ? 2 : 1; | 256 | return writeable ? 2 : 1; |
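The checksum change above keeps nvbios_checksum() from running past the shadowed copy when the image's own length byte claims more data than was actually fetched. A standalone sketch of the same clamp-before-checksum idea; the names are illustrative, not the nouveau helpers:

/*
 * Standalone sketch: clamp the length derived from image data to the
 * number of bytes actually shadowed before checksumming, mirroring the
 * min_t() clamp added above. Names are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint8_t checksum(const uint8_t *data, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += data[i];
	return sum;	/* a valid image sums to zero */
}

int main(void)
{
	/* 512-byte shadow; byte 2 claims the image is 2 * 512 bytes long */
	uint8_t shadow[512] = { 0x55, 0xAA, 0x02 };
	size_t shadow_size = sizeof(shadow);
	size_t claimed = shadow[2] * 512;
	/* unclamped, the checksum would read 1024 bytes from a 512-byte buffer */
	size_t safe_len = claimed < shadow_size ? claimed : shadow_size;

	printf("claimed %zu, shadowed %zu, checksumming %zu bytes\n",
	       claimed, shadow_size, safe_len);
	printf("checksum %s\n", checksum(shadow, safe_len) ? "invalid" : "ok");
	return 0;
}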
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index 5e5f4cddae3c..f835501203e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
| @@ -157,11 +157,10 @@ pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) | |||
| 157 | while (map->reg) { | 157 | while (map->reg) { |
| 158 | if (map->reg == reg && *ver >= 0x20) { | 158 | if (map->reg == reg && *ver >= 0x20) { |
| 159 | u16 addr = (data += hdr); | 159 | u16 addr = (data += hdr); |
| 160 | *type = map->type; | ||
| 160 | while (cnt--) { | 161 | while (cnt--) { |
| 161 | if (nv_ro32(bios, data) == map->reg) { | 162 | if (nv_ro32(bios, data) == map->reg) |
| 162 | *type = map->type; | ||
| 163 | return data; | 163 | return data; |
| 164 | } | ||
| 165 | data += *len; | 164 | data += *len; |
| 166 | } | 165 | } |
| 167 | return addr; | 166 | return addr; |
| @@ -200,11 +199,10 @@ pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len) | |||
| 200 | while (map->reg) { | 199 | while (map->reg) { |
| 201 | if (map->type == type && *ver >= 0x20) { | 200 | if (map->type == type && *ver >= 0x20) { |
| 202 | u16 addr = (data += hdr); | 201 | u16 addr = (data += hdr); |
| 202 | *reg = map->reg; | ||
| 203 | while (cnt--) { | 203 | while (cnt--) { |
| 204 | if (nv_ro32(bios, data) == map->reg) { | 204 | if (nv_ro32(bios, data) == map->reg) |
| 205 | *reg = map->reg; | ||
| 206 | return data; | 205 | return data; |
| 207 | } | ||
| 208 | data += *len; | 206 | data += *len; |
| 209 | } | 207 | } |
| 210 | return addr; | 208 | return addr; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 42d7539e6525..27fb1af7a779 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
| @@ -237,6 +237,7 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 237 | return ret; | 237 | return ret; |
| 238 | 238 | ||
| 239 | priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12; | 239 | priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12; |
| 240 | priv->base.ram.type = NV_MEM_TYPE_STOLEN; | ||
| 240 | break; | 241 | break; |
| 241 | default: | 242 | default: |
| 242 | ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, | 243 | ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 0203e1e12caa..49050d991e75 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
| @@ -92,7 +92,8 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 92 | struct nv04_vmmgr_priv *priv; | 92 | struct nv04_vmmgr_priv *priv; |
| 93 | int ret; | 93 | int ret; |
| 94 | 94 | ||
| 95 | if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { | 95 | if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || |
| 96 | !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { | ||
| 96 | return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, | 97 | return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, |
| 97 | data, size, pobject); | 98 | data, size, pobject); |
| 98 | } | 99 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index 0ac18d05a146..aa8131436e3d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
| @@ -163,7 +163,8 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 163 | struct nv04_vmmgr_priv *priv; | 163 | struct nv04_vmmgr_priv *priv; |
| 164 | int ret; | 164 | int ret; |
| 165 | 165 | ||
| 166 | if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { | 166 | if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || |
| 167 | !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { | ||
| 167 | return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, | 168 | return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, |
| 168 | data, size, pobject); | 169 | data, size, pobject); |
| 169 | } | 170 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8f98e5a8c488..d2f8ffeed742 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
| @@ -530,9 +530,11 @@ nouveau_page_flip_reserve(struct nouveau_bo *old_bo, | |||
| 530 | if (ret) | 530 | if (ret) |
| 531 | goto fail; | 531 | goto fail; |
| 532 | 532 | ||
| 533 | ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); | 533 | if (likely(old_bo != new_bo)) { |
| 534 | if (ret) | 534 | ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); |
| 535 | goto fail_unreserve; | 535 | if (ret) |
| 536 | goto fail_unreserve; | ||
| 537 | } | ||
| 536 | 538 | ||
| 537 | return 0; | 539 | return 0; |
| 538 | 540 | ||
| @@ -551,8 +553,10 @@ nouveau_page_flip_unreserve(struct nouveau_bo *old_bo, | |||
| 551 | nouveau_bo_fence(new_bo, fence); | 553 | nouveau_bo_fence(new_bo, fence); |
| 552 | ttm_bo_unreserve(&new_bo->bo); | 554 | ttm_bo_unreserve(&new_bo->bo); |
| 553 | 555 | ||
| 554 | nouveau_bo_fence(old_bo, fence); | 556 | if (likely(old_bo != new_bo)) { |
| 555 | ttm_bo_unreserve(&old_bo->bo); | 557 | nouveau_bo_fence(old_bo, fence); |
| 558 | ttm_bo_unreserve(&old_bo->bo); | ||
| 559 | } | ||
| 556 | 560 | ||
| 557 | nouveau_bo_unpin(old_bo); | 561 | nouveau_bo_unpin(old_bo); |
| 558 | } | 562 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 49cbb3795a10..ba498f8e47a2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
| @@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, | |||
| 184 | struct radeon_backlight_privdata *pdata; | 184 | struct radeon_backlight_privdata *pdata; |
| 185 | struct radeon_encoder_atom_dig *dig; | 185 | struct radeon_encoder_atom_dig *dig; |
| 186 | u8 backlight_level; | 186 | u8 backlight_level; |
| 187 | char bl_name[16]; | ||
| 187 | 188 | ||
| 188 | if (!radeon_encoder->enc_priv) | 189 | if (!radeon_encoder->enc_priv) |
| 189 | return; | 190 | return; |
| @@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, | |||
| 203 | memset(&props, 0, sizeof(props)); | 204 | memset(&props, 0, sizeof(props)); |
| 204 | props.max_brightness = RADEON_MAX_BL_LEVEL; | 205 | props.max_brightness = RADEON_MAX_BL_LEVEL; |
| 205 | props.type = BACKLIGHT_RAW; | 206 | props.type = BACKLIGHT_RAW; |
| 206 | bd = backlight_device_register("radeon_bl", &drm_connector->kdev, | 207 | snprintf(bl_name, sizeof(bl_name), |
| 208 | "radeon_bl%d", dev->primary->index); | ||
| 209 | bd = backlight_device_register(bl_name, &drm_connector->kdev, | ||
| 207 | pdata, &radeon_atom_backlight_ops, &props); | 210 | pdata, &radeon_atom_backlight_ops, &props); |
| 208 | if (IS_ERR(bd)) { | 211 | if (IS_ERR(bd)) { |
| 209 | DRM_ERROR("Backlight registration failed\n"); | 212 | DRM_ERROR("Backlight registration failed\n"); |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 573ed1bc6cf7..30271b641913 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
| @@ -2829,6 +2829,7 @@ static bool evergreen_vm_reg_valid(u32 reg) | |||
| 2829 | case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: | 2829 | case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: |
| 2830 | return true; | 2830 | return true; |
| 2831 | default: | 2831 | default: |
| 2832 | DRM_ERROR("Invalid register 0x%x in CS\n", reg); | ||
| 2832 | return false; | 2833 | return false; |
| 2833 | } | 2834 | } |
| 2834 | } | 2835 | } |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8c74c729586d..81e6a568c29d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
| @@ -1538,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe, | |||
| 1538 | { | 1538 | { |
| 1539 | struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; | 1539 | struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; |
| 1540 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | 1540 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); |
| 1541 | int i; | ||
| 1542 | 1541 | ||
| 1543 | radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2)); | 1542 | while (count) { |
| 1544 | radeon_ring_write(ring, pe); | 1543 | unsigned ndw = 1 + count * 2; |
| 1545 | radeon_ring_write(ring, upper_32_bits(pe) & 0xff); | 1544 | if (ndw > 0x3FFF) |
| 1546 | for (i = 0; i < count; ++i) { | 1545 | ndw = 0x3FFF; |
| 1547 | uint64_t value = 0; | 1546 | |
| 1548 | if (flags & RADEON_VM_PAGE_SYSTEM) { | 1547 | radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw)); |
| 1549 | value = radeon_vm_map_gart(rdev, addr); | 1548 | radeon_ring_write(ring, pe); |
| 1550 | value &= 0xFFFFFFFFFFFFF000ULL; | 1549 | radeon_ring_write(ring, upper_32_bits(pe) & 0xff); |
| 1551 | addr += incr; | 1550 | for (; ndw > 1; ndw -= 2, --count, pe += 8) { |
| 1552 | 1551 | uint64_t value = 0; | |
| 1553 | } else if (flags & RADEON_VM_PAGE_VALID) { | 1552 | if (flags & RADEON_VM_PAGE_SYSTEM) { |
| 1554 | value = addr; | 1553 | value = radeon_vm_map_gart(rdev, addr); |
| 1555 | addr += incr; | 1554 | value &= 0xFFFFFFFFFFFFF000ULL; |
| 1556 | } | 1555 | addr += incr; |
| 1556 | |||
| 1557 | } else if (flags & RADEON_VM_PAGE_VALID) { | ||
| 1558 | value = addr; | ||
| 1559 | addr += incr; | ||
| 1560 | } | ||
| 1557 | 1561 | ||
| 1558 | value |= r600_flags; | 1562 | value |= r600_flags; |
| 1559 | radeon_ring_write(ring, value); | 1563 | radeon_ring_write(ring, value); |
| 1560 | radeon_ring_write(ring, upper_32_bits(value)); | 1564 | radeon_ring_write(ring, upper_32_bits(value)); |
| 1565 | } | ||
| 1561 | } | 1566 | } |
| 1562 | } | 1567 | } |
| 1563 | 1568 | ||
| @@ -1586,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
| 1586 | /* bits 0-7 are the VM contexts0-7 */ | 1591 | /* bits 0-7 are the VM contexts0-7 */ |
| 1587 | radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); | 1592 | radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); |
| 1588 | radeon_ring_write(ring, 1 << vm->id); | 1593 | radeon_ring_write(ring, 1 << vm->id); |
| 1594 | |||
| 1595 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ | ||
| 1596 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | ||
| 1597 | radeon_ring_write(ring, 0x0); | ||
| 1589 | } | 1598 | } |
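The loop introduced above splits a large page-table update into several packets because a single PACKET3 can only carry a limited number of dwords (the code caps it at 0x3FFF). A generic sketch of that chunking pattern, with a deliberately tiny limit so the split is visible; emit() and MAX_NDW are stand-ins, not the radeon ring API:

/*
 * Generic sketch of splitting a long update into bounded packets, the
 * pattern used by cayman_vm_set_page() above. MAX_NDW stands in for the
 * hardware's 0x3FFF dword limit; emit() stands in for radeon_ring_write().
 */
#include <stdio.h>

#define MAX_NDW 7	/* small limit so the split is visible */

static void emit(const char *what, unsigned value)
{
	printf("  %s = %u\n", what, value);
}

static void set_pages(unsigned count)
{
	while (count) {
		/* header takes 1 dword, each entry takes 2 */
		unsigned ndw = 1 + count * 2;

		if (ndw > MAX_NDW)
			ndw = MAX_NDW;

		printf("packet with %u dwords:\n", ndw);
		emit("header", ndw);
		for (; ndw > 1; ndw -= 2, --count) {
			emit("entry low dword", count);
			emit("entry high dword", 0);
		}
	}
}

int main(void)
{
	set_pages(10);	/* needs several packets with MAX_NDW == 7 */
	return 0;
}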
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2423d1b5d385..cbef6815907a 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
| @@ -502,6 +502,7 @@ | |||
| 502 | #define PACKET3_MPEG_INDEX 0x3A | 502 | #define PACKET3_MPEG_INDEX 0x3A |
| 503 | #define PACKET3_WAIT_REG_MEM 0x3C | 503 | #define PACKET3_WAIT_REG_MEM 0x3C |
| 504 | #define PACKET3_MEM_WRITE 0x3D | 504 | #define PACKET3_MEM_WRITE 0x3D |
| 505 | #define PACKET3_PFP_SYNC_ME 0x42 | ||
| 505 | #define PACKET3_SURFACE_SYNC 0x43 | 506 | #define PACKET3_SURFACE_SYNC 0x43 |
| 506 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | 507 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) |
| 507 | # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) | 508 | # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) |
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 1aa3f910b993..37f6a907aea4 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
| @@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function, | |||
| 87 | atpx_arg_elements[1].integer.value = 0; | 87 | atpx_arg_elements[1].integer.value = 0; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer); | 90 | status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); |
| 91 | 91 | ||
| 92 | /* Fail only if calling the method fails and ATPX is supported */ | 92 | /* Fail only if calling the method fails and ATPX is supported */ |
| 93 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | 93 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { |
| @@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, | |||
| 373 | } | 373 | } |
| 374 | 374 | ||
| 375 | /** | 375 | /** |
| 376 | * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles | 376 | * radeon_atpx_pci_probe_handle - look up the ATPX handle |
| 377 | * | 377 | * |
| 378 | * @pdev: pci device | 378 | * @pdev: pci device |
| 379 | * | 379 | * |
| 380 | * Look up the ATPX and ATRM handles (all asics). | 380 | * Look up the ATPX handles (all asics). |
| 381 | * Returns true if the handles are found, false if not. | 381 | * Returns true if the handles are found, false if not. |
| 382 | */ | 382 | */ |
| 383 | static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) | 383 | static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bd13ca09eb62..e2f5f888c374 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
| @@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev) | |||
| 355 | */ | 355 | */ |
| 356 | void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) | 356 | void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) |
| 357 | { | 357 | { |
| 358 | uint64_t limit = (uint64_t)radeon_vram_limit << 20; | ||
| 359 | |||
| 358 | mc->vram_start = base; | 360 | mc->vram_start = base; |
| 359 | if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { | 361 | if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { |
| 360 | dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); | 362 | dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); |
| @@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 | |||
| 368 | mc->mc_vram_size = mc->aper_size; | 370 | mc->mc_vram_size = mc->aper_size; |
| 369 | } | 371 | } |
| 370 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | 372 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
| 371 | if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) | 373 | if (limit && limit < mc->real_vram_size) |
| 372 | mc->real_vram_size = radeon_vram_limit; | 374 | mc->real_vram_size = limit; |
| 373 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", | 375 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", |
| 374 | mc->mc_vram_size >> 20, mc->vram_start, | 376 | mc->mc_vram_size >> 20, mc->vram_start, |
| 375 | mc->vram_end, mc->real_vram_size >> 20); | 377 | mc->vram_end, mc->real_vram_size >> 20); |
| @@ -835,6 +837,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) | |||
| 835 | } | 837 | } |
| 836 | 838 | ||
| 837 | /** | 839 | /** |
| 840 | * radeon_check_pot_argument - check that argument is a power of two | ||
| 841 | * | ||
| 842 | * @arg: value to check | ||
| 843 | * | ||
| 844 | * Validates that a certain argument is a power of two (all asics). | ||
| 845 | * Returns true if argument is valid. | ||
| 846 | */ | ||
| 847 | static bool radeon_check_pot_argument(int arg) | ||
| 848 | { | ||
| 849 | return (arg & (arg - 1)) == 0; | ||
| 850 | } | ||
| 851 | |||
| 852 | /** | ||
| 838 | * radeon_check_arguments - validate module params | 853 | * radeon_check_arguments - validate module params |
| 839 | * | 854 | * |
| 840 | * @rdev: radeon_device pointer | 855 | * @rdev: radeon_device pointer |
| @@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) | |||
| 845 | static void radeon_check_arguments(struct radeon_device *rdev) | 860 | static void radeon_check_arguments(struct radeon_device *rdev) |
| 846 | { | 861 | { |
| 847 | /* vramlimit must be a power of two */ | 862 | /* vramlimit must be a power of two */ |
| 848 | switch (radeon_vram_limit) { | 863 | if (!radeon_check_pot_argument(radeon_vram_limit)) { |
| 849 | case 0: | ||
| 850 | case 4: | ||
| 851 | case 8: | ||
| 852 | case 16: | ||
| 853 | case 32: | ||
| 854 | case 64: | ||
| 855 | case 128: | ||
| 856 | case 256: | ||
| 857 | case 512: | ||
| 858 | case 1024: | ||
| 859 | case 2048: | ||
| 860 | case 4096: | ||
| 861 | break; | ||
| 862 | default: | ||
| 863 | dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", | 864 | dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", |
| 864 | radeon_vram_limit); | 865 | radeon_vram_limit); |
| 865 | radeon_vram_limit = 0; | 866 | radeon_vram_limit = 0; |
| 866 | break; | ||
| 867 | } | 867 | } |
| 868 | radeon_vram_limit = radeon_vram_limit << 20; | 868 | |
| 869 | /* gtt size must be power of two and greater or equal to 32M */ | 869 | /* gtt size must be power of two and greater or equal to 32M */ |
| 870 | switch (radeon_gart_size) { | 870 | if (radeon_gart_size < 32) { |
| 871 | case 4: | ||
| 872 | case 8: | ||
| 873 | case 16: | ||
| 874 | dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", | 871 | dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", |
| 875 | radeon_gart_size); | 872 | radeon_gart_size); |
| 876 | radeon_gart_size = 512; | 873 | radeon_gart_size = 512; |
| 877 | break; | 874 | |
| 878 | case 32: | 875 | } else if (!radeon_check_pot_argument(radeon_gart_size)) { |
| 879 | case 64: | ||
| 880 | case 128: | ||
| 881 | case 256: | ||
| 882 | case 512: | ||
| 883 | case 1024: | ||
| 884 | case 2048: | ||
| 885 | case 4096: | ||
| 886 | break; | ||
| 887 | default: | ||
| 888 | dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", | 876 | dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", |
| 889 | radeon_gart_size); | 877 | radeon_gart_size); |
| 890 | radeon_gart_size = 512; | 878 | radeon_gart_size = 512; |
| 891 | break; | ||
| 892 | } | 879 | } |
| 893 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 880 | rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; |
| 881 | |||
| 894 | /* AGP mode can only be -1, 1, 2, 4, 8 */ | 882 | /* AGP mode can only be -1, 1, 2, 4, 8 */ |
| 895 | switch (radeon_agpmode) { | 883 | switch (radeon_agpmode) { |
| 896 | case -1: | 884 | case -1: |
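radeon_check_pot_argument() above replaces the hard-coded switch with the classic bit trick: a power of two has a single bit set, so clearing its lowest set bit with arg & (arg - 1) leaves zero, and 0 passes as well, matching an unset limit. A tiny self-contained check:

/* Tiny demonstration of the (x & (x - 1)) == 0 power-of-two test used above. */
#include <stdio.h>

static int is_pot(int arg)
{
	return (arg & (arg - 1)) == 0;	/* also true for 0, i.e. "not set" */
}

int main(void)
{
	int samples[] = { 0, 4, 96, 256, 384, 512 };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%4d -> %s\n", samples[i],
		       is_pot(samples[i]) ? "accepted" : "rejected");
	return 0;
}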
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a7677dd1ce98..4debd60e5aa6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
| @@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
| 355 | DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", | 355 | DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", |
| 356 | rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); | 356 | rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages); |
| 357 | /* Allocate pages table */ | 357 | /* Allocate pages table */ |
| 358 | rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages, | 358 | rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages); |
| 359 | GFP_KERNEL); | ||
| 360 | if (rdev->gart.pages == NULL) { | 359 | if (rdev->gart.pages == NULL) { |
| 361 | radeon_gart_fini(rdev); | 360 | radeon_gart_fini(rdev); |
| 362 | return -ENOMEM; | 361 | return -ENOMEM; |
| 363 | } | 362 | } |
| 364 | rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) * | 363 | rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * |
| 365 | rdev->gart.num_cpu_pages, GFP_KERNEL); | 364 | rdev->gart.num_cpu_pages); |
| 366 | if (rdev->gart.pages_addr == NULL) { | 365 | if (rdev->gart.pages_addr == NULL) { |
| 367 | radeon_gart_fini(rdev); | 366 | radeon_gart_fini(rdev); |
| 368 | return -ENOMEM; | 367 | return -ENOMEM; |
| @@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev) | |||
| 388 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); | 387 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); |
| 389 | } | 388 | } |
| 390 | rdev->gart.ready = false; | 389 | rdev->gart.ready = false; |
| 391 | kfree(rdev->gart.pages); | 390 | vfree(rdev->gart.pages); |
| 392 | kfree(rdev->gart.pages_addr); | 391 | vfree(rdev->gart.pages_addr); |
| 393 | rdev->gart.pages = NULL; | 392 | rdev->gart.pages = NULL; |
| 394 | rdev->gart.pages_addr = NULL; | 393 | rdev->gart.pages_addr = NULL; |
| 395 | 394 | ||
| @@ -577,7 +576,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev) | |||
| 577 | * | 576 | * |
| 578 | * Global and local mutex must be locked! | 577 | * Global and local mutex must be locked! |
| 579 | */ | 578 | */ |
| 580 | int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm) | 579 | static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm) |
| 581 | { | 580 | { |
| 582 | struct radeon_vm *vm_evict; | 581 | struct radeon_vm *vm_evict; |
| 583 | 582 | ||
| @@ -1036,8 +1035,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, | |||
| 1036 | pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); | 1035 | pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); |
| 1037 | pte += (addr & mask) * 8; | 1036 | pte += (addr & mask) * 8; |
| 1038 | 1037 | ||
| 1039 | if (((last_pte + 8 * count) != pte) || | 1038 | if ((last_pte + 8 * count) != pte) { |
| 1040 | ((count + nptes) > 1 << 11)) { | ||
| 1041 | 1039 | ||
| 1042 | if (count) { | 1040 | if (count) { |
| 1043 | radeon_asic_vm_set_page(rdev, last_pte, | 1041 | radeon_asic_vm_set_page(rdev, last_pte, |
| @@ -1148,17 +1146,17 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, | |||
| 1148 | 1146 | ||
| 1149 | if (RADEON_VM_BLOCK_SIZE > 11) | 1147 | if (RADEON_VM_BLOCK_SIZE > 11) |
| 1150 | /* reserve space for one header for every 2k dwords */ | 1148 | /* reserve space for one header for every 2k dwords */ |
| 1151 | ndw += (nptes >> 11) * 3; | 1149 | ndw += (nptes >> 11) * 4; |
| 1152 | else | 1150 | else |
| 1153 | /* reserve space for one header for | 1151 | /* reserve space for one header for |
| 1154 | every (1 << BLOCK_SIZE) entries */ | 1152 | every (1 << BLOCK_SIZE) entries */ |
| 1155 | ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3; | 1153 | ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; |
| 1156 | 1154 | ||
| 1157 | /* reserve space for pte addresses */ | 1155 | /* reserve space for pte addresses */ |
| 1158 | ndw += nptes * 2; | 1156 | ndw += nptes * 2; |
| 1159 | 1157 | ||
| 1160 | /* reserve space for one header for every 2k dwords */ | 1158 | /* reserve space for one header for every 2k dwords */ |
| 1161 | ndw += (npdes >> 11) * 3; | 1159 | ndw += (npdes >> 11) * 4; |
| 1162 | 1160 | ||
| 1163 | /* reserve space for pde addresses */ | 1161 | /* reserve space for pde addresses */ |
| 1164 | ndw += npdes * 2; | 1162 | ndw += npdes * 2; |
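The kzalloc()-to-vzalloc() switch above reflects how large the GART bookkeeping arrays can get: one pointer plus one dma_addr_t per CPU page of GART space, which would have to be physically contiguous if allocated with kmalloc(). A back-of-the-envelope estimate, assuming the 512 MiB default GART, 4 KiB pages and 8-byte entries on a 64-bit machine:

/*
 * Rough size estimate for the GART page arrays; the 512 MiB GART size,
 * 4 KiB pages and 8-byte entries are assumptions for illustration only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long gart_bytes = 512ULL << 20;	/* assumed GART size */
	unsigned long long page_size = 4096;
	unsigned long long num_cpu_pages = gart_bytes / page_size;
	unsigned long long pages_array = num_cpu_pages * 8;	/* void *      */
	unsigned long long addrs_array = num_cpu_pages * 8;	/* dma_addr_t  */

	printf("%llu GART pages\n", num_cpu_pages);
	printf("pages[]      needs %llu KiB\n", pages_array >> 10);
	printf("pages_addr[] needs %llu KiB\n", addrs_array >> 10);
	printf("kmalloc() would need that much physically contiguous memory;\n"
	       "vzalloc() only needs virtually contiguous pages\n");
	return 0;
}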
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f38fbcc46935..fe5c1f6b7957 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
| @@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
| 53 | struct drm_gem_object **obj) | 53 | struct drm_gem_object **obj) |
| 54 | { | 54 | { |
| 55 | struct radeon_bo *robj; | 55 | struct radeon_bo *robj; |
| 56 | unsigned long max_size; | ||
| 56 | int r; | 57 | int r; |
| 57 | 58 | ||
| 58 | *obj = NULL; | 59 | *obj = NULL; |
| @@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
| 60 | if (alignment < PAGE_SIZE) { | 61 | if (alignment < PAGE_SIZE) { |
| 61 | alignment = PAGE_SIZE; | 62 | alignment = PAGE_SIZE; |
| 62 | } | 63 | } |
| 64 | |||
| 65 | /* maximun bo size is the minimun btw visible vram and gtt size */ | ||
| 66 | max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); | ||
| 67 | if (size > max_size) { | ||
| 68 | printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n", | ||
| 69 | __func__, __LINE__, size >> 20, max_size >> 20); | ||
| 70 | return -ENOMEM; | ||
| 71 | } | ||
| 72 | |||
| 73 | retry: | ||
| 63 | r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj); | 74 | r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj); |
| 64 | if (r) { | 75 | if (r) { |
| 65 | if (r != -ERESTARTSYS) | 76 | if (r != -ERESTARTSYS) { |
| 77 | if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { | ||
| 78 | initial_domain |= RADEON_GEM_DOMAIN_GTT; | ||
| 79 | goto retry; | ||
| 80 | } | ||
| 66 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", | 81 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", |
| 67 | size, initial_domain, alignment, r); | 82 | size, initial_domain, alignment, r); |
| 83 | } | ||
| 68 | return r; | 84 | return r; |
| 69 | } | 85 | } |
| 70 | *obj = &robj->gem_base; | 86 | *obj = &robj->gem_base; |
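The retry label added above implements a simple fallback: if the buffer cannot be placed in VRAM, widen the allowed domains to VRAM|GTT and try again, while still bailing out immediately on -ERESTARTSYS. A condensed sketch of that control flow; try_alloc() and the domain flags are stand-ins, not the radeon API:

/*
 * Condensed sketch of the VRAM -> VRAM|GTT fallback retry added above.
 * try_alloc() and the domain flags are stand-ins, not the radeon API.
 */
#include <stdio.h>
#include <errno.h>

#define DOMAIN_VRAM	0x1
#define DOMAIN_GTT	0x2

/* Pretend VRAM is exhausted so the first attempt fails. */
static int try_alloc(unsigned domain)
{
	if (!(domain & DOMAIN_GTT))
		return -ENOMEM;
	return 0;
}

static int create_object(unsigned initial_domain)
{
	int r;

retry:
	r = try_alloc(initial_domain);
	if (r) {
		if (r != -ERESTARTSYS && initial_domain == DOMAIN_VRAM) {
			initial_domain |= DOMAIN_GTT;
			printf("VRAM full, retrying with VRAM|GTT\n");
			goto retry;
		}
		return r;
	}
	printf("allocated in domain mask 0x%x\n", initial_domain);
	return 0;
}

int main(void)
{
	return create_object(DOMAIN_VRAM);
}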
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index a13ad9d707cf..0063df9d166d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
| @@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, | |||
| 370 | struct backlight_properties props; | 370 | struct backlight_properties props; |
| 371 | struct radeon_backlight_privdata *pdata; | 371 | struct radeon_backlight_privdata *pdata; |
| 372 | uint8_t backlight_level; | 372 | uint8_t backlight_level; |
| 373 | char bl_name[16]; | ||
| 373 | 374 | ||
| 374 | if (!radeon_encoder->enc_priv) | 375 | if (!radeon_encoder->enc_priv) |
| 375 | return; | 376 | return; |
| @@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, | |||
| 389 | memset(&props, 0, sizeof(props)); | 390 | memset(&props, 0, sizeof(props)); |
| 390 | props.max_brightness = RADEON_MAX_BL_LEVEL; | 391 | props.max_brightness = RADEON_MAX_BL_LEVEL; |
| 391 | props.type = BACKLIGHT_RAW; | 392 | props.type = BACKLIGHT_RAW; |
| 392 | bd = backlight_device_register("radeon_bl", &drm_connector->kdev, | 393 | snprintf(bl_name, sizeof(bl_name), |
| 394 | "radeon_bl%d", dev->primary->index); | ||
| 395 | bd = backlight_device_register(bl_name, &drm_connector->kdev, | ||
| 393 | pdata, &radeon_backlight_ops, &props); | 396 | pdata, &radeon_backlight_ops, &props); |
| 394 | if (IS_ERR(bd)) { | 397 | if (IS_ERR(bd)) { |
| 395 | DRM_ERROR("Backlight registration failed\n"); | 398 | DRM_ERROR("Backlight registration failed\n"); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8b27dd6e3144..b91118ccef86 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
| @@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
| 105 | struct radeon_bo *bo; | 105 | struct radeon_bo *bo; |
| 106 | enum ttm_bo_type type; | 106 | enum ttm_bo_type type; |
| 107 | unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; | 107 | unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; |
| 108 | unsigned long max_size = 0; | ||
| 109 | size_t acc_size; | 108 | size_t acc_size; |
| 110 | int r; | 109 | int r; |
| 111 | 110 | ||
| @@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
| 121 | } | 120 | } |
| 122 | *bo_ptr = NULL; | 121 | *bo_ptr = NULL; |
| 123 | 122 | ||
| 124 | /* maximun bo size is the minimun btw visible vram and gtt size */ | ||
| 125 | max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); | ||
| 126 | if ((page_align << PAGE_SHIFT) >= max_size) { | ||
| 127 | printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n", | ||
| 128 | __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20); | ||
| 129 | return -ENOMEM; | ||
| 130 | } | ||
| 131 | |||
| 132 | acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, | 123 | acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, |
| 133 | sizeof(struct radeon_bo)); | 124 | sizeof(struct radeon_bo)); |
| 134 | 125 | ||
| 135 | retry: | ||
| 136 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); | 126 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
| 137 | if (bo == NULL) | 127 | if (bo == NULL) |
| 138 | return -ENOMEM; | 128 | return -ENOMEM; |
| @@ -154,15 +144,6 @@ retry: | |||
| 154 | acc_size, sg, &radeon_ttm_bo_destroy); | 144 | acc_size, sg, &radeon_ttm_bo_destroy); |
| 155 | up_read(&rdev->pm.mclk_lock); | 145 | up_read(&rdev->pm.mclk_lock); |
| 156 | if (unlikely(r != 0)) { | 146 | if (unlikely(r != 0)) { |
| 157 | if (r != -ERESTARTSYS) { | ||
| 158 | if (domain == RADEON_GEM_DOMAIN_VRAM) { | ||
| 159 | domain |= RADEON_GEM_DOMAIN_GTT; | ||
| 160 | goto retry; | ||
| 161 | } | ||
| 162 | dev_err(rdev->dev, | ||
| 163 | "object_init failed for (%lu, 0x%08X)\n", | ||
| 164 | size, domain); | ||
| 165 | } | ||
| 166 | return r; | 147 | return r; |
| 167 | } | 148 | } |
| 168 | *bo_ptr = bo; | 149 | *bo_ptr = bo; |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index df8dd7701643..b0db712060fb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
| @@ -2808,26 +2808,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe, | |||
| 2808 | { | 2808 | { |
| 2809 | struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; | 2809 | struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; |
| 2810 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | 2810 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); |
| 2811 | int i; | ||
| 2812 | uint64_t value; | ||
| 2813 | 2811 | ||
| 2814 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2)); | 2812 | while (count) { |
| 2815 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | 2813 | unsigned ndw = 2 + count * 2; |
| 2816 | WRITE_DATA_DST_SEL(1))); | 2814 | if (ndw > 0x3FFE) |
| 2817 | radeon_ring_write(ring, pe); | 2815 | ndw = 0x3FFE; |
| 2818 | radeon_ring_write(ring, upper_32_bits(pe)); | 2816 | |
| 2819 | for (i = 0; i < count; ++i) { | 2817 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw)); |
| 2820 | if (flags & RADEON_VM_PAGE_SYSTEM) { | 2818 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | |
| 2821 | value = radeon_vm_map_gart(rdev, addr); | 2819 | WRITE_DATA_DST_SEL(1))); |
| 2822 | value &= 0xFFFFFFFFFFFFF000ULL; | 2820 | radeon_ring_write(ring, pe); |
| 2823 | } else if (flags & RADEON_VM_PAGE_VALID) | 2821 | radeon_ring_write(ring, upper_32_bits(pe)); |
| 2824 | value = addr; | 2822 | for (; ndw > 2; ndw -= 2, --count, pe += 8) { |
| 2825 | else | 2823 | uint64_t value; |
| 2826 | value = 0; | 2824 | if (flags & RADEON_VM_PAGE_SYSTEM) { |
| 2827 | addr += incr; | 2825 | value = radeon_vm_map_gart(rdev, addr); |
| 2828 | value |= r600_flags; | 2826 | value &= 0xFFFFFFFFFFFFF000ULL; |
| 2829 | radeon_ring_write(ring, value); | 2827 | } else if (flags & RADEON_VM_PAGE_VALID) |
| 2830 | radeon_ring_write(ring, upper_32_bits(value)); | 2828 | value = addr; |
| 2829 | else | ||
| 2830 | value = 0; | ||
| 2831 | addr += incr; | ||
| 2832 | value |= r600_flags; | ||
| 2833 | radeon_ring_write(ring, value); | ||
| 2834 | radeon_ring_write(ring, upper_32_bits(value)); | ||
| 2835 | } | ||
| 2831 | } | 2836 | } |
| 2832 | } | 2837 | } |
| 2833 | 2838 | ||
| @@ -2868,6 +2873,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) | |||
| 2868 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | 2873 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
| 2869 | radeon_ring_write(ring, 0); | 2874 | radeon_ring_write(ring, 0); |
| 2870 | radeon_ring_write(ring, 1 << vm->id); | 2875 | radeon_ring_write(ring, 1 << vm->id); |
| 2876 | |||
| 2877 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ | ||
| 2878 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | ||
| 2879 | radeon_ring_write(ring, 0x0); | ||
| 2871 | } | 2880 | } |
| 2872 | 2881 | ||
| 2873 | /* | 2882 | /* |
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index c71d493fd0c5..1c350fc4e449 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
| @@ -201,6 +201,8 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 201 | goto done; | 201 | goto done; |
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | platform_set_drvdata(pdev, sdev); | ||
| 205 | |||
| 204 | done: | 206 | done: |
| 205 | if (ret) | 207 | if (ret) |
| 206 | shmob_drm_unload(dev); | 208 | shmob_drm_unload(dev); |
| @@ -299,11 +301,9 @@ static struct drm_driver shmob_drm_driver = { | |||
| 299 | #if CONFIG_PM_SLEEP | 301 | #if CONFIG_PM_SLEEP |
| 300 | static int shmob_drm_pm_suspend(struct device *dev) | 302 | static int shmob_drm_pm_suspend(struct device *dev) |
| 301 | { | 303 | { |
| 302 | struct platform_device *pdev = to_platform_device(dev); | 304 | struct shmob_drm_device *sdev = dev_get_drvdata(dev); |
| 303 | struct drm_device *ddev = platform_get_drvdata(pdev); | ||
| 304 | struct shmob_drm_device *sdev = ddev->dev_private; | ||
| 305 | 305 | ||
| 306 | drm_kms_helper_poll_disable(ddev); | 306 | drm_kms_helper_poll_disable(sdev->ddev); |
| 307 | shmob_drm_crtc_suspend(&sdev->crtc); | 307 | shmob_drm_crtc_suspend(&sdev->crtc); |
| 308 | 308 | ||
| 309 | return 0; | 309 | return 0; |
| @@ -311,9 +311,7 @@ static int shmob_drm_pm_suspend(struct device *dev) | |||
| 311 | 311 | ||
| 312 | static int shmob_drm_pm_resume(struct device *dev) | 312 | static int shmob_drm_pm_resume(struct device *dev) |
| 313 | { | 313 | { |
| 314 | struct platform_device *pdev = to_platform_device(dev); | 314 | struct shmob_drm_device *sdev = dev_get_drvdata(dev); |
| 315 | struct drm_device *ddev = platform_get_drvdata(pdev); | ||
| 316 | struct shmob_drm_device *sdev = ddev->dev_private; | ||
| 317 | 315 | ||
| 318 | mutex_lock(&sdev->ddev->mode_config.mutex); | 316 | mutex_lock(&sdev->ddev->mode_config.mutex); |
| 319 | shmob_drm_crtc_resume(&sdev->crtc); | 317 | shmob_drm_crtc_resume(&sdev->crtc); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 402ab69f9f99..bf6e4b5a73b5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
| @@ -580,6 +580,7 @@ retry: | |||
| 580 | if (unlikely(ret != 0)) | 580 | if (unlikely(ret != 0)) |
| 581 | return ret; | 581 | return ret; |
| 582 | 582 | ||
| 583 | retry_reserve: | ||
| 583 | spin_lock(&glob->lru_lock); | 584 | spin_lock(&glob->lru_lock); |
| 584 | 585 | ||
| 585 | if (unlikely(list_empty(&bo->ddestroy))) { | 586 | if (unlikely(list_empty(&bo->ddestroy))) { |
| @@ -587,14 +588,20 @@ retry: | |||
| 587 | return 0; | 588 | return 0; |
| 588 | } | 589 | } |
| 589 | 590 | ||
| 590 | ret = ttm_bo_reserve_locked(bo, interruptible, | 591 | ret = ttm_bo_reserve_locked(bo, false, true, false, 0); |
| 591 | no_wait_reserve, false, 0); | ||
| 592 | 592 | ||
| 593 | if (unlikely(ret != 0)) { | 593 | if (unlikely(ret == -EBUSY)) { |
| 594 | spin_unlock(&glob->lru_lock); | 594 | spin_unlock(&glob->lru_lock); |
| 595 | return ret; | 595 | if (likely(!no_wait_reserve)) |
| 596 | ret = ttm_bo_wait_unreserved(bo, interruptible); | ||
| 597 | if (unlikely(ret != 0)) | ||
| 598 | return ret; | ||
| 599 | |||
| 600 | goto retry_reserve; | ||
| 596 | } | 601 | } |
| 597 | 602 | ||
| 603 | BUG_ON(ret != 0); | ||
| 604 | |||
| 598 | /** | 605 | /** |
| 599 | * We can re-check for sync object without taking | 606 | * We can re-check for sync object without taking |
| 600 | * the bo::lock since setting the sync object requires | 607 | * the bo::lock since setting the sync object requires |
| @@ -811,17 +818,14 @@ retry: | |||
| 811 | no_wait_reserve, no_wait_gpu); | 818 | no_wait_reserve, no_wait_gpu); |
| 812 | kref_put(&bo->list_kref, ttm_bo_release_list); | 819 | kref_put(&bo->list_kref, ttm_bo_release_list); |
| 813 | 820 | ||
| 814 | if (likely(ret == 0 || ret == -ERESTARTSYS)) | 821 | return ret; |
| 815 | return ret; | ||
| 816 | |||
| 817 | goto retry; | ||
| 818 | } | 822 | } |
| 819 | 823 | ||
| 820 | ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); | 824 | ret = ttm_bo_reserve_locked(bo, false, true, false, 0); |
| 821 | 825 | ||
| 822 | if (unlikely(ret == -EBUSY)) { | 826 | if (unlikely(ret == -EBUSY)) { |
| 823 | spin_unlock(&glob->lru_lock); | 827 | spin_unlock(&glob->lru_lock); |
| 824 | if (likely(!no_wait_gpu)) | 828 | if (likely(!no_wait_reserve)) |
| 825 | ret = ttm_bo_wait_unreserved(bo, interruptible); | 829 | ret = ttm_bo_wait_unreserved(bo, interruptible); |
| 826 | 830 | ||
| 827 | kref_put(&bo->list_kref, ttm_bo_release_list); | 831 | kref_put(&bo->list_kref, ttm_bo_release_list); |
