Diffstat (limited to 'drivers/gpu/drm')
88 files changed, 3200 insertions, 1790 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fa19c2b9820f..d91fb8c0b7b3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -283,7 +283,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
  * functions & device file and adds it to the master fd list.
  *
  * RETURNS:
- * Zero on success, error code on falure.
+ * Zero on success, error code on failure.
  */
 int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 			 const struct drm_framebuffer_funcs *funcs)
@@ -2434,7 +2434,7 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
 	} else if (connector->funcs->set_property)
 		ret = connector->funcs->set_property(connector, property, out_resp->value);
 
-	/* store the property value if succesful */
+	/* store the property value if successful */
 	if (!ret)
 		drm_connector_property_set_value(connector, property, out_resp->value);
 out:
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7d0f00a935fa..f2aaf39be398 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
-		} else if ((set->fb->bits_per_pixel !=
-			    set->crtc->fb->bits_per_pixel) ||
-			    set->fb->depth != set->crtc->fb->depth)
-			fb_changed = true;
-		else
+		} else
 			fb_changed = true;
 	}
 
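The simplification above is behavior-preserving: the removed else-if compared the new framebuffer's bits_per_pixel and depth against the current one, but both that branch and the final else assigned fb_changed = true, so the comparison was dead code. Every case with a non-NULL fb on an already-configured CRTC now simply marks the fb as changed.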
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 766c46875a20..f3c58e2bd75c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
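For context, DRM_UNLOCKED tells the DRM core to dispatch the handler without the global lock that drm_ioctl() otherwise wraps around every call; GEM and modesetting ioctls do their own fine-grained locking, so holding it there was pure contention. A minimal sketch of the dispatcher logic the flag keys off (paraphrased, not a verbatim quote of drm_ioctl() in this era):

    /* sketch: handlers marked DRM_UNLOCKED run without the big kernel lock */
    if (ioctl->flags & DRM_UNLOCKED)
            retcode = func(dev, kdata, file_priv);
    else {
            lock_kernel();
            retcode = func(dev, kdata, file_priv);
            unlock_kernel();
    }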
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f41e91ceaea6..f97e7c42ac8e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
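A worked example of the quirk (numbers illustrative): a CEA-style 1080i detailed timing encoded as field height arrives here as a 1920x540 mode. It matches the { 1920, 1080 } table entry because 540 == 1080 / 2, so the vertical timings are doubled (vdisplay 540 -> 1080, and e.g. a field vtotal of 562 -> 1124) and the final vtotal |= 1 makes the frame total odd (1125 lines), as an interlaced frame of two half-line-offset fields requires. Detailed timings already encoded in frame height fall through the loop untouched and, as before, only gain DRM_MODE_FLAG_INTERLACE.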
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0f9e90552dc4..50549703584f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,7 @@
  * Dave Airlie <airlied@linux.ie>
  * Jesse Barnes <jesse.barnes@intel.com>
  */
+#include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/fb.h>
 #include "drmP.h"
@@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_fb_helper_add_connector);
 
-static int my_atoi(const char *name)
-{
-	int val = 0;
-
-	for (;; name++) {
-		switch (*name) {
-		case '0' ... '9':
-			val = 10*val+(*name-'0');
-			break;
-		default:
-			return val;
-		}
-	}
-}
-
 /**
  * drm_fb_helper_connector_parse_command_line - parse command line for connector
  * @connector - connector to parse line for
@@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
 			namelen = i;
 			if (!refresh_specified && !bpp_specified &&
 			    !yres_specified) {
-				refresh = my_atoi(&name[i+1]);
+				refresh = simple_strtol(&name[i+1], NULL, 10);
 				refresh_specified = 1;
 				if (cvt || rb)
 					cvt = 0;
@@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
 		case '-':
 			namelen = i;
 			if (!bpp_specified && !yres_specified) {
-				bpp = my_atoi(&name[i+1]);
+				bpp = simple_strtol(&name[i+1], NULL, 10);
 				bpp_specified = 1;
 				if (cvt || rb)
 					cvt = 0;
@@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
 			break;
 		case 'x':
 			if (!yres_specified) {
-				yres = my_atoi(&name[i+1]);
+				yres = simple_strtol(&name[i+1], NULL, 10);
 				yres_specified = 1;
 			} else
 				goto done;
@@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
 		}
 	}
 	if (i < 0 && yres_specified) {
-		xres = my_atoi(name);
+		xres = simple_strtol(name, NULL, 10);
 		res_specified = 1;
 	}
 done:
@@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
 	int i;
 
 	if (var->pixclock != 0) {
-		DRM_ERROR("PIXEL CLCOK SET\n");
+		DRM_ERROR("PIXEL CLOCK SET\n");
 		return -EINVAL;
 	}
 
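Replacing the open-coded my_atoi() with the kernel's simple_strtol() (hence the new <linux/kernel.h> include) keeps the parsing behavior for mode strings like "1024x768-24@60" while dropping a private helper. An illustrative fragment, not part of the patch:

    /* simple_strtol(cp, endp, base) parses a number and can report where it
     * stopped; the parser above passes NULL because it tracks offsets itself. */
    const char *name = "1024x768-24@60";
    long xres = simple_strtol(name, NULL, 10);      /* 1024 */
    long bpp  = simple_strtol(name + 9, NULL, 10);  /* 24, parsed after '-' */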
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..aa89d4b0b4c4 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
-	/* Basically we want to disable the OOM killer and handle ENOMEM
-	 * ourselves by sacrificing pages from cached buffers.
-	 * XXX shmem_file_[gs]et_gfp_mask()
-	 */
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
-			     GFP_HIGHUSER |
-			     __GFP_COLD |
-			     __GFP_FS |
-			     __GFP_RECLAIMABLE |
-			     __GFP_NORETRY |
-			     __GFP_NOWARN |
-			     __GFP_NOMEMALLOC);
-
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
@@ -205,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
@@ -338,9 +323,7 @@ again:
 	}
 
 err:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
 
@@ -371,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
 
@@ -403,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
 	struct drm_gem_object *obj = ptr;
 
-	drm_gem_object_handle_unreference(obj);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
@@ -416,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_private->object_idr,
 		     &drm_gem_object_release_handle, NULL);
 
 	idr_destroy(&file_private->object_idr);
-	mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+drm_gem_object_free_common(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	fput(obj->filp);
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
+	kfree(obj);
 }
 
 /**
  * Called after the last reference to the object has been lost.
+ * Must be called holding struct_ mutex
  *
  * Frees the object
  */
@@ -440,14 +430,40 @@ drm_gem_object_free(struct kref *kref)
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
 
-	fput(obj->filp);
-	atomic_dec(&dev->object_count);
-	atomic_sub(obj->size, &dev->object_memory);
-	kfree(obj);
+	drm_gem_object_free_common(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
 /**
+ * Called after the last reference to the object has been lost.
+ * Must be called without holding struct_mutex
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free_unlocked(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	if (dev->driver->gem_free_object_unlocked != NULL)
+		dev->driver->gem_free_object_unlocked(obj);
+	else if (dev->driver->gem_free_object != NULL) {
+		mutex_lock(&dev->struct_mutex);
+		dev->driver->gem_free_object(obj);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	drm_gem_object_free_common(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free_unlocked);
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+	BUG();
+}
+
+/**
  * Called after the last handle to the object has been closed
  *
  * Removes any name for the object. Note that this must be
@@ -471,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref)
 		/*
 		 * The object name held a reference to this object, drop
 		 * that now.
+		 *
+		 * This cannot be the last reference, since the handle holds one too.
 		 */
-		drm_gem_object_unreference(obj);
+		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	} else
 		spin_unlock(&dev->object_name_lock);
 
@@ -490,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
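The *_unlocked reference helpers used throughout this patch are presumably the inline wrappers added to drmP.h by the same commit (not shown in this hunk). A sketch of their likely shape, assuming they mirror the existing locked variants but route the final kref_put() through drm_gem_object_free_unlocked(), which takes struct_mutex itself:

    static inline void
    drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
    {
            if (obj != NULL)
                    kref_put(&obj->refcount, drm_gem_object_free_unlocked);
    }

The drm_gem_object_ref_bug() helper exists for the name-to-object reference: since the handle also holds a reference, that kref_put() can never be the last one, and the release callback BUG()s if it ever fires.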
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 			wasted += alignment - tmp;
 		}
 
-		if (entry->size >= size + wasted) {
+		if (entry->size >= size + wasted &&
+		    (entry->start + wasted + size) <= end) {
 			if (!best_match)
 				return entry;
 			if (entry->size < best_size) {
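This is a bounds fix: drm_mm_search_free_in_range() previously checked only that a candidate hole was large enough, not that the allocation would still end inside the caller's [start, end) window. With illustrative numbers, a search for size 4096 with end = 8192 could return a hole starting at 6144 even though 6144 + 4096 = 10240 overshoots the range; the added (entry->start + wasted + size) <= end test rejects such holes.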
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6d81a02463a3..76d63394c776 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1,9 +1,4 @@
 /*
- * The list_sort function is (presumably) licensed under the GPL (see the
- * top level "COPYING" file for details).
- *
- * The remainder of this file is:
- *
  * Copyright © 1997-2003 by The XFree86 Project, Inc.
  * Copyright © 2007 Dave Airlie
  * Copyright © 2007-2008 Intel Corporation
@@ -36,6 +31,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/list_sort.h>
 #include "drmP.h"
 #include "drm.h"
 #include "drm_crtc.h"
@@ -855,6 +851,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
 
 /**
  * drm_mode_compare - compare modes for favorability
+ * @priv: unused
  * @lh_a: list_head for first mode
  * @lh_b: list_head for second mode
  *
@@ -868,7 +865,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
  * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
  * positive if @lh_b is better than @lh_a.
  */
-static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
 {
 	struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
 	struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
@@ -885,85 +882,6 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
 	return diff;
 }
 
-/* FIXME: what we don't have a list sort function? */
-/* list sort from Mark J Roberts (mjr@znex.org) */
-void list_sort(struct list_head *head,
-	       int (*cmp)(struct list_head *a, struct list_head *b))
-{
-	struct list_head *p, *q, *e, *list, *tail, *oldhead;
-	int insize, nmerges, psize, qsize, i;
-
-	list = head->next;
-	list_del(head);
-	insize = 1;
-	for (;;) {
-		p = oldhead = list;
-		list = tail = NULL;
-		nmerges = 0;
-
-		while (p) {
-			nmerges++;
-			q = p;
-			psize = 0;
-			for (i = 0; i < insize; i++) {
-				psize++;
-				q = q->next == oldhead ? NULL : q->next;
-				if (!q)
-					break;
-			}
-
-			qsize = insize;
-			while (psize > 0 || (qsize > 0 && q)) {
-				if (!psize) {
-					e = q;
-					q = q->next;
-					qsize--;
-					if (q == oldhead)
-						q = NULL;
-				} else if (!qsize || !q) {
-					e = p;
-					p = p->next;
-					psize--;
-					if (p == oldhead)
-						p = NULL;
-				} else if (cmp(p, q) <= 0) {
-					e = p;
-					p = p->next;
-					psize--;
-					if (p == oldhead)
-						p = NULL;
-				} else {
-					e = q;
-					q = q->next;
-					qsize--;
-					if (q == oldhead)
-						q = NULL;
-				}
-				if (tail)
-					tail->next = e;
-				else
-					list = e;
-				e->prev = tail;
-				tail = e;
-			}
-			p = q;
-		}
-
-		tail->next = list;
-		list->prev = tail;
-
-		if (nmerges <= 1)
-			break;
-
-		insize *= 2;
-	}
-
-	head->next = list;
-	head->prev = list->prev;
-	list->prev->next = head;
-	list->prev = head;
-}
-
 /**
  * drm_mode_sort - sort mode list
  * @mode_list: list to sort
@@ -975,7 +893,7 @@ void list_sort(struct list_head *head,
  */
 void drm_mode_sort(struct list_head *mode_list)
 {
-	list_sort(mode_list, drm_mode_compare);
+	list_sort(NULL, mode_list, drm_mode_compare);
 }
 EXPORT_SYMBOL(drm_mode_sort);
 
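With the driver-private merge sort gone, drm_mode_sort() relies on the generic lib/list_sort.c implementation, and the void *priv argument threaded through drm_mode_compare() exists only to match its callback type (it is unused here, as the new @priv kerneldoc notes). The shared helper's declaration, from <linux/list_sort.h>:

    void list_sort(void *priv, struct list_head *head,
                   int (*cmp)(void *priv, struct list_head *a,
                              struct list_head *b));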
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf0b580..5eed46312442 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
 		mem = kmap_atomic(pages[page], KM_USER0);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-		kunmap_atomic(pages[page], KM_USER0);
+		kunmap_atomic(mem, KM_USER0);
 	}
 }
 
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static const char *pin_flag(int pinned)
+{
+	if (pinned > 0)
+		return " P";
+	else if (pinned < 0)
+		return " p";
+	else
+		return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+	switch (tiling) {
+	default:
+	case I915_TILING_NONE: return "";
+	case I915_TILING_X: return " X";
+	case I915_TILING_Y: return " Y";
+	}
+}
+
+static const char *dirty_flag(int dirty)
+{
+	return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+	return purgeable ? " purgeable" : "";
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
 	unsigned long flags;
+	int i, page, offset, elt;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	if (!dev_priv->first_error) {
@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 
 	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
+	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
 	seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
 	}
+	seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+
+	if (error->active_bo_count) {
+		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+
+		for (i = 0; i < error->active_bo_count; i++) {
+			seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
+				   error->active_bo[i].gtt_offset,
+				   error->active_bo[i].size,
+				   error->active_bo[i].read_domains,
+				   error->active_bo[i].write_domain,
+				   error->active_bo[i].seqno,
+				   pin_flag(error->active_bo[i].pinned),
+				   tiling_flag(error->active_bo[i].tiling),
+				   dirty_flag(error->active_bo[i].dirty),
+				   purgeable_flag(error->active_bo[i].purgeable));
+
+			if (error->active_bo[i].name)
+				seq_printf(m, " (name: %d)", error->active_bo[i].name);
+			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
+				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
+
+			seq_printf(m, "\n");
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+		if (error->batchbuffer[i]) {
+			struct drm_i915_error_object *obj = error->batchbuffer[i];
+
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+					offset += 4;
+				}
+			}
+		}
+	}
+
+	if (error->ringbuffer) {
+		struct drm_i915_error_object *obj = error->ringbuffer;
+
+		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
+		offset = 0;
+		for (page = 0; page < obj->page_count; page++) {
+			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+				seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+				offset += 4;
+			}
+		}
+	}
 
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,30 +471,161 @@ out:
 	return 0;
 }
 
-static int i915_registers_info(struct seq_file *m, void *data) {
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 crstanddelay = I915_READ16(CRSTANDVID);
+
+	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+	return 0;
+}
+
+static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
+	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
+	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
+		   rgvswctl & 0x3f);
+
+	return 0;
+}
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t reg;
+	u32 delayfreq;
+	int i;
 
-#define DUMP_RANGE(start, end) \
-	for (reg=start; reg < end; reg += 4) \
-		seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
-	DUMP_RANGE(0x00000, 0x00fff);	/* VGA registers */
-	DUMP_RANGE(0x02000, 0x02fff);	/* instruction, memory, interrupt control registers */
-	DUMP_RANGE(0x03000, 0x031ff);	/* FENCE and PPGTT control registers */
-	DUMP_RANGE(0x03200, 0x03fff);	/* frame buffer compression registers */
-	DUMP_RANGE(0x05000, 0x05fff);	/* I/O control registers */
-	DUMP_RANGE(0x06000, 0x06fff);	/* clock control registers */
-	DUMP_RANGE(0x07000, 0x07fff);	/* 3D internal debug registers */
-	DUMP_RANGE(0x07400, 0x088ff);	/* GPE debug registers */
-	DUMP_RANGE(0x0a000, 0x0afff);	/* display palette registers */
-	DUMP_RANGE(0x10000, 0x13fff);	/* MMIO MCHBAR */
-	DUMP_RANGE(0x30000, 0x3ffff);	/* overlay registers */
-	DUMP_RANGE(0x60000, 0x6ffff);	/* display engine pipeline registers */
-	DUMP_RANGE(0x70000, 0x72fff);	/* display and cursor registers */
-	DUMP_RANGE(0x73000, 0x73fff);	/* performance counters */
+	for (i = 0; i < 16; i++) {
+		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+	}
+
+	return 0;
+}
+
+static inline int MAP_TO_MV(int map)
+{
+	return 1250 - (map * 25);
+}
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 inttoext;
+	int i;
+
+	for (i = 1; i <= 32; i++) {
+		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+	}
+
+	return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
+
+	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+		   "yes" : "no");
+	seq_printf(m, "Boost freq: %d\n",
+		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+		   MEMMODE_BOOST_FREQ_SHIFT);
+	seq_printf(m, "HW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+	seq_printf(m, "SW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+	seq_printf(m, "Gated voltage change: %s\n",
+		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+	seq_printf(m, "Starting frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+	seq_printf(m, "Max frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+
+	return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool fbc_enabled = false;
+
+	if (!dev_priv->display.fbc_enabled) {
+		seq_printf(m, "FBC unsupported on this chipset\n");
+		return 0;
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (!crtc->enabled)
+			continue;
+		if (dev_priv->display.fbc_enabled(crtc))
+			fbc_enabled = true;
+	}
+
+	if (fbc_enabled) {
+		seq_printf(m, "FBC enabled\n");
+	} else {
+		seq_printf(m, "FBC disabled: ");
+		switch (dev_priv->no_fbc_reason) {
+		case FBC_STOLEN_TOO_SMALL:
+			seq_printf(m, "not enough stolen memory");
+			break;
+		case FBC_UNSUPPORTED_MODE:
+			seq_printf(m, "mode not supported");
+			break;
+		case FBC_MODE_TOO_LARGE:
+			seq_printf(m, "mode too large");
+			break;
+		case FBC_BAD_PLANE:
+			seq_printf(m, "FBC unsupported on plane");
+			break;
+		case FBC_NOT_TILED:
+			seq_printf(m, "scanout buffer not tiled");
+			break;
+		default:
+			seq_printf(m, "unknown reason");
+		}
+		seq_printf(m, "\n");
+	}
+	return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool sr_enabled = false;
+
+	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
+		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+	else if (IS_I915GM(dev))
+		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+	else if (IS_PINEVIEW(dev))
+		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+		   "disabled");
 
 	return 0;
 }
@@ -519,7 +735,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
-	{"i915_regs", i915_registers_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
@@ -532,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
 	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
+	{"i915_rstdby_delays", i915_rstdby_delays, 0},
+	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+	{"i915_delayfreq_table", i915_delayfreq_table, 0},
+	{"i915_inttoext_table", i915_inttoext_table, 0},
+	{"i915_drpc_info", i915_drpc_info, 0},
+	{"i915_fbc_status", i915_fbc_status, 0},
+	{"i915_sr_status", i915_sr_status, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
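The new entries expose Ironlake render-standby/frequency state, FBC status, and self-refresh status as read-only debugfs files; with debugfs mounted in the usual place they would be read from a path like /sys/kernel/debug/dri/0/i915_fbc_status (minor number illustrative). In the extended error state, each active buffer line prints gtt_offset, size, read domains, write domain and seqno, followed by the new one-letter flags: " P"/" p" for pinned, " X"/" Y" for tiling, plus " dirty" and " purgeable".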
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 02607ed61399..dbfe07c90cbc 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
36 | #include "i915_trace.h" | 36 | #include "i915_trace.h" |
37 | #include <linux/vgaarb.h> | 37 | #include <linux/vgaarb.h> |
38 | #include <linux/acpi.h> | ||
39 | #include <linux/pnp.h> | ||
38 | 40 | ||
39 | /* Really want an OS-independent resettable timer. Would like to have | 41 | /* Really want an OS-independent resettable timer. Would like to have |
40 | * this loop run for (eg) 3 sec, but have the timer reset every time | 42 | * this loop run for (eg) 3 sec, but have the timer reset every time |
@@ -134,6 +136,10 @@ static int i915_init_phys_hws(struct drm_device *dev) | |||
134 | 136 | ||
135 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 137 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); |
136 | 138 | ||
139 | if (IS_I965G(dev)) | ||
140 | dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & | ||
141 | 0xf0; | ||
142 | |||
137 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | 143 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); |
138 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | 144 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
139 | return 0; | 145 | return 0; |
@@ -731,8 +737,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, | |||
731 | if (cmdbuf->num_cliprects) { | 737 | if (cmdbuf->num_cliprects) { |
732 | cliprects = kcalloc(cmdbuf->num_cliprects, | 738 | cliprects = kcalloc(cmdbuf->num_cliprects, |
733 | sizeof(struct drm_clip_rect), GFP_KERNEL); | 739 | sizeof(struct drm_clip_rect), GFP_KERNEL); |
734 | if (cliprects == NULL) | 740 | if (cliprects == NULL) { |
741 | ret = -ENOMEM; | ||
735 | goto fail_batch_free; | 742 | goto fail_batch_free; |
743 | } | ||
736 | 744 | ||
737 | ret = copy_from_user(cliprects, cmdbuf->cliprects, | 745 | ret = copy_from_user(cliprects, cmdbuf->cliprects, |
738 | cmdbuf->num_cliprects * | 746 | cmdbuf->num_cliprects * |
@@ -813,9 +821,13 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
813 | case I915_PARAM_HAS_PAGEFLIPPING: | 821 | case I915_PARAM_HAS_PAGEFLIPPING: |
814 | value = 1; | 822 | value = 1; |
815 | break; | 823 | break; |
824 | case I915_PARAM_HAS_EXECBUF2: | ||
825 | /* depends on GEM */ | ||
826 | value = dev_priv->has_gem; | ||
827 | break; | ||
816 | default: | 828 | default: |
817 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 829 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
818 | param->param); | 830 | param->param); |
819 | return -EINVAL; | 831 | return -EINVAL; |
820 | } | 832 | } |
821 | 833 | ||
@@ -923,6 +935,120 @@ static int i915_get_bridge_dev(struct drm_device *dev) | |||
923 | return 0; | 935 | return 0; |
924 | } | 936 | } |
925 | 937 | ||
938 | #define MCHBAR_I915 0x44 | ||
939 | #define MCHBAR_I965 0x48 | ||
940 | #define MCHBAR_SIZE (4*4096) | ||
941 | |||
942 | #define DEVEN_REG 0x54 | ||
943 | #define DEVEN_MCHBAR_EN (1 << 28) | ||
944 | |||
945 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | ||
946 | static int | ||
947 | intel_alloc_mchbar_resource(struct drm_device *dev) | ||
948 | { | ||
949 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
950 | int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | ||
951 | u32 temp_lo, temp_hi = 0; | ||
952 | u64 mchbar_addr; | ||
953 | int ret = 0; | ||
954 | |||
955 | if (IS_I965G(dev)) | ||
956 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); | ||
957 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | ||
958 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | ||
959 | |||
960 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | ||
961 | #ifdef CONFIG_PNP | ||
962 | if (mchbar_addr && | ||
963 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { | ||
964 | ret = 0; | ||
965 | goto out; | ||
966 | } | ||
967 | #endif | ||
968 | |||
969 | /* Get some space for it */ | ||
970 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, | ||
971 | MCHBAR_SIZE, MCHBAR_SIZE, | ||
972 | PCIBIOS_MIN_MEM, | ||
973 | 0, pcibios_align_resource, | ||
974 | dev_priv->bridge_dev); | ||
975 | if (ret) { | ||
976 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); | ||
977 | dev_priv->mch_res.start = 0; | ||
978 | goto out; | ||
979 | } | ||
980 | |||
981 | if (IS_I965G(dev)) | ||
982 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, | ||
983 | upper_32_bits(dev_priv->mch_res.start)); | ||
984 | |||
985 | pci_write_config_dword(dev_priv->bridge_dev, reg, | ||
986 | lower_32_bits(dev_priv->mch_res.start)); | ||
987 | out: | ||
988 | return ret; | ||
989 | } | ||
990 | |||
991 | /* Setup MCHBAR if possible, return true if we should disable it again */ | ||
992 | static void | ||
993 | intel_setup_mchbar(struct drm_device *dev) | ||
994 | { | ||
995 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
996 | int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | ||
997 | u32 temp; | ||
998 | bool enabled; | ||
999 | |||
1000 | dev_priv->mchbar_need_disable = false; | ||
1001 | |||
1002 | if (IS_I915G(dev) || IS_I915GM(dev)) { | ||
1003 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | ||
1004 | enabled = !!(temp & DEVEN_MCHBAR_EN); | ||
1005 | } else { | ||
1006 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | ||
1007 | enabled = temp & 1; | ||
1008 | } | ||
1009 | |||
1010 | /* If it's already enabled, don't have to do anything */ | ||
1011 | if (enabled) | ||
1012 | return; | ||
1013 | |||
1014 | if (intel_alloc_mchbar_resource(dev)) | ||
1015 | return; | ||
1016 | |||
1017 | dev_priv->mchbar_need_disable = true; | ||
1018 | |||
1019 | /* Space is allocated or reserved, so enable it. */ | ||
1020 | if (IS_I915G(dev) || IS_I915GM(dev)) { | ||
1021 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, | ||
1022 | temp | DEVEN_MCHBAR_EN); | ||
1023 | } else { | ||
1024 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | ||
1025 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | static void | ||
1030 | intel_teardown_mchbar(struct drm_device *dev) | ||
1031 | { | ||
1032 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1033 | int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | ||
1034 | u32 temp; | ||
1035 | |||
1036 | if (dev_priv->mchbar_need_disable) { | ||
1037 | if (IS_I915G(dev) || IS_I915GM(dev)) { | ||
1038 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | ||
1039 | temp &= ~DEVEN_MCHBAR_EN; | ||
1040 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); | ||
1041 | } else { | ||
1042 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | ||
1043 | temp &= ~1; | ||
1044 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); | ||
1045 | } | ||
1046 | } | ||
1047 | |||
1048 | if (dev_priv->mch_res.start) | ||
1049 | release_resource(&dev_priv->mch_res); | ||
1050 | } | ||
1051 | |||
926 | /** | 1052 | /** |
927 | * i915_probe_agp - get AGP bootup configuration | 1053 | * i915_probe_agp - get AGP bootup configuration |
928 | * @pdev: PCI device | 1054 | * @pdev: PCI device |
@@ -1117,11 +1243,13 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
1117 | { | 1243 | { |
1118 | struct drm_i915_private *dev_priv = dev->dev_private; | 1244 | struct drm_i915_private *dev_priv = dev->dev_private; |
1119 | struct drm_mm_node *compressed_fb, *compressed_llb; | 1245 | struct drm_mm_node *compressed_fb, *compressed_llb; |
1120 | unsigned long cfb_base, ll_base; | 1246 | unsigned long cfb_base; |
1247 | unsigned long ll_base = 0; | ||
1121 | 1248 | ||
1122 | /* Leave 1M for line length buffer & misc. */ | 1249 | /* Leave 1M for line length buffer & misc. */ |
1123 | compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); | 1250 | compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); |
1124 | if (!compressed_fb) { | 1251 | if (!compressed_fb) { |
1252 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | ||
1125 | i915_warn_stolen(dev); | 1253 | i915_warn_stolen(dev); |
1126 | return; | 1254 | return; |
1127 | } | 1255 | } |
@@ -1129,6 +1257,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) | |||
1129 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); | 1257 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); |
1130 | if (!compressed_fb) { | 1258 | if (!compressed_fb) { |
1131 | i915_warn_stolen(dev); | 1259 | i915_warn_stolen(dev); |
1260 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | ||
1132 | return; | 1261 | return; |
1133 | } | 1262 | } |
1134 | 1263 | ||
@@ -1200,14 +1329,6 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1200 | dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & | 1329 | dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & |
1201 | 0xff000000; | 1330 | 0xff000000; |
1202 | 1331 | ||
1203 | if (IS_MOBILE(dev) || IS_I9XX(dev)) | ||
1204 | dev_priv->cursor_needs_physical = true; | ||
1205 | else | ||
1206 | dev_priv->cursor_needs_physical = false; | ||
1207 | |||
1208 | if (IS_I965G(dev) || IS_G33(dev)) | ||
1209 | dev_priv->cursor_needs_physical = false; | ||
1210 | |||
1211 | /* Basic memrange allocator for stolen space (aka vram) */ | 1332 | /* Basic memrange allocator for stolen space (aka vram) */ |
1212 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); | 1333 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); |
1213 | DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); | 1334 | DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); |
@@ -1257,6 +1378,8 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1257 | if (ret) | 1378 | if (ret) |
1258 | goto destroy_ringbuffer; | 1379 | goto destroy_ringbuffer; |
1259 | 1380 | ||
1381 | intel_modeset_init(dev); | ||
1382 | |||
1260 | ret = drm_irq_install(dev); | 1383 | ret = drm_irq_install(dev); |
1261 | if (ret) | 1384 | if (ret) |
1262 | goto destroy_ringbuffer; | 1385 | goto destroy_ringbuffer; |
@@ -1271,8 +1394,6 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1271 | 1394 | ||
1272 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | 1395 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); |
1273 | 1396 | ||
1274 | intel_modeset_init(dev); | ||
1275 | |||
1276 | drm_helper_initial_config(dev); | 1397 | drm_helper_initial_config(dev); |
1277 | 1398 | ||
1278 | return 0; | 1399 | return 0; |
@@ -1360,7 +1481,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1360 | { | 1481 | { |
1361 | struct drm_i915_private *dev_priv = dev->dev_private; | 1482 | struct drm_i915_private *dev_priv = dev->dev_private; |
1362 | resource_size_t base, size; | 1483 | resource_size_t base, size; |
1363 | int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; | 1484 | int ret = 0, mmio_bar; |
1364 | uint32_t agp_size, prealloc_size, prealloc_start; | 1485 | uint32_t agp_size, prealloc_size, prealloc_start; |
1365 | 1486 | ||
1366 | /* i915 has 4 more counters */ | 1487 | /* i915 has 4 more counters */ |
@@ -1376,8 +1497,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1376 | 1497 | ||
1377 | dev->dev_private = (void *)dev_priv; | 1498 | dev->dev_private = (void *)dev_priv; |
1378 | dev_priv->dev = dev; | 1499 | dev_priv->dev = dev; |
1500 | dev_priv->info = (struct intel_device_info *) flags; | ||
1379 | 1501 | ||
1380 | /* Add register map (needed for suspend/resume) */ | 1502 | /* Add register map (needed for suspend/resume) */ |
1503 | mmio_bar = IS_I9XX(dev) ? 0 : 1; | ||
1381 | base = drm_get_resource_start(dev, mmio_bar); | 1504 | base = drm_get_resource_start(dev, mmio_bar); |
1382 | size = drm_get_resource_len(dev, mmio_bar); | 1505 | size = drm_get_resource_len(dev, mmio_bar); |
1383 | 1506 | ||
@@ -1445,6 +1568,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1445 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | 1568 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
1446 | } | 1569 | } |
1447 | 1570 | ||
1571 | /* Try to make sure MCHBAR is enabled before poking at it */ | ||
1572 | intel_setup_mchbar(dev); | ||
1573 | |||
1448 | i915_gem_load(dev); | 1574 | i915_gem_load(dev); |
1449 | 1575 | ||
1450 | /* Init HWS */ | 1576 | /* Init HWS */ |
@@ -1518,6 +1644,8 @@ int i915_driver_unload(struct drm_device *dev) | |||
1518 | { | 1644 | { |
1519 | struct drm_i915_private *dev_priv = dev->dev_private; | 1645 | struct drm_i915_private *dev_priv = dev->dev_private; |
1520 | 1646 | ||
1647 | i915_destroy_error_state(dev); | ||
1648 | |||
1521 | destroy_workqueue(dev_priv->wq); | 1649 | destroy_workqueue(dev_priv->wq); |
1522 | del_timer_sync(&dev_priv->hangcheck_timer); | 1650 | del_timer_sync(&dev_priv->hangcheck_timer); |
1523 | 1651 | ||
@@ -1564,6 +1692,8 @@ int i915_driver_unload(struct drm_device *dev) | |||
1564 | intel_cleanup_overlay(dev); | 1692 | intel_cleanup_overlay(dev); |
1565 | } | 1693 | } |
1566 | 1694 | ||
1695 | intel_teardown_mchbar(dev); | ||
1696 | |||
1567 | pci_dev_put(dev_priv->bridge_dev); | 1697 | pci_dev_put(dev_priv->bridge_dev); |
1568 | kfree(dev->dev_private); | 1698 | kfree(dev->dev_private); |
1569 | 1699 | ||
@@ -1652,6 +1782,7 @@ struct drm_ioctl_desc i915_ioctls[] = { | |||
1652 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1782 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1653 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1783 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1654 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | 1784 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), |
1785 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH), | ||
1655 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | 1786 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
1656 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | 1787 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
1657 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), | 1788 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), |
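DRM_I915_GEM_EXECBUFFER2 is registered beside the legacy entry rather than replacing it, and it carries plain DRM_AUTH, so existing userspace keeps working unchanged. The i915_gem.c hunks at the bottom of this diff rename the internals to the *2 structures and introduce a shared worker; a hedged sketch of the shape this sets up, with copy-out and error handling elided (the real conversion code sits below the visible part of this excerpt):

    int i915_gem_execbuffer2(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
    {
            struct drm_i915_gem_execbuffer2 *args = data;
            struct drm_i915_gem_exec_object2 *exec2_list;

            exec2_list = drm_calloc_large(args->buffer_count,
                                          sizeof(*exec2_list));
            /* copy_from_user() from args->buffers_ptr, then: */
            return i915_gem_do_execbuffer(dev, data, file_priv,
                                          args, exec2_list);
    }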
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 24286ca168fc..742bd8f738ca 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
34 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
35 | 35 | ||
36 | #include "drm_pciids.h" | ||
37 | #include <linux/console.h> | 36 | #include <linux/console.h> |
38 | #include "drm_crtc_helper.h" | 37 | #include "drm_crtc_helper.h" |
39 | 38 | ||
@@ -46,36 +45,149 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | |||
46 | unsigned int i915_powersave = 1; | 45 | unsigned int i915_powersave = 1; |
47 | module_param_named(powersave, i915_powersave, int, 0400); | 46 | module_param_named(powersave, i915_powersave, int, 0400); |
48 | 47 | ||
48 | unsigned int i915_lvds_downclock = 0; | ||
49 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | ||
50 | |||
49 | static struct drm_driver driver; | 51 | static struct drm_driver driver; |
50 | 52 | ||
51 | static struct pci_device_id pciidlist[] = { | 53 | #define INTEL_VGA_DEVICE(id, info) { \ |
52 | i915_PCI_IDS | 54 | .class = PCI_CLASS_DISPLAY_VGA << 8, \ |
55 | .class_mask = 0xffff00, \ | ||
56 | .vendor = 0x8086, \ | ||
57 | .device = id, \ | ||
58 | .subvendor = PCI_ANY_ID, \ | ||
59 | .subdevice = PCI_ANY_ID, \ | ||
60 | .driver_data = (unsigned long) info } | ||
61 | |||
62 | const static struct intel_device_info intel_i830_info = { | ||
63 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | ||
64 | }; | ||
65 | |||
66 | const static struct intel_device_info intel_845g_info = { | ||
67 | .is_i8xx = 1, | ||
68 | }; | ||
69 | |||
70 | const static struct intel_device_info intel_i85x_info = { | ||
71 | .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, | ||
72 | }; | ||
73 | |||
74 | const static struct intel_device_info intel_i865g_info = { | ||
75 | .is_i8xx = 1, | ||
76 | }; | ||
77 | |||
78 | const static struct intel_device_info intel_i915g_info = { | ||
79 | .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, | ||
80 | }; | ||
81 | const static struct intel_device_info intel_i915gm_info = { | ||
82 | .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, | ||
83 | .cursor_needs_physical = 1, | ||
84 | }; | ||
85 | const static struct intel_device_info intel_i945g_info = { | ||
86 | .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, | ||
87 | }; | ||
88 | const static struct intel_device_info intel_i945gm_info = { | ||
89 | .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, | ||
90 | .has_hotplug = 1, .cursor_needs_physical = 1, | ||
91 | }; | ||
92 | |||
93 | const static struct intel_device_info intel_i965g_info = { | ||
94 | .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, | ||
95 | }; | ||
96 | |||
97 | const static struct intel_device_info intel_i965gm_info = { | ||
98 | .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, | ||
99 | .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, | ||
100 | .has_hotplug = 1, | ||
101 | }; | ||
102 | |||
103 | const static struct intel_device_info intel_g33_info = { | ||
104 | .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, | ||
105 | .has_hotplug = 1, | ||
106 | }; | ||
107 | |||
108 | const static struct intel_device_info intel_g45_info = { | ||
109 | .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, | ||
110 | .has_pipe_cxsr = 1, | ||
111 | .has_hotplug = 1, | ||
112 | }; | ||
113 | |||
114 | const static struct intel_device_info intel_gm45_info = { | ||
115 | .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, | ||
116 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, | ||
117 | .has_pipe_cxsr = 1, | ||
118 | .has_hotplug = 1, | ||
119 | }; | ||
120 | |||
121 | const static struct intel_device_info intel_pineview_info = { | ||
122 | .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, | ||
123 | .need_gfx_hws = 1, | ||
124 | .has_hotplug = 1, | ||
125 | }; | ||
126 | |||
127 | const static struct intel_device_info intel_ironlake_d_info = { | ||
128 | .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, | ||
129 | .has_pipe_cxsr = 1, | ||
130 | .has_hotplug = 1, | ||
131 | }; | ||
132 | |||
133 | const static struct intel_device_info intel_ironlake_m_info = { | ||
134 | .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, | ||
135 | .need_gfx_hws = 1, .has_rc6 = 1, | ||
136 | .has_hotplug = 1, | ||
137 | }; | ||
138 | |||
139 | const static struct pci_device_id pciidlist[] = { | ||
140 | INTEL_VGA_DEVICE(0x3577, &intel_i830_info), | ||
141 | INTEL_VGA_DEVICE(0x2562, &intel_845g_info), | ||
142 | INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), | ||
143 | INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), | ||
144 | INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), | ||
145 | INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), | ||
146 | INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), | ||
147 | INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), | ||
148 | INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), | ||
149 | INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), | ||
150 | INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), | ||
151 | INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), | ||
152 | INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), | ||
153 | INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), | ||
154 | INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), | ||
155 | INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), | ||
156 | INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), | ||
157 | INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), | ||
158 | INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), | ||
159 | INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), | ||
160 | INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), | ||
161 | INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), | ||
162 | INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), | ||
163 | INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), | ||
164 | INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), | ||
165 | INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), | ||
166 | INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), | ||
167 | INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), | ||
168 | INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), | ||
169 | INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), | ||
170 | {0, 0, 0} | ||
53 | }; | 171 | }; |
54 | 172 | ||
55 | #if defined(CONFIG_DRM_I915_KMS) | 173 | #if defined(CONFIG_DRM_I915_KMS) |
56 | MODULE_DEVICE_TABLE(pci, pciidlist); | 174 | MODULE_DEVICE_TABLE(pci, pciidlist); |
57 | #endif | 175 | #endif |
58 | 176 | ||
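Each INTEL_VGA_DEVICE() entry stores a pointer to its intel_device_info in driver_data, and the DRM PCI glue hands that value through to i915_driver_load() as the flags argument used in the i915_dma.c hunk above; the {0, 0, 0} entry terminates the table. In a plain pci_driver the same retrieval would look like this (sketch only; probe_sketch is not a function from this patch):

    static int probe_sketch(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
    {
            const struct intel_device_info *info =
                    (const struct intel_device_info *) ent->driver_data;

            if (info->is_mobile) {
                    /* mobile-only setup would go here */
            }
            return 0;
    }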
59 | static int i915_suspend(struct drm_device *dev, pm_message_t state) | 177 | static int i915_drm_freeze(struct drm_device *dev) |
60 | { | 178 | { |
61 | struct drm_i915_private *dev_priv = dev->dev_private; | 179 | struct drm_i915_private *dev_priv = dev->dev_private; |
62 | 180 | ||
63 | if (!dev || !dev_priv) { | ||
64 | DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv); | ||
65 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | ||
66 | return -ENODEV; | ||
67 | } | ||
68 | |||
69 | if (state.event == PM_EVENT_PRETHAW) | ||
70 | return 0; | ||
71 | |||
72 | pci_save_state(dev->pdev); | 181 | pci_save_state(dev->pdev); |
73 | 182 | ||
74 | /* If KMS is active, we do the leavevt stuff here */ | 183 | /* If KMS is active, we do the leavevt stuff here */ |
75 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 184 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
76 | if (i915_gem_idle(dev)) | 185 | int error = i915_gem_idle(dev); |
186 | if (error) { | ||
77 | dev_err(&dev->pdev->dev, | 187 | dev_err(&dev->pdev->dev, |
78 | "GEM idle failed, resume may fail\n"); | 188 | "GEM idle failed, resume might fail\n"); |
189 | return error; | ||
190 | } | ||
79 | drm_irq_uninstall(dev); | 191 | drm_irq_uninstall(dev); |
80 | } | 192 | } |
81 | 193 | ||
@@ -83,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
83 | 195 | ||
84 | intel_opregion_free(dev, 1); | 196 | intel_opregion_free(dev, 1); |
85 | 197 | ||
198 | /* Modeset on resume, not lid events */ | ||
199 | dev_priv->modeset_on_lid = 0; | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int i915_suspend(struct drm_device *dev, pm_message_t state) | ||
205 | { | ||
206 | int error; | ||
207 | |||
208 | if (!dev || !dev->dev_private) { | ||
209 | DRM_ERROR("dev: %p\n", dev); | ||
210 | DRM_ERROR("DRM not initialized, aborting suspend.\n"); | ||
211 | return -ENODEV; | ||
212 | } | ||
213 | |||
214 | if (state.event == PM_EVENT_PRETHAW) | ||
215 | return 0; | ||
216 | |||
217 | error = i915_drm_freeze(dev); | ||
218 | if (error) | ||
219 | return error; | ||
220 | |||
86 | if (state.event == PM_EVENT_SUSPEND) { | 221 | if (state.event == PM_EVENT_SUSPEND) { |
87 | /* Shut down the device */ | 222 | /* Shut down the device */ |
88 | pci_disable_device(dev->pdev); | 223 | pci_disable_device(dev->pdev); |
89 | pci_set_power_state(dev->pdev, PCI_D3hot); | 224 | pci_set_power_state(dev->pdev, PCI_D3hot); |
90 | } | 225 | } |
91 | 226 | ||
92 | /* Modeset on resume, not lid events */ | ||
93 | dev_priv->modeset_on_lid = 0; | ||
94 | |||
95 | return 0; | 227 | return 0; |
96 | } | 228 | } |
97 | 229 | ||
98 | static int i915_resume(struct drm_device *dev) | 230 | static int i915_drm_thaw(struct drm_device *dev) |
99 | { | 231 | { |
100 | struct drm_i915_private *dev_priv = dev->dev_private; | 232 | struct drm_i915_private *dev_priv = dev->dev_private; |
101 | int ret = 0; | 233 | int error = 0; |
102 | |||
103 | if (pci_enable_device(dev->pdev)) | ||
104 | return -1; | ||
105 | pci_set_master(dev->pdev); | ||
106 | 234 | ||
107 | i915_restore_state(dev); | 235 | i915_restore_state(dev); |
108 | 236 | ||
@@ -113,21 +241,28 @@ static int i915_resume(struct drm_device *dev) | |||
113 | mutex_lock(&dev->struct_mutex); | 241 | mutex_lock(&dev->struct_mutex); |
114 | dev_priv->mm.suspended = 0; | 242 | dev_priv->mm.suspended = 0; |
115 | 243 | ||
116 | ret = i915_gem_init_ringbuffer(dev); | 244 | error = i915_gem_init_ringbuffer(dev); |
117 | if (ret != 0) | ||
118 | ret = -1; | ||
119 | mutex_unlock(&dev->struct_mutex); | 245 | mutex_unlock(&dev->struct_mutex); |
120 | 246 | ||
121 | drm_irq_install(dev); | 247 | drm_irq_install(dev); |
122 | } | 248 | |
123 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
124 | /* Resume the modeset for every activated CRTC */ | 249 | /* Resume the modeset for every activated CRTC */ |
125 | drm_helper_resume_force_mode(dev); | 250 | drm_helper_resume_force_mode(dev); |
126 | } | 251 | } |
127 | 252 | ||
128 | dev_priv->modeset_on_lid = 0; | 253 | dev_priv->modeset_on_lid = 0; |
129 | 254 | ||
130 | return ret; | 255 | return error; |
256 | } | ||
257 | |||
258 | static int i915_resume(struct drm_device *dev) | ||
259 | { | ||
260 | if (pci_enable_device(dev->pdev)) | ||
261 | return -EIO; | ||
262 | |||
263 | pci_set_master(dev->pdev); | ||
264 | |||
265 | return i915_drm_thaw(dev); | ||
131 | } | 266 | } |
132 | 267 | ||
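The old monolithic suspend/resume pair is split into reusable workers so the hibernation callbacks added below can share them. The resulting call graph, names as in this diff:

    /*
     *   i915_suspend(dev, state)                legacy entry point
     *     -> i915_drm_freeze(dev)               save state, idle GEM,
     *                                           uninstall IRQs
     *     -> pci_disable_device() + D3hot       only for PM_EVENT_SUSPEND
     *
     *   i915_resume(dev)
     *     -> pci_enable_device() + pci_set_master()
     *     -> i915_drm_thaw(dev)                 restore state, re-init the
     *                                           ring, reinstall IRQs, force
     *                                           a modeset on every CRTC
     */

Note the error paths also tighten up: pci_enable_device() failure now returns -EIO instead of the old bare -1, and a ringbuffer init failure propagates instead of being flattened to -1.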
133 | /** | 268 | /** |
@@ -268,22 +403,73 @@ i915_pci_remove(struct pci_dev *pdev) | |||
268 | drm_put_dev(dev); | 403 | drm_put_dev(dev); |
269 | } | 404 | } |
270 | 405 | ||
271 | static int | 406 | static int i915_pm_suspend(struct device *dev) |
272 | i915_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
273 | { | 407 | { |
274 | struct drm_device *dev = pci_get_drvdata(pdev); | 408 | struct pci_dev *pdev = to_pci_dev(dev); |
409 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
410 | int error; | ||
411 | |||
412 | if (!drm_dev || !drm_dev->dev_private) { | ||
413 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); | ||
414 | return -ENODEV; | ||
415 | } | ||
275 | 416 | ||
276 | return i915_suspend(dev, state); | 417 | error = i915_drm_freeze(drm_dev); |
418 | if (error) | ||
419 | return error; | ||
420 | |||
421 | pci_disable_device(pdev); | ||
422 | pci_set_power_state(pdev, PCI_D3hot); | ||
423 | |||
424 | return 0; | ||
277 | } | 425 | } |
278 | 426 | ||
279 | static int | 427 | static int i915_pm_resume(struct device *dev) |
280 | i915_pci_resume(struct pci_dev *pdev) | ||
281 | { | 428 | { |
282 | struct drm_device *dev = pci_get_drvdata(pdev); | 429 | struct pci_dev *pdev = to_pci_dev(dev); |
430 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
431 | |||
432 | return i915_resume(drm_dev); | ||
433 | } | ||
434 | |||
435 | static int i915_pm_freeze(struct device *dev) | ||
436 | { | ||
437 | struct pci_dev *pdev = to_pci_dev(dev); | ||
438 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
439 | |||
440 | if (!drm_dev || !drm_dev->dev_private) { | ||
441 | dev_err(dev, "DRM not initialized, aborting suspend.\n"); | ||
442 | return -ENODEV; | ||
443 | } | ||
444 | |||
445 | return i915_drm_freeze(drm_dev); | ||
446 | } | ||
283 | 447 | ||
284 | return i915_resume(dev); | 448 | static int i915_pm_thaw(struct device *dev) |
449 | { | ||
450 | struct pci_dev *pdev = to_pci_dev(dev); | ||
451 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
452 | |||
453 | return i915_drm_thaw(drm_dev); | ||
285 | } | 454 | } |
286 | 455 | ||
456 | static int i915_pm_poweroff(struct device *dev) | ||
457 | { | ||
458 | struct pci_dev *pdev = to_pci_dev(dev); | ||
459 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | ||
460 | |||
461 | return i915_drm_freeze(drm_dev); | ||
462 | } | ||
463 | |||
464 | const struct dev_pm_ops i915_pm_ops = { | ||
465 | .suspend = i915_pm_suspend, | ||
466 | .resume = i915_pm_resume, | ||
467 | .freeze = i915_pm_freeze, | ||
468 | .thaw = i915_pm_thaw, | ||
469 | .poweroff = i915_pm_poweroff, | ||
470 | .restore = i915_pm_resume, | ||
471 | }; | ||
472 | |||
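Six dev_pm_ops slots map onto just the two workers plus the PCI power plumbing: hibernation's poweroff phase is a plain freeze, since the image has already been written by then, and restore reuses the resume path wholesale. Giving each phase its own callback is also what lets the pm_message_t plumbing disappear from the KMS path; only the legacy i915_suspend() entry still inspects state.event.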
287 | static struct vm_operations_struct i915_gem_vm_ops = { | 473 | static struct vm_operations_struct i915_gem_vm_ops = { |
288 | .fault = i915_gem_fault, | 474 | .fault = i915_gem_fault, |
289 | .open = drm_gem_vm_open, | 475 | .open = drm_gem_vm_open, |
@@ -303,8 +489,11 @@ static struct drm_driver driver = { | |||
303 | .lastclose = i915_driver_lastclose, | 489 | .lastclose = i915_driver_lastclose, |
304 | .preclose = i915_driver_preclose, | 490 | .preclose = i915_driver_preclose, |
305 | .postclose = i915_driver_postclose, | 491 | .postclose = i915_driver_postclose, |
492 | |||
493 | /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ | ||
306 | .suspend = i915_suspend, | 494 | .suspend = i915_suspend, |
307 | .resume = i915_resume, | 495 | .resume = i915_resume, |
496 | |||
308 | .device_is_agp = i915_driver_device_is_agp, | 497 | .device_is_agp = i915_driver_device_is_agp, |
309 | .enable_vblank = i915_enable_vblank, | 498 | .enable_vblank = i915_enable_vblank, |
310 | .disable_vblank = i915_disable_vblank, | 499 | .disable_vblank = i915_disable_vblank, |
@@ -344,10 +533,7 @@ static struct drm_driver driver = { | |||
344 | .id_table = pciidlist, | 533 | .id_table = pciidlist, |
345 | .probe = i915_pci_probe, | 534 | .probe = i915_pci_probe, |
346 | .remove = i915_pci_remove, | 535 | .remove = i915_pci_remove, |
347 | #ifdef CONFIG_PM | 536 | .driver.pm = &i915_pm_ops, |
348 | .resume = i915_pci_resume, | ||
349 | .suspend = i915_pci_suspend, | ||
350 | #endif | ||
351 | }, | 537 | }, |
352 | 538 | ||
353 | .name = DRIVER_NAME, | 539 | .name = DRIVER_NAME, |
@@ -385,6 +571,11 @@ static int __init i915_init(void) | |||
385 | driver.driver_features &= ~DRIVER_MODESET; | 571 | driver.driver_features &= ~DRIVER_MODESET; |
386 | #endif | 572 | #endif |
387 | 573 | ||
574 | if (!(driver.driver_features & DRIVER_MODESET)) { | ||
575 | driver.suspend = i915_suspend; | ||
576 | driver.resume = i915_resume; | ||
577 | } | ||
578 | |||
388 | return drm_init(&driver); | 579 | return drm_init(&driver); |
389 | } | 580 | } |
390 | 581 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fbecac72f5bb..ec06d4865a5f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -150,7 +150,27 @@ struct drm_i915_error_state { | |||
150 | u32 instps; | 150 | u32 instps; |
151 | u32 instdone1; | 151 | u32 instdone1; |
152 | u32 seqno; | 152 | u32 seqno; |
153 | u64 bbaddr; | ||
153 | struct timeval time; | 154 | struct timeval time; |
155 | struct drm_i915_error_object { | ||
156 | int page_count; | ||
157 | u32 gtt_offset; | ||
158 | u32 *pages[0]; | ||
159 | } *ringbuffer, *batchbuffer[2]; | ||
160 | struct drm_i915_error_buffer { | ||
161 | size_t size; | ||
162 | u32 name; | ||
163 | u32 seqno; | ||
164 | u32 gtt_offset; | ||
165 | u32 read_domains; | ||
166 | u32 write_domain; | ||
167 | u32 fence_reg; | ||
168 | s32 pinned:2; | ||
169 | u32 tiling:2; | ||
170 | u32 dirty:1; | ||
171 | u32 purgeable:1; | ||
172 | } *active_bo; | ||
173 | u32 active_bo_count; | ||
154 | }; | 174 | }; |
155 | 175 | ||
156 | struct drm_i915_display_funcs { | 176 | struct drm_i915_display_funcs { |
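The enlarged error state snapshots actual buffer contents at hang time: drm_i915_error_object copies the ringbuffer and up to two batchbuffers page by page, and active_bo records metadata for every active object. u32 *pages[0] is the old-style flexible array member, so header and page-pointer array come from one allocation; a sketch of the sizing, assuming capture runs from the error-interrupt path and therefore cannot sleep:

    static struct drm_i915_error_object *
    error_object_alloc_sketch(int page_count)
    {
            struct drm_i915_error_object *dst;

            /* one allocation: header plus page_count page pointers */
            dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *),
                          GFP_ATOMIC);
            if (dst == NULL)
                    return NULL;
            dst->page_count = page_count;
            return dst;
    }

The i915_destroy_error_state() call added to i915_driver_unload() in the i915_dma.c hunk above is the matching teardown for whatever such a capture left behind.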
@@ -172,9 +192,39 @@ struct drm_i915_display_funcs { | |||
172 | 192 | ||
173 | struct intel_overlay; | 193 | struct intel_overlay; |
174 | 194 | ||
195 | struct intel_device_info { | ||
196 | u8 is_mobile : 1; | ||
197 | u8 is_i8xx : 1; | ||
198 | u8 is_i915g : 1; | ||
199 | u8 is_i9xx : 1; | ||
200 | u8 is_i945gm : 1; | ||
201 | u8 is_i965g : 1; | ||
202 | u8 is_i965gm : 1; | ||
203 | u8 is_g33 : 1; | ||
204 | u8 need_gfx_hws : 1; | ||
205 | u8 is_g4x : 1; | ||
206 | u8 is_pineview : 1; | ||
207 | u8 is_ironlake : 1; | ||
208 | u8 has_fbc : 1; | ||
209 | u8 has_rc6 : 1; | ||
210 | u8 has_pipe_cxsr : 1; | ||
211 | u8 has_hotplug : 1; | ||
212 | u8 cursor_needs_physical : 1; | ||
213 | }; | ||
214 | |||
215 | enum no_fbc_reason { | ||
216 | FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ | ||
217 | FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ | ||
218 | FBC_MODE_TOO_LARGE, /* mode too large for compression */ | ||
219 | FBC_BAD_PLANE, /* fbc not supported on plane */ | ||
220 | FBC_NOT_TILED, /* buffer not tiled */ | ||
221 | }; | ||
222 | |||
175 | typedef struct drm_i915_private { | 223 | typedef struct drm_i915_private { |
176 | struct drm_device *dev; | 224 | struct drm_device *dev; |
177 | 225 | ||
226 | const struct intel_device_info *info; | ||
227 | |||
178 | int has_gem; | 228 | int has_gem; |
179 | 229 | ||
180 | void __iomem *regs; | 230 | void __iomem *regs; |
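Chip identity becomes data instead of a macro maze: seventeen one-bit fields describe a platform and dev_priv keeps a const pointer to the matching descriptor for the life of the device. Adding support for a new part is reduced to one descriptor and its PCI-ID table lines; a purely hypothetical example (the id and name here are made up):

    const static struct intel_device_info intel_example_info = {
            .is_i9xx = 1, .need_gfx_hws = 1, .has_hotplug = 1,
    };

    /* plus one line in pciidlist (i915_drv.c above):
     *      INTEL_VGA_DEVICE(0x9999, &intel_example_info),
     */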
@@ -232,8 +282,6 @@ typedef struct drm_i915_private { | |||
232 | int hangcheck_count; | 282 | int hangcheck_count; |
233 | uint32_t last_acthd; | 283 | uint32_t last_acthd; |
234 | 284 | ||
235 | bool cursor_needs_physical; | ||
236 | |||
237 | struct drm_mm vram; | 285 | struct drm_mm vram; |
238 | 286 | ||
239 | unsigned long cfb_size; | 287 | unsigned long cfb_size; |
@@ -263,6 +311,7 @@ typedef struct drm_i915_private { | |||
263 | unsigned int lvds_use_ssc:1; | 311 | unsigned int lvds_use_ssc:1; |
264 | unsigned int edp_support:1; | 312 | unsigned int edp_support:1; |
265 | int lvds_ssc_freq; | 313 | int lvds_ssc_freq; |
314 | int edp_bpp; | ||
266 | 315 | ||
267 | struct notifier_block lid_notifier; | 316 | struct notifier_block lid_notifier; |
268 | 317 | ||
@@ -287,8 +336,6 @@ typedef struct drm_i915_private { | |||
287 | u32 saveDSPACNTR; | 336 | u32 saveDSPACNTR; |
288 | u32 saveDSPBCNTR; | 337 | u32 saveDSPBCNTR; |
289 | u32 saveDSPARB; | 338 | u32 saveDSPARB; |
290 | u32 saveRENDERSTANDBY; | ||
291 | u32 savePWRCTXA; | ||
292 | u32 saveHWS; | 339 | u32 saveHWS; |
293 | u32 savePIPEACONF; | 340 | u32 savePIPEACONF; |
294 | u32 savePIPEBCONF; | 341 | u32 savePIPEBCONF; |
@@ -433,6 +480,7 @@ typedef struct drm_i915_private { | |||
433 | u32 savePIPEB_DATA_N1; | 480 | u32 savePIPEB_DATA_N1; |
434 | u32 savePIPEB_LINK_M1; | 481 | u32 savePIPEB_LINK_M1; |
435 | u32 savePIPEB_LINK_N1; | 482 | u32 savePIPEB_LINK_N1; |
483 | u32 saveMCHBAR_RENDER_STANDBY; | ||
436 | 484 | ||
437 | struct { | 485 | struct { |
438 | struct drm_mm gtt_space; | 486 | struct drm_mm gtt_space; |
@@ -474,6 +522,15 @@ typedef struct drm_i915_private { | |||
474 | struct list_head flushing_list; | 522 | struct list_head flushing_list; |
475 | 523 | ||
476 | /** | 524 | /** |
525 | * List of objects currently pending a GPU write flush. | ||
526 | * | ||
527 | * All elements on this list will belong to either the | ||
528 | * active_list or flushing_list, last_rendering_seqno can | ||
529 | * be used to differentiate between the two elements. | ||
530 | */ | ||
531 | struct list_head gpu_write_list; | ||
532 | |||
533 | /** | ||
477 | * LRU list of objects which are not in the ringbuffer and | 534 | * LRU list of objects which are not in the ringbuffer and |
478 | * are ready to unbind, but are still in the GTT. | 535 | * are ready to unbind, but are still in the GTT. |
479 | * | 536 | * |
@@ -561,6 +618,15 @@ typedef struct drm_i915_private { | |||
561 | u16 orig_clock; | 618 | u16 orig_clock; |
562 | int child_dev_num; | 619 | int child_dev_num; |
563 | struct child_device_config *child_dev; | 620 | struct child_device_config *child_dev; |
621 | struct drm_connector *int_lvds_connector; | ||
622 | |||
623 | bool mchbar_need_disable; | ||
624 | |||
625 | u8 cur_delay; | ||
626 | u8 min_delay; | ||
627 | u8 max_delay; | ||
628 | |||
629 | enum no_fbc_reason no_fbc_reason; | ||
564 | } drm_i915_private_t; | 630 | } drm_i915_private_t; |
565 | 631 | ||
566 | /** driver private structure attached to each drm_gem_object */ | 632 | /** driver private structure attached to each drm_gem_object */ |
@@ -572,6 +638,8 @@ struct drm_i915_gem_object { | |||
572 | 638 | ||
573 | /** This object's place on the active/flushing/inactive lists */ | 639 | /** This object's place on the active/flushing/inactive lists */ |
574 | struct list_head list; | 640 | struct list_head list; |
641 | /** This object's place on GPU write list */ | ||
642 | struct list_head gpu_write_list; | ||
575 | 643 | ||
576 | /** This object's place on the fenced object LRU */ | 644 | /** This object's place on the fenced object LRU */ |
577 | struct list_head fence_list; | 645 | struct list_head fence_list; |
@@ -703,6 +771,7 @@ extern struct drm_ioctl_desc i915_ioctls[]; | |||
703 | extern int i915_max_ioctl; | 771 | extern int i915_max_ioctl; |
704 | extern unsigned int i915_fbpercrtc; | 772 | extern unsigned int i915_fbpercrtc; |
705 | extern unsigned int i915_powersave; | 773 | extern unsigned int i915_powersave; |
774 | extern unsigned int i915_lvds_downclock; | ||
706 | 775 | ||
707 | extern void i915_save_display(struct drm_device *dev); | 776 | extern void i915_save_display(struct drm_device *dev); |
708 | extern void i915_restore_display(struct drm_device *dev); | 777 | extern void i915_restore_display(struct drm_device *dev); |
@@ -729,6 +798,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags); | |||
729 | 798 | ||
730 | /* i915_irq.c */ | 799 | /* i915_irq.c */ |
731 | void i915_hangcheck_elapsed(unsigned long data); | 800 | void i915_hangcheck_elapsed(unsigned long data); |
801 | void i915_destroy_error_state(struct drm_device *dev); | ||
732 | extern int i915_irq_emit(struct drm_device *dev, void *data, | 802 | extern int i915_irq_emit(struct drm_device *dev, void *data, |
733 | struct drm_file *file_priv); | 803 | struct drm_file *file_priv); |
734 | extern int i915_irq_wait(struct drm_device *dev, void *data, | 804 | extern int i915_irq_wait(struct drm_device *dev, void *data, |
@@ -794,6 +864,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
794 | struct drm_file *file_priv); | 864 | struct drm_file *file_priv); |
795 | int i915_gem_execbuffer(struct drm_device *dev, void *data, | 865 | int i915_gem_execbuffer(struct drm_device *dev, void *data, |
796 | struct drm_file *file_priv); | 866 | struct drm_file *file_priv); |
867 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, | ||
868 | struct drm_file *file_priv); | ||
797 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, | 869 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
798 | struct drm_file *file_priv); | 870 | struct drm_file *file_priv); |
799 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | 871 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
@@ -843,12 +915,13 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptib | |||
843 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 915 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
844 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | 916 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, |
845 | int write); | 917 | int write); |
918 | int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); | ||
846 | int i915_gem_attach_phys_object(struct drm_device *dev, | 919 | int i915_gem_attach_phys_object(struct drm_device *dev, |
847 | struct drm_gem_object *obj, int id); | 920 | struct drm_gem_object *obj, int id); |
848 | void i915_gem_detach_phys_object(struct drm_device *dev, | 921 | void i915_gem_detach_phys_object(struct drm_device *dev, |
849 | struct drm_gem_object *obj); | 922 | struct drm_gem_object *obj); |
850 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 923 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
851 | int i915_gem_object_get_pages(struct drm_gem_object *obj); | 924 | int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); |
852 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | 925 | void i915_gem_object_put_pages(struct drm_gem_object *obj); |
853 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | 926 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); |
854 | void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); | 927 | void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); |
@@ -860,6 +933,10 @@ void i915_gem_shrinker_exit(void); | |||
860 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 933 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
861 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | 934 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); |
862 | void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); | 935 | void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); |
936 | bool i915_tiling_ok(struct drm_device *dev, int stride, int size, | ||
937 | int tiling_mode); | ||
938 | bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, | ||
939 | int tiling_mode); | ||
863 | 940 | ||
864 | /* i915_gem_debug.c */ | 941 | /* i915_gem_debug.c */ |
865 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, | 942 | void i915_gem_dump_object(struct drm_gem_object *obj, int len, |
@@ -982,67 +1059,33 @@ extern void g4x_disable_fbc(struct drm_device *dev); | |||
982 | extern int i915_wrap_ring(struct drm_device * dev); | 1059 | extern int i915_wrap_ring(struct drm_device * dev); |
983 | extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | 1060 | extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); |
984 | 1061 | ||
985 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) | 1062 | #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) |
986 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) | 1063 | |
987 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) | 1064 | #define IS_I830(dev) ((dev)->pci_device == 0x3577) |
988 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | 1065 | #define IS_845G(dev) ((dev)->pci_device == 0x2562) |
989 | #define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) | 1066 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) |
990 | 1067 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | |
991 | #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) | 1068 | #define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) |
992 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | 1069 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
993 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) | 1070 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) |
994 | #define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ | 1071 | #define IS_I945G(dev) ((dev)->pci_device == 0x2772) |
995 | (dev)->pci_device == 0x27AE) | 1072 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
996 | #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ | 1073 | #define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) |
997 | (dev)->pci_device == 0x2982 || \ | 1074 | #define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) |
998 | (dev)->pci_device == 0x2992 || \ | 1075 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) |
999 | (dev)->pci_device == 0x29A2 || \ | 1076 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
1000 | (dev)->pci_device == 0x2A02 || \ | 1077 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) |
1001 | (dev)->pci_device == 0x2A12 || \ | 1078 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) |
1002 | (dev)->pci_device == 0x2A42 || \ | 1079 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
1003 | (dev)->pci_device == 0x2E02 || \ | 1080 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) |
1004 | (dev)->pci_device == 0x2E12 || \ | ||
1005 | (dev)->pci_device == 0x2E22 || \ | ||
1006 | (dev)->pci_device == 0x2E32 || \ | ||
1007 | (dev)->pci_device == 0x2E42 || \ | ||
1008 | (dev)->pci_device == 0x0042 || \ | ||
1009 | (dev)->pci_device == 0x0046) | ||
1010 | |||
1011 | #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ | ||
1012 | (dev)->pci_device == 0x2A12) | ||
1013 | |||
1014 | #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) | ||
1015 | |||
1016 | #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ | ||
1017 | (dev)->pci_device == 0x2E12 || \ | ||
1018 | (dev)->pci_device == 0x2E22 || \ | ||
1019 | (dev)->pci_device == 0x2E32 || \ | ||
1020 | (dev)->pci_device == 0x2E42 || \ | ||
1021 | IS_GM45(dev)) | ||
1022 | |||
1023 | #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) | ||
1024 | #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) | ||
1025 | #define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev)) | ||
1026 | |||
1027 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ | ||
1028 | (dev)->pci_device == 0x29B2 || \ | ||
1029 | (dev)->pci_device == 0x29D2 || \ | ||
1030 | (IS_PINEVIEW(dev))) | ||
1031 | |||
1032 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) | 1081 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) |
1033 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | 1082 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
1034 | #define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev)) | 1083 | #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) |
1035 | 1084 | #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) | |
1036 | #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ | 1085 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
1037 | IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \ | ||
1038 | IS_IRONLAKE(dev)) | ||
1039 | 1086 | ||
1040 | #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ | 1087 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
1041 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ | ||
1042 | IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev)) | ||
1043 | 1088 | ||
1044 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \ | ||
1045 | IS_IRONLAKE(dev)) | ||
1046 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 1089 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
1047 | * rows, which changed the alignment requirements and fence programming. | 1090 | * rows, which changed the alignment requirements and fence programming. |
1048 | */ | 1091 | */ |
@@ -1054,17 +1097,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
1054 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) | 1097 | #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) |
1055 | #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ | 1098 | #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ |
1056 | !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) | 1099 | !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) |
1057 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) | 1100 | #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
1058 | /* dsparb controlled by hw only */ | 1101 | /* dsparb controlled by hw only */ |
1059 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1102 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) |
1060 | 1103 | ||
1061 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) | 1104 | #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) |
1062 | #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) | 1105 | #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
1063 | #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ | 1106 | #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
1064 | (IS_I9XX(dev) || IS_GM45(dev)) && \ | 1107 | #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) |
1065 | !IS_PINEVIEW(dev) && \ | ||
1066 | !IS_IRONLAKE(dev)) | ||
1067 | #define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev)) | ||
1068 | 1108 | ||
1069 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1109 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
1070 | 1110 | ||
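With the descriptor in place, the family macros (IS_I9XX, IS_MOBILE, IS_G4X, I915_HAS_FBC, ...) collapse from long PCI-ID chains into single bit tests through INTEL_INFO(); only the checks that name exactly one device, such as IS_I830, IS_GM45 and the Pineview/Ironlake D/M variants, stay ID-based. Extending the scheme now costs one bit plus one macro, e.g. for a hypothetical feature flag:

    /* in struct intel_device_info:  u8 has_foo : 1;  (made-up name) */
    #define I915_HAS_FOO(dev) (INTEL_INFO(dev)->has_foo)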
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c7f0cbec4e84..b5df30ca0fa2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
128 | return -ENOMEM; | 128 | return -ENOMEM; |
129 | 129 | ||
130 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 130 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
131 | mutex_lock(&dev->struct_mutex); | 131 | drm_gem_object_handle_unreference_unlocked(obj); |
132 | drm_gem_object_handle_unreference(obj); | ||
133 | mutex_unlock(&dev->struct_mutex); | ||
134 | 132 | ||
135 | if (ret) | 133 | if (ret) |
136 | return ret; | 134 | return ret; |
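The _unlocked unreference helpers fold the mutex_lock/mutex_unlock pair into the DRM core, and the same three-lines-to-one substitution repeats through the pread, pwrite and mmap hunks below. For reference, the helpers this leans on (inline in the DRM headers of this kernel generation):

    void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
    void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj);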
@@ -277,7 +275,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
277 | 275 | ||
278 | mutex_lock(&dev->struct_mutex); | 276 | mutex_lock(&dev->struct_mutex); |
279 | 277 | ||
280 | ret = i915_gem_object_get_pages(obj); | 278 | ret = i915_gem_object_get_pages(obj, 0); |
281 | if (ret != 0) | 279 | if (ret != 0) |
282 | goto fail_unlock; | 280 | goto fail_unlock; |
283 | 281 | ||
@@ -321,40 +319,24 @@ fail_unlock: | |||
321 | return ret; | 319 | return ret; |
322 | } | 320 | } |
323 | 321 | ||
324 | static inline gfp_t | ||
325 | i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) | ||
326 | { | ||
327 | return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); | ||
328 | } | ||
329 | |||
330 | static inline void | ||
331 | i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) | ||
332 | { | ||
333 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); | ||
334 | } | ||
335 | |||
336 | static int | 322 | static int |
337 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | 323 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) |
338 | { | 324 | { |
339 | int ret; | 325 | int ret; |
340 | 326 | ||
341 | ret = i915_gem_object_get_pages(obj); | 327 | ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); |
342 | 328 | ||
343 | /* If we've insufficient memory to map in the pages, attempt | 329 | /* If we've insufficient memory to map in the pages, attempt |
344 | * to make some space by throwing out some old buffers. | 330 | * to make some space by throwing out some old buffers. |
345 | */ | 331 | */ |
346 | if (ret == -ENOMEM) { | 332 | if (ret == -ENOMEM) { |
347 | struct drm_device *dev = obj->dev; | 333 | struct drm_device *dev = obj->dev; |
348 | gfp_t gfp; | ||
349 | 334 | ||
350 | ret = i915_gem_evict_something(dev, obj->size); | 335 | ret = i915_gem_evict_something(dev, obj->size); |
351 | if (ret) | 336 | if (ret) |
352 | return ret; | 337 | return ret; |
353 | 338 | ||
354 | gfp = i915_gem_object_get_page_gfp_mask(obj); | 339 | ret = i915_gem_object_get_pages(obj, 0); |
355 | i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); | ||
356 | ret = i915_gem_object_get_pages(obj); | ||
357 | i915_gem_object_set_page_gfp_mask (obj, gfp); | ||
358 | } | 340 | } |
359 | 341 | ||
360 | return ret; | 342 | return ret; |
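Passing the gfp mask per call replaces the old trick of temporarily rewriting the mapping's gfp mask, which was visible to every other allocator using the same mapping. The idiom, condensed: fail fast and quietly on the first attempt, make room, then retry with the mapping's defaults:

    ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
    if (ret == -ENOMEM) {
            ret = i915_gem_evict_something(dev, obj->size);
            if (ret)
                    return ret;
            ret = i915_gem_object_get_pages(obj, 0);  /* 0: no extra flags */
    }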
@@ -504,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
504 | */ | 486 | */ |
505 | if (args->offset > obj->size || args->size > obj->size || | 487 | if (args->offset > obj->size || args->size > obj->size || |
506 | args->offset + args->size > obj->size) { | 488 | args->offset + args->size > obj->size) { |
507 | drm_gem_object_unreference(obj); | 489 | drm_gem_object_unreference_unlocked(obj); |
508 | return -EINVAL; | 490 | return -EINVAL; |
509 | } | 491 | } |
510 | 492 | ||
@@ -517,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
517 | file_priv); | 499 | file_priv); |
518 | } | 500 | } |
519 | 501 | ||
520 | drm_gem_object_unreference(obj); | 502 | drm_gem_object_unreference_unlocked(obj); |
521 | 503 | ||
522 | return ret; | 504 | return ret; |
523 | } | 505 | } |
@@ -790,7 +772,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
790 | 772 | ||
791 | mutex_lock(&dev->struct_mutex); | 773 | mutex_lock(&dev->struct_mutex); |
792 | 774 | ||
793 | ret = i915_gem_object_get_pages(obj); | 775 | ret = i915_gem_object_get_pages(obj, 0); |
794 | if (ret != 0) | 776 | if (ret != 0) |
795 | goto fail_unlock; | 777 | goto fail_unlock; |
796 | 778 | ||
@@ -977,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
977 | */ | 959 | */ |
978 | if (args->offset > obj->size || args->size > obj->size || | 960 | if (args->offset > obj->size || args->size > obj->size || |
979 | args->offset + args->size > obj->size) { | 961 | args->offset + args->size > obj->size) { |
980 | drm_gem_object_unreference(obj); | 962 | drm_gem_object_unreference_unlocked(obj); |
981 | return -EINVAL; | 963 | return -EINVAL; |
982 | } | 964 | } |
983 | 965 | ||
@@ -1011,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1011 | DRM_INFO("pwrite failed %d\n", ret); | 993 | DRM_INFO("pwrite failed %d\n", ret); |
1012 | #endif | 994 | #endif |
1013 | 995 | ||
1014 | drm_gem_object_unreference(obj); | 996 | drm_gem_object_unreference_unlocked(obj); |
1015 | 997 | ||
1016 | return ret; | 998 | return ret; |
1017 | } | 999 | } |
@@ -1154,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1154 | PROT_READ | PROT_WRITE, MAP_SHARED, | 1136 | PROT_READ | PROT_WRITE, MAP_SHARED, |
1155 | args->offset); | 1137 | args->offset); |
1156 | up_write(¤t->mm->mmap_sem); | 1138 | up_write(¤t->mm->mmap_sem); |
1157 | mutex_lock(&dev->struct_mutex); | 1139 | drm_gem_object_unreference_unlocked(obj); |
1158 | drm_gem_object_unreference(obj); | ||
1159 | mutex_unlock(&dev->struct_mutex); | ||
1160 | if (IS_ERR((void *)addr)) | 1140 | if (IS_ERR((void *)addr)) |
1161 | return addr; | 1141 | return addr; |
1162 | 1142 | ||
@@ -1310,7 +1290,7 @@ out_free_list: | |||
1310 | * i915_gem_release_mmap - remove physical page mappings | 1290 | * i915_gem_release_mmap - remove physical page mappings |
1311 | * @obj: obj in question | 1291 | * @obj: obj in question |
1312 | * | 1292 | * |
1313 | * Preserve the reservation of the mmaping with the DRM core code, but | 1293 | * Preserve the reservation of the mmapping with the DRM core code, but |
1314 | * relinquish ownership of the pages back to the system. | 1294 | * relinquish ownership of the pages back to the system. |
1315 | * | 1295 | * |
1316 | * It is vital that we remove the page mapping if we have mapped a tiled | 1296 | * It is vital that we remove the page mapping if we have mapped a tiled |
@@ -1568,6 +1548,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1568 | else | 1548 | else |
1569 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1549 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); |
1570 | 1550 | ||
1551 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | ||
1552 | |||
1571 | obj_priv->last_rendering_seqno = 0; | 1553 | obj_priv->last_rendering_seqno = 0; |
1572 | if (obj_priv->active) { | 1554 | if (obj_priv->active) { |
1573 | obj_priv->active = 0; | 1555 | obj_priv->active = 0; |
@@ -1638,7 +1620,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1638 | struct drm_i915_gem_object *obj_priv, *next; | 1620 | struct drm_i915_gem_object *obj_priv, *next; |
1639 | 1621 | ||
1640 | list_for_each_entry_safe(obj_priv, next, | 1622 | list_for_each_entry_safe(obj_priv, next, |
1641 | &dev_priv->mm.flushing_list, list) { | 1623 | &dev_priv->mm.gpu_write_list, |
1624 | gpu_write_list) { | ||
1642 | struct drm_gem_object *obj = obj_priv->obj; | 1625 | struct drm_gem_object *obj = obj_priv->obj; |
1643 | 1626 | ||
1644 | if ((obj->write_domain & flush_domains) == | 1627 | if ((obj->write_domain & flush_domains) == |
@@ -1646,6 +1629,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1646 | uint32_t old_write_domain = obj->write_domain; | 1629 | uint32_t old_write_domain = obj->write_domain; |
1647 | 1630 | ||
1648 | obj->write_domain = 0; | 1631 | obj->write_domain = 0; |
1632 | list_del_init(&obj_priv->gpu_write_list); | ||
1649 | i915_gem_object_move_to_active(obj, seqno); | 1633 | i915_gem_object_move_to_active(obj, seqno); |
1650 | 1634 | ||
1651 | trace_i915_gem_object_change_domain(obj, | 1635 | trace_i915_gem_object_change_domain(obj, |
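i915_add_request() now walks the dedicated gpu_write_list instead of the whole flushing_list, touching only objects that actually had a pending GPU write, and detaches each entry with list_del_init(). The _init variant is what makes the new BUG_ON(!list_empty(&obj_priv->gpu_write_list)) in i915_gem_object_move_to_inactive() a valid assertion:

    list_del(&node);       /* next/prev become LIST_POISON*; list_empty()
                            * on the node would report false            */
    list_del_init(&node);  /* node is relinked to itself; list_empty()
                            * is true again                             */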
@@ -2021,9 +2005,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2021 | /* blow away mappings if mapped through GTT */ | 2005 | /* blow away mappings if mapped through GTT */ |
2022 | i915_gem_release_mmap(obj); | 2006 | i915_gem_release_mmap(obj); |
2023 | 2007 | ||
2024 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
2025 | i915_gem_clear_fence_reg(obj); | ||
2026 | |||
2027 | /* Move the object to the CPU domain to ensure that | 2008 | /* Move the object to the CPU domain to ensure that |
2028 | * any possible CPU writes while it's not in the GTT | 2009 | * any possible CPU writes while it's not in the GTT |
2029 | * are flushed when we go to remap it. This will | 2010 | * are flushed when we go to remap it. This will |
@@ -2039,6 +2020,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2039 | 2020 | ||
2040 | BUG_ON(obj_priv->active); | 2021 | BUG_ON(obj_priv->active); |
2041 | 2022 | ||
2023 | /* release the fence reg _after_ flushing */ | ||
2024 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
2025 | i915_gem_clear_fence_reg(obj); | ||
2026 | |||
2042 | if (obj_priv->agp_mem != NULL) { | 2027 | if (obj_priv->agp_mem != NULL) { |
2043 | drm_unbind_agp(obj_priv->agp_mem); | 2028 | drm_unbind_agp(obj_priv->agp_mem); |
2044 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | 2029 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); |
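Moving the fence-register release below the CPU-domain transition is an ordering fix rather than a cleanup: as the new comment says, the fence has to stay programmed until flushing is done, since outstanding tiled accesses through the GTT still depend on it, and clearing it first risked letting those writes land with the wrong layout.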
@@ -2099,8 +2084,8 @@ static int | |||
2099 | i915_gem_evict_everything(struct drm_device *dev) | 2084 | i915_gem_evict_everything(struct drm_device *dev) |
2100 | { | 2085 | { |
2101 | drm_i915_private_t *dev_priv = dev->dev_private; | 2086 | drm_i915_private_t *dev_priv = dev->dev_private; |
2102 | uint32_t seqno; | ||
2103 | int ret; | 2087 | int ret; |
2088 | uint32_t seqno; | ||
2104 | bool lists_empty; | 2089 | bool lists_empty; |
2105 | 2090 | ||
2106 | spin_lock(&dev_priv->mm.active_list_lock); | 2091 | spin_lock(&dev_priv->mm.active_list_lock); |
@@ -2122,6 +2107,8 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
2122 | if (ret) | 2107 | if (ret) |
2123 | return ret; | 2108 | return ret; |
2124 | 2109 | ||
2110 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
2111 | |||
2125 | ret = i915_gem_evict_from_inactive_list(dev); | 2112 | ret = i915_gem_evict_from_inactive_list(dev); |
2126 | if (ret) | 2113 | if (ret) |
2127 | return ret; | 2114 | return ret; |
@@ -2229,7 +2216,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2229 | } | 2216 | } |
2230 | 2217 | ||
2231 | int | 2218 | int |
2232 | i915_gem_object_get_pages(struct drm_gem_object *obj) | 2219 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2220 | gfp_t gfpmask) | ||
2233 | { | 2221 | { |
2234 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2222 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2235 | int page_count, i; | 2223 | int page_count, i; |
@@ -2255,7 +2243,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2255 | inode = obj->filp->f_path.dentry->d_inode; | 2243 | inode = obj->filp->f_path.dentry->d_inode; |
2256 | mapping = inode->i_mapping; | 2244 | mapping = inode->i_mapping; |
2257 | for (i = 0; i < page_count; i++) { | 2245 | for (i = 0; i < page_count; i++) { |
2258 | page = read_mapping_page(mapping, i, NULL); | 2246 | page = read_cache_page_gfp(mapping, i, |
2247 | mapping_gfp_mask (mapping) | | ||
2248 | __GFP_COLD | | ||
2249 | gfpmask); | ||
2259 | if (IS_ERR(page)) { | 2250 | if (IS_ERR(page)) { |
2260 | ret = PTR_ERR(page); | 2251 | ret = PTR_ERR(page); |
2261 | i915_gem_object_put_pages(obj); | 2252 | i915_gem_object_put_pages(obj); |
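read_cache_page_gfp() is the per-call counterpart of read_mapping_page(), which always allocated with the mapping's stored mask. The extra __GFP_COLD hints that the CPU is unlikely to touch these pages soon, reasonable for pages whose next reader is the GPU. For reference, the signature in this kernel generation (mm/filemap.c):

    struct page *read_cache_page_gfp(struct address_space *mapping,
                                     pgoff_t index, gfp_t gfp_mask);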
@@ -2549,6 +2540,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | |||
2549 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 2540 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) |
2550 | return 0; | 2541 | return 0; |
2551 | 2542 | ||
2543 | /* If we've changed tiling, GTT-mappings of the object | ||
2544 | * need to re-fault to ensure that the correct fence register | ||
2545 | * setup is in place. | ||
2546 | */ | ||
2547 | i915_gem_release_mmap(obj); | ||
2548 | |||
2552 | /* On the i915, GPU access to tiled buffers is via a fence, | 2549 | /* On the i915, GPU access to tiled buffers is via a fence, |
2553 | * therefore we must wait for any outstanding access to complete | 2550 | * therefore we must wait for any outstanding access to complete |
2554 | * before clearing the fence. | 2551 | * before clearing the fence. |
@@ -2557,12 +2554,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | |||
2557 | int ret; | 2554 | int ret; |
2558 | 2555 | ||
2559 | i915_gem_object_flush_gpu_write_domain(obj); | 2556 | i915_gem_object_flush_gpu_write_domain(obj); |
2560 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2561 | ret = i915_gem_object_wait_rendering(obj); | 2557 | ret = i915_gem_object_wait_rendering(obj); |
2562 | if (ret != 0) | 2558 | if (ret != 0) |
2563 | return ret; | 2559 | return ret; |
2564 | } | 2560 | } |
2565 | 2561 | ||
2562 | i915_gem_object_flush_gtt_write_domain(obj); | ||
2566 | i915_gem_clear_fence_reg (obj); | 2563 | i915_gem_clear_fence_reg (obj); |
2567 | 2564 | ||
2568 | return 0; | 2565 | return 0; |
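Two related corrections to fence teardown: i915_gem_release_mmap() zaps existing GTT mappings so any userspace mmap re-faults and picks up fence state that matches the new tiling, and the GTT write-domain flush is hoisted out of the active-only branch so it runs even for idle objects before the register is cleared.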
@@ -2578,12 +2575,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2578 | drm_i915_private_t *dev_priv = dev->dev_private; | 2575 | drm_i915_private_t *dev_priv = dev->dev_private; |
2579 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2576 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2580 | struct drm_mm_node *free_space; | 2577 | struct drm_mm_node *free_space; |
2581 | bool retry_alloc = false; | 2578 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2582 | int ret; | 2579 | int ret; |
2583 | 2580 | ||
2584 | if (dev_priv->mm.suspended) | ||
2585 | return -EBUSY; | ||
2586 | |||
2587 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2581 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
2588 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 2582 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
2589 | return -EINVAL; | 2583 | return -EINVAL; |
@@ -2625,15 +2619,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2625 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | 2619 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2626 | obj->size, obj_priv->gtt_offset); | 2620 | obj->size, obj_priv->gtt_offset); |
2627 | #endif | 2621 | #endif |
2628 | if (retry_alloc) { | 2622 | ret = i915_gem_object_get_pages(obj, gfpmask); |
2629 | i915_gem_object_set_page_gfp_mask (obj, | ||
2630 | i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); | ||
2631 | } | ||
2632 | ret = i915_gem_object_get_pages(obj); | ||
2633 | if (retry_alloc) { | ||
2634 | i915_gem_object_set_page_gfp_mask (obj, | ||
2635 | i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); | ||
2636 | } | ||
2637 | if (ret) { | 2623 | if (ret) { |
2638 | drm_mm_put_block(obj_priv->gtt_space); | 2624 | drm_mm_put_block(obj_priv->gtt_space); |
2639 | obj_priv->gtt_space = NULL; | 2625 | obj_priv->gtt_space = NULL; |
@@ -2643,9 +2629,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2643 | ret = i915_gem_evict_something(dev, obj->size); | 2629 | ret = i915_gem_evict_something(dev, obj->size); |
2644 | if (ret) { | 2630 | if (ret) { |
2645 | /* now try to shrink everyone else */ | 2631 | /* now try to shrink everyone else */ |
2646 | if (! retry_alloc) { | 2632 | if (gfpmask) { |
2647 | retry_alloc = true; | 2633 | gfpmask = 0; |
2648 | goto search_free; | 2634 | goto search_free; |
2649 | } | 2635 | } |
2650 | 2636 | ||
2651 | return ret; | 2637 | return ret; |
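The same fail-fast idiom as in i915_gem_object_get_pages_or_evict() above, with gfpmask doubling as the retry flag: the first pass uses __GFP_NORETRY | __GFP_NOWARN, and the `if (gfpmask) { gfpmask = 0; goto search_free; }` path is the second, unrestricted attempt after eviction. The dev_priv->mm.suspended early-return also disappears, so binding no longer fails outright with -EBUSY while the device is nominally suspended.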
@@ -2723,7 +2709,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
2723 | old_write_domain = obj->write_domain; | 2709 | old_write_domain = obj->write_domain; |
2724 | i915_gem_flush(dev, 0, obj->write_domain); | 2710 | i915_gem_flush(dev, 0, obj->write_domain); |
2725 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2711 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2726 | obj->write_domain = 0; | 2712 | BUG_ON(obj->write_domain); |
2727 | i915_gem_object_move_to_active(obj, seqno); | 2713 | i915_gem_object_move_to_active(obj, seqno); |
2728 | 2714 | ||
2729 | trace_i915_gem_object_change_domain(obj, | 2715 | trace_i915_gem_object_change_domain(obj, |
@@ -2839,6 +2825,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2839 | return 0; | 2825 | return 0; |
2840 | } | 2826 | } |
2841 | 2827 | ||
2828 | /* | ||
2829 | * Prepare buffer for display plane. Use uninterruptible for possible flush | ||
2830 | * wait, as in modesetting process we're not supposed to be interrupted. | ||
2831 | */ | ||
2832 | int | ||
2833 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | ||
2834 | { | ||
2835 | struct drm_device *dev = obj->dev; | ||
2836 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2837 | uint32_t old_write_domain, old_read_domains; | ||
2838 | int ret; | ||
2839 | |||
2840 | /* Not valid to be called on unbound objects. */ | ||
2841 | if (obj_priv->gtt_space == NULL) | ||
2842 | return -EINVAL; | ||
2843 | |||
2844 | i915_gem_object_flush_gpu_write_domain(obj); | ||
2845 | |||
2846 | /* Wait on any GPU rendering and flushing to occur. */ | ||
2847 | if (obj_priv->active) { | ||
2848 | #if WATCH_BUF | ||
2849 | DRM_INFO("%s: object %p wait for seqno %08x\n", | ||
2850 | __func__, obj, obj_priv->last_rendering_seqno); | ||
2851 | #endif | ||
2852 | ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); | ||
2853 | if (ret != 0) | ||
2854 | return ret; | ||
2855 | } | ||
2856 | |||
2857 | old_write_domain = obj->write_domain; | ||
2858 | old_read_domains = obj->read_domains; | ||
2859 | |||
2860 | obj->read_domains &= I915_GEM_DOMAIN_GTT; | ||
2861 | |||
2862 | i915_gem_object_flush_cpu_write_domain(obj); | ||
2863 | |||
2864 | /* It should now be out of any other write domains, and we can update | ||
2865 | * the domain values for our changes. | ||
2866 | */ | ||
2867 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | ||
2868 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | ||
2869 | obj->write_domain = I915_GEM_DOMAIN_GTT; | ||
2870 | obj_priv->dirty = 1; | ||
2871 | |||
2872 | trace_i915_gem_object_change_domain(obj, | ||
2873 | old_read_domains, | ||
2874 | old_write_domain); | ||
2875 | |||
2876 | return 0; | ||
2877 | } | ||
2878 | |||
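A single-purpose domain helper for scanout: the wait is uninterruptible (the 0 passed to i915_do_wait_request()) because bailing out on a signal halfway through a modeset would leave the pipe half-programmed, and the read domains are narrowed to GTT with the object marked dirty. The expected call shape when pinning a framebuffer, sketched only since the caller lives outside this excerpt (in intel_display.c):

    ret = i915_gem_object_pin(obj, alignment);
    if (ret == 0)
            ret = i915_gem_object_set_to_display_plane(obj);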
2842 | /** | 2879 | /** |
2843 | * Moves a single object to the CPU read, and possibly write domain. | 2880 | * Moves a single object to the CPU read, and possibly write domain. |
2844 | * | 2881 | * |
@@ -3198,7 +3235,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
3198 | static int | 3235 | static int |
3199 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | 3236 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, |
3200 | struct drm_file *file_priv, | 3237 | struct drm_file *file_priv, |
3201 | struct drm_i915_gem_exec_object *entry, | 3238 | struct drm_i915_gem_exec_object2 *entry, |
3202 | struct drm_i915_gem_relocation_entry *relocs) | 3239 | struct drm_i915_gem_relocation_entry *relocs) |
3203 | { | 3240 | { |
3204 | struct drm_device *dev = obj->dev; | 3241 | struct drm_device *dev = obj->dev; |
@@ -3206,12 +3243,36 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3206 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3243 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
3207 | int i, ret; | 3244 | int i, ret; |
3208 | void __iomem *reloc_page; | 3245 | void __iomem *reloc_page; |
3246 | bool need_fence; | ||
3247 | |||
3248 | need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3249 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
3250 | |||
3251 | /* Check fence reg constraints and rebind if necessary */ | ||
3252 | if (need_fence && !i915_gem_object_fence_offset_ok(obj, | ||
3253 | obj_priv->tiling_mode)) | ||
3254 | i915_gem_object_unbind(obj); | ||
3209 | 3255 | ||
3210 | /* Choose the GTT offset for our buffer and put it there. */ | 3256 | /* Choose the GTT offset for our buffer and put it there. */ |
3211 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | 3257 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); |
3212 | if (ret) | 3258 | if (ret) |
3213 | return ret; | 3259 | return ret; |
3214 | 3260 | ||
3261 | /* | ||
3262 | * Pre-965 chips need a fence register set up in order to | ||
3263 | * properly handle blits to/from tiled surfaces. | ||
3264 | */ | ||
3265 | if (need_fence) { | ||
3266 | ret = i915_gem_object_get_fence_reg(obj); | ||
3267 | if (ret != 0) { | ||
3268 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
3269 | DRM_ERROR("Failure to install fence: %d\n", | ||
3270 | ret); | ||
3271 | i915_gem_object_unpin(obj); | ||
3272 | return ret; | ||
3273 | } | ||
3274 | } | ||
3275 | |||
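With execbuffer2, userspace can mark buffers that must be covered by a fence register via EXEC_OBJECT_NEEDS_FENCE; the code above first rebinds the object if its current offset cannot be fenced, then reserves the register before relocations are applied, unpinning on failure. What a submission entry might look like from userspace (bo_handle is hypothetical; struct and flag are the ones this series adds to i915_drm.h):

    struct drm_i915_gem_exec_object2 entry = {
            .handle    = bo_handle,
            .alignment = 0,
            .flags     = EXEC_OBJECT_NEEDS_FENCE,
    };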
3215 | entry->offset = obj_priv->gtt_offset; | 3276 | entry->offset = obj_priv->gtt_offset; |
3216 | 3277 | ||
3217 | /* Apply the relocations, using the GTT aperture to avoid cache | 3278 | /* Apply the relocations, using the GTT aperture to avoid cache |
@@ -3373,7 +3434,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3373 | */ | 3434 | */ |
3374 | static int | 3435 | static int |
3375 | i915_dispatch_gem_execbuffer(struct drm_device *dev, | 3436 | i915_dispatch_gem_execbuffer(struct drm_device *dev, |
3376 | struct drm_i915_gem_execbuffer *exec, | 3437 | struct drm_i915_gem_execbuffer2 *exec, |
3377 | struct drm_clip_rect *cliprects, | 3438 | struct drm_clip_rect *cliprects, |
3378 | uint64_t exec_offset) | 3439 | uint64_t exec_offset) |
3379 | { | 3440 | { |
@@ -3463,7 +3524,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | |||
3463 | } | 3524 | } |
3464 | 3525 | ||
3465 | static int | 3526 | static int |
3466 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | 3527 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, |
3467 | uint32_t buffer_count, | 3528 | uint32_t buffer_count, |
3468 | struct drm_i915_gem_relocation_entry **relocs) | 3529 | struct drm_i915_gem_relocation_entry **relocs) |
3469 | { | 3530 | { |
@@ -3478,8 +3539,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
3478 | } | 3539 | } |
3479 | 3540 | ||
3480 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); | 3541 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); |
3481 | if (*relocs == NULL) | 3542 | if (*relocs == NULL) { |
3543 | DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); | ||
3482 | return -ENOMEM; | 3544 | return -ENOMEM; |
3545 | } | ||
3483 | 3546 | ||
3484 | for (i = 0; i < buffer_count; i++) { | 3547 | for (i = 0; i < buffer_count; i++) { |
3485 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3548 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
@@ -3503,13 +3566,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
3503 | } | 3566 | } |
3504 | 3567 | ||
3505 | static int | 3568 | static int |
3506 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, | 3569 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, |
3507 | uint32_t buffer_count, | 3570 | uint32_t buffer_count, |
3508 | struct drm_i915_gem_relocation_entry *relocs) | 3571 | struct drm_i915_gem_relocation_entry *relocs) |
3509 | { | 3572 | { |
3510 | uint32_t reloc_count = 0, i; | 3573 | uint32_t reloc_count = 0, i; |
3511 | int ret = 0; | 3574 | int ret = 0; |
3512 | 3575 | ||
3576 | if (relocs == NULL) | ||
3577 | return 0; | ||
3578 | |||
3513 | for (i = 0; i < buffer_count; i++) { | 3579 | for (i = 0; i < buffer_count; i++) { |
3514 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3580 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
3515 | int unwritten; | 3581 | int unwritten; |
@@ -3536,7 +3602,7 @@ err: | |||
3536 | } | 3602 | } |
3537 | 3603 | ||
3538 | static int | 3604 | static int |
3539 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, | 3605 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, |
3540 | uint64_t exec_offset) | 3606 | uint64_t exec_offset) |
3541 | { | 3607 | { |
3542 | uint32_t exec_start, exec_len; | 3608 | uint32_t exec_start, exec_len; |
@@ -3589,18 +3655,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
3589 | } | 3655 | } |
3590 | 3656 | ||
3591 | int | 3657 | int |
3592 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 3658 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
3593 | struct drm_file *file_priv) | 3659 | struct drm_file *file_priv, |
3660 | struct drm_i915_gem_execbuffer2 *args, | ||
3661 | struct drm_i915_gem_exec_object2 *exec_list) | ||
3594 | { | 3662 | { |
3595 | drm_i915_private_t *dev_priv = dev->dev_private; | 3663 | drm_i915_private_t *dev_priv = dev->dev_private; |
3596 | struct drm_i915_gem_execbuffer *args = data; | ||
3597 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
3598 | struct drm_gem_object **object_list = NULL; | 3664 | struct drm_gem_object **object_list = NULL; |
3599 | struct drm_gem_object *batch_obj; | 3665 | struct drm_gem_object *batch_obj; |
3600 | struct drm_i915_gem_object *obj_priv; | 3666 | struct drm_i915_gem_object *obj_priv; |
3601 | struct drm_clip_rect *cliprects = NULL; | 3667 | struct drm_clip_rect *cliprects = NULL; |
3602 | struct drm_i915_gem_relocation_entry *relocs; | 3668 | struct drm_i915_gem_relocation_entry *relocs = NULL; |
3603 | int ret, ret2, i, pinned = 0; | 3669 | int ret = 0, ret2, i, pinned = 0; |
3604 | uint64_t exec_offset; | 3670 | uint64_t exec_offset; |
3605 | uint32_t seqno, flush_domains, reloc_index; | 3671 | uint32_t seqno, flush_domains, reloc_index; |
3606 | int pin_tries, flips; | 3672 | int pin_tries, flips; |
@@ -3614,31 +3680,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3614 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 3680 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); |
3615 | return -EINVAL; | 3681 | return -EINVAL; |
3616 | } | 3682 | } |
3617 | /* Copy in the exec list from userland */ | ||
3618 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
3619 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); | 3683 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); |
3620 | if (exec_list == NULL || object_list == NULL) { | 3684 | if (object_list == NULL) { |
3621 | DRM_ERROR("Failed to allocate exec or object list " | 3685 | DRM_ERROR("Failed to allocate object list for %d buffers\n", |
3622 | "for %d buffers\n", | ||
3623 | args->buffer_count); | 3686 | args->buffer_count); |
3624 | ret = -ENOMEM; | 3687 | ret = -ENOMEM; |
3625 | goto pre_mutex_err; | 3688 | goto pre_mutex_err; |
3626 | } | 3689 | } |
3627 | ret = copy_from_user(exec_list, | ||
3628 | (struct drm_i915_relocation_entry __user *) | ||
3629 | (uintptr_t) args->buffers_ptr, | ||
3630 | sizeof(*exec_list) * args->buffer_count); | ||
3631 | if (ret != 0) { | ||
3632 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
3633 | args->buffer_count, ret); | ||
3634 | goto pre_mutex_err; | ||
3635 | } | ||
3636 | 3690 | ||
3637 | if (args->num_cliprects != 0) { | 3691 | if (args->num_cliprects != 0) { |
3638 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | 3692 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), |
3639 | GFP_KERNEL); | 3693 | GFP_KERNEL); |
3640 | if (cliprects == NULL) | 3694 | if (cliprects == NULL) { |
3695 | ret = -ENOMEM; | ||
3641 | goto pre_mutex_err; | 3696 | goto pre_mutex_err; |
3697 | } | ||
3642 | 3698 | ||
3643 | ret = copy_from_user(cliprects, | 3699 | ret = copy_from_user(cliprects, |
3644 | (struct drm_clip_rect __user *) | 3700 | (struct drm_clip_rect __user *) |
@@ -3680,6 +3736,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3680 | if (object_list[i] == NULL) { | 3736 | if (object_list[i] == NULL) { |
3681 | DRM_ERROR("Invalid object handle %d at index %d\n", | 3737 | DRM_ERROR("Invalid object handle %d at index %d\n", |
3682 | exec_list[i].handle, i); | 3738 | exec_list[i].handle, i); |
3739 | /* prevent error path from reading uninitialized data */ | ||
3740 | args->buffer_count = i + 1; | ||
3683 | ret = -EBADF; | 3741 | ret = -EBADF; |
3684 | goto err; | 3742 | goto err; |
3685 | } | 3743 | } |
@@ -3688,6 +3746,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3688 | if (obj_priv->in_execbuffer) { | 3746 | if (obj_priv->in_execbuffer) { |
3689 | DRM_ERROR("Object %p appears more than once in object list\n", | 3747 | DRM_ERROR("Object %p appears more than once in object list\n", |
3690 | object_list[i]); | 3748 | object_list[i]); |
3749 | /* prevent error path from reading uninitialized data */ | ||
3750 | args->buffer_count = i + 1; | ||
3691 | ret = -EBADF; | 3751 | ret = -EBADF; |
3692 | goto err; | 3752 | goto err; |
3693 | } | 3753 | } |
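
Both error paths above clamp args->buffer_count to i + 1 before jumping to err, so the unwind loop only ever visits entries that were actually looked up. A standalone sketch of that pattern, with hypothetical types in place of the exec list:

#include <stdlib.h>

struct entry {
	void *obj;
};

static int lookup_all(struct entry *list, int *count)
{
	int i;

	for (i = 0; i < *count; i++) {
		list[i].obj = malloc(1);	/* stand-in for a handle lookup */
		if (list[i].obj == NULL) {
			/* prevent the error path from reading
			 * uninitialized slots */
			*count = i + 1;
			return -1;
		}
	}
	return 0;
}

static void cleanup(struct entry *list, int count)
{
	int i;

	for (i = 0; i < count; i++)
		free(list[i].obj);	/* free(NULL) is a no-op */
}
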
@@ -3801,16 +3861,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3801 | i915_gem_flush(dev, | 3861 | i915_gem_flush(dev, |
3802 | dev->invalidate_domains, | 3862 | dev->invalidate_domains, |
3803 | dev->flush_domains); | 3863 | dev->flush_domains); |
3804 | if (dev->flush_domains) | 3864 | if (dev->flush_domains & I915_GEM_GPU_DOMAINS) |
3805 | (void)i915_add_request(dev, file_priv, | 3865 | (void)i915_add_request(dev, file_priv, |
3806 | dev->flush_domains); | 3866 | dev->flush_domains); |
3807 | } | 3867 | } |
3808 | 3868 | ||
3809 | for (i = 0; i < args->buffer_count; i++) { | 3869 | for (i = 0; i < args->buffer_count; i++) { |
3810 | struct drm_gem_object *obj = object_list[i]; | 3870 | struct drm_gem_object *obj = object_list[i]; |
3871 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3811 | uint32_t old_write_domain = obj->write_domain; | 3872 | uint32_t old_write_domain = obj->write_domain; |
3812 | 3873 | ||
3813 | obj->write_domain = obj->pending_write_domain; | 3874 | obj->write_domain = obj->pending_write_domain; |
3875 | if (obj->write_domain) | ||
3876 | list_move_tail(&obj_priv->gpu_write_list, | ||
3877 | &dev_priv->mm.gpu_write_list); | ||
3878 | else | ||
3879 | list_del_init(&obj_priv->gpu_write_list); | ||
3880 | |||
3814 | trace_i915_gem_object_change_domain(obj, | 3881 | trace_i915_gem_object_change_domain(obj, |
3815 | obj->read_domains, | 3882 | obj->read_domains, |
3816 | old_write_domain); | 3883 | old_write_domain); |
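
The new gpu_write_list keeps every object with an outstanding GPU write on a single list, so flush code can find them without scanning everything. A toy intrusive-list sketch of the move-or-unlink step added above (these list primitives mimic, but are not, the kernel's list_head API):

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	list_init(e);
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del_init(e);
	list_add_tail(e, h);
}

struct gem_obj {
	unsigned int write_domain;
	struct list_head gpu_write_list;
};

/* After queuing the flush, requeue or unlink the object depending on
 * whether it still has a pending GPU write domain. */
static void track_gpu_write(struct gem_obj *o, unsigned int pending,
			    struct list_head *gpu_write_list)
{
	o->write_domain = pending;
	if (o->write_domain)
		list_move_tail(&o->gpu_write_list, gpu_write_list);
	else
		list_del_init(&o->gpu_write_list);
}
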
@@ -3884,8 +3951,101 @@ err: | |||
3884 | 3951 | ||
3885 | mutex_unlock(&dev->struct_mutex); | 3952 | mutex_unlock(&dev->struct_mutex); |
3886 | 3953 | ||
3954 | pre_mutex_err: | ||
3955 | /* Copy the updated relocations out regardless of current error | ||
3956 | * state. Failure to update the relocs would mean that the next | ||
3957 | * time userland calls execbuf, it would do so with presumed offset | ||
3958 | * state that didn't match the actual object state. | ||
3959 | */ | ||
3960 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3961 | relocs); | ||
3962 | if (ret2 != 0) { | ||
3963 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3964 | |||
3965 | if (ret == 0) | ||
3966 | ret = ret2; | ||
3967 | } | ||
3968 | |||
3969 | drm_free_large(object_list); | ||
3970 | kfree(cliprects); | ||
3971 | |||
3972 | return ret; | ||
3973 | } | ||
3974 | |||
3975 | /* | ||
3976 | * Legacy execbuffer just creates an exec2 list from the original exec object | ||
3977 | * list array and passes it to the real function. | ||
3978 | */ | ||
3979 | int | ||
3980 | i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
3981 | struct drm_file *file_priv) | ||
3982 | { | ||
3983 | struct drm_i915_gem_execbuffer *args = data; | ||
3984 | struct drm_i915_gem_execbuffer2 exec2; | ||
3985 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
3986 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
3987 | int ret, i; | ||
3988 | |||
3989 | #if WATCH_EXEC | ||
3990 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
3991 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
3992 | #endif | ||
3993 | |||
3994 | if (args->buffer_count < 1) { | ||
3995 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
3996 | return -EINVAL; | ||
3997 | } | ||
3998 | |||
3999 | /* Copy in the exec list from userland */ | ||
4000 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
4001 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
4002 | if (exec_list == NULL || exec2_list == NULL) { | ||
4003 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
4004 | args->buffer_count); | ||
4005 | drm_free_large(exec_list); | ||
4006 | drm_free_large(exec2_list); | ||
4007 | return -ENOMEM; | ||
4008 | } | ||
4009 | ret = copy_from_user(exec_list, | ||
4010 | (struct drm_i915_relocation_entry __user *) | ||
4011 | (uintptr_t) args->buffers_ptr, | ||
4012 | sizeof(*exec_list) * args->buffer_count); | ||
4013 | if (ret != 0) { | ||
4014 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
4015 | args->buffer_count, ret); | ||
4016 | drm_free_large(exec_list); | ||
4017 | drm_free_large(exec2_list); | ||
4018 | return -EFAULT; | ||
4019 | } | ||
4020 | |||
4021 | for (i = 0; i < args->buffer_count; i++) { | ||
4022 | exec2_list[i].handle = exec_list[i].handle; | ||
4023 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | ||
4024 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | ||
4025 | exec2_list[i].alignment = exec_list[i].alignment; | ||
4026 | exec2_list[i].offset = exec_list[i].offset; | ||
4027 | if (!IS_I965G(dev)) | ||
4028 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | ||
4029 | else | ||
4030 | exec2_list[i].flags = 0; | ||
4031 | } | ||
4032 | |||
4033 | exec2.buffers_ptr = args->buffers_ptr; | ||
4034 | exec2.buffer_count = args->buffer_count; | ||
4035 | exec2.batch_start_offset = args->batch_start_offset; | ||
4036 | exec2.batch_len = args->batch_len; | ||
4037 | exec2.DR1 = args->DR1; | ||
4038 | exec2.DR4 = args->DR4; | ||
4039 | exec2.num_cliprects = args->num_cliprects; | ||
4040 | exec2.cliprects_ptr = args->cliprects_ptr; | ||
4041 | exec2.flags = 0; | ||
4042 | |||
4043 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | ||
3887 | if (!ret) { | 4044 | if (!ret) { |
3888 | /* Copy the new buffer offsets back to the user's exec list. */ | 4045 | /* Copy the new buffer offsets back to the user's exec list. */ |
4046 | for (i = 0; i < args->buffer_count; i++) | ||
4047 | exec_list[i].offset = exec2_list[i].offset; | ||
4048 | /* ... and back out to userspace */ | ||
3889 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | 4049 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) |
3890 | (uintptr_t) args->buffers_ptr, | 4050 | (uintptr_t) args->buffers_ptr, |
3891 | exec_list, | 4051 | exec_list, |
@@ -3898,25 +4058,62 @@ err: | |||
3898 | } | 4058 | } |
3899 | } | 4059 | } |
3900 | 4060 | ||
3901 | /* Copy the updated relocations out regardless of current error | 4061 | drm_free_large(exec_list); |
3902 | * state. Failure to update the relocs would mean that the next | 4062 | drm_free_large(exec2_list); |
3903 | * time userland calls execbuf, it would do so with presumed offset | 4063 | return ret; |
3904 | * state that didn't match the actual object state. | 4064 | } |
3905 | */ | ||
3906 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3907 | relocs); | ||
3908 | if (ret2 != 0) { | ||
3909 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3910 | 4065 | ||
3911 | if (ret == 0) | 4066 | int |
3912 | ret = ret2; | 4067 | i915_gem_execbuffer2(struct drm_device *dev, void *data, |
4068 | struct drm_file *file_priv) | ||
4069 | { | ||
4070 | struct drm_i915_gem_execbuffer2 *args = data; | ||
4071 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
4072 | int ret; | ||
4073 | |||
4074 | #if WATCH_EXEC | ||
4075 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
4076 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
4077 | #endif | ||
4078 | |||
4079 | if (args->buffer_count < 1) { | ||
4080 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | ||
4081 | return -EINVAL; | ||
3913 | } | 4082 | } |
3914 | 4083 | ||
3915 | pre_mutex_err: | 4084 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); |
3916 | drm_free_large(object_list); | 4085 | if (exec2_list == NULL) { |
3917 | drm_free_large(exec_list); | 4086 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", |
3918 | kfree(cliprects); | 4087 | args->buffer_count); |
4088 | return -ENOMEM; | ||
4089 | } | ||
4090 | ret = copy_from_user(exec2_list, | ||
4091 | (struct drm_i915_relocation_entry __user *) | ||
4092 | (uintptr_t) args->buffers_ptr, | ||
4093 | sizeof(*exec2_list) * args->buffer_count); | ||
4094 | if (ret != 0) { | ||
4095 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
4096 | args->buffer_count, ret); | ||
4097 | drm_free_large(exec2_list); | ||
4098 | return -EFAULT; | ||
4099 | } | ||
3919 | 4100 | ||
4101 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | ||
4102 | if (!ret) { | ||
4103 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
4104 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
4105 | (uintptr_t) args->buffers_ptr, | ||
4106 | exec2_list, | ||
4107 | sizeof(*exec2_list) * args->buffer_count); | ||
4108 | if (ret) { | ||
4109 | ret = -EFAULT; | ||
4110 | DRM_ERROR("failed to copy %d exec entries " | ||
4111 | "back to user (%d)\n", | ||
4112 | args->buffer_count, ret); | ||
4113 | } | ||
4114 | } | ||
4115 | |||
4116 | drm_free_large(exec2_list); | ||
3920 | return ret; | 4117 | return ret; |
3921 | } | 4118 | } |
3922 | 4119 | ||
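
The legacy wrapper above forwards old execbuf calls by widening each entry into the exec2 layout and forcing EXEC_OBJECT_NEEDS_FENCE on pre-965 parts, preserving the implicit fence setup that used to live in i915_gem_object_pin(). A sketch of that conversion with simplified, hypothetical structures:

#include <stdint.h>
#include <stdbool.h>

#define EXEC_OBJECT_NEEDS_FENCE (1 << 0)

struct exec_object {			/* legacy entry */
	uint32_t handle, relocation_count;
	uint64_t relocs_ptr, alignment, offset;
};

struct exec_object2 {			/* new entry, adds flags */
	uint32_t handle, relocation_count;
	uint64_t relocs_ptr, alignment, offset, flags;
};

static void widen_entry(const struct exec_object *in,
			struct exec_object2 *out, bool is_965)
{
	out->handle = in->handle;
	out->relocation_count = in->relocation_count;
	out->relocs_ptr = in->relocs_ptr;
	out->alignment = in->alignment;
	out->offset = in->offset;
	/* Legacy userspace never set flags: emulate the old implicit
	 * fence setup on pre-965 hardware. */
	out->flags = is_965 ? 0 : EXEC_OBJECT_NEEDS_FENCE;
}
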
@@ -3933,19 +4130,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
3933 | if (ret) | 4130 | if (ret) |
3934 | return ret; | 4131 | return ret; |
3935 | } | 4132 | } |
3936 | /* | 4133 | |
3937 | * Pre-965 chips need a fence register set up in order to | ||
3938 | * properly handle tiled surfaces. | ||
3939 | */ | ||
3940 | if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) { | ||
3941 | ret = i915_gem_object_get_fence_reg(obj); | ||
3942 | if (ret != 0) { | ||
3943 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
3944 | DRM_ERROR("Failure to install fence: %d\n", | ||
3945 | ret); | ||
3946 | return ret; | ||
3947 | } | ||
3948 | } | ||
3949 | obj_priv->pin_count++; | 4134 | obj_priv->pin_count++; |
3950 | 4135 | ||
3951 | /* If the object is not active and not pending a flush, | 4136 | /* If the object is not active and not pending a flush, |
@@ -4203,6 +4388,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4203 | obj_priv->obj = obj; | 4388 | obj_priv->obj = obj; |
4204 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 4389 | obj_priv->fence_reg = I915_FENCE_REG_NONE; |
4205 | INIT_LIST_HEAD(&obj_priv->list); | 4390 | INIT_LIST_HEAD(&obj_priv->list); |
4391 | INIT_LIST_HEAD(&obj_priv->gpu_write_list); | ||
4206 | INIT_LIST_HEAD(&obj_priv->fence_list); | 4392 | INIT_LIST_HEAD(&obj_priv->fence_list); |
4207 | obj_priv->madv = I915_MADV_WILLNEED; | 4393 | obj_priv->madv = I915_MADV_WILLNEED; |
4208 | 4394 | ||
@@ -4258,129 +4444,73 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev) | |||
4258 | return 0; | 4444 | return 0; |
4259 | } | 4445 | } |
4260 | 4446 | ||
4261 | int | 4447 | static int |
4262 | i915_gem_idle(struct drm_device *dev) | 4448 | i915_gpu_idle(struct drm_device *dev) |
4263 | { | 4449 | { |
4264 | drm_i915_private_t *dev_priv = dev->dev_private; | 4450 | drm_i915_private_t *dev_priv = dev->dev_private; |
4265 | uint32_t seqno, cur_seqno, last_seqno; | 4451 | bool lists_empty; |
4266 | int stuck, ret; | 4452 | uint32_t seqno; |
4267 | 4453 | ||
4268 | mutex_lock(&dev->struct_mutex); | 4454 | spin_lock(&dev_priv->mm.active_list_lock); |
4455 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | ||
4456 | list_empty(&dev_priv->mm.active_list); | ||
4457 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
4269 | 4458 | ||
4270 | if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { | 4459 | if (lists_empty) |
4271 | mutex_unlock(&dev->struct_mutex); | ||
4272 | return 0; | 4460 | return 0; |
4273 | } | ||
4274 | 4461 | ||
4275 | /* Hack! Don't let anybody do execbuf while we don't control the chip. | 4462 | /* Flush everything onto the inactive list. */ |
4276 | * We need to replace this with a semaphore, or something. | ||
4277 | */ | ||
4278 | dev_priv->mm.suspended = 1; | ||
4279 | del_timer(&dev_priv->hangcheck_timer); | ||
4280 | |||
4281 | /* Cancel the retire work handler, wait for it to finish if running | ||
4282 | */ | ||
4283 | mutex_unlock(&dev->struct_mutex); | ||
4284 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | ||
4285 | mutex_lock(&dev->struct_mutex); | ||
4286 | |||
4287 | i915_kernel_lost_context(dev); | ||
4288 | |||
4289 | /* Flush the GPU along with all non-CPU write domains | ||
4290 | */ | ||
4291 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 4463 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
4292 | seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); | 4464 | seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); |
4293 | 4465 | if (seqno == 0) | |
4294 | if (seqno == 0) { | ||
4295 | mutex_unlock(&dev->struct_mutex); | ||
4296 | return -ENOMEM; | 4466 | return -ENOMEM; |
4297 | } | ||
4298 | |||
4299 | dev_priv->mm.waiting_gem_seqno = seqno; | ||
4300 | last_seqno = 0; | ||
4301 | stuck = 0; | ||
4302 | for (;;) { | ||
4303 | cur_seqno = i915_get_gem_seqno(dev); | ||
4304 | if (i915_seqno_passed(cur_seqno, seqno)) | ||
4305 | break; | ||
4306 | if (last_seqno == cur_seqno) { | ||
4307 | if (stuck++ > 100) { | ||
4308 | DRM_ERROR("hardware wedged\n"); | ||
4309 | atomic_set(&dev_priv->mm.wedged, 1); | ||
4310 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
4311 | break; | ||
4312 | } | ||
4313 | } | ||
4314 | msleep(10); | ||
4315 | last_seqno = cur_seqno; | ||
4316 | } | ||
4317 | dev_priv->mm.waiting_gem_seqno = 0; | ||
4318 | |||
4319 | i915_gem_retire_requests(dev); | ||
4320 | |||
4321 | spin_lock(&dev_priv->mm.active_list_lock); | ||
4322 | if (!atomic_read(&dev_priv->mm.wedged)) { | ||
4323 | /* Active and flushing should now be empty as we've | ||
4324 | * waited for a sequence higher than any pending execbuffer | ||
4325 | */ | ||
4326 | WARN_ON(!list_empty(&dev_priv->mm.active_list)); | ||
4327 | WARN_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
4328 | /* Request should now be empty as we've also waited | ||
4329 | * for the last request in the list | ||
4330 | */ | ||
4331 | WARN_ON(!list_empty(&dev_priv->mm.request_list)); | ||
4332 | } | ||
4333 | |||
4334 | /* Empty the active and flushing lists to inactive. If there's | ||
4335 | * anything left at this point, it means that we're wedged and | ||
4336 | * nothing good's going to happen by leaving them there. So strip | ||
4337 | * the GPU domains and just stuff them onto inactive. | ||
4338 | */ | ||
4339 | while (!list_empty(&dev_priv->mm.active_list)) { | ||
4340 | struct drm_gem_object *obj; | ||
4341 | uint32_t old_write_domain; | ||
4342 | |||
4343 | obj = list_first_entry(&dev_priv->mm.active_list, | ||
4344 | struct drm_i915_gem_object, | ||
4345 | list)->obj; | ||
4346 | old_write_domain = obj->write_domain; | ||
4347 | obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
4348 | i915_gem_object_move_to_inactive(obj); | ||
4349 | 4467 | ||
4350 | trace_i915_gem_object_change_domain(obj, | 4468 | return i915_wait_request(dev, seqno); |
4351 | obj->read_domains, | 4469 | } |
4352 | old_write_domain); | ||
4353 | } | ||
4354 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
4355 | 4470 | ||
4356 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 4471 | int |
4357 | struct drm_gem_object *obj; | 4472 | i915_gem_idle(struct drm_device *dev) |
4358 | uint32_t old_write_domain; | 4473 | { |
4474 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4475 | int ret; | ||
4359 | 4476 | ||
4360 | obj = list_first_entry(&dev_priv->mm.flushing_list, | 4477 | mutex_lock(&dev->struct_mutex); |
4361 | struct drm_i915_gem_object, | ||
4362 | list)->obj; | ||
4363 | old_write_domain = obj->write_domain; | ||
4364 | obj->write_domain &= ~I915_GEM_GPU_DOMAINS; | ||
4365 | i915_gem_object_move_to_inactive(obj); | ||
4366 | 4478 | ||
4367 | trace_i915_gem_object_change_domain(obj, | 4479 | if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { |
4368 | obj->read_domains, | 4480 | mutex_unlock(&dev->struct_mutex); |
4369 | old_write_domain); | 4481 | return 0; |
4370 | } | 4482 | } |
4371 | 4483 | ||
4372 | 4484 | ret = i915_gpu_idle(dev); | |
4373 | /* Move all inactive buffers out of the GTT. */ | ||
4374 | ret = i915_gem_evict_from_inactive_list(dev); | ||
4375 | WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); | ||
4376 | if (ret) { | 4485 | if (ret) { |
4377 | mutex_unlock(&dev->struct_mutex); | 4486 | mutex_unlock(&dev->struct_mutex); |
4378 | return ret; | 4487 | return ret; |
4379 | } | 4488 | } |
4380 | 4489 | ||
4490 | /* Under UMS, be paranoid and evict. */ | ||
4491 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
4492 | ret = i915_gem_evict_from_inactive_list(dev); | ||
4493 | if (ret) { | ||
4494 | mutex_unlock(&dev->struct_mutex); | ||
4495 | return ret; | ||
4496 | } | ||
4497 | } | ||
4498 | |||
4499 | /* Hack! Don't let anybody do execbuf while we don't control the chip. | ||
4500 | * We need to replace this with a semaphore, or something. | ||
4501 | * And not confound mm.suspended! | ||
4502 | */ | ||
4503 | dev_priv->mm.suspended = 1; | ||
4504 | del_timer(&dev_priv->hangcheck_timer); | ||
4505 | |||
4506 | i915_kernel_lost_context(dev); | ||
4381 | i915_gem_cleanup_ringbuffer(dev); | 4507 | i915_gem_cleanup_ringbuffer(dev); |
4508 | |||
4382 | mutex_unlock(&dev->struct_mutex); | 4509 | mutex_unlock(&dev->struct_mutex); |
4383 | 4510 | ||
4511 | /* Cancel the retire work handler, which should be idle now. */ | ||
4512 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | ||
4513 | |||
4384 | return 0; | 4514 | return 0; |
4385 | } | 4515 | } |
4386 | 4516 | ||
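
The factored-out i915_gpu_idle() above replaces the hand-rolled seqno polling loop: if the active and flushing lists are empty there is nothing to wait for; otherwise flush the GPU domains, emit one request, and sleep on its seqno. A compact sketch of that control flow, with stubbed helpers standing in for the driver's:

#include <stdbool.h>
#include <stdint.h>

/* Stubs; the real driver flushes GPU domains and sleeps on an
 * interrupt-driven seqno rather than returning immediately. */
static bool lists_empty(void)      { return false; }
static void flush_gpu(void)        { }
static uint32_t add_request(void)  { return 1; }   /* 0 means allocation failed */
static int wait_request(uint32_t seqno) { (void)seqno; return 0; }

static int gpu_idle(void)
{
	uint32_t seqno;

	if (lists_empty())
		return 0;		/* nothing active or flushing */

	flush_gpu();
	seqno = add_request();
	if (seqno == 0)
		return -1;		/* -ENOMEM in the driver */

	return wait_request(seqno);
}
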
@@ -4654,6 +4784,7 @@ i915_gem_load(struct drm_device *dev) | |||
4654 | spin_lock_init(&dev_priv->mm.active_list_lock); | 4784 | spin_lock_init(&dev_priv->mm.active_list_lock); |
4655 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 4785 | INIT_LIST_HEAD(&dev_priv->mm.active_list); |
4656 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 4786 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
4787 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | ||
4657 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4788 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4658 | INIT_LIST_HEAD(&dev_priv->mm.request_list); | 4789 | INIT_LIST_HEAD(&dev_priv->mm.request_list); |
4659 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4790 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
@@ -4666,7 +4797,8 @@ i915_gem_load(struct drm_device *dev) | |||
4666 | spin_unlock(&shrink_list_lock); | 4797 | spin_unlock(&shrink_list_lock); |
4667 | 4798 | ||
4668 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 4799 | /* Old X drivers will take 0-2 for front, back, depth buffers */ |
4669 | dev_priv->fence_reg_start = 3; | 4800 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4801 | dev_priv->fence_reg_start = 3; | ||
4670 | 4802 | ||
4671 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 4803 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
4672 | dev_priv->num_fence_regs = 16; | 4804 | dev_priv->num_fence_regs = 16; |
@@ -4766,7 +4898,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
4766 | if (!obj_priv->phys_obj) | 4898 | if (!obj_priv->phys_obj) |
4767 | return; | 4899 | return; |
4768 | 4900 | ||
4769 | ret = i915_gem_object_get_pages(obj); | 4901 | ret = i915_gem_object_get_pages(obj, 0); |
4770 | if (ret) | 4902 | if (ret) |
4771 | goto out; | 4903 | goto out; |
4772 | 4904 | ||
@@ -4824,7 +4956,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4824 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 4956 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
4825 | obj_priv->phys_obj->cur_obj = obj; | 4957 | obj_priv->phys_obj->cur_obj = obj; |
4826 | 4958 | ||
4827 | ret = i915_gem_object_get_pages(obj); | 4959 | ret = i915_gem_object_get_pages(obj, 0); |
4828 | if (ret) { | 4960 | if (ret) { |
4829 | DRM_ERROR("failed to get page list\n"); | 4961 | DRM_ERROR("failed to get page list\n"); |
4830 | goto out; | 4962 | goto out; |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 30d6af6c09bb..20653776965a 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -25,8 +25,6 @@ | |||
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/acpi.h> | ||
29 | #include <linux/pnp.h> | ||
30 | #include "linux/string.h" | 28 | #include "linux/string.h" |
31 | #include "linux/bitops.h" | 29 | #include "linux/bitops.h" |
32 | #include "drmP.h" | 30 | #include "drmP.h" |
@@ -83,120 +81,6 @@ | |||
83 | * to match what the GPU expects. | 81 | * to match what the GPU expects. |
84 | */ | 82 | */ |
85 | 83 | ||
86 | #define MCHBAR_I915 0x44 | ||
87 | #define MCHBAR_I965 0x48 | ||
88 | #define MCHBAR_SIZE (4*4096) | ||
89 | |||
90 | #define DEVEN_REG 0x54 | ||
91 | #define DEVEN_MCHBAR_EN (1 << 28) | ||
92 | |||
93 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | ||
94 | static int | ||
95 | intel_alloc_mchbar_resource(struct drm_device *dev) | ||
96 | { | ||
97 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
98 | int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | ||
99 | u32 temp_lo, temp_hi = 0; | ||
100 | u64 mchbar_addr; | ||
101 | int ret = 0; | ||
102 | |||
103 | if (IS_I965G(dev)) | ||
104 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); | ||
105 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | ||
106 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | ||
107 | |||
108 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | ||
109 | #ifdef CONFIG_PNP | ||
110 | if (mchbar_addr && | ||
111 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { | ||
112 | ret = 0; | ||
113 | goto out; | ||
114 | } | ||
115 | #endif | ||
116 | |||
117 | /* Get some space for it */ | ||
118 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, | ||
119 | MCHBAR_SIZE, MCHBAR_SIZE, | ||
120 | PCIBIOS_MIN_MEM, | ||
121 | 0, pcibios_align_resource, | ||
122 | dev_priv->bridge_dev); | ||
123 | if (ret) { | ||
124 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); | ||
125 | dev_priv->mch_res.start = 0; | ||
126 | goto out; | ||
127 | } | ||
128 | |||
129 | if (IS_I965G(dev)) | ||
130 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, | ||
131 | upper_32_bits(dev_priv->mch_res.start)); | ||
132 | |||
133 | pci_write_config_dword(dev_priv->bridge_dev, reg, | ||
134 | lower_32_bits(dev_priv->mch_res.start)); | ||
135 | out: | ||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | /* Setup MCHBAR if possible, return true if we should disable it again */ | ||
140 | static bool | ||
141 | intel_setup_mchbar(struct drm_device *dev) | ||
142 | { | ||
143 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
144 | int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | ||
145 | u32 temp; | ||
146 | bool need_disable = false, enabled; | ||
147 | |||
148 | if (IS_I915G(dev) || IS_I915GM(dev)) { | ||
149 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | ||
150 | enabled = !!(temp & DEVEN_MCHBAR_EN); | ||
151 | } else { | ||
152 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | ||
153 | enabled = temp & 1; | ||
154 | } | ||
155 | |||
156 | /* If it's already enabled, don't have to do anything */ | ||
157 | if (enabled) | ||
158 | goto out; | ||
159 | |||
160 | if (intel_alloc_mchbar_resource(dev)) | ||
161 | goto out; | ||
162 | |||
163 | need_disable = true; | ||
164 | |||
165 | /* Space is allocated or reserved, so enable it. */ | ||
166 | if (IS_I915G(dev) || IS_I915GM(dev)) { | ||
167 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, | ||
168 | temp | DEVEN_MCHBAR_EN); | ||
169 | } else { | ||
170 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | ||
171 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); | ||
172 | } | ||
173 | out: | ||
174 | return need_disable; | ||
175 | } | ||
176 | |||
177 | static void | ||
178 | intel_teardown_mchbar(struct drm_device *dev, bool disable) | ||
179 | { | ||
180 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
181 | int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; | ||
182 | u32 temp; | ||
183 | |||
184 | if (disable) { | ||
185 | if (IS_I915G(dev) || IS_I915GM(dev)) { | ||
186 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | ||
187 | temp &= ~DEVEN_MCHBAR_EN; | ||
188 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); | ||
189 | } else { | ||
190 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | ||
191 | temp &= ~1; | ||
192 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); | ||
193 | } | ||
194 | } | ||
195 | |||
196 | if (dev_priv->mch_res.start) | ||
197 | release_resource(&dev_priv->mch_res); | ||
198 | } | ||
199 | |||
200 | /** | 84 | /** |
201 | * Detects bit 6 swizzling of address lookup between IGD access and CPU | 85 | * Detects bit 6 swizzling of address lookup between IGD access and CPU |
202 | * access through main memory. | 86 | * access through main memory. |
@@ -207,7 +91,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
207 | drm_i915_private_t *dev_priv = dev->dev_private; | 91 | drm_i915_private_t *dev_priv = dev->dev_private; |
208 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
209 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
210 | bool need_disable; | ||
211 | 94 | ||
212 | if (IS_IRONLAKE(dev)) { | 95 | if (IS_IRONLAKE(dev)) { |
213 | /* On Ironlake whatever DRAM config, GPU always do | 96 | /* On Ironlake whatever DRAM config, GPU always do |
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
224 | } else if (IS_MOBILE(dev)) { | 107 | } else if (IS_MOBILE(dev)) { |
225 | uint32_t dcc; | 108 | uint32_t dcc; |
226 | 109 | ||
227 | /* Try to make sure MCHBAR is enabled before poking at it */ | ||
228 | need_disable = intel_setup_mchbar(dev); | ||
229 | |||
230 | /* On mobile 9xx chipsets, channel interleave by the CPU is | 110 | /* On mobile 9xx chipsets, channel interleave by the CPU is |
231 | * determined by DCC. For single-channel, neither the CPU | 111 | * determined by DCC. For single-channel, neither the CPU |
232 | * nor the GPU do swizzling. For dual channel interleaved, | 112 | * nor the GPU do swizzling. For dual channel interleaved, |
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
266 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 146 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
267 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 147 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
268 | } | 148 | } |
269 | |||
270 | intel_teardown_mchbar(dev, need_disable); | ||
271 | } else { | 149 | } else { |
272 | /* The 965, G33, and newer, have a very flexible memory | 150 | /* The 965, G33, and newer, have a very flexible memory |
273 | * configuration. It will enable dual-channel mode | 151 | * configuration. It will enable dual-channel mode |
@@ -302,37 +180,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
302 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; | 180 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; |
303 | } | 181 | } |
304 | 182 | ||
305 | |||
306 | /** | ||
307 | * Returns the size of the fence for a tiled object of the given size. | ||
308 | */ | ||
309 | static int | ||
310 | i915_get_fence_size(struct drm_device *dev, int size) | ||
311 | { | ||
312 | int i; | ||
313 | int start; | ||
314 | |||
315 | if (IS_I965G(dev)) { | ||
316 | /* The 965 can have fences at any page boundary. */ | ||
317 | return ALIGN(size, 4096); | ||
318 | } else { | ||
319 | /* Align the size to a power of two greater than the smallest | ||
320 | * fence size. | ||
321 | */ | ||
322 | if (IS_I9XX(dev)) | ||
323 | start = 1024 * 1024; | ||
324 | else | ||
325 | start = 512 * 1024; | ||
326 | |||
327 | for (i = start; i < size; i <<= 1) | ||
328 | ; | ||
329 | |||
330 | return i; | ||
331 | } | ||
332 | } | ||
333 | |||
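
For reference, the removed i915_get_fence_size() encoded a simple rule: 965-class fences work at any page boundary, while older parts need a power-of-two region no smaller than 1MB (9xx) or 512KB. A standalone sketch of that computation:

#include <stddef.h>

#define PAGE_SZ 4096u

static size_t fence_size(size_t obj_size, int is_965, int is_9xx)
{
	size_t sz;

	if (is_965)	/* 965: any page-aligned size is fine */
		return (obj_size + PAGE_SZ - 1) & ~(size_t)(PAGE_SZ - 1);

	/* Older parts: next power of two at or above the floor. */
	sz = is_9xx ? 1024u * 1024u : 512u * 1024u;
	while (sz < obj_size)
		sz <<= 1;

	return sz;
}
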
334 | /* Check pitch constraints for all chips & tiling formats */ | 183 |
335 | static bool | 184 | bool |
336 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | 185 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) |
337 | { | 186 | { |
338 | int tile_width; | 187 | int tile_width; |
@@ -384,16 +233,10 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
384 | if (stride & (stride - 1)) | 233 | if (stride & (stride - 1)) |
385 | return false; | 234 | return false; |
386 | 235 | ||
387 | /* We don't handle the aperture area covered by the fence being bigger | ||
388 | * than the object size. | ||
389 | */ | ||
390 | if (i915_get_fence_size(dev, size) != size) | ||
391 | return false; | ||
392 | |||
393 | return true; | 236 | return true; |
394 | } | 237 | } |
395 | 238 | ||
396 | static bool | 239 | bool |
397 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) | 240 | i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) |
398 | { | 241 | { |
399 | struct drm_device *dev = obj->dev; | 242 | struct drm_device *dev = obj->dev; |
@@ -440,9 +283,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
440 | obj_priv = obj->driver_private; | 283 | obj_priv = obj->driver_private; |
441 | 284 | ||
442 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { | 285 | if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { |
443 | mutex_lock(&dev->struct_mutex); | 286 | drm_gem_object_unreference_unlocked(obj); |
444 | drm_gem_object_unreference(obj); | ||
445 | mutex_unlock(&dev->struct_mutex); | ||
446 | return -EINVAL; | 287 | return -EINVAL; |
447 | } | 288 | } |
448 | 289 | ||
@@ -495,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
495 | goto err; | 336 | goto err; |
496 | } | 337 | } |
497 | 338 | ||
498 | /* If we've changed tiling, GTT-mappings of the object | ||
499 | * need to re-fault to ensure that the correct fence register | ||
500 | * setup is in place. | ||
501 | */ | ||
502 | i915_gem_release_mmap(obj); | ||
503 | |||
504 | obj_priv->tiling_mode = args->tiling_mode; | 339 | obj_priv->tiling_mode = args->tiling_mode; |
505 | obj_priv->stride = args->stride; | 340 | obj_priv->stride = args->stride; |
506 | } | 341 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 85f4c5de97e2..ba1d8314c1ce 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -269,12 +269,62 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
269 | drm_sysfs_hotplug_event(dev); | 269 | drm_sysfs_hotplug_event(dev); |
270 | } | 270 | } |
271 | 271 | ||
272 | static void i915_handle_rps_change(struct drm_device *dev) | ||
273 | { | ||
274 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
275 | u32 busy_up, busy_down, max_avg, min_avg; | ||
276 | u16 rgvswctl; | ||
277 | u8 new_delay = dev_priv->cur_delay; | ||
278 | |||
279 | I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); | ||
280 | busy_up = I915_READ(RCPREVBSYTUPAVG); | ||
281 | busy_down = I915_READ(RCPREVBSYTDNAVG); | ||
282 | max_avg = I915_READ(RCBMAXAVG); | ||
283 | min_avg = I915_READ(RCBMINAVG); | ||
284 | |||
285 | /* Handle RCS change request from hw */ | ||
286 | if (busy_up > max_avg) { | ||
287 | if (dev_priv->cur_delay != dev_priv->max_delay) | ||
288 | new_delay = dev_priv->cur_delay - 1; | ||
289 | if (new_delay < dev_priv->max_delay) | ||
290 | new_delay = dev_priv->max_delay; | ||
291 | } else if (busy_down < min_avg) { | ||
292 | if (dev_priv->cur_delay != dev_priv->min_delay) | ||
293 | new_delay = dev_priv->cur_delay + 1; | ||
294 | if (new_delay > dev_priv->min_delay) | ||
295 | new_delay = dev_priv->min_delay; | ||
296 | } | ||
297 | |||
298 | DRM_DEBUG("rps change requested: %d -> %d\n", | ||
299 | dev_priv->cur_delay, new_delay); | ||
300 | |||
301 | rgvswctl = I915_READ(MEMSWCTL); | ||
302 | if (rgvswctl & MEMCTL_CMD_STS) { | ||
303 | DRM_ERROR("gpu busy, RCS change rejected\n"); | ||
304 | return; /* still busy with another command */ | ||
305 | } | ||
306 | |||
307 | /* Program the new state */ | ||
308 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | ||
309 | (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | ||
310 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
311 | POSTING_READ(MEMSWCTL); | ||
312 | |||
313 | rgvswctl |= MEMCTL_CMD_STS; | ||
314 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
315 | |||
316 | dev_priv->cur_delay = new_delay; | ||
317 | |||
318 | DRM_DEBUG("rps changed\n"); | ||
319 | |||
320 | return; | ||
321 | } | ||
322 | |||
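
Note the inverted naming in the handler above, inherited from the hardware: a smaller delay value means a faster clock, so max_delay is the numeric floor and min_delay the numeric ceiling of the clamp. The stepping logic in isolation, without the register I/O:

#include <stdint.h>

struct rps {
	uint8_t cur_delay;
	uint8_t max_delay;	/* fastest allowed (smallest value) */
	uint8_t min_delay;	/* slowest allowed (largest value) */
};

static uint8_t next_delay(const struct rps *r,
			  uint32_t busy_up, uint32_t busy_down,
			  uint32_t max_avg, uint32_t min_avg)
{
	uint8_t new_delay = r->cur_delay;

	if (busy_up > max_avg) {		/* too busy: step faster */
		if (r->cur_delay != r->max_delay)
			new_delay = r->cur_delay - 1;
		if (new_delay < r->max_delay)
			new_delay = r->max_delay;
	} else if (busy_down < min_avg) {	/* idle enough: step slower */
		if (r->cur_delay != r->min_delay)
			new_delay = r->cur_delay + 1;
		if (new_delay > r->min_delay)
			new_delay = r->min_delay;
	}

	return new_delay;
}
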
272 | irqreturn_t ironlake_irq_handler(struct drm_device *dev) | 323 | irqreturn_t ironlake_irq_handler(struct drm_device *dev) |
273 | { | 324 | { |
274 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 325 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
275 | int ret = IRQ_NONE; | 326 | int ret = IRQ_NONE; |
276 | u32 de_iir, gt_iir, de_ier, pch_iir; | 327 | u32 de_iir, gt_iir, de_ier, pch_iir; |
277 | u32 new_de_iir, new_gt_iir, new_pch_iir; | ||
278 | struct drm_i915_master_private *master_priv; | 328 | struct drm_i915_master_private *master_priv; |
279 | 329 | ||
280 | /* disable master interrupt before clearing iir */ | 330 | /* disable master interrupt before clearing iir */ |
@@ -286,49 +336,63 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
286 | gt_iir = I915_READ(GTIIR); | 336 | gt_iir = I915_READ(GTIIR); |
287 | pch_iir = I915_READ(SDEIIR); | 337 | pch_iir = I915_READ(SDEIIR); |
288 | 338 | ||
289 | for (;;) { | 339 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) |
290 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) | 340 | goto done; |
291 | break; | ||
292 | 341 | ||
293 | ret = IRQ_HANDLED; | 342 | ret = IRQ_HANDLED; |
294 | 343 | ||
295 | /* should clear PCH hotplug event before clearing CPU irq */ | 344 | if (dev->primary->master) {
296 | I915_WRITE(SDEIIR, pch_iir); | 345 | master_priv = dev->primary->master->driver_priv; |
297 | new_pch_iir = I915_READ(SDEIIR); | 346 | if (master_priv->sarea_priv) |
347 | master_priv->sarea_priv->last_dispatch = | ||
348 | READ_BREADCRUMB(dev_priv); | ||
349 | } | ||
298 | 350 | ||
299 | I915_WRITE(DEIIR, de_iir); | 351 | if (gt_iir & GT_USER_INTERRUPT) { |
300 | new_de_iir = I915_READ(DEIIR); | 352 | u32 seqno = i915_get_gem_seqno(dev); |
301 | I915_WRITE(GTIIR, gt_iir); | 353 | dev_priv->mm.irq_gem_seqno = seqno; |
302 | new_gt_iir = I915_READ(GTIIR); | 354 | trace_i915_gem_request_complete(dev, seqno); |
355 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
356 | dev_priv->hangcheck_count = 0; | ||
357 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
358 | } | ||
303 | 359 | ||
304 | if (dev->primary->master) { | 360 | if (de_iir & DE_GSE) |
305 | master_priv = dev->primary->master->driver_priv; | 361 | ironlake_opregion_gse_intr(dev); |
306 | if (master_priv->sarea_priv) | ||
307 | master_priv->sarea_priv->last_dispatch = | ||
308 | READ_BREADCRUMB(dev_priv); | ||
309 | } | ||
310 | 362 | ||
311 | if (gt_iir & GT_USER_INTERRUPT) { | 363 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
312 | u32 seqno = i915_get_gem_seqno(dev); | 364 | intel_prepare_page_flip(dev, 0); |
313 | dev_priv->mm.irq_gem_seqno = seqno; | 365 | intel_finish_page_flip(dev, 0); |
314 | trace_i915_gem_request_complete(dev, seqno); | 366 | } |
315 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
316 | } | ||
317 | 367 | ||
318 | if (de_iir & DE_GSE) | 368 | if (de_iir & DE_PLANEB_FLIP_DONE) { |
319 | ironlake_opregion_gse_intr(dev); | 369 | intel_prepare_page_flip(dev, 1); |
370 | intel_finish_page_flip(dev, 1); | ||
371 | } | ||
320 | 372 | ||
321 | /* check event from PCH */ | 373 | if (de_iir & DE_PIPEA_VBLANK) |
322 | if ((de_iir & DE_PCH_EVENT) && | 374 | drm_handle_vblank(dev, 0); |
323 | (pch_iir & SDE_HOTPLUG_MASK)) { | 375 | |
324 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 376 | if (de_iir & DE_PIPEB_VBLANK) |
325 | } | 377 | drm_handle_vblank(dev, 1); |
326 | 378 | ||
327 | de_iir = new_de_iir; | 379 | /* check event from PCH */ |
328 | gt_iir = new_gt_iir; | 380 | if ((de_iir & DE_PCH_EVENT) && |
329 | pch_iir = new_pch_iir; | 381 | (pch_iir & SDE_HOTPLUG_MASK)) { |
382 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
330 | } | 383 | } |
331 | 384 | ||
385 | if (de_iir & DE_PCU_EVENT) { | ||
386 | I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); | ||
387 | i915_handle_rps_change(dev); | ||
388 | } | ||
389 | |||
390 | /* should clear PCH hotplug event before clearing CPU irq */ | ||
391 | I915_WRITE(SDEIIR, pch_iir); | ||
392 | I915_WRITE(GTIIR, gt_iir); | ||
393 | I915_WRITE(DEIIR, de_iir); | ||
394 | |||
395 | done: | ||
332 | I915_WRITE(DEIER, de_ier); | 396 | I915_WRITE(DEIER, de_ier); |
333 | (void)I915_READ(DEIER); | 397 | (void)I915_READ(DEIER); |
334 | 398 | ||
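
The restructured handler above reads each IIR once, dispatches every source, and acknowledges all of them at the end (PCH first), instead of looping on re-read status. A sketch of that shape, with hypothetical register accessors standing in for I915_READ/I915_WRITE:

#include <stdint.h>

enum reg { DEIIR, GTIIR, SDEIIR };

static uint32_t regs[3];
static uint32_t read_reg(enum reg r) { return regs[r]; }
static void write_reg(enum reg r, uint32_t v) { regs[r] &= ~v; }  /* write-1-to-clear */

static int irq_handler(void)
{
	uint32_t de = read_reg(DEIIR);
	uint32_t gt = read_reg(GTIIR);
	uint32_t pch = read_reg(SDEIIR);

	if (de == 0 && gt == 0 && pch == 0)
		return 0;			/* not our interrupt */

	/* ... dispatch each status bit to its handler here ... */

	/* Ack everything last, the PCH (SDEIIR) before the CPU side. */
	write_reg(SDEIIR, pch);
	write_reg(GTIIR, gt);
	write_reg(DEIIR, de);
	return 1;
}
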
@@ -368,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work) | |||
368 | } | 432 | } |
369 | } | 433 | } |
370 | 434 | ||
435 | static struct drm_i915_error_object * | ||
436 | i915_error_object_create(struct drm_device *dev, | ||
437 | struct drm_gem_object *src) | ||
438 | { | ||
439 | struct drm_i915_error_object *dst; | ||
440 | struct drm_i915_gem_object *src_priv; | ||
441 | int page, page_count; | ||
442 | |||
443 | if (src == NULL) | ||
444 | return NULL; | ||
445 | |||
446 | src_priv = src->driver_private; | ||
447 | if (src_priv->pages == NULL) | ||
448 | return NULL; | ||
449 | |||
450 | page_count = src->size / PAGE_SIZE; | ||
451 | |||
452 | dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); | ||
453 | if (dst == NULL) | ||
454 | return NULL; | ||
455 | |||
456 | for (page = 0; page < page_count; page++) { | ||
457 | void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); | ||
458 | if (d == NULL) | ||
459 | goto unwind; | ||
460 | s = kmap_atomic(src_priv->pages[page], KM_USER0); | ||
461 | memcpy(d, s, PAGE_SIZE); | ||
462 | kunmap_atomic(s, KM_USER0); | ||
463 | dst->pages[page] = d; | ||
464 | } | ||
465 | dst->page_count = page_count; | ||
466 | dst->gtt_offset = src_priv->gtt_offset; | ||
467 | |||
468 | return dst; | ||
469 | |||
470 | unwind: | ||
471 | while (page--) | ||
472 | kfree(dst->pages[page]); | ||
473 | kfree(dst); | ||
474 | return NULL; | ||
475 | } | ||
476 | |||
477 | static void | ||
478 | i915_error_object_free(struct drm_i915_error_object *obj) | ||
479 | { | ||
480 | int page; | ||
481 | |||
482 | if (obj == NULL) | ||
483 | return; | ||
484 | |||
485 | for (page = 0; page < obj->page_count; page++) | ||
486 | kfree(obj->pages[page]); | ||
487 | |||
488 | kfree(obj); | ||
489 | } | ||
490 | |||
491 | static void | ||
492 | i915_error_state_free(struct drm_device *dev, | ||
493 | struct drm_i915_error_state *error) | ||
494 | { | ||
495 | i915_error_object_free(error->batchbuffer[0]); | ||
496 | i915_error_object_free(error->batchbuffer[1]); | ||
497 | i915_error_object_free(error->ringbuffer); | ||
498 | kfree(error->active_bo); | ||
499 | kfree(error); | ||
500 | } | ||
501 | |||
502 | static u32 | ||
503 | i915_get_bbaddr(struct drm_device *dev, u32 *ring) | ||
504 | { | ||
505 | u32 cmd; | ||
506 | |||
507 | if (IS_I830(dev) || IS_845G(dev)) | ||
508 | cmd = MI_BATCH_BUFFER; | ||
509 | else if (IS_I965G(dev)) | ||
510 | cmd = (MI_BATCH_BUFFER_START | (2 << 6) | | ||
511 | MI_BATCH_NON_SECURE_I965); | ||
512 | else | ||
513 | cmd = (MI_BATCH_BUFFER_START | (2 << 6)); | ||
514 | |||
515 | return ring[0] == cmd ? ring[1] : 0; | ||
516 | } | ||
517 | |||
518 | static u32 | ||
519 | i915_ringbuffer_last_batch(struct drm_device *dev) | ||
520 | { | ||
521 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
522 | u32 head, bbaddr; | ||
523 | u32 *ring; | ||
524 | |||
525 | /* Locate the current position in the ringbuffer and walk back | ||
526 | * to find the most recently dispatched batch buffer. | ||
527 | */ | ||
528 | bbaddr = 0; | ||
529 | head = I915_READ(PRB0_HEAD) & HEAD_ADDR; | ||
530 | ring = (u32 *)(dev_priv->ring.virtual_start + head); | ||
531 | |||
532 | while (--ring >= (u32 *)dev_priv->ring.virtual_start) { | ||
533 | bbaddr = i915_get_bbaddr(dev, ring); | ||
534 | if (bbaddr) | ||
535 | break; | ||
536 | } | ||
537 | |||
538 | if (bbaddr == 0) { | ||
539 | ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); | ||
540 | while (--ring >= (u32 *)dev_priv->ring.virtual_start) { | ||
541 | bbaddr = i915_get_bbaddr(dev, ring); | ||
542 | if (bbaddr) | ||
543 | break; | ||
544 | } | ||
545 | } | ||
546 | |||
547 | return bbaddr; | ||
548 | } | ||
549 | |||
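
i915_ringbuffer_last_batch() above scans backwards from the ring head for the most recent batch-buffer dispatch, and rescans the whole ring once if the search runs off the start (the head may have wrapped). An index-based sketch of the same walk, assuming head and size are counted in dwords and head < size:

#include <stddef.h>
#include <stdint.h>

/* A dword is a batch dispatch if it matches the per-generation command;
 * the following dword is then the batch address. */
static uint32_t get_bbaddr(const uint32_t *p, uint32_t cmd)
{
	return p[0] == cmd ? p[1] : 0;
}

static uint32_t last_batch(const uint32_t *ring, size_t head, size_t size,
			   uint32_t cmd)
{
	uint32_t bbaddr;
	size_t i;

	/* Walk back from just below the head. */
	for (i = head; i-- > 0; )
		if ((bbaddr = get_bbaddr(&ring[i], cmd)) != 0)
			return bbaddr;

	/* Nothing found: the head wrapped, so rescan from the top. */
	for (i = size - 1; i-- > 0; )
		if ((bbaddr = get_bbaddr(&ring[i], cmd)) != 0)
			return bbaddr;

	return 0;
}
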
371 | /** | 550 | /** |
372 | * i915_capture_error_state - capture an error record for later analysis | 551 | * i915_capture_error_state - capture an error record for later analysis |
373 | * @dev: drm device | 552 | * @dev: drm device |
@@ -380,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work) | |||
380 | static void i915_capture_error_state(struct drm_device *dev) | 559 | static void i915_capture_error_state(struct drm_device *dev) |
381 | { | 560 | { |
382 | struct drm_i915_private *dev_priv = dev->dev_private; | 561 | struct drm_i915_private *dev_priv = dev->dev_private; |
562 | struct drm_i915_gem_object *obj_priv; | ||
383 | struct drm_i915_error_state *error; | 563 | struct drm_i915_error_state *error; |
564 | struct drm_gem_object *batchbuffer[2]; | ||
384 | unsigned long flags; | 565 | unsigned long flags; |
566 | u32 bbaddr; | ||
567 | int count; | ||
385 | 568 | ||
386 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 569 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
387 | if (dev_priv->first_error) | 570 | error = dev_priv->first_error; |
388 | goto out; | 571 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
572 | if (error) | ||
573 | return; | ||
389 | 574 | ||
390 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 575 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
391 | if (!error) { | 576 | if (!error) { |
392 | DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n"); | 577 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
393 | goto out; | 578 | return; |
394 | } | 579 | } |
395 | 580 | ||
581 | error->seqno = i915_get_gem_seqno(dev); | ||
396 | error->eir = I915_READ(EIR); | 582 | error->eir = I915_READ(EIR); |
397 | error->pgtbl_er = I915_READ(PGTBL_ER); | 583 | error->pgtbl_er = I915_READ(PGTBL_ER); |
398 | error->pipeastat = I915_READ(PIPEASTAT); | 584 | error->pipeastat = I915_READ(PIPEASTAT); |
@@ -403,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
403 | error->ipehr = I915_READ(IPEHR); | 589 | error->ipehr = I915_READ(IPEHR); |
404 | error->instdone = I915_READ(INSTDONE); | 590 | error->instdone = I915_READ(INSTDONE); |
405 | error->acthd = I915_READ(ACTHD); | 591 | error->acthd = I915_READ(ACTHD); |
592 | error->bbaddr = 0; | ||
406 | } else { | 593 | } else { |
407 | error->ipeir = I915_READ(IPEIR_I965); | 594 | error->ipeir = I915_READ(IPEIR_I965); |
408 | error->ipehr = I915_READ(IPEHR_I965); | 595 | error->ipehr = I915_READ(IPEHR_I965); |
@@ -410,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
410 | error->instps = I915_READ(INSTPS); | 597 | error->instps = I915_READ(INSTPS); |
411 | error->instdone1 = I915_READ(INSTDONE1); | 598 | error->instdone1 = I915_READ(INSTDONE1); |
412 | error->acthd = I915_READ(ACTHD_I965); | 599 | error->acthd = I915_READ(ACTHD_I965); |
600 | error->bbaddr = I915_READ64(BB_ADDR); | ||
413 | } | 601 | } |
414 | 602 | ||
415 | do_gettimeofday(&error->time); | 603 | bbaddr = i915_ringbuffer_last_batch(dev); |
604 | |||
605 | /* Grab the current batchbuffer, most likely to have crashed. */ | ||
606 | batchbuffer[0] = NULL; | ||
607 | batchbuffer[1] = NULL; | ||
608 | count = 0; | ||
609 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | ||
610 | struct drm_gem_object *obj = obj_priv->obj; | ||
611 | |||
612 | if (batchbuffer[0] == NULL && | ||
613 | bbaddr >= obj_priv->gtt_offset && | ||
614 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
615 | batchbuffer[0] = obj; | ||
616 | |||
617 | if (batchbuffer[1] == NULL && | ||
618 | error->acthd >= obj_priv->gtt_offset && | ||
619 | error->acthd < obj_priv->gtt_offset + obj->size && | ||
620 | batchbuffer[0] != obj) | ||
621 | batchbuffer[1] = obj; | ||
622 | |||
623 | count++; | ||
624 | } | ||
625 | |||
626 | /* We need to copy these to an anonymous buffer as the simplest | ||
627 | * method to avoid being overwritten by userspace. | ||
628 | */ | ||
629 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); | ||
630 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | ||
631 | |||
632 | /* Record the ringbuffer */ | ||
633 | error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); | ||
634 | |||
635 | /* Record buffers on the active list. */ | ||
636 | error->active_bo = NULL; | ||
637 | error->active_bo_count = 0; | ||
638 | |||
639 | if (count) | ||
640 | error->active_bo = kmalloc(sizeof(*error->active_bo)*count, | ||
641 | GFP_ATOMIC); | ||
642 | |||
643 | if (error->active_bo) { | ||
644 | int i = 0; | ||
645 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | ||
646 | struct drm_gem_object *obj = obj_priv->obj; | ||
647 | |||
648 | error->active_bo[i].size = obj->size; | ||
649 | error->active_bo[i].name = obj->name; | ||
650 | error->active_bo[i].seqno = obj_priv->last_rendering_seqno; | ||
651 | error->active_bo[i].gtt_offset = obj_priv->gtt_offset; | ||
652 | error->active_bo[i].read_domains = obj->read_domains; | ||
653 | error->active_bo[i].write_domain = obj->write_domain; | ||
654 | error->active_bo[i].fence_reg = obj_priv->fence_reg; | ||
655 | error->active_bo[i].pinned = 0; | ||
656 | if (obj_priv->pin_count > 0) | ||
657 | error->active_bo[i].pinned = 1; | ||
658 | if (obj_priv->user_pin_count > 0) | ||
659 | error->active_bo[i].pinned = -1; | ||
660 | error->active_bo[i].tiling = obj_priv->tiling_mode; | ||
661 | error->active_bo[i].dirty = obj_priv->dirty; | ||
662 | error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; | ||
663 | |||
664 | if (++i == count) | ||
665 | break; | ||
666 | } | ||
667 | error->active_bo_count = i; | ||
668 | } | ||
416 | 669 | ||
417 | dev_priv->first_error = error; | 670 | do_gettimeofday(&error->time); |
418 | 671 | ||
419 | out: | 672 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
673 | if (dev_priv->first_error == NULL) { | ||
674 | dev_priv->first_error = error; | ||
675 | error = NULL; | ||
676 | } | ||
420 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 677 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
678 | |||
679 | if (error) | ||
680 | i915_error_state_free(dev, error); | ||
681 | } | ||
682 | |||
683 | void i915_destroy_error_state(struct drm_device *dev) | ||
684 | { | ||
685 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
686 | struct drm_i915_error_state *error; | ||
687 | |||
688 | spin_lock(&dev_priv->error_lock); | ||
689 | error = dev_priv->first_error; | ||
690 | dev_priv->first_error = NULL; | ||
691 | spin_unlock(&dev_priv->error_lock); | ||
692 | |||
693 | if (error) | ||
694 | i915_error_state_free(dev, error); | ||
421 | } | 695 | } |
422 | 696 | ||
423 | /** | 697 | /** |
@@ -852,11 +1126,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
852 | if (!(pipeconf & PIPEACONF_ENABLE)) | 1126 | if (!(pipeconf & PIPEACONF_ENABLE)) |
853 | return -EINVAL; | 1127 | return -EINVAL; |
854 | 1128 | ||
855 | if (IS_IRONLAKE(dev)) | ||
856 | return 0; | ||
857 | |||
858 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1129 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
859 | if (IS_I965G(dev)) | 1130 | if (IS_IRONLAKE(dev)) |
1131 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
1132 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | ||
1133 | else if (IS_I965G(dev)) | ||
860 | i915_enable_pipestat(dev_priv, pipe, | 1134 | i915_enable_pipestat(dev_priv, pipe, |
861 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1135 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
862 | else | 1136 | else |
@@ -874,13 +1148,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) | |||
874 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1148 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
875 | unsigned long irqflags; | 1149 | unsigned long irqflags; |
876 | 1150 | ||
877 | if (IS_IRONLAKE(dev)) | ||
878 | return; | ||
879 | |||
880 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 1151 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
881 | i915_disable_pipestat(dev_priv, pipe, | 1152 | if (IS_IRONLAKE(dev)) |
882 | PIPE_VBLANK_INTERRUPT_ENABLE | | 1153 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
883 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1154 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
1155 | else | ||
1156 | i915_disable_pipestat(dev_priv, pipe, | ||
1157 | PIPE_VBLANK_INTERRUPT_ENABLE | | ||
1158 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | ||
884 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 1159 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); |
885 | } | 1160 | } |
886 | 1161 | ||
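Both vblank paths now take the lock first and then pick a display-engine bit by pipe. A compact sketch of that selection and the mask discipline; the bit positions and the cached-register model below are assumptions for illustration, not the real register layout:

#include <stdint.h>

#define DE_PIPEA_VBLANK (1u << 7)    /* illustrative positions only */
#define DE_PIPEB_VBLANK (1u << 3)

static uint32_t deimr_shadow = ~0u;  /* model of DEIMR: 1 = masked */

static void set_pipe_vblank(int pipe, int enable)
{
        uint32_t bit = (pipe == 0) ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK;

        if (enable)
                deimr_shadow &= ~bit;   /* clearing the mask bit enables */
        else
                deimr_shadow |= bit;
        /* a real driver would now write the shadow to DEIMR under the lock */
}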
@@ -1023,13 +1298,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1023 | { | 1298 | { |
1024 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1299 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1025 | /* enable the kinds of interrupts that are always enabled */ | 1300 | /* enable the kinds of interrupts that are always enabled */
1026 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; | 1301 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1302 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | ||
1027 | u32 render_mask = GT_USER_INTERRUPT; | 1303 | u32 render_mask = GT_USER_INTERRUPT; |
1028 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1304 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1029 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1305 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1030 | 1306 | ||
1031 | dev_priv->irq_mask_reg = ~display_mask; | 1307 | dev_priv->irq_mask_reg = ~display_mask; |
1032 | dev_priv->de_irq_enable_reg = display_mask; | 1308 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; |
1033 | 1309 | ||
1034 | /* should always be able to generate an irq */ | 1310 | /* should always be able to generate an irq */
1035 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1311 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
@@ -1054,6 +1330,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1054 | I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); | 1330 | I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); |
1055 | (void) I915_READ(SDEIER); | 1331 | (void) I915_READ(SDEIER); |
1056 | 1332 | ||
1333 | if (IS_IRONLAKE_M(dev)) { | ||
1334 | /* Clear & enable PCU event interrupts */ | ||
1335 | I915_WRITE(DEIIR, DE_PCU_EVENT); | ||
1336 | I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); | ||
1337 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); | ||
1338 | } | ||
1339 | |||
1057 | return 0; | 1340 | return 0; |
1058 | } | 1341 | } |
1059 | 1342 | ||
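The postinstall sequence above follows a fixed order: acknowledge stale identity bits, program the mask, enable the sources, then flush with a posting read. A self-contained model (register indices and the write-1-to-clear IIR behavior are simulated here, not quoted from hardware docs):

#include <stdint.h>

enum { IIR, IMR, IER, NREGS };
static uint32_t regs[NREGS];

static uint32_t rd(int r) { return regs[r]; }
static void wr(int r, uint32_t v)
{
        if (r == IIR)
                regs[r] &= ~v;     /* identity regs clear on writing 1s */
        else
                regs[r] = v;
}

static void irq_postinstall(uint32_t enable_mask)
{
        wr(IIR, rd(IIR));          /* ack anything left over */
        wr(IMR, ~enable_mask);     /* unmask exactly what we enable */
        wr(IER, enable_mask);      /* turn the sources on last */
        (void)rd(IER);             /* posting read */
}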
@@ -1084,6 +1367,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
1084 | (void) I915_READ(IER); | 1367 | (void) I915_READ(IER); |
1085 | } | 1368 | } |
1086 | 1369 | ||
1370 | /* | ||
1371 | * Must be called after intel_modeset_init or hotplug interrupts won't be | ||
1372 | * enabled correctly. | ||
1373 | */ | ||
1087 | int i915_driver_irq_postinstall(struct drm_device *dev) | 1374 | int i915_driver_irq_postinstall(struct drm_device *dev) |
1088 | { | 1375 | { |
1089 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1376 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -1106,19 +1393,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1106 | if (I915_HAS_HOTPLUG(dev)) { | 1393 | if (I915_HAS_HOTPLUG(dev)) { |
1107 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1394 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
1108 | 1395 | ||
1109 | /* Leave other bits alone */ | 1396 | /* Note HDMI and DP share bits */ |
1110 | hotplug_en |= HOTPLUG_EN_MASK; | 1397 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
1398 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
1399 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
1400 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
1401 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
1402 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
1403 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
1404 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
1405 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
1406 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
1407 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) | ||
1408 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
1409 | /* Ignore TV since it's buggy */ | ||
1410 | |||
1111 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 1411 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
1112 | 1412 | ||
1113 | dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS | | ||
1114 | TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS | | ||
1115 | SDVOB_HOTPLUG_INT_STATUS; | ||
1116 | if (IS_G4X(dev)) { | ||
1117 | dev_priv->hotplug_supported_mask |= | ||
1118 | HDMIB_HOTPLUG_INT_STATUS | | ||
1119 | HDMIC_HOTPLUG_INT_STATUS | | ||
1120 | HDMID_HOTPLUG_INT_STATUS; | ||
1121 | } | ||
1122 | /* Enable in IER... */ | 1413 | /* Enable in IER... */ |
1123 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1414 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
1124 | /* and unmask in IMR */ | 1415 | /* and unmask in IMR */ |
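The new if-chain derives the hotplug enable word from dev_priv->hotplug_supported_mask one connector at a time. The same mapping can be read as a table; the status/enable bit values below are placeholders, not the register's real encoding:

#include <stdint.h>
#include <stddef.h>

static const struct { uint32_t status, enable; } hotplug_map[] = {
        { 1u << 29, 1u << 25 },   /* HDMIB (shared with DP B) */
        { 1u << 28, 1u << 24 },   /* HDMIC */
        { 1u << 27, 1u << 23 },   /* HDMID */
        { 1u << 11, 1u << 9  },   /* CRT */
};

static uint32_t hotplug_en_bits(uint32_t supported)
{
        uint32_t en = 0;
        for (size_t i = 0; i < sizeof(hotplug_map) / sizeof(*hotplug_map); i++)
                if (supported & hotplug_map[i].status)
                        en |= hotplug_map[i].enable;
        return en;                /* TV stays out, as the comment notes */
}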
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 974b3cf70618..eff8d850a758 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -61,6 +61,7 @@ | |||
61 | #define GC_CLOCK_100_200 (1 << 0) | 61 | #define GC_CLOCK_100_200 (1 << 0) |
62 | #define GC_CLOCK_100_133 (2 << 0) | 62 | #define GC_CLOCK_100_133 (2 << 0) |
63 | #define GC_CLOCK_166_250 (3 << 0) | 63 | #define GC_CLOCK_166_250 (3 << 0) |
64 | #define GCFGC2 0xda | ||
64 | #define GCFGC 0xf0 /* 915+ only */ | 65 | #define GCFGC 0xf0 /* 915+ only */ |
65 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) | 66 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) |
66 | #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) | 67 | #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) |
@@ -282,7 +283,7 @@ | |||
282 | #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) | 283 | #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) |
283 | #define I915_DISPLAY_PORT_INTERRUPT (1<<17) | 284 | #define I915_DISPLAY_PORT_INTERRUPT (1<<17) |
284 | #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) | 285 | #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) |
285 | #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) | 286 | #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ |
286 | #define I915_HWB_OOM_INTERRUPT (1<<13) | 287 | #define I915_HWB_OOM_INTERRUPT (1<<13) |
287 | #define I915_SYNC_STATUS_INTERRUPT (1<<12) | 288 | #define I915_SYNC_STATUS_INTERRUPT (1<<12) |
288 | #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) | 289 | #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) |
@@ -306,11 +307,14 @@ | |||
306 | #define I915_ERROR_MEMORY_REFRESH (1<<1) | 307 | #define I915_ERROR_MEMORY_REFRESH (1<<1) |
307 | #define I915_ERROR_INSTRUCTION (1<<0) | 308 | #define I915_ERROR_INSTRUCTION (1<<0) |
308 | #define INSTPM 0x020c0 | 309 | #define INSTPM 0x020c0 |
310 | #define INSTPM_SELF_EN (1<<12) /* 915GM only */ | ||
309 | #define ACTHD 0x020c8 | 311 | #define ACTHD 0x020c8 |
310 | #define FW_BLC 0x020d8 | 312 | #define FW_BLC 0x020d8 |
311 | #define FW_BLC2 0x020dc | 313 | #define FW_BLC2 0x020dc |
312 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ | 314 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ |
313 | #define FW_BLC_SELF_EN (1<<15) | 315 | #define FW_BLC_SELF_EN_MASK (1<<31) |
316 | #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ | ||
317 | #define FW_BLC_SELF_EN (1<<15) /* 945 only */ | ||
314 | #define MM_BURST_LENGTH 0x00700000 | 318 | #define MM_BURST_LENGTH 0x00700000 |
315 | #define MM_FIFO_WATERMARK 0x0001F000 | 319 | #define MM_FIFO_WATERMARK 0x0001F000 |
316 | #define LM_BURST_LENGTH 0x00000700 | 320 | #define LM_BURST_LENGTH 0x00000700 |
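The new FW_BLC_SELF_EN_MASK bit suggests a masked-write convention on these parts: the enable bit only latches when its mask bit is set in the same write. A hedged sketch of composing such a write; this is an inference from the defines above, not documented behavior:

#include <stdint.h>

#define FW_BLC_SELF_EN_MASK (1u << 31)
#define FW_BLC_SELF_EN      (1u << 15)   /* 945 only, per the defines */

static uint32_t fw_blc_self_word(int enable)
{
        uint32_t v = FW_BLC_SELF_EN_MASK;     /* permit the EN bit to change */
        if (enable)
                v |= FW_BLC_SELF_EN;
        return v;                             /* candidate FW_BLC_SELF value */
}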
@@ -324,6 +328,7 @@ | |||
324 | #define CM0_COLOR_EVICT_DISABLE (1<<3) | 328 | #define CM0_COLOR_EVICT_DISABLE (1<<3) |
325 | #define CM0_DEPTH_WRITE_DISABLE (1<<1) | 329 | #define CM0_DEPTH_WRITE_DISABLE (1<<1) |
326 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) | 330 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
331 | #define BB_ADDR 0x02140 /* 8 bytes */ | ||
327 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ | 332 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
328 | 333 | ||
329 | 334 | ||
@@ -338,6 +343,7 @@ | |||
338 | #define FBC_CTL_PERIODIC (1<<30) | 343 | #define FBC_CTL_PERIODIC (1<<30) |
339 | #define FBC_CTL_INTERVAL_SHIFT (16) | 344 | #define FBC_CTL_INTERVAL_SHIFT (16) |
340 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) | 345 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) |
346 | #define FBC_C3_IDLE (1<<13) | ||
341 | #define FBC_CTL_STRIDE_SHIFT (5) | 347 | #define FBC_CTL_STRIDE_SHIFT (5) |
342 | #define FBC_CTL_FENCENO (1<<0) | 348 | #define FBC_CTL_FENCENO (1<<0) |
343 | #define FBC_COMMAND 0x0320c | 349 | #define FBC_COMMAND 0x0320c |
@@ -783,10 +789,144 @@ | |||
783 | #define CLKCFG_MEM_800 (3 << 4) | 789 | #define CLKCFG_MEM_800 (3 << 4) |
784 | #define CLKCFG_MEM_MASK (7 << 4) | 790 | #define CLKCFG_MEM_MASK (7 << 4) |
785 | 791 | ||
786 | /** GM965 GM45 render standby register */ | 792 | #define CRSTANDVID 0x11100 |
787 | #define MCHBAR_RENDER_STANDBY 0x111B8 | 793 | #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ |
794 | #define PXVFREQ_PX_MASK 0x7f000000 | ||
795 | #define PXVFREQ_PX_SHIFT 24 | ||
796 | #define VIDFREQ_BASE 0x11110 | ||
797 | #define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ | ||
798 | #define VIDFREQ2 0x11114 | ||
799 | #define VIDFREQ3 0x11118 | ||
800 | #define VIDFREQ4 0x1111c | ||
801 | #define VIDFREQ_P0_MASK 0x1f000000 | ||
802 | #define VIDFREQ_P0_SHIFT 24 | ||
803 | #define VIDFREQ_P0_CSCLK_MASK 0x00f00000 | ||
804 | #define VIDFREQ_P0_CSCLK_SHIFT 20 | ||
805 | #define VIDFREQ_P0_CRCLK_MASK 0x000f0000 | ||
806 | #define VIDFREQ_P0_CRCLK_SHIFT 16 | ||
807 | #define VIDFREQ_P1_MASK 0x00001f00 | ||
808 | #define VIDFREQ_P1_SHIFT 8 | ||
809 | #define VIDFREQ_P1_CSCLK_MASK 0x000000f0 | ||
810 | #define VIDFREQ_P1_CSCLK_SHIFT 4 | ||
811 | #define VIDFREQ_P1_CRCLK_MASK 0x0000000f | ||
812 | #define INTTOEXT_BASE_ILK 0x11300 | ||
813 | #define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ | ||
814 | #define INTTOEXT_MAP3_SHIFT 24 | ||
815 | #define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) | ||
816 | #define INTTOEXT_MAP2_SHIFT 16 | ||
817 | #define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) | ||
818 | #define INTTOEXT_MAP1_SHIFT 8 | ||
819 | #define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) | ||
820 | #define INTTOEXT_MAP0_SHIFT 0 | ||
821 | #define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) | ||
822 | #define MEMSWCTL 0x11170 /* Ironlake only */ | ||
823 | #define MEMCTL_CMD_MASK 0xe000 | ||
824 | #define MEMCTL_CMD_SHIFT 13 | ||
825 | #define MEMCTL_CMD_RCLK_OFF 0 | ||
826 | #define MEMCTL_CMD_RCLK_ON 1 | ||
827 | #define MEMCTL_CMD_CHFREQ 2 | ||
828 | #define MEMCTL_CMD_CHVID 3 | ||
829 | #define MEMCTL_CMD_VMMOFF 4 | ||
830 | #define MEMCTL_CMD_VMMON 5 | ||
831 | #define MEMCTL_CMD_STS (1<<12) /* write 1 triggers the command; clears | ||
832 | when the command completes */ | ||
833 | #define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ | ||
834 | #define MEMCTL_FREQ_SHIFT 8 | ||
835 | #define MEMCTL_SFCAVM (1<<7) | ||
836 | #define MEMCTL_TGT_VID_MASK 0x007f | ||
837 | #define MEMIHYST 0x1117c | ||
838 | #define MEMINTREN 0x11180 /* 16 bits */ | ||
839 | #define MEMINT_RSEXIT_EN (1<<8) | ||
840 | #define MEMINT_CX_SUPR_EN (1<<7) | ||
841 | #define MEMINT_CONT_BUSY_EN (1<<6) | ||
842 | #define MEMINT_AVG_BUSY_EN (1<<5) | ||
843 | #define MEMINT_EVAL_CHG_EN (1<<4) | ||
844 | #define MEMINT_MON_IDLE_EN (1<<3) | ||
845 | #define MEMINT_UP_EVAL_EN (1<<2) | ||
846 | #define MEMINT_DOWN_EVAL_EN (1<<1) | ||
847 | #define MEMINT_SW_CMD_EN (1<<0) | ||
848 | #define MEMINTRSTR 0x11182 /* 16 bits */ | ||
849 | #define MEM_RSEXIT_MASK 0xc000 | ||
850 | #define MEM_RSEXIT_SHIFT 14 | ||
851 | #define MEM_CONT_BUSY_MASK 0x3000 | ||
852 | #define MEM_CONT_BUSY_SHIFT 12 | ||
853 | #define MEM_AVG_BUSY_MASK 0x0c00 | ||
854 | #define MEM_AVG_BUSY_SHIFT 10 | ||
855 | #define MEM_EVAL_CHG_MASK 0x0300 | ||
856 | #define MEM_EVAL_BUSY_SHIFT 8 | ||
857 | #define MEM_MON_IDLE_MASK 0x00c0 | ||
858 | #define MEM_MON_IDLE_SHIFT 6 | ||
859 | #define MEM_UP_EVAL_MASK 0x0030 | ||
860 | #define MEM_UP_EVAL_SHIFT 4 | ||
861 | #define MEM_DOWN_EVAL_MASK 0x000c | ||
862 | #define MEM_DOWN_EVAL_SHIFT 2 | ||
863 | #define MEM_SW_CMD_MASK 0x0003 | ||
864 | #define MEM_INT_STEER_GFX 0 | ||
865 | #define MEM_INT_STEER_CMR 1 | ||
866 | #define MEM_INT_STEER_SMI 2 | ||
867 | #define MEM_INT_STEER_SCI 3 | ||
868 | #define MEMINTRSTS 0x11184 | ||
869 | #define MEMINT_RSEXIT (1<<7) | ||
870 | #define MEMINT_CONT_BUSY (1<<6) | ||
871 | #define MEMINT_AVG_BUSY (1<<5) | ||
872 | #define MEMINT_EVAL_CHG (1<<4) | ||
873 | #define MEMINT_MON_IDLE (1<<3) | ||
874 | #define MEMINT_UP_EVAL (1<<2) | ||
875 | #define MEMINT_DOWN_EVAL (1<<1) | ||
876 | #define MEMINT_SW_CMD (1<<0) | ||
877 | #define MEMMODECTL 0x11190 | ||
878 | #define MEMMODE_BOOST_EN (1<<31) | ||
879 | #define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ | ||
880 | #define MEMMODE_BOOST_FREQ_SHIFT 24 | ||
881 | #define MEMMODE_IDLE_MODE_MASK 0x00030000 | ||
882 | #define MEMMODE_IDLE_MODE_SHIFT 16 | ||
883 | #define MEMMODE_IDLE_MODE_EVAL 0 | ||
884 | #define MEMMODE_IDLE_MODE_CONT 1 | ||
885 | #define MEMMODE_HWIDLE_EN (1<<15) | ||
886 | #define MEMMODE_SWMODE_EN (1<<14) | ||
887 | #define MEMMODE_RCLK_GATE (1<<13) | ||
888 | #define MEMMODE_HW_UPDATE (1<<12) | ||
889 | #define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ | ||
890 | #define MEMMODE_FSTART_SHIFT 8 | ||
891 | #define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ | ||
892 | #define MEMMODE_FMAX_SHIFT 4 | ||
893 | #define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ | ||
894 | #define RCBMAXAVG 0x1119c | ||
895 | #define MEMSWCTL2 0x1119e /* Cantiga only */ | ||
896 | #define SWMEMCMD_RENDER_OFF (0 << 13) | ||
897 | #define SWMEMCMD_RENDER_ON (1 << 13) | ||
898 | #define SWMEMCMD_SWFREQ (2 << 13) | ||
899 | #define SWMEMCMD_TARVID (3 << 13) | ||
900 | #define SWMEMCMD_VRM_OFF (4 << 13) | ||
901 | #define SWMEMCMD_VRM_ON (5 << 13) | ||
902 | #define CMDSTS (1<<12) | ||
903 | #define SFCAVM (1<<11) | ||
904 | #define SWFREQ_MASK 0x0380 /* P0-7 */ | ||
905 | #define SWFREQ_SHIFT 7 | ||
906 | #define TARVID_MASK 0x001f | ||
907 | #define MEMSTAT_CTG 0x111a0 | ||
908 | #define RCBMINAVG 0x111a0 | ||
909 | #define RCUPEI 0x111b0 | ||
910 | #define RCDNEI 0x111b4 | ||
911 | #define MCHBAR_RENDER_STANDBY 0x111b8 | ||
788 | #define RCX_SW_EXIT (1<<23) | 912 | #define RCX_SW_EXIT (1<<23) |
789 | #define RSX_STATUS_MASK 0x00700000 | 913 | #define RSX_STATUS_MASK 0x00700000 |
914 | #define VIDCTL 0x111c0 | ||
915 | #define VIDSTS 0x111c8 | ||
916 | #define VIDSTART 0x111cc /* 8 bits */ | ||
917 | #define MEMSTAT_ILK 0x111f8 | ||
918 | #define MEMSTAT_VID_MASK 0x7f00 | ||
919 | #define MEMSTAT_VID_SHIFT 8 | ||
920 | #define MEMSTAT_PSTATE_MASK 0x00f8 | ||
921 | #define MEMSTAT_PSTATE_SHIFT 3 | ||
922 | #define MEMSTAT_MON_ACTV (1<<2) | ||
923 | #define MEMSTAT_SRC_CTL_MASK 0x0003 | ||
924 | #define MEMSTAT_SRC_CTL_CORE 0 | ||
925 | #define MEMSTAT_SRC_CTL_TRB 1 | ||
926 | #define MEMSTAT_SRC_CTL_THM 2 | ||
927 | #define MEMSTAT_SRC_CTL_STDBY 3 | ||
928 | #define RCPREVBSYTUPAVG 0x113b8 | ||
929 | #define RCPREVBSYTDNAVG 0x113bc | ||
790 | #define PEG_BAND_GAP_DATA 0x14d68 | 930 | #define PEG_BAND_GAP_DATA 0x14d68 |
791 | 931 | ||
792 | /* | 932 | /* |
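The MEMSWCTL fields above describe a small command mailbox: load a command and target jitter, set MEMCTL_CMD_STS to fire, then poll it clear. A userspace model with stubbed register access (completion is simulated; timeouts and field widths are assumptions):

#include <stdint.h>

#define MEMCTL_CMD_SHIFT  13
#define MEMCTL_CMD_STS    (1u << 12)
#define MEMCTL_FREQ_SHIFT 8

static uint16_t memswctl;   /* stand-in for the 16-bit register */

static void mem_wr(uint16_t v) { memswctl = v; }
static uint16_t mem_rd(void)
{
        memswctl &= (uint16_t)~MEMCTL_CMD_STS;   /* simulate hw completion */
        return memswctl;
}

static int memswctl_command(unsigned cmd, unsigned freq)
{
        mem_wr((uint16_t)((cmd << MEMCTL_CMD_SHIFT) |
                          (freq << MEMCTL_FREQ_SHIFT) |
                          MEMCTL_CMD_STS));      /* writing 1 triggers it */
        for (int tries = 0; tries < 100; tries++)
                if (!(mem_rd() & MEMCTL_CMD_STS))
                        return 0;                /* cleared: command done */
        return -1;                               /* timeout */
}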
@@ -879,13 +1019,6 @@ | |||
879 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 1019 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
880 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | 1020 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ |
881 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f | 1021 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f |
882 | #define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \ | ||
883 | HDMIC_HOTPLUG_INT_EN | \ | ||
884 | HDMID_HOTPLUG_INT_EN | \ | ||
885 | SDVOB_HOTPLUG_INT_EN | \ | ||
886 | SDVOC_HOTPLUG_INT_EN | \ | ||
887 | CRT_HOTPLUG_INT_EN) | ||
888 | |||
889 | 1022 | ||
890 | #define PORT_HOTPLUG_STAT 0x61114 | 1023 | #define PORT_HOTPLUG_STAT 0x61114 |
891 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 1024 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
@@ -982,6 +1115,8 @@ | |||
982 | #define LVDS_PORT_EN (1 << 31) | 1115 | #define LVDS_PORT_EN (1 << 31) |
983 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 1116 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
984 | #define LVDS_PIPEB_SELECT (1 << 30) | 1117 | #define LVDS_PIPEB_SELECT (1 << 30) |
1118 | /* LVDS dithering flag on 965/g4x platform */ | ||
1119 | #define LVDS_ENABLE_DITHER (1 << 25) | ||
985 | /* Enable border for unscaled (or aspect-scaled) display */ | 1120 | /* Enable border for unscaled (or aspect-scaled) display */ |
986 | #define LVDS_BORDER_ENABLE (1 << 15) | 1121 | #define LVDS_BORDER_ENABLE (1 << 15) |
987 | /* | 1122 | /* |
@@ -1751,6 +1886,8 @@ | |||
1751 | 1886 | ||
1752 | /* Display & cursor control */ | 1887 | /* Display & cursor control */ |
1753 | 1888 | ||
1889 | /* dithering flag on Ironlake */ | ||
1890 | #define PIPE_ENABLE_DITHER (1 << 4) | ||
1754 | /* Pipe A */ | 1891 | /* Pipe A */ |
1755 | #define PIPEADSL 0x70000 | 1892 | #define PIPEADSL 0x70000 |
1756 | #define PIPEACONF 0x70008 | 1893 | #define PIPEACONF 0x70008 |
@@ -1818,7 +1955,7 @@ | |||
1818 | #define DSPFW_PLANEB_SHIFT 8 | 1955 | #define DSPFW_PLANEB_SHIFT 8 |
1819 | #define DSPFW2 0x70038 | 1956 | #define DSPFW2 0x70038 |
1820 | #define DSPFW_CURSORA_MASK 0x00003f00 | 1957 | #define DSPFW_CURSORA_MASK 0x00003f00 |
1821 | #define DSPFW_CURSORA_SHIFT 16 | 1958 | #define DSPFW_CURSORA_SHIFT 8 |
1822 | #define DSPFW3 0x7003c | 1959 | #define DSPFW3 0x7003c |
1823 | #define DSPFW_HPLL_SR_EN (1<<31) | 1960 | #define DSPFW_HPLL_SR_EN (1<<31) |
1824 | #define DSPFW_CURSOR_SR_SHIFT 24 | 1961 | #define DSPFW_CURSOR_SR_SHIFT 24 |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index d5ebb00a9d49..ac0d1a73ac22 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev) | |||
682 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | 682 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); |
683 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); | 683 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); |
684 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); | 684 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); |
685 | I915_WRITE(MCHBAR_RENDER_STANDBY, | ||
686 | dev_priv->saveMCHBAR_RENDER_STANDBY); | ||
685 | } else { | 687 | } else { |
686 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); | 688 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); |
687 | I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); | 689 | I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); |
@@ -732,12 +734,6 @@ int i915_save_state(struct drm_device *dev) | |||
732 | 734 | ||
733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 735 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); |
734 | 736 | ||
735 | /* Render Standby */ | ||
736 | if (I915_HAS_RC6(dev)) { | ||
737 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
738 | dev_priv->savePWRCTXA = I915_READ(PWRCTXA); | ||
739 | } | ||
740 | |||
741 | /* Hardware status page */ | 737 | /* Hardware status page */ |
742 | dev_priv->saveHWS = I915_READ(HWS_PGA); | 738 | dev_priv->saveHWS = I915_READ(HWS_PGA); |
743 | 739 | ||
@@ -751,11 +747,16 @@ int i915_save_state(struct drm_device *dev) | |||
751 | dev_priv->saveGTIMR = I915_READ(GTIMR); | 747 | dev_priv->saveGTIMR = I915_READ(GTIMR); |
752 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); | 748 | dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); |
753 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); | 749 | dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); |
750 | dev_priv->saveMCHBAR_RENDER_STANDBY = | ||
751 | I915_READ(MCHBAR_RENDER_STANDBY); | ||
754 | } else { | 752 | } else { |
755 | dev_priv->saveIER = I915_READ(IER); | 753 | dev_priv->saveIER = I915_READ(IER); |
756 | dev_priv->saveIMR = I915_READ(IMR); | 754 | dev_priv->saveIMR = I915_READ(IMR); |
757 | } | 755 | } |
758 | 756 | ||
757 | if (IS_IRONLAKE_M(dev)) | ||
758 | ironlake_disable_drps(dev); | ||
759 | |||
759 | /* Cache mode state */ | 760 | /* Cache mode state */ |
760 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | 761 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); |
761 | 762 | ||
@@ -793,12 +794,6 @@ int i915_restore_state(struct drm_device *dev) | |||
793 | 794 | ||
794 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 795 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); |
795 | 796 | ||
796 | /* Render Standby */ | ||
797 | if (I915_HAS_RC6(dev)) { | ||
798 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
799 | I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA); | ||
800 | } | ||
801 | |||
802 | /* Hardware status page */ | 797 | /* Hardware status page */ |
803 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 798 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
804 | 799 | ||
@@ -832,6 +827,9 @@ int i915_restore_state(struct drm_device *dev) | |||
832 | /* Clock gating state */ | 827 | /* Clock gating state */ |
833 | intel_init_clock_gating(dev); | 828 | intel_init_clock_gating(dev); |
834 | 829 | ||
830 | if (IS_IRONLAKE_M(dev)) | ||
831 | ironlake_enable_drps(dev); | ||
832 | |||
835 | /* Cache mode state */ | 833 | /* Cache mode state */ |
836 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); | 834 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); |
837 | 835 | ||
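The suspend/resume hunks move DRPS handling out of the plain register save/restore: scaling is stopped only after state has been saved, and restarted only after clock gating is re-initialized. The ordering, condensed into a sketch (all helpers are hypothetical stand-ins):

static int ironlake_m = 1;   /* pretend platform check */

static void save_display_state(void)    { /* snapshot registers */ }
static void restore_display_state(void) { /* write them back, re-init gating */ }
static void drps_disable(void)          { /* stop dynamic render p-states */ }
static void drps_enable(void)           { /* re-arm them */ }

static void suspend_path(void)
{
        save_display_state();        /* registers first, while stable */
        if (ironlake_m)
                drps_disable();      /* then quiesce scaling */
}

static void resume_path(void)
{
        restore_display_state();     /* clock gating comes back here */
        if (ironlake_m)
                drps_enable();       /* scaling is re-armed last */
}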
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index f27567747580..15fbc1b5a83e 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #define SLAVE_ADDR1 0x70 | 33 | #define SLAVE_ADDR1 0x70 |
34 | #define SLAVE_ADDR2 0x72 | 34 | #define SLAVE_ADDR2 0x72 |
35 | 35 | ||
36 | static int panel_type; | ||
37 | |||
36 | static void * | 38 | static void * |
37 | find_section(struct bdb_header *bdb, int section_id) | 39 | find_section(struct bdb_header *bdb, int section_id) |
38 | { | 40 | { |
@@ -128,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
128 | dev_priv->lvds_dither = lvds_options->pixel_dither; | 130 | dev_priv->lvds_dither = lvds_options->pixel_dither; |
129 | if (lvds_options->panel_type == 0xff) | 131 | if (lvds_options->panel_type == 0xff) |
130 | return; | 132 | return; |
133 | panel_type = lvds_options->panel_type; | ||
131 | 134 | ||
132 | lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); | 135 | lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); |
133 | if (!lvds_lfp_data) | 136 | if (!lvds_lfp_data) |
@@ -197,7 +200,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
197 | memset(temp_mode, 0, sizeof(*temp_mode)); | 200 | memset(temp_mode, 0, sizeof(*temp_mode)); |
198 | } | 201 | } |
199 | kfree(temp_mode); | 202 | kfree(temp_mode); |
200 | if (temp_downclock < panel_fixed_mode->clock) { | 203 | if (temp_downclock < panel_fixed_mode->clock && |
204 | i915_lvds_downclock) { | ||
201 | dev_priv->lvds_downclock_avail = 1; | 205 | dev_priv->lvds_downclock_avail = 1; |
202 | dev_priv->lvds_downclock = temp_downclock; | 206 | dev_priv->lvds_downclock = temp_downclock; |
203 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", | 207 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", |
@@ -405,6 +409,34 @@ parse_driver_features(struct drm_i915_private *dev_priv, | |||
405 | } | 409 | } |
406 | 410 | ||
407 | static void | 411 | static void |
412 | parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | ||
413 | { | ||
414 | struct bdb_edp *edp; | ||
415 | |||
416 | edp = find_section(bdb, BDB_EDP); | ||
417 | if (!edp) { | ||
418 | if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { | ||
419 | DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\ | ||
420 | assume 18bpp panel color depth.\n"); | ||
421 | dev_priv->edp_bpp = 18; | ||
422 | } | ||
423 | return; | ||
424 | } | ||
425 | |||
426 | switch ((edp->color_depth >> (panel_type * 2)) & 3) { | ||
427 | case EDP_18BPP: | ||
428 | dev_priv->edp_bpp = 18; | ||
429 | break; | ||
430 | case EDP_24BPP: | ||
431 | dev_priv->edp_bpp = 24; | ||
432 | break; | ||
433 | case EDP_30BPP: | ||
434 | dev_priv->edp_bpp = 30; | ||
435 | break; | ||
436 | } | ||
437 | } | ||
438 | |||
439 | static void | ||
408 | parse_device_mapping(struct drm_i915_private *dev_priv, | 440 | parse_device_mapping(struct drm_i915_private *dev_priv, |
409 | struct bdb_header *bdb) | 441 | struct bdb_header *bdb) |
410 | { | 442 | { |
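parse_edp() reads a two-bit color-depth code per panel out of a packed u32 (sixteen panels, two bits each), indexed by the panel_type cached earlier from the LVDS options block. The extraction in isolation, as a sketch:

#include <stdint.h>

static int edp_bpp_for_panel(uint32_t color_depth, int panel_type)
{
        switch ((color_depth >> (panel_type * 2)) & 3) {
        case 0:  return 18;   /* EDP_18BPP */
        case 1:  return 24;   /* EDP_24BPP */
        case 2:  return 30;   /* EDP_30BPP */
        default: return 0;    /* reserved encoding */
        }
}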
@@ -521,6 +553,7 @@ intel_init_bios(struct drm_device *dev) | |||
521 | parse_sdvo_device_mapping(dev_priv, bdb); | 553 | parse_sdvo_device_mapping(dev_priv, bdb); |
522 | parse_device_mapping(dev_priv, bdb); | 554 | parse_device_mapping(dev_priv, bdb); |
523 | parse_driver_features(dev_priv, bdb); | 555 | parse_driver_features(dev_priv, bdb); |
556 | parse_edp(dev_priv, bdb); | ||
524 | 557 | ||
525 | pci_unmap_rom(pdev, bios); | 558 | pci_unmap_rom(pdev, bios); |
526 | 559 | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 425ac9d7f724..4c18514f6f80 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -98,6 +98,7 @@ struct vbios_data { | |||
98 | #define BDB_SDVO_LVDS_PNP_IDS 24 | 98 | #define BDB_SDVO_LVDS_PNP_IDS 24 |
99 | #define BDB_SDVO_LVDS_POWER_SEQ 25 | 99 | #define BDB_SDVO_LVDS_POWER_SEQ 25 |
100 | #define BDB_TV_OPTIONS 26 | 100 | #define BDB_TV_OPTIONS 26 |
101 | #define BDB_EDP 27 | ||
101 | #define BDB_LVDS_OPTIONS 40 | 102 | #define BDB_LVDS_OPTIONS 40 |
102 | #define BDB_LVDS_LFP_DATA_PTRS 41 | 103 | #define BDB_LVDS_LFP_DATA_PTRS 41 |
103 | #define BDB_LVDS_LFP_DATA 42 | 104 | #define BDB_LVDS_LFP_DATA 42 |
@@ -426,6 +427,45 @@ struct bdb_driver_features { | |||
426 | u8 custom_vbt_version; | 427 | u8 custom_vbt_version; |
427 | } __attribute__((packed)); | 428 | } __attribute__((packed)); |
428 | 429 | ||
430 | #define EDP_18BPP 0 | ||
431 | #define EDP_24BPP 1 | ||
432 | #define EDP_30BPP 2 | ||
433 | #define EDP_RATE_1_62 0 | ||
434 | #define EDP_RATE_2_7 1 | ||
435 | #define EDP_LANE_1 0 | ||
436 | #define EDP_LANE_2 1 | ||
437 | #define EDP_LANE_4 3 | ||
438 | #define EDP_PREEMPHASIS_NONE 0 | ||
439 | #define EDP_PREEMPHASIS_3_5dB 1 | ||
440 | #define EDP_PREEMPHASIS_6dB 2 | ||
441 | #define EDP_PREEMPHASIS_9_5dB 3 | ||
442 | #define EDP_VSWING_0_4V 0 | ||
443 | #define EDP_VSWING_0_6V 1 | ||
444 | #define EDP_VSWING_0_8V 2 | ||
445 | #define EDP_VSWING_1_2V 3 | ||
446 | |||
447 | struct edp_power_seq { | ||
448 | u16 t3; | ||
449 | u16 t7; | ||
450 | u16 t9; | ||
451 | u16 t10; | ||
452 | u16 t12; | ||
453 | } __attribute__ ((packed)); | ||
454 | |||
455 | struct edp_link_params { | ||
456 | u8 rate:4; | ||
457 | u8 lanes:4; | ||
458 | u8 preemphasis:4; | ||
459 | u8 vswing:4; | ||
460 | } __attribute__ ((packed)); | ||
461 | |||
462 | struct bdb_edp { | ||
463 | struct edp_power_seq power_seqs[16]; | ||
464 | u32 color_depth; | ||
465 | u32 sdrrs_msa_timing_delay; | ||
466 | struct edp_link_params link_params[16]; | ||
467 | } __attribute__ ((packed)); | ||
468 | |||
429 | bool intel_init_bios(struct drm_device *dev); | 469 | bool intel_init_bios(struct drm_device *dev); |
430 | 470 | ||
431 | /* | 471 | /* |
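The bdb_edp arrays are likewise indexed by panel_type, so per-panel link parameters would come from link_params[panel_type]. Decoding the lane-count field, as a sketch (the struct mirrors the header above; the helper itself is hypothetical):

#include <stdint.h>

struct edp_link_params {
        uint8_t rate:4;
        uint8_t lanes:4;
        uint8_t preemphasis:4;
        uint8_t vswing:4;
} __attribute__((packed));

static int edp_lane_count(const struct edp_link_params *p)
{
        switch (p->lanes) {
        case 0:  return 1;    /* EDP_LANE_1 */
        case 1:  return 2;    /* EDP_LANE_2 */
        case 3:  return 4;    /* EDP_LANE_4 */
        default: return -1;   /* undefined encoding */
        }
}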
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9f3d3e563414..79dd4026586f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
157 | adpa = I915_READ(PCH_ADPA); | 157 | adpa = I915_READ(PCH_ADPA); |
158 | 158 | ||
159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
160 | /* disable HPD first */ | ||
161 | I915_WRITE(PCH_ADPA, adpa); | ||
162 | (void)I915_READ(PCH_ADPA); | ||
160 | 163 | ||
161 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 164 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | |
162 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | 165 | ADPA_CRT_HOTPLUG_WARMUP_10MS | |
@@ -548,4 +551,6 @@ void intel_crt_init(struct drm_device *dev) | |||
548 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 551 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
549 | 552 | ||
550 | drm_sysfs_connector_add(connector); | 553 | drm_sysfs_connector_add(connector); |
554 | |||
555 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | ||
551 | } | 556 | } |
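The CRT detect fix writes the hotplug field back as zero, with a posting read, before arming a new cycle, so a stale trigger cannot fire early. The sequence, modeled with stubbed MMIO (the mask value is illustrative only):

#include <stdint.h>

#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000u   /* illustrative field position */

static uint32_t pch_adpa;
static void adpa_wr(uint32_t v) { pch_adpa = v; }
static uint32_t adpa_rd(void)   { return pch_adpa; }

static void crt_hotplug_rearm(uint32_t trigger_bits)
{
        uint32_t adpa = adpa_rd() & ~ADPA_CRT_HOTPLUG_MASK;

        adpa_wr(adpa);                /* disable HPD first */
        (void)adpa_rd();              /* posting read orders the disable */
        adpa_wr(adpa | trigger_bits); /* then start a fresh detect cycle */
}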
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 52cd9b006da2..1b5cd833bc70 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -70,8 +70,6 @@ struct intel_limit { | |||
70 | intel_p2_t p2; | 70 | intel_p2_t p2; |
71 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, | 71 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, |
72 | int, int, intel_clock_t *); | 72 | int, int, intel_clock_t *); |
73 | bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *, | ||
74 | int, int, intel_clock_t *); | ||
75 | }; | 73 | }; |
76 | 74 | ||
77 | #define I8XX_DOT_MIN 25000 | 75 | #define I8XX_DOT_MIN 25000 |
@@ -242,38 +240,93 @@ struct intel_limit { | |||
242 | #define IRONLAKE_DOT_MAX 350000 | 240 | #define IRONLAKE_DOT_MAX 350000 |
243 | #define IRONLAKE_VCO_MIN 1760000 | 241 | #define IRONLAKE_VCO_MIN 1760000 |
244 | #define IRONLAKE_VCO_MAX 3510000 | 242 | #define IRONLAKE_VCO_MAX 3510000 |
245 | #define IRONLAKE_N_MIN 1 | ||
246 | #define IRONLAKE_N_MAX 5 | ||
247 | #define IRONLAKE_M_MIN 79 | ||
248 | #define IRONLAKE_M_MAX 118 | ||
249 | #define IRONLAKE_M1_MIN 12 | 243 | #define IRONLAKE_M1_MIN 12 |
250 | #define IRONLAKE_M1_MAX 23 | 244 | #define IRONLAKE_M1_MAX 22 |
251 | #define IRONLAKE_M2_MIN 5 | 245 | #define IRONLAKE_M2_MIN 5 |
252 | #define IRONLAKE_M2_MAX 9 | 246 | #define IRONLAKE_M2_MAX 9 |
253 | #define IRONLAKE_P_SDVO_DAC_MIN 5 | ||
254 | #define IRONLAKE_P_SDVO_DAC_MAX 80 | ||
255 | #define IRONLAKE_P_LVDS_MIN 28 | ||
256 | #define IRONLAKE_P_LVDS_MAX 112 | ||
257 | #define IRONLAKE_P1_MIN 1 | ||
258 | #define IRONLAKE_P1_MAX 8 | ||
259 | #define IRONLAKE_P2_SDVO_DAC_SLOW 10 | ||
260 | #define IRONLAKE_P2_SDVO_DAC_FAST 5 | ||
261 | #define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */ | ||
262 | #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ | ||
263 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225MHz */ | 247 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225MHz */
264 | 248 | ||
249 | /* We have parameter ranges for different types of outputs. */ | ||
250 | |||
251 | /* DAC & HDMI Refclk 120Mhz */ | ||
252 | #define IRONLAKE_DAC_N_MIN 1 | ||
253 | #define IRONLAKE_DAC_N_MAX 5 | ||
254 | #define IRONLAKE_DAC_M_MIN 79 | ||
255 | #define IRONLAKE_DAC_M_MAX 127 | ||
256 | #define IRONLAKE_DAC_P_MIN 5 | ||
257 | #define IRONLAKE_DAC_P_MAX 80 | ||
258 | #define IRONLAKE_DAC_P1_MIN 1 | ||
259 | #define IRONLAKE_DAC_P1_MAX 8 | ||
260 | #define IRONLAKE_DAC_P2_SLOW 10 | ||
261 | #define IRONLAKE_DAC_P2_FAST 5 | ||
262 | |||
263 | /* LVDS single-channel 120MHz refclk */ | ||
264 | #define IRONLAKE_LVDS_S_N_MIN 1 | ||
265 | #define IRONLAKE_LVDS_S_N_MAX 3 | ||
266 | #define IRONLAKE_LVDS_S_M_MIN 79 | ||
267 | #define IRONLAKE_LVDS_S_M_MAX 118 | ||
268 | #define IRONLAKE_LVDS_S_P_MIN 28 | ||
269 | #define IRONLAKE_LVDS_S_P_MAX 112 | ||
270 | #define IRONLAKE_LVDS_S_P1_MIN 2 | ||
271 | #define IRONLAKE_LVDS_S_P1_MAX 8 | ||
272 | #define IRONLAKE_LVDS_S_P2_SLOW 14 | ||
273 | #define IRONLAKE_LVDS_S_P2_FAST 14 | ||
274 | |||
275 | /* LVDS dual-channel 120MHz refclk */ | ||
276 | #define IRONLAKE_LVDS_D_N_MIN 1 | ||
277 | #define IRONLAKE_LVDS_D_N_MAX 3 | ||
278 | #define IRONLAKE_LVDS_D_M_MIN 79 | ||
279 | #define IRONLAKE_LVDS_D_M_MAX 127 | ||
280 | #define IRONLAKE_LVDS_D_P_MIN 14 | ||
281 | #define IRONLAKE_LVDS_D_P_MAX 56 | ||
282 | #define IRONLAKE_LVDS_D_P1_MIN 2 | ||
283 | #define IRONLAKE_LVDS_D_P1_MAX 8 | ||
284 | #define IRONLAKE_LVDS_D_P2_SLOW 7 | ||
285 | #define IRONLAKE_LVDS_D_P2_FAST 7 | ||
286 | |||
287 | /* LVDS single-channel 100MHz refclk */ | ||
288 | #define IRONLAKE_LVDS_S_SSC_N_MIN 1 | ||
289 | #define IRONLAKE_LVDS_S_SSC_N_MAX 2 | ||
290 | #define IRONLAKE_LVDS_S_SSC_M_MIN 79 | ||
291 | #define IRONLAKE_LVDS_S_SSC_M_MAX 126 | ||
292 | #define IRONLAKE_LVDS_S_SSC_P_MIN 28 | ||
293 | #define IRONLAKE_LVDS_S_SSC_P_MAX 112 | ||
294 | #define IRONLAKE_LVDS_S_SSC_P1_MIN 2 | ||
295 | #define IRONLAKE_LVDS_S_SSC_P1_MAX 8 | ||
296 | #define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 | ||
297 | #define IRONLAKE_LVDS_S_SSC_P2_FAST 14 | ||
298 | |||
299 | /* LVDS dual-channel 100MHz refclk */ | ||
300 | #define IRONLAKE_LVDS_D_SSC_N_MIN 1 | ||
301 | #define IRONLAKE_LVDS_D_SSC_N_MAX 3 | ||
302 | #define IRONLAKE_LVDS_D_SSC_M_MIN 79 | ||
303 | #define IRONLAKE_LVDS_D_SSC_M_MAX 126 | ||
304 | #define IRONLAKE_LVDS_D_SSC_P_MIN 14 | ||
305 | #define IRONLAKE_LVDS_D_SSC_P_MAX 42 | ||
306 | #define IRONLAKE_LVDS_D_SSC_P1_MIN 2 | ||
307 | #define IRONLAKE_LVDS_D_SSC_P1_MAX 6 | ||
308 | #define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 | ||
309 | #define IRONLAKE_LVDS_D_SSC_P2_FAST 7 | ||
310 | |||
311 | /* DisplayPort */ | ||
312 | #define IRONLAKE_DP_N_MIN 1 | ||
313 | #define IRONLAKE_DP_N_MAX 2 | ||
314 | #define IRONLAKE_DP_M_MIN 81 | ||
315 | #define IRONLAKE_DP_M_MAX 90 | ||
316 | #define IRONLAKE_DP_P_MIN 10 | ||
317 | #define IRONLAKE_DP_P_MAX 20 | ||
318 | #define IRONLAKE_DP_P2_FAST 10 | ||
319 | #define IRONLAKE_DP_P2_SLOW 10 | ||
320 | #define IRONLAKE_DP_P2_LIMIT 0 | ||
321 | #define IRONLAKE_DP_P1_MIN 1 | ||
322 | #define IRONLAKE_DP_P1_MAX 2 | ||
323 | |||
265 | static bool | 324 | static bool |
266 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 325 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
267 | int target, int refclk, intel_clock_t *best_clock); | 326 | int target, int refclk, intel_clock_t *best_clock); |
268 | static bool | 327 | static bool |
269 | intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
270 | int target, int refclk, intel_clock_t *best_clock); | ||
271 | static bool | ||
272 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 328 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
273 | int target, int refclk, intel_clock_t *best_clock); | 329 | int target, int refclk, intel_clock_t *best_clock); |
274 | static bool | ||
275 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
276 | int target, int refclk, intel_clock_t *best_clock); | ||
277 | 330 | ||
278 | static bool | 331 | static bool |
279 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, | 332 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
@@ -294,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = { | |||
294 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 347 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
295 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | 348 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, |
296 | .find_pll = intel_find_best_PLL, | 349 | .find_pll = intel_find_best_PLL, |
297 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
298 | }; | 350 | }; |
299 | 351 | ||
300 | static const intel_limit_t intel_limits_i8xx_lvds = { | 352 | static const intel_limit_t intel_limits_i8xx_lvds = { |
@@ -309,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = { | |||
309 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 361 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
310 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | 362 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, |
311 | .find_pll = intel_find_best_PLL, | 363 | .find_pll = intel_find_best_PLL, |
312 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
313 | }; | 364 | }; |
314 | 365 | ||
315 | static const intel_limit_t intel_limits_i9xx_sdvo = { | 366 | static const intel_limit_t intel_limits_i9xx_sdvo = { |
@@ -324,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = { | |||
324 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 375 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
325 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 376 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
326 | .find_pll = intel_find_best_PLL, | 377 | .find_pll = intel_find_best_PLL, |
327 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
328 | }; | 378 | }; |
329 | 379 | ||
330 | static const intel_limit_t intel_limits_i9xx_lvds = { | 380 | static const intel_limit_t intel_limits_i9xx_lvds = { |
@@ -342,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = { | |||
342 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 392 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
343 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | 393 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, |
344 | .find_pll = intel_find_best_PLL, | 394 | .find_pll = intel_find_best_PLL, |
345 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
346 | }; | 395 | }; |
347 | 396 | ||
348 | /* below parameter and function is for G4X Chipset Family*/ | 397 | /* below parameter and function is for G4X Chipset Family*/ |
@@ -360,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = { | |||
360 | .p2_fast = G4X_P2_SDVO_FAST | 409 | .p2_fast = G4X_P2_SDVO_FAST |
361 | }, | 410 | }, |
362 | .find_pll = intel_g4x_find_best_PLL, | 411 | .find_pll = intel_g4x_find_best_PLL, |
363 | .find_reduced_pll = intel_g4x_find_best_PLL, | ||
364 | }; | 412 | }; |
365 | 413 | ||
366 | static const intel_limit_t intel_limits_g4x_hdmi = { | 414 | static const intel_limit_t intel_limits_g4x_hdmi = { |
@@ -377,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = { | |||
377 | .p2_fast = G4X_P2_HDMI_DAC_FAST | 425 | .p2_fast = G4X_P2_HDMI_DAC_FAST |
378 | }, | 426 | }, |
379 | .find_pll = intel_g4x_find_best_PLL, | 427 | .find_pll = intel_g4x_find_best_PLL, |
380 | .find_reduced_pll = intel_g4x_find_best_PLL, | ||
381 | }; | 428 | }; |
382 | 429 | ||
383 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | 430 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { |
@@ -402,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | |||
402 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST | 449 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST |
403 | }, | 450 | }, |
404 | .find_pll = intel_g4x_find_best_PLL, | 451 | .find_pll = intel_g4x_find_best_PLL, |
405 | .find_reduced_pll = intel_g4x_find_best_PLL, | ||
406 | }; | 452 | }; |
407 | 453 | ||
408 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | 454 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { |
@@ -427,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | |||
427 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST | 473 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST |
428 | }, | 474 | }, |
429 | .find_pll = intel_g4x_find_best_PLL, | 475 | .find_pll = intel_g4x_find_best_PLL, |
430 | .find_reduced_pll = intel_g4x_find_best_PLL, | ||
431 | }; | 476 | }; |
432 | 477 | ||
433 | static const intel_limit_t intel_limits_g4x_display_port = { | 478 | static const intel_limit_t intel_limits_g4x_display_port = { |
@@ -465,7 +510,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = { | |||
465 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 510 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
466 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 511 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
467 | .find_pll = intel_find_best_PLL, | 512 | .find_pll = intel_find_best_PLL, |
468 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
469 | }; | 513 | }; |
470 | 514 | ||
471 | static const intel_limit_t intel_limits_pineview_lvds = { | 515 | static const intel_limit_t intel_limits_pineview_lvds = { |
@@ -481,46 +525,135 @@ static const intel_limit_t intel_limits_pineview_lvds = { | |||
481 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 525 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
482 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | 526 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, |
483 | .find_pll = intel_find_best_PLL, | 527 | .find_pll = intel_find_best_PLL, |
484 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
485 | }; | 528 | }; |
486 | 529 | ||
487 | static const intel_limit_t intel_limits_ironlake_sdvo = { | 530 | static const intel_limit_t intel_limits_ironlake_dac = { |
531 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
532 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
533 | .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, | ||
534 | .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, | ||
535 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
536 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
537 | .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, | ||
538 | .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, | ||
539 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
540 | .p2_slow = IRONLAKE_DAC_P2_SLOW, | ||
541 | .p2_fast = IRONLAKE_DAC_P2_FAST }, | ||
542 | .find_pll = intel_g4x_find_best_PLL, | ||
543 | }; | ||
544 | |||
545 | static const intel_limit_t intel_limits_ironlake_single_lvds = { | ||
546 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
547 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
548 | .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, | ||
549 | .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, | ||
550 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
551 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
552 | .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, | ||
553 | .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, | ||
554 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
555 | .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, | ||
556 | .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, | ||
557 | .find_pll = intel_g4x_find_best_PLL, | ||
558 | }; | ||
559 | |||
560 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { | ||
561 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
562 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
563 | .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, | ||
564 | .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, | ||
565 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
566 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
567 | .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, | ||
568 | .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, | ||
569 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
570 | .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, | ||
571 | .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, | ||
572 | .find_pll = intel_g4x_find_best_PLL, | ||
573 | }; | ||
574 | |||
575 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { | ||
488 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 576 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, |
489 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 577 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, |
490 | .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, | 578 | .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, |
491 | .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, | 579 | .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, |
492 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 580 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, |
493 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 581 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, |
494 | .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, | 582 | .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, |
495 | .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, | 583 | .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX }, |
496 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 584 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
497 | .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, | 585 | .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, |
498 | .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, | 586 | .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, |
499 | .find_pll = intel_ironlake_find_best_PLL, | 587 | .find_pll = intel_g4x_find_best_PLL, |
500 | }; | 588 | }; |
501 | 589 | ||
502 | static const intel_limit_t intel_limits_ironlake_lvds = { | 590 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { |
503 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 591 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, |
504 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 592 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, |
505 | .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, | 593 | .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, |
506 | .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, | 594 | .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, |
507 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 595 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, |
508 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 596 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, |
509 | .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, | 597 | .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, |
510 | .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, | 598 | .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX }, |
511 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 599 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
512 | .p2_slow = IRONLAKE_P2_LVDS_SLOW, | 600 | .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, |
513 | .p2_fast = IRONLAKE_P2_LVDS_FAST }, | 601 | .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, |
514 | .find_pll = intel_ironlake_find_best_PLL, | 602 | .find_pll = intel_g4x_find_best_PLL, |
603 | }; | ||
604 | |||
605 | static const intel_limit_t intel_limits_ironlake_display_port = { | ||
606 | .dot = { .min = IRONLAKE_DOT_MIN, | ||
607 | .max = IRONLAKE_DOT_MAX }, | ||
608 | .vco = { .min = IRONLAKE_VCO_MIN, | ||
609 | .max = IRONLAKE_VCO_MAX}, | ||
610 | .n = { .min = IRONLAKE_DP_N_MIN, | ||
611 | .max = IRONLAKE_DP_N_MAX }, | ||
612 | .m = { .min = IRONLAKE_DP_M_MIN, | ||
613 | .max = IRONLAKE_DP_M_MAX }, | ||
614 | .m1 = { .min = IRONLAKE_M1_MIN, | ||
615 | .max = IRONLAKE_M1_MAX }, | ||
616 | .m2 = { .min = IRONLAKE_M2_MIN, | ||
617 | .max = IRONLAKE_M2_MAX }, | ||
618 | .p = { .min = IRONLAKE_DP_P_MIN, | ||
619 | .max = IRONLAKE_DP_P_MAX }, | ||
620 | .p1 = { .min = IRONLAKE_DP_P1_MIN, | ||
621 | .max = IRONLAKE_DP_P1_MAX}, | ||
622 | .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, | ||
623 | .p2_slow = IRONLAKE_DP_P2_SLOW, | ||
624 | .p2_fast = IRONLAKE_DP_P2_FAST }, | ||
625 | .find_pll = intel_find_pll_ironlake_dp, | ||
515 | }; | 626 | }; |
516 | 627 | ||
517 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 628 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) |
518 | { | 629 | { |
630 | struct drm_device *dev = crtc->dev; | ||
631 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
519 | const intel_limit_t *limit; | 632 | const intel_limit_t *limit; |
520 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 633 | int refclk = 120; |
521 | limit = &intel_limits_ironlake_lvds; | 634 | |
635 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
636 | if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) | ||
637 | refclk = 100; | ||
638 | |||
639 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | ||
640 | LVDS_CLKB_POWER_UP) { | ||
641 | /* LVDS dual channel */ | ||
642 | if (refclk == 100) | ||
643 | limit = &intel_limits_ironlake_dual_lvds_100m; | ||
644 | else | ||
645 | limit = &intel_limits_ironlake_dual_lvds; | ||
646 | } else { | ||
647 | if (refclk == 100) | ||
648 | limit = &intel_limits_ironlake_single_lvds_100m; | ||
649 | else | ||
650 | limit = &intel_limits_ironlake_single_lvds; | ||
651 | } | ||
652 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | ||
653 | HAS_eDP) | ||
654 | limit = &intel_limits_ironlake_display_port; | ||
522 | else | 655 | else |
523 | limit = &intel_limits_ironlake_sdvo; | 656 | limit = &intel_limits_ironlake_dac; |
524 | 657 | ||
525 | return limit; | 658 | return limit; |
526 | } | 659 | } |
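intel_ironlake_limit() now keys off two runtime facts: whether SSC puts the LVDS reference at 100 MHz instead of 120 MHz, and whether the second LVDS channel (CLKB) is powered. The same decision, condensed into a sketch:

#include <stdbool.h>

enum lvds_limit { SINGLE_120M, DUAL_120M, SINGLE_100M, DUAL_100M };

static enum lvds_limit pick_lvds_limit(bool ssc_100mhz, bool clkb_powered)
{
        if (clkb_powered)                       /* dual-channel panel */
                return ssc_100mhz ? DUAL_100M : DUAL_120M;
        return ssc_100mhz ? SINGLE_100M : SINGLE_120M;
}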
@@ -737,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
737 | return (err != target); | 870 | return (err != target); |
738 | } | 871 | } |
739 | 872 | ||
740 | |||
741 | static bool | ||
742 | intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
743 | int target, int refclk, intel_clock_t *best_clock) | ||
744 | |||
745 | { | ||
746 | struct drm_device *dev = crtc->dev; | ||
747 | intel_clock_t clock; | ||
748 | int err = target; | ||
749 | bool found = false; | ||
750 | |||
751 | memcpy(&clock, best_clock, sizeof(intel_clock_t)); | ||
752 | |||
753 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { | ||
754 | for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { | ||
755 | /* m1 is always 0 in Pineview */ | ||
756 | if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) | ||
757 | break; | ||
758 | for (clock.n = limit->n.min; clock.n <= limit->n.max; | ||
759 | clock.n++) { | ||
760 | int this_err; | ||
761 | |||
762 | intel_clock(dev, refclk, &clock); | ||
763 | |||
764 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
765 | continue; | ||
766 | |||
767 | this_err = abs(clock.dot - target); | ||
768 | if (this_err < err) { | ||
769 | *best_clock = clock; | ||
770 | err = this_err; | ||
771 | found = true; | ||
772 | } | ||
773 | } | ||
774 | } | ||
775 | } | ||
776 | |||
777 | return found; | ||
778 | } | ||
779 | |||
780 | static bool | 873 | static bool |
781 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 874 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
782 | int target, int refclk, intel_clock_t *best_clock) | 875 | int target, int refclk, intel_clock_t *best_clock) |
@@ -791,7 +884,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
791 | found = false; | 884 | found = false; |
792 | 885 | ||
793 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 886 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
794 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 887 | int lvds_reg; |
888 | |||
889 | if (IS_IRONLAKE(dev)) | ||
890 | lvds_reg = PCH_LVDS; | ||
891 | else | ||
892 | lvds_reg = LVDS; | ||
893 | if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == | ||
795 | LVDS_CLKB_POWER_UP) | 894 | LVDS_CLKB_POWER_UP) |
796 | clock.p2 = limit->p2.p2_fast; | 895 | clock.p2 = limit->p2.p2_fast; |
797 | else | 896 | else |
@@ -839,6 +938,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
839 | { | 938 | { |
840 | struct drm_device *dev = crtc->dev; | 939 | struct drm_device *dev = crtc->dev; |
841 | intel_clock_t clock; | 940 | intel_clock_t clock; |
941 | |||
942 | /* return directly when it is eDP */ | ||
943 | if (HAS_eDP) | ||
944 | return true; | ||
945 | |||
842 | if (target < 200000) { | 946 | if (target < 200000) { |
843 | clock.n = 1; | 947 | clock.n = 1; |
844 | clock.p1 = 2; | 948 | clock.p1 = 2; |
@@ -857,68 +961,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
857 | return true; | 961 | return true; |
858 | } | 962 | } |
859 | 963 | ||
860 | static bool | ||
861 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
862 | int target, int refclk, intel_clock_t *best_clock) | ||
863 | { | ||
864 | struct drm_device *dev = crtc->dev; | ||
865 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
866 | intel_clock_t clock; | ||
867 | int err_most = 47; | ||
868 | int err_min = 10000; | ||
869 | |||
870 | /* eDP has only 2 clock choices, no n/m/p setting */ | ||
871 | if (HAS_eDP) | ||
872 | return true; | ||
873 | |||
874 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | ||
875 | return intel_find_pll_ironlake_dp(limit, crtc, target, | ||
876 | refclk, best_clock); | ||
877 | |||
878 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
879 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | ||
880 | LVDS_CLKB_POWER_UP) | ||
881 | clock.p2 = limit->p2.p2_fast; | ||
882 | else | ||
883 | clock.p2 = limit->p2.p2_slow; | ||
884 | } else { | ||
885 | if (target < limit->p2.dot_limit) | ||
886 | clock.p2 = limit->p2.p2_slow; | ||
887 | else | ||
888 | clock.p2 = limit->p2.p2_fast; | ||
889 | } | ||
890 | |||
891 | memset(best_clock, 0, sizeof(*best_clock)); | ||
892 | for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { | ||
893 | /* based on hardware requirement, prefer smaller n for precision */ | ||
894 | for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { | ||
895 | /* based on hardware requirement, prefer larger m1, m2 */ | ||
896 | for (clock.m1 = limit->m1.max; | ||
897 | clock.m1 >= limit->m1.min; clock.m1--) { | ||
898 | for (clock.m2 = limit->m2.max; | ||
899 | clock.m2 >= limit->m2.min; clock.m2--) { | ||
900 | int this_err; | ||
901 | |||
902 | intel_clock(dev, refclk, &clock); | ||
903 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
904 | continue; | ||
905 | this_err = abs((10000 - (target*10000/clock.dot))); | ||
906 | if (this_err < err_most) { | ||
907 | *best_clock = clock; | ||
908 | /* found on first matching */ | ||
909 | goto out; | ||
910 | } else if (this_err < err_min) { | ||
911 | *best_clock = clock; | ||
912 | err_min = this_err; | ||
913 | } | ||
914 | } | ||
915 | } | ||
916 | } | ||
917 | } | ||
918 | out: | ||
919 | return true; | ||
920 | } | ||
921 | |||
922 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ | 964 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
923 | static bool | 965 | static bool |
924 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 966 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
@@ -989,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
989 | 1031 | ||
990 | /* enable it... */ | 1032 | /* enable it... */ |
991 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | 1033 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; |
1034 | if (IS_I945GM(dev)) | ||
1035 | fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ | ||
992 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1036 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
993 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1037 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
994 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1038 | if (obj_priv->tiling_mode != I915_TILING_NONE) |
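i8xx_enable_fbc() assembles the control word incrementally; the new FBC_C3_IDLE bit is OR'ed in only on 945GM. A sketch of the assembly (the shift values mirror the defines earlier in this diff; FBC_CTL_EN's position and the helper itself are assumptions):

#include <stdint.h>

#define FBC_CTL_EN       (1u << 31)   /* assumed position */
#define FBC_CTL_PERIODIC (1u << 30)
#define FBC_C3_IDLE      (1u << 13)

static uint32_t fbc_ctl_word(int is_945gm, unsigned stride, unsigned interval)
{
        uint32_t v = FBC_CTL_EN | FBC_CTL_PERIODIC;

        if (is_945gm)
                v |= FBC_C3_IDLE;           /* 945 needs special SR handling */
        v |= (stride & 0xff) << 5;          /* FBC_CTL_STRIDE_SHIFT */
        v |= (interval & 0x2fff) << 16;     /* FBC_CTL_INTERVAL_SHIFT */
        return v;
}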
@@ -1144,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc, | |||
1144 | if (intel_fb->obj->size > dev_priv->cfb_size) { | 1188 | if (intel_fb->obj->size > dev_priv->cfb_size) { |
1145 | DRM_DEBUG_KMS("framebuffer too large, disabling " | 1189 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1146 | "compression\n"); | 1190 | "compression\n"); |
1191 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; | ||
1147 | goto out_disable; | 1192 | goto out_disable; |
1148 | } | 1193 | } |
1149 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | 1194 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || |
1150 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | 1195 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { |
1151 | DRM_DEBUG_KMS("mode incompatible with compression, " | 1196 | DRM_DEBUG_KMS("mode incompatible with compression, " |
1152 | "disabling\n"); | 1197 | "disabling\n"); |
1198 | dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; | ||
1153 | goto out_disable; | 1199 | goto out_disable; |
1154 | } | 1200 | } |
1155 | if ((mode->hdisplay > 2048) || | 1201 | if ((mode->hdisplay > 2048) || |
1156 | (mode->vdisplay > 1536)) { | 1202 | (mode->vdisplay > 1536)) { |
1157 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); | 1203 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); |
1204 | dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; | ||
1158 | goto out_disable; | 1205 | goto out_disable; |
1159 | } | 1206 | } |
1160 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { | 1207 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { |
1161 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); | 1208 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); |
1209 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; | ||
1162 | goto out_disable; | 1210 | goto out_disable; |
1163 | } | 1211 | } |
1164 | if (obj_priv->tiling_mode != I915_TILING_X) { | 1212 | if (obj_priv->tiling_mode != I915_TILING_X) { |
1165 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); | 1213 | DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); |
1214 | dev_priv->no_fbc_reason = FBC_NOT_TILED; | ||
1166 | goto out_disable; | 1215 | goto out_disable; |
1167 | } | 1216 | } |
1168 | 1217 | ||
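Each early exit now records a reason code before jumping to out_disable, so a later query can report why compression is off instead of just that it is. A sketch of the pattern with a hypothetical enum and string mapping (the real field is dev_priv->no_fbc_reason; the strings are invented):

/* Hypothetical reason codes mirroring the assignments above. */
enum fbc_no_reason {
        FBC_OK,
        FBC_STOLEN_TOO_SMALL,
        FBC_UNSUPPORTED_MODE,
        FBC_MODE_TOO_LARGE,
        FBC_BAD_PLANE,
        FBC_NOT_TILED,
};

static const char *fbc_reason_str(enum fbc_no_reason r)
{
        switch (r) {
        case FBC_STOLEN_TOO_SMALL: return "framebuffer exceeds stolen memory";
        case FBC_UNSUPPORTED_MODE: return "interlaced/doublescan mode";
        case FBC_MODE_TOO_LARGE:   return "mode larger than 2048x1536";
        case FBC_BAD_PLANE:        return "compression limited to plane A";
        case FBC_NOT_TILED:        return "framebuffer not X-tiled";
        default:                   return "ok";
        }
}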
@@ -1282,7 +1331,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1282 | return ret; | 1331 | return ret; |
1283 | } | 1332 | } |
1284 | 1333 | ||
1285 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 1334 | ret = i915_gem_object_set_to_display_plane(obj); |
1286 | if (ret != 0) { | 1335 | if (ret != 0) { |
1287 | i915_gem_object_unpin(obj); | 1336 | i915_gem_object_unpin(obj); |
1288 | mutex_unlock(&dev->struct_mutex); | 1337 | mutex_unlock(&dev->struct_mutex); |
@@ -1493,6 +1542,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1493 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | 1542 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; |
1494 | u32 temp; | 1543 | u32 temp; |
1495 | int tries = 5, j, n; | 1544 | int tries = 5, j, n; |
1545 | u32 pipe_bpc; | ||
1546 | |||
1547 | temp = I915_READ(pipeconf_reg); | ||
1548 | pipe_bpc = temp & PIPE_BPC_MASK; | ||
1496 | 1549 | ||
1497 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 1550 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
1498 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 1551 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -1524,6 +1577,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1524 | 1577 | ||
1525 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 1578 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
1526 | temp = I915_READ(fdi_rx_reg); | 1579 | temp = I915_READ(fdi_rx_reg); |
1580 | /* | ||
1581 | * Make the BPC in FDI RX consistent with that in the | ||
1582 | * pipeconf reg. | ||
1583 | */ | ||
1584 | temp &= ~(0x7 << 16); | ||
1585 | temp |= (pipe_bpc << 11); | ||
1527 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | | 1586 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | |
1528 | FDI_SEL_PCDCLK | | 1587 | FDI_SEL_PCDCLK | |
1529 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ | 1588 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ |
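The `temp |= (pipe_bpc << 11)` reads oddly next to the `0x7 << 16` clear until you note that pipe_bpc was captured still shifted into its pipeconf field position. Assuming that field sits at bits 7:5 (which the PIPE_BPC_MASK extraction suggests) and the FDI RX field at bits 18:16, shifting the already-positioned value by 11 relocates it, since 5 + 11 = 16. A small self-checking sketch:

#include <stdint.h>
#include <assert.h>

#define PIPE_BPC_MASK (0x7u << 5)       /* assumed pipeconf field position */

/* Move the BPC field from its pipeconf position (7:5) to the assumed
 * FDI RX position (18:16) without ever un-shifting it. */
static uint32_t fdi_rx_with_pipe_bpc(uint32_t fdi_rx, uint32_t pipeconf)
{
        uint32_t pipe_bpc = pipeconf & PIPE_BPC_MASK;

        fdi_rx &= ~(0x7u << 16);
        fdi_rx |= pipe_bpc << 11;
        return fdi_rx;
}

int main(void)
{
        /* field value 2 in pipeconf lands as field value 2 in FDI RX */
        assert(fdi_rx_with_pipe_bpc(0, 2u << 5) == 2u << 16);
        return 0;
}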
@@ -1666,6 +1725,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1666 | 1725 | ||
1667 | /* enable PCH transcoder */ | 1726 | /* enable PCH transcoder */ |
1668 | temp = I915_READ(transconf_reg); | 1727 | temp = I915_READ(transconf_reg); |
1728 | /* | ||
1729 | * Make the BPC in the transcoder consistent with | ||
1730 | * that in the pipeconf reg. | ||
1731 | */ | ||
1732 | temp &= ~PIPE_BPC_MASK; | ||
1733 | temp |= pipe_bpc; | ||
1669 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 1734 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
1670 | I915_READ(transconf_reg); | 1735 | I915_READ(transconf_reg); |
1671 | 1736 | ||
@@ -1697,6 +1762,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1697 | case DRM_MODE_DPMS_OFF: | 1762 | case DRM_MODE_DPMS_OFF: |
1698 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); | 1763 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); |
1699 | 1764 | ||
1765 | drm_vblank_off(dev, pipe); | ||
1700 | /* Disable display plane */ | 1766 | /* Disable display plane */ |
1701 | temp = I915_READ(dspcntr_reg); | 1767 | temp = I915_READ(dspcntr_reg); |
1702 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | 1768 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { |
@@ -1745,6 +1811,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1745 | I915_READ(fdi_tx_reg); | 1811 | I915_READ(fdi_tx_reg); |
1746 | 1812 | ||
1747 | temp = I915_READ(fdi_rx_reg); | 1813 | temp = I915_READ(fdi_rx_reg); |
1814 | /* BPC in FDI rx is consistent with that in pipeconf */ | ||
1815 | temp &= ~(0x07 << 16); | ||
1816 | temp |= (pipe_bpc << 11); | ||
1748 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | 1817 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); |
1749 | I915_READ(fdi_rx_reg); | 1818 | I915_READ(fdi_rx_reg); |
1750 | 1819 | ||
@@ -1789,7 +1858,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1789 | } | 1858 | } |
1790 | } | 1859 | } |
1791 | } | 1860 | } |
1792 | 1861 | temp = I915_READ(transconf_reg); | |
1862 | /* BPC in transcoder is consistent with that in pipeconf */ | ||
1863 | temp &= ~PIPE_BPC_MASK; | ||
1864 | temp |= pipe_bpc; | ||
1865 | I915_WRITE(transconf_reg, temp); | ||
1866 | I915_READ(transconf_reg); | ||
1793 | udelay(100); | 1867 | udelay(100); |
1794 | 1868 | ||
1795 | /* disable PCH DPLL */ | 1869 | /* disable PCH DPLL */ |
@@ -2448,7 +2522,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
2448 | * A value of 5us seems to be a good balance; safe for very low end | 2522 | * A value of 5us seems to be a good balance; safe for very low end |
2449 | * platforms but not overly aggressive on lower latency configs. | 2523 | * platforms but not overly aggressive on lower latency configs. |
2450 | */ | 2524 | */ |
2451 | const static int latency_ns = 5000; | 2525 | static const int latency_ns = 5000; |
2452 | 2526 | ||
2453 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | 2527 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
2454 | { | 2528 | { |
@@ -2559,7 +2633,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2559 | /* Calc sr entries for one plane configs */ | 2633 | /* Calc sr entries for one plane configs */ |
2560 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 2634 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
2561 | /* self-refresh has much higher latency */ | 2635 | /* self-refresh has much higher latency */ |
2562 | const static int sr_latency_ns = 12000; | 2636 | static const int sr_latency_ns = 12000; |
2563 | 2637 | ||
2564 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2638 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2565 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2639 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2570,6 +2644,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2570 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 2644 | sr_entries = roundup(sr_entries / cacheline_size, 1); |
2571 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 2645 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); |
2572 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2646 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2647 | } else { | ||
2648 | /* Turn off self refresh if both pipes are enabled */ | ||
2649 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2650 | & ~FW_BLC_SELF_EN); | ||
2573 | } | 2651 | } |
2574 | 2652 | ||
2575 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | 2653 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", |
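The self-refresh watermark sizes the FIFO for the bytes scanout will consume while memory exits self-refresh. A back-of-envelope version in standalone C; the figures are assumed for illustration, and this is not the exact kernel formula (part of which falls outside this hunk):

#include <stdio.h>

int main(void)
{
        long long latency_ns = 12000;           /* sr_latency_ns above */
        long long pixel_clock_hz = 148500000;   /* assumed 1080p60 dot clock */
        long long bytes_per_pixel = 4;
        long long cacheline = 64;

        long long pixels = pixel_clock_hz * latency_ns / 1000000000LL;
        long long lines = (pixels * bytes_per_pixel + cacheline - 1) / cacheline;

        /* the FIFO must hold this much to ride out self-refresh exit */
        printf("%lld cachelines (~%lld bytes)\n", lines, lines * cacheline);
        return 0;
}

With these numbers that is 112 cachelines, the kind of sr_entries figure the DRM_DEBUG line above would print.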
@@ -2598,7 +2676,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2598 | /* Calc sr entries for one plane configs */ | 2676 | /* Calc sr entries for one plane configs */ |
2599 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 2677 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
2600 | /* self-refresh has much higher latency */ | 2678 | /* self-refresh has much higher latency */ |
2601 | const static int sr_latency_ns = 12000; | 2679 | static const int sr_latency_ns = 12000; |
2602 | 2680 | ||
2603 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2681 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2604 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2682 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2613,6 +2691,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2613 | srwm = 1; | 2691 | srwm = 1; |
2614 | srwm &= 0x3f; | 2692 | srwm &= 0x3f; |
2615 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2693 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2694 | } else { | ||
2695 | /* Turn off self refresh if both pipes are enabled */ | ||
2696 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2697 | & ~FW_BLC_SELF_EN); | ||
2616 | } | 2698 | } |
2617 | 2699 | ||
2618 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | 2700 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
@@ -2667,7 +2749,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2667 | if (HAS_FW_BLC(dev) && sr_hdisplay && | 2749 | if (HAS_FW_BLC(dev) && sr_hdisplay && |
2668 | (!planea_clock || !planeb_clock)) { | 2750 | (!planea_clock || !planeb_clock)) { |
2669 | /* self-refresh has much higher latency */ | 2751 | /* self-refresh has much higher latency */ |
2670 | const static int sr_latency_ns = 6000; | 2752 | static const int sr_latency_ns = 6000; |
2671 | 2753 | ||
2672 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2754 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2673 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2755 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2680,7 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2680 | srwm = total_size - sr_entries; | 2762 | srwm = total_size - sr_entries; |
2681 | if (srwm < 0) | 2763 | if (srwm < 0) |
2682 | srwm = 1; | 2764 | srwm = 1; |
2683 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); | 2765 | |
2766 | if (IS_I945G(dev) || IS_I945GM(dev)) | ||
2767 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); | ||
2768 | else if (IS_I915GM(dev)) { | ||
2769 | /* 915M has a smaller SRWM field */ | ||
2770 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); | ||
2771 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | ||
2772 | } | ||
2773 | } else { | ||
2774 | /* Turn off self refresh if both pipes are enabled */ | ||
2775 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
2776 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2777 | & ~FW_BLC_SELF_EN); | ||
2778 | } else if (IS_I915GM(dev)) { | ||
2779 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | ||
2780 | } | ||
2684 | } | 2781 | } |
2685 | 2782 | ||
2686 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 2783 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
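The split above exists because 945G/GM expose an 8-bit self-refresh watermark field while 915GM's is only 6 bits wide (hence the 0xff versus 0x3f masks) and is enabled through INSTPM instead. The kernel masks the value, which silently truncates anything oversized; a hedged sketch of a clamping variant, keeping the srwm < 0 fallback from above:

#include <stdint.h>
#include <assert.h>

static uint32_t clamp_wm(int srwm, unsigned int field_bits)
{
        uint32_t max = (1u << field_bits) - 1;

        if (srwm < 0)
                srwm = 1;       /* matches the fallback above */
        return (uint32_t)srwm > max ? max : (uint32_t)srwm;
}

int main(void)
{
        assert(clamp_wm(300, 8) == 255);        /* 945-style field */
        assert(clamp_wm(300, 6) == 63);         /* 915GM-style field */
        assert(clamp_wm(-5, 6) == 1);
        return 0;
}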
@@ -2906,10 +3003,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2906 | return -EINVAL; | 3003 | return -EINVAL; |
2907 | } | 3004 | } |
2908 | 3005 | ||
2909 | if (is_lvds && limit->find_reduced_pll && | 3006 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
2910 | dev_priv->lvds_downclock_avail) { | 3007 | has_reduced_clock = limit->find_pll(limit, crtc, |
2911 | memcpy(&reduced_clock, &clock, sizeof(intel_clock_t)); | ||
2912 | has_reduced_clock = limit->find_reduced_pll(limit, crtc, | ||
2913 | dev_priv->lvds_downclock, | 3008 | dev_priv->lvds_downclock, |
2914 | refclk, | 3009 | refclk, |
2915 | &reduced_clock); | 3010 | &reduced_clock); |
@@ -2969,6 +3064,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2969 | 3064 | ||
2970 | /* determine panel color depth */ | 3065 | /* determine panel color depth */ |
2971 | temp = I915_READ(pipeconf_reg); | 3066 | temp = I915_READ(pipeconf_reg); |
3067 | temp &= ~PIPE_BPC_MASK; | ||
3068 | if (is_lvds) { | ||
3069 | int lvds_reg = I915_READ(PCH_LVDS); | ||
3070 | /* the BPC will be 6 if it is an 18-bit LVDS panel */ | ||
3071 | if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
3072 | temp |= PIPE_8BPC; | ||
3073 | else | ||
3074 | temp |= PIPE_6BPC; | ||
3075 | } else if (is_edp) { | ||
3076 | switch (dev_priv->edp_bpp/3) { | ||
3077 | case 8: | ||
3078 | temp |= PIPE_8BPC; | ||
3079 | break; | ||
3080 | case 10: | ||
3081 | temp |= PIPE_10BPC; | ||
3082 | break; | ||
3083 | case 6: | ||
3084 | temp |= PIPE_6BPC; | ||
3085 | break; | ||
3086 | case 12: | ||
3087 | temp |= PIPE_12BPC; | ||
3088 | break; | ||
3089 | } | ||
3090 | } else | ||
3091 | temp |= PIPE_8BPC; | ||
3092 | I915_WRITE(pipeconf_reg, temp); | ||
3093 | I915_READ(pipeconf_reg); | ||
2972 | 3094 | ||
2973 | switch (temp & PIPE_BPC_MASK) { | 3095 | switch (temp & PIPE_BPC_MASK) { |
2974 | case PIPE_8BPC: | 3096 | case PIPE_8BPC: |
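dev_priv->edp_bpp counts bits per pixel across all three color channels, so dividing by 3 recovers bits per channel: 18 bpp maps to 6 bpc, 24 to 8, 30 to 10, 36 to 12. A tiny standalone mapping; treating anything else as 8 bpc is this sketch's assumption, echoing the non-LVDS default above:

#include <stdio.h>

static int bpc_from_edp_bpp(int edp_bpp)
{
        switch (edp_bpp / 3) {
        case 6: case 8: case 10: case 12:
                return edp_bpp / 3;
        default:
                return 8;       /* assumed fallback for odd values */
        }
}

int main(void)
{
        printf("%d %d\n", bpc_from_edp_bpp(18), bpc_from_edp_bpp(24)); /* 6 8 */
        return 0;
}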
@@ -3195,7 +3317,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3195 | * appropriately here, but we need to look more thoroughly into how | 3317 | * appropriately here, but we need to look more thoroughly into how |
3196 | * panels behave in the two modes. | 3318 | * panels behave in the two modes. |
3197 | */ | 3319 | */ |
3198 | 3320 | /* set the dithering flag */ | |
3321 | if (IS_I965G(dev)) { | ||
3322 | if (dev_priv->lvds_dither) { | ||
3323 | if (IS_IRONLAKE(dev)) | ||
3324 | pipeconf |= PIPE_ENABLE_DITHER; | ||
3325 | else | ||
3326 | lvds |= LVDS_ENABLE_DITHER; | ||
3327 | } else { | ||
3328 | if (IS_IRONLAKE(dev)) | ||
3329 | pipeconf &= ~PIPE_ENABLE_DITHER; | ||
3330 | else | ||
3331 | lvds &= ~LVDS_ENABLE_DITHER; | ||
3332 | } | ||
3333 | } | ||
3199 | I915_WRITE(lvds_reg, lvds); | 3334 | I915_WRITE(lvds_reg, lvds); |
3200 | I915_READ(lvds_reg); | 3335 | I915_READ(lvds_reg); |
3201 | } | 3336 | } |
@@ -3385,7 +3520,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3385 | 3520 | ||
3386 | /* we only need to pin inside GTT if cursor is non-phy */ | 3521 | /* we only need to pin inside GTT if cursor is non-phy */ |
3387 | mutex_lock(&dev->struct_mutex); | 3522 | mutex_lock(&dev->struct_mutex); |
3388 | if (!dev_priv->cursor_needs_physical) { | 3523 | if (!dev_priv->info->cursor_needs_physical) { |
3389 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 3524 | ret = i915_gem_object_pin(bo, PAGE_SIZE); |
3390 | if (ret) { | 3525 | if (ret) { |
3391 | DRM_ERROR("failed to pin cursor bo\n"); | 3526 | DRM_ERROR("failed to pin cursor bo\n"); |
@@ -3420,7 +3555,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3420 | I915_WRITE(base, addr); | 3555 | I915_WRITE(base, addr); |
3421 | 3556 | ||
3422 | if (intel_crtc->cursor_bo) { | 3557 | if (intel_crtc->cursor_bo) { |
3423 | if (dev_priv->cursor_needs_physical) { | 3558 | if (dev_priv->info->cursor_needs_physical) { |
3424 | if (intel_crtc->cursor_bo != bo) | 3559 | if (intel_crtc->cursor_bo != bo) |
3425 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 3560 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
3426 | } else | 3561 | } else |
@@ -3434,11 +3569,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3434 | intel_crtc->cursor_bo = bo; | 3569 | intel_crtc->cursor_bo = bo; |
3435 | 3570 | ||
3436 | return 0; | 3571 | return 0; |
3437 | fail: | ||
3438 | mutex_lock(&dev->struct_mutex); | ||
3439 | fail_locked: | 3572 | fail_locked: |
3440 | drm_gem_object_unreference(bo); | ||
3441 | mutex_unlock(&dev->struct_mutex); | 3573 | mutex_unlock(&dev->struct_mutex); |
3574 | fail: | ||
3575 | drm_gem_object_unreference_unlocked(bo); | ||
3442 | return ret; | 3576 | return ret; |
3443 | } | 3577 | } |
3444 | 3578 | ||
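The rework replaces re-taking struct_mutex just to drop a reference with drm_gem_object_unreference_unlocked(), which acquires the lock internally; fail_locked is now reached while holding the lock and fail after it has been dropped. A minimal sketch of that shape with pthread stand-ins (all names hypothetical):

#include <pthread.h>

struct obj { int refs; };

/* Like drm_gem_object_unreference_unlocked(): takes the lock itself,
 * so the caller must NOT already hold it. */
static void unref_unlocked(pthread_mutex_t *m, struct obj *o)
{
        pthread_mutex_lock(m);
        o->refs--;
        pthread_mutex_unlock(m);
}

static int cursor_set_sketch(pthread_mutex_t *m, struct obj *bo, int pin_ok)
{
        pthread_mutex_lock(m);
        if (!pin_ok)
                goto fail_locked;       /* failed while holding the lock */
        pthread_mutex_unlock(m);
        return 0;

fail_locked:
        pthread_mutex_unlock(m);        /* drop the lock first... */
        unref_unlocked(m, bo);          /* ...then let the helper retake it */
        return -1;
}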
@@ -3779,125 +3913,6 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
3779 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 3913 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
3780 | } | 3914 | } |
3781 | 3915 | ||
3782 | void intel_increase_renderclock(struct drm_device *dev, bool schedule) | ||
3783 | { | ||
3784 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3785 | |||
3786 | if (IS_IRONLAKE(dev)) | ||
3787 | return; | ||
3788 | |||
3789 | if (!dev_priv->render_reclock_avail) { | ||
3790 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); | ||
3791 | return; | ||
3792 | } | ||
3793 | |||
3794 | /* Restore render clock frequency to original value */ | ||
3795 | if (IS_G4X(dev) || IS_I9XX(dev)) | ||
3796 | pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); | ||
3797 | else if (IS_I85X(dev)) | ||
3798 | pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); | ||
3799 | DRM_DEBUG_DRIVER("increasing render clock frequency\n"); | ||
3800 | |||
3801 | /* Schedule downclock */ | ||
3802 | if (schedule) | ||
3803 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
3804 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
3805 | } | ||
3806 | |||
3807 | void intel_decrease_renderclock(struct drm_device *dev) | ||
3808 | { | ||
3809 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3810 | |||
3811 | if (IS_IRONLAKE(dev)) | ||
3812 | return; | ||
3813 | |||
3814 | if (!dev_priv->render_reclock_avail) { | ||
3815 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); | ||
3816 | return; | ||
3817 | } | ||
3818 | |||
3819 | if (IS_G4X(dev)) { | ||
3820 | u16 gcfgc; | ||
3821 | |||
3822 | /* Adjust render clock... */ | ||
3823 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3824 | |||
3825 | /* Down to minimum... */ | ||
3826 | gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK; | ||
3827 | gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ; | ||
3828 | |||
3829 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3830 | } else if (IS_I965G(dev)) { | ||
3831 | u16 gcfgc; | ||
3832 | |||
3833 | /* Adjust render clock... */ | ||
3834 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3835 | |||
3836 | /* Down to minimum... */ | ||
3837 | gcfgc &= ~I965_GC_RENDER_CLOCK_MASK; | ||
3838 | gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ; | ||
3839 | |||
3840 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3841 | } else if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
3842 | u16 gcfgc; | ||
3843 | |||
3844 | /* Adjust render clock... */ | ||
3845 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3846 | |||
3847 | /* Down to minimum... */ | ||
3848 | gcfgc &= ~I945_GC_RENDER_CLOCK_MASK; | ||
3849 | gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ; | ||
3850 | |||
3851 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3852 | } else if (IS_I915G(dev)) { | ||
3853 | u16 gcfgc; | ||
3854 | |||
3855 | /* Adjust render clock... */ | ||
3856 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3857 | |||
3858 | /* Down to minimum... */ | ||
3859 | gcfgc &= ~I915_GC_RENDER_CLOCK_MASK; | ||
3860 | gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ; | ||
3861 | |||
3862 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3863 | } else if (IS_I85X(dev)) { | ||
3864 | u16 hpllcc; | ||
3865 | |||
3866 | /* Adjust render clock... */ | ||
3867 | pci_read_config_word(dev->pdev, HPLLCC, &hpllcc); | ||
3868 | |||
3869 | /* Up to maximum... */ | ||
3870 | hpllcc &= ~GC_CLOCK_CONTROL_MASK; | ||
3871 | hpllcc |= GC_CLOCK_133_200; | ||
3872 | |||
3873 | pci_write_config_word(dev->pdev, HPLLCC, hpllcc); | ||
3874 | } | ||
3875 | DRM_DEBUG_DRIVER("decreasing render clock frequency\n"); | ||
3876 | } | ||
3877 | |||
3878 | /* Note that no increase function is needed for this - increase_renderclock() | ||
3879 | * will also rewrite these bits | ||
3880 | */ | ||
3881 | void intel_decrease_displayclock(struct drm_device *dev) | ||
3882 | { | ||
3883 | if (IS_IRONLAKE(dev)) | ||
3884 | return; | ||
3885 | |||
3886 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || | ||
3887 | IS_I915GM(dev)) { | ||
3888 | u16 gcfgc; | ||
3889 | |||
3890 | /* Adjust render clock... */ | ||
3891 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3892 | |||
3893 | /* Down to minimum... */ | ||
3894 | gcfgc &= ~0xf0; | ||
3895 | gcfgc |= 0x80; | ||
3896 | |||
3897 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3898 | } | ||
3899 | } | ||
3900 | |||
3901 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ | 3916 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ |
3902 | 3917 | ||
3903 | static void intel_crtc_idle_timer(unsigned long arg) | 3918 | static void intel_crtc_idle_timer(unsigned long arg) |
@@ -4011,10 +4026,9 @@ static void intel_idle_update(struct work_struct *work) | |||
4011 | 4026 | ||
4012 | mutex_lock(&dev->struct_mutex); | 4027 | mutex_lock(&dev->struct_mutex); |
4013 | 4028 | ||
4014 | /* GPU isn't processing, downclock it. */ | 4029 | if (IS_I945G(dev) || IS_I945GM(dev)) { |
4015 | if (!dev_priv->busy) { | 4030 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); |
4016 | intel_decrease_renderclock(dev); | 4031 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); |
4017 | intel_decrease_displayclock(dev); | ||
4018 | } | 4032 | } |
4019 | 4033 | ||
4020 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4034 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
@@ -4051,12 +4065,18 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4051 | return; | 4065 | return; |
4052 | 4066 | ||
4053 | if (!dev_priv->busy) { | 4067 | if (!dev_priv->busy) { |
4068 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
4069 | u32 fw_blc_self; | ||
4070 | |||
4071 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | ||
4072 | fw_blc_self = I915_READ(FW_BLC_SELF); | ||
4073 | fw_blc_self &= ~FW_BLC_SELF_EN; | ||
4074 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | ||
4075 | } | ||
4054 | dev_priv->busy = true; | 4076 | dev_priv->busy = true; |
4055 | intel_increase_renderclock(dev, true); | 4077 | } else |
4056 | } else { | ||
4057 | mod_timer(&dev_priv->idle_timer, jiffies + | 4078 | mod_timer(&dev_priv->idle_timer, jiffies + |
4058 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | 4079 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); |
4059 | } | ||
4060 | 4080 | ||
4061 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4081 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
4062 | if (!crtc->fb) | 4082 | if (!crtc->fb) |
@@ -4066,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4066 | intel_fb = to_intel_framebuffer(crtc->fb); | 4086 | intel_fb = to_intel_framebuffer(crtc->fb); |
4067 | if (intel_fb->obj == obj) { | 4087 | if (intel_fb->obj == obj) { |
4068 | if (!intel_crtc->busy) { | 4088 | if (!intel_crtc->busy) { |
4089 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
4090 | u32 fw_blc_self; | ||
4091 | |||
4092 | DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); | ||
4093 | fw_blc_self = I915_READ(FW_BLC_SELF); | ||
4094 | fw_blc_self &= ~FW_BLC_SELF_EN; | ||
4095 | I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); | ||
4096 | } | ||
4069 | /* Non-busy -> busy, upclock */ | 4097 | /* Non-busy -> busy, upclock */ |
4070 | intel_increase_pllclock(crtc, true); | 4098 | intel_increase_pllclock(crtc, true); |
4071 | intel_crtc->busy = true; | 4099 | intel_crtc->busy = true; |
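FW_BLC_SELF_EN_MASK being OR'd into every write suggests the register follows the masked-write convention common on these parts: the upper half carries per-bit write enables, so a single write can flip the EN bit without disturbing its neighbours. A small model of that convention; the layout is an assumption for illustration, not a statement of the real register spec:

#include <stdint.h>
#include <assert.h>

/* Model: bits 31:16 are write enables for bits 15:0. */
static uint16_t masked_write(uint16_t cur, uint32_t val)
{
        uint16_t enable = val >> 16;
        uint16_t data = val & 0xffff;

        return (cur & ~enable) | (data & enable);
}

int main(void)
{
        /* clear bit 15 while leaving every other bit alone */
        assert(masked_write(0x8001, (1u << 31) | 0x0000) == 0x0001);
        return 0;
}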
@@ -4089,7 +4117,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) | |||
4089 | struct intel_unpin_work { | 4117 | struct intel_unpin_work { |
4090 | struct work_struct work; | 4118 | struct work_struct work; |
4091 | struct drm_device *dev; | 4119 | struct drm_device *dev; |
4092 | struct drm_gem_object *obj; | 4120 | struct drm_gem_object *old_fb_obj; |
4121 | struct drm_gem_object *pending_flip_obj; | ||
4093 | struct drm_pending_vblank_event *event; | 4122 | struct drm_pending_vblank_event *event; |
4094 | int pending; | 4123 | int pending; |
4095 | }; | 4124 | }; |
@@ -4100,8 +4129,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
4100 | container_of(__work, struct intel_unpin_work, work); | 4129 | container_of(__work, struct intel_unpin_work, work); |
4101 | 4130 | ||
4102 | mutex_lock(&work->dev->struct_mutex); | 4131 | mutex_lock(&work->dev->struct_mutex); |
4103 | i915_gem_object_unpin(work->obj); | 4132 | i915_gem_object_unpin(work->old_fb_obj); |
4104 | drm_gem_object_unreference(work->obj); | 4133 | drm_gem_object_unreference(work->pending_flip_obj); |
4134 | drm_gem_object_unreference(work->old_fb_obj); | ||
4105 | mutex_unlock(&work->dev->struct_mutex); | 4135 | mutex_unlock(&work->dev->struct_mutex); |
4106 | kfree(work); | 4136 | kfree(work); |
4107 | } | 4137 | } |
@@ -4124,6 +4154,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4124 | spin_lock_irqsave(&dev->event_lock, flags); | 4154 | spin_lock_irqsave(&dev->event_lock, flags); |
4125 | work = intel_crtc->unpin_work; | 4155 | work = intel_crtc->unpin_work; |
4126 | if (work == NULL || !work->pending) { | 4156 | if (work == NULL || !work->pending) { |
4157 | if (work && !work->pending) { | ||
4158 | obj_priv = work->pending_flip_obj->driver_private; | ||
4159 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", | ||
4160 | obj_priv, | ||
4161 | atomic_read(&obj_priv->pending_flip)); | ||
4162 | } | ||
4127 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4163 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4128 | return; | 4164 | return; |
4129 | } | 4165 | } |
@@ -4144,8 +4180,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4144 | 4180 | ||
4145 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4181 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4146 | 4182 | ||
4147 | obj_priv = work->obj->driver_private; | 4183 | obj_priv = work->pending_flip_obj->driver_private; |
4148 | if (atomic_dec_and_test(&obj_priv->pending_flip)) | 4184 | |
4185 | /* Initial scanout buffer will have a 0 pending flip count */ | ||
4186 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | ||
4187 | atomic_dec_and_test(&obj_priv->pending_flip)) | ||
4149 | DRM_WAKEUP(&dev_priv->pending_flip_queue); | 4188 | DRM_WAKEUP(&dev_priv->pending_flip_queue); |
4150 | schedule_work(&work->work); | 4189 | schedule_work(&work->work); |
4151 | } | 4190 | } |
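The extra zero check covers the initial scanout buffer, which was set up by modesetting rather than by a flip and so carries a pending_flip count of 0; without the guard the wakeup would never fire for it. Sketched with C11 atomics (helper name hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static bool flip_done(atomic_int *pending_flip)
{
        if (atomic_load(pending_flip) == 0)
                return true;                            /* initial scanout */
        return atomic_fetch_sub(pending_flip, 1) == 1;  /* dec-and-test */
}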
@@ -4158,8 +4197,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
4158 | unsigned long flags; | 4197 | unsigned long flags; |
4159 | 4198 | ||
4160 | spin_lock_irqsave(&dev->event_lock, flags); | 4199 | spin_lock_irqsave(&dev->event_lock, flags); |
4161 | if (intel_crtc->unpin_work) | 4200 | if (intel_crtc->unpin_work) { |
4162 | intel_crtc->unpin_work->pending = 1; | 4201 | intel_crtc->unpin_work->pending = 1; |
4202 | } else { | ||
4203 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); | ||
4204 | } | ||
4163 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4205 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4164 | } | 4206 | } |
4165 | 4207 | ||
@@ -4175,7 +4217,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4175 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4217 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4176 | struct intel_unpin_work *work; | 4218 | struct intel_unpin_work *work; |
4177 | unsigned long flags; | 4219 | unsigned long flags; |
4178 | int ret; | 4220 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; |
4221 | int ret, pipesrc; | ||
4179 | RING_LOCALS; | 4222 | RING_LOCALS; |
4180 | 4223 | ||
4181 | work = kzalloc(sizeof *work, GFP_KERNEL); | 4224 | work = kzalloc(sizeof *work, GFP_KERNEL); |
@@ -4187,12 +4230,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4187 | work->event = event; | 4230 | work->event = event; |
4188 | work->dev = crtc->dev; | 4231 | work->dev = crtc->dev; |
4189 | intel_fb = to_intel_framebuffer(crtc->fb); | 4232 | intel_fb = to_intel_framebuffer(crtc->fb); |
4190 | work->obj = intel_fb->obj; | 4233 | work->old_fb_obj = intel_fb->obj; |
4191 | INIT_WORK(&work->work, intel_unpin_work_fn); | 4234 | INIT_WORK(&work->work, intel_unpin_work_fn); |
4192 | 4235 | ||
4193 | /* We borrow the event spin lock for protecting unpin_work */ | 4236 | /* We borrow the event spin lock for protecting unpin_work */ |
4194 | spin_lock_irqsave(&dev->event_lock, flags); | 4237 | spin_lock_irqsave(&dev->event_lock, flags); |
4195 | if (intel_crtc->unpin_work) { | 4238 | if (intel_crtc->unpin_work) { |
4239 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
4196 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4240 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4197 | kfree(work); | 4241 | kfree(work); |
4198 | mutex_unlock(&dev->struct_mutex); | 4242 | mutex_unlock(&dev->struct_mutex); |
@@ -4206,19 +4250,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4206 | 4250 | ||
4207 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4251 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
4208 | if (ret != 0) { | 4252 | if (ret != 0) { |
4253 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | ||
4254 | obj->driver_private); | ||
4209 | kfree(work); | 4255 | kfree(work); |
4256 | intel_crtc->unpin_work = NULL; | ||
4210 | mutex_unlock(&dev->struct_mutex); | 4257 | mutex_unlock(&dev->struct_mutex); |
4211 | return ret; | 4258 | return ret; |
4212 | } | 4259 | } |
4213 | 4260 | ||
4214 | /* Reference the old fb object for the scheduled work. */ | 4261 | /* Reference the objects for the scheduled work. */ |
4215 | drm_gem_object_reference(work->obj); | 4262 | drm_gem_object_reference(work->old_fb_obj); |
4263 | drm_gem_object_reference(obj); | ||
4216 | 4264 | ||
4217 | crtc->fb = fb; | 4265 | crtc->fb = fb; |
4218 | i915_gem_object_flush_write_domain(obj); | 4266 | i915_gem_object_flush_write_domain(obj); |
4219 | drm_vblank_get(dev, intel_crtc->pipe); | 4267 | drm_vblank_get(dev, intel_crtc->pipe); |
4220 | obj_priv = obj->driver_private; | 4268 | obj_priv = obj->driver_private; |
4221 | atomic_inc(&obj_priv->pending_flip); | 4269 | atomic_inc(&obj_priv->pending_flip); |
4270 | work->pending_flip_obj = obj; | ||
4222 | 4271 | ||
4223 | BEGIN_LP_RING(4); | 4272 | BEGIN_LP_RING(4); |
4224 | OUT_RING(MI_DISPLAY_FLIP | | 4273 | OUT_RING(MI_DISPLAY_FLIP | |
@@ -4226,7 +4275,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4226 | OUT_RING(fb->pitch); | 4275 | OUT_RING(fb->pitch); |
4227 | if (IS_I965G(dev)) { | 4276 | if (IS_I965G(dev)) { |
4228 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 4277 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); |
4229 | OUT_RING((fb->width << 16) | fb->height); | 4278 | pipesrc = I915_READ(pipesrc_reg); |
4279 | OUT_RING(pipesrc & 0x0fff0fff); | ||
4230 | } else { | 4280 | } else { |
4231 | OUT_RING(obj_priv->gtt_offset); | 4281 | OUT_RING(obj_priv->gtt_offset); |
4232 | OUT_RING(MI_NOOP); | 4282 | OUT_RING(MI_NOOP); |
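Reading PIPESRC instead of trusting fb->width and fb->height makes the flip describe what the pipe actually scans out. Assuming the usual encoding on these parts -- (hactive - 1) in bits 27:16 and (vactive - 1) in bits 11:0, which is what the 0x0fff0fff mask implies -- a self-checking sketch:

#include <stdint.h>
#include <assert.h>

/* Assumed PIPESRC packing: minus-one fields in 27:16 and 11:0. */
static uint32_t pipesrc_pack(uint32_t hactive, uint32_t vactive)
{
        return ((hactive - 1) << 16) | (vactive - 1);
}

int main(void)
{
        assert((pipesrc_pack(1920, 1080) & 0x0fff0fff) ==
               ((1919u << 16) | 1079u));
        return 0;
}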
@@ -4400,29 +4450,43 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4400 | bool found = false; | 4450 | bool found = false; |
4401 | 4451 | ||
4402 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | 4452 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
4453 | DRM_DEBUG_KMS("probing SDVOB\n"); | ||
4403 | found = intel_sdvo_init(dev, SDVOB); | 4454 | found = intel_sdvo_init(dev, SDVOB); |
4404 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 4455 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
4456 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | ||
4405 | intel_hdmi_init(dev, SDVOB); | 4457 | intel_hdmi_init(dev, SDVOB); |
4458 | } | ||
4406 | 4459 | ||
4407 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | 4460 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
4461 | DRM_DEBUG_KMS("probing DP_B\n"); | ||
4408 | intel_dp_init(dev, DP_B); | 4462 | intel_dp_init(dev, DP_B); |
4463 | } | ||
4409 | } | 4464 | } |
4410 | 4465 | ||
4411 | /* Before G4X, SDVOC doesn't have its own detect register */ | 4466 | /* Before G4X, SDVOC doesn't have its own detect register */ |
4412 | 4467 | ||
4413 | if (I915_READ(SDVOB) & SDVO_DETECTED) | 4468 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
4469 | DRM_DEBUG_KMS("probing SDVOC\n"); | ||
4414 | found = intel_sdvo_init(dev, SDVOC); | 4470 | found = intel_sdvo_init(dev, SDVOC); |
4471 | } | ||
4415 | 4472 | ||
4416 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { | 4473 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { |
4417 | 4474 | ||
4418 | if (SUPPORTS_INTEGRATED_HDMI(dev)) | 4475 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
4476 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | ||
4419 | intel_hdmi_init(dev, SDVOC); | 4477 | intel_hdmi_init(dev, SDVOC); |
4420 | if (SUPPORTS_INTEGRATED_DP(dev)) | 4478 | } |
4479 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
4480 | DRM_DEBUG_KMS("probing DP_C\n"); | ||
4421 | intel_dp_init(dev, DP_C); | 4481 | intel_dp_init(dev, DP_C); |
4482 | } | ||
4422 | } | 4483 | } |
4423 | 4484 | ||
4424 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | 4485 | if (SUPPORTS_INTEGRATED_DP(dev) && |
4486 | (I915_READ(DP_D) & DP_DETECTED)) { | ||
4487 | DRM_DEBUG_KMS("probing DP_D\n"); | ||
4425 | intel_dp_init(dev, DP_D); | 4488 | intel_dp_init(dev, DP_D); |
4489 | } | ||
4426 | } else if (IS_I8XX(dev)) | 4490 | } else if (IS_I8XX(dev)) |
4427 | intel_dvo_init(dev); | 4491 | intel_dvo_init(dev); |
4428 | 4492 | ||
@@ -4448,9 +4512,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
4448 | intelfb_remove(dev, fb); | 4512 | intelfb_remove(dev, fb); |
4449 | 4513 | ||
4450 | drm_framebuffer_cleanup(fb); | 4514 | drm_framebuffer_cleanup(fb); |
4451 | mutex_lock(&dev->struct_mutex); | 4515 | drm_gem_object_unreference_unlocked(intel_fb->obj); |
4452 | drm_gem_object_unreference(intel_fb->obj); | ||
4453 | mutex_unlock(&dev->struct_mutex); | ||
4454 | 4516 | ||
4455 | kfree(intel_fb); | 4517 | kfree(intel_fb); |
4456 | } | 4518 | } |
@@ -4513,9 +4575,7 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
4513 | 4575 | ||
4514 | ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); | 4576 | ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); |
4515 | if (ret) { | 4577 | if (ret) { |
4516 | mutex_lock(&dev->struct_mutex); | 4578 | drm_gem_object_unreference_unlocked(obj); |
4517 | drm_gem_object_unreference(obj); | ||
4518 | mutex_unlock(&dev->struct_mutex); | ||
4519 | return NULL; | 4579 | return NULL; |
4520 | } | 4580 | } |
4521 | 4581 | ||
@@ -4527,6 +4587,127 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
4527 | .fb_changed = intelfb_probe, | 4587 | .fb_changed = intelfb_probe, |
4528 | }; | 4588 | }; |
4529 | 4589 | ||
4590 | static struct drm_gem_object * | ||
4591 | intel_alloc_power_context(struct drm_device *dev) | ||
4592 | { | ||
4593 | struct drm_gem_object *pwrctx; | ||
4594 | int ret; | ||
4595 | |||
4596 | pwrctx = drm_gem_object_alloc(dev, 4096); | ||
4597 | if (!pwrctx) { | ||
4598 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | ||
4599 | return NULL; | ||
4600 | } | ||
4601 | |||
4602 | mutex_lock(&dev->struct_mutex); | ||
4603 | ret = i915_gem_object_pin(pwrctx, 4096); | ||
4604 | if (ret) { | ||
4605 | DRM_ERROR("failed to pin power context: %d\n", ret); | ||
4606 | goto err_unref; | ||
4607 | } | ||
4608 | |||
4609 | ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
4610 | if (ret) { | ||
4611 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | ||
4612 | goto err_unpin; | ||
4613 | } | ||
4614 | mutex_unlock(&dev->struct_mutex); | ||
4615 | |||
4616 | return pwrctx; | ||
4617 | |||
4618 | err_unpin: | ||
4619 | i915_gem_object_unpin(pwrctx); | ||
4620 | err_unref: | ||
4621 | drm_gem_object_unreference(pwrctx); | ||
4622 | mutex_unlock(&dev->struct_mutex); | ||
4623 | return NULL; | ||
4624 | } | ||
4625 | |||
4626 | void ironlake_enable_drps(struct drm_device *dev) | ||
4627 | { | ||
4628 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4629 | u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; | ||
4630 | u8 fmax, fmin, fstart, vstart; | ||
4631 | int i = 0; | ||
4632 | |||
4633 | /* 100ms RC evaluation intervals */ | ||
4634 | I915_WRITE(RCUPEI, 100000); | ||
4635 | I915_WRITE(RCDNEI, 100000); | ||
4636 | |||
4637 | /* Set max/min thresholds to 90ms and 80ms respectively */ | ||
4638 | I915_WRITE(RCBMAXAVG, 90000); | ||
4639 | I915_WRITE(RCBMINAVG, 80000); | ||
4640 | |||
4641 | I915_WRITE(MEMIHYST, 1); | ||
4642 | |||
4643 | /* Set up min, max, and cur for interrupt handling */ | ||
4644 | fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; | ||
4645 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); | ||
4646 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> | ||
4647 | MEMMODE_FSTART_SHIFT; | ||
4648 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> | ||
4649 | PXVFREQ_PX_SHIFT; | ||
4650 | |||
4651 | dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ | ||
4652 | dev_priv->min_delay = fmin; | ||
4653 | dev_priv->cur_delay = fstart; | ||
4654 | |||
4655 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); | ||
4656 | |||
4657 | /* | ||
4658 | * Interrupts will be enabled in ironlake_irq_postinstall | ||
4659 | */ | ||
4660 | |||
4661 | I915_WRITE(VIDSTART, vstart); | ||
4662 | POSTING_READ(VIDSTART); | ||
4663 | |||
4664 | rgvmodectl |= MEMMODE_SWMODE_EN; | ||
4665 | I915_WRITE(MEMMODECTL, rgvmodectl); | ||
4666 | |||
4667 | while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) { | ||
4668 | if (i++ > 100) { | ||
4669 | DRM_ERROR("stuck trying to change perf mode\n"); | ||
4670 | break; | ||
4671 | } | ||
4672 | msleep(1); | ||
4673 | } | ||
4674 | msleep(1); | ||
4675 | |||
4676 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | ||
4677 | (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | ||
4678 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
4679 | POSTING_READ(MEMSWCTL); | ||
4680 | |||
4681 | rgvswctl |= MEMCTL_CMD_STS; | ||
4682 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
4683 | } | ||
4684 | |||
4685 | void ironlake_disable_drps(struct drm_device *dev) | ||
4686 | { | ||
4687 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4688 | u32 rgvswctl; | ||
4689 | u8 fstart; | ||
4690 | |||
4691 | /* Ack interrupts, disable EFC interrupt */ | ||
4692 | I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); | ||
4693 | I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); | ||
4694 | I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); | ||
4695 | I915_WRITE(DEIIR, DE_PCU_EVENT); | ||
4696 | I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); | ||
4697 | |||
4698 | /* Go back to the starting frequency */ | ||
4699 | fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> | ||
4700 | MEMMODE_FSTART_SHIFT; | ||
4701 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | ||
4702 | (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | ||
4703 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
4704 | msleep(1); | ||
4705 | rgvswctl |= MEMCTL_CMD_STS; | ||
4706 | I915_WRITE(MEMSWCTL, rgvswctl); | ||
4707 | msleep(1); | ||
4708 | |||
4709 | } | ||
4710 | |||
4530 | void intel_init_clock_gating(struct drm_device *dev) | 4711 | void intel_init_clock_gating(struct drm_device *dev) |
4531 | { | 4712 | { |
4532 | struct drm_i915_private *dev_priv = dev->dev_private; | 4713 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -4579,42 +4760,27 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4579 | * GPU can automatically power down the render unit if given a page | 4760 | * GPU can automatically power down the render unit if given a page |
4580 | * to save state. | 4761 | * to save state. |
4581 | */ | 4762 | */ |
4582 | if (I915_HAS_RC6(dev)) { | 4763 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { |
4583 | struct drm_gem_object *pwrctx; | 4764 | struct drm_i915_gem_object *obj_priv = NULL; |
4584 | struct drm_i915_gem_object *obj_priv; | ||
4585 | int ret; | ||
4586 | 4765 | ||
4587 | if (dev_priv->pwrctx) { | 4766 | if (dev_priv->pwrctx) { |
4588 | obj_priv = dev_priv->pwrctx->driver_private; | 4767 | obj_priv = dev_priv->pwrctx->driver_private; |
4589 | } else { | 4768 | } else { |
4590 | pwrctx = drm_gem_object_alloc(dev, 4096); | 4769 | struct drm_gem_object *pwrctx; |
4591 | if (!pwrctx) { | ||
4592 | DRM_DEBUG("failed to alloc power context, " | ||
4593 | "RC6 disabled\n"); | ||
4594 | goto out; | ||
4595 | } | ||
4596 | 4770 | ||
4597 | ret = i915_gem_object_pin(pwrctx, 4096); | 4771 | pwrctx = intel_alloc_power_context(dev); |
4598 | if (ret) { | 4772 | if (pwrctx) { |
4599 | DRM_ERROR("failed to pin power context: %d\n", | 4773 | dev_priv->pwrctx = pwrctx; |
4600 | ret); | 4774 | obj_priv = pwrctx->driver_private; |
4601 | drm_gem_object_unreference(pwrctx); | ||
4602 | goto out; | ||
4603 | } | 4775 | } |
4604 | |||
4605 | i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
4606 | |||
4607 | dev_priv->pwrctx = pwrctx; | ||
4608 | obj_priv = pwrctx->driver_private; | ||
4609 | } | 4776 | } |
4610 | 4777 | ||
4611 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | 4778 | if (obj_priv) { |
4612 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 4779 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); |
4613 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 4780 | I915_WRITE(MCHBAR_RENDER_STANDBY, |
4781 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | ||
4782 | } | ||
4614 | } | 4783 | } |
4615 | |||
4616 | out: | ||
4617 | return; | ||
4618 | } | 4784 | } |
4619 | 4785 | ||
4620 | /* Set up chip specific display functions */ | 4786 | /* Set up chip specific display functions */ |
@@ -4725,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev) | |||
4725 | DRM_DEBUG_KMS("%d display pipe%s available.\n", | 4891 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
4726 | num_pipe, num_pipe > 1 ? "s" : ""); | 4892 | num_pipe, num_pipe > 1 ? "s" : ""); |
4727 | 4893 | ||
4728 | if (IS_I85X(dev)) | ||
4729 | pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock); | ||
4730 | else if (IS_I9XX(dev) || IS_G4X(dev)) | ||
4731 | pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock); | ||
4732 | |||
4733 | for (i = 0; i < num_pipe; i++) { | 4894 | for (i = 0; i < num_pipe; i++) { |
4734 | intel_crtc_init(dev, i); | 4895 | intel_crtc_init(dev, i); |
4735 | } | 4896 | } |
@@ -4738,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
4738 | 4899 | ||
4739 | intel_init_clock_gating(dev); | 4900 | intel_init_clock_gating(dev); |
4740 | 4901 | ||
4902 | if (IS_IRONLAKE_M(dev)) | ||
4903 | ironlake_enable_drps(dev); | ||
4904 | |||
4741 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); | 4905 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
4742 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | 4906 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
4743 | (unsigned long)dev); | 4907 | (unsigned long)dev); |
@@ -4770,7 +4934,6 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4770 | del_timer_sync(&intel_crtc->idle_timer); | 4934 | del_timer_sync(&intel_crtc->idle_timer); |
4771 | } | 4935 | } |
4772 | 4936 | ||
4773 | intel_increase_renderclock(dev, false); | ||
4774 | del_timer_sync(&dev_priv->idle_timer); | 4937 | del_timer_sync(&dev_priv->idle_timer); |
4775 | 4938 | ||
4776 | if (dev_priv->display.disable_fbc) | 4939 | if (dev_priv->display.disable_fbc) |
@@ -4786,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4786 | drm_gem_object_unreference(dev_priv->pwrctx); | 4949 | drm_gem_object_unreference(dev_priv->pwrctx); |
4787 | } | 4950 | } |
4788 | 4951 | ||
4952 | if (IS_IRONLAKE_M(dev)) | ||
4953 | ironlake_disable_drps(dev); | ||
4954 | |||
4789 | mutex_unlock(&dev->struct_mutex); | 4955 | mutex_unlock(&dev->struct_mutex); |
4790 | 4956 | ||
4791 | drm_mode_config_cleanup(dev); | 4957 | drm_mode_config_cleanup(dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 4e7aa8b7b938..439506cefc14 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -125,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw) | |||
125 | 125 | ||
126 | /* I think this is a fiction */ | 126 | /* I think this is a fiction */ |
127 | static int | 127 | static int |
128 | intel_dp_link_required(int pixel_clock) | 128 | intel_dp_link_required(struct drm_device *dev, |
129 | struct intel_output *intel_output, int pixel_clock) | ||
129 | { | 130 | { |
130 | return pixel_clock * 3; | 131 | struct drm_i915_private *dev_priv = dev->dev_private; |
132 | |||
133 | if (IS_eDP(intel_output)) | ||
134 | return (pixel_clock * dev_priv->edp_bpp) / 8; | ||
135 | else | ||
136 | return pixel_clock * 3; | ||
131 | } | 137 | } |
132 | 138 | ||
133 | static int | 139 | static int |
@@ -138,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
138 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); | 144 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); |
139 | int max_lanes = intel_dp_max_lane_count(intel_output); | 145 | int max_lanes = intel_dp_max_lane_count(intel_output); |
140 | 146 | ||
141 | if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) | 147 | if (intel_dp_link_required(connector->dev, intel_output, mode->clock) |
148 | > max_link_clock * max_lanes) | ||
142 | return MODE_CLOCK_HIGH; | 149 | return MODE_CLOCK_HIGH; |
143 | 150 | ||
144 | if (mode->clock < 10000) | 151 | if (mode->clock < 10000) |
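With this change an eDP panel's real depth feeds the bandwidth check instead of a hard-wired 3 bytes per pixel, so an 18 bpp panel needs 25% less link capacity than the old formula claimed. Worked numbers in standalone C, with the panel and lane figures assumed for illustration:

#include <stdio.h>

int main(void)
{
        int mode_clock = 148500;        /* kHz, assumed 1080p60 */
        int edp_bpp = 18;               /* hypothetical 6-bpc eDP panel */

        int required_edp = mode_clock * edp_bpp / 8;    /* 334125 */
        int required_dp  = mode_clock * 3;              /* 445500 (24 bpp) */
        int avail = 270000 * 2;         /* 2.7G-class link, 2 lanes */

        printf("eDP %d vs DP %d vs avail %d\n",
               required_edp, required_dp, avail);
        return 0;
}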
@@ -492,7 +499,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
492 | for (clock = 0; clock <= max_clock; clock++) { | 499 | for (clock = 0; clock <= max_clock; clock++) { |
493 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | 500 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; |
494 | 501 | ||
495 | if (intel_dp_link_required(mode->clock) <= link_avail) { | 502 | if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) |
503 | <= link_avail) { | ||
496 | dp_priv->link_bw = bws[clock]; | 504 | dp_priv->link_bw = bws[clock]; |
497 | dp_priv->lane_count = lane_count; | 505 | dp_priv->lane_count = lane_count; |
498 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | 506 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); |
@@ -1289,53 +1297,7 @@ intel_dp_hot_plug(struct intel_output *intel_output) | |||
1289 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | 1297 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) |
1290 | intel_dp_check_link_status(intel_output); | 1298 | intel_dp_check_link_status(intel_output); |
1291 | } | 1299 | } |
1292 | /* | 1300 | |
1293 | * Enumerate the child dev array parsed from VBT to check whether | ||
1294 | * the given DP is present. | ||
1295 | * If it is present, return 1. | ||
1296 | * If it is not present, return false. | ||
1297 | * If no child dev is parsed from VBT, it is assumed that the given | ||
1298 | * DP is present. | ||
1299 | */ | ||
1300 | static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg) | ||
1301 | { | ||
1302 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1303 | struct child_device_config *p_child; | ||
1304 | int i, dp_port, ret; | ||
1305 | |||
1306 | if (!dev_priv->child_dev_num) | ||
1307 | return 1; | ||
1308 | |||
1309 | dp_port = 0; | ||
1310 | if (dp_reg == DP_B || dp_reg == PCH_DP_B) | ||
1311 | dp_port = PORT_IDPB; | ||
1312 | else if (dp_reg == DP_C || dp_reg == PCH_DP_C) | ||
1313 | dp_port = PORT_IDPC; | ||
1314 | else if (dp_reg == DP_D || dp_reg == PCH_DP_D) | ||
1315 | dp_port = PORT_IDPD; | ||
1316 | |||
1317 | ret = 0; | ||
1318 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
1319 | p_child = dev_priv->child_dev + i; | ||
1320 | /* | ||
1321 | * If the device type is not DP, continue. | ||
1322 | */ | ||
1323 | if (p_child->device_type != DEVICE_TYPE_DP && | ||
1324 | p_child->device_type != DEVICE_TYPE_eDP) | ||
1325 | continue; | ||
1326 | /* Find the eDP port */ | ||
1327 | if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) { | ||
1328 | ret = 1; | ||
1329 | break; | ||
1330 | } | ||
1331 | /* Find the DP port */ | ||
1332 | if (p_child->dvo_port == dp_port) { | ||
1333 | ret = 1; | ||
1334 | break; | ||
1335 | } | ||
1336 | } | ||
1337 | return ret; | ||
1338 | } | ||
1339 | void | 1301 | void |
1340 | intel_dp_init(struct drm_device *dev, int output_reg) | 1302 | intel_dp_init(struct drm_device *dev, int output_reg) |
1341 | { | 1303 | { |
@@ -1345,10 +1307,6 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1345 | struct intel_dp_priv *dp_priv; | 1307 | struct intel_dp_priv *dp_priv; |
1346 | const char *name = NULL; | 1308 | const char *name = NULL; |
1347 | 1309 | ||
1348 | if (!dp_is_present_in_vbt(dev, output_reg)) { | ||
1349 | DRM_DEBUG_KMS("DP is not present. Ignore it\n"); | ||
1350 | return; | ||
1351 | } | ||
1352 | intel_output = kcalloc(sizeof(struct intel_output) + | 1310 | intel_output = kcalloc(sizeof(struct intel_output) + |
1353 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1311 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
1354 | if (!intel_output) | 1312 | if (!intel_output) |
@@ -1373,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1373 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1331 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1374 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1332 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1375 | 1333 | ||
1376 | if (IS_eDP(intel_output)) { | 1334 | if (IS_eDP(intel_output)) |
1377 | intel_output->crtc_mask = (1 << 1); | ||
1378 | intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1335 | intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
1379 | } else | 1336 | |
1380 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 1337 | intel_output->crtc_mask = (1 << 0) | (1 << 1); |
1381 | connector->interlace_allowed = true; | 1338 | connector->interlace_allowed = true; |
1382 | connector->doublescan_allowed = 0; | 1339 | connector->doublescan_allowed = 0; |
1383 | 1340 | ||
@@ -1402,14 +1359,20 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1402 | break; | 1359 | break; |
1403 | case DP_B: | 1360 | case DP_B: |
1404 | case PCH_DP_B: | 1361 | case PCH_DP_B: |
1362 | dev_priv->hotplug_supported_mask |= | ||
1363 | HDMIB_HOTPLUG_INT_STATUS; | ||
1405 | name = "DPDDC-B"; | 1364 | name = "DPDDC-B"; |
1406 | break; | 1365 | break; |
1407 | case DP_C: | 1366 | case DP_C: |
1408 | case PCH_DP_C: | 1367 | case PCH_DP_C: |
1368 | dev_priv->hotplug_supported_mask |= | ||
1369 | HDMIC_HOTPLUG_INT_STATUS; | ||
1409 | name = "DPDDC-C"; | 1370 | name = "DPDDC-C"; |
1410 | break; | 1371 | break; |
1411 | case DP_D: | 1372 | case DP_D: |
1412 | case PCH_DP_D: | 1373 | case PCH_DP_D: |
1374 | dev_priv->hotplug_supported_mask |= | ||
1375 | HDMID_HOTPLUG_INT_STATUS; | ||
1413 | name = "DPDDC-D"; | 1376 | name = "DPDDC-D"; |
1414 | break; | 1377 | break; |
1415 | } | 1378 | } |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a51573da1ff6..3a467ca57857 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |||
209 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | 209 | extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
210 | u16 *blue, int regno); | 210 | u16 *blue, int regno); |
211 | extern void intel_init_clock_gating(struct drm_device *dev); | 211 | extern void intel_init_clock_gating(struct drm_device *dev); |
212 | extern void ironlake_enable_drps(struct drm_device *dev); | ||
213 | extern void ironlake_disable_drps(struct drm_device *dev); | ||
212 | 214 | ||
213 | extern int intel_framebuffer_create(struct drm_device *dev, | 215 | extern int intel_framebuffer_create(struct drm_device *dev, |
214 | struct drm_mode_fb_cmd *mode_cmd, | 216 | struct drm_mode_fb_cmd *mode_cmd, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index d4823cc87895..aaabbcbe5905 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -70,7 +70,7 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = { | |||
70 | 70 | ||
71 | 71 | ||
72 | /** | 72 | /** |
73 | * Curretly it is assumed that the old framebuffer is reused. | 73 | * Currently it is assumed that the old framebuffer is reused. |
74 | * | 74 | * |
75 | * LOCKING | 75 | * LOCKING |
76 | * caller should hold the mode config lock. | 76 | * caller should hold the mode config lock. |
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
148 | 148 | ||
149 | mutex_lock(&dev->struct_mutex); | 149 | mutex_lock(&dev->struct_mutex); |
150 | 150 | ||
151 | ret = i915_gem_object_pin(fbo, PAGE_SIZE); | 151 | ret = i915_gem_object_pin(fbo, 64*1024); |
152 | if (ret) { | 152 | if (ret) { |
153 | DRM_ERROR("failed to pin fb: %d\n", ret); | 153 | DRM_ERROR("failed to pin fb: %d\n", ret); |
154 | goto out_unref; | 154 | goto out_unref; |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f04dbbe7d400..0e268deed761 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -225,52 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | |||
225 | .destroy = intel_hdmi_enc_destroy, | 225 | .destroy = intel_hdmi_enc_destroy, |
226 | }; | 226 | }; |
227 | 227 | ||
228 | /* | ||
229 | * Enumerate the child dev array parsed from VBT to check whether | ||
230 | * the given HDMI is present. | ||
231 | * If it is present, return 1. | ||
232 | * If it is not present, return false. | ||
233 | * If no child dev is parsed from VBT, it assumes that the given | ||
234 | * HDMI is present. | ||
235 | */ | ||
236 | static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg) | ||
237 | { | ||
238 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
239 | struct child_device_config *p_child; | ||
240 | int i, hdmi_port, ret; | ||
241 | |||
242 | if (!dev_priv->child_dev_num) | ||
243 | return 1; | ||
244 | |||
245 | if (hdmi_reg == SDVOB) | ||
246 | hdmi_port = DVO_B; | ||
247 | else if (hdmi_reg == SDVOC) | ||
248 | hdmi_port = DVO_C; | ||
249 | else if (hdmi_reg == HDMIB) | ||
250 | hdmi_port = DVO_B; | ||
251 | else if (hdmi_reg == HDMIC) | ||
252 | hdmi_port = DVO_C; | ||
253 | else if (hdmi_reg == HDMID) | ||
254 | hdmi_port = DVO_D; | ||
255 | else | ||
256 | return 0; | ||
257 | |||
258 | ret = 0; | ||
259 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
260 | p_child = dev_priv->child_dev + i; | ||
261 | /* | ||
262 | * If the device type is not HDMI, continue. | ||
263 | */ | ||
264 | if (p_child->device_type != DEVICE_TYPE_HDMI) | ||
265 | continue; | ||
266 | /* Find the HDMI port */ | ||
267 | if (p_child->dvo_port == hdmi_port) { | ||
268 | ret = 1; | ||
269 | break; | ||
270 | } | ||
271 | } | ||
272 | return ret; | ||
273 | } | ||
274 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | 228 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) |
275 | { | 229 | { |
276 | struct drm_i915_private *dev_priv = dev->dev_private; | 230 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -278,10 +232,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
278 | struct intel_output *intel_output; | 232 | struct intel_output *intel_output; |
279 | struct intel_hdmi_priv *hdmi_priv; | 233 | struct intel_hdmi_priv *hdmi_priv; |
280 | 234 | ||
281 | if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) { | ||
282 | DRM_DEBUG_KMS("HDMI is not present. Ignored it \n"); | ||
283 | return; | ||
284 | } | ||
285 | intel_output = kcalloc(sizeof(struct intel_output) + | 235 | intel_output = kcalloc(sizeof(struct intel_output) + |
286 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | 236 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); |
287 | if (!intel_output) | 237 | if (!intel_output) |
@@ -303,21 +253,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
303 | if (sdvox_reg == SDVOB) { | 253 | if (sdvox_reg == SDVOB) { |
304 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 254 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); |
305 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 255 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); |
256 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
306 | } else if (sdvox_reg == SDVOC) { | 257 | } else if (sdvox_reg == SDVOC) { |
307 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | 258 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); |
308 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 259 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); |
260 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
309 | } else if (sdvox_reg == HDMIB) { | 261 | } else if (sdvox_reg == HDMIB) { |
310 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | 262 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); |
311 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 263 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, |
312 | "HDMIB"); | 264 | "HDMIB"); |
265 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
313 | } else if (sdvox_reg == HDMIC) { | 266 | } else if (sdvox_reg == HDMIC) { |
314 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | 267 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); |
315 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 268 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, |
316 | "HDMIC"); | 269 | "HDMIC"); |
270 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
317 | } else if (sdvox_reg == HDMID) { | 271 | } else if (sdvox_reg == HDMID) { |
318 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | 272 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); |
319 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 273 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, |
320 | "HDMID"); | 274 | "HDMID"); |
275 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | ||
321 | } | 276 | } |
322 | if (!intel_output->ddc_bus) | 277 | if (!intel_output->ddc_bus) |
323 | goto err_connector; | 278 | goto err_connector; |
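Each port that comes up in the hunk above also sets its bit in dev_priv->hotplug_supported_mask. A minimal sketch of how such a mask can later gate hotplug-interrupt enabling; the helper name is hypothetical and the PORT_HOTPLUG_EN bit assignments are assumed for illustration rather than taken from this patch:

        /* Hypothetical sketch: only enable hotplug detection for ports that
         * registered themselves in hotplug_supported_mask during output init.
         * The enable-bit names here are assumptions for illustration. */
        static void sketch_enable_hotplug(struct drm_device *dev)
        {
                struct drm_i915_private *dev_priv = dev->dev_private;
                u32 hotplug_en = 0;

                if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMIB_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMIC_HOTPLUG_INT_EN;
                if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                        hotplug_en |= HDMID_HOTPLUG_INT_EN;

                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
        }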
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b04d1e63d439..93031a75d112 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -602,12 +602,47 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
602 | /* Some lid devices report incorrect lid status, assume they're connected */ | 602 | /* Some lid devices report incorrect lid status, assume they're connected */ |
603 | static const struct dmi_system_id bad_lid_status[] = { | 603 | static const struct dmi_system_id bad_lid_status[] = { |
604 | { | 604 | { |
605 | .ident = "Compaq nx9020", | ||
606 | .matches = { | ||
607 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
608 | DMI_MATCH(DMI_BOARD_NAME, "3084"), | ||
609 | }, | ||
610 | }, | ||
611 | { | ||
612 | .ident = "Samsung SX20S", | ||
613 | .matches = { | ||
614 | DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), | ||
615 | DMI_MATCH(DMI_BOARD_NAME, "SX20S"), | ||
616 | }, | ||
617 | }, | ||
618 | { | ||
605 | .ident = "Aspire One", | 619 | .ident = "Aspire One", |
606 | .matches = { | 620 | .matches = { |
607 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 621 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
608 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), | 622 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), |
609 | }, | 623 | }, |
610 | }, | 624 | }, |
625 | { | ||
626 | .ident = "Aspire 1810T", | ||
627 | .matches = { | ||
628 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
629 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), | ||
630 | }, | ||
631 | }, | ||
632 | { | ||
633 | .ident = "PC-81005", | ||
634 | .matches = { | ||
635 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), | ||
636 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), | ||
637 | }, | ||
638 | }, | ||
639 | { | ||
640 | .ident = "Clevo M5x0N", | ||
641 | .matches = { | ||
642 | DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), | ||
643 | DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), | ||
644 | }, | ||
645 | }, | ||
611 | { } | 646 | { } |
612 | }; | 647 | }; |
613 | 648 | ||
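For context, a self-contained sketch of how a dmi_system_id table like the one above gets consulted; the table and function names are illustrative, not from this patch:

        #include <linux/dmi.h>

        static const struct dmi_system_id sketch_quirks[] = {
                {
                        .ident = "Some Machine",
                        .matches = {
                                DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),
                                DMI_MATCH(DMI_PRODUCT_NAME, "Product"),
                        },
                },
                { }     /* terminating entry */
        };

        /* dmi_check_system() returns the number of matching entries, so any
         * non-zero result means the running machine needs the quirk. */
        static bool sketch_needs_quirk(void)
        {
                return dmi_check_system(sketch_quirks) != 0;
        }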
@@ -620,9 +655,16 @@ static const struct dmi_system_id bad_lid_status[] = { | |||
620 | */ | 655 | */ |
621 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) | 656 | static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) |
622 | { | 657 | { |
658 | struct drm_device *dev = connector->dev; | ||
623 | enum drm_connector_status status = connector_status_connected; | 659 | enum drm_connector_status status = connector_status_connected; |
624 | 660 | ||
625 | if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) | 661 | /* ACPI lid methods were generally unreliable in this generation, so |
662 | * don't even bother. | ||
663 | */ | ||
664 | if (IS_I8XX(dev)) | ||
665 | return connector_status_connected; | ||
666 | |||
667 | if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) | ||
626 | status = connector_status_disconnected; | 668 | status = connector_status_disconnected; |
627 | 669 | ||
628 | return status; | 670 | return status; |
@@ -679,7 +721,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
679 | struct drm_i915_private *dev_priv = | 721 | struct drm_i915_private *dev_priv = |
680 | container_of(nb, struct drm_i915_private, lid_notifier); | 722 | container_of(nb, struct drm_i915_private, lid_notifier); |
681 | struct drm_device *dev = dev_priv->dev; | 723 | struct drm_device *dev = dev_priv->dev; |
724 | struct drm_connector *connector = dev_priv->int_lvds_connector; | ||
682 | 725 | ||
726 | /* | ||
727 | * check and update the status of LVDS connector after receiving | ||
728 | * the LID nofication event. | ||
729 | */ | ||
730 | if (connector) | ||
731 | connector->status = connector->funcs->detect(connector); | ||
683 | if (!acpi_lid_open()) { | 732 | if (!acpi_lid_open()) { |
684 | dev_priv->modeset_on_lid = 1; | 733 | dev_priv->modeset_on_lid = 1; |
685 | return NOTIFY_OK; | 734 | return NOTIFY_OK; |
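The notifier above only runs if it was registered with the ACPI button driver during init; a condensed sketch of the registration and teardown pattern, assuming the acpi_lid_notifier_register()/unregister() interface from <acpi/button.h> is available in this tree:

        /* At LVDS init time: hook intel_lid_notify() into ACPI lid events. */
        dev_priv->lid_notifier.notifier_call = intel_lid_notify;
        if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
                DRM_DEBUG_KMS("lid notifier registration failed\n");
                dev_priv->lid_notifier.notifier_call = NULL;
        }

        /* At teardown, the notifier must be unhooked again. */
        if (dev_priv->lid_notifier.notifier_call)
                acpi_lid_notifier_unregister(&dev_priv->lid_notifier);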
@@ -854,65 +903,6 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
854 | { } /* terminating entry */ | 903 | { } /* terminating entry */ |
855 | }; | 904 | }; |
856 | 905 | ||
857 | #ifdef CONFIG_ACPI | ||
858 | /* | ||
859 | * check_lid_device -- check whether @handle is an ACPI LID device. | ||
860 | * @handle: ACPI device handle | ||
861 | * @level : depth in the ACPI namespace tree | ||
862 | * @context: the number of LID device when we find the device | ||
863 | * @rv: a return value to fill if desired (Not use) | ||
864 | */ | ||
865 | static acpi_status | ||
866 | check_lid_device(acpi_handle handle, u32 level, void *context, | ||
867 | void **return_value) | ||
868 | { | ||
869 | struct acpi_device *acpi_dev; | ||
870 | int *lid_present = context; | ||
871 | |||
872 | acpi_dev = NULL; | ||
873 | /* Get the acpi device for device handle */ | ||
874 | if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { | ||
875 | /* If there is no ACPI device for handle, return */ | ||
876 | return AE_OK; | ||
877 | } | ||
878 | |||
879 | if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) | ||
880 | *lid_present = 1; | ||
881 | |||
882 | return AE_OK; | ||
883 | } | ||
884 | |||
885 | /** | ||
886 | * check whether there exists the ACPI LID device by enumerating the ACPI | ||
887 | * device tree. | ||
888 | */ | ||
889 | static int intel_lid_present(void) | ||
890 | { | ||
891 | int lid_present = 0; | ||
892 | |||
893 | if (acpi_disabled) { | ||
894 | /* If ACPI is disabled, there is no ACPI device tree to | ||
895 | * check, so assume the LID device would have been present. | ||
896 | */ | ||
897 | return 1; | ||
898 | } | ||
899 | |||
900 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
901 | ACPI_UINT32_MAX, | ||
902 | check_lid_device, &lid_present, NULL); | ||
903 | |||
904 | return lid_present; | ||
905 | } | ||
906 | #else | ||
907 | static int intel_lid_present(void) | ||
908 | { | ||
909 | /* In the absence of ACPI built in, assume that the LID device would | ||
910 | * have been present. | ||
911 | */ | ||
912 | return 1; | ||
913 | } | ||
914 | #endif | ||
915 | |||
916 | /** | 906 | /** |
917 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID | 907 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID |
918 | * @dev: drm device | 908 | * @dev: drm device |
@@ -957,7 +947,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev, | |||
957 | } | 947 | } |
958 | } | 948 | } |
959 | mutex_unlock(&dev->mode_config.mutex); | 949 | mutex_unlock(&dev->mode_config.mutex); |
960 | if (temp_downclock < panel_fixed_mode->clock) { | 950 | if (temp_downclock < panel_fixed_mode->clock && |
951 | i915_lvds_downclock) { | ||
961 | /* We found the downclock for LVDS. */ | 952 | /* We found the downclock for LVDS. */ |
962 | dev_priv->lvds_downclock_avail = 1; | 953 | dev_priv->lvds_downclock_avail = 1; |
963 | dev_priv->lvds_downclock = temp_downclock; | 954 | dev_priv->lvds_downclock = temp_downclock; |
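The added i915_lvds_downclock check makes downclocking opt-in; a sketch of the matching module-parameter declaration the hunk presumably relies on (the exact name, type, and default are assumptions inferred from this change):

        /* Assumed declaration elsewhere in the driver: downclocking stays
         * off unless the user boots with i915.lvds_downclock=1. */
        int i915_lvds_downclock;
        module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
        MODULE_PARM_DESC(lvds_downclock,
                         "Use panel downclocking for power savings (default: false)");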
@@ -1031,12 +1022,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
1031 | if (dmi_check_system(intel_no_lvds)) | 1022 | if (dmi_check_system(intel_no_lvds)) |
1032 | return; | 1023 | return; |
1033 | 1024 | ||
1034 | /* | 1025 | if (!lvds_is_present_in_vbt(dev)) { |
1035 | * Assume LVDS is present if there's an ACPI lid device or if the | 1026 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); |
1036 | * device is present in the VBT. | ||
1037 | */ | ||
1038 | if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) { | ||
1039 | DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n"); | ||
1040 | return; | 1027 | return; |
1041 | } | 1028 | } |
1042 | 1029 | ||
@@ -1180,6 +1167,8 @@ out: | |||
1180 | DRM_DEBUG_KMS("lid notifier registration failed\n"); | 1167 | DRM_DEBUG_KMS("lid notifier registration failed\n"); |
1181 | dev_priv->lid_notifier.notifier_call = NULL; | 1168 | dev_priv->lid_notifier.notifier_call = NULL; |
1182 | } | 1169 | } |
1170 | /* keep the LVDS connector pointer for the lid notifier */ | ||
1171 | dev_priv->int_lvds_connector = connector; | ||
1183 | drm_sysfs_connector_add(connector); | 1172 | drm_sysfs_connector_add(connector); |
1184 | return; | 1173 | return; |
1185 | 1174 | ||
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 2639591c72e9..c3fa406912b3 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over | |||
199 | 199 | ||
200 | static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) | 200 | static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) |
201 | { | 201 | { |
202 | struct drm_device *dev = overlay->dev; | ||
203 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
204 | |||
205 | if (OVERLAY_NONPHYSICAL(overlay->dev)) | 202 | if (OVERLAY_NONPHYSICAL(overlay->dev)) |
206 | io_mapping_unmap_atomic(overlay->virt_addr); | 203 | io_mapping_unmap_atomic(overlay->virt_addr); |
207 | 204 | ||
208 | overlay->virt_addr = NULL; | 205 | overlay->virt_addr = NULL; |
209 | 206 | ||
210 | I915_READ(OVADD); /* flush wc cashes */ | ||
211 | |||
212 | return; | 207 | return; |
213 | } | 208 | } |
214 | 209 | ||
@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
225 | overlay->active = 1; | 220 | overlay->active = 1; |
226 | overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; | 221 | overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; |
227 | 222 | ||
228 | BEGIN_LP_RING(6); | 223 | BEGIN_LP_RING(4); |
229 | OUT_RING(MI_FLUSH); | ||
230 | OUT_RING(MI_NOOP); | ||
231 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); | 224 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); |
232 | OUT_RING(overlay->flip_addr | OFC_UPDATE); | 225 | OUT_RING(overlay->flip_addr | OFC_UPDATE); |
233 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 226 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay, | |||
267 | if (tmp & (1 << 17)) | 260 | if (tmp & (1 << 17)) |
268 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); | 261 | DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); |
269 | 262 | ||
270 | BEGIN_LP_RING(4); | 263 | BEGIN_LP_RING(2); |
271 | OUT_RING(MI_FLUSH); | ||
272 | OUT_RING(MI_NOOP); | ||
273 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 264 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
274 | OUT_RING(flip_addr); | 265 | OUT_RING(flip_addr); |
275 | ADVANCE_LP_RING(); | 266 | ADVANCE_LP_RING(); |
@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
338 | /* wait for overlay to go idle */ | 329 | /* wait for overlay to go idle */ |
339 | overlay->hw_wedged = SWITCH_OFF_STAGE_1; | 330 | overlay->hw_wedged = SWITCH_OFF_STAGE_1; |
340 | 331 | ||
341 | BEGIN_LP_RING(6); | 332 | BEGIN_LP_RING(4); |
342 | OUT_RING(MI_FLUSH); | ||
343 | OUT_RING(MI_NOOP); | ||
344 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); | 333 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); |
345 | OUT_RING(flip_addr); | 334 | OUT_RING(flip_addr); |
346 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 335 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
358 | /* turn overlay off */ | 347 | /* turn overlay off */ |
359 | overlay->hw_wedged = SWITCH_OFF_STAGE_2; | 348 | overlay->hw_wedged = SWITCH_OFF_STAGE_2; |
360 | 349 | ||
361 | BEGIN_LP_RING(6); | 350 | BEGIN_LP_RING(4); |
362 | OUT_RING(MI_FLUSH); | ||
363 | OUT_RING(MI_NOOP); | ||
364 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); | 351 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); |
365 | OUT_RING(flip_addr); | 352 | OUT_RING(flip_addr); |
366 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 353 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, | |||
435 | 422 | ||
436 | overlay->hw_wedged = SWITCH_OFF_STAGE_2; | 423 | overlay->hw_wedged = SWITCH_OFF_STAGE_2; |
437 | 424 | ||
438 | BEGIN_LP_RING(6); | 425 | BEGIN_LP_RING(4); |
439 | OUT_RING(MI_FLUSH); | ||
440 | OUT_RING(MI_NOOP); | ||
441 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); | 426 | OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); |
442 | OUT_RING(flip_addr); | 427 | OUT_RING(flip_addr); |
443 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); | 428 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); |
@@ -1179,7 +1164,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1179 | out_unlock: | 1164 | out_unlock: |
1180 | mutex_unlock(&dev->struct_mutex); | 1165 | mutex_unlock(&dev->struct_mutex); |
1181 | mutex_unlock(&dev->mode_config.mutex); | 1166 | mutex_unlock(&dev->mode_config.mutex); |
1182 | drm_gem_object_unreference(new_bo); | 1167 | drm_gem_object_unreference_unlocked(new_bo); |
1183 | kfree(params); | 1168 | kfree(params); |
1184 | 1169 | ||
1185 | return ret; | 1170 | return ret; |
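Every hunk above shrinks BEGIN_LP_RING(n) in lockstep with the removed OUT_RING() calls: the dword count reserved must exactly match the emissions that follow, or the ring pointers go inconsistent. A condensed sketch of the resulting four-dword flip sequence (the trailing MI_NOOP pad falls outside the visible hunk context and is assumed here):

        BEGIN_LP_RING(4);                                       /* reserve 4 dwords */
        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);              /* dword 1 */
        OUT_RING(overlay->flip_addr | OFC_UPDATE);              /* dword 2 */
        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* dword 3 */
        OUT_RING(MI_NOOP);                                      /* dword 4: pad */
        ADVANCE_LP_RING();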
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index dba5147f4064..82678d30ab06 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -462,14 +462,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
462 | } | 462 | } |
463 | 463 | ||
464 | /** | 464 | /** |
465 | * Don't check status code from this as it switches the bus back to the | 465 | * Try to read the response after issuing the DDC switch command. Note |
466 | * SDVO chips which defeats the purpose of doing a bus switch in the first | 466 | * that reading the response and issuing the DDC switch command must |
467 | * place. | 467 | * happen in a single I2C transaction; otherwise any I2C transaction |
468 | * started after the DDC bus switch will be routed to the internal | ||
469 | * SDVO registers instead. | ||
468 | */ | 470 | */ |
469 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | 471 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, |
470 | u8 target) | 472 | u8 target) |
471 | { | 473 | { |
472 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); | 474 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; |
475 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; | ||
476 | struct i2c_msg msgs[] = { | ||
477 | { | ||
478 | .addr = sdvo_priv->slave_addr >> 1, | ||
479 | .flags = 0, | ||
480 | .len = 2, | ||
481 | .buf = out_buf, | ||
482 | }, | ||
483 | /* the following two messages read back the command status */ | ||
484 | { | ||
485 | .addr = sdvo_priv->slave_addr >> 1, | ||
486 | .flags = 0, | ||
487 | .len = 1, | ||
488 | .buf = cmd_buf, | ||
489 | }, | ||
490 | { | ||
491 | .addr = sdvo_priv->slave_addr >> 1, | ||
492 | .flags = I2C_M_RD, | ||
493 | .len = 1, | ||
494 | .buf = ret_value, | ||
495 | }, | ||
496 | }; | ||
497 | |||
498 | intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, | ||
499 | &target, 1); | ||
500 | /* write the DDC switch command argument */ | ||
501 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); | ||
502 | |||
503 | out_buf[0] = SDVO_I2C_OPCODE; | ||
504 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; | ||
505 | cmd_buf[0] = SDVO_I2C_CMD_STATUS; | ||
506 | cmd_buf[1] = 0; | ||
507 | ret_value[0] = 0; | ||
508 | ret_value[1] = 0; | ||
509 | |||
510 | ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); | ||
511 | if (ret != 3) { | ||
512 | /* failure in I2C transfer */ | ||
513 | DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); | ||
514 | return; | ||
515 | } | ||
516 | if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) { | ||
517 | DRM_DEBUG_KMS("DDC switch command returns response %d\n", | ||
518 | ret_value[0]); | ||
519 | return; | ||
520 | } | ||
521 | return; | ||
473 | } | 522 | } |
474 | 523 | ||
475 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) | 524 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) |
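The essential property of the rewrite above is that i2c_transfer() executes its whole message array as one bus transaction, so nothing can sneak in between the switch command and the status read. A stripped-down sketch of the same write-then-read pattern, with the function and register names invented for illustration:

        #include <linux/i2c.h>

        /* Sketch: issue a two-byte command and read one status byte back in
         * a single atomic I2C transaction. */
        static int sketch_cmd_with_status(struct i2c_adapter *adap, u8 addr,
                                          u8 opcode, u8 cmd, u8 *status)
        {
                u8 out[2] = { opcode, cmd };
                u8 reg = 0; /* status register index; assumed for illustration */
                struct i2c_msg msgs[] = {
                        { .addr = addr, .flags = 0,        .len = 2, .buf = out },
                        { .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
                        { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = status },
                };

                /* i2c_transfer() returns the number of messages transferred. */
                return i2c_transfer(adap, msgs, 3) == 3 ? 0 : -EIO;
        }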
@@ -1579,6 +1628,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1579 | edid = drm_get_edid(&intel_output->base, | 1628 | edid = drm_get_edid(&intel_output->base, |
1580 | intel_output->ddc_bus); | 1629 | intel_output->ddc_bus); |
1581 | 1630 | ||
1631 | /* This is only applied to SDVO cards with multiple outputs */ | ||
1632 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { | ||
1633 | uint8_t saved_ddc, temp_ddc; | ||
1634 | saved_ddc = sdvo_priv->ddc_bus; | ||
1635 | temp_ddc = sdvo_priv->ddc_bus >> 1; | ||
1636 | /* | ||
1637 | * Don't use 1 as the DDC bus switch argument when fetching | ||
1638 | * the EDID; that bus is reserved for the SDVO SPD ROM. | ||
1639 | */ | ||
1640 | while (temp_ddc > 1) { | ||
1641 | sdvo_priv->ddc_bus = temp_ddc; | ||
1642 | edid = drm_get_edid(&intel_output->base, | ||
1643 | intel_output->ddc_bus); | ||
1644 | if (edid) { | ||
1645 | /* | ||
1646 | * If we can read an EDID on this bus, it is likely | ||
1647 | * the correct DDC bus; keep it. | ||
1648 | */ | ||
1649 | sdvo_priv->ddc_bus = temp_ddc; | ||
1650 | break; | ||
1651 | } | ||
1652 | temp_ddc >>= 1; | ||
1653 | } | ||
1654 | if (edid == NULL) | ||
1655 | sdvo_priv->ddc_bus = saved_ddc; | ||
1656 | } | ||
1582 | /* when there is no edid and no monitor is connected with VGA | 1657 | /* when there is no edid and no monitor is connected with VGA |
1583 | * port, try to use the CRT ddc to read the EDID for DVI-connector | 1658 | * port, try to use the CRT ddc to read the EDID for DVI-connector |
1584 | */ | 1659 | */ |
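A worked trace of the probe loop above, assuming the saved DDC selector is 0x08: temp_ddc starts at 0x04 and halves each iteration, and 0x01 is never tried because that selector addresses the SDVO SPD ROM.

        /* saved_ddc = 0x08 -> temp_ddc = 0x04: attempt EDID read        */
        /* no EDID   ->       temp_ddc = 0x02: attempt EDID read         */
        /* loop exits before 0x01 (reserved for the SPD ROM)             */
        /* if no candidate yielded an EDID, saved_ddc is restored        */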
@@ -2270,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2270 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2345 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2271 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2346 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2272 | (1 << INTEL_ANALOG_CLONE_BIT); | 2347 | (1 << INTEL_ANALOG_CLONE_BIT); |
2348 | } else if (flags & SDVO_OUTPUT_CVBS0) { | ||
2349 | |||
2350 | sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; | ||
2351 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
2352 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
2353 | sdvo_priv->is_tv = true; | ||
2354 | intel_output->needs_tv_clock = true; | ||
2355 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | ||
2273 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2356 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
2274 | 2357 | ||
2275 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2358 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
@@ -2662,6 +2745,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2662 | 2745 | ||
2663 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2746 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
2664 | { | 2747 | { |
2748 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2665 | struct drm_connector *connector; | 2749 | struct drm_connector *connector; |
2666 | struct intel_output *intel_output; | 2750 | struct intel_output *intel_output; |
2667 | struct intel_sdvo_priv *sdvo_priv; | 2751 | struct intel_sdvo_priv *sdvo_priv; |
@@ -2708,10 +2792,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2708 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2792 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
2709 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2793 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2710 | "SDVOB/VGA DDC BUS"); | 2794 | "SDVOB/VGA DDC BUS"); |
2795 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | ||
2711 | } else { | 2796 | } else { |
2712 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2797 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
2713 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2798 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2714 | "SDVOC/VGA DDC BUS"); | 2799 | "SDVOC/VGA DDC BUS"); |
2800 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | ||
2715 | } | 2801 | } |
2716 | 2802 | ||
2717 | if (intel_output->ddc_bus == NULL) | 2803 | if (intel_output->ddc_bus == NULL) |
@@ -2720,7 +2806,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2720 | /* Wrap with our custom algo which switches to DDC mode */ | 2806 | /* Wrap with our custom algo which switches to DDC mode */ |
2721 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; | 2807 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; |
2722 | 2808 | ||
2723 | /* In defaut case sdvo lvds is false */ | 2809 | /* In default case sdvo lvds is false */ |
2724 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); | 2810 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); |
2725 | 2811 | ||
2726 | if (intel_sdvo_output_setup(intel_output, | 2812 | if (intel_sdvo_output_setup(intel_output, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 1cf488247a16..48227e744753 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev) | |||
90 | { | 90 | { |
91 | int result; | 91 | int result; |
92 | 92 | ||
93 | if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, | 93 | if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE, |
94 | &result)) | 94 | &result)) |
95 | return -ENODEV; | 95 | return -ENODEV; |
96 | 96 | ||
97 | NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); | 97 | NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); |
98 | 98 | ||
99 | if (result & 0x1) { /* Stamina mode - disable the external GPU */ | 99 | if (result) { /* Ensure that the external GPU is enabled */ |
100 | nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); | ||
101 | nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, | ||
102 | NULL); | ||
103 | } else { /* Stamina mode - disable the external GPU */ | ||
100 | nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, | 104 | nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, |
101 | NULL); | 105 | NULL); |
102 | nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, | 106 | nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, |
103 | NULL); | 107 | NULL); |
104 | } else { /* Ensure that the external GPU is enabled */ | ||
105 | nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); | ||
106 | nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, | ||
107 | NULL); | ||
108 | } | 108 | } |
109 | 109 | ||
110 | return 0; | 110 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index d7f8d8b4a4b8..0e9cd1d49130 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
1865 | 1865 | ||
1866 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; | 1866 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; |
1867 | 1867 | ||
1868 | if (dev_priv->card_type >= NV_50) | 1868 | if (dev_priv->card_type >= NV_40) |
1869 | return 1; | 1869 | return 1; |
1870 | 1870 | ||
1871 | /* | 1871 | /* |
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3765 | */ | 3765 | */ |
3766 | 3766 | ||
3767 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 3767 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
3768 | struct init_exec iexec = {true, false}; | ||
3769 | struct nvbios *bios = &dev_priv->VBIOS; | 3768 | struct nvbios *bios = &dev_priv->VBIOS; |
3770 | uint8_t *table = &bios->data[bios->display.script_table_ptr]; | 3769 | uint8_t *table = &bios->data[bios->display.script_table_ptr]; |
3771 | uint8_t *otable = NULL; | 3770 | uint8_t *otable = NULL; |
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3845 | } | 3844 | } |
3846 | } | 3845 | } |
3847 | 3846 | ||
3848 | bios->display.output = dcbent; | ||
3849 | |||
3850 | if (pxclk == 0) { | 3847 | if (pxclk == 0) { |
3851 | script = ROM16(otable[6]); | 3848 | script = ROM16(otable[6]); |
3852 | if (!script) { | 3849 | if (!script) { |
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3855 | } | 3852 | } |
3856 | 3853 | ||
3857 | NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); | 3854 | NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); |
3858 | parse_init_table(bios, script, &iexec); | 3855 | nouveau_bios_run_init_table(dev, script, dcbent); |
3859 | } else | 3856 | } else |
3860 | if (pxclk == -1) { | 3857 | if (pxclk == -1) { |
3861 | script = ROM16(otable[8]); | 3858 | script = ROM16(otable[8]); |
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3865 | } | 3862 | } |
3866 | 3863 | ||
3867 | NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); | 3864 | NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); |
3868 | parse_init_table(bios, script, &iexec); | 3865 | nouveau_bios_run_init_table(dev, script, dcbent); |
3869 | } else | 3866 | } else |
3870 | if (pxclk == -2) { | 3867 | if (pxclk == -2) { |
3871 | if (table[4] >= 12) | 3868 | if (table[4] >= 12) |
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3878 | } | 3875 | } |
3879 | 3876 | ||
3880 | NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); | 3877 | NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); |
3881 | parse_init_table(bios, script, &iexec); | 3878 | nouveau_bios_run_init_table(dev, script, dcbent); |
3882 | } else | 3879 | } else |
3883 | if (pxclk > 0) { | 3880 | if (pxclk > 0) { |
3884 | script = ROM16(otable[table[4] + i*6 + 2]); | 3881 | script = ROM16(otable[table[4] + i*6 + 2]); |
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3890 | } | 3887 | } |
3891 | 3888 | ||
3892 | NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); | 3889 | NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); |
3893 | parse_init_table(bios, script, &iexec); | 3890 | nouveau_bios_run_init_table(dev, script, dcbent); |
3894 | } else | 3891 | } else |
3895 | if (pxclk < 0) { | 3892 | if (pxclk < 0) { |
3896 | script = ROM16(otable[table[4] + i*6 + 4]); | 3893 | script = ROM16(otable[table[4] + i*6 + 4]); |
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
3902 | } | 3899 | } |
3903 | 3900 | ||
3904 | NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); | 3901 | NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); |
3905 | parse_init_table(bios, script, &iexec); | 3902 | nouveau_bios_run_init_table(dev, script, dcbent); |
3906 | } | 3903 | } |
3907 | 3904 | ||
3908 | return 0; | 3905 | return 0; |
@@ -5865,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, | |||
5865 | struct nvbios *bios = &dev_priv->VBIOS; | 5862 | struct nvbios *bios = &dev_priv->VBIOS; |
5866 | struct init_exec iexec = { true, false }; | 5863 | struct init_exec iexec = { true, false }; |
5867 | 5864 | ||
5865 | mutex_lock(&bios->lock); | ||
5868 | bios->display.output = dcbent; | 5866 | bios->display.output = dcbent; |
5869 | parse_init_table(bios, table, &iexec); | 5867 | parse_init_table(bios, table, &iexec); |
5870 | bios->display.output = NULL; | 5868 | bios->display.output = NULL; |
5869 | mutex_unlock(&bios->lock); | ||
5871 | } | 5870 | } |
5872 | 5871 | ||
5873 | static bool NVInitVBIOS(struct drm_device *dev) | 5872 | static bool NVInitVBIOS(struct drm_device *dev) |
@@ -5876,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev) | |||
5876 | struct nvbios *bios = &dev_priv->VBIOS; | 5875 | struct nvbios *bios = &dev_priv->VBIOS; |
5877 | 5876 | ||
5878 | memset(bios, 0, sizeof(struct nvbios)); | 5877 | memset(bios, 0, sizeof(struct nvbios)); |
5878 | mutex_init(&bios->lock); | ||
5879 | bios->dev = dev; | 5879 | bios->dev = dev; |
5880 | 5880 | ||
5881 | if (!NVShadowVBIOS(dev, bios->data)) | 5881 | if (!NVShadowVBIOS(dev, bios->data)) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 058e98c76d89..fd94bd6dc264 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
@@ -205,6 +205,8 @@ struct nvbios { | |||
205 | struct drm_device *dev; | 205 | struct drm_device *dev; |
206 | struct nouveau_bios_info pub; | 206 | struct nouveau_bios_info pub; |
207 | 207 | ||
208 | struct mutex lock; | ||
209 | |||
208 | uint8_t data[NV_PROM_SIZE]; | 210 | uint8_t data[NV_PROM_SIZE]; |
209 | unsigned int length; | 211 | unsigned int length; |
210 | bool execute; | 212 | bool execute; |
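Together with the conversion of nouveau_bios_run_display_table() to call nouveau_bios_run_init_table(), the new mutex funnels all script execution through one locked critical section. Condensed, the pattern the hunks establish is:

        mutex_lock(&bios->lock);
        bios->display.output = dcbent;  /* publish the DCB entry to the parser */
        parse_init_table(bios, table, &iexec);
        bios->display.output = NULL;    /* never leave a stale entry visible */
        mutex_unlock(&bios->lock);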
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index db0ed4c13f98..028719fddf76 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev, | |||
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Some of the tile_flags have a periodic structure of N*4096 bytes, | 67 | * Some of the tile_flags have a periodic structure of N*4096 bytes, |
68 | * align to to that as well as the page size. Overallocate memory to | 68 | * align to that as well as the page size. Align the size to the |
69 | * avoid corruption of other buffer objects. | 69 | * appropriate boundaries. This does imply that sizes are rounded up |
70 | * by 3-7 pages, so be aware of this and do not waste memory by | ||
71 | * allocating many small buffers. | ||
70 | */ | 72 | */ |
71 | if (dev_priv->card_type == NV_50) { | 73 | if (dev_priv->card_type == NV_50) { |
72 | uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; | 74 | uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; |
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev, | |||
77 | case 0x2800: | 79 | case 0x2800: |
78 | case 0x4800: | 80 | case 0x4800: |
79 | case 0x7a00: | 81 | case 0x7a00: |
80 | *size = roundup(*size, block_size); | ||
81 | if (is_power_of_2(block_size)) { | 82 | if (is_power_of_2(block_size)) { |
82 | *size += 3 * block_size; | ||
83 | for (i = 1; i < 10; i++) { | 83 | for (i = 1; i < 10; i++) { |
84 | *align = 12 * i * block_size; | 84 | *align = 12 * i * block_size; |
85 | if (!(*align % 65536)) | 85 | if (!(*align % 65536)) |
86 | break; | 86 | break; |
87 | } | 87 | } |
88 | } else { | 88 | } else { |
89 | *size += 6 * block_size; | ||
90 | for (i = 1; i < 10; i++) { | 89 | for (i = 1; i < 10; i++) { |
91 | *align = 8 * i * block_size; | 90 | *align = 8 * i * block_size; |
92 | if (!(*align % 65536)) | 91 | if (!(*align % 65536)) |
93 | break; | 92 | break; |
94 | } | 93 | } |
95 | } | 94 | } |
95 | *size = roundup(*size, *align); | ||
96 | break; | 96 | break; |
97 | default: | 97 | default: |
98 | break; | 98 | break; |
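A worked example of the new rounding, assuming a 256 MiB NV50 board: block_size = (256 MiB >> 15) = 8192 bytes, a power of two, so the loop searches for the smallest align = 12 * i * block_size that is 64 KiB-aligned.

        /* block_size = 268435456 >> 15 = 8192 (power of two)                */
        /* i = 1: align = 12 * 1 * 8192 =  98304; 98304 % 65536 != 0         */
        /* i = 2: align = 12 * 2 * 8192 = 196608; 196608 % 65536 == 0 -> use */
        /* a 100 KiB buffer then rounds up: roundup(102400, 196608) = 196608 */

The old 3-7 page overallocation is thus replaced by rounding the size itself to the chosen alignment boundary, which is what the moved roundup() call does.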
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 343d718a9667..2281f99da7fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan) | |||
278 | /* Ensure the channel is no longer active on the GPU */ | 278 | /* Ensure the channel is no longer active on the GPU */ |
279 | pfifo->reassign(dev, false); | 279 | pfifo->reassign(dev, false); |
280 | 280 | ||
281 | if (pgraph->channel(dev) == chan) { | 281 | pgraph->fifo_access(dev, false); |
282 | pgraph->fifo_access(dev, false); | 282 | if (pgraph->channel(dev) == chan) |
283 | pgraph->unload_context(dev); | 283 | pgraph->unload_context(dev); |
284 | pgraph->fifo_access(dev, true); | ||
285 | } | ||
286 | pgraph->destroy_context(chan); | 284 | pgraph->destroy_context(chan); |
285 | pgraph->fifo_access(dev, true); | ||
287 | 286 | ||
288 | if (pfifo->channel_id(dev) == chan->id) { | 287 | if (pfifo->channel_id(dev) == chan->id) { |
289 | pfifo->disable(dev); | 288 | pfifo->disable(dev); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7e6d673f3a23..d2f63353ea97 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector) | |||
88 | { | 88 | { |
89 | struct nouveau_connector *nv_connector = | 89 | struct nouveau_connector *nv_connector = |
90 | nouveau_connector(drm_connector); | 90 | nouveau_connector(drm_connector); |
91 | struct drm_device *dev = nv_connector->base.dev; | 91 | struct drm_device *dev; |
92 | |||
93 | NV_DEBUG_KMS(dev, "\n"); | ||
94 | 92 | ||
95 | if (!nv_connector) | 93 | if (!nv_connector) |
96 | return; | 94 | return; |
97 | 95 | ||
96 | dev = nv_connector->base.dev; | ||
97 | NV_DEBUG_KMS(dev, "\n"); | ||
98 | |||
98 | kfree(nv_connector->edid); | 99 | kfree(nv_connector->edid); |
99 | drm_sysfs_connector_remove(drm_connector); | 100 | drm_sysfs_connector_remove(drm_connector); |
100 | drm_connector_cleanup(drm_connector); | 101 | drm_connector_cleanup(drm_connector); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index dfc94391d71e..cf1c5c0a0abe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) | |||
39 | if (drm_fb->fbdev) | 39 | if (drm_fb->fbdev) |
40 | nouveau_fbcon_remove(dev, drm_fb); | 40 | nouveau_fbcon_remove(dev, drm_fb); |
41 | 41 | ||
42 | if (fb->nvbo) { | 42 | if (fb->nvbo) |
43 | mutex_lock(&dev->struct_mutex); | 43 | drm_gem_object_unreference_unlocked(fb->nvbo->gem); |
44 | drm_gem_object_unreference(fb->nvbo->gem); | ||
45 | mutex_unlock(&dev->struct_mutex); | ||
46 | } | ||
47 | 44 | ||
48 | drm_framebuffer_cleanup(drm_fb); | 45 | drm_framebuffer_cleanup(drm_fb); |
49 | kfree(fb); | 46 | kfree(fb); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index dd4937224220..f954ad93e81f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
502 | break; | 502 | break; |
503 | } | 503 | } |
504 | 504 | ||
505 | if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { | ||
506 | ret = -EREMOTEIO; | ||
507 | goto out; | ||
508 | } | ||
509 | |||
510 | if (cmd & 1) { | 505 | if (cmd & 1) { |
506 | if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { | ||
507 | ret = -EREMOTEIO; | ||
508 | goto out; | ||
509 | } | ||
510 | |||
511 | for (i = 0; i < 4; i++) { | 511 | for (i = 0; i < 4; i++) { |
512 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); | 512 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); |
513 | NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); | 513 | NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 343ab7f17ccc..da3b93b84502 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf; | |||
56 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); | 56 | module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); |
57 | 57 | ||
58 | MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); | 58 | MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); |
59 | int nouveau_vram_notify; | 59 | int nouveau_vram_notify = 1; |
60 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); | 60 | module_param_named(vram_notify, nouveau_vram_notify, int, 0400); |
61 | 61 | ||
62 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); | 62 | MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); |
@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); | |||
75 | int nouveau_ignorelid = 0; | 75 | int nouveau_ignorelid = 0; |
76 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); | 76 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); |
77 | 77 | ||
78 | MODULE_PARM_DESC(noaccel, "Disable all acceleration"); | ||
79 | int nouveau_noaccel = 0; | ||
80 | module_param_named(noaccel, nouveau_noaccel, int, 0400); | ||
81 | |||
82 | MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); | ||
83 | int nouveau_nofbaccel = 0; | ||
84 | module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); | ||
85 | |||
78 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" | 86 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" |
79 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" | 87 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" |
80 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" | 88 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 6b9690418bc7..1c15ef37b71c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -583,6 +583,7 @@ struct drm_nouveau_private { | |||
583 | uint64_t vm_end; | 583 | uint64_t vm_end; |
584 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; | 584 | struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; |
585 | int vm_vram_pt_nr; | 585 | int vm_vram_pt_nr; |
586 | uint64_t vram_sys_base; | ||
586 | 587 | ||
587 | /* the mtrr covering the FB */ | 588 | /* the mtrr covering the FB */ |
588 | int fb_mtrr; | 589 | int fb_mtrr; |
@@ -678,6 +679,8 @@ extern int nouveau_reg_debug; | |||
678 | extern char *nouveau_vbios; | 679 | extern char *nouveau_vbios; |
679 | extern int nouveau_ctxfw; | 680 | extern int nouveau_ctxfw; |
680 | extern int nouveau_ignorelid; | 681 | extern int nouveau_ignorelid; |
682 | extern int nouveau_nofbaccel; | ||
683 | extern int nouveau_noaccel; | ||
681 | 684 | ||
682 | /* nouveau_state.c */ | 685 | /* nouveau_state.c */ |
683 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); | 686 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 0b05c869e0e7..d48c59cdefe4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = { | |||
107 | .fb_setcmap = drm_fb_helper_setcmap, | 107 | .fb_setcmap = drm_fb_helper_setcmap, |
108 | }; | 108 | }; |
109 | 109 | ||
110 | static struct fb_ops nv04_fbcon_ops = { | ||
111 | .owner = THIS_MODULE, | ||
112 | .fb_check_var = drm_fb_helper_check_var, | ||
113 | .fb_set_par = drm_fb_helper_set_par, | ||
114 | .fb_setcolreg = drm_fb_helper_setcolreg, | ||
115 | .fb_fillrect = nv04_fbcon_fillrect, | ||
116 | .fb_copyarea = nv04_fbcon_copyarea, | ||
117 | .fb_imageblit = nv04_fbcon_imageblit, | ||
118 | .fb_sync = nouveau_fbcon_sync, | ||
119 | .fb_pan_display = drm_fb_helper_pan_display, | ||
120 | .fb_blank = drm_fb_helper_blank, | ||
121 | .fb_setcmap = drm_fb_helper_setcmap, | ||
122 | }; | ||
123 | |||
124 | static struct fb_ops nv50_fbcon_ops = { | ||
125 | .owner = THIS_MODULE, | ||
126 | .fb_check_var = drm_fb_helper_check_var, | ||
127 | .fb_set_par = drm_fb_helper_set_par, | ||
128 | .fb_setcolreg = drm_fb_helper_setcolreg, | ||
129 | .fb_fillrect = nv50_fbcon_fillrect, | ||
130 | .fb_copyarea = nv50_fbcon_copyarea, | ||
131 | .fb_imageblit = nv50_fbcon_imageblit, | ||
132 | .fb_sync = nouveau_fbcon_sync, | ||
133 | .fb_pan_display = drm_fb_helper_pan_display, | ||
134 | .fb_blank = drm_fb_helper_blank, | ||
135 | .fb_setcmap = drm_fb_helper_setcmap, | ||
136 | }; | ||
137 | |||
110 | static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 138 | static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
111 | u16 blue, int regno) | 139 | u16 blue, int regno) |
112 | { | 140 | { |
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, | |||
267 | dev_priv->fbdev_info = info; | 295 | dev_priv->fbdev_info = info; |
268 | 296 | ||
269 | strcpy(info->fix.id, "nouveaufb"); | 297 | strcpy(info->fix.id, "nouveaufb"); |
270 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | | 298 | if (nouveau_nofbaccel) |
271 | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; | 299 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; |
300 | else | ||
301 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | | ||
302 | FBINFO_HWACCEL_FILLRECT | | ||
303 | FBINFO_HWACCEL_IMAGEBLIT; | ||
272 | info->fbops = &nouveau_fbcon_ops; | 304 | info->fbops = &nouveau_fbcon_ops; |
273 | info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - | 305 | info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - |
274 | dev_priv->vm_vram_base; | 306 | dev_priv->vm_vram_base; |
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, | |||
316 | par->nouveau_fb = nouveau_fb; | 348 | par->nouveau_fb = nouveau_fb; |
317 | par->dev = dev; | 349 | par->dev = dev; |
318 | 350 | ||
319 | if (dev_priv->channel) { | 351 | if (dev_priv->channel && !nouveau_nofbaccel) { |
320 | switch (dev_priv->card_type) { | 352 | switch (dev_priv->card_type) { |
321 | case NV_50: | 353 | case NV_50: |
322 | nv50_fbcon_accel_init(info); | 354 | nv50_fbcon_accel_init(info); |
355 | info->fbops = &nv50_fbcon_ops; | ||
323 | break; | 356 | break; |
324 | default: | 357 | default: |
325 | nv04_fbcon_accel_init(info); | 358 | nv04_fbcon_accel_init(info); |
359 | info->fbops = &nv04_fbcon_ops; | ||
326 | break; | 360 | break; |
327 | }; | 361 | }; |
328 | } | 362 | } |
@@ -367,10 +401,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) | |||
367 | 401 | ||
368 | unregister_framebuffer(info); | 402 | unregister_framebuffer(info); |
369 | nouveau_bo_unmap(nouveau_fb->nvbo); | 403 | nouveau_bo_unmap(nouveau_fb->nvbo); |
370 | mutex_lock(&dev->struct_mutex); | 404 | drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); |
371 | drm_gem_object_unreference(nouveau_fb->nvbo->gem); | ||
372 | nouveau_fb->nvbo = NULL; | 405 | nouveau_fb->nvbo = NULL; |
373 | mutex_unlock(&dev->struct_mutex); | ||
374 | if (par) | 406 | if (par) |
375 | drm_fb_helper_free(&par->helper); | 407 | drm_fb_helper_free(&par->helper); |
376 | framebuffer_release(info); | 408 | framebuffer_release(info); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index 462e0b87b4bd..f9c34e1a8c11 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h | |||
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb); | |||
40 | void nouveau_fbcon_restore(void); | 40 | void nouveau_fbcon_restore(void); |
41 | void nouveau_fbcon_zfill(struct drm_device *dev); | 41 | void nouveau_fbcon_zfill(struct drm_device *dev); |
42 | 42 | ||
43 | void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); | ||
44 | void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | ||
45 | void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); | ||
43 | int nv04_fbcon_accel_init(struct fb_info *info); | 46 | int nv04_fbcon_accel_init(struct fb_info *info); |
47 | void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | ||
48 | void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); | ||
49 | void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); | ||
44 | int nv50_fbcon_accel_init(struct fb_info *info); | 50 | int nv50_fbcon_accel_init(struct fb_info *info); |
45 | 51 | ||
46 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); | 52 | void nouveau_fbcon_gpu_lockup(struct fb_info *info); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 6ac804b0c9f9..34063c561899 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, | |||
167 | 167 | ||
168 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); | 168 | ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); |
169 | out: | 169 | out: |
170 | mutex_lock(&dev->struct_mutex); | 170 | drm_gem_object_handle_unreference_unlocked(nvbo->gem); |
171 | drm_gem_object_handle_unreference(nvbo->gem); | ||
172 | mutex_unlock(&dev->struct_mutex); | ||
173 | 171 | ||
174 | if (ret) | 172 | if (ret) |
175 | drm_gem_object_unreference(nvbo->gem); | 173 | drm_gem_object_unreference_unlocked(nvbo->gem); |
176 | return ret; | 174 | return ret; |
177 | } | 175 | } |
178 | 176 | ||
@@ -865,9 +863,7 @@ nouveau_gem_ioctl_pin(struct drm_device *dev, void *data, | |||
865 | req->domain = NOUVEAU_GEM_DOMAIN_VRAM; | 863 | req->domain = NOUVEAU_GEM_DOMAIN_VRAM; |
866 | 864 | ||
867 | out: | 865 | out: |
868 | mutex_lock(&dev->struct_mutex); | 866 | drm_gem_object_unreference_unlocked(gem); |
869 | drm_gem_object_unreference(gem); | ||
870 | mutex_unlock(&dev->struct_mutex); | ||
871 | 867 | ||
872 | return ret; | 868 | return ret; |
873 | } | 869 | } |
@@ -891,9 +887,7 @@ nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data, | |||
891 | 887 | ||
892 | ret = nouveau_bo_unpin(nouveau_gem_object(gem)); | 888 | ret = nouveau_bo_unpin(nouveau_gem_object(gem)); |
893 | 889 | ||
894 | mutex_lock(&dev->struct_mutex); | 890 | drm_gem_object_unreference_unlocked(gem); |
895 | drm_gem_object_unreference(gem); | ||
896 | mutex_unlock(&dev->struct_mutex); | ||
897 | 891 | ||
898 | return ret; | 892 | return ret; |
899 | } | 893 | } |
@@ -925,7 +919,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, | |||
925 | } | 919 | } |
926 | 920 | ||
927 | if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { | 921 | if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { |
922 | spin_lock(&nvbo->bo.lock); | ||
928 | ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); | 923 | ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); |
924 | spin_unlock(&nvbo->bo.lock); | ||
929 | } else { | 925 | } else { |
930 | ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); | 926 | ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); |
931 | if (ret == 0) | 927 | if (ret == 0) |
@@ -933,9 +929,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, | |||
933 | } | 929 | } |
934 | 930 | ||
935 | out: | 931 | out: |
936 | mutex_lock(&dev->struct_mutex); | 932 | drm_gem_object_unreference_unlocked(gem); |
937 | drm_gem_object_unreference(gem); | ||
938 | mutex_unlock(&dev->struct_mutex); | ||
939 | return ret; | 933 | return ret; |
940 | } | 934 | } |
941 | 935 | ||
@@ -963,9 +957,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, | |||
963 | ret = 0; | 957 | ret = 0; |
964 | 958 | ||
965 | out: | 959 | out: |
966 | mutex_lock(&dev->struct_mutex); | 960 | drm_gem_object_unreference_unlocked(gem); |
967 | drm_gem_object_unreference(gem); | ||
968 | mutex_unlock(&dev->struct_mutex); | ||
969 | return ret; | 961 | return ret; |
970 | } | 962 | } |
971 | 963 | ||
@@ -984,9 +976,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, | |||
984 | return -EINVAL; | 976 | return -EINVAL; |
985 | 977 | ||
986 | ret = nouveau_gem_info(gem, req); | 978 | ret = nouveau_gem_info(gem, req); |
987 | mutex_lock(&dev->struct_mutex); | 979 | drm_gem_object_unreference_unlocked(gem); |
988 | drm_gem_object_unreference(gem); | ||
989 | mutex_unlock(&dev->struct_mutex); | ||
990 | return ret; | 980 | return ret; |
991 | } | 981 | } |
992 | 982 | ||
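The change repeated throughout this file is mechanical: the _unlocked helper takes struct_mutex itself when the final reference drops, so callers stop bracketing every unreference (semantics inferred from the helper's name; see drm_gem.c for the exact locking contract). A before/after sketch:

        /* Before: caller supplies the locking around every unreference. */
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(gem);
        mutex_unlock(&dev->struct_mutex);

        /* After: locking for the final-free path moves into the helper. */
        drm_gem_object_unreference_unlocked(gem);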
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c index 419f4c2b3b89..c7ebec696747 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.c +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c | |||
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); | 99 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); |
100 | if (!pgraph->ctxprog) { | 100 | if (!pgraph->ctxvals) { |
101 | NV_ERROR(dev, "OOM copying ctxprog\n"); | 101 | NV_ERROR(dev, "OOM copying ctxvals\n"); |
102 | release_firmware(fw); | 102 | release_firmware(fw); |
103 | nouveau_grctx_fini(dev); | 103 | nouveau_grctx_fini(dev); |
104 | return -ENOMEM; | 104 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 3b9bad66162a..447f9f69d6b1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev) | |||
211 | get + 4); | 211 | get + 4); |
212 | } | 212 | } |
213 | 213 | ||
214 | if (status & NV_PFIFO_INTR_SEMAPHORE) { | ||
215 | uint32_t sem; | ||
216 | |||
217 | status &= ~NV_PFIFO_INTR_SEMAPHORE; | ||
218 | nv_wr32(dev, NV03_PFIFO_INTR_0, | ||
219 | NV_PFIFO_INTR_SEMAPHORE); | ||
220 | |||
221 | sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); | ||
222 | nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); | ||
223 | |||
224 | nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); | ||
225 | nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); | ||
226 | } | ||
227 | |||
214 | if (status) { | 228 | if (status) { |
215 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", | 229 | NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", |
216 | status, chid); | 230 | status, chid); |
@@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) | |||
566 | static void | 580 | static void |
567 | nv50_pgraph_irq_handler(struct drm_device *dev) | 581 | nv50_pgraph_irq_handler(struct drm_device *dev) |
568 | { | 582 | { |
569 | uint32_t status, nsource; | 583 | uint32_t status; |
570 | 584 | ||
571 | status = nv_rd32(dev, NV03_PGRAPH_INTR); | 585 | while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { |
572 | nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); | 586 | uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); |
573 | 587 | ||
574 | if (status & 0x00000001) { | 588 | if (status & 0x00000001) { |
575 | nouveau_pgraph_intr_notify(dev, nsource); | 589 | nouveau_pgraph_intr_notify(dev, nsource); |
576 | status &= ~0x00000001; | 590 | status &= ~0x00000001; |
577 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); | 591 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); |
578 | } | 592 | } |
579 | 593 | ||
580 | if (status & 0x00000010) { | 594 | if (status & 0x00000010) { |
581 | nouveau_pgraph_intr_error(dev, nsource | | 595 | nouveau_pgraph_intr_error(dev, nsource | |
582 | NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); | 596 | NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); |
583 | 597 | ||
584 | status &= ~0x00000010; | 598 | status &= ~0x00000010; |
585 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); | 599 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); |
586 | } | 600 | } |
587 | 601 | ||
588 | if (status & 0x00001000) { | 602 | if (status & 0x00001000) { |
589 | nv_wr32(dev, 0x400500, 0x00000000); | 603 | nv_wr32(dev, 0x400500, 0x00000000); |
590 | nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); | 604 | nv_wr32(dev, NV03_PGRAPH_INTR, |
591 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, | 605 | NV_PGRAPH_INTR_CONTEXT_SWITCH); |
592 | NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); | 606 | nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, |
593 | nv_wr32(dev, 0x400500, 0x00010001); | 607 | NV40_PGRAPH_INTR_EN) & |
608 | ~NV_PGRAPH_INTR_CONTEXT_SWITCH); | ||
609 | nv_wr32(dev, 0x400500, 0x00010001); | ||
594 | 610 | ||
595 | nv50_graph_context_switch(dev); | 611 | nv50_graph_context_switch(dev); |
596 | 612 | ||
597 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; | 613 | status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; |
598 | } | 614 | } |
599 | 615 | ||
600 | if (status & 0x00100000) { | 616 | if (status & 0x00100000) { |
601 | nouveau_pgraph_intr_error(dev, nsource | | 617 | nouveau_pgraph_intr_error(dev, nsource | |
602 | NV03_PGRAPH_NSOURCE_DATA_ERROR); | 618 | NV03_PGRAPH_NSOURCE_DATA_ERROR); |
603 | 619 | ||
604 | status &= ~0x00100000; | 620 | status &= ~0x00100000; |
605 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); | 621 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); |
606 | } | 622 | } |
607 | 623 | ||
608 | if (status & 0x00200000) { | 624 | if (status & 0x00200000) { |
609 | int r; | 625 | int r; |
610 | 626 | ||
611 | nouveau_pgraph_intr_error(dev, nsource | | 627 | nouveau_pgraph_intr_error(dev, nsource | |
612 | NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); | 628 | NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); |
613 | 629 | ||
614 | NV_ERROR(dev, "magic set 1:\n"); | 630 | NV_ERROR(dev, "magic set 1:\n"); |
615 | for (r = 0x408900; r <= 0x408910; r += 4) | 631 | for (r = 0x408900; r <= 0x408910; r += 4) |
616 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 632 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
617 | nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); | 633 | nv_rd32(dev, r)); |
618 | for (r = 0x408e08; r <= 0x408e24; r += 4) | 634 | nv_wr32(dev, 0x408900, |
619 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 635 | nv_rd32(dev, 0x408904) | 0xc0000000); |
620 | nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); | 636 | for (r = 0x408e08; r <= 0x408e24; r += 4) |
621 | 637 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, | |
622 | NV_ERROR(dev, "magic set 2:\n"); | 638 | nv_rd32(dev, r)); |
623 | for (r = 0x409900; r <= 0x409910; r += 4) | 639 | nv_wr32(dev, 0x408e08, |
624 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 640 | nv_rd32(dev, 0x408e08) | 0xc0000000); |
625 | nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); | 641 | |
626 | for (r = 0x409e08; r <= 0x409e24; r += 4) | 642 | NV_ERROR(dev, "magic set 2:\n"); |
627 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); | 643 | for (r = 0x409900; r <= 0x409910; r += 4) |
628 | nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); | 644 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
629 | 645 | nv_rd32(dev, r)); | |
630 | status &= ~0x00200000; | 646 | nv_wr32(dev, 0x409900, |
631 | nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); | 647 | nv_rd32(dev, 0x409904) | 0xc0000000); |
632 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | 648 | for (r = 0x409e08; r <= 0x409e24; r += 4) |
633 | } | 649 | NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, |
650 | nv_rd32(dev, r)); | ||
651 | nv_wr32(dev, 0x409e08, | ||
652 | nv_rd32(dev, 0x409e08) | 0xc0000000); | ||
653 | |||
654 | status &= ~0x00200000; | ||
655 | nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); | ||
656 | nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); | ||
657 | } | ||
634 | 658 | ||
635 | if (status) { | 659 | if (status) { |
636 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); | 660 | NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", |
637 | nv_wr32(dev, NV03_PGRAPH_INTR, status); | 661 | status); |
638 | } | 662 | nv_wr32(dev, NV03_PGRAPH_INTR, status); |
663 | } | ||
639 | 664 | ||
640 | { | 665 | { |
641 | const int isb = (1 << 16) | (1 << 0); | 666 | const int isb = (1 << 16) | (1 << 0); |
642 | 667 | ||
643 | if ((nv_rd32(dev, 0x400500) & isb) != isb) | 668 | if ((nv_rd32(dev, 0x400500) & isb) != isb) |
644 | nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); | 669 | nv_wr32(dev, 0x400500, |
645 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | 670 | nv_rd32(dev, 0x400500) | isb); |
671 | } | ||
646 | } | 672 | } |
647 | 673 | ||
648 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); | 674 | nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); |
675 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); | ||
649 | } | 676 | } |
650 | 677 | ||
651 | static void | 678 | static void |
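
The PGRAPH handler above follows the usual interrupt-dispatch shape: read the status word once, service each recognised bit, acknowledge it by writing that bit back to NV03_PGRAPH_INTR, and finally log and ack whatever remains so an unknown source cannot wedge the IRQ line. A minimal sketch of that shape; the register accessors match the hunk, but the handler table itself is purely illustrative:

    struct pgraph_intr_handler {
        uint32_t bit;
        void (*handle)(struct drm_device *dev);
    };

    static void
    pgraph_intr_dispatch(struct drm_device *dev,
                         const struct pgraph_intr_handler *tbl, int n)
    {
        uint32_t status = nv_rd32(dev, NV03_PGRAPH_INTR);
        int i;

        for (i = 0; i < n; i++) {
            if (!(status & tbl[i].bit))
                continue;
            tbl[i].handle(dev);                         /* service the source */
            nv_wr32(dev, NV03_PGRAPH_INTR, tbl[i].bit); /* ack this bit only */
            status &= ~tbl[i].bit;
        }

        if (status) { /* unknown bits: report, then ack to avoid a storm */
            NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
            nv_wr32(dev, NV03_PGRAPH_INTR, status);
        }
    }
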
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 8f3a12f614ed..2dc09dbd817d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
285 | uint32_t flags, uint64_t phys) | 285 | uint32_t flags, uint64_t phys) |
286 | { | 286 | { |
287 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 287 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
288 | struct nouveau_gpuobj **pgt; | 288 | struct nouveau_gpuobj *pgt; |
289 | unsigned psz, pfl, pages; | 289 | unsigned block; |
290 | 290 | int i; | |
291 | if (virt >= dev_priv->vm_gart_base && | ||
292 | (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) { | ||
293 | psz = 12; | ||
294 | pgt = &dev_priv->gart_info.sg_ctxdma; | ||
295 | pfl = 0x21; | ||
296 | virt -= dev_priv->vm_gart_base; | ||
297 | } else | ||
298 | if (virt >= dev_priv->vm_vram_base && | ||
299 | (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) { | ||
300 | psz = 16; | ||
301 | pgt = dev_priv->vm_vram_pt; | ||
302 | pfl = 0x01; | ||
303 | virt -= dev_priv->vm_vram_base; | ||
304 | } else { | ||
305 | NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n", | ||
306 | virt, virt + size - 1); | ||
307 | return -EINVAL; | ||
308 | } | ||
309 | 291 | ||
310 | pages = size >> psz; | 292 | virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; |
293 | size = (size >> 16) << 1; | ||
294 | |||
295 | phys |= ((uint64_t)flags << 32); | ||
296 | phys |= 1; | ||
297 | if (dev_priv->vram_sys_base) { | ||
298 | phys += dev_priv->vram_sys_base; | ||
299 | phys |= 0x30; | ||
300 | } | ||
311 | 301 | ||
312 | dev_priv->engine.instmem.prepare_access(dev, true); | 302 | dev_priv->engine.instmem.prepare_access(dev, true); |
313 | if (flags & 0x80000000) { | 303 | while (size) { |
314 | while (pages--) { | 304 | unsigned offset_h = upper_32_bits(phys); |
315 | struct nouveau_gpuobj *pt = pgt[virt >> 29]; | 305 | unsigned offset_l = lower_32_bits(phys); |
316 | unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; | 306 | unsigned pte, end; |
307 | |||
308 | for (i = 7; i >= 0; i--) { | ||
309 | block = 1 << (i + 1); | ||
310 | if (size >= block && !(virt & (block - 1))) | ||
311 | break; | ||
312 | } | ||
313 | offset_l |= (i << 7); | ||
317 | 314 | ||
318 | nv_wo32(dev, pt, pte++, 0x00000000); | 315 | phys += block << 15; |
319 | nv_wo32(dev, pt, pte++, 0x00000000); | 316 | size -= block; |
320 | 317 | ||
321 | virt += (1 << psz); | 318 | while (block) { |
322 | } | 319 | pgt = dev_priv->vm_vram_pt[virt >> 14]; |
323 | } else { | 320 | pte = virt & 0x3ffe; |
324 | while (pages--) { | ||
325 | struct nouveau_gpuobj *pt = pgt[virt >> 29]; | ||
326 | unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; | ||
327 | unsigned offset_h = upper_32_bits(phys) & 0xff; | ||
328 | unsigned offset_l = lower_32_bits(phys); | ||
329 | 321 | ||
330 | nv_wo32(dev, pt, pte++, offset_l | pfl); | 322 | end = pte + block; |
331 | nv_wo32(dev, pt, pte++, offset_h | flags); | 323 | if (end > 16384) |
324 | end = 16384; | ||
325 | block -= (end - pte); | ||
326 | virt += (end - pte); | ||
332 | 327 | ||
333 | phys += (1 << psz); | 328 | while (pte < end) { |
334 | virt += (1 << psz); | 329 | nv_wo32(dev, pgt, pte++, offset_l); |
330 | nv_wo32(dev, pgt, pte++, offset_h); | ||
331 | } | ||
335 | } | 332 | } |
336 | } | 333 | } |
337 | dev_priv->engine.instmem.finish_access(dev); | 334 | dev_priv->engine.instmem.finish_access(dev); |
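
In the rewritten nv50_mem_vm_bind_linear above, virt and size are pre-scaled to PTE-word units (two 32-bit words per 64KiB page), and the inner for (i = 7; ...) loop picks the largest power-of-two run, up to 256 pages, that is aligned to the current position and still fits in the remainder; the chosen order is then encoded into bits 7:9 of the low PTE word by offset_l |= (i << 7). The selection step restated on its own, as a sketch (the caller must pass a non-zero, even word count, which the hunk guarantees):

    /* Largest block order i (0..7): a run of 1 << (i + 1) PTE words,
     * i.e. up to 256 large pages, aligned to virt. */
    static int
    nv50_vm_block_order(uint64_t virt, uint32_t size)
    {
        int i;

        for (i = 7; i >= 0; i--) {
            unsigned block = 1 << (i + 1);

            if (size >= block && !(virt & (block - 1)))
                break;
        }
        return i; /* never -1 while size is even and non-zero */
    }
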
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, | |||
356 | void | 353 | void |
357 | nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) | 354 | nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) |
358 | { | 355 | { |
359 | nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); | 356 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
357 | struct nouveau_gpuobj *pgt; | ||
358 | unsigned pages, pte, end; | ||
359 | |||
360 | virt -= dev_priv->vm_vram_base; | ||
361 | pages = (size >> 16) << 1; | ||
362 | |||
363 | dev_priv->engine.instmem.prepare_access(dev, true); | ||
364 | while (pages) { | ||
365 | pgt = dev_priv->vm_vram_pt[virt >> 29]; | ||
366 | pte = (virt & 0x1ffe0000ULL) >> 15; | ||
367 | |||
368 | end = pte + pages; | ||
369 | if (end > 16384) | ||
370 | end = 16384; | ||
371 | pages -= (end - pte); | ||
372 | virt += (end - pte) << 15; | ||
373 | |||
374 | while (pte < end) | ||
375 | nv_wo32(dev, pgt, pte++, 0); | ||
376 | } | ||
377 | dev_priv->engine.instmem.finish_access(dev); | ||
378 | |||
379 | nv_wr32(dev, 0x100c80, 0x00050001); | ||
380 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
381 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
382 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
383 | return; | ||
384 | } | ||
385 | |||
386 | nv_wr32(dev, 0x100c80, 0x00000001); | ||
387 | if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { | ||
388 | NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); | ||
389 | NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); | ||
390 | } | ||
360 | } | 391 | } |
361 | 392 | ||
362 | /* | 393 | /* |
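
After zeroing the PTEs, nv50_mem_vm_unbind kicks the flush register at 0x100c80 twice and polls it each time. Assuming nv_wait(reg, mask, val) spins until (reg & mask) == val or a timeout expires, which matches how the hunk uses it, the sequence reduces to a small helper:

    /* Sketch only, under the stated nv_wait assumption. */
    static int
    nv50_vm_flush_engine(struct drm_device *dev, uint32_t cmd)
    {
        nv_wr32(dev, 0x100c80, cmd); /* kick the flush */
        if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
            NV_ERROR(dev, "vm flush timeout, 0x100c80 = 0x%08x\n",
                     nv_rd32(dev, 0x100c80));
            return -EBUSY;
        }
        return 0;
    }

called first with 0x00050001 and then with 0x00000001, returning early if the first flush never completes, exactly as the hunk does.
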
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 6c66a34b6345..9537f3e30115 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) | |||
34 | { | 34 | { |
35 | struct drm_device *dev = chan->dev; | 35 | struct drm_device *dev = chan->dev; |
36 | struct nouveau_bo *ntfy = NULL; | 36 | struct nouveau_bo *ntfy = NULL; |
37 | uint32_t flags; | ||
37 | int ret; | 38 | int ret; |
38 | 39 | ||
39 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? | 40 | if (nouveau_vram_notify) |
40 | TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, | 41 | flags = TTM_PL_FLAG_VRAM; |
42 | else | ||
43 | flags = TTM_PL_FLAG_TT; | ||
44 | |||
45 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, | ||
41 | 0, 0x0000, false, true, &ntfy); | 46 | 0, 0x0000, false, true, &ntfy); |
42 | if (ret) | 47 | if (ret) |
43 | return ret; | 48 | return ret; |
44 | 49 | ||
45 | ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); | 50 | ret = nouveau_bo_pin(ntfy, flags); |
46 | if (ret) | 51 | if (ret) |
47 | goto out_err; | 52 | goto out_err; |
48 | 53 | ||
@@ -56,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) | |||
56 | 61 | ||
57 | chan->notifier_bo = ntfy; | 62 | chan->notifier_bo = ntfy; |
58 | out_err: | 63 | out_err: |
59 | if (ret) { | 64 | if (ret) |
60 | mutex_lock(&dev->struct_mutex); | 65 | drm_gem_object_unreference_unlocked(ntfy->gem); |
61 | drm_gem_object_unreference(ntfy->gem); | ||
62 | mutex_unlock(&dev->struct_mutex); | ||
63 | } | ||
64 | 66 | ||
65 | return ret; | 67 | return ret; |
66 | } | 68 | } |
@@ -76,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) | |||
76 | nouveau_bo_unmap(chan->notifier_bo); | 78 | nouveau_bo_unmap(chan->notifier_bo); |
77 | mutex_lock(&dev->struct_mutex); | 79 | mutex_lock(&dev->struct_mutex); |
78 | nouveau_bo_unpin(chan->notifier_bo); | 80 | nouveau_bo_unpin(chan->notifier_bo); |
79 | drm_gem_object_unreference(chan->notifier_bo->gem); | ||
80 | mutex_unlock(&dev->struct_mutex); | 81 | mutex_unlock(&dev->struct_mutex); |
82 | drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); | ||
81 | nouveau_mem_takedown(&chan->notifier_heap); | 83 | nouveau_mem_takedown(&chan->notifier_heap); |
82 | } | 84 | } |
83 | 85 | ||
@@ -128,6 +130,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
128 | target = NV_DMA_TARGET_PCI; | 130 | target = NV_DMA_TARGET_PCI; |
129 | } else { | 131 | } else { |
130 | target = NV_DMA_TARGET_AGP; | 132 | target = NV_DMA_TARGET_AGP; |
133 | if (dev_priv->card_type >= NV_50) | ||
134 | offset += dev_priv->vm_gart_base; | ||
131 | } | 135 | } |
132 | } else { | 136 | } else { |
133 | NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", | 137 | NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", |
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 6c2cf81716df..e7c100ba63a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
@@ -885,11 +885,12 @@ int | |||
885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | 885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, |
886 | struct nouveau_gpuobj **gpuobj_ret) | 886 | struct nouveau_gpuobj **gpuobj_ret) |
887 | { | 887 | { |
888 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | 888 | struct drm_nouveau_private *dev_priv; |
889 | struct nouveau_gpuobj *gpuobj; | 889 | struct nouveau_gpuobj *gpuobj; |
890 | 890 | ||
891 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) | 891 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) |
892 | return -EINVAL; | 892 | return -EINVAL; |
893 | dev_priv = chan->dev->dev_private; | ||
893 | 894 | ||
894 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | 895 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); |
895 | if (!gpuobj) | 896 | if (!gpuobj) |
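
The nouveau_gpuobj_sw_new change above, like the nouveau_sgdma_clear and nv50_crtc_destroy changes further down, fixes a single bug class: an initializer dereferenced a pointer before the NULL check meant to guard it. The fix is purely a matter of ordering:

    struct drm_nouveau_private *dev_priv; /* declare only */

    if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
        return -EINVAL;
    dev_priv = chan->dev->dev_private; /* safe: chan validated above */
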
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 251f1b3b38b9..aa9b310e41be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h | |||
@@ -99,6 +99,7 @@ | |||
99 | * the card will hang early on in the X init process. | 99 | * the card will hang early on in the X init process. |
100 | */ | 100 | */ |
101 | # define NV_PMC_ENABLE_UNK13 (1<<13) | 101 | # define NV_PMC_ENABLE_UNK13 (1<<13) |
102 | #define NV40_PMC_GRAPH_UNITS 0x00001540 | ||
102 | #define NV40_PMC_BACKLIGHT 0x000015f0 | 103 | #define NV40_PMC_BACKLIGHT 0x000015f0 |
103 | # define NV40_PMC_BACKLIGHT_MASK 0x001f0000 | 104 | # define NV40_PMC_BACKLIGHT_MASK 0x001f0000 |
104 | #define NV40_PMC_1700 0x00001700 | 105 | #define NV40_PMC_1700 0x00001700 |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4c7f1e403e80..ed1590577b6c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -54,11 +54,12 @@ static void | |||
54 | nouveau_sgdma_clear(struct ttm_backend *be) | 54 | nouveau_sgdma_clear(struct ttm_backend *be) |
55 | { | 55 | { |
56 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; | 56 | struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; |
57 | struct drm_device *dev = nvbe->dev; | 57 | struct drm_device *dev; |
58 | |||
59 | NV_DEBUG(nvbe->dev, "\n"); | ||
60 | 58 | ||
61 | if (nvbe && nvbe->pages) { | 59 | if (nvbe && nvbe->pages) { |
60 | dev = nvbe->dev; | ||
61 | NV_DEBUG(dev, "\n"); | ||
62 | |||
62 | if (nvbe->bound) | 63 | if (nvbe->bound) |
63 | be->func->unbind(be); | 64 | be->func->unbind(be); |
64 | 65 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index f2d0187ba152..a4851af5b05e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
310 | static unsigned int | 310 | static unsigned int |
311 | nouveau_vga_set_decode(void *priv, bool state) | 311 | nouveau_vga_set_decode(void *priv, bool state) |
312 | { | 312 | { |
313 | struct drm_device *dev = priv; | ||
314 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
315 | |||
316 | if (dev_priv->chipset >= 0x40) | ||
317 | nv_wr32(dev, 0x88054, state); | ||
318 | else | ||
319 | nv_wr32(dev, 0x1854, state); | ||
320 | |||
313 | if (state) | 321 | if (state) |
314 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 322 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
315 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 323 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev) | |||
427 | if (ret) | 435 | if (ret) |
428 | goto out_timer; | 436 | goto out_timer; |
429 | 437 | ||
430 | /* PGRAPH */ | 438 | if (nouveau_noaccel) |
431 | ret = engine->graph.init(dev); | 439 | engine->graph.accel_blocked = true; |
432 | if (ret) | 440 | else { |
433 | goto out_fb; | 441 | /* PGRAPH */ |
442 | ret = engine->graph.init(dev); | ||
443 | if (ret) | ||
444 | goto out_fb; | ||
434 | 445 | ||
435 | /* PFIFO */ | 446 | /* PFIFO */ |
436 | ret = engine->fifo.init(dev); | 447 | ret = engine->fifo.init(dev); |
437 | if (ret) | 448 | if (ret) |
438 | goto out_graph; | 449 | goto out_graph; |
450 | } | ||
439 | 451 | ||
440 | /* this calls irq_preinstall, registers the irq handler and | 452 | /* this calls irq_preinstall, registers the irq handler and |
441 | * calls irq_postinstall | 453 | * calls irq_postinstall |
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev) | |||
479 | out_irq: | 491 | out_irq: |
480 | drm_irq_uninstall(dev); | 492 | drm_irq_uninstall(dev); |
481 | out_fifo: | 493 | out_fifo: |
482 | engine->fifo.takedown(dev); | 494 | if (!nouveau_noaccel) |
495 | engine->fifo.takedown(dev); | ||
483 | out_graph: | 496 | out_graph: |
484 | engine->graph.takedown(dev); | 497 | if (!nouveau_noaccel) |
498 | engine->graph.takedown(dev); | ||
485 | out_fb: | 499 | out_fb: |
486 | engine->fb.takedown(dev); | 500 | engine->fb.takedown(dev); |
487 | out_timer: | 501 | out_timer: |
@@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
518 | dev_priv->channel = NULL; | 532 | dev_priv->channel = NULL; |
519 | } | 533 | } |
520 | 534 | ||
521 | engine->fifo.takedown(dev); | 535 | if (!nouveau_noaccel) { |
522 | engine->graph.takedown(dev); | 536 | engine->fifo.takedown(dev); |
537 | engine->graph.takedown(dev); | ||
538 | } | ||
523 | engine->fb.takedown(dev); | 539 | engine->fb.takedown(dev); |
524 | engine->timer.takedown(dev); | 540 | engine->timer.takedown(dev); |
525 | engine->mc.takedown(dev); | 541 | engine->mc.takedown(dev); |
@@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
817 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: | 833 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: |
818 | getparam->value = dev_priv->vm_vram_base; | 834 | getparam->value = dev_priv->vm_vram_base; |
819 | break; | 835 | break; |
836 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | ||
837 | /* NV40 and NV50 versions are quite different, but register | ||
838 | * address is the same. User is supposed to know the card | ||
839 | * family anyway... */ | ||
840 | if (dev_priv->chipset >= 0x40) { | ||
841 | getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); | ||
842 | break; | ||
843 | } | ||
844 | /* FALLTHRU */ | ||
820 | default: | 845 | default: |
821 | NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); | 846 | NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); |
822 | return -EINVAL; | 847 | return -EINVAL; |
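
The new NOUVEAU_GETPARAM_GRAPH_UNITS case only answers on chipset >= 0x40, where the NV40_PMC_GRAPH_UNITS register (0x1540, added to nouveau_reg.h above) exists; older chips deliberately fall through to the default -EINVAL arm, which is what the /* FALLTHRU */ marker documents. In isolation the shape is:

    switch (getparam->param) {
    case NOUVEAU_GETPARAM_GRAPH_UNITS:
        if (dev_priv->chipset >= 0x40) {
            getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
            break;
        }
        /* FALLTHRU: pre-NV40 has no such register */
    default:
        NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
        return -EINVAL;
    }
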
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index d2f143ed97c1..a1d1ebb073d9 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c | |||
@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
926 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); | 926 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); |
927 | nv_crtc->cursor.show(nv_crtc, true); | 927 | nv_crtc->cursor.show(nv_crtc, true); |
928 | out: | 928 | out: |
929 | mutex_lock(&dev->struct_mutex); | 929 | drm_gem_object_unreference_unlocked(gem); |
930 | drm_gem_object_unreference(gem); | ||
931 | mutex_unlock(&dev->struct_mutex); | ||
932 | return ret; | 930 | return ret; |
933 | } | 931 | } |
934 | 932 | ||
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index d0e038d28948..1d73b15d70da 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c | |||
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
119 | struct drm_connector *connector) | 119 | struct drm_connector *connector) |
120 | { | 120 | { |
121 | struct drm_device *dev = encoder->dev; | 121 | struct drm_device *dev = encoder->dev; |
122 | uint8_t saved_seq1, saved_pi, saved_rpc1; | 122 | uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; |
123 | uint8_t saved_palette0[3], saved_palette_mask; | 123 | uint8_t saved_palette0[3], saved_palette_mask; |
124 | uint32_t saved_rtest_ctrl, saved_rgen_ctrl; | 124 | uint32_t saved_rtest_ctrl, saved_rgen_ctrl; |
125 | int i; | 125 | int i; |
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, | |||
135 | /* only implemented for head A for now */ | 135 | /* only implemented for head A for now */ |
136 | NVSetOwner(dev, 0); | 136 | NVSetOwner(dev, 0); |
137 | 137 | ||
138 | saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX); | ||
139 | NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80); | ||
140 | |||
138 | saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); | 141 | saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); |
139 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); | 142 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); |
140 | 143 | ||
@@ -203,6 +206,7 @@ out: | |||
203 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); | 206 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); |
204 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); | 207 | NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); |
205 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); | 208 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); |
209 | NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); | ||
206 | 210 | ||
207 | if (blue == 0x18) { | 211 | if (blue == 0x18) { |
208 | NV_INFO(dev, "Load detected on head A\n"); | 212 | NV_INFO(dev, "Load detected on head A\n"); |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index d910873c1368..fd01caabd5c3 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "nouveau_dma.h" | 27 | #include "nouveau_dma.h" |
28 | #include "nouveau_fbcon.h" | 28 | #include "nouveau_fbcon.h" |
29 | 29 | ||
30 | static void | 30 | void |
31 | nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | 31 | nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) |
32 | { | 32 | { |
33 | struct nouveau_fbcon_par *par = info->par; | 33 | struct nouveau_fbcon_par *par = info->par; |
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
54 | FIRE_RING(chan); | 54 | FIRE_RING(chan); |
55 | } | 55 | } |
56 | 56 | ||
57 | static void | 57 | void |
58 | nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 58 | nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
59 | { | 59 | { |
60 | struct nouveau_fbcon_par *par = info->par; | 60 | struct nouveau_fbcon_par *par = info->par; |
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
88 | FIRE_RING(chan); | 88 | FIRE_RING(chan); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void | 91 | void |
92 | nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | 92 | nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
93 | { | 93 | { |
94 | struct nouveau_fbcon_par *par = info->par; | 94 | struct nouveau_fbcon_par *par = info->par; |
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info) | |||
307 | 307 | ||
308 | FIRE_RING(chan); | 308 | FIRE_RING(chan); |
309 | 309 | ||
310 | info->fbops->fb_fillrect = nv04_fbcon_fillrect; | ||
311 | info->fbops->fb_copyarea = nv04_fbcon_copyarea; | ||
312 | info->fbops->fb_imageblit = nv04_fbcon_imageblit; | ||
313 | return 0; | 310 | return 0; |
314 | } | 311 | } |
315 | 312 | ||
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 58b917c3341b..21ac6e49b6ee 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder) | |||
579 | nouveau_encoder(encoder)->restore.output); | 579 | nouveau_encoder(encoder)->restore.output); |
580 | 580 | ||
581 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); | 581 | nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); |
582 | |||
583 | nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; | ||
582 | } | 584 | } |
583 | 585 | ||
584 | static int nv17_tv_create_resources(struct drm_encoder *encoder, | 586 | static int nv17_tv_create_resources(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 40b7360841f8..cfabeb974a56 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) | |||
298 | static void | 298 | static void |
299 | nv50_crtc_destroy(struct drm_crtc *crtc) | 299 | nv50_crtc_destroy(struct drm_crtc *crtc) |
300 | { | 300 | { |
301 | struct drm_device *dev = crtc->dev; | 301 | struct drm_device *dev; |
302 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 302 | struct nouveau_crtc *nv_crtc; |
303 | |||
304 | NV_DEBUG_KMS(dev, "\n"); | ||
305 | 303 | ||
306 | if (!crtc) | 304 | if (!crtc) |
307 | return; | 305 | return; |
308 | 306 | ||
307 | dev = crtc->dev; | ||
308 | nv_crtc = nouveau_crtc(crtc); | ||
309 | |||
310 | NV_DEBUG_KMS(dev, "\n"); | ||
311 | |||
309 | drm_crtc_cleanup(&nv_crtc->base); | 312 | drm_crtc_cleanup(&nv_crtc->base); |
310 | 313 | ||
311 | nv50_cursor_fini(nv_crtc); | 314 | nv50_cursor_fini(nv_crtc); |
@@ -355,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
355 | nv_crtc->cursor.show(nv_crtc, true); | 358 | nv_crtc->cursor.show(nv_crtc, true); |
356 | 359 | ||
357 | out: | 360 | out: |
358 | mutex_lock(&dev->struct_mutex); | 361 | drm_gem_object_unreference_unlocked(gem); |
359 | drm_gem_object_unreference(gem); | ||
360 | mutex_unlock(&dev->struct_mutex); | ||
361 | return ret; | 362 | return ret; |
362 | } | 363 | } |
363 | 364 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index e4f279ee61cf..0f57cdf7ccb2 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
@@ -3,7 +3,7 @@ | |||
3 | #include "nouveau_dma.h" | 3 | #include "nouveau_dma.h" |
4 | #include "nouveau_fbcon.h" | 4 | #include "nouveau_fbcon.h" |
5 | 5 | ||
6 | static void | 6 | void |
7 | nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 7 | nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
8 | { | 8 | { |
9 | struct nouveau_fbcon_par *par = info->par; | 9 | struct nouveau_fbcon_par *par = info->par; |
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
46 | FIRE_RING(chan); | 46 | FIRE_RING(chan); |
47 | } | 47 | } |
48 | 48 | ||
49 | static void | 49 | void |
50 | nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | 50 | nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) |
51 | { | 51 | { |
52 | struct nouveau_fbcon_par *par = info->par; | 52 | struct nouveau_fbcon_par *par = info->par; |
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) | |||
81 | FIRE_RING(chan); | 81 | FIRE_RING(chan); |
82 | } | 82 | } |
83 | 83 | ||
84 | static void | 84 | void |
85 | nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | 85 | nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
86 | { | 86 | { |
87 | struct nouveau_fbcon_par *par = info->par; | 87 | struct nouveau_fbcon_par *par = info->par; |
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info) | |||
262 | OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + | 262 | OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + |
263 | dev_priv->vm_vram_base); | 263 | dev_priv->vm_vram_base); |
264 | 264 | ||
265 | info->fbops->fb_fillrect = nv50_fbcon_fillrect; | ||
266 | info->fbops->fb_copyarea = nv50_fbcon_copyarea; | ||
267 | info->fbops->fb_imageblit = nv50_fbcon_imageblit; | ||
268 | return 0; | 265 | return 0; |
269 | } | 266 | } |
270 | 267 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 32b244bcb482..204a79ff10f4 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
@@ -317,17 +317,20 @@ void | |||
317 | nv50_fifo_destroy_context(struct nouveau_channel *chan) | 317 | nv50_fifo_destroy_context(struct nouveau_channel *chan) |
318 | { | 318 | { |
319 | struct drm_device *dev = chan->dev; | 319 | struct drm_device *dev = chan->dev; |
320 | struct nouveau_gpuobj_ref *ramfc = chan->ramfc; | ||
320 | 321 | ||
321 | NV_DEBUG(dev, "ch%d\n", chan->id); | 322 | NV_DEBUG(dev, "ch%d\n", chan->id); |
322 | 323 | ||
323 | nouveau_gpuobj_ref_del(dev, &chan->ramfc); | 324 | /* This will ensure the channel is seen as disabled. */ |
324 | nouveau_gpuobj_ref_del(dev, &chan->cache); | 325 | chan->ramfc = NULL; |
325 | |||
326 | nv50_fifo_channel_disable(dev, chan->id, false); | 326 | nv50_fifo_channel_disable(dev, chan->id, false); |
327 | 327 | ||
328 | /* Dummy channel, also used on ch 127 */ | 328 | /* Dummy channel, also used on ch 127 */ |
329 | if (chan->id == 0) | 329 | if (chan->id == 0) |
330 | nv50_fifo_channel_disable(dev, 127, false); | 330 | nv50_fifo_channel_disable(dev, 127, false); |
331 | |||
332 | nouveau_gpuobj_ref_del(dev, &ramfc); | ||
333 | nouveau_gpuobj_ref_del(dev, &chan->cache); | ||
331 | } | 334 | } |
332 | 335 | ||
333 | int | 336 | int |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 20319e59d368..6d504801b514 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev) | |||
165 | uint32_t inst; | 165 | uint32_t inst; |
166 | int i; | 166 | int i; |
167 | 167 | ||
168 | /* Be sure we're not in the middle of a context switch or bad things | ||
169 | * will happen, such as unloading the wrong pgraph context. | ||
170 | */ | ||
171 | if (!nv_wait(0x400300, 0x00000001, 0x00000000)) | ||
172 | NV_ERROR(dev, "Ctxprog is still running\n"); | ||
173 | |||
168 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); | 174 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); |
169 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) | 175 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) |
170 | return NULL; | 176 | return NULL; |
@@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan) | |||
275 | int | 281 | int |
276 | nv50_graph_unload_context(struct drm_device *dev) | 282 | nv50_graph_unload_context(struct drm_device *dev) |
277 | { | 283 | { |
278 | uint32_t inst, fifo = nv_rd32(dev, 0x400500); | 284 | uint32_t inst; |
279 | 285 | ||
280 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); | 286 | inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); |
281 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) | 287 | if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) |
@@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev) | |||
283 | inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; | 289 | inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; |
284 | 290 | ||
285 | nouveau_wait_for_idle(dev); | 291 | nouveau_wait_for_idle(dev); |
286 | nv_wr32(dev, 0x400500, fifo & ~1); | ||
287 | nv_wr32(dev, 0x400784, inst); | 292 | nv_wr32(dev, 0x400784, inst); |
288 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); | 293 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); |
289 | nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); | 294 | nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); |
290 | nouveau_wait_for_idle(dev); | 295 | nouveau_wait_for_idle(dev); |
291 | nv_wr32(dev, 0x400500, fifo); | ||
292 | 296 | ||
293 | nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); | 297 | nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); |
294 | return 0; | 298 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 94400f777e7f..f0dc4e36ef05 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev) | |||
76 | for (i = 0x1700; i <= 0x1710; i += 4) | 76 | for (i = 0x1700; i <= 0x1710; i += 4) |
77 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); | 77 | priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); |
78 | 78 | ||
79 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) | ||
80 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; | ||
81 | else | ||
82 | dev_priv->vram_sys_base = 0; | ||
83 | |||
79 | /* Reserve the last MiB of VRAM, we should probably try to avoid | 84 | /* Reserve the last MiB of VRAM, we should probably try to avoid |
80 | * setting up the below tables over the top of the VBIOS image at | 85 | * setting up the below tables over the top of the VBIOS image at |
81 | * some point. | 86 | * some point. |
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev) | |||
172 | * We map the entire fake channel into the start of the PRAMIN BAR | 177 | * We map the entire fake channel into the start of the PRAMIN BAR |
173 | */ | 178 | */ |
174 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, | 179 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, |
175 | 0, &priv->pramin_pt); | 180 | 0, &priv->pramin_pt); |
176 | if (ret) | 181 | if (ret) |
177 | return ret; | 182 | return ret; |
178 | 183 | ||
179 | for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) { | 184 | v = c_offset | 1; |
180 | if (v < (c_offset + c_size)) | 185 | if (dev_priv->vram_sys_base) { |
181 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); | 186 | v += dev_priv->vram_sys_base; |
182 | else | 187 | v |= 0x30; |
183 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); | 188 | } |
189 | |||
190 | i = 0; | ||
191 | while (v < dev_priv->vram_sys_base + c_offset + c_size) { | ||
192 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); | ||
193 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); | ||
194 | v += 0x1000; | ||
195 | i += 8; | ||
196 | } | ||
197 | |||
198 | while (i < pt_size) { | ||
199 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000); | ||
184 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); | 200 | BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); |
201 | i += 8; | ||
185 | } | 202 | } |
186 | 203 | ||
187 | BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); | 204 | BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); |
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
416 | { | 433 | { |
417 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 434 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
418 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; | 435 | struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; |
419 | uint32_t pte, pte_end, vram; | 436 | struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; |
437 | uint32_t pte, pte_end; | ||
438 | uint64_t vram; | ||
420 | 439 | ||
421 | if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) | 440 | if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) |
422 | return -EINVAL; | 441 | return -EINVAL; |
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
424 | NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", | 443 | NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", |
425 | gpuobj->im_pramin->start, gpuobj->im_pramin->size); | 444 | gpuobj->im_pramin->start, gpuobj->im_pramin->size); |
426 | 445 | ||
427 | pte = (gpuobj->im_pramin->start >> 12) << 3; | 446 | pte = (gpuobj->im_pramin->start >> 12) << 1; |
428 | pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; | 447 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; |
429 | vram = gpuobj->im_backing_start; | 448 | vram = gpuobj->im_backing_start; |
430 | 449 | ||
431 | NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", | 450 | NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", |
432 | gpuobj->im_pramin->start, pte, pte_end); | 451 | gpuobj->im_pramin->start, pte, pte_end); |
433 | NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); | 452 | NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); |
434 | 453 | ||
454 | vram |= 1; | ||
455 | if (dev_priv->vram_sys_base) { | ||
456 | vram += dev_priv->vram_sys_base; | ||
457 | vram |= 0x30; | ||
458 | } | ||
459 | |||
435 | dev_priv->engine.instmem.prepare_access(dev, true); | 460 | dev_priv->engine.instmem.prepare_access(dev, true); |
436 | while (pte < pte_end) { | 461 | while (pte < pte_end) { |
437 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); | 462 | nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); |
438 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); | 463 | nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); |
439 | |||
440 | pte += 8; | ||
441 | vram += NV50_INSTMEM_PAGE_SIZE; | 464 | vram += NV50_INSTMEM_PAGE_SIZE; |
442 | } | 465 | } |
443 | dev_priv->engine.instmem.finish_access(dev); | 466 | dev_priv->engine.instmem.finish_access(dev); |
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) | |||
470 | if (gpuobj->im_bound == 0) | 493 | if (gpuobj->im_bound == 0) |
471 | return -EINVAL; | 494 | return -EINVAL; |
472 | 495 | ||
473 | pte = (gpuobj->im_pramin->start >> 12) << 3; | 496 | pte = (gpuobj->im_pramin->start >> 12) << 1; |
474 | pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; | 497 | pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; |
475 | 498 | ||
476 | dev_priv->engine.instmem.prepare_access(dev, true); | 499 | dev_priv->engine.instmem.prepare_access(dev, true); |
477 | while (pte < pte_end) { | 500 | while (pte < pte_end) { |
478 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); | 501 | nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); |
479 | nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); | 502 | nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); |
480 | pte += 8; | ||
481 | } | 503 | } |
482 | dev_priv->engine.instmem.finish_access(dev); | 504 | dev_priv->engine.instmem.finish_access(dev); |
483 | 505 | ||
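
Both instmem hunks above make the same pair of changes: pte becomes a 32-bit-word index (<< 1, two nv_wo32 writes per entry) instead of a byte offset (<< 3, stride 8, divided by 4 at every write), and vram is widened to 64 bits so the present/target flags in the low word can coexist with upper address bits in the high word. Assuming nv_wo32(dev, obj, idx, val) stores the idx'th 32-bit word of obj, the way these hunks use it, one entry is written as:

    uint64_t entry = vram | 1;           /* bit 0: page present */

    if (dev_priv->vram_sys_base) {       /* the 0xaa/0xac IGPs above */
        entry += dev_priv->vram_sys_base;
        entry |= 0x30;                   /* target system memory */
    }
    nv_wo32(dev, pramin_pt, pte++, lower_32_bits(entry));
    nv_wo32(dev, pramin_pt, pte++, upper_32_bits(entry));
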
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index ecf1936b8224..c2fff543b06f 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) | |||
101 | struct nouveau_encoder *nvenc = nouveau_encoder(enc); | 101 | struct nouveau_encoder *nvenc = nouveau_encoder(enc); |
102 | 102 | ||
103 | if (nvenc == nv_encoder || | 103 | if (nvenc == nv_encoder || |
104 | nvenc->disconnect != nv50_sor_disconnect || | ||
104 | nvenc->dcb->or != nv_encoder->dcb->or) | 105 | nvenc->dcb->or != nv_encoder->dcb->or) |
105 | continue; | 106 | continue; |
106 | 107 | ||
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 5982321be4d5..1c02d23f6fcc 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig | |||
@@ -1,10 +1,14 @@ | |||
1 | config DRM_RADEON_KMS | 1 | config DRM_RADEON_KMS |
2 | bool "Enable modesetting on radeon by default" | 2 | bool "Enable modesetting on radeon by default - NEW DRIVER" |
3 | depends on DRM_RADEON | 3 | depends on DRM_RADEON |
4 | help | 4 | help |
5 | Choose this option if you want kernel modesetting enabled by default, | 5 | Choose this option if you want kernel modesetting enabled by default. |
6 | and you have a new enough userspace to support this. Running old | 6 | |
7 | userspaces with this enabled will cause pain. | 7 | This is a completely new driver. It's only part of the existing drm |
8 | for compatibility reasons. It requires an entirely different graphics | ||
9 | stack above it and works very differently from the old drm stack. | ||
10 | i.e. don't enable this unless you know what you are doing; it may | ||
11 | cause issues or bugs compared to the previous userspace driver stack. | ||
8 | 12 | ||
9 | When kernel modesetting is enabled the IOCTL of radeon/drm | 13 | When kernel modesetting is enabled the IOCTL of radeon/drm |
10 | driver are considered as invalid and an error message is printed | 14 | driver are considered as invalid and an error message is printed |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index bd0c843872b2..d75788feac6c 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <asm/unaligned.h> | ||
27 | 28 | ||
28 | #define ATOM_DEBUG | 29 | #define ATOM_DEBUG |
29 | 30 | ||
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | |||
212 | case ATOM_ARG_PS: | 213 | case ATOM_ARG_PS: |
213 | idx = U8(*ptr); | 214 | idx = U8(*ptr); |
214 | (*ptr)++; | 215 | (*ptr)++; |
215 | val = le32_to_cpu(ctx->ps[idx]); | 216 | /* get_unaligned_le32 avoids unaligned accesses from atombios |
217 | * tables, noticed on a DEC Alpha. */ | ||
218 | val = get_unaligned_le32((u32 *)&ctx->ps[idx]); | ||
216 | if (print) | 219 | if (print) |
217 | DEBUG("PS[0x%02X,0x%04X]", idx, val); | 220 | DEBUG("PS[0x%02X,0x%04X]", idx, val); |
218 | break; | 221 | break; |
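
get_unaligned_le32 (from asm/unaligned.h, now included at the top of atom.c) reads a little-endian 32-bit value without requiring 4-byte alignment, which the old load through the u32 pointer ctx->ps did; AtomBIOS tables pack parameters at arbitrary offsets, so strict-alignment machines such as the Alpha faulted. The portable equivalent, spelled out byte by byte as a sketch:

    /* Alignment-safe little-endian 32-bit read. */
    static inline uint32_t
    read_le32(const uint8_t *p)
    {
        return (uint32_t)p[0]
             | ((uint32_t)p[1] << 8)
             | ((uint32_t)p[2] << 16)
             | ((uint32_t)p[3] << 24);
    }
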
@@ -641,7 +644,6 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) | |||
641 | SDEBUG(" count: %d\n", count); | 644 | SDEBUG(" count: %d\n", count); |
642 | if (arg == ATOM_UNIT_MICROSEC) | 645 | if (arg == ATOM_UNIT_MICROSEC) |
643 | udelay(count); | 646 | udelay(count); |
644 | // schedule_timeout_uninterruptible(usecs_to_jiffies(count)); | ||
645 | else | 647 | else |
646 | schedule_timeout_uninterruptible(msecs_to_jiffies(count)); | 648 | schedule_timeout_uninterruptible(msecs_to_jiffies(count)); |
647 | } | 649 | } |
@@ -879,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | |||
879 | uint8_t attr = U8((*ptr)++), shift; | 881 | uint8_t attr = U8((*ptr)++), shift; |
880 | uint32_t saved, dst; | 882 | uint32_t saved, dst; |
881 | int dptr = *ptr; | 883 | int dptr = *ptr; |
882 | attr &= 0x38; | ||
883 | attr |= atom_def_dst[attr >> 3] << 6; | ||
884 | SDEBUG(" dst: "); | 884 | SDEBUG(" dst: "); |
885 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 885 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
886 | shift = atom_get_src(ctx, attr, ptr); | 886 | shift = atom_get_src(ctx, attr, ptr); |
@@ -895,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) | |||
895 | uint8_t attr = U8((*ptr)++), shift; | 895 | uint8_t attr = U8((*ptr)++), shift; |
896 | uint32_t saved, dst; | 896 | uint32_t saved, dst; |
897 | int dptr = *ptr; | 897 | int dptr = *ptr; |
898 | attr &= 0x38; | ||
899 | attr |= atom_def_dst[attr >> 3] << 6; | ||
900 | SDEBUG(" dst: "); | 898 | SDEBUG(" dst: "); |
901 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 899 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
902 | shift = atom_get_src(ctx, attr, ptr); | 900 | shift = atom_get_src(ctx, attr, ptr); |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 0b6f2cef1c52..8a133bda00a2 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -336,11 +336,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, | |||
336 | union aux_channel_transaction args; | 336 | union aux_channel_transaction args; |
337 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); | 337 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); |
338 | unsigned char *base; | 338 | unsigned char *base; |
339 | int retry_count = 0; | ||
339 | 340 | ||
340 | memset(&args, 0, sizeof(args)); | 341 | memset(&args, 0, sizeof(args)); |
341 | 342 | ||
342 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; | 343 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; |
343 | 344 | ||
345 | retry: | ||
344 | memcpy(base, req_bytes, num_bytes); | 346 | memcpy(base, req_bytes, num_bytes); |
345 | 347 | ||
346 | args.v1.lpAuxRequest = 0; | 348 | args.v1.lpAuxRequest = 0; |
@@ -353,10 +355,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, | |||
353 | 355 | ||
354 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 356 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
355 | 357 | ||
356 | if (args.v1.ucReplyStatus) { | 358 | if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) { |
357 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", | 359 | if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10) |
360 | goto retry; | ||
361 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", | ||
358 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], | 362 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], |
359 | chan->rec.i2c_id, args.v1.ucReplyStatus); | 363 | chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count); |
360 | return false; | 364 | return false; |
361 | } | 365 | } |
362 | 366 | ||
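
radeon_process_aux_ch now retries the whole transaction when the reply status is 0x20, an AUX defer from the sink, and no data came back, capping it at ten attempts so a dead link cannot spin forever. The goto form above is idiomatic kernel style; the same bounded retry as a loop, with do_aux_once standing in (hypothetically) for the memcpy plus atom_execute_table body:

    /* Sketch: bounded retry on AUX defer (reply status 0x20). */
    int tries = 0;
    u8 reply;

    do {
        reply = do_aux_once(chan, req_bytes, num_bytes); /* hypothetical */
    } while (reply == 0x20 && ++tries < 10);

    if (reply)
        return false; /* still deferring or erroring after 10 tries */
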
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index f9a83358aa5a..c52290197292 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1970,6 +1970,13 @@ int r600_resume(struct radeon_device *rdev) | |||
1970 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | 1970 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
1971 | return r; | 1971 | return r; |
1972 | } | 1972 | } |
1973 | |||
1974 | r = r600_audio_init(rdev); | ||
1975 | if (r) { | ||
1976 | DRM_ERROR("radeon: audio resume failed\n"); | ||
1977 | return r; | ||
1978 | } | ||
1979 | |||
1973 | return r; | 1980 | return r; |
1974 | } | 1981 | } |
1975 | 1982 | ||
@@ -1977,6 +1984,7 @@ int r600_suspend(struct radeon_device *rdev) | |||
1977 | { | 1984 | { |
1978 | int r; | 1985 | int r; |
1979 | 1986 | ||
1987 | r600_audio_fini(rdev); | ||
1980 | /* FIXME: we should wait for ring to be empty */ | 1988 | /* FIXME: we should wait for ring to be empty */ |
1981 | r600_cp_stop(rdev); | 1989 | r600_cp_stop(rdev); |
1982 | rdev->cp.ready = false; | 1990 | rdev->cp.ready = false; |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index d7f6909afc01..387abaa275a4 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -270,6 +270,7 @@ void r600_audio_fini(struct radeon_device *rdev) | |||
270 | return; | 270 | return; |
271 | 271 | ||
272 | del_timer(&rdev->audio_timer); | 272 | del_timer(&rdev->audio_timer); |
273 | WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); | ||
273 | 274 | ||
274 | r600_audio_engine_enable(rdev, false); | 275 | r600_audio_engine_enable(rdev, false); |
275 | } | 276 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 6f8619cd1a0d..93783b15c81d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -213,6 +213,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
213 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 213 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
214 | } | 214 | } |
215 | 215 | ||
216 | /* Asrock RS600 board lists the DVI port as HDMI */ | ||
217 | if ((dev->pdev->device == 0x7941) && | ||
218 | (dev->pdev->subsystem_vendor == 0x1849) && | ||
219 | (dev->pdev->subsystem_device == 0x7941)) { | ||
220 | if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
221 | (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) | ||
222 | *connector_type = DRM_MODE_CONNECTOR_DVID; | ||
223 | } | ||
224 | |||
216 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ | 225 | /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ |
217 | if ((dev->pdev->device == 0x7941) && | 226 | if ((dev->pdev->device == 0x7941) && |
218 | (dev->pdev->subsystem_vendor == 0x147b) && | 227 | (dev->pdev->subsystem_vendor == 0x147b) && |
@@ -294,6 +303,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
294 | *connector_type = DRM_MODE_CONNECTOR_DVID; | 303 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
295 | } | 304 | } |
296 | 305 | ||
306 | /* XFX Pine Group device rv730 reports no VGA DDC lines | ||
307 | * even though they are wired up to record 0x93 | ||
308 | */ | ||
309 | if ((dev->pdev->device == 0x9498) && | ||
310 | (dev->pdev->subsystem_vendor == 0x1682) && | ||
311 | (dev->pdev->subsystem_device == 0x2452)) { | ||
312 | struct radeon_device *rdev = dev->dev_private; | ||
313 | *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); | ||
314 | } | ||
297 | return true; | 315 | return true; |
298 | } | 316 | } |
299 | 317 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 4ddfd4b5bc51..7932dc4d6b90 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
65 | if (r) { | 65 | if (r) { |
66 | goto out_cleanup; | 66 | goto out_cleanup; |
67 | } | 67 | } |
68 | start_jiffies = jiffies; | 68 | |
69 | for (i = 0; i < n; i++) { | 69 | /* r100 doesn't have a dma engine, so skip the test */ |
70 | r = radeon_fence_create(rdev, &fence); | 70 | if (rdev->asic->copy_dma) { |
71 | if (r) { | 71 | |
72 | goto out_cleanup; | 72 | start_jiffies = jiffies; |
73 | for (i = 0; i < n; i++) { | ||
74 | r = radeon_fence_create(rdev, &fence); | ||
75 | if (r) { | ||
76 | goto out_cleanup; | ||
77 | } | ||
78 | |||
79 | r = radeon_copy_dma(rdev, saddr, daddr, | ||
80 | size / RADEON_GPU_PAGE_SIZE, fence); | ||
81 | |||
82 | if (r) { | ||
83 | goto out_cleanup; | ||
84 | } | ||
85 | r = radeon_fence_wait(fence, false); | ||
86 | if (r) { | ||
87 | goto out_cleanup; | ||
88 | } | ||
89 | radeon_fence_unref(&fence); | ||
73 | } | 90 | } |
74 | r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); | 91 | end_jiffies = jiffies; |
75 | if (r) { | 92 | time = end_jiffies - start_jiffies; |
76 | goto out_cleanup; | 93 | time = jiffies_to_msecs(time); |
94 | if (time > 0) { | ||
95 | i = ((n * size) >> 10) / time; | ||
96 | printk(KERN_INFO "radeon: dma %u bo moves of %ukb from" | ||
97 | " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n", | ||
98 | n, size >> 10, | ||
99 | sdomain, ddomain, time, | ||
100 | i, i * 1000, (i * 1000) / 1024); | ||
77 | } | 101 | } |
78 | r = radeon_fence_wait(fence, false); | ||
79 | if (r) { | ||
80 | goto out_cleanup; | ||
81 | } | ||
82 | radeon_fence_unref(&fence); | ||
83 | } | ||
84 | end_jiffies = jiffies; | ||
85 | time = end_jiffies - start_jiffies; | ||
86 | time = jiffies_to_msecs(time); | ||
87 | if (time > 0) { | ||
88 | i = ((n * size) >> 10) / time; | ||
89 | printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d" | ||
90 | " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10, | ||
91 | sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024); | ||
92 | } | 102 | } |
103 | |||
93 | start_jiffies = jiffies; | 104 | start_jiffies = jiffies; |
94 | for (i = 0; i < n; i++) { | 105 | for (i = 0; i < n; i++) { |
95 | r = radeon_fence_create(rdev, &fence); | 106 | r = radeon_fence_create(rdev, &fence); |
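
The reworked benchmark wraps the entire DMA pass in if (rdev->asic->copy_dma): radeon's per-asic function table marks an absent engine with a NULL pointer, and r100-class parts have no DMA engine, so the pass is skipped instead of calling through NULL. Checking the pointer before use is the whole fix:

    /* Sketch: a NULL entry in the asic table means the engine is absent. */
    if (rdev->asic->copy_dma) {
        r = radeon_copy_dma(rdev, saddr, daddr,
                            size / RADEON_GPU_PAGE_SIZE, fence);
        if (r)
            goto out_cleanup;
    }
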
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 6e9e7b59d67e..ee0083f982d8 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -578,14 +578,15 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect | |||
578 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 578 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
579 | struct drm_encoder *encoder; | 579 | struct drm_encoder *encoder; |
580 | struct drm_encoder_helper_funcs *encoder_funcs; | 580 | struct drm_encoder_helper_funcs *encoder_funcs; |
581 | bool dret; | 581 | bool dret = false; |
582 | enum drm_connector_status ret = connector_status_disconnected; | 582 | enum drm_connector_status ret = connector_status_disconnected; |
583 | 583 | ||
584 | encoder = radeon_best_single_encoder(connector); | 584 | encoder = radeon_best_single_encoder(connector); |
585 | if (!encoder) | 585 | if (!encoder) |
586 | ret = connector_status_disconnected; | 586 | ret = connector_status_disconnected; |
587 | 587 | ||
588 | dret = radeon_ddc_probe(radeon_connector); | 588 | if (radeon_connector->ddc_bus) |
589 | dret = radeon_ddc_probe(radeon_connector); | ||
589 | if (dret) { | 590 | if (dret) { |
590 | if (radeon_connector->edid) { | 591 | if (radeon_connector->edid) { |
591 | kfree(radeon_connector->edid); | 592 | kfree(radeon_connector->edid); |
@@ -734,9 +735,10 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
734 | struct drm_mode_object *obj; | 735 | struct drm_mode_object *obj; |
735 | int i; | 736 | int i; |
736 | enum drm_connector_status ret = connector_status_disconnected; | 737 | enum drm_connector_status ret = connector_status_disconnected; |
737 | bool dret; | 738 | bool dret = false; |
738 | 739 | ||
739 | dret = radeon_ddc_probe(radeon_connector); | 740 | if (radeon_connector->ddc_bus) |
741 | dret = radeon_ddc_probe(radeon_connector); | ||
740 | if (dret) { | 742 | if (dret) { |
741 | if (radeon_connector->edid) { | 743 | if (radeon_connector->edid) { |
742 | kfree(radeon_connector->edid); | 744 | kfree(radeon_connector->edid); |
@@ -766,7 +768,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
766 | * connected and the DVI port disconnected. If the edid doesn't | 768 | * connected and the DVI port disconnected. If the edid doesn't |
767 | * say HDMI, vice versa. | 769 | * say HDMI, vice versa. |
768 | */ | 770 | */ |
769 | if (radeon_connector->shared_ddc && connector_status_connected) { | 771 | if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { |
770 | struct drm_device *dev = connector->dev; | 772 | struct drm_device *dev = connector->dev; |
771 | struct drm_connector *list_connector; | 773 | struct drm_connector *list_connector; |
772 | struct radeon_connector *list_radeon_connector; | 774 | struct radeon_connector *list_radeon_connector; |
@@ -1044,8 +1046,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1044 | return; | 1046 | return; |
1045 | } | 1047 | } |
1046 | if (radeon_connector->ddc_bus && i2c_bus->valid) { | 1048 | if (radeon_connector->ddc_bus && i2c_bus->valid) { |
1047 | if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, | 1049 | if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) { |
1048 | sizeof(struct radeon_i2c_bus_rec)) == 0) { | ||
1049 | radeon_connector->shared_ddc = true; | 1050 | radeon_connector->shared_ddc = true; |
1050 | shared_ddc = true; | 1051 | shared_ddc = true; |
1051 | } | 1052 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index e9d085021c1f..70ba02ed7723 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
194 | } | 194 | } |
195 | radeon_bo_list_unreserve(&parser->validated); | 195 | radeon_bo_list_unreserve(&parser->validated); |
196 | for (i = 0; i < parser->nrelocs; i++) { | 196 | for (i = 0; i < parser->nrelocs; i++) { |
197 | if (parser->relocs[i].gobj) { | 197 | if (parser->relocs[i].gobj) |
198 | mutex_lock(&parser->rdev->ddev->struct_mutex); | 198 | drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); |
199 | drm_gem_object_unreference(parser->relocs[i].gobj); | ||
200 | mutex_unlock(&parser->rdev->ddev->struct_mutex); | ||
201 | } | ||
202 | } | 199 | } |
203 | kfree(parser->track); | 200 | kfree(parser->track); |
204 | kfree(parser->relocs); | 201 | kfree(parser->relocs); |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 7ecf5e1b39c1..b7023fff89eb 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -186,17 +186,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, | |||
186 | unpin: | 186 | unpin: |
187 | if (radeon_crtc->cursor_bo) { | 187 | if (radeon_crtc->cursor_bo) { |
188 | radeon_gem_object_unpin(radeon_crtc->cursor_bo); | 188 | radeon_gem_object_unpin(radeon_crtc->cursor_bo); |
189 | mutex_lock(&crtc->dev->struct_mutex); | 189 | drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); |
190 | drm_gem_object_unreference(radeon_crtc->cursor_bo); | ||
191 | mutex_unlock(&crtc->dev->struct_mutex); | ||
192 | } | 190 | } |
193 | 191 | ||
194 | radeon_crtc->cursor_bo = obj; | 192 | radeon_crtc->cursor_bo = obj; |
195 | return 0; | 193 | return 0; |
196 | fail: | 194 | fail: |
197 | mutex_lock(&crtc->dev->struct_mutex); | 195 | drm_gem_object_unreference_unlocked(obj); |
198 | drm_gem_object_unreference(obj); | ||
199 | mutex_unlock(&crtc->dev->struct_mutex); | ||
200 | 196 | ||
201 | return 0; | 197 | return 0; |
202 | } | 198 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index e35cc3da8f22..ba8d806dcf39 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -310,7 +310,7 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
310 | DRM_INFO(" %s\n", connector_names[connector->connector_type]); | 310 | DRM_INFO(" %s\n", connector_names[connector->connector_type]); |
311 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) | 311 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) |
312 | DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); | 312 | DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); |
313 | if (radeon_connector->ddc_bus) | 313 | if (radeon_connector->ddc_bus) { |
314 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", | 314 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", |
315 | radeon_connector->ddc_bus->rec.mask_clk_reg, | 315 | radeon_connector->ddc_bus->rec.mask_clk_reg, |
316 | radeon_connector->ddc_bus->rec.mask_data_reg, | 316 | radeon_connector->ddc_bus->rec.mask_data_reg, |
@@ -320,6 +320,15 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
320 | radeon_connector->ddc_bus->rec.en_data_reg, | 320 | radeon_connector->ddc_bus->rec.en_data_reg, |
321 | radeon_connector->ddc_bus->rec.y_clk_reg, | 321 | radeon_connector->ddc_bus->rec.y_clk_reg, |
322 | radeon_connector->ddc_bus->rec.y_data_reg); | 322 | radeon_connector->ddc_bus->rec.y_data_reg); |
323 | } else { | ||
324 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || | ||
325 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || | ||
326 | connector->connector_type == DRM_MODE_CONNECTOR_DVID || | ||
327 | connector->connector_type == DRM_MODE_CONNECTOR_DVIA || | ||
328 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || | ||
329 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) | ||
330 | DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n"); | ||
331 | } | ||
323 | DRM_INFO(" Encoders:\n"); | 332 | DRM_INFO(" Encoders:\n"); |
324 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 333 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
325 | radeon_encoder = to_radeon_encoder(encoder); | 334 | radeon_encoder = to_radeon_encoder(encoder); |
@@ -801,11 +810,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
801 | if (fb->fbdev) | 810 | if (fb->fbdev) |
802 | radeonfb_remove(dev, fb); | 811 | radeonfb_remove(dev, fb); |
803 | 812 | ||
804 | if (radeon_fb->obj) { | 813 | if (radeon_fb->obj) |
805 | mutex_lock(&dev->struct_mutex); | 814 | drm_gem_object_unreference_unlocked(radeon_fb->obj); |
806 | drm_gem_object_unreference(radeon_fb->obj); | ||
807 | mutex_unlock(&dev->struct_mutex); | ||
808 | } | ||
809 | drm_framebuffer_cleanup(fb); | 815 | drm_framebuffer_cleanup(fb); |
810 | kfree(radeon_fb); | 816 | kfree(radeon_fb); |
811 | } | 817 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index f6d20cee5705..88f4d8669d84 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -106,9 +106,10 @@ | |||
106 | * 1.29- R500 3D cmd buffer support | 106 | * 1.29- R500 3D cmd buffer support |
107 | * 1.30- Add support for occlusion queries | 107 | * 1.30- Add support for occlusion queries |
108 | * 1.31- Add support for num Z pipes from GET_PARAM | 108 | * 1.31- Add support for num Z pipes from GET_PARAM |
109 | * 1.32- fixes for rv740 setup | ||
109 | */ | 110 | */ |
110 | #define DRIVER_MAJOR 1 | 111 | #define DRIVER_MAJOR 1 |
111 | #define DRIVER_MINOR 31 | 112 | #define DRIVER_MINOR 32 |
112 | #define DRIVER_PATCHLEVEL 0 | 113 | #define DRIVER_PATCHLEVEL 0 |
113 | 114 | ||
114 | enum radeon_cp_microcode_version { | 115 | enum radeon_cp_microcode_version { |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index c39ddda13840..cda112cc7a6c 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -59,7 +59,7 @@ static struct fb_ops radeonfb_ops = { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | /** | 61 | /** |
62 | * Curretly it is assumed that the old framebuffer is reused. | 62 | * Currently it is assumed that the old framebuffer is reused. |
63 | * | 63 | * |
64 | * LOCKING | 64 | * LOCKING |
65 | * caller should hold the mode config lock. | 65 | * caller should hold the mode config lock. |
@@ -243,7 +243,7 @@ int radeonfb_create(struct drm_device *dev, | |||
243 | if (ret) | 243 | if (ret) |
244 | goto out_unref; | 244 | goto out_unref; |
245 | 245 | ||
246 | memset_io(fbptr, 0xff, aligned_size); | 246 | memset_io(fbptr, 0x0, aligned_size); |
247 | 247 | ||
248 | strcpy(info->fix.id, "radeondrmfb"); | 248 | strcpy(info->fix.id, "radeondrmfb"); |
249 | 249 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index db8e9a355a01..ef92d147d8f0 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
69 | if (r != -ERESTARTSYS) | 69 | if (r != -ERESTARTSYS) |
70 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", | 70 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", |
71 | size, initial_domain, alignment, r); | 71 | size, initial_domain, alignment, r); |
72 | mutex_lock(&rdev->ddev->struct_mutex); | 72 | drm_gem_object_unreference_unlocked(gobj); |
73 | drm_gem_object_unreference(gobj); | ||
74 | mutex_unlock(&rdev->ddev->struct_mutex); | ||
75 | return r; | 73 | return r; |
76 | } | 74 | } |
77 | gobj->driver_private = robj; | 75 | gobj->driver_private = robj; |
@@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
202 | } | 200 | } |
203 | r = drm_gem_handle_create(filp, gobj, &handle); | 201 | r = drm_gem_handle_create(filp, gobj, &handle); |
204 | if (r) { | 202 | if (r) { |
205 | mutex_lock(&dev->struct_mutex); | 203 | drm_gem_object_unreference_unlocked(gobj); |
206 | drm_gem_object_unreference(gobj); | ||
207 | mutex_unlock(&dev->struct_mutex); | ||
208 | return r; | 204 | return r; |
209 | } | 205 | } |
210 | mutex_lock(&dev->struct_mutex); | 206 | drm_gem_object_handle_unreference_unlocked(gobj); |
211 | drm_gem_object_handle_unreference(gobj); | ||
212 | mutex_unlock(&dev->struct_mutex); | ||
213 | args->handle = handle; | 207 | args->handle = handle; |
214 | return 0; | 208 | return 0; |
215 | } | 209 | } |
@@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
236 | 230 | ||
237 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); | 231 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); |
238 | 232 | ||
239 | mutex_lock(&dev->struct_mutex); | 233 | drm_gem_object_unreference_unlocked(gobj); |
240 | drm_gem_object_unreference(gobj); | ||
241 | mutex_unlock(&dev->struct_mutex); | ||
242 | return r; | 234 | return r; |
243 | } | 235 | } |
244 | 236 | ||
@@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
255 | } | 247 | } |
256 | robj = gobj->driver_private; | 248 | robj = gobj->driver_private; |
257 | args->addr_ptr = radeon_bo_mmap_offset(robj); | 249 | args->addr_ptr = radeon_bo_mmap_offset(robj); |
258 | mutex_lock(&dev->struct_mutex); | 250 | drm_gem_object_unreference_unlocked(gobj); |
259 | drm_gem_object_unreference(gobj); | ||
260 | mutex_unlock(&dev->struct_mutex); | ||
261 | return 0; | 251 | return 0; |
262 | } | 252 | } |
263 | 253 | ||
@@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
288 | default: | 278 | default: |
289 | break; | 279 | break; |
290 | } | 280 | } |
291 | mutex_lock(&dev->struct_mutex); | 281 | drm_gem_object_unreference_unlocked(gobj); |
292 | drm_gem_object_unreference(gobj); | ||
293 | mutex_unlock(&dev->struct_mutex); | ||
294 | return r; | 282 | return r; |
295 | } | 283 | } |
296 | 284 | ||
@@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
311 | /* callback hw specific functions if any */ | 299 | /* callback hw specific functions if any */ |
312 | if (robj->rdev->asic->ioctl_wait_idle) | 300 | if (robj->rdev->asic->ioctl_wait_idle) |
313 | robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); | 301 | robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); |
314 | mutex_lock(&dev->struct_mutex); | 302 | drm_gem_object_unreference_unlocked(gobj); |
315 | drm_gem_object_unreference(gobj); | ||
316 | mutex_unlock(&dev->struct_mutex); | ||
317 | return r; | 303 | return r; |
318 | } | 304 | } |
319 | 305 | ||
@@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
331 | return -EINVAL; | 317 | return -EINVAL; |
332 | robj = gobj->driver_private; | 318 | robj = gobj->driver_private; |
333 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); | 319 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
334 | mutex_lock(&dev->struct_mutex); | 320 | drm_gem_object_unreference_unlocked(gobj); |
335 | drm_gem_object_unreference(gobj); | ||
336 | mutex_unlock(&dev->struct_mutex); | ||
337 | return r; | 321 | return r; |
338 | } | 322 | } |
339 | 323 | ||
@@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | |||
356 | radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); | 340 | radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); |
357 | radeon_bo_unreserve(rbo); | 341 | radeon_bo_unreserve(rbo); |
358 | out: | 342 | out: |
359 | mutex_lock(&dev->struct_mutex); | 343 | drm_gem_object_unreference_unlocked(gobj); |
360 | drm_gem_object_unreference(gobj); | ||
361 | mutex_unlock(&dev->struct_mutex); | ||
362 | return r; | 344 | return r; |
363 | } | 345 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 38fa14429320..e50513a62735 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -130,6 +130,8 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) | |||
130 | if (tmp == NULL) { | 130 | if (tmp == NULL) { |
131 | return; | 131 | return; |
132 | } | 132 | } |
133 | if (!tmp->fence->emited) | ||
134 | radeon_fence_unref(&tmp->fence); | ||
133 | mutex_lock(&rdev->ib_pool.mutex); | 135 | mutex_lock(&rdev->ib_pool.mutex); |
134 | tmp->free = true; | 136 | tmp->free = true; |
135 | mutex_unlock(&rdev->ib_pool.mutex); | 137 | mutex_unlock(&rdev->ib_pool.mutex); |
@@ -222,6 +224,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) | |||
222 | } | 224 | } |
223 | mutex_lock(&rdev->ib_pool.mutex); | 225 | mutex_lock(&rdev->ib_pool.mutex); |
224 | radeon_ib_bogus_cleanup(rdev); | 226 | radeon_ib_bogus_cleanup(rdev); |
227 | |||
225 | if (rdev->ib_pool.robj) { | 228 | if (rdev->ib_pool.robj) { |
226 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); | 229 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); |
227 | if (likely(r == 0)) { | 230 | if (likely(r == 0)) { |
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 44b6d66b0ab3..32971b8272cf 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
@@ -1972,7 +1972,7 @@ static void radeon_apply_surface_regs(int surf_index, | |||
1972 | * Note that refcount can be at most 2, since during a free refcount=3 | 1972 | * Note that refcount can be at most 2, since during a free refcount=3 |
1973 | * might mean we have to allocate a new surface which might not always | 1973 | * might mean we have to allocate a new surface which might not always |
1974 | * be available. | 1974 | * be available. |
1975 | * For example : we allocate three contigous surfaces ABC. If B is | 1975 | * For example : we allocate three contiguous surfaces ABC. If B is |
1976 | * freed, we suddenly need two surfaces to store A and C, which might | 1976 | * freed, we suddenly need two surfaces to store A and C, which might |
1977 | * not always be available. | 1977 | * not always be available. |
1978 | */ | 1978 | */ |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1157e0f758fa..43c5ab34b634 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -409,7 +409,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
409 | new_mem->mem_type == TTM_PL_SYSTEM) || | 409 | new_mem->mem_type == TTM_PL_SYSTEM) || |
410 | (old_mem->mem_type == TTM_PL_SYSTEM && | 410 | (old_mem->mem_type == TTM_PL_SYSTEM && |
411 | new_mem->mem_type == TTM_PL_TT)) { | 411 | new_mem->mem_type == TTM_PL_TT)) { |
412 | /* bind is enought */ | 412 | /* bind is enough */ |
413 | radeon_move_null(bo, new_mem); | 413 | radeon_move_null(bo, new_mem); |
414 | return 0; | 414 | return 0; |
415 | } | 415 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 1a3e909b7bba..c7320ce4567d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, | |||
1020 | struct ttm_mem_reg *mem) | 1020 | struct ttm_mem_reg *mem) |
1021 | { | 1021 | { |
1022 | int i; | 1022 | int i; |
1023 | struct drm_mm_node *node = mem->mm_node; | ||
1024 | |||
1025 | if (node && placement->lpfn != 0 && | ||
1026 | (node->start < placement->fpfn || | ||
1027 | node->start + node->size > placement->lpfn)) | ||
1028 | return -1; | ||
1023 | 1029 | ||
1024 | for (i = 0; i < placement->num_placement; i++) { | 1030 | for (i = 0; i < placement->num_placement; i++) { |
1025 | if ((placement->placement[i] & mem->placement & | 1031 | if ((placement->placement[i] & mem->placement & |
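
The ttm_bo_mem_compat() change teaches the compatibility test about placement ranges: if the caller asked for a pfn window (lpfn != 0) and the buffer's current drm_mm node falls outside [fpfn, lpfn), the memory is treated as incompatible even when the placement flags match. A self-contained sketch of that range test, with illustrative stand-in types:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for drm_mm_node and the placement range. */
struct mm_node { unsigned long start, size; };
struct placement { unsigned long fpfn, lpfn; };

/*
 * A placement with lpfn != 0 constrains the node to [fpfn, lpfn);
 * a node straddling that window is incompatible regardless of its
 * caching/placement flags.
 */
static bool node_in_range(const struct mm_node *node,
			  const struct placement *p)
{
	if (!node || p->lpfn == 0)
		return true;	/* no range restriction requested */
	return node->start >= p->fpfn &&
	       node->start + node->size <= p->lpfn;
}

int main(void)
{
	struct mm_node n = { .start = 100, .size = 50 };
	struct placement p = { .fpfn = 0, .lpfn = 128 };

	printf("compatible: %d\n", node_in_range(&n, &p));	/* 0: 150 > 128 */
	return 0;
}
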
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3f72fe1ddf8b..5ca37a58a98c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -424,7 +424,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, | |||
424 | 424 | ||
425 | /* | 425 | /* |
426 | * We need to use vmap to get the desired page protection | 426 | * We need to use vmap to get the desired page protection |
427 | * or to make the buffer object look contigous. | 427 | * or to make the buffer object look contiguous. |
428 | */ | 428 | */ |
429 | prot = (mem->placement & TTM_PL_FLAG_CACHED) ? | 429 | prot = (mem->placement & TTM_PL_FLAG_CACHED) ? |
430 | PAGE_KERNEL : | 430 | PAGE_KERNEL : |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e2123af7775a..a759170763bb 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate); | |||
196 | 196 | ||
197 | #ifdef CONFIG_X86 | 197 | #ifdef CONFIG_X86 |
198 | static inline int ttm_tt_set_page_caching(struct page *p, | 198 | static inline int ttm_tt_set_page_caching(struct page *p, |
199 | enum ttm_caching_state c_state) | 199 | enum ttm_caching_state c_old, |
200 | enum ttm_caching_state c_new) | ||
200 | { | 201 | { |
201 | int ret = 0; | 202 | int ret = 0; |
202 | 203 | ||
203 | if (PageHighMem(p)) | 204 | if (PageHighMem(p)) |
204 | return 0; | 205 | return 0; |
205 | 206 | ||
206 | if (get_page_memtype(p) != -1) { | 207 | if (c_old != tt_cached) { |
207 | /* p isn't in the default caching state, set it to | 208 | /* p isn't in the default caching state, set it to |
208 | * writeback first to free its current memtype. */ | 209 | * writeback first to free its current memtype. */ |
209 | 210 | ||
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p, | |||
212 | return ret; | 213 | return ret; |
213 | } | 214 | } |
214 | 215 | ||
215 | if (c_state == tt_wc) | 216 | if (c_new == tt_wc) |
216 | ret = set_memory_wc((unsigned long) page_address(p), 1); | 217 | ret = set_memory_wc((unsigned long) page_address(p), 1); |
217 | else if (c_state == tt_uncached) | 218 | else if (c_new == tt_uncached) |
218 | ret = set_pages_uc(p, 1); | 219 | ret = set_pages_uc(p, 1); |
219 | 220 | ||
220 | return ret; | 221 | return ret; |
221 | } | 222 | } |
222 | #else /* CONFIG_X86 */ | 223 | #else /* CONFIG_X86 */ |
223 | static inline int ttm_tt_set_page_caching(struct page *p, | 224 | static inline int ttm_tt_set_page_caching(struct page *p, |
224 | enum ttm_caching_state c_state) | 225 | enum ttm_caching_state c_old, |
226 | enum ttm_caching_state c_new) | ||
225 | { | 227 | { |
226 | return 0; | 228 | return 0; |
227 | } | 229 | } |
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm, | |||
254 | for (i = 0; i < ttm->num_pages; ++i) { | 256 | for (i = 0; i < ttm->num_pages; ++i) { |
255 | cur_page = ttm->pages[i]; | 257 | cur_page = ttm->pages[i]; |
256 | if (likely(cur_page != NULL)) { | 258 | if (likely(cur_page != NULL)) { |
257 | ret = ttm_tt_set_page_caching(cur_page, c_state); | 259 | ret = ttm_tt_set_page_caching(cur_page, |
260 | ttm->caching_state, | ||
261 | c_state); | ||
258 | if (unlikely(ret != 0)) | 262 | if (unlikely(ret != 0)) |
259 | goto out_err; | 263 | goto out_err; |
260 | } | 264 | } |
@@ -268,7 +272,7 @@ out_err: | |||
268 | for (j = 0; j < i; ++j) { | 272 | for (j = 0; j < i; ++j) { |
269 | cur_page = ttm->pages[j]; | 273 | cur_page = ttm->pages[j]; |
270 | if (likely(cur_page != NULL)) { | 274 | if (likely(cur_page != NULL)) { |
271 | (void)ttm_tt_set_page_caching(cur_page, | 275 | (void)ttm_tt_set_page_caching(cur_page, c_state, |
272 | ttm->caching_state); | 276 | ttm->caching_state); |
273 | } | 277 | } |
274 | } | 278 | } |
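
ttm_tt_set_page_caching() now takes the old caching state as an explicit argument instead of querying get_page_memtype(), so the decision to reset a page to writeback first depends only on the requested transition; note how the out_err rollback above calls the helper with the arguments swapped. A small sketch of that transition-with-rollback shape, under invented names:

#include <stdio.h>

enum caching { CACHED, WC, UNCACHED };

/* Transition one page; c_old tells us whether a reset to the default
 * (cached/writeback) state is required before applying c_new. */
static int set_page_caching(int page, enum caching c_old, enum caching c_new)
{
	if (c_old != CACHED)
		printf("page %d: reset to writeback first\n", page);
	printf("page %d: %d -> %d\n", page, c_old, c_new);
	return page == 2 ? -1 : 0;	/* simulate a failure on page 2 */
}

int main(void)
{
	enum caching cur = CACHED, want = WC;
	int i, j;

	for (i = 0; i < 4; ++i) {
		if (set_page_caching(i, cur, want) != 0)
			goto rollback;
	}
	return 0;

rollback:
	/* Undo: arguments swapped, mirroring the out_err path above. */
	for (j = 0; j < i; ++j)
		(void)set_page_caching(j, want, cur);
	return 1;
}
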
@@ -476,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) | |||
476 | void *from_virtual; | 480 | void *from_virtual; |
477 | void *to_virtual; | 481 | void *to_virtual; |
478 | int i; | 482 | int i; |
479 | int ret; | 483 | int ret = -ENOMEM; |
480 | 484 | ||
481 | if (ttm->page_flags & TTM_PAGE_FLAG_USER) { | 485 | if (ttm->page_flags & TTM_PAGE_FLAG_USER) { |
482 | ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, | 486 | ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, |
@@ -495,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) | |||
495 | 499 | ||
496 | for (i = 0; i < ttm->num_pages; ++i) { | 500 | for (i = 0; i < ttm->num_pages; ++i) { |
497 | from_page = read_mapping_page(swap_space, i, NULL); | 501 | from_page = read_mapping_page(swap_space, i, NULL); |
498 | if (IS_ERR(from_page)) | 502 | if (IS_ERR(from_page)) { |
503 | ret = PTR_ERR(from_page); | ||
499 | goto out_err; | 504 | goto out_err; |
505 | } | ||
500 | to_page = __ttm_tt_get_page(ttm, i); | 506 | to_page = __ttm_tt_get_page(ttm, i); |
501 | if (unlikely(to_page == NULL)) | 507 | if (unlikely(to_page == NULL)) |
502 | goto out_err; | 508 | goto out_err; |
@@ -519,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) | |||
519 | return 0; | 525 | return 0; |
520 | out_err: | 526 | out_err: |
521 | ttm_tt_free_alloced_pages(ttm); | 527 | ttm_tt_free_alloced_pages(ttm); |
522 | return -ENOMEM; | 528 | return ret; |
523 | } | 529 | } |
524 | 530 | ||
525 | int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | 531 | int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) |
@@ -531,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | |||
531 | void *from_virtual; | 537 | void *from_virtual; |
532 | void *to_virtual; | 538 | void *to_virtual; |
533 | int i; | 539 | int i; |
540 | int ret = -ENOMEM; | ||
534 | 541 | ||
535 | BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); | 542 | BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); |
536 | BUG_ON(ttm->caching_state != tt_cached); | 543 | BUG_ON(ttm->caching_state != tt_cached); |
@@ -553,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | |||
553 | 0); | 560 | 0); |
554 | if (unlikely(IS_ERR(swap_storage))) { | 561 | if (unlikely(IS_ERR(swap_storage))) { |
555 | printk(KERN_ERR "Failed allocating swap storage.\n"); | 562 | printk(KERN_ERR "Failed allocating swap storage.\n"); |
556 | return -ENOMEM; | 563 | return PTR_ERR(swap_storage); |
557 | } | 564 | } |
558 | } else | 565 | } else |
559 | swap_storage = persistant_swap_storage; | 566 | swap_storage = persistant_swap_storage; |
@@ -565,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | |||
565 | if (unlikely(from_page == NULL)) | 572 | if (unlikely(from_page == NULL)) |
566 | continue; | 573 | continue; |
567 | to_page = read_mapping_page(swap_space, i, NULL); | 574 | to_page = read_mapping_page(swap_space, i, NULL); |
568 | if (unlikely(to_page == NULL)) | 575 | if (unlikely(IS_ERR(to_page))) { |
576 | ret = PTR_ERR(to_page); | ||
569 | goto out_err; | 577 | goto out_err; |
570 | 578 | } | |
571 | preempt_disable(); | 579 | preempt_disable(); |
572 | from_virtual = kmap_atomic(from_page, KM_USER0); | 580 | from_virtual = kmap_atomic(from_page, KM_USER0); |
573 | to_virtual = kmap_atomic(to_page, KM_USER1); | 581 | to_virtual = kmap_atomic(to_page, KM_USER1); |
@@ -591,5 +599,5 @@ out_err: | |||
591 | if (!persistant_swap_storage) | 599 | if (!persistant_swap_storage) |
592 | fput(swap_storage); | 600 | fput(swap_storage); |
593 | 601 | ||
594 | return -ENOMEM; | 602 | return ret; |
595 | } | 603 | } |
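
The remaining ttm_tt.c hunks replace hard-coded -ENOMEM returns with the error actually encountered: read_mapping_page() and the swap storage setup return ERR_PTR-encoded pointers, and the swap paths now forward PTR_ERR(...) to the caller. A userspace sketch of the ERR_PTR/PTR_ERR idiom itself, reimplemented locally rather than taken from kernel headers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* A lookup that can fail for reasons other than out-of-memory. */
static void *read_page(int want_fail)
{
	if (want_fail)
		return ERR_PTR(-EIO);	/* a read error, not -ENOMEM */
	static int page;
	return &page;
}

static int swap_in(void)
{
	int ret = -ENOMEM;	/* default, as in the patched functions */
	void *page = read_page(1);

	if (IS_ERR(page)) {
		ret = PTR_ERR(page);	/* propagate the real cause */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("swap_in() = %d (expect %d)\n", swap_in(), -EIO);
	return 0;
}
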
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index a6e8f687fa64..0c9c0811f42d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
348 | */ | 348 | */ |
349 | 349 | ||
350 | DRM_INFO("It appears like vesafb is loaded. " | 350 | DRM_INFO("It appears like vesafb is loaded. " |
351 | "Ignore above error if any. Entering stealth mode.\n"); | 351 | "Ignore above error if any.\n"); |
352 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); | 352 | ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); |
353 | if (unlikely(ret != 0)) { | 353 | if (unlikely(ret != 0)) { |
354 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); | 354 | DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); |
355 | goto out_no_device; | 355 | goto out_no_device; |
356 | } | 356 | } |
357 | vmw_kms_init(dev_priv); | ||
358 | vmw_overlay_init(dev_priv); | ||
359 | } else { | ||
360 | ret = vmw_request_device(dev_priv); | ||
361 | if (unlikely(ret != 0)) | ||
362 | goto out_no_device; | ||
363 | vmw_kms_init(dev_priv); | ||
364 | vmw_overlay_init(dev_priv); | ||
365 | vmw_fb_init(dev_priv); | ||
366 | } | 357 | } |
358 | ret = vmw_request_device(dev_priv); | ||
359 | if (unlikely(ret != 0)) | ||
360 | goto out_no_device; | ||
361 | vmw_kms_init(dev_priv); | ||
362 | vmw_overlay_init(dev_priv); | ||
363 | vmw_fb_init(dev_priv); | ||
367 | 364 | ||
368 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | 365 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; |
369 | register_pm_notifier(&dev_priv->pm_nb); | 366 | register_pm_notifier(&dev_priv->pm_nb); |
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
406 | 403 | ||
407 | unregister_pm_notifier(&dev_priv->pm_nb); | 404 | unregister_pm_notifier(&dev_priv->pm_nb); |
408 | 405 | ||
409 | if (!dev_priv->stealth) { | 406 | vmw_fb_close(dev_priv); |
410 | vmw_fb_close(dev_priv); | 407 | vmw_kms_close(dev_priv); |
411 | vmw_kms_close(dev_priv); | 408 | vmw_overlay_close(dev_priv); |
412 | vmw_overlay_close(dev_priv); | 409 | vmw_release_device(dev_priv); |
413 | vmw_release_device(dev_priv); | 410 | if (dev_priv->stealth) |
414 | pci_release_regions(dev->pdev); | ||
415 | } else { | ||
416 | vmw_kms_close(dev_priv); | ||
417 | vmw_overlay_close(dev_priv); | ||
418 | pci_release_region(dev->pdev, 2); | 411 | pci_release_region(dev->pdev, 2); |
419 | } | 412 | else |
413 | pci_release_regions(dev->pdev); | ||
414 | |||
420 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 415 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
421 | drm_irq_uninstall(dev_priv->dev); | 416 | drm_irq_uninstall(dev_priv->dev); |
422 | if (dev->devname == vmw_devname) | 417 | if (dev->devname == vmw_devname) |
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev, | |||
585 | int ret = 0; | 580 | int ret = 0; |
586 | 581 | ||
587 | DRM_INFO("Master set.\n"); | 582 | DRM_INFO("Master set.\n"); |
588 | if (dev_priv->stealth) { | ||
589 | ret = vmw_request_device(dev_priv); | ||
590 | if (unlikely(ret != 0)) | ||
591 | return ret; | ||
592 | } | ||
593 | 583 | ||
594 | if (active) { | 584 | if (active) { |
595 | BUG_ON(active != &dev_priv->fbdev_master); | 585 | BUG_ON(active != &dev_priv->fbdev_master); |
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev, | |||
649 | 639 | ||
650 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | 640 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
651 | 641 | ||
652 | if (dev_priv->stealth) { | ||
653 | ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
654 | if (unlikely(ret != 0)) | ||
655 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | ||
656 | vmw_release_device(dev_priv); | ||
657 | } | ||
658 | dev_priv->active_master = &dev_priv->fbdev_master; | 642 | dev_priv->active_master = &dev_priv->fbdev_master; |
659 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 643 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
660 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 644 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
661 | 645 | ||
662 | if (!dev_priv->stealth) | 646 | vmw_fb_on(dev_priv); |
663 | vmw_fb_on(dev_priv); | ||
664 | } | 647 | } |
665 | 648 | ||
666 | 649 | ||
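
The vmwgfx load/unload rework deletes the stealth-mode fork: both modes now run the same request_device/kms/overlay/fb sequence, and only the width of the PCI region reservation still depends on the mode. A rough structural sketch, with invented function names standing in for the driver calls:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; not the vmwgfx functions. */
static int request_device(void) { puts("request_device"); return 0; }
static void kms_init(void)      { puts("kms_init"); }
static void overlay_init(void)  { puts("overlay_init"); }
static void fb_init(void)       { puts("fb_init"); }

static int driver_load(bool stealth)
{
	/* Only the reservation differs between the two modes now. */
	if (stealth)
		puts("pci_request_region(2)");	/* just the SVGA MMIO BAR */
	else
		puts("pci_request_regions()");	/* all BARs */

	/* One shared init path, where there used to be two. */
	if (request_device() != 0)
		return -1;			/* like goto out_no_device */
	kms_init();
	overlay_init();
	fb_init();
	return 0;
}

static void driver_unload(bool stealth)
{
	/* Teardown is now symmetric with load for both modes. */
	puts("fb_close + kms_close + overlay_close + release_device");
	puts(stealth ? "pci_release_region(2)" : "pci_release_regions()");
}

int main(void)
{
	if (driver_load(true) == 0)
		driver_unload(true);
	return 0;
}
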
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 135be9688c90..356dc935ec13 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -39,10 +39,10 @@ | |||
39 | #include "ttm/ttm_execbuf_util.h" | 39 | #include "ttm/ttm_execbuf_util.h" |
40 | #include "ttm/ttm_module.h" | 40 | #include "ttm/ttm_module.h" |
41 | 41 | ||
42 | #define VMWGFX_DRIVER_DATE "20090724" | 42 | #define VMWGFX_DRIVER_DATE "20100209" |
43 | #define VMWGFX_DRIVER_MAJOR 0 | 43 | #define VMWGFX_DRIVER_MAJOR 1 |
44 | #define VMWGFX_DRIVER_MINOR 1 | 44 | #define VMWGFX_DRIVER_MINOR 0 |
45 | #define VMWGFX_DRIVER_PATCHLEVEL 2 | 45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
48 | #define VMWGFX_MAX_RELOCATIONS 2048 | 48 | #define VMWGFX_MAX_RELOCATIONS 2048 |
@@ -113,6 +113,7 @@ struct vmw_fifo_state { | |||
113 | unsigned long static_buffer_size; | 113 | unsigned long static_buffer_size; |
114 | bool using_bounce_buffer; | 114 | bool using_bounce_buffer; |
115 | uint32_t capabilities; | 115 | uint32_t capabilities; |
116 | struct mutex fifo_mutex; | ||
116 | struct rw_semaphore rwsem; | 117 | struct rw_semaphore rwsem; |
117 | }; | 118 | }; |
118 | 119 | ||
@@ -213,7 +214,7 @@ struct vmw_private { | |||
213 | * Fencing and IRQs. | 214 | * Fencing and IRQs. |
214 | */ | 215 | */ |
215 | 216 | ||
216 | uint32_t fence_seq; | 217 | atomic_t fence_seq; |
217 | wait_queue_head_t fence_queue; | 218 | wait_queue_head_t fence_queue; |
218 | wait_queue_head_t fifo_queue; | 219 | wait_queue_head_t fifo_queue; |
219 | atomic_t fence_queue_waiters; | 220 | atomic_t fence_queue_waiters; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index d69caf92ffe7..0897359b3e4e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); | 182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); |
183 | } | 183 | } |
184 | 184 | ||
185 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | 185 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
186 | struct vmw_sw_context *sw_context, | 186 | struct vmw_sw_context *sw_context, |
187 | SVGA3dCmdHeader *header) | 187 | SVGAGuestPtr *ptr, |
188 | struct vmw_dma_buffer **vmw_bo_p) | ||
188 | { | 189 | { |
189 | uint32_t handle; | ||
190 | struct vmw_dma_buffer *vmw_bo = NULL; | 190 | struct vmw_dma_buffer *vmw_bo = NULL; |
191 | struct ttm_buffer_object *bo; | 191 | struct ttm_buffer_object *bo; |
192 | struct vmw_surface *srf = NULL; | 192 | uint32_t handle = ptr->gmrId; |
193 | struct vmw_dma_cmd { | ||
194 | SVGA3dCmdHeader header; | ||
195 | SVGA3dCmdSurfaceDMA dma; | ||
196 | } *cmd; | ||
197 | struct vmw_relocation *reloc; | 193 | struct vmw_relocation *reloc; |
198 | int ret; | ||
199 | uint32_t cur_validate_node; | 194 | uint32_t cur_validate_node; |
200 | struct ttm_validate_buffer *val_buf; | 195 | struct ttm_validate_buffer *val_buf; |
196 | int ret; | ||
201 | 197 | ||
202 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
203 | handle = cmd->dma.guest.ptr.gmrId; | ||
204 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 198 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
205 | if (unlikely(ret != 0)) { | 199 | if (unlikely(ret != 0)) { |
206 | DRM_ERROR("Could not find or use GMR region.\n"); | 200 | DRM_ERROR("Could not find or use GMR region.\n"); |
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
209 | bo = &vmw_bo->base; | 203 | bo = &vmw_bo->base; |
210 | 204 | ||
211 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | 205 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
212 | DRM_ERROR("Max number of DMA commands per submission" | 206 | DRM_ERROR("Max number relocations per submission" |
213 | " exceeded\n"); | 207 | " exceeded\n"); |
214 | ret = -EINVAL; | 208 | ret = -EINVAL; |
215 | goto out_no_reloc; | 209 | goto out_no_reloc; |
216 | } | 210 | } |
217 | 211 | ||
218 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 212 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
219 | reloc->location = &cmd->dma.guest.ptr; | 213 | reloc->location = ptr; |
220 | 214 | ||
221 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | 215 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); |
222 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { | 216 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { |
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
234 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 228 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
235 | ++sw_context->cur_val_buf; | 229 | ++sw_context->cur_val_buf; |
236 | } | 230 | } |
231 | *vmw_bo_p = vmw_bo; | ||
232 | return 0; | ||
233 | |||
234 | out_no_reloc: | ||
235 | vmw_dmabuf_unreference(&vmw_bo); | ||
236 | *vmw_bo_p = NULL; | ||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, | ||
241 | struct vmw_sw_context *sw_context, | ||
242 | SVGA3dCmdHeader *header) | ||
243 | { | ||
244 | struct vmw_dma_buffer *vmw_bo; | ||
245 | struct vmw_query_cmd { | ||
246 | SVGA3dCmdHeader header; | ||
247 | SVGA3dCmdEndQuery q; | ||
248 | } *cmd; | ||
249 | int ret; | ||
250 | |||
251 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
252 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
253 | if (unlikely(ret != 0)) | ||
254 | return ret; | ||
255 | |||
256 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
257 | &cmd->q.guestResult, | ||
258 | &vmw_bo); | ||
259 | if (unlikely(ret != 0)) | ||
260 | return ret; | ||
261 | |||
262 | vmw_dmabuf_unreference(&vmw_bo); | ||
263 | return 0; | ||
264 | } | ||
237 | 265 | ||
266 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | ||
267 | struct vmw_sw_context *sw_context, | ||
268 | SVGA3dCmdHeader *header) | ||
269 | { | ||
270 | struct vmw_dma_buffer *vmw_bo; | ||
271 | struct vmw_query_cmd { | ||
272 | SVGA3dCmdHeader header; | ||
273 | SVGA3dCmdWaitForQuery q; | ||
274 | } *cmd; | ||
275 | int ret; | ||
276 | |||
277 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
278 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
279 | if (unlikely(ret != 0)) | ||
280 | return ret; | ||
281 | |||
282 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
283 | &cmd->q.guestResult, | ||
284 | &vmw_bo); | ||
285 | if (unlikely(ret != 0)) | ||
286 | return ret; | ||
287 | |||
288 | vmw_dmabuf_unreference(&vmw_bo); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | |||
293 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | ||
294 | struct vmw_sw_context *sw_context, | ||
295 | SVGA3dCmdHeader *header) | ||
296 | { | ||
297 | struct vmw_dma_buffer *vmw_bo = NULL; | ||
298 | struct ttm_buffer_object *bo; | ||
299 | struct vmw_surface *srf = NULL; | ||
300 | struct vmw_dma_cmd { | ||
301 | SVGA3dCmdHeader header; | ||
302 | SVGA3dCmdSurfaceDMA dma; | ||
303 | } *cmd; | ||
304 | int ret; | ||
305 | |||
306 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
307 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
308 | &cmd->dma.guest.ptr, | ||
309 | &vmw_bo); | ||
310 | if (unlikely(ret != 0)) | ||
311 | return ret; | ||
312 | |||
313 | bo = &vmw_bo->base; | ||
238 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, | 314 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, |
239 | cmd->dma.host.sid, &srf); | 315 | cmd->dma.host.sid, &srf); |
240 | if (ret) { | 316 | if (ret) { |
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
379 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 455 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), |
380 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 456 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), |
381 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), | 457 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), |
382 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), | 458 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), |
383 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), | 459 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), |
384 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 460 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), |
385 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 461 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
386 | &vmw_cmd_blt_surf_screen_check) | 462 | &vmw_cmd_blt_surf_screen_check) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 4f4f6432be8b..a93367041cdc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
559 | info->pixmap.scan_align = 1; | 559 | info->pixmap.scan_align = 1; |
560 | #endif | 560 | #endif |
561 | 561 | ||
562 | info->aperture_base = vmw_priv->vram_start; | ||
563 | info->aperture_size = vmw_priv->vram_size; | ||
564 | |||
562 | /* | 565 | /* |
563 | * Dirty & Deferred IO | 566 | * Dirty & Deferred IO |
564 | */ | 567 | */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 4157547cc6e4..39d43a01d846 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
74 | fifo->reserved_size = 0; | 74 | fifo->reserved_size = 0; |
75 | fifo->using_bounce_buffer = false; | 75 | fifo->using_bounce_buffer = false; |
76 | 76 | ||
77 | mutex_init(&fifo->fifo_mutex); | ||
77 | init_rwsem(&fifo->rwsem); | 78 | init_rwsem(&fifo->rwsem); |
78 | 79 | ||
79 | /* | 80 | /* |
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
117 | (unsigned int) min, | 118 | (unsigned int) min, |
118 | (unsigned int) fifo->capabilities); | 119 | (unsigned int) fifo->capabilities); |
119 | 120 | ||
120 | dev_priv->fence_seq = dev_priv->last_read_sequence; | 121 | atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); |
121 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); | 122 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); |
122 | 123 | ||
123 | return vmw_fifo_send_fence(dev_priv, &dummy); | 124 | return vmw_fifo_send_fence(dev_priv, &dummy); |
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | |||
283 | uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; | 284 | uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; |
284 | int ret; | 285 | int ret; |
285 | 286 | ||
286 | down_write(&fifo_state->rwsem); | 287 | mutex_lock(&fifo_state->fifo_mutex); |
287 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); | 288 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); |
288 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 289 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
289 | next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); | 290 | next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); |
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | |||
351 | } | 352 | } |
352 | out_err: | 353 | out_err: |
353 | fifo_state->reserved_size = 0; | 354 | fifo_state->reserved_size = 0; |
354 | up_write(&fifo_state->rwsem); | 355 | mutex_unlock(&fifo_state->fifo_mutex); |
355 | return NULL; | 356 | return NULL; |
356 | } | 357 | } |
357 | 358 | ||
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
426 | 427 | ||
427 | } | 428 | } |
428 | 429 | ||
430 | down_write(&fifo_state->rwsem); | ||
429 | if (fifo_state->using_bounce_buffer || reserveable) { | 431 | if (fifo_state->using_bounce_buffer || reserveable) { |
430 | next_cmd += bytes; | 432 | next_cmd += bytes; |
431 | if (next_cmd >= max) | 433 | if (next_cmd >= max) |
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
437 | if (reserveable) | 439 | if (reserveable) |
438 | iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); | 440 | iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); |
439 | mb(); | 441 | mb(); |
440 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
441 | up_write(&fifo_state->rwsem); | 442 | up_write(&fifo_state->rwsem); |
443 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
444 | mutex_unlock(&fifo_state->fifo_mutex); | ||
442 | } | 445 | } |
443 | 446 | ||
444 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | 447 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) |
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
451 | 454 | ||
452 | fm = vmw_fifo_reserve(dev_priv, bytes); | 455 | fm = vmw_fifo_reserve(dev_priv, bytes); |
453 | if (unlikely(fm == NULL)) { | 456 | if (unlikely(fm == NULL)) { |
454 | down_write(&fifo_state->rwsem); | 457 | *sequence = atomic_read(&dev_priv->fence_seq); |
455 | *sequence = dev_priv->fence_seq; | ||
456 | up_write(&fifo_state->rwsem); | ||
457 | ret = -ENOMEM; | 458 | ret = -ENOMEM; |
458 | (void)vmw_fallback_wait(dev_priv, false, true, *sequence, | 459 | (void)vmw_fallback_wait(dev_priv, false, true, *sequence, |
459 | false, 3*HZ); | 460 | false, 3*HZ); |
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
461 | } | 462 | } |
462 | 463 | ||
463 | do { | 464 | do { |
464 | *sequence = dev_priv->fence_seq++; | 465 | *sequence = atomic_add_return(1, &dev_priv->fence_seq); |
465 | } while (*sequence == 0); | 466 | } while (*sequence == 0); |
466 | 467 | ||
467 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { | 468 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { |
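
The FIFO locking split above narrows the rwsem to ordering commits against readers, while the new fifo_mutex serializes writers across the whole reserve()..commit() window, including the error path in reserve that must drop it. A userspace analogue of a lock acquired in reserve and released in commit:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t fifo_mutex = PTHREAD_MUTEX_INITIALIZER;
static char fifo[64];
static size_t reserved;

/* The lock is taken here and released in commit: only one writer may
 * sit between reserve and commit at a time. */
static void *fifo_reserve(size_t bytes)
{
	pthread_mutex_lock(&fifo_mutex);
	if (bytes > sizeof(fifo)) {
		reserved = 0;
		pthread_mutex_unlock(&fifo_mutex);	/* error path unlocks too */
		return NULL;
	}
	reserved = bytes;
	return fifo;
}

static void fifo_commit(void)
{
	reserved = 0;
	pthread_mutex_unlock(&fifo_mutex);
}

int main(void)
{
	char *cmd = fifo_reserve(16);

	if (cmd) {
		memcpy(cmd, "fence", 6);	/* fill the reserved space */
		fifo_commit();
	}
	printf("committed\n");
	return 0;
}
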
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 778851f9f1d6..1c7a316454d8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
48 | case DRM_VMW_PARAM_FIFO_OFFSET: | 48 | case DRM_VMW_PARAM_FIFO_OFFSET: |
49 | param->value = dev_priv->mmio_start; | 49 | param->value = dev_priv->mmio_start; |
50 | break; | 50 | break; |
51 | case DRM_VMW_PARAM_HW_CAPS: | ||
52 | param->value = dev_priv->capabilities; | ||
53 | break; | ||
54 | case DRM_VMW_PARAM_FIFO_CAPS: | ||
55 | param->value = dev_priv->fifo.capabilities; | ||
56 | break; | ||
51 | default: | 57 | default: |
52 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 58 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
53 | param->param); | 59 | param->param); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index d40086fc8647..4d7cb5393860 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv, | |||
85 | return true; | 85 | return true; |
86 | 86 | ||
87 | /** | 87 | /** |
88 | * Below is to signal stale fences that have wrapped. | ||
89 | * First, block fence submission. | ||
90 | */ | ||
91 | |||
92 | down_read(&fifo_state->rwsem); | ||
93 | |||
94 | /** | ||
95 | * Then check if the sequence is higher than what we've actually | 88 | * Then check if the sequence is higher than what we've actually |
96 | * emitted. Then the fence is stale and signaled. | 89 | * emitted. Then the fence is stale and signaled. |
97 | */ | 90 | */ |
98 | 91 | ||
99 | ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); | 92 | ret = ((atomic_read(&dev_priv->fence_seq) - sequence) |
100 | up_read(&fifo_state->rwsem); | 93 | > VMW_FENCE_WRAP); |
101 | 94 | ||
102 | return ret; | 95 | return ret; |
103 | } | 96 | } |
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
127 | 120 | ||
128 | if (fifo_idle) | 121 | if (fifo_idle) |
129 | down_read(&fifo_state->rwsem); | 122 | down_read(&fifo_state->rwsem); |
130 | signal_seq = dev_priv->fence_seq; | 123 | signal_seq = atomic_read(&dev_priv->fence_seq); |
131 | ret = 0; | 124 | ret = 0; |
132 | 125 | ||
133 | for (;;) { | 126 | for (;;) { |
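
With fence_seq converted to atomic_t, the stale-fence test no longer needs to block fence submission with the FIFO rwsem; an atomic read plus unsigned subtraction is enough, and the wrap-around case falls out of modular arithmetic. A compilable sketch of that comparison (the VMW_FENCE_WRAP value is invented here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FENCE_WRAP (1u << 24)	/* illustrative threshold */

static atomic_uint fence_seq;

static bool fence_is_stale(uint32_t sequence)
{
	/* Unsigned wrap-around: (emitted - sequence) is large only for
	 * fences emitted long ago or from a previous 32-bit wrap. */
	return (atomic_load(&fence_seq) - sequence) > FENCE_WRAP;
}

int main(void)
{
	atomic_store(&fence_seq, 10);
	printf("recent fence stale? %d\n", fence_is_stale(9));		/* 0 */
	printf("wrapped fence stale? %d\n", fence_is_stale(0xff000000u));	/* 1 */
	return 0;
}
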
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index eeba6d1d06e4..31f9afed0a63 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv) | |||
769 | 769 | ||
770 | drm_mode_config_init(dev); | 770 | drm_mode_config_init(dev); |
771 | dev->mode_config.funcs = &vmw_kms_funcs; | 771 | dev->mode_config.funcs = &vmw_kms_funcs; |
772 | dev->mode_config.min_width = 640; | 772 | dev->mode_config.min_width = 1; |
773 | dev->mode_config.min_height = 480; | 773 | dev->mode_config.min_height = 1; |
774 | dev->mode_config.max_width = 2048; | 774 | dev->mode_config.max_width = dev_priv->fb_max_width; |
775 | dev->mode_config.max_height = 2048; | 775 | dev->mode_config.max_height = dev_priv->fb_max_height; |
776 | 776 | ||
777 | ret = vmw_kms_init_legacy_display_system(dev_priv); | 777 | ret = vmw_kms_init_legacy_display_system(dev_priv); |
778 | 778 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c7efbd47ab84..f8fbbc67a406 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -35,11 +35,6 @@ | |||
35 | #define VMW_RES_SURFACE ttm_driver_type1 | 35 | #define VMW_RES_SURFACE ttm_driver_type1 |
36 | #define VMW_RES_STREAM ttm_driver_type2 | 36 | #define VMW_RES_STREAM ttm_driver_type2 |
37 | 37 | ||
38 | /* XXX: This isn't a real hardware flag, but just a hack for kernel to | ||
39 | * know about primary surfaces. Find a better way to accomplish this. | ||
40 | */ | ||
41 | #define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9) | ||
42 | |||
43 | struct vmw_user_context { | 38 | struct vmw_user_context { |
44 | struct ttm_base_object base; | 39 | struct ttm_base_object base; |
45 | struct vmw_resource res; | 40 | struct vmw_resource res; |
@@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
579 | 574 | ||
580 | srf->flags = req->flags; | 575 | srf->flags = req->flags; |
581 | srf->format = req->format; | 576 | srf->format = req->format; |
577 | srf->scanout = req->scanout; | ||
582 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | 578 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); |
583 | srf->num_sizes = 0; | 579 | srf->num_sizes = 0; |
584 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | 580 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) |
@@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
604 | if (unlikely(ret != 0)) | 600 | if (unlikely(ret != 0)) |
605 | goto out_err1; | 601 | goto out_err1; |
606 | 602 | ||
607 | if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) { | ||
608 | /* we should not send this flag down to hardware since | ||
609 | * its not a official one | ||
610 | */ | ||
611 | srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT; | ||
612 | srf->scanout = true; | ||
613 | } else { | ||
614 | srf->scanout = false; | ||
615 | } | ||
616 | |||
617 | if (srf->scanout && | 603 | if (srf->scanout && |
618 | srf->num_sizes == 1 && | 604 | srf->num_sizes == 1 && |
619 | srf->sizes[0].width == 64 && | 605 | srf->sizes[0].width == 64 && |