Diffstat (limited to 'drivers/gpu')
87 files changed, 8130 insertions, 2351 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c961fe415aef..39b393d38bb3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -81,6 +81,7 @@ config DRM_I830

 config DRM_I915
 tristate "i915 driver"
+depends on AGP_INTEL
 select FB_CFB_FILLRECT
 select FB_CFB_COPYAREA
 select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4e89ab08b7b8..fe23f29f7cba 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 drm-$(CONFIG_COMPAT) += drm_ioc32.o

 obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_TTM) += ttm/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
@@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_SIS) += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VIA) +=via/
-obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8fab7890a363..33be210d6723 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1461,7 +1461,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 goto out;
 }

-if (crtc_req->count_connectors > 0 && !mode && !fb) {
+if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
 DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
 crtc_req->count_connectors);
 ret = -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a6f73f1e99d9..6aaa2cb23365 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -706,8 +706,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 struct drm_encoder **save_encoders, *new_encoder;
 struct drm_framebuffer *old_fb = NULL;
 bool save_enabled;
-bool mode_changed = false;
-bool fb_changed = false;
+bool mode_changed = false; /* if true do a full mode set */
+bool fb_changed = false; /* if true and !mode_changed just do a flip */
 struct drm_connector *connector;
 int count = 0, ro, fail = 0;
 struct drm_crtc_helper_funcs *crtc_funcs;
@@ -758,6 +758,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 if (set->crtc->fb == NULL) {
 DRM_DEBUG("crtc has no fb, full mode set\n");
 mode_changed = true;
+} else if (set->fb == NULL) {
+mode_changed = true;
 } else if ((set->fb->bits_per_pixel !=
 set->crtc->fb->bits_per_pixel) ||
 set->fb->depth != set->crtc->fb->depth)
@@ -1090,6 +1092,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 if (ret == false)
 DRM_ERROR("failed to set mode on crtc %p\n", crtc);
 }
+/* disable the unused connectors while restoring the modesetting */
+drm_helper_disable_unused_functions(dev);
 return 0;
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 2960b6d73456..9903f270e440 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -101,6 +101,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
 continue;

 tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+if (tmp == NULL) {
+ret = -1;
+goto fail;
+}
 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
 root, tmp, &drm_debugfs_fops);
 if (!ent) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7d0835226f6e..80cc6d06d61b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -294,10 +294,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
 unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
 unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
-unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo;
-unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo;
-unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf);
-unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);

 /* ignore tiny modes */
 if (hactive < 64 || vactive < 64)
@@ -347,8 +347,8 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
 DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;

-mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
-mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;

 if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
 mode->width_mm *= 10;
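
The width_mm/height_mm fix above follows the EDID 1.3 detailed timing descriptor layout: offset 12 carries the low 8 bits of the horizontal image size in mm, offset 13 the low 8 bits of the vertical size, and offset 14 packs the upper 4 bits of each (horizontal in the high nibble, vertical in the low nibble), so the old code had the two nibbles swapped. A small standalone sketch of that unpacking, with illustrative names rather than the driver's struct fields:

    #include <stdint.h>
    #include <stdio.h>

    /* Unpack the physical image size from an EDID 1.3 detailed timing
     * descriptor: dtd[12]/dtd[13] are the low bytes, dtd[14] packs the
     * upper nibbles (width high, height low). Illustrative sketch only. */
    static void edid_image_size_mm(const uint8_t *dtd,
                                   unsigned *width_mm, unsigned *height_mm)
    {
            *width_mm  = dtd[12] | ((dtd[14] & 0xf0) << 4);
            *height_mm = dtd[13] | ((dtd[14] & 0x0f) << 8);
    }

    int main(void)
    {
            /* 410 mm x 257 mm, packed per the spec: 0x19a and 0x101 */
            uint8_t dtd[18] = { [12] = 0x9a, [13] = 0x01, [14] = 0x11 };
            unsigned w, h;

            edid_image_size_mm(dtd, &w, &h);
            printf("%u x %u mm\n", w, h);   /* prints: 410 x 257 mm */
            return 0;
    }
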
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8104ecaea26f..ffe8f4394d50 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -134,26 +134,29 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 BUG_ON((size & (PAGE_SIZE - 1)) != 0);

 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+if (!obj)
+goto free;

 obj->dev = dev;
 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-if (IS_ERR(obj->filp)) {
-kfree(obj);
-return NULL;
-}
+if (IS_ERR(obj->filp))
+goto free;

 kref_init(&obj->refcount);
 kref_init(&obj->handlecount);
 obj->size = size;
 if (dev->driver->gem_init_object != NULL &&
 dev->driver->gem_init_object(obj) != 0) {
-fput(obj->filp);
-kfree(obj);
-return NULL;
+goto fput;
 }
 atomic_inc(&dev->object_count);
 atomic_add(obj->size, &dev->object_memory);
 return obj;
+fput:
+fput(obj->filp);
+free:
+kfree(obj);
+return NULL;
 }
 EXPORT_SYMBOL(drm_gem_object_alloc);

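
Besides adding the previously missing NULL check after kzalloc(), the drm_gem_object_alloc() change above folds the duplicated failure cleanup into shared fput/free labels at the end of the function, the usual kernel idiom of unwinding acquired resources in reverse order through a single exit path. A generic sketch of that shape, using hypothetical resources rather than the GEM API:

    #include <stdlib.h>

    struct thing { void *a, *b; };

    /* Single-exit error unwinding: each failure jumps to the label that
     * releases exactly what has been acquired so far, in reverse order. */
    static struct thing *thing_alloc(void)
    {
            struct thing *t = calloc(1, sizeof(*t));
            if (!t)
                    goto err;               /* nothing to undo yet */

            t->a = malloc(64);
            if (!t->a)
                    goto err_free_t;        /* undo the calloc only */

            t->b = malloc(64);
            if (!t->b)
                    goto err_free_a;        /* undo in reverse order */

            return t;

    err_free_a:
            free(t->a);
    err_free_t:
            free(t);
    err:
            return NULL;
    }
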
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 155a5bbce680..55bb8a82d612 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -489,7 +489,7 @@ int drm_put_minor(struct drm_minor **minor_p)
 */
 void drm_put_dev(struct drm_device *dev)
 {
-struct drm_driver *driver = dev->driver;
+struct drm_driver *driver;
 struct drm_map_list *r_list, *list_temp;

 DRM_DEBUG("\n");
@@ -498,6 +498,7 @@ void drm_put_dev(struct drm_device *dev)
 DRM_ERROR("cleanup called no dev\n");
 return;
 }
+driver = dev->driver;

 drm_vblank_cleanup(dev);

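
The drm_put_dev() change above is an ordering fix: initializing the local in its declaration read dev->driver before the function's NULL check on dev could run, so a NULL device would still be dereferenced. Deferring the assignment until after the check removes that window; a minimal sketch of the pattern with hypothetical types:

    #include <stddef.h>
    #include <stdio.h>

    struct ops { const char *name; };
    struct dev { struct ops *driver; };

    static void put_dev(struct dev *dev)
    {
            struct ops *driver;     /* declared, but not yet read from dev */

            if (dev == NULL) {
                    fprintf(stderr, "cleanup called with no dev\n");
                    return;
            }
            driver = dev->driver;   /* safe: dev is known non-NULL here */

            printf("releasing driver %s\n", driver->name);
    }
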
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 51c5a050aa73..30d6b99fb302 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 intel_crt.o \
 intel_lvds.o \
 intel_bios.o \
+intel_dp.o \
+intel_dp_i2c.o \
 intel_hdmi.o \
 intel_sdvo.o \
 intel_modes.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index e747ac42fe3a..288fc50627e2 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -37,7 +37,7 @@ struct intel_dvo_device {
 /* GPIO register used for i2c bus to control this device */
 u32 gpio;
 int slave_addr;
-struct intel_i2c_chan *i2c_bus;
+struct i2c_adapter *i2c_bus;

 const struct intel_dvo_dev_ops *dev_ops;
 void *dev_priv;
@@ -52,7 +52,7 @@ struct intel_dvo_dev_ops {
 * Returns NULL if the device does not exist.
 */
 bool (*init)(struct intel_dvo_device *dvo,
-struct intel_i2c_chan *i2cbus);
+struct i2c_adapter *i2cbus);

 /*
 * Called to allow the output a chance to create properties after the
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 03d4b4973b02..621815b531db 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);

 static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
 {
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 u8 out_buf[2];
 u8 in_buf[2];

 struct i2c_msg msgs[] = {
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 1,
 .buf = out_buf,
 },
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = I2C_M_RD,
 .len = 1,
 .buf = in_buf,
@@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)

 static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
 {
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 uint8_t out_buf[2];
 struct i2c_msg msg = {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 2,
 .buf = out_buf,
@@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)

 /** Probes for a CH7017 on the given bus and slave address. */
 static bool ch7017_init(struct intel_dvo_device *dvo,
-struct intel_i2c_chan *i2cbus)
+struct i2c_adapter *adapter)
 {
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 struct ch7017_priv *priv;
 uint8_t val;

@@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 if (priv == NULL)
 return false;

-dvo->i2c_bus = i2cbus;
-dvo->i2c_bus->slave_addr = dvo->slave_addr;
+dvo->i2c_bus = adapter;
 dvo->dev_priv = priv;

 if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
@@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 val != CH7018_DEVICE_ID_VALUE &&
 val != CH7019_DEVICE_ID_VALUE) {
 DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
-val, i2cbus->adapter.name,i2cbus->slave_addr);
+val, i2cbus->adapter.name,dvo->slave_addr);
 goto fail;
 }

diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d2fd95dbd034..a9b896289680 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid)
 static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 u8 out_buf[2];
 u8 in_buf[2];

 struct i2c_msg msgs[] = {
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 1,
 .buf = out_buf,
 },
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = I2C_M_RD,
 .len = 1,
 .buf = in_buf,
@@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)

 if (!ch7xxx->quiet) {
 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
-addr, i2cbus->adapter.name, i2cbus->slave_addr);
+addr, i2cbus->adapter.name, dvo->slave_addr);
 }
 return false;
 }
@@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 uint8_t out_buf[2];
 struct i2c_msg msg = {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 2,
 .buf = out_buf,
@@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)

 if (!ch7xxx->quiet) {
 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
-addr, i2cbus->adapter.name, i2cbus->slave_addr);
+addr, i2cbus->adapter.name, dvo->slave_addr);
 }

 return false;
 }

 static bool ch7xxx_init(struct intel_dvo_device *dvo,
-struct intel_i2c_chan *i2cbus)
+struct i2c_adapter *adapter)
 {
 /* this will detect the CH7xxx chip on the specified i2c bus */
 struct ch7xxx_priv *ch7xxx;
@@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 if (ch7xxx == NULL)
 return false;

-dvo->i2c_bus = i2cbus;
-dvo->i2c_bus->slave_addr = dvo->slave_addr;
+dvo->i2c_bus = adapter;
 dvo->dev_priv = ch7xxx;
 ch7xxx->quiet = true;

@@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 name = ch7xxx_get_id(vendor);
 if (!name) {
 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
-vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+vendor, adapter->name, dvo->slave_addr);
 goto out;
 }

@@ -217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,

 if (device != CH7xxx_DID) {
 DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
-vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+vendor, adapter->name, dvo->slave_addr);
 goto out;
 }

diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0c8d375e8e37..aa176f9921fe 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
 static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 {
 struct ivch_priv *priv = dvo->dev_priv;
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 u8 out_buf[1];
 u8 in_buf[2];

 struct i2c_msg msgs[] = {
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = I2C_M_RD,
 .len = 0,
 },
@@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 .buf = out_buf,
 },
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = I2C_M_RD | I2C_M_NOSTART,
 .len = 2,
 .buf = in_buf,
@@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)

 if (!priv->quiet) {
 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
-addr, i2cbus->adapter.name, i2cbus->slave_addr);
+addr, i2cbus->adapter.name, dvo->slave_addr);
 }
 return false;
 }
@@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 {
 struct ivch_priv *priv = dvo->dev_priv;
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 u8 out_buf[3];
 struct i2c_msg msg = {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 3,
 .buf = out_buf,
@@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)

 if (!priv->quiet) {
 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
-addr, i2cbus->adapter.name, i2cbus->slave_addr);
+addr, i2cbus->adapter.name, dvo->slave_addr);
 }

 return false;
@@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)

 /** Probes the given bus and slave address for an ivch */
 static bool ivch_init(struct intel_dvo_device *dvo,
-struct intel_i2c_chan *i2cbus)
+struct i2c_adapter *adapter)
 {
 struct ivch_priv *priv;
 uint16_t temp;
@@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
 if (priv == NULL)
 return false;

-dvo->i2c_bus = i2cbus;
-dvo->i2c_bus->slave_addr = dvo->slave_addr;
+dvo->i2c_bus = adapter;
 dvo->dev_priv = priv;
 priv->quiet = true;

diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 033a4bb070b2..e1c1f7341e5c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -76,19 +76,20 @@ struct sil164_priv {
 static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 struct sil164_priv *sil = dvo->dev_priv;
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 u8 out_buf[2];
 u8 in_buf[2];

 struct i2c_msg msgs[] = {
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 1,
 .buf = out_buf,
 },
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = I2C_M_RD,
 .len = 1,
 .buf = in_buf,
@@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)

 if (!sil->quiet) {
 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
-addr, i2cbus->adapter.name, i2cbus->slave_addr);
+addr, i2cbus->adapter.name, dvo->slave_addr);
 }
 return false;
 }
@@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 struct sil164_priv *sil= dvo->dev_priv;
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 uint8_t out_buf[2];
 struct i2c_msg msg = {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 2,
 .buf = out_buf,
@@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)

 if (!sil->quiet) {
 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
-addr, i2cbus->adapter.name, i2cbus->slave_addr);
+addr, i2cbus->adapter.name, dvo->slave_addr);
 }

 return false;
@@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)

 /* Silicon Image 164 driver for chip on i2c bus */
 static bool sil164_init(struct intel_dvo_device *dvo,
-struct intel_i2c_chan *i2cbus)
+struct i2c_adapter *adapter)
 {
 /* this will detect the SIL164 chip on the specified i2c bus */
 struct sil164_priv *sil;
@@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
 if (sil == NULL)
 return false;

-dvo->i2c_bus = i2cbus;
-dvo->i2c_bus->slave_addr = dvo->slave_addr;
+dvo->i2c_bus = adapter;
 dvo->dev_priv = sil;
 sil->quiet = true;

@@ -158,7 +159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,

 if (ch != (SIL164_VID & 0xff)) {
 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
-ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ch, adapter->name, dvo->slave_addr);
 goto out;
 }

@@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,

 if (ch != (SIL164_DID & 0xff)) {
 DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
-ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ch, adapter->name, dvo->slave_addr);
 goto out;
 }
 sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 207fda806ebf..9ecc907384ec 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -101,19 +101,20 @@ struct tfp410_priv {
 static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
 struct tfp410_priv *tfp = dvo->dev_priv;
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 u8 out_buf[2];
 u8 in_buf[2];

 struct i2c_msg msgs[] = {
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 1,
 .buf = out_buf,
 },
 {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = I2C_M_RD,
 .len = 1,
 .buf = in_buf,
@@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)

 if (!tfp->quiet) {
 DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
-addr, i2cbus->adapter.name, i2cbus->slave_addr);
+addr, i2cbus->adapter.name, dvo->slave_addr);
 }
 return false;
 }
@@ -138,10 +139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
 struct tfp410_priv *tfp = dvo->dev_priv;
-struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+struct i2c_adapter *adapter = dvo->i2c_bus;
+struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
 uint8_t out_buf[2];
 struct i2c_msg msg = {
-.addr = i2cbus->slave_addr,
+.addr = dvo->slave_addr,
 .flags = 0,
 .len = 2,
 .buf = out_buf,
@@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)

 if (!tfp->quiet) {
 DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
-addr, i2cbus->adapter.name, i2cbus->slave_addr);
+addr, i2cbus->adapter.name, dvo->slave_addr);
 }

 return false;
@@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr)

 /* Ti TFP410 driver for chip on i2c bus */
 static bool tfp410_init(struct intel_dvo_device *dvo,
-struct intel_i2c_chan *i2cbus)
+struct i2c_adapter *adapter)
 {
 /* this will detect the tfp410 chip on the specified i2c bus */
 struct tfp410_priv *tfp;
@@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
 if (tfp == NULL)
 return false;

-dvo->i2c_bus = i2cbus;
-dvo->i2c_bus->slave_addr = dvo->slave_addr;
+dvo->i2c_bus = adapter;
 dvo->dev_priv = tfp;
 tfp->quiet = true;

 if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
 DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
-id, i2cbus->adapter.name, i2cbus->slave_addr);
+id, adapter->name, dvo->slave_addr);
 goto out;
 }

 if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
 DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
-id, i2cbus->adapter.name, i2cbus->slave_addr);
+id, adapter->name, dvo->slave_addr);
 goto out;
 }
 tfp->quiet = false;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f112c769d533..50d1f782768c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -846,7 +846,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 return 0;
 }

-printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
+DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

@@ -885,8 +885,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 * some RAM for the framebuffer at early boot. This code figures out
 * how much was set aside so we can use it for our own purposes.
 */
-static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
-unsigned long *preallocated_size)
+static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
+uint32_t *preallocated_size)
 {
 struct pci_dev *bridge_dev;
 u16 tmp = 0;
@@ -984,10 +984,11 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
 return 0;
 }

-static int i915_load_modeset_init(struct drm_device *dev)
+static int i915_load_modeset_init(struct drm_device *dev,
+unsigned long prealloc_size,
+unsigned long agp_size)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
-unsigned long agp_size, prealloc_size;
 int fb_bar = IS_I9XX(dev) ? 2 : 0;
 int ret = 0;

@@ -1002,10 +1003,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 if (IS_I965G(dev) || IS_G33(dev))
 dev_priv->cursor_needs_physical = false;

-ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
-if (ret)
-goto out;
-
 /* Basic memrange allocator for stolen space (aka vram) */
 drm_mm_init(&dev_priv->vram, 0, prealloc_size);

@@ -1082,6 +1079,44 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 master->driver_priv = NULL;
 }

+static void i915_get_mem_freq(struct drm_device *dev)
+{
+drm_i915_private_t *dev_priv = dev->dev_private;
+u32 tmp;
+
+if (!IS_IGD(dev))
+return;
+
+tmp = I915_READ(CLKCFG);
+
+switch (tmp & CLKCFG_FSB_MASK) {
+case CLKCFG_FSB_533:
+dev_priv->fsb_freq = 533; /* 133*4 */
+break;
+case CLKCFG_FSB_800:
+dev_priv->fsb_freq = 800; /* 200*4 */
+break;
+case CLKCFG_FSB_667:
+dev_priv->fsb_freq = 667; /* 167*4 */
+break;
+case CLKCFG_FSB_400:
+dev_priv->fsb_freq = 400; /* 100*4 */
+break;
+}
+
+switch (tmp & CLKCFG_MEM_MASK) {
+case CLKCFG_MEM_533:
+dev_priv->mem_freq = 533;
+break;
+case CLKCFG_MEM_667:
+dev_priv->mem_freq = 667;
+break;
+case CLKCFG_MEM_800:
+dev_priv->mem_freq = 800;
+break;
+}
+}
+
 /**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
@@ -1098,6 +1133,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 struct drm_i915_private *dev_priv = dev->dev_private;
 resource_size_t base, size;
 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+uint32_t agp_size, prealloc_size;

 /* i915 has 4 more counters */
 dev->counters += 4;
@@ -1146,9 +1182,29 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 "performance may suffer.\n");
 }

+ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+if (ret)
+goto out_iomapfree;
+
+dev_priv->wq = create_workqueue("i915");
+if (dev_priv->wq == NULL) {
+DRM_ERROR("Failed to create our workqueue.\n");
+ret = -ENOMEM;
+goto out_iomapfree;
+}
+
 /* enable GEM by default */
 dev_priv->has_gem = 1;

+if (prealloc_size > agp_size * 3 / 4) {
+DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
+"memory stolen.\n",
+prealloc_size / 1024, agp_size / 1024);
+DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
+"updating the BIOS to fix).\n");
+dev_priv->has_gem = 0;
+}
+
 dev->driver->get_vblank_counter = i915_get_vblank_counter;
 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 if (IS_G4X(dev) || IS_IGDNG(dev)) {
@@ -1162,9 +1218,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 if (!I915_NEED_GFX_HWS(dev)) {
 ret = i915_init_phys_hws(dev);
 if (ret != 0)
-goto out_iomapfree;
+goto out_workqueue_free;
 }

+i915_get_mem_freq(dev);
+
 /* On the 945G/GM, the chipset reports the MSI capability on the
 * integrated graphics even though the support isn't actually there
 * according to the published specs. It doesn't appear to function
@@ -1180,6 +1238,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 pci_enable_msi(dev->pdev);

 spin_lock_init(&dev_priv->user_irq_lock);
+spin_lock_init(&dev_priv->error_lock);
 dev_priv->user_irq_refcount = 0;

 ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1190,10 +1249,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 }

 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-ret = i915_load_modeset_init(dev);
+ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
 if (ret < 0) {
 DRM_ERROR("failed to init modeset\n");
-goto out_rmmap;
+goto out_workqueue_free;
 }
 }

@@ -1204,6 +1263,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

 return 0;

+out_workqueue_free:
+destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
@@ -1217,6 +1278,8 @@ int i915_driver_unload(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;

+destroy_workqueue(dev_priv->wq);
+
 io_mapping_free(dev_priv->mm.gtt_mapping);
 if (dev_priv->mm.gtt_mtrr >= 0) {
 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
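
i915 now creates its own workqueue at load time, tears it down on unload (and on the new out_workqueue_free error path), and the GEM changes further down queue retirement work there instead of on the shared kernel-wide queue. A minimal sketch of that pattern with the pre-2.6.36 workqueue API used here; the example_* names are illustrative, not i915 symbols:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;
    static struct delayed_work example_retire;

    static void example_retire_fn(struct work_struct *work)
    {
            /* do the deferred processing, then re-arm ourselves */
            queue_delayed_work(example_wq, &example_retire, HZ);
    }

    static int example_load(void)
    {
            example_wq = create_workqueue("example");
            if (!example_wq)
                    return -ENOMEM;

            INIT_DELAYED_WORK(&example_retire, example_retire_fn);
            queue_delayed_work(example_wq, &example_retire, HZ);
            return 0;
    }

    static void example_unload(void)
    {
            cancel_delayed_work_sync(&example_retire);
            destroy_workqueue(example_wq);
    }
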
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98560e1e899a..fc4b68aa2d05 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,6 +35,7 @@

 #include "drm_pciids.h"
 #include <linux/console.h>
+#include "drm_crtc_helper.h"

 static unsigned int i915_modeset = -1;
 module_param_named(modeset, i915_modeset, int, 0400);
@@ -57,8 +58,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 struct drm_i915_private *dev_priv = dev->dev_private;

 if (!dev || !dev_priv) {
-printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
-printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
+DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
+DRM_ERROR("DRM not initialized, aborting suspend.\n");
 return -ENODEV;
 }

@@ -67,8 +68,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)

 pci_save_state(dev->pdev);

-i915_save_state(dev);
-
 /* If KMS is active, we do the leavevt stuff here */
 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 if (i915_gem_idle(dev))
@@ -77,6 +76,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 drm_irq_uninstall(dev);
 }

+i915_save_state(dev);
+
 intel_opregion_free(dev, 1);

 if (state.event == PM_EVENT_SUSPEND) {
@@ -115,6 +116,10 @@ static int i915_resume(struct drm_device *dev)

 drm_irq_install(dev);
 }
+if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+/* Resume the modeset for every activated CRTC */
+drm_helper_resume_force_mode(dev);
+}

 return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a84f04e8439..7537f57d8a87 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -133,6 +133,22 @@ struct sdvo_device_mapping {
 u8 initialized;
 };

+struct drm_i915_error_state {
+u32 eir;
+u32 pgtbl_er;
+u32 pipeastat;
+u32 pipebstat;
+u32 ipeir;
+u32 ipehr;
+u32 instdone;
+u32 acthd;
+u32 instpm;
+u32 instps;
+u32 instdone1;
+u32 seqno;
+struct timeval time;
+};
+
 typedef struct drm_i915_private {
 struct drm_device *dev;

@@ -203,12 +219,20 @@ typedef struct drm_i915_private {
 unsigned int lvds_vbt:1;
 unsigned int int_crt_support:1;
 unsigned int lvds_use_ssc:1;
+unsigned int edp_support:1;
 int lvds_ssc_freq;

 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 int num_fence_regs; /* 8 on pre-965, 16 otherwise */

+unsigned int fsb_freq, mem_freq;
+
+spinlock_t error_lock;
+struct drm_i915_error_state *first_error;
+struct work_struct error_work;
+struct workqueue_struct *wq;
+
 /* Register state */
 u8 saveLBB;
 u32 saveDSPACNTR;
@@ -306,6 +330,17 @@ typedef struct drm_i915_private {
 u32 saveCURBPOS;
 u32 saveCURBBASE;
 u32 saveCURSIZE;
+u32 saveDP_B;
+u32 saveDP_C;
+u32 saveDP_D;
+u32 savePIPEA_GMCH_DATA_M;
+u32 savePIPEB_GMCH_DATA_M;
+u32 savePIPEA_GMCH_DATA_N;
+u32 savePIPEB_GMCH_DATA_N;
+u32 savePIPEA_DP_LINK_M;
+u32 savePIPEB_DP_LINK_M;
+u32 savePIPEA_DP_LINK_N;
+u32 savePIPEB_DP_LINK_N;

 struct {
 struct drm_mm gtt_space;
@@ -457,9 +492,6 @@ struct drm_i915_gem_object {
 */
 int fence_reg;

-/** Boolean whether this object has a valid gtt offset. */
-int gtt_bound;
-
 /** How many users have pinned this object in GTT space */
 int pin_count;

@@ -644,6 +676,7 @@ void i915_gem_free_object(struct drm_gem_object *obj);
 int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
 void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
+void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 uint32_t i915_get_gem_seqno(struct drm_device *dev);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
@@ -857,7 +890,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
 IS_I915GM(dev)))
 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
 #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))

 #define PRIMARY_RINGBUFFER_SIZE (128*1024)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fd2b8bdffe3f..140bee142fc2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1006,7 +1006,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1006 | 1006 | ||
1007 | mutex_lock(&dev->struct_mutex); | 1007 | mutex_lock(&dev->struct_mutex); |
1008 | #if WATCH_BUF | 1008 | #if WATCH_BUF |
1009 | DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", | 1009 | DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n", |
1010 | obj, obj->size, read_domains, write_domain); | 1010 | obj, obj->size, read_domains, write_domain); |
1011 | #endif | 1011 | #endif |
1012 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 1012 | if (read_domains & I915_GEM_DOMAIN_GTT) { |
@@ -1050,7 +1050,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | #if WATCH_BUF | 1052 | #if WATCH_BUF |
1053 | DRM_INFO("%s: sw_finish %d (%p %d)\n", | 1053 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", |
1054 | __func__, args->handle, obj, obj->size); | 1054 | __func__, args->handle, obj, obj->size); |
1055 | #endif | 1055 | #endif |
1056 | obj_priv = obj->driver_private; | 1056 | obj_priv = obj->driver_private; |
@@ -1252,6 +1252,31 @@ out_free_list: | |||
1252 | return ret; | 1252 | return ret; |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | /** | ||
1256 | * i915_gem_release_mmap - remove physical page mappings | ||
1257 | * @obj: obj in question | ||
1258 | * | ||
1259 | * Preserve the reservation of the mmaping with the DRM core code, but | ||
1260 | * relinquish ownership of the pages back to the system. | ||
1261 | * | ||
1262 | * It is vital that we remove the page mapping if we have mapped a tiled | ||
1263 | * object through the GTT and then lose the fence register due to | ||
1264 | * resource pressure. Similarly if the object has been moved out of the | ||
1265 | * aperture, than pages mapped into userspace must be revoked. Removing the | ||
1266 | * mapping will then trigger a page fault on the next user access, allowing | ||
1267 | * fixup by i915_gem_fault(). | ||
1268 | */ | ||
1269 | void | ||
1270 | i915_gem_release_mmap(struct drm_gem_object *obj) | ||
1271 | { | ||
1272 | struct drm_device *dev = obj->dev; | ||
1273 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1274 | |||
1275 | if (dev->dev_mapping) | ||
1276 | unmap_mapping_range(dev->dev_mapping, | ||
1277 | obj_priv->mmap_offset, obj->size, 1); | ||
1278 | } | ||
1279 | |||
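To make the re-fault behaviour described above concrete, here is a minimal userspace sketch, not part of this patch, that maps a GEM object through the GTT. The handle, size, and error handling are placeholder assumptions; the only point illustrated is that a mapping obtained this way transparently faults back in through i915_gem_fault() after i915_gem_release_mmap() zaps its PTEs.

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Illustrative only: map an existing GEM object (identified by 'handle')
 * of 'size' bytes through the GTT on an open DRM file descriptor. */
static void *map_object_through_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) != 0)
		return MAP_FAILED;

	/* Accesses through the returned pointer fault into i915_gem_fault();
	 * if the kernel later revokes the mapping with i915_gem_release_mmap()
	 * (fence stolen, tiling changed, object unbound), the next access
	 * simply faults again and is fixed up with the new state. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}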
1255 | static void | 1280 | static void |
1256 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 1281 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) |
1257 | { | 1282 | { |
@@ -1545,7 +1570,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1545 | } | 1570 | } |
1546 | 1571 | ||
1547 | if (was_empty && !dev_priv->mm.suspended) | 1572 | if (was_empty && !dev_priv->mm.suspended) |
1548 | schedule_delayed_work(&dev_priv->mm.retire_work, HZ); | 1573 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
1549 | return seqno; | 1574 | return seqno; |
1550 | } | 1575 | } |
1551 | 1576 | ||
@@ -1694,7 +1719,7 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1694 | i915_gem_retire_requests(dev); | 1719 | i915_gem_retire_requests(dev); |
1695 | if (!dev_priv->mm.suspended && | 1720 | if (!dev_priv->mm.suspended && |
1696 | !list_empty(&dev_priv->mm.request_list)) | 1721 | !list_empty(&dev_priv->mm.request_list)) |
1697 | schedule_delayed_work(&dev_priv->mm.retire_work, HZ); | 1722 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
1698 | mutex_unlock(&dev->struct_mutex); | 1723 | mutex_unlock(&dev->struct_mutex); |
1699 | } | 1724 | } |
1700 | 1725 | ||
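The retire and hotplug work above now target a driver-private workqueue, dev_priv->wq, whose creation is not shown in these hunks. A hedged sketch of how such a queue is typically set up and torn down follows; the helper names are illustrative and assume <linux/workqueue.h>, while create_singlethread_workqueue() and destroy_workqueue() are the standard kernel APIs.

/* Hypothetical sketch: create the private workqueue that
 * queue_delayed_work()/queue_work() target above, and tear it down again. */
static int i915_workqueue_init_sketch(drm_i915_private_t *dev_priv)
{
	dev_priv->wq = create_singlethread_workqueue("i915");
	if (dev_priv->wq == NULL)
		return -ENOMEM;
	return 0;
}

static void i915_workqueue_fini_sketch(drm_i915_private_t *dev_priv)
{
	/* destroy_workqueue() flushes any work still pending on the queue */
	destroy_workqueue(dev_priv->wq);
}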
@@ -1861,7 +1886,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1861 | { | 1886 | { |
1862 | struct drm_device *dev = obj->dev; | 1887 | struct drm_device *dev = obj->dev; |
1863 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1888 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
1864 | loff_t offset; | ||
1865 | int ret = 0; | 1889 | int ret = 0; |
1866 | 1890 | ||
1867 | #if WATCH_BUF | 1891 | #if WATCH_BUF |
@@ -1898,9 +1922,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1898 | BUG_ON(obj_priv->active); | 1922 | BUG_ON(obj_priv->active); |
1899 | 1923 | ||
1900 | /* blow away mappings if mapped through GTT */ | 1924 | /* blow away mappings if mapped through GTT */ |
1901 | offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT; | 1925 | i915_gem_release_mmap(obj); |
1902 | if (dev->dev_mapping) | ||
1903 | unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1); | ||
1904 | 1926 | ||
1905 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 1927 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) |
1906 | i915_gem_clear_fence_reg(obj); | 1928 | i915_gem_clear_fence_reg(obj); |
@@ -2222,7 +2244,6 @@ try_again: | |||
2222 | /* None available, try to steal one or wait for a user to finish */ | 2244 | /* None available, try to steal one or wait for a user to finish */ |
2223 | if (i == dev_priv->num_fence_regs) { | 2245 | if (i == dev_priv->num_fence_regs) { |
2224 | uint32_t seqno = dev_priv->mm.next_gem_seqno; | 2246 | uint32_t seqno = dev_priv->mm.next_gem_seqno; |
2225 | loff_t offset; | ||
2226 | 2247 | ||
2227 | if (avail == 0) | 2248 | if (avail == 0) |
2228 | return -ENOSPC; | 2249 | return -ENOSPC; |
@@ -2274,10 +2295,7 @@ try_again: | |||
2274 | * Zap this virtual mapping so we can set up a fence again | 2295 | * Zap this virtual mapping so we can set up a fence again |
2275 | * for this object next time we need it. | 2296 | * for this object next time we need it. |
2276 | */ | 2297 | */ |
2277 | offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT; | 2298 | i915_gem_release_mmap(reg->obj); |
2278 | if (dev->dev_mapping) | ||
2279 | unmap_mapping_range(dev->dev_mapping, offset, | ||
2280 | reg->obj->size, 1); | ||
2281 | old_obj_priv->fence_reg = I915_FENCE_REG_NONE; | 2299 | old_obj_priv->fence_reg = I915_FENCE_REG_NONE; |
2282 | } | 2300 | } |
2283 | 2301 | ||
@@ -2423,7 +2441,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2423 | } | 2441 | } |
2424 | 2442 | ||
2425 | #if WATCH_BUF | 2443 | #if WATCH_BUF |
2426 | DRM_INFO("Binding object of size %d at 0x%08x\n", | 2444 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2427 | obj->size, obj_priv->gtt_offset); | 2445 | obj->size, obj_priv->gtt_offset); |
2428 | #endif | 2446 | #endif |
2429 | ret = i915_gem_object_get_pages(obj); | 2447 | ret = i915_gem_object_get_pages(obj); |
@@ -4227,6 +4245,7 @@ i915_gem_lastclose(struct drm_device *dev) | |||
4227 | void | 4245 | void |
4228 | i915_gem_load(struct drm_device *dev) | 4246 | i915_gem_load(struct drm_device *dev) |
4229 | { | 4247 | { |
4248 | int i; | ||
4230 | drm_i915_private_t *dev_priv = dev->dev_private; | 4249 | drm_i915_private_t *dev_priv = dev->dev_private; |
4231 | 4250 | ||
4232 | spin_lock_init(&dev_priv->mm.active_list_lock); | 4251 | spin_lock_init(&dev_priv->mm.active_list_lock); |
@@ -4246,6 +4265,18 @@ i915_gem_load(struct drm_device *dev) | |||
4246 | else | 4265 | else |
4247 | dev_priv->num_fence_regs = 8; | 4266 | dev_priv->num_fence_regs = 8; |
4248 | 4267 | ||
4268 | /* Initialize fence registers to zero */ | ||
4269 | if (IS_I965G(dev)) { | ||
4270 | for (i = 0; i < 16; i++) | ||
4271 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); | ||
4272 | } else { | ||
4273 | for (i = 0; i < 8; i++) | ||
4274 | I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); | ||
4275 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
4276 | for (i = 0; i < 8; i++) | ||
4277 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); | ||
4278 | } | ||
4279 | |||
4249 | i915_gem_detect_bit_6_swizzle(dev); | 4280 | i915_gem_detect_bit_6_swizzle(dev); |
4250 | } | 4281 | } |
4251 | 4282 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 8d0b943e2c5a..e602614bd3f8 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, | |||
87 | chunk_len = page_len - chunk; | 87 | chunk_len = page_len - chunk; |
88 | if (chunk_len > 128) | 88 | if (chunk_len > 128) |
89 | chunk_len = 128; | 89 | chunk_len = 128; |
90 | i915_gem_dump_page(obj_priv->page_list[page], | 90 | i915_gem_dump_page(obj_priv->pages[page], |
91 | chunk, chunk + chunk_len, | 91 | chunk, chunk + chunk_len, |
92 | obj_priv->gtt_offset + | 92 | obj_priv->gtt_offset + |
93 | page * PAGE_SIZE, | 93 | page * PAGE_SIZE, |
@@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
143 | uint32_t *backing_map = NULL; | 143 | uint32_t *backing_map = NULL; |
144 | int bad_count = 0; | 144 | int bad_count = 0; |
145 | 145 | ||
146 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", | 146 | DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", |
147 | __func__, obj, obj_priv->gtt_offset, handle, | 147 | __func__, obj, obj_priv->gtt_offset, handle, |
148 | obj->size / 1024); | 148 | obj->size / 1024); |
149 | 149 | ||
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | |||
157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { | 157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { |
158 | int i; | 158 | int i; |
159 | 159 | ||
160 | backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); | 160 | backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); |
161 | 161 | ||
162 | if (backing_map == NULL) { | 162 | if (backing_map == NULL) { |
163 | DRM_ERROR("failed to map backing page\n"); | 163 | DRM_ERROR("failed to map backing page\n"); |
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 28146e405e87..cb3b97405fbf 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c | |||
@@ -75,11 +75,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
75 | case ACTIVE_LIST: | 75 | case ACTIVE_LIST: |
76 | seq_printf(m, "Active:\n"); | 76 | seq_printf(m, "Active:\n"); |
77 | lock = &dev_priv->mm.active_list_lock; | 77 | lock = &dev_priv->mm.active_list_lock; |
78 | spin_lock(lock); | ||
79 | head = &dev_priv->mm.active_list; | 78 | head = &dev_priv->mm.active_list; |
80 | break; | 79 | break; |
81 | case INACTIVE_LIST: | 80 | case INACTIVE_LIST: |
82 | seq_printf(m, "Inctive:\n"); | 81 | seq_printf(m, "Inactive:\n"); |
83 | head = &dev_priv->mm.inactive_list; | 82 | head = &dev_priv->mm.inactive_list; |
84 | break; | 83 | break; |
85 | case FLUSHING_LIST: | 84 | case FLUSHING_LIST: |
@@ -91,6 +90,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
91 | return 0; | 90 | return 0; |
92 | } | 91 | } |
93 | 92 | ||
93 | if (lock) | ||
94 | spin_lock(lock); | ||
94 | list_for_each_entry(obj_priv, head, list) | 95 | list_for_each_entry(obj_priv, head, list) |
95 | { | 96 | { |
96 | struct drm_gem_object *obj = obj_priv->obj; | 97 | struct drm_gem_object *obj = obj_priv->obj; |
@@ -104,7 +105,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
104 | if (obj->name) | 105 | if (obj->name) |
105 | seq_printf(m, " (name: %d)", obj->name); | 106 | seq_printf(m, " (name: %d)", obj->name); |
106 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 107 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) |
107 | seq_printf(m, " (fence: %d)\n", obj_priv->fence_reg); | 108 | seq_printf(m, " (fence: %d)", obj_priv->fence_reg); |
109 | if (obj_priv->gtt_space != NULL) | ||
110 | seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset); | ||
111 | |||
108 | seq_printf(m, "\n"); | 112 | seq_printf(m, "\n"); |
109 | } | 113 | } |
110 | 114 | ||
@@ -323,6 +327,41 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) | |||
323 | return 0; | 327 | return 0; |
324 | } | 328 | } |
325 | 329 | ||
330 | static int i915_error_state(struct seq_file *m, void *unused) | ||
331 | { | ||
332 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
333 | struct drm_device *dev = node->minor->dev; | ||
334 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
335 | struct drm_i915_error_state *error; | ||
336 | unsigned long flags; | ||
337 | |||
338 | spin_lock_irqsave(&dev_priv->error_lock, flags); | ||
339 | if (!dev_priv->first_error) { | ||
340 | seq_printf(m, "no error state collected\n"); | ||
341 | goto out; | ||
342 | } | ||
343 | |||
344 | error = dev_priv->first_error; | ||
345 | |||
346 | seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, | ||
347 | error->time.tv_usec); | ||
348 | seq_printf(m, "EIR: 0x%08x\n", error->eir); | ||
349 | seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); | ||
350 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); | ||
351 | seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); | ||
352 | seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); | ||
353 | seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); | ||
354 | seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); | ||
355 | if (IS_I965G(dev)) { | ||
356 | seq_printf(m, " INSTPS: 0x%08x\n", error->instps); | ||
357 | seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); | ||
358 | } | ||
359 | |||
360 | out: | ||
361 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | ||
362 | |||
363 | return 0; | ||
364 | } | ||
326 | 365 | ||
327 | static struct drm_info_list i915_gem_debugfs_list[] = { | 366 | static struct drm_info_list i915_gem_debugfs_list[] = { |
328 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | 367 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, |
@@ -336,6 +375,7 @@ static struct drm_info_list i915_gem_debugfs_list[] = { | |||
336 | {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, | 375 | {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, |
337 | {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, | 376 | {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, |
338 | {"i915_batchbuffers", i915_batchbuffer_info, 0}, | 377 | {"i915_batchbuffers", i915_batchbuffer_info, 0}, |
378 | {"i915_error_state", i915_error_state, 0}, | ||
339 | }; | 379 | }; |
340 | #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) | 380 | #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) |
341 | 381 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 5c1ceec49f5b..a2d527b22ec4 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev) | |||
114 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | 114 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; |
115 | 115 | ||
116 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | 116 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ |
117 | #ifdef CONFIG_PNP | ||
117 | if (mchbar_addr && | 118 | if (mchbar_addr && |
118 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { | 119 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { |
119 | ret = 0; | 120 | ret = 0; |
120 | goto out_put; | 121 | goto out_put; |
121 | } | 122 | } |
123 | #endif | ||
122 | 124 | ||
123 | /* Get some space for it */ | 125 | /* Get some space for it */ |
124 | ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, | 126 | ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, |
@@ -519,6 +521,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
519 | goto err; | 521 | goto err; |
520 | } | 522 | } |
521 | 523 | ||
524 | /* If we've changed tiling, GTT mappings of the object | ||
525 | * need to re-fault to ensure that the correct fence register | ||
526 | * setup is in place. | ||
527 | */ | ||
528 | i915_gem_release_mmap(obj); | ||
529 | |||
522 | obj_priv->tiling_mode = args->tiling_mode; | 530 | obj_priv->tiling_mode = args->tiling_mode; |
523 | obj_priv->stride = args->stride; | 531 | obj_priv->stride = args->stride; |
524 | } | 532 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b86b7b7130c6..83aee80e77a6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -26,6 +26,7 @@ | |||
26 | * | 26 | * |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #include <linux/sysrq.h> | ||
29 | #include "drmP.h" | 30 | #include "drmP.h" |
30 | #include "drm.h" | 31 | #include "drm.h" |
31 | #include "i915_drm.h" | 32 | #include "i915_drm.h" |
@@ -41,9 +42,10 @@ | |||
41 | * we leave them always unmasked in IMR and then control enabling them through | 42 | * we leave them always unmasked in IMR and then control enabling them through |
42 | * PIPESTAT alone. | 43 | * PIPESTAT alone. |
43 | */ | 44 | */ |
44 | #define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ | 45 | #define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ |
45 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ | 46 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ |
46 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) | 47 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ |
48 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | ||
47 | 49 | ||
48 | /** Interrupts that we mask and unmask at runtime. */ | 50 | /** Interrupts that we mask and unmask at runtime. */ |
49 | #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) | 51 | #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) |
@@ -232,7 +234,17 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
232 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 234 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
233 | hotplug_work); | 235 | hotplug_work); |
234 | struct drm_device *dev = dev_priv->dev; | 236 | struct drm_device *dev = dev_priv->dev; |
235 | 237 | struct drm_mode_config *mode_config = &dev->mode_config; | |
238 | struct drm_connector *connector; | ||
239 | |||
240 | if (mode_config->num_connector) { | ||
241 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
242 | struct intel_output *intel_output = to_intel_output(connector); | ||
243 | |||
244 | if (intel_output->hot_plug) | ||
245 | (*intel_output->hot_plug) (intel_output); | ||
246 | } | ||
247 | } | ||
236 | /* Just fire off a uevent and let userspace tell us what to do */ | 248 | /* Just fire off a uevent and let userspace tell us what to do */ |
237 | drm_sysfs_hotplug_event(dev); | 249 | drm_sysfs_hotplug_event(dev); |
238 | } | 250 | } |
@@ -278,6 +290,201 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) | |||
278 | return ret; | 290 | return ret; |
279 | } | 291 | } |
280 | 292 | ||
293 | /** | ||
294 | * i915_error_work_func - do process context error handling work | ||
295 | * @work: work struct | ||
296 | * | ||
297 | * Fire an error uevent so userspace can see that a hang or error | ||
298 | * was detected. | ||
299 | */ | ||
300 | static void i915_error_work_func(struct work_struct *work) | ||
301 | { | ||
302 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | ||
303 | error_work); | ||
304 | struct drm_device *dev = dev_priv->dev; | ||
305 | char *event_string = "ERROR=1"; | ||
306 | char *envp[] = { event_string, NULL }; | ||
307 | |||
308 | DRM_DEBUG("generating error event\n"); | ||
309 | |||
310 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); | ||
311 | } | ||
312 | |||
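Userspace can watch for the KOBJ_CHANGE uevent carrying ERROR=1 in several ways; one sketch using libudev is shown below. The libudev calls are real, but matching on the "drm" subsystem and the "ERROR" property name reflects this patch's uevent rather than a documented stable ABI, so treat it as illustrative only.

#include <stdio.h>
#include <poll.h>
#include <libudev.h>

/* Illustrative only: wait for a drm device to report ERROR=1, then let
 * userspace collect whatever it wants (e.g. the debugfs error state). */
int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon;
	struct pollfd pfd;

	if (udev == NULL)
		return 1;
	mon = udev_monitor_new_from_netlink(udev, "udev");
	if (mon == NULL)
		return 1;
	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;
		const char *err;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (dev == NULL)
			continue;
		err = udev_device_get_property_value(dev, "ERROR");
		if (err != NULL && err[0] == '1')
			printf("GPU error reported on %s\n",
			       udev_device_get_sysname(dev));
		udev_device_unref(dev);
	}
	return 0;
}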
313 | /** | ||
314 | * i915_capture_error_state - capture an error record for later analysis | ||
315 | * @dev: drm device | ||
316 | * | ||
317 | * Should be called when an error is detected (either a hang or an error | ||
318 | * interrupt) to capture error state from the time of the error. Fills | ||
319 | * out a structure which becomes available in debugfs for user level tools | ||
320 | * to pick up. | ||
321 | */ | ||
322 | static void i915_capture_error_state(struct drm_device *dev) | ||
323 | { | ||
324 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
325 | struct drm_i915_error_state *error; | ||
326 | unsigned long flags; | ||
327 | |||
328 | spin_lock_irqsave(&dev_priv->error_lock, flags); | ||
329 | if (dev_priv->first_error) | ||
330 | goto out; | ||
331 | |||
332 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | ||
333 | if (!error) { | ||
334 | DRM_DEBUG("out ot memory, not capturing error state\n"); | ||
335 | goto out; | ||
336 | } | ||
337 | |||
338 | error->eir = I915_READ(EIR); | ||
339 | error->pgtbl_er = I915_READ(PGTBL_ER); | ||
340 | error->pipeastat = I915_READ(PIPEASTAT); | ||
341 | error->pipebstat = I915_READ(PIPEBSTAT); | ||
342 | error->instpm = I915_READ(INSTPM); | ||
343 | if (!IS_I965G(dev)) { | ||
344 | error->ipeir = I915_READ(IPEIR); | ||
345 | error->ipehr = I915_READ(IPEHR); | ||
346 | error->instdone = I915_READ(INSTDONE); | ||
347 | error->acthd = I915_READ(ACTHD); | ||
348 | } else { | ||
349 | error->ipeir = I915_READ(IPEIR_I965); | ||
350 | error->ipehr = I915_READ(IPEHR_I965); | ||
351 | error->instdone = I915_READ(INSTDONE_I965); | ||
352 | error->instps = I915_READ(INSTPS); | ||
353 | error->instdone1 = I915_READ(INSTDONE1); | ||
354 | error->acthd = I915_READ(ACTHD_I965); | ||
355 | } | ||
356 | |||
357 | do_gettimeofday(&error->time); | ||
358 | |||
359 | dev_priv->first_error = error; | ||
360 | |||
361 | out: | ||
362 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | ||
363 | } | ||
364 | |||
365 | /** | ||
366 | * i915_handle_error - handle an error interrupt | ||
367 | * @dev: drm device | ||
368 | * | ||
369 | * Do some basic checking of register state at error interrupt time and | ||
370 | * dump it to the syslog. Also call i915_capture_error_state() to make | ||
371 | * sure we get a record and make it available in debugfs. Fire a uevent | ||
372 | * so userspace knows something bad happened (should trigger collection | ||
373 | * of a ring dump etc.). | ||
374 | */ | ||
375 | static void i915_handle_error(struct drm_device *dev) | ||
376 | { | ||
377 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
378 | u32 eir = I915_READ(EIR); | ||
379 | u32 pipea_stats = I915_READ(PIPEASTAT); | ||
380 | u32 pipeb_stats = I915_READ(PIPEBSTAT); | ||
381 | |||
382 | i915_capture_error_state(dev); | ||
383 | |||
384 | printk(KERN_ERR "render error detected, EIR: 0x%08x\n", | ||
385 | eir); | ||
386 | |||
387 | if (IS_G4X(dev)) { | ||
388 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | ||
389 | u32 ipeir = I915_READ(IPEIR_I965); | ||
390 | |||
391 | printk(KERN_ERR " IPEIR: 0x%08x\n", | ||
392 | I915_READ(IPEIR_I965)); | ||
393 | printk(KERN_ERR " IPEHR: 0x%08x\n", | ||
394 | I915_READ(IPEHR_I965)); | ||
395 | printk(KERN_ERR " INSTDONE: 0x%08x\n", | ||
396 | I915_READ(INSTDONE_I965)); | ||
397 | printk(KERN_ERR " INSTPS: 0x%08x\n", | ||
398 | I915_READ(INSTPS)); | ||
399 | printk(KERN_ERR " INSTDONE1: 0x%08x\n", | ||
400 | I915_READ(INSTDONE1)); | ||
401 | printk(KERN_ERR " ACTHD: 0x%08x\n", | ||
402 | I915_READ(ACTHD_I965)); | ||
403 | I915_WRITE(IPEIR_I965, ipeir); | ||
404 | (void)I915_READ(IPEIR_I965); | ||
405 | } | ||
406 | if (eir & GM45_ERROR_PAGE_TABLE) { | ||
407 | u32 pgtbl_err = I915_READ(PGTBL_ER); | ||
408 | printk(KERN_ERR "page table error\n"); | ||
409 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | ||
410 | pgtbl_err); | ||
411 | I915_WRITE(PGTBL_ER, pgtbl_err); | ||
412 | (void)I915_READ(PGTBL_ER); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | if (IS_I9XX(dev)) { | ||
417 | if (eir & I915_ERROR_PAGE_TABLE) { | ||
418 | u32 pgtbl_err = I915_READ(PGTBL_ER); | ||
419 | printk(KERN_ERR "page table error\n"); | ||
420 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | ||
421 | pgtbl_err); | ||
422 | I915_WRITE(PGTBL_ER, pgtbl_err); | ||
423 | (void)I915_READ(PGTBL_ER); | ||
424 | } | ||
425 | } | ||
426 | |||
427 | if (eir & I915_ERROR_MEMORY_REFRESH) { | ||
428 | printk(KERN_ERR "memory refresh error\n"); | ||
429 | printk(KERN_ERR "PIPEASTAT: 0x%08x\n", | ||
430 | pipea_stats); | ||
431 | printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", | ||
432 | pipeb_stats); | ||
433 | /* pipestat has already been acked */ | ||
434 | } | ||
435 | if (eir & I915_ERROR_INSTRUCTION) { | ||
436 | printk(KERN_ERR "instruction error\n"); | ||
437 | printk(KERN_ERR " INSTPM: 0x%08x\n", | ||
438 | I915_READ(INSTPM)); | ||
439 | if (!IS_I965G(dev)) { | ||
440 | u32 ipeir = I915_READ(IPEIR); | ||
441 | |||
442 | printk(KERN_ERR " IPEIR: 0x%08x\n", | ||
443 | I915_READ(IPEIR)); | ||
444 | printk(KERN_ERR " IPEHR: 0x%08x\n", | ||
445 | I915_READ(IPEHR)); | ||
446 | printk(KERN_ERR " INSTDONE: 0x%08x\n", | ||
447 | I915_READ(INSTDONE)); | ||
448 | printk(KERN_ERR " ACTHD: 0x%08x\n", | ||
449 | I915_READ(ACTHD)); | ||
450 | I915_WRITE(IPEIR, ipeir); | ||
451 | (void)I915_READ(IPEIR); | ||
452 | } else { | ||
453 | u32 ipeir = I915_READ(IPEIR_I965); | ||
454 | |||
455 | printk(KERN_ERR " IPEIR: 0x%08x\n", | ||
456 | I915_READ(IPEIR_I965)); | ||
457 | printk(KERN_ERR " IPEHR: 0x%08x\n", | ||
458 | I915_READ(IPEHR_I965)); | ||
459 | printk(KERN_ERR " INSTDONE: 0x%08x\n", | ||
460 | I915_READ(INSTDONE_I965)); | ||
461 | printk(KERN_ERR " INSTPS: 0x%08x\n", | ||
462 | I915_READ(INSTPS)); | ||
463 | printk(KERN_ERR " INSTDONE1: 0x%08x\n", | ||
464 | I915_READ(INSTDONE1)); | ||
465 | printk(KERN_ERR " ACTHD: 0x%08x\n", | ||
466 | I915_READ(ACTHD_I965)); | ||
467 | I915_WRITE(IPEIR_I965, ipeir); | ||
468 | (void)I915_READ(IPEIR_I965); | ||
469 | } | ||
470 | } | ||
471 | |||
472 | I915_WRITE(EIR, eir); | ||
473 | (void)I915_READ(EIR); | ||
474 | eir = I915_READ(EIR); | ||
475 | if (eir) { | ||
476 | /* | ||
477 | * some errors might have become stuck, | ||
478 | * mask them. | ||
479 | */ | ||
480 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | ||
481 | I915_WRITE(EMR, I915_READ(EMR) | eir); | ||
482 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | ||
483 | } | ||
484 | |||
485 | queue_work(dev_priv->wq, &dev_priv->error_work); | ||
486 | } | ||
487 | |||
281 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 488 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
282 | { | 489 | { |
283 | struct drm_device *dev = (struct drm_device *) arg; | 490 | struct drm_device *dev = (struct drm_device *) arg; |
@@ -319,15 +526,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
319 | pipea_stats = I915_READ(PIPEASTAT); | 526 | pipea_stats = I915_READ(PIPEASTAT); |
320 | pipeb_stats = I915_READ(PIPEBSTAT); | 527 | pipeb_stats = I915_READ(PIPEBSTAT); |
321 | 528 | ||
529 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | ||
530 | i915_handle_error(dev); | ||
531 | |||
322 | /* | 532 | /* |
323 | * Clear the PIPE(A|B)STAT regs before the IIR | 533 | * Clear the PIPE(A|B)STAT regs before the IIR |
324 | */ | 534 | */ |
325 | if (pipea_stats & 0x8000ffff) { | 535 | if (pipea_stats & 0x8000ffff) { |
536 | if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) | ||
537 | DRM_DEBUG("pipe a underrun\n"); | ||
326 | I915_WRITE(PIPEASTAT, pipea_stats); | 538 | I915_WRITE(PIPEASTAT, pipea_stats); |
327 | irq_received = 1; | 539 | irq_received = 1; |
328 | } | 540 | } |
329 | 541 | ||
330 | if (pipeb_stats & 0x8000ffff) { | 542 | if (pipeb_stats & 0x8000ffff) { |
543 | if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) | ||
544 | DRM_DEBUG("pipe b underrun\n"); | ||
331 | I915_WRITE(PIPEBSTAT, pipeb_stats); | 545 | I915_WRITE(PIPEBSTAT, pipeb_stats); |
332 | irq_received = 1; | 546 | irq_received = 1; |
333 | } | 547 | } |
@@ -346,7 +560,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
346 | DRM_DEBUG("hotplug event received, stat 0x%08x\n", | 560 | DRM_DEBUG("hotplug event received, stat 0x%08x\n", |
347 | hotplug_status); | 561 | hotplug_status); |
348 | if (hotplug_status & dev_priv->hotplug_supported_mask) | 562 | if (hotplug_status & dev_priv->hotplug_supported_mask) |
349 | schedule_work(&dev_priv->hotplug_work); | 563 | queue_work(dev_priv->wq, |
564 | &dev_priv->hotplug_work); | ||
350 | 565 | ||
351 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 566 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
352 | I915_READ(PORT_HOTPLUG_STAT); | 567 | I915_READ(PORT_HOTPLUG_STAT); |
@@ -699,6 +914,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
699 | atomic_set(&dev_priv->irq_received, 0); | 914 | atomic_set(&dev_priv->irq_received, 0); |
700 | 915 | ||
701 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 916 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
917 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | ||
702 | 918 | ||
703 | if (IS_IGDNG(dev)) { | 919 | if (IS_IGDNG(dev)) { |
704 | igdng_irq_preinstall(dev); | 920 | igdng_irq_preinstall(dev); |
@@ -722,6 +938,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
722 | { | 938 | { |
723 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 939 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
724 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; | 940 | u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; |
941 | u32 error_mask; | ||
725 | 942 | ||
726 | DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); | 943 | DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); |
727 | 944 | ||
@@ -758,6 +975,21 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
758 | i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); | 975 | i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); |
759 | } | 976 | } |
760 | 977 | ||
978 | /* | ||
979 | * Enable some error detection; note that the instruction error mask | ||
980 | * bit is reserved, so we leave it masked. | ||
981 | */ | ||
982 | if (IS_G4X(dev)) { | ||
983 | error_mask = ~(GM45_ERROR_PAGE_TABLE | | ||
984 | GM45_ERROR_MEM_PRIV | | ||
985 | GM45_ERROR_CP_PRIV | | ||
986 | I915_ERROR_MEMORY_REFRESH); | ||
987 | } else { | ||
988 | error_mask = ~(I915_ERROR_PAGE_TABLE | | ||
989 | I915_ERROR_MEMORY_REFRESH); | ||
990 | } | ||
991 | I915_WRITE(EMR, error_mask); | ||
992 | |||
761 | /* Disable pipe interrupt enables, clear pending pipe status */ | 993 | /* Disable pipe interrupt enables, clear pending pipe status */ |
762 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); | 994 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); |
763 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); | 995 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f6237a0b1133..2955083aa471 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -206,6 +206,7 @@ | |||
206 | /* | 206 | /* |
207 | * Instruction and interrupt control regs | 207 | * Instruction and interrupt control regs |
208 | */ | 208 | */ |
209 | #define PGTBL_ER 0x02024 | ||
209 | #define PRB0_TAIL 0x02030 | 210 | #define PRB0_TAIL 0x02030 |
210 | #define PRB0_HEAD 0x02034 | 211 | #define PRB0_HEAD 0x02034 |
211 | #define PRB0_START 0x02038 | 212 | #define PRB0_START 0x02038 |
@@ -226,11 +227,18 @@ | |||
226 | #define PRB1_HEAD 0x02044 /* 915+ only */ | 227 | #define PRB1_HEAD 0x02044 /* 915+ only */ |
227 | #define PRB1_START 0x02048 /* 915+ only */ | 228 | #define PRB1_START 0x02048 /* 915+ only */ |
228 | #define PRB1_CTL 0x0204c /* 915+ only */ | 229 | #define PRB1_CTL 0x0204c /* 915+ only */ |
230 | #define IPEIR_I965 0x02064 | ||
231 | #define IPEHR_I965 0x02068 | ||
232 | #define INSTDONE_I965 0x0206c | ||
233 | #define INSTPS 0x02070 /* 965+ only */ | ||
234 | #define INSTDONE1 0x0207c /* 965+ only */ | ||
229 | #define ACTHD_I965 0x02074 | 235 | #define ACTHD_I965 0x02074 |
230 | #define HWS_PGA 0x02080 | 236 | #define HWS_PGA 0x02080 |
231 | #define HWS_ADDRESS_MASK 0xfffff000 | 237 | #define HWS_ADDRESS_MASK 0xfffff000 |
232 | #define HWS_START_ADDRESS_SHIFT 4 | 238 | #define HWS_START_ADDRESS_SHIFT 4 |
233 | #define IPEIR 0x02088 | 239 | #define IPEIR 0x02088 |
240 | #define IPEHR 0x0208c | ||
241 | #define INSTDONE 0x02090 | ||
234 | #define NOPID 0x02094 | 242 | #define NOPID 0x02094 |
235 | #define HWSTAM 0x02098 | 243 | #define HWSTAM 0x02098 |
236 | #define SCPD0 0x0209c /* 915+ only */ | 244 | #define SCPD0 0x0209c /* 915+ only */ |
@@ -258,10 +266,22 @@ | |||
258 | #define EIR 0x020b0 | 266 | #define EIR 0x020b0 |
259 | #define EMR 0x020b4 | 267 | #define EMR 0x020b4 |
260 | #define ESR 0x020b8 | 268 | #define ESR 0x020b8 |
269 | #define GM45_ERROR_PAGE_TABLE (1<<5) | ||
270 | #define GM45_ERROR_MEM_PRIV (1<<4) | ||
271 | #define I915_ERROR_PAGE_TABLE (1<<4) | ||
272 | #define GM45_ERROR_CP_PRIV (1<<3) | ||
273 | #define I915_ERROR_MEMORY_REFRESH (1<<1) | ||
274 | #define I915_ERROR_INSTRUCTION (1<<0) | ||
261 | #define INSTPM 0x020c0 | 275 | #define INSTPM 0x020c0 |
262 | #define ACTHD 0x020c8 | 276 | #define ACTHD 0x020c8 |
263 | #define FW_BLC 0x020d8 | 277 | #define FW_BLC 0x020d8 |
278 | #define FW_BLC2 0x020dc | ||
264 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ | 279 | #define FW_BLC_SELF 0x020e0 /* 915+ only */ |
280 | #define FW_BLC_SELF_EN (1<<15) | ||
281 | #define MM_BURST_LENGTH 0x00700000 | ||
282 | #define MM_FIFO_WATERMARK 0x0001F000 | ||
283 | #define LM_BURST_LENGTH 0x00000700 | ||
284 | #define LM_FIFO_WATERMARK 0x0000001F | ||
265 | #define MI_ARB_STATE 0x020e4 /* 915+ only */ | 285 | #define MI_ARB_STATE 0x020e4 /* 915+ only */ |
266 | #define CACHE_MODE_0 0x02120 /* 915+ only */ | 286 | #define CACHE_MODE_0 0x02120 /* 915+ only */ |
267 | #define CM0_MASK_SHIFT 16 | 287 | #define CM0_MASK_SHIFT 16 |
@@ -569,6 +589,23 @@ | |||
569 | #define C0DRB3 0x10206 | 589 | #define C0DRB3 0x10206 |
570 | #define C1DRB3 0x10606 | 590 | #define C1DRB3 0x10606 |
571 | 591 | ||
592 | /* Clocking configuration register */ | ||
593 | #define CLKCFG 0x10c00 | ||
594 | #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ | ||
595 | #define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ | ||
596 | #define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ | ||
597 | #define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ | ||
598 | #define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ | ||
599 | #define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ | ||
600 | /* Note: the two values below are guesses */ | ||
601 | #define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ | ||
602 | #define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ | ||
603 | #define CLKCFG_FSB_MASK (7 << 0) | ||
604 | #define CLKCFG_MEM_533 (1 << 4) | ||
605 | #define CLKCFG_MEM_667 (2 << 4) | ||
606 | #define CLKCFG_MEM_800 (3 << 4) | ||
607 | #define CLKCFG_MEM_MASK (7 << 4) | ||
608 | |||
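As a hedged illustration of how the CLKCFG definitions above are meant to be consumed, the helper below maps the FSB field onto the hrawclk frequencies noted in the comments; the function name and the 133 MHz fallback are assumptions, not part of this patch.

/* Illustrative decode of CLKCFG's FSB field into the hrawclk frequency
 * (in MHz) suggested by the comments above. */
static int clkcfg_to_hrawclk_mhz(u32 clkcfg)
{
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	case CLKCFG_FSB_1600:		/* the 1600 encodings are guesses, */
	case CLKCFG_FSB_1600_ALT:	/* as noted above */
		return 400;
	default:
		return 133;		/* assumed safe fallback */
	}
}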
572 | /** GM965 GM45 render standby register */ | 609 | /** GM965 GM45 render standby register */ |
573 | #define MCHBAR_RENDER_STANDBY 0x111B8 | 610 | #define MCHBAR_RENDER_STANDBY 0x111B8 |
574 | 611 | ||
@@ -834,9 +871,25 @@ | |||
834 | #define HORIZ_INTERP_MASK (3 << 6) | 871 | #define HORIZ_INTERP_MASK (3 << 6) |
835 | #define HORIZ_AUTO_SCALE (1 << 5) | 872 | #define HORIZ_AUTO_SCALE (1 << 5) |
836 | #define PANEL_8TO6_DITHER_ENABLE (1 << 3) | 873 | #define PANEL_8TO6_DITHER_ENABLE (1 << 3) |
874 | #define PFIT_FILTER_FUZZY (0 << 24) | ||
875 | #define PFIT_SCALING_AUTO (0 << 26) | ||
876 | #define PFIT_SCALING_PROGRAMMED (1 << 26) | ||
877 | #define PFIT_SCALING_PILLAR (2 << 26) | ||
878 | #define PFIT_SCALING_LETTER (3 << 26) | ||
837 | #define PFIT_PGM_RATIOS 0x61234 | 879 | #define PFIT_PGM_RATIOS 0x61234 |
838 | #define PFIT_VERT_SCALE_MASK 0xfff00000 | 880 | #define PFIT_VERT_SCALE_MASK 0xfff00000 |
839 | #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 | 881 | #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 |
882 | /* Pre-965 */ | ||
883 | #define PFIT_VERT_SCALE_SHIFT 20 | ||
884 | #define PFIT_VERT_SCALE_MASK 0xfff00000 | ||
885 | #define PFIT_HORIZ_SCALE_SHIFT 4 | ||
886 | #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 | ||
887 | /* 965+ */ | ||
888 | #define PFIT_VERT_SCALE_SHIFT_965 16 | ||
889 | #define PFIT_VERT_SCALE_MASK_965 0x1fff0000 | ||
890 | #define PFIT_HORIZ_SCALE_SHIFT_965 0 | ||
891 | #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff | ||
892 | |||
840 | #define PFIT_AUTO_RATIOS 0x61238 | 893 | #define PFIT_AUTO_RATIOS 0x61238 |
841 | 894 | ||
842 | /* Backlight control */ | 895 | /* Backlight control */ |
@@ -1342,6 +1395,7 @@ | |||
1342 | #define TV_V_CHROMA_42 0x684a8 | 1395 | #define TV_V_CHROMA_42 0x684a8 |
1343 | 1396 | ||
1344 | /* Display Port */ | 1397 | /* Display Port */ |
1398 | #define DP_A 0x64000 /* eDP */ | ||
1345 | #define DP_B 0x64100 | 1399 | #define DP_B 0x64100 |
1346 | #define DP_C 0x64200 | 1400 | #define DP_C 0x64200 |
1347 | #define DP_D 0x64300 | 1401 | #define DP_D 0x64300 |
@@ -1384,13 +1438,22 @@ | |||
1384 | /* Mystic DPCD version 1.1 special mode */ | 1438 | /* Mystic DPCD version 1.1 special mode */ |
1385 | #define DP_ENHANCED_FRAMING (1 << 18) | 1439 | #define DP_ENHANCED_FRAMING (1 << 18) |
1386 | 1440 | ||
1441 | /* eDP */ | ||
1442 | #define DP_PLL_FREQ_270MHZ (0 << 16) | ||
1443 | #define DP_PLL_FREQ_160MHZ (1 << 16) | ||
1444 | #define DP_PLL_FREQ_MASK (3 << 16) | ||
1445 | |||
1387 | /** locked once port is enabled */ | 1446 | /** locked once port is enabled */ |
1388 | #define DP_PORT_REVERSAL (1 << 15) | 1447 | #define DP_PORT_REVERSAL (1 << 15) |
1389 | 1448 | ||
1449 | /* eDP */ | ||
1450 | #define DP_PLL_ENABLE (1 << 14) | ||
1451 | |||
1390 | /** sends the clock on lane 15 of the PEG for debug */ | 1452 | /** sends the clock on lane 15 of the PEG for debug */ |
1391 | #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) | 1453 | #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) |
1392 | 1454 | ||
1393 | #define DP_SCRAMBLING_DISABLE (1 << 12) | 1455 | #define DP_SCRAMBLING_DISABLE (1 << 12) |
1456 | #define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7) | ||
1394 | 1457 | ||
1395 | /** limit RGB values to avoid confusing TVs */ | 1458 | /** limit RGB values to avoid confusing TVs */ |
1396 | #define DP_COLOR_RANGE_16_235 (1 << 8) | 1459 | #define DP_COLOR_RANGE_16_235 (1 << 8) |
@@ -1410,6 +1473,13 @@ | |||
1410 | * is 20 bytes in each direction, hence the 5 fixed | 1473 | * is 20 bytes in each direction, hence the 5 fixed |
1411 | * data registers | 1474 | * data registers |
1412 | */ | 1475 | */ |
1476 | #define DPA_AUX_CH_CTL 0x64010 | ||
1477 | #define DPA_AUX_CH_DATA1 0x64014 | ||
1478 | #define DPA_AUX_CH_DATA2 0x64018 | ||
1479 | #define DPA_AUX_CH_DATA3 0x6401c | ||
1480 | #define DPA_AUX_CH_DATA4 0x64020 | ||
1481 | #define DPA_AUX_CH_DATA5 0x64024 | ||
1482 | |||
1413 | #define DPB_AUX_CH_CTL 0x64110 | 1483 | #define DPB_AUX_CH_CTL 0x64110 |
1414 | #define DPB_AUX_CH_DATA1 0x64114 | 1484 | #define DPB_AUX_CH_DATA1 0x64114 |
1415 | #define DPB_AUX_CH_DATA2 0x64118 | 1485 | #define DPB_AUX_CH_DATA2 0x64118 |
@@ -1552,6 +1622,34 @@ | |||
1552 | #define DSPARB_CSTART_SHIFT 7 | 1622 | #define DSPARB_CSTART_SHIFT 7 |
1553 | #define DSPARB_BSTART_MASK (0x7f) | 1623 | #define DSPARB_BSTART_MASK (0x7f) |
1554 | #define DSPARB_BSTART_SHIFT 0 | 1624 | #define DSPARB_BSTART_SHIFT 0 |
1625 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ | ||
1626 | #define DSPARB_AEND_SHIFT 0 | ||
1627 | |||
1628 | #define DSPFW1 0x70034 | ||
1629 | #define DSPFW2 0x70038 | ||
1630 | #define DSPFW3 0x7003c | ||
1631 | #define IGD_SELF_REFRESH_EN (1<<30) | ||
1632 | |||
1633 | /* FIFO watermark sizes etc */ | ||
1634 | #define I915_FIFO_LINE_SIZE 64 | ||
1635 | #define I830_FIFO_LINE_SIZE 32 | ||
1636 | #define I945_FIFO_SIZE 127 /* 945 & 965 */ | ||
1637 | #define I915_FIFO_SIZE 95 | ||
1638 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ | ||
1639 | #define I830_FIFO_SIZE 95 | ||
1640 | #define I915_MAX_WM 0x3f | ||
1641 | |||
1642 | #define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ | ||
1643 | #define IGD_FIFO_LINE_SIZE 64 | ||
1644 | #define IGD_MAX_WM 0x1ff | ||
1645 | #define IGD_DFT_WM 0x3f | ||
1646 | #define IGD_DFT_HPLLOFF_WM 0 | ||
1647 | #define IGD_GUARD_WM 10 | ||
1648 | #define IGD_CURSOR_FIFO 64 | ||
1649 | #define IGD_CURSOR_MAX_WM 0x3f | ||
1650 | #define IGD_CURSOR_DFT_WM 0 | ||
1651 | #define IGD_CURSOR_GUARD_WM 5 | ||
1652 | |||
1555 | /* | 1653 | /* |
1556 | * The two pipe frame counter registers are not synchronized, so | 1654 | * The two pipe frame counter registers are not synchronized, so |
1557 | * reading a stable value is somewhat tricky. The following code | 1655 | * reading a stable value is somewhat tricky. The following code |
@@ -1767,6 +1865,8 @@ | |||
1767 | #define PFA_CTL_1 0x68080 | 1865 | #define PFA_CTL_1 0x68080 |
1768 | #define PFB_CTL_1 0x68880 | 1866 | #define PFB_CTL_1 0x68880 |
1769 | #define PF_ENABLE (1<<31) | 1867 | #define PF_ENABLE (1<<31) |
1868 | #define PFA_WIN_SZ 0x68074 | ||
1869 | #define PFB_WIN_SZ 0x68874 | ||
1770 | 1870 | ||
1771 | /* legacy palette */ | 1871 | /* legacy palette */ |
1772 | #define LGC_PALETTE_A 0x4a000 | 1872 | #define LGC_PALETTE_A 0x4a000 |
@@ -2127,4 +2227,28 @@ | |||
2127 | #define PCH_PP_OFF_DELAYS 0xc720c | 2227 | #define PCH_PP_OFF_DELAYS 0xc720c |
2128 | #define PCH_PP_DIVISOR 0xc7210 | 2228 | #define PCH_PP_DIVISOR 0xc7210 |
2129 | 2229 | ||
2230 | #define PCH_DP_B 0xe4100 | ||
2231 | #define PCH_DPB_AUX_CH_CTL 0xe4110 | ||
2232 | #define PCH_DPB_AUX_CH_DATA1 0xe4114 | ||
2233 | #define PCH_DPB_AUX_CH_DATA2 0xe4118 | ||
2234 | #define PCH_DPB_AUX_CH_DATA3 0xe411c | ||
2235 | #define PCH_DPB_AUX_CH_DATA4 0xe4120 | ||
2236 | #define PCH_DPB_AUX_CH_DATA5 0xe4124 | ||
2237 | |||
2238 | #define PCH_DP_C 0xe4200 | ||
2239 | #define PCH_DPC_AUX_CH_CTL 0xe4210 | ||
2240 | #define PCH_DPC_AUX_CH_DATA1 0xe4214 | ||
2241 | #define PCH_DPC_AUX_CH_DATA2 0xe4218 | ||
2242 | #define PCH_DPC_AUX_CH_DATA3 0xe421c | ||
2243 | #define PCH_DPC_AUX_CH_DATA4 0xe4220 | ||
2244 | #define PCH_DPC_AUX_CH_DATA5 0xe4224 | ||
2245 | |||
2246 | #define PCH_DP_D 0xe4300 | ||
2247 | #define PCH_DPD_AUX_CH_CTL 0xe4310 | ||
2248 | #define PCH_DPD_AUX_CH_DATA1 0xe4314 | ||
2249 | #define PCH_DPD_AUX_CH_DATA2 0xe4318 | ||
2250 | #define PCH_DPD_AUX_CH_DATA3 0xe431c | ||
2251 | #define PCH_DPD_AUX_CH_DATA4 0xe4320 | ||
2252 | #define PCH_DPD_AUX_CH_DATA5 0xe4324 | ||
2253 | |||
2130 | #endif /* _I915_REG_H_ */ | 2254 | #endif /* _I915_REG_H_ */ |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a98e2831ed31..1d04e1904ac6 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -222,23 +222,12 @@ static void i915_restore_vga(struct drm_device *dev) | |||
222 | I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); | 222 | I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); |
223 | } | 223 | } |
224 | 224 | ||
225 | int i915_save_state(struct drm_device *dev) | 225 | static void i915_save_modeset_reg(struct drm_device *dev) |
226 | { | 226 | { |
227 | struct drm_i915_private *dev_priv = dev->dev_private; | 227 | struct drm_i915_private *dev_priv = dev->dev_private; |
228 | int i; | ||
229 | |||
230 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | ||
231 | |||
232 | /* Render Standby */ | ||
233 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
234 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
235 | |||
236 | /* Hardware status page */ | ||
237 | dev_priv->saveHWS = I915_READ(HWS_PGA); | ||
238 | |||
239 | /* Display arbitration control */ | ||
240 | dev_priv->saveDSPARB = I915_READ(DSPARB); | ||
241 | 228 | ||
229 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
230 | return; | ||
242 | /* Pipe & plane A info */ | 231 | /* Pipe & plane A info */ |
243 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 232 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); |
244 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 233 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); |
@@ -294,7 +283,122 @@ int i915_save_state(struct drm_device *dev) | |||
294 | } | 283 | } |
295 | i915_save_palette(dev, PIPE_B); | 284 | i915_save_palette(dev, PIPE_B); |
296 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); | 285 | dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); |
286 | return; | ||
287 | } | ||
288 | static void i915_restore_modeset_reg(struct drm_device *dev) | ||
289 | { | ||
290 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
291 | |||
292 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
293 | return; | ||
294 | |||
295 | /* Pipe & plane A info */ | ||
296 | /* Prime the clock */ | ||
297 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { | ||
298 | I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & | ||
299 | ~DPLL_VCO_ENABLE); | ||
300 | DRM_UDELAY(150); | ||
301 | } | ||
302 | I915_WRITE(FPA0, dev_priv->saveFPA0); | ||
303 | I915_WRITE(FPA1, dev_priv->saveFPA1); | ||
304 | /* Actually enable it */ | ||
305 | I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); | ||
306 | DRM_UDELAY(150); | ||
307 | if (IS_I965G(dev)) | ||
308 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); | ||
309 | DRM_UDELAY(150); | ||
310 | |||
311 | /* Restore mode */ | ||
312 | I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); | ||
313 | I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); | ||
314 | I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); | ||
315 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); | ||
316 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); | ||
317 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); | ||
318 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | ||
319 | |||
320 | /* Restore plane info */ | ||
321 | I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); | ||
322 | I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); | ||
323 | I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); | ||
324 | I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); | ||
325 | I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); | ||
326 | if (IS_I965G(dev)) { | ||
327 | I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); | ||
328 | I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); | ||
329 | } | ||
330 | |||
331 | I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); | ||
332 | |||
333 | i915_restore_palette(dev, PIPE_A); | ||
334 | /* Enable the plane */ | ||
335 | I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); | ||
336 | I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); | ||
337 | |||
338 | /* Pipe & plane B info */ | ||
339 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { | ||
340 | I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & | ||
341 | ~DPLL_VCO_ENABLE); | ||
342 | DRM_UDELAY(150); | ||
343 | } | ||
344 | I915_WRITE(FPB0, dev_priv->saveFPB0); | ||
345 | I915_WRITE(FPB1, dev_priv->saveFPB1); | ||
346 | /* Actually enable it */ | ||
347 | I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); | ||
348 | DRM_UDELAY(150); | ||
349 | if (IS_I965G(dev)) | ||
350 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); | ||
351 | DRM_UDELAY(150); | ||
352 | |||
353 | /* Restore mode */ | ||
354 | I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); | ||
355 | I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); | ||
356 | I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); | ||
357 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); | ||
358 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); | ||
359 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); | ||
360 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | ||
361 | |||
362 | /* Restore plane info */ | ||
363 | I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); | ||
364 | I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); | ||
365 | I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); | ||
366 | I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); | ||
367 | I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); | ||
368 | if (IS_I965G(dev)) { | ||
369 | I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); | ||
370 | I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); | ||
371 | } | ||
372 | |||
373 | I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); | ||
374 | |||
375 | i915_restore_palette(dev, PIPE_B); | ||
376 | /* Enable the plane */ | ||
377 | I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); | ||
378 | I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); | ||
297 | 379 | ||
380 | return; | ||
381 | } | ||
382 | int i915_save_state(struct drm_device *dev) | ||
383 | { | ||
384 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
385 | int i; | ||
386 | |||
387 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | ||
388 | |||
389 | /* Render Standby */ | ||
390 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
391 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
392 | |||
393 | /* Hardware status page */ | ||
394 | dev_priv->saveHWS = I915_READ(HWS_PGA); | ||
395 | |||
396 | /* Display arbitration control */ | ||
397 | dev_priv->saveDSPARB = I915_READ(DSPARB); | ||
398 | |||
399 | /* The modeset registers are only meaningful in non-KMS (UMS) mode, */ | ||
400 | /* so don't save them when KMS is enabled */ | ||
401 | i915_save_modeset_reg(dev); | ||
298 | /* Cursor state */ | 402 | /* Cursor state */ |
299 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); | 403 | dev_priv->saveCURACNTR = I915_READ(CURACNTR); |
300 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); | 404 | dev_priv->saveCURAPOS = I915_READ(CURAPOS); |
@@ -322,6 +426,20 @@ int i915_save_state(struct drm_device *dev) | |||
322 | dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); | 426 | dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); |
323 | dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); | 427 | dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); |
324 | 428 | ||
429 | /* Display Port state */ | ||
430 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
431 | dev_priv->saveDP_B = I915_READ(DP_B); | ||
432 | dev_priv->saveDP_C = I915_READ(DP_C); | ||
433 | dev_priv->saveDP_D = I915_READ(DP_D); | ||
434 | dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M); | ||
435 | dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M); | ||
436 | dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N); | ||
437 | dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N); | ||
438 | dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M); | ||
439 | dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M); | ||
440 | dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N); | ||
441 | dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N); | ||
442 | } | ||
325 | /* FIXME: save TV & SDVO state */ | 443 | /* FIXME: save TV & SDVO state */ |
326 | 444 | ||
327 | /* FBC state */ | 445 | /* FBC state */ |
@@ -404,92 +522,21 @@ int i915_restore_state(struct drm_device *dev) | |||
404 | for (i = 0; i < 8; i++) | 522 | for (i = 0; i < 8; i++) |
405 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | 523 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); |
406 | } | 524 | } |
407 | 525 | ||
408 | /* Pipe & plane A info */ | 526 | /* Display port ratios (must be done before clock is set) */ |
409 | /* Prime the clock */ | 527 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
410 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { | 528 | I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); |
411 | I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & | 529 | I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); |
412 | ~DPLL_VCO_ENABLE); | 530 | I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); |
413 | DRM_UDELAY(150); | 531 | I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); |
532 | I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); | ||
533 | I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); | ||
534 | I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); | ||
535 | I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); | ||
414 | } | 536 | } |
415 | I915_WRITE(FPA0, dev_priv->saveFPA0); | 537 | /* The modeset registers are only meaningful in non-KMS (UMS) mode, */ |
416 | I915_WRITE(FPA1, dev_priv->saveFPA1); | 538 | /* so don't restore them when KMS is enabled */ |
417 | /* Actually enable it */ | 539 | i915_restore_modeset_reg(dev); |
418 | I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); | ||
419 | DRM_UDELAY(150); | ||
420 | if (IS_I965G(dev)) | ||
421 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); | ||
422 | DRM_UDELAY(150); | ||
423 | |||
424 | /* Restore mode */ | ||
425 | I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); | ||
426 | I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); | ||
427 | I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); | ||
428 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); | ||
429 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); | ||
430 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); | ||
431 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | ||
432 | |||
433 | /* Restore plane info */ | ||
434 | I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); | ||
435 | I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); | ||
436 | I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); | ||
437 | I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); | ||
438 | I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); | ||
439 | if (IS_I965G(dev)) { | ||
440 | I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); | ||
441 | I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); | ||
442 | } | ||
443 | |||
444 | I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); | ||
445 | |||
446 | i915_restore_palette(dev, PIPE_A); | ||
447 | /* Enable the plane */ | ||
448 | I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); | ||
449 | I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); | ||
450 | |||
451 | /* Pipe & plane B info */ | ||
452 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { | ||
453 | I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & | ||
454 | ~DPLL_VCO_ENABLE); | ||
455 | DRM_UDELAY(150); | ||
456 | } | ||
457 | I915_WRITE(FPB0, dev_priv->saveFPB0); | ||
458 | I915_WRITE(FPB1, dev_priv->saveFPB1); | ||
459 | /* Actually enable it */ | ||
460 | I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); | ||
461 | DRM_UDELAY(150); | ||
462 | if (IS_I965G(dev)) | ||
463 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); | ||
464 | DRM_UDELAY(150); | ||
465 | |||
466 | /* Restore mode */ | ||
467 | I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); | ||
468 | I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); | ||
469 | I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); | ||
470 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); | ||
471 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); | ||
472 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); | ||
473 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | ||
474 | |||
475 | /* Restore plane info */ | ||
476 | I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); | ||
477 | I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); | ||
478 | I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); | ||
479 | I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); | ||
480 | I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); | ||
481 | if (IS_I965G(dev)) { | ||
482 | I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); | ||
483 | I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); | ||
484 | } | ||
485 | |||
486 | I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); | ||
487 | |||
488 | i915_restore_palette(dev, PIPE_B); | ||
489 | /* Enable the plane */ | ||
490 | I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); | ||
491 | I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); | ||
492 | |||
493 | /* Cursor state */ | 540 | /* Cursor state */ |
494 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); | 541 | I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); |
495 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); | 542 | I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); |
@@ -518,6 +565,12 @@ int i915_restore_state(struct drm_device *dev) | |||
518 | I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); | 565 | I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); |
519 | I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); | 566 | I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); |
520 | 567 | ||
568 | /* Display Port state */ | ||
569 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
570 | I915_WRITE(DP_B, dev_priv->saveDP_B); | ||
571 | I915_WRITE(DP_C, dev_priv->saveDP_C); | ||
572 | I915_WRITE(DP_D, dev_priv->saveDP_D); | ||
573 | } | ||
521 | /* FIXME: restore TV & SDVO state */ | 574 | /* FIXME: restore TV & SDVO state */ |
522 | 575 | ||
523 | /* FBC info */ | 576 | /* FBC info */ |
@@ -545,7 +598,7 @@ int i915_restore_state(struct drm_device *dev) | |||
545 | 598 | ||
546 | for (i = 0; i < 16; i++) { | 599 | for (i = 0; i < 16; i++) { |
547 | I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); | 600 | I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); |
548 | I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); | 601 | I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); |
549 | } | 602 | } |
550 | for (i = 0; i < 3; i++) | 603 | for (i = 0; i < 3; i++) |
551 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); | 604 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index cdd126d068a7..300aee3296c2 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -99,9 +99,11 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
99 | { | 99 | { |
100 | struct bdb_lvds_options *lvds_options; | 100 | struct bdb_lvds_options *lvds_options; |
101 | struct bdb_lvds_lfp_data *lvds_lfp_data; | 101 | struct bdb_lvds_lfp_data *lvds_lfp_data; |
102 | struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; | ||
102 | struct bdb_lvds_lfp_data_entry *entry; | 103 | struct bdb_lvds_lfp_data_entry *entry; |
103 | struct lvds_dvo_timing *dvo_timing; | 104 | struct lvds_dvo_timing *dvo_timing; |
104 | struct drm_display_mode *panel_fixed_mode; | 105 | struct drm_display_mode *panel_fixed_mode; |
106 | int lfp_data_size, dvo_timing_offset; | ||
105 | 107 | ||
106 | /* Defaults if we can't find VBT info */ | 108 | /* Defaults if we can't find VBT info */ |
107 | dev_priv->lvds_dither = 0; | 109 | dev_priv->lvds_dither = 0; |
@@ -119,10 +121,27 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
119 | if (!lvds_lfp_data) | 121 | if (!lvds_lfp_data) |
120 | return; | 122 | return; |
121 | 123 | ||
124 | lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); | ||
125 | if (!lvds_lfp_data_ptrs) | ||
126 | return; | ||
127 | |||
122 | dev_priv->lvds_vbt = 1; | 128 | dev_priv->lvds_vbt = 1; |
123 | 129 | ||
124 | entry = &lvds_lfp_data->data[lvds_options->panel_type]; | 130 | lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - |
125 | dvo_timing = &entry->dvo_timing; | 131 | lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; |
132 | entry = (struct bdb_lvds_lfp_data_entry *) | ||
133 | ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * | ||
134 | lvds_options->panel_type)); | ||
135 | dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - | ||
136 | lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; | ||
137 | |||
138 | /* | ||
139 | * The size of fp_timing varies between platforms, so calculate | ||
140 | * the DVO timing's relative offset within the LVDS data entry | ||
141 | * to locate the DVO timing entry. | ||
142 | */ | ||
143 | dvo_timing = (struct lvds_dvo_timing *) | ||
144 | ((unsigned char *)entry + dvo_timing_offset); | ||
126 | 145 | ||
127 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); | 146 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); |
128 | 147 | ||
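The pointer arithmetic in the hunk above works because the VBT stores no explicit per-panel entry size: the stride of one LFP data entry is recovered as the distance between consecutive dvo_timing offsets in the BDB_LVDS_LFP_DATA_PTRS table, and the position of dvo_timing inside an entry as the distance from that entry's fp_timing offset. A minimal standalone sketch of the same calculation follows; the two-field struct is a stand-in for illustration, not the driver's real pointer-table layout.

#include <stdint.h>

/* Sketch only: byte offset of the DVO timing block for a given
 * panel_type, derived from two pointer-table entries.  Mirrors the
 * arithmetic in parse_lfp_panel_data() above; the struct is a stub.
 */
struct lfp_ptr_stub {
	uint16_t fp_timing_offset;
	uint16_t dvo_timing_offset;
};

static long dvo_timing_byte_offset(const struct lfp_ptr_stub *p, int panel_type)
{
	long entry_size = p[1].dvo_timing_offset - p[0].dvo_timing_offset;
	long dvo_off    = p[0].dvo_timing_offset - p[0].fp_timing_offset;

	/* offset relative to lvds_lfp_data->data[] */
	return entry_size * panel_type + dvo_off;
}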
@@ -185,10 +204,12 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
185 | dev_priv->lvds_use_ssc = general->enable_ssc; | 204 | dev_priv->lvds_use_ssc = general->enable_ssc; |
186 | 205 | ||
187 | if (dev_priv->lvds_use_ssc) { | 206 | if (dev_priv->lvds_use_ssc) { |
188 | if (IS_I855(dev_priv->dev)) | 207 | if (IS_I85X(dev_priv->dev)) |
189 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; | 208 | dev_priv->lvds_ssc_freq = |
190 | else | 209 | general->ssc_freq ? 66 : 48; |
191 | dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; | 210 | else |
211 | dev_priv->lvds_ssc_freq = | ||
212 | general->ssc_freq ? 100 : 96; | ||
192 | } | 213 | } |
193 | } | 214 | } |
194 | } | 215 | } |
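The conditional above encodes a small fixed table: on 85x-class chips the SSC reference is 66 MHz (spread enabled) or 48 MHz (spread disabled), on everything else 100 MHz or 96 MHz. Restated as a tiny helper for readability; this is a sketch, with the first argument standing in for the IS_I85X() check.

/* Sketch of the SSC reference selection in parse_general_features();
 * the MHz values come straight from the conditional above.
 */
static int lvds_ssc_freq_mhz(int is_i85x, int ssc_freq_bit)
{
	if (is_i85x)
		return ssc_freq_bit ? 66 : 48;
	return ssc_freq_bit ? 100 : 96;
}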
@@ -275,6 +296,25 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
275 | } | 296 | } |
276 | return; | 297 | return; |
277 | } | 298 | } |
299 | |||
300 | static void | ||
301 | parse_driver_features(struct drm_i915_private *dev_priv, | ||
302 | struct bdb_header *bdb) | ||
303 | { | ||
304 | struct drm_device *dev = dev_priv->dev; | ||
305 | struct bdb_driver_features *driver; | ||
306 | |||
307 | /* set default for chips without eDP */ | ||
308 | if (!SUPPORTS_EDP(dev)) { | ||
309 | dev_priv->edp_support = 0; | ||
310 | return; | ||
311 | } | ||
312 | |||
313 | driver = find_section(bdb, BDB_DRIVER_FEATURES); | ||
314 | if (driver && driver->lvds_config == BDB_DRIVER_FEATURE_EDP) | ||
315 | dev_priv->edp_support = 1; | ||
316 | } | ||
317 | |||
278 | /** | 318 | /** |
279 | * intel_init_bios - initialize VBIOS settings & find VBT | 319 | * intel_init_bios - initialize VBIOS settings & find VBT |
280 | * @dev: DRM device | 320 | * @dev: DRM device |
@@ -325,6 +365,8 @@ intel_init_bios(struct drm_device *dev) | |||
325 | parse_lfp_panel_data(dev_priv, bdb); | 365 | parse_lfp_panel_data(dev_priv, bdb); |
326 | parse_sdvo_panel_data(dev_priv, bdb); | 366 | parse_sdvo_panel_data(dev_priv, bdb); |
327 | parse_sdvo_device_mapping(dev_priv, bdb); | 367 | parse_sdvo_device_mapping(dev_priv, bdb); |
368 | parse_driver_features(dev_priv, bdb); | ||
369 | |||
328 | pci_unmap_rom(pdev, bios); | 370 | pci_unmap_rom(pdev, bios); |
329 | 371 | ||
330 | return 0; | 372 | return 0; |
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index fe72e1c225d8..0f8e5f69ac7a 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -381,6 +381,51 @@ struct bdb_sdvo_lvds_options { | |||
381 | } __attribute__((packed)); | 381 | } __attribute__((packed)); |
382 | 382 | ||
383 | 383 | ||
384 | #define BDB_DRIVER_FEATURE_NO_LVDS 0 | ||
385 | #define BDB_DRIVER_FEATURE_INT_LVDS 1 | ||
386 | #define BDB_DRIVER_FEATURE_SDVO_LVDS 2 | ||
387 | #define BDB_DRIVER_FEATURE_EDP 3 | ||
388 | |||
389 | struct bdb_driver_features { | ||
390 | u8 boot_dev_algorithm:1; | ||
391 | u8 block_display_switch:1; | ||
392 | u8 allow_display_switch:1; | ||
393 | u8 hotplug_dvo:1; | ||
394 | u8 dual_view_zoom:1; | ||
395 | u8 int15h_hook:1; | ||
396 | u8 sprite_in_clone:1; | ||
397 | u8 primary_lfp_id:1; | ||
398 | |||
399 | u16 boot_mode_x; | ||
400 | u16 boot_mode_y; | ||
401 | u8 boot_mode_bpp; | ||
402 | u8 boot_mode_refresh; | ||
403 | |||
404 | u16 enable_lfp_primary:1; | ||
405 | u16 selective_mode_pruning:1; | ||
406 | u16 dual_frequency:1; | ||
407 | u16 render_clock_freq:1; /* 0: high freq; 1: low freq */ | ||
408 | u16 nt_clone_support:1; | ||
409 | u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */ | ||
410 | u16 sprite_display_assign:1; /* 0: secondary; 1: primary */ | ||
411 | u16 cui_aspect_scaling:1; | ||
412 | u16 preserve_aspect_ratio:1; | ||
413 | u16 sdvo_device_power_down:1; | ||
414 | u16 crt_hotplug:1; | ||
415 | u16 lvds_config:2; | ||
416 | u16 tv_hotplug:1; | ||
417 | u16 hdmi_config:2; | ||
418 | |||
419 | u8 static_display:1; | ||
420 | u8 reserved2:7; | ||
421 | u16 legacy_crt_max_x; | ||
422 | u16 legacy_crt_max_y; | ||
423 | u8 legacy_crt_max_refresh; | ||
424 | |||
425 | u8 hdmi_termination; | ||
426 | u8 custom_vbt_version; | ||
427 | } __attribute__((packed)); | ||
428 | |||
384 | bool intel_init_bios(struct drm_device *dev); | 429 | bool intel_init_bios(struct drm_device *dev); |
385 | 430 | ||
386 | /* | 431 | /* |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 6de97fc66029..4cf8e2e88a40 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -46,7 +46,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | |||
46 | 46 | ||
47 | temp = I915_READ(reg); | 47 | temp = I915_READ(reg); |
48 | temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); | 48 | temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); |
49 | temp |= ADPA_DAC_ENABLE; | 49 | temp &= ~ADPA_DAC_ENABLE; |
50 | 50 | ||
51 | switch(mode) { | 51 | switch(mode) { |
52 | case DRM_MODE_DPMS_ON: | 52 | case DRM_MODE_DPMS_ON: |
@@ -156,6 +156,9 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
156 | 156 | ||
157 | temp = adpa = I915_READ(PCH_ADPA); | 157 | temp = adpa = I915_READ(PCH_ADPA); |
158 | 158 | ||
159 | adpa &= ~ADPA_DAC_ENABLE; | ||
160 | I915_WRITE(PCH_ADPA, adpa); | ||
161 | |||
159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 162 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
160 | 163 | ||
161 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 164 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | |
@@ -169,13 +172,14 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
169 | DRM_DEBUG("pch crt adpa 0x%x", adpa); | 172 | DRM_DEBUG("pch crt adpa 0x%x", adpa); |
170 | I915_WRITE(PCH_ADPA, adpa); | 173 | I915_WRITE(PCH_ADPA, adpa); |
171 | 174 | ||
172 | /* This might not be needed as not specified in spec...*/ | 175 | while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) |
173 | udelay(1000); | 176 | ; |
174 | 177 | ||
175 | /* Check the status to see if both blue and green are on now */ | 178 | /* Check the status to see if both blue and green are on now */ |
176 | adpa = I915_READ(PCH_ADPA); | 179 | adpa = I915_READ(PCH_ADPA); |
177 | if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) == | 180 | adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; |
178 | ADPA_CRT_HOTPLUG_MONITOR_COLOR) | 181 | if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) || |
182 | (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO)) | ||
179 | ret = true; | 183 | ret = true; |
180 | else | 184 | else |
181 | ret = false; | 185 | ret = false; |
@@ -428,8 +432,34 @@ static void intel_crt_destroy(struct drm_connector *connector) | |||
428 | 432 | ||
429 | static int intel_crt_get_modes(struct drm_connector *connector) | 433 | static int intel_crt_get_modes(struct drm_connector *connector) |
430 | { | 434 | { |
435 | int ret; | ||
431 | struct intel_output *intel_output = to_intel_output(connector); | 436 | struct intel_output *intel_output = to_intel_output(connector); |
432 | return intel_ddc_get_modes(intel_output); | 437 | struct i2c_adapter *ddcbus; |
438 | struct drm_device *dev = connector->dev; | ||
439 | |||
440 | |||
441 | ret = intel_ddc_get_modes(intel_output); | ||
442 | if (ret || !IS_G4X(dev)) | ||
443 | goto end; | ||
444 | |||
445 | ddcbus = intel_output->ddc_bus; | ||
446 | /* Try to probe digital port for output in DVI-I -> VGA mode. */ | ||
447 | intel_output->ddc_bus = | ||
448 | intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); | ||
449 | |||
450 | if (!intel_output->ddc_bus) { | ||
451 | intel_output->ddc_bus = ddcbus; | ||
452 | dev_printk(KERN_ERR, &connector->dev->pdev->dev, | ||
453 | "DDC bus registration failed for CRTDDC_D.\n"); | ||
454 | goto end; | ||
455 | } | ||
456 | /* Try to get modes via the GPIOD port */ | ||
457 | ret = intel_ddc_get_modes(intel_output); | ||
458 | intel_i2c_destroy(ddcbus); | ||
459 | |||
460 | end: | ||
461 | return ret; | ||
462 | |||
433 | } | 463 | } |
434 | 464 | ||
435 | static int intel_crt_set_property(struct drm_connector *connector, | 465 | static int intel_crt_set_property(struct drm_connector *connector, |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3e1c78162119..d6fce2133413 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -25,14 +25,19 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/i2c.h> | 27 | #include <linux/i2c.h> |
28 | #include <linux/kernel.h> | ||
28 | #include "drmP.h" | 29 | #include "drmP.h" |
29 | #include "intel_drv.h" | 30 | #include "intel_drv.h" |
30 | #include "i915_drm.h" | 31 | #include "i915_drm.h" |
31 | #include "i915_drv.h" | 32 | #include "i915_drv.h" |
33 | #include "intel_dp.h" | ||
32 | 34 | ||
33 | #include "drm_crtc_helper.h" | 35 | #include "drm_crtc_helper.h" |
34 | 36 | ||
37 | #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) | ||
38 | |||
35 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); | 39 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); |
40 | static void intel_update_watermarks(struct drm_device *dev); | ||
36 | 41 | ||
37 | typedef struct { | 42 | typedef struct { |
38 | /* given values */ | 43 | /* given values */ |
@@ -85,7 +90,7 @@ struct intel_limit { | |||
85 | #define I8XX_P2_SLOW 4 | 90 | #define I8XX_P2_SLOW 4 |
86 | #define I8XX_P2_FAST 2 | 91 | #define I8XX_P2_FAST 2 |
87 | #define I8XX_P2_LVDS_SLOW 14 | 92 | #define I8XX_P2_LVDS_SLOW 14 |
88 | #define I8XX_P2_LVDS_FAST 14 /* No fast option */ | 93 | #define I8XX_P2_LVDS_FAST 7 |
89 | #define I8XX_P2_SLOW_LIMIT 165000 | 94 | #define I8XX_P2_SLOW_LIMIT 165000 |
90 | 95 | ||
91 | #define I9XX_DOT_MIN 20000 | 96 | #define I9XX_DOT_MIN 20000 |
@@ -127,19 +132,6 @@ struct intel_limit { | |||
127 | #define I9XX_P2_LVDS_FAST 7 | 132 | #define I9XX_P2_LVDS_FAST 7 |
128 | #define I9XX_P2_LVDS_SLOW_LIMIT 112000 | 133 | #define I9XX_P2_LVDS_SLOW_LIMIT 112000 |
129 | 134 | ||
130 | #define INTEL_LIMIT_I8XX_DVO_DAC 0 | ||
131 | #define INTEL_LIMIT_I8XX_LVDS 1 | ||
132 | #define INTEL_LIMIT_I9XX_SDVO_DAC 2 | ||
133 | #define INTEL_LIMIT_I9XX_LVDS 3 | ||
134 | #define INTEL_LIMIT_G4X_SDVO 4 | ||
135 | #define INTEL_LIMIT_G4X_HDMI_DAC 5 | ||
136 | #define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 | ||
137 | #define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 | ||
138 | #define INTEL_LIMIT_IGD_SDVO_DAC 8 | ||
139 | #define INTEL_LIMIT_IGD_LVDS 9 | ||
140 | #define INTEL_LIMIT_IGDNG_SDVO_DAC 10 | ||
141 | #define INTEL_LIMIT_IGDNG_LVDS 11 | ||
142 | |||
143 | /* The parameters below are for SDVO on the G4x platform */ | 135 | /* The parameters below are for SDVO on the G4x platform */ |
144 | #define G4X_DOT_SDVO_MIN 25000 | 136 | #define G4X_DOT_SDVO_MIN 25000 |
145 | #define G4X_DOT_SDVO_MAX 270000 | 137 | #define G4X_DOT_SDVO_MAX 270000 |
@@ -218,6 +210,25 @@ struct intel_limit { | |||
218 | #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 | 210 | #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 |
219 | #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 | 211 | #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 |
220 | 212 | ||
213 | /* The parameters below are for DisplayPort on the G4x platform */ | ||
214 | #define G4X_DOT_DISPLAY_PORT_MIN 161670 | ||
215 | #define G4X_DOT_DISPLAY_PORT_MAX 227000 | ||
216 | #define G4X_N_DISPLAY_PORT_MIN 1 | ||
217 | #define G4X_N_DISPLAY_PORT_MAX 2 | ||
218 | #define G4X_M_DISPLAY_PORT_MIN 97 | ||
219 | #define G4X_M_DISPLAY_PORT_MAX 108 | ||
220 | #define G4X_M1_DISPLAY_PORT_MIN 0x10 | ||
221 | #define G4X_M1_DISPLAY_PORT_MAX 0x12 | ||
222 | #define G4X_M2_DISPLAY_PORT_MIN 0x05 | ||
223 | #define G4X_M2_DISPLAY_PORT_MAX 0x06 | ||
224 | #define G4X_P_DISPLAY_PORT_MIN 10 | ||
225 | #define G4X_P_DISPLAY_PORT_MAX 20 | ||
226 | #define G4X_P1_DISPLAY_PORT_MIN 1 | ||
227 | #define G4X_P1_DISPLAY_PORT_MAX 2 | ||
228 | #define G4X_P2_DISPLAY_PORT_SLOW 10 | ||
229 | #define G4X_P2_DISPLAY_PORT_FAST 10 | ||
230 | #define G4X_P2_DISPLAY_PORT_LIMIT 0 | ||
231 | |||
221 | /* IGDNG */ | 232 | /* IGDNG */ |
222 | /* as we calculate clock using (register_value + 2) for | 233 | /* as we calculate clock using (register_value + 2) for |
223 | N/M1/M2, so here the range value for them is (actual_value-2). | 234 | N/M1/M2, so here the range value for them is (actual_value-2). |
@@ -256,8 +267,14 @@ static bool | |||
256 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 267 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
257 | int target, int refclk, intel_clock_t *best_clock); | 268 | int target, int refclk, intel_clock_t *best_clock); |
258 | 269 | ||
259 | static const intel_limit_t intel_limits[] = { | 270 | static bool |
260 | { /* INTEL_LIMIT_I8XX_DVO_DAC */ | 271 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
272 | int target, int refclk, intel_clock_t *best_clock); | ||
273 | static bool | ||
274 | intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc, | ||
275 | int target, int refclk, intel_clock_t *best_clock); | ||
276 | |||
277 | static const intel_limit_t intel_limits_i8xx_dvo = { | ||
261 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 278 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, |
262 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | 279 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, |
263 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | 280 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, |
@@ -269,8 +286,9 @@ static const intel_limit_t intel_limits[] = { | |||
269 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 286 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
270 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | 287 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, |
271 | .find_pll = intel_find_best_PLL, | 288 | .find_pll = intel_find_best_PLL, |
272 | }, | 289 | }; |
273 | { /* INTEL_LIMIT_I8XX_LVDS */ | 290 | |
291 | static const intel_limit_t intel_limits_i8xx_lvds = { | ||
274 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 292 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, |
275 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, | 293 | .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, |
276 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, | 294 | .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, |
@@ -282,8 +300,9 @@ static const intel_limit_t intel_limits[] = { | |||
282 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 300 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
283 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | 301 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, |
284 | .find_pll = intel_find_best_PLL, | 302 | .find_pll = intel_find_best_PLL, |
285 | }, | 303 | }; |
286 | { /* INTEL_LIMIT_I9XX_SDVO_DAC */ | 304 | |
305 | static const intel_limit_t intel_limits_i9xx_sdvo = { | ||
287 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 306 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
288 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | 307 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, |
289 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | 308 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, |
@@ -295,8 +314,9 @@ static const intel_limit_t intel_limits[] = { | |||
295 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 314 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
296 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 315 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
297 | .find_pll = intel_find_best_PLL, | 316 | .find_pll = intel_find_best_PLL, |
298 | }, | 317 | }; |
299 | { /* INTEL_LIMIT_I9XX_LVDS */ | 318 | |
319 | static const intel_limit_t intel_limits_i9xx_lvds = { | ||
300 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 320 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
301 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, | 321 | .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, |
302 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, | 322 | .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, |
@@ -311,9 +331,10 @@ static const intel_limit_t intel_limits[] = { | |||
311 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 331 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
312 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | 332 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, |
313 | .find_pll = intel_find_best_PLL, | 333 | .find_pll = intel_find_best_PLL, |
314 | }, | 334 | }; |
335 | |||
315 | /* The parameters and functions below are for the G4X chipset family */ | 336 | /* The parameters and functions below are for the G4X chipset family */ |
316 | { /* INTEL_LIMIT_G4X_SDVO */ | 337 | static const intel_limit_t intel_limits_g4x_sdvo = { |
317 | .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, | 338 | .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, |
318 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | 339 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, |
319 | .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, | 340 | .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, |
@@ -327,8 +348,9 @@ static const intel_limit_t intel_limits[] = { | |||
327 | .p2_fast = G4X_P2_SDVO_FAST | 348 | .p2_fast = G4X_P2_SDVO_FAST |
328 | }, | 349 | }, |
329 | .find_pll = intel_g4x_find_best_PLL, | 350 | .find_pll = intel_g4x_find_best_PLL, |
330 | }, | 351 | }; |
331 | { /* INTEL_LIMIT_G4X_HDMI_DAC */ | 352 | |
353 | static const intel_limit_t intel_limits_g4x_hdmi = { | ||
332 | .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, | 354 | .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, |
333 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | 355 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, |
334 | .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, | 356 | .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, |
@@ -342,8 +364,9 @@ static const intel_limit_t intel_limits[] = { | |||
342 | .p2_fast = G4X_P2_HDMI_DAC_FAST | 364 | .p2_fast = G4X_P2_HDMI_DAC_FAST |
343 | }, | 365 | }, |
344 | .find_pll = intel_g4x_find_best_PLL, | 366 | .find_pll = intel_g4x_find_best_PLL, |
345 | }, | 367 | }; |
346 | { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ | 368 | |
369 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | ||
347 | .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, | 370 | .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, |
348 | .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, | 371 | .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, |
349 | .vco = { .min = G4X_VCO_MIN, | 372 | .vco = { .min = G4X_VCO_MIN, |
@@ -365,8 +388,9 @@ static const intel_limit_t intel_limits[] = { | |||
365 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST | 388 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST |
366 | }, | 389 | }, |
367 | .find_pll = intel_g4x_find_best_PLL, | 390 | .find_pll = intel_g4x_find_best_PLL, |
368 | }, | 391 | }; |
369 | { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ | 392 | |
393 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | ||
370 | .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, | 394 | .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, |
371 | .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, | 395 | .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, |
372 | .vco = { .min = G4X_VCO_MIN, | 396 | .vco = { .min = G4X_VCO_MIN, |
@@ -388,8 +412,32 @@ static const intel_limit_t intel_limits[] = { | |||
388 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST | 412 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST |
389 | }, | 413 | }, |
390 | .find_pll = intel_g4x_find_best_PLL, | 414 | .find_pll = intel_g4x_find_best_PLL, |
391 | }, | 415 | }; |
392 | { /* INTEL_LIMIT_IGD_SDVO */ | 416 | |
417 | static const intel_limit_t intel_limits_g4x_display_port = { | ||
418 | .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, | ||
419 | .max = G4X_DOT_DISPLAY_PORT_MAX }, | ||
420 | .vco = { .min = G4X_VCO_MIN, | ||
421 | .max = G4X_VCO_MAX}, | ||
422 | .n = { .min = G4X_N_DISPLAY_PORT_MIN, | ||
423 | .max = G4X_N_DISPLAY_PORT_MAX }, | ||
424 | .m = { .min = G4X_M_DISPLAY_PORT_MIN, | ||
425 | .max = G4X_M_DISPLAY_PORT_MAX }, | ||
426 | .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, | ||
427 | .max = G4X_M1_DISPLAY_PORT_MAX }, | ||
428 | .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN, | ||
429 | .max = G4X_M2_DISPLAY_PORT_MAX }, | ||
430 | .p = { .min = G4X_P_DISPLAY_PORT_MIN, | ||
431 | .max = G4X_P_DISPLAY_PORT_MAX }, | ||
432 | .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN, | ||
433 | .max = G4X_P1_DISPLAY_PORT_MAX}, | ||
434 | .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT, | ||
435 | .p2_slow = G4X_P2_DISPLAY_PORT_SLOW, | ||
436 | .p2_fast = G4X_P2_DISPLAY_PORT_FAST }, | ||
437 | .find_pll = intel_find_pll_g4x_dp, | ||
438 | }; | ||
439 | |||
440 | static const intel_limit_t intel_limits_igd_sdvo = { | ||
393 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, | 441 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, |
394 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | 442 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, |
395 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | 443 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, |
@@ -401,8 +449,9 @@ static const intel_limit_t intel_limits[] = { | |||
401 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 449 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
402 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 450 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
403 | .find_pll = intel_find_best_PLL, | 451 | .find_pll = intel_find_best_PLL, |
404 | }, | 452 | }; |
405 | { /* INTEL_LIMIT_IGD_LVDS */ | 453 | |
454 | static const intel_limit_t intel_limits_igd_lvds = { | ||
406 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 455 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
407 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | 456 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, |
408 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | 457 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, |
@@ -415,8 +464,9 @@ static const intel_limit_t intel_limits[] = { | |||
415 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 464 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
416 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | 465 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, |
417 | .find_pll = intel_find_best_PLL, | 466 | .find_pll = intel_find_best_PLL, |
418 | }, | 467 | }; |
419 | { /* INTEL_LIMIT_IGDNG_SDVO_DAC */ | 468 | |
469 | static const intel_limit_t intel_limits_igdng_sdvo = { | ||
420 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, | 470 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, |
421 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, | 471 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, |
422 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, | 472 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, |
@@ -429,8 +479,9 @@ static const intel_limit_t intel_limits[] = { | |||
429 | .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, | 479 | .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, |
430 | .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, | 480 | .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, |
431 | .find_pll = intel_igdng_find_best_PLL, | 481 | .find_pll = intel_igdng_find_best_PLL, |
432 | }, | 482 | }; |
433 | { /* INTEL_LIMIT_IGDNG_LVDS */ | 483 | |
484 | static const intel_limit_t intel_limits_igdng_lvds = { | ||
434 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, | 485 | .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, |
435 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, | 486 | .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, |
436 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, | 487 | .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, |
@@ -443,16 +494,15 @@ static const intel_limit_t intel_limits[] = { | |||
443 | .p2_slow = IGDNG_P2_LVDS_SLOW, | 494 | .p2_slow = IGDNG_P2_LVDS_SLOW, |
444 | .p2_fast = IGDNG_P2_LVDS_FAST }, | 495 | .p2_fast = IGDNG_P2_LVDS_FAST }, |
445 | .find_pll = intel_igdng_find_best_PLL, | 496 | .find_pll = intel_igdng_find_best_PLL, |
446 | }, | ||
447 | }; | 497 | }; |
448 | 498 | ||
449 | static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) | 499 | static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) |
450 | { | 500 | { |
451 | const intel_limit_t *limit; | 501 | const intel_limit_t *limit; |
452 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 502 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
453 | limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS]; | 503 | limit = &intel_limits_igdng_lvds; |
454 | else | 504 | else |
455 | limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC]; | 505 | limit = &intel_limits_igdng_sdvo; |
456 | 506 | ||
457 | return limit; | 507 | return limit; |
458 | } | 508 | } |
@@ -467,19 +517,19 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | |||
467 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 517 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == |
468 | LVDS_CLKB_POWER_UP) | 518 | LVDS_CLKB_POWER_UP) |
469 | /* LVDS with dual channel */ | 519 | /* LVDS with dual channel */ |
470 | limit = &intel_limits | 520 | limit = &intel_limits_g4x_dual_channel_lvds; |
471 | [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS]; | ||
472 | else | 521 | else |
473 | /* LVDS with single channel */ | 522 | /* LVDS with single channel */ |
474 | limit = &intel_limits | 523 | limit = &intel_limits_g4x_single_channel_lvds; |
475 | [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS]; | ||
476 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || | 524 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || |
477 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { | 525 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { |
478 | limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; | 526 | limit = &intel_limits_g4x_hdmi; |
479 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { | 527 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { |
480 | limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; | 528 | limit = &intel_limits_g4x_sdvo; |
529 | } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) { | ||
530 | limit = &intel_limits_g4x_display_port; | ||
481 | } else /* The option is for other outputs */ | 531 | } else /* The option is for other outputs */ |
482 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | 532 | limit = &intel_limits_i9xx_sdvo; |
483 | 533 | ||
484 | return limit; | 534 | return limit; |
485 | } | 535 | } |
@@ -495,19 +545,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | |||
495 | limit = intel_g4x_limit(crtc); | 545 | limit = intel_g4x_limit(crtc); |
496 | } else if (IS_I9XX(dev) && !IS_IGD(dev)) { | 546 | } else if (IS_I9XX(dev) && !IS_IGD(dev)) { |
497 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 547 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
498 | limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; | 548 | limit = &intel_limits_i9xx_lvds; |
499 | else | 549 | else |
500 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | 550 | limit = &intel_limits_i9xx_sdvo; |
501 | } else if (IS_IGD(dev)) { | 551 | } else if (IS_IGD(dev)) { |
502 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 552 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
503 | limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; | 553 | limit = &intel_limits_igd_lvds; |
504 | else | 554 | else |
505 | limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; | 555 | limit = &intel_limits_igd_sdvo; |
506 | } else { | 556 | } else { |
507 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 557 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
508 | limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; | 558 | limit = &intel_limits_i8xx_lvds; |
509 | else | 559 | else |
510 | limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; | 560 | limit = &intel_limits_i8xx_dvo; |
511 | } | 561 | } |
512 | return limit; | 562 | return limit; |
513 | } | 563 | } |
@@ -553,6 +603,23 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
553 | return false; | 603 | return false; |
554 | } | 604 | } |
555 | 605 | ||
606 | struct drm_connector * | ||
607 | intel_pipe_get_output (struct drm_crtc *crtc) | ||
608 | { | ||
609 | struct drm_device *dev = crtc->dev; | ||
610 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
611 | struct drm_connector *l_entry, *ret = NULL; | ||
612 | |||
613 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { | ||
614 | if (l_entry->encoder && | ||
615 | l_entry->encoder->crtc == crtc) { | ||
616 | ret = l_entry; | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | return ret; | ||
621 | } | ||
622 | |||
556 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) | 623 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
557 | /** | 624 | /** |
558 | * Returns whether the given set of divisors are valid for a given refclk with | 625 | * Returns whether the given set of divisors are valid for a given refclk with |
@@ -600,7 +667,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
600 | int err = target; | 667 | int err = target; |
601 | 668 | ||
602 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | 669 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
603 | (I915_READ(LVDS) & LVDS_PORT_EN) != 0) { | 670 | (I915_READ(LVDS)) != 0) { |
604 | /* | 671 | /* |
605 | * For LVDS, if the panel is on, just rely on its current | 672 | * For LVDS, if the panel is on, just rely on its current |
606 | * settings for dual-channel. We haven't figured out how to | 673 | * settings for dual-channel. We haven't figured out how to |
@@ -707,6 +774,30 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
707 | } | 774 | } |
708 | 775 | ||
709 | static bool | 776 | static bool |
777 | intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
778 | int target, int refclk, intel_clock_t *best_clock) | ||
779 | { | ||
780 | struct drm_device *dev = crtc->dev; | ||
781 | intel_clock_t clock; | ||
782 | if (target < 200000) { | ||
783 | clock.n = 1; | ||
784 | clock.p1 = 2; | ||
785 | clock.p2 = 10; | ||
786 | clock.m1 = 12; | ||
787 | clock.m2 = 9; | ||
788 | } else { | ||
789 | clock.n = 2; | ||
790 | clock.p1 = 1; | ||
791 | clock.p2 = 10; | ||
792 | clock.m1 = 14; | ||
793 | clock.m2 = 8; | ||
794 | } | ||
795 | intel_clock(dev, refclk, &clock); | ||
796 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | ||
797 | return true; | ||
798 | } | ||
799 | |||
800 | static bool | ||
710 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 801 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
711 | int target, int refclk, intel_clock_t *best_clock) | 802 | int target, int refclk, intel_clock_t *best_clock) |
712 | { | 803 | { |
@@ -718,6 +809,14 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
718 | int err_most = 47; | 809 | int err_most = 47; |
719 | found = false; | 810 | found = false; |
720 | 811 | ||
812 | /* eDP has only 2 clock choices, no n/m/p setting */ | ||
813 | if (HAS_eDP) | ||
814 | return true; | ||
815 | |||
816 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | ||
817 | return intel_find_pll_igdng_dp(limit, crtc, target, | ||
818 | refclk, best_clock); | ||
819 | |||
721 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 820 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
722 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 821 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == |
723 | LVDS_CLKB_POWER_UP) | 822 | LVDS_CLKB_POWER_UP) |
@@ -764,6 +863,32 @@ out: | |||
764 | return found; | 863 | return found; |
765 | } | 864 | } |
766 | 865 | ||
866 | /* DisplayPort has only two link frequencies, 162MHz and 270MHz */ | ||
867 | static bool | ||
868 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
869 | int target, int refclk, intel_clock_t *best_clock) | ||
870 | { | ||
871 | intel_clock_t clock; | ||
872 | if (target < 200000) { | ||
873 | clock.p1 = 2; | ||
874 | clock.p2 = 10; | ||
875 | clock.n = 2; | ||
876 | clock.m1 = 23; | ||
877 | clock.m2 = 8; | ||
878 | } else { | ||
879 | clock.p1 = 1; | ||
880 | clock.p2 = 10; | ||
881 | clock.n = 1; | ||
882 | clock.m1 = 14; | ||
883 | clock.m2 = 2; | ||
884 | } | ||
885 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); | ||
886 | clock.p = (clock.p1 * clock.p2); | ||
887 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; | ||
888 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | ||
889 | return true; | ||
890 | } | ||
891 | |||
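Plugging the two fixed divider sets above into the dot-clock expression confirms they land on the two DisplayPort link rates: 162000 kHz exactly, and 268800 kHz for the divider set the code labels 270 MHz. A quick standalone check, using only the 96 MHz reference already baked into the formula:

#include <stdio.h>

/* Verify the fixed G4X DisplayPort dividers used in intel_find_pll_g4x_dp():
 *   m = 5*(m1+2) + (m2+2), p = p1*p2, dot = 96000*m / (n+2) / p  [kHz]
 */
int main(void)
{
	/* target < 200000: p1=2, p2=10, n=2, m1=23, m2=8 */
	int m_lo   = 5 * (23 + 2) + (8 + 2);             /* 135 */
	int dot_lo = 96000 * m_lo / (2 + 2) / (2 * 10);  /* 162000 kHz */

	/* otherwise:       p1=1, p2=10, n=1, m1=14, m2=2 */
	int m_hi   = 5 * (14 + 2) + (2 + 2);             /* 84 */
	int dot_hi = 96000 * m_hi / (1 + 2) / (1 * 10);  /* 268800 kHz */

	printf("%d kHz, %d kHz\n", dot_lo, dot_hi);
	return 0;
}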
767 | void | 892 | void |
768 | intel_wait_for_vblank(struct drm_device *dev) | 893 | intel_wait_for_vblank(struct drm_device *dev) |
769 | { | 894 | { |
@@ -927,13 +1052,97 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
927 | return 0; | 1052 | return 0; |
928 | } | 1053 | } |
929 | 1054 | ||
1055 | /* Disable the VGA plane that we never use */ | ||
1056 | static void i915_disable_vga (struct drm_device *dev) | ||
1057 | { | ||
1058 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1059 | u8 sr1; | ||
1060 | u32 vga_reg; | ||
1061 | |||
1062 | if (IS_IGDNG(dev)) | ||
1063 | vga_reg = CPU_VGACNTRL; | ||
1064 | else | ||
1065 | vga_reg = VGACNTRL; | ||
1066 | |||
1067 | if (I915_READ(vga_reg) & VGA_DISP_DISABLE) | ||
1068 | return; | ||
1069 | |||
1070 | I915_WRITE8(VGA_SR_INDEX, 1); | ||
1071 | sr1 = I915_READ8(VGA_SR_DATA); | ||
1072 | I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5)); | ||
1073 | udelay(100); | ||
1074 | |||
1075 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); | ||
1076 | } | ||
1077 | |||
1078 | static void igdng_disable_pll_edp (struct drm_crtc *crtc) | ||
1079 | { | ||
1080 | struct drm_device *dev = crtc->dev; | ||
1081 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1082 | u32 dpa_ctl; | ||
1083 | |||
1084 | DRM_DEBUG("\n"); | ||
1085 | dpa_ctl = I915_READ(DP_A); | ||
1086 | dpa_ctl &= ~DP_PLL_ENABLE; | ||
1087 | I915_WRITE(DP_A, dpa_ctl); | ||
1088 | } | ||
1089 | |||
1090 | static void igdng_enable_pll_edp (struct drm_crtc *crtc) | ||
1091 | { | ||
1092 | struct drm_device *dev = crtc->dev; | ||
1093 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1094 | u32 dpa_ctl; | ||
1095 | |||
1096 | dpa_ctl = I915_READ(DP_A); | ||
1097 | dpa_ctl |= DP_PLL_ENABLE; | ||
1098 | I915_WRITE(DP_A, dpa_ctl); | ||
1099 | udelay(200); | ||
1100 | } | ||
1101 | |||
1102 | |||
1103 | static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) | ||
1104 | { | ||
1105 | struct drm_device *dev = crtc->dev; | ||
1106 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1107 | u32 dpa_ctl; | ||
1108 | |||
1109 | DRM_DEBUG("eDP PLL enable for clock %d\n", clock); | ||
1110 | dpa_ctl = I915_READ(DP_A); | ||
1111 | dpa_ctl &= ~DP_PLL_FREQ_MASK; | ||
1112 | |||
1113 | if (clock < 200000) { | ||
1114 | u32 temp; | ||
1115 | dpa_ctl |= DP_PLL_FREQ_160MHZ; | ||
1116 | /* workaround for 160MHz: | ||
1117 | 1) program 0x4600c bits 15:0 = 0x8124 | ||
1118 | 2) program 0x46010 bit 0 = 1 | ||
1119 | 3) program 0x46034 bit 24 = 1 | ||
1120 | 4) program 0x64000 bit 14 = 1 | ||
1121 | */ | ||
1122 | temp = I915_READ(0x4600c); | ||
1123 | temp &= 0xffff0000; | ||
1124 | I915_WRITE(0x4600c, temp | 0x8124); | ||
1125 | |||
1126 | temp = I915_READ(0x46010); | ||
1127 | I915_WRITE(0x46010, temp | 1); | ||
1128 | |||
1129 | temp = I915_READ(0x46034); | ||
1130 | I915_WRITE(0x46034, temp | (1 << 24)); | ||
1131 | } else { | ||
1132 | dpa_ctl |= DP_PLL_FREQ_270MHZ; | ||
1133 | } | ||
1134 | I915_WRITE(DP_A, dpa_ctl); | ||
1135 | |||
1136 | udelay(500); | ||
1137 | } | ||
1138 | |||
930 | static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | 1139 | static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) |
931 | { | 1140 | { |
932 | struct drm_device *dev = crtc->dev; | 1141 | struct drm_device *dev = crtc->dev; |
933 | struct drm_i915_private *dev_priv = dev->dev_private; | 1142 | struct drm_i915_private *dev_priv = dev->dev_private; |
934 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1143 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
935 | int pipe = intel_crtc->pipe; | 1144 | int pipe = intel_crtc->pipe; |
936 | int plane = intel_crtc->pipe; | 1145 | int plane = intel_crtc->plane; |
937 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; | 1146 | int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; |
938 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; | 1147 | int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; |
939 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; | 1148 | int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; |
@@ -944,6 +1153,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
944 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | 1153 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; |
945 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | 1154 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; |
946 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; | 1155 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; |
1156 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; | ||
947 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | 1157 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; |
948 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | 1158 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; |
949 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | 1159 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; |
@@ -957,7 +1167,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
957 | int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; | 1167 | int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; |
958 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | 1168 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; |
959 | u32 temp; | 1169 | u32 temp; |
960 | int tries = 5, j; | 1170 | int tries = 5, j, n; |
961 | 1171 | ||
962 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 1172 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
963 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 1173 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -967,27 +1177,32 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
967 | case DRM_MODE_DPMS_STANDBY: | 1177 | case DRM_MODE_DPMS_STANDBY: |
968 | case DRM_MODE_DPMS_SUSPEND: | 1178 | case DRM_MODE_DPMS_SUSPEND: |
969 | DRM_DEBUG("crtc %d dpms on\n", pipe); | 1179 | DRM_DEBUG("crtc %d dpms on\n", pipe); |
970 | /* enable PCH DPLL */ | 1180 | if (HAS_eDP) { |
971 | temp = I915_READ(pch_dpll_reg); | 1181 | /* enable eDP PLL */ |
972 | if ((temp & DPLL_VCO_ENABLE) == 0) { | 1182 | igdng_enable_pll_edp(crtc); |
973 | I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); | 1183 | } else { |
974 | I915_READ(pch_dpll_reg); | 1184 | /* enable PCH DPLL */ |
975 | } | 1185 | temp = I915_READ(pch_dpll_reg); |
976 | 1186 | if ((temp & DPLL_VCO_ENABLE) == 0) { | |
977 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 1187 | I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); |
978 | temp = I915_READ(fdi_rx_reg); | 1188 | I915_READ(pch_dpll_reg); |
979 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | | 1189 | } |
980 | FDI_SEL_PCDCLK | | ||
981 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ | ||
982 | I915_READ(fdi_rx_reg); | ||
983 | udelay(200); | ||
984 | 1190 | ||
985 | /* Enable CPU FDI TX PLL, always on for IGDNG */ | 1191 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
986 | temp = I915_READ(fdi_tx_reg); | 1192 | temp = I915_READ(fdi_rx_reg); |
987 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | 1193 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | |
988 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | 1194 | FDI_SEL_PCDCLK | |
989 | I915_READ(fdi_tx_reg); | 1195 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ |
990 | udelay(100); | 1196 | I915_READ(fdi_rx_reg); |
1197 | udelay(200); | ||
1198 | |||
1199 | /* Enable CPU FDI TX PLL, always on for IGDNG */ | ||
1200 | temp = I915_READ(fdi_tx_reg); | ||
1201 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | ||
1202 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | ||
1203 | I915_READ(fdi_tx_reg); | ||
1204 | udelay(100); | ||
1205 | } | ||
991 | } | 1206 | } |
992 | 1207 | ||
993 | /* Enable CPU pipe */ | 1208 | /* Enable CPU pipe */ |
@@ -1006,122 +1221,126 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1006 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | 1221 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); |
1007 | } | 1222 | } |
1008 | 1223 | ||
1009 | /* enable CPU FDI TX and PCH FDI RX */ | 1224 | if (!HAS_eDP) { |
1010 | temp = I915_READ(fdi_tx_reg); | 1225 | /* enable CPU FDI TX and PCH FDI RX */ |
1011 | temp |= FDI_TX_ENABLE; | 1226 | temp = I915_READ(fdi_tx_reg); |
1012 | temp |= FDI_DP_PORT_WIDTH_X4; /* default */ | 1227 | temp |= FDI_TX_ENABLE; |
1013 | temp &= ~FDI_LINK_TRAIN_NONE; | 1228 | temp |= FDI_DP_PORT_WIDTH_X4; /* default */ |
1014 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 1229 | temp &= ~FDI_LINK_TRAIN_NONE; |
1015 | I915_WRITE(fdi_tx_reg, temp); | 1230 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1016 | I915_READ(fdi_tx_reg); | 1231 | I915_WRITE(fdi_tx_reg, temp); |
1232 | I915_READ(fdi_tx_reg); | ||
1017 | 1233 | ||
1018 | temp = I915_READ(fdi_rx_reg); | 1234 | temp = I915_READ(fdi_rx_reg); |
1019 | temp &= ~FDI_LINK_TRAIN_NONE; | 1235 | temp &= ~FDI_LINK_TRAIN_NONE; |
1020 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 1236 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1021 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | 1237 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); |
1022 | I915_READ(fdi_rx_reg); | 1238 | I915_READ(fdi_rx_reg); |
1023 | 1239 | ||
1024 | udelay(150); | 1240 | udelay(150); |
1025 | 1241 | ||
1026 | /* Train FDI. */ | 1242 | /* Train FDI. */ |
1027 | /* unmask FDI RX Interrupt symbol_lock and bit_lock bit | 1243 | /* unmask FDI RX Interrupt symbol_lock and bit_lock bit |
1028 | for train result */ | 1244 | for train result */ |
1029 | temp = I915_READ(fdi_rx_imr_reg); | 1245 | temp = I915_READ(fdi_rx_imr_reg); |
1030 | temp &= ~FDI_RX_SYMBOL_LOCK; | 1246 | temp &= ~FDI_RX_SYMBOL_LOCK; |
1031 | temp &= ~FDI_RX_BIT_LOCK; | 1247 | temp &= ~FDI_RX_BIT_LOCK; |
1032 | I915_WRITE(fdi_rx_imr_reg, temp); | 1248 | I915_WRITE(fdi_rx_imr_reg, temp); |
1033 | I915_READ(fdi_rx_imr_reg); | 1249 | I915_READ(fdi_rx_imr_reg); |
1034 | udelay(150); | 1250 | udelay(150); |
1035 | 1251 | ||
1036 | temp = I915_READ(fdi_rx_iir_reg); | 1252 | temp = I915_READ(fdi_rx_iir_reg); |
1037 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1253 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); |
1038 | 1254 | ||
1039 | if ((temp & FDI_RX_BIT_LOCK) == 0) { | 1255 | if ((temp & FDI_RX_BIT_LOCK) == 0) { |
1040 | for (j = 0; j < tries; j++) { | 1256 | for (j = 0; j < tries; j++) { |
1041 | temp = I915_READ(fdi_rx_iir_reg); | 1257 | temp = I915_READ(fdi_rx_iir_reg); |
1042 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1258 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); |
1043 | if (temp & FDI_RX_BIT_LOCK) | 1259 | if (temp & FDI_RX_BIT_LOCK) |
1044 | break; | 1260 | break; |
1045 | udelay(200); | 1261 | udelay(200); |
1046 | } | 1262 | } |
1047 | if (j != tries) | 1263 | if (j != tries) |
1264 | I915_WRITE(fdi_rx_iir_reg, | ||
1265 | temp | FDI_RX_BIT_LOCK); | ||
1266 | else | ||
1267 | DRM_DEBUG("train 1 fail\n"); | ||
1268 | } else { | ||
1048 | I915_WRITE(fdi_rx_iir_reg, | 1269 | I915_WRITE(fdi_rx_iir_reg, |
1049 | temp | FDI_RX_BIT_LOCK); | 1270 | temp | FDI_RX_BIT_LOCK); |
1050 | else | 1271 | DRM_DEBUG("train 1 ok 2!\n"); |
1051 | DRM_DEBUG("train 1 fail\n"); | 1272 | } |
1052 | } else { | 1273 | temp = I915_READ(fdi_tx_reg); |
1053 | I915_WRITE(fdi_rx_iir_reg, | 1274 | temp &= ~FDI_LINK_TRAIN_NONE; |
1054 | temp | FDI_RX_BIT_LOCK); | 1275 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1055 | DRM_DEBUG("train 1 ok 2!\n"); | 1276 | I915_WRITE(fdi_tx_reg, temp); |
1056 | } | 1277 | |
1057 | temp = I915_READ(fdi_tx_reg); | 1278 | temp = I915_READ(fdi_rx_reg); |
1058 | temp &= ~FDI_LINK_TRAIN_NONE; | 1279 | temp &= ~FDI_LINK_TRAIN_NONE; |
1059 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 1280 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1060 | I915_WRITE(fdi_tx_reg, temp); | 1281 | I915_WRITE(fdi_rx_reg, temp); |
1061 | |||
1062 | temp = I915_READ(fdi_rx_reg); | ||
1063 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1064 | temp |= FDI_LINK_TRAIN_PATTERN_2; | ||
1065 | I915_WRITE(fdi_rx_reg, temp); | ||
1066 | 1282 | ||
1067 | udelay(150); | 1283 | udelay(150); |
1068 | 1284 | ||
1069 | temp = I915_READ(fdi_rx_iir_reg); | 1285 | temp = I915_READ(fdi_rx_iir_reg); |
1070 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1286 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); |
1071 | 1287 | ||
1072 | if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { | 1288 | if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { |
1073 | for (j = 0; j < tries; j++) { | 1289 | for (j = 0; j < tries; j++) { |
1074 | temp = I915_READ(fdi_rx_iir_reg); | 1290 | temp = I915_READ(fdi_rx_iir_reg); |
1075 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1291 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); |
1076 | if (temp & FDI_RX_SYMBOL_LOCK) | 1292 | if (temp & FDI_RX_SYMBOL_LOCK) |
1077 | break; | 1293 | break; |
1078 | udelay(200); | 1294 | udelay(200); |
1079 | } | 1295 | } |
1080 | if (j != tries) { | 1296 | if (j != tries) { |
1297 | I915_WRITE(fdi_rx_iir_reg, | ||
1298 | temp | FDI_RX_SYMBOL_LOCK); | ||
1299 | DRM_DEBUG("train 2 ok 1!\n"); | ||
1300 | } else | ||
1301 | DRM_DEBUG("train 2 fail\n"); | ||
1302 | } else { | ||
1081 | I915_WRITE(fdi_rx_iir_reg, | 1303 | I915_WRITE(fdi_rx_iir_reg, |
1082 | temp | FDI_RX_SYMBOL_LOCK); | 1304 | temp | FDI_RX_SYMBOL_LOCK); |
1083 | DRM_DEBUG("train 2 ok 1!\n"); | 1305 | DRM_DEBUG("train 2 ok 2!\n"); |
1084 | } else | 1306 | } |
1085 | DRM_DEBUG("train 2 fail\n"); | 1307 | DRM_DEBUG("train done\n"); |
1086 | } else { | ||
1087 | I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK); | ||
1088 | DRM_DEBUG("train 2 ok 2!\n"); | ||
1089 | } | ||
1090 | DRM_DEBUG("train done\n"); | ||
1091 | 1308 | ||
1092 | /* set transcoder timing */ | 1309 | /* set transcoder timing */ |
1093 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); | 1310 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); |
1094 | I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); | 1311 | I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); |
1095 | I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); | 1312 | I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); |
1096 | 1313 | ||
1097 | I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); | 1314 | I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); |
1098 | I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); | 1315 | I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); |
1099 | I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); | 1316 | I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); |
1100 | 1317 | ||
1101 | /* enable PCH transcoder */ | 1318 | /* enable PCH transcoder */ |
1102 | temp = I915_READ(transconf_reg); | 1319 | temp = I915_READ(transconf_reg); |
1103 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 1320 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
1104 | I915_READ(transconf_reg); | 1321 | I915_READ(transconf_reg); |
1105 | 1322 | ||
1106 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) | 1323 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) |
1107 | ; | 1324 | ; |
1108 | 1325 | ||
1109 | /* enable normal */ | 1326 | /* enable normal */ |
1110 | 1327 | ||
1111 | temp = I915_READ(fdi_tx_reg); | 1328 | temp = I915_READ(fdi_tx_reg); |
1112 | temp &= ~FDI_LINK_TRAIN_NONE; | 1329 | temp &= ~FDI_LINK_TRAIN_NONE; |
1113 | I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | | 1330 | I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | |
1114 | FDI_TX_ENHANCE_FRAME_ENABLE); | 1331 | FDI_TX_ENHANCE_FRAME_ENABLE); |
1115 | I915_READ(fdi_tx_reg); | 1332 | I915_READ(fdi_tx_reg); |
1116 | 1333 | ||
1117 | temp = I915_READ(fdi_rx_reg); | 1334 | temp = I915_READ(fdi_rx_reg); |
1118 | temp &= ~FDI_LINK_TRAIN_NONE; | 1335 | temp &= ~FDI_LINK_TRAIN_NONE; |
1119 | I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | | 1336 | I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | |
1120 | FDI_RX_ENHANCE_FRAME_ENABLE); | 1337 | FDI_RX_ENHANCE_FRAME_ENABLE); |
1121 | I915_READ(fdi_rx_reg); | 1338 | I915_READ(fdi_rx_reg); |
1122 | 1339 | ||
1123 | /* wait one idle pattern time */ | 1340 | /* wait one idle pattern time */ |
1124 | udelay(100); | 1341 | udelay(100); |
1342 | |||
1343 | } | ||
1125 | 1344 | ||
1126 | intel_crtc_load_lut(crtc); | 1345 | intel_crtc_load_lut(crtc); |
1127 | 1346 | ||
@@ -1129,8 +1348,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1129 | case DRM_MODE_DPMS_OFF: | 1348 | case DRM_MODE_DPMS_OFF: |
1130 | DRM_DEBUG("crtc %d dpms off\n", pipe); | 1349 | DRM_DEBUG("crtc %d dpms off\n", pipe); |
1131 | 1350 | ||
1132 | /* Disable the VGA plane that we never use */ | 1351 | i915_disable_vga(dev); |
1133 | I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE); | ||
1134 | 1352 | ||
1135 | /* Disable display plane */ | 1353 | /* Disable display plane */ |
1136 | temp = I915_READ(dspcntr_reg); | 1354 | temp = I915_READ(dspcntr_reg); |
@@ -1146,17 +1364,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1146 | if ((temp & PIPEACONF_ENABLE) != 0) { | 1364 | if ((temp & PIPEACONF_ENABLE) != 0) { |
1147 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); | 1365 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); |
1148 | I915_READ(pipeconf_reg); | 1366 | I915_READ(pipeconf_reg); |
1367 | n = 0; | ||
1149 | /* wait for cpu pipe off, pipe state */ | 1368 | /* wait for cpu pipe off, pipe state */ |
1150 | while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) | 1369 | while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) { |
1151 | ; | 1370 | n++; |
1371 | if (n < 60) { | ||
1372 | udelay(500); | ||
1373 | continue; | ||
1374 | } else { | ||
1375 | DRM_DEBUG("pipe %d off delay\n", pipe); | ||
1376 | break; | ||
1377 | } | ||
1378 | } | ||
1152 | } else | 1379 | } else |
1153 | DRM_DEBUG("crtc %d is disabled\n", pipe); | 1380 | DRM_DEBUG("crtc %d is disabled\n", pipe); |
1154 | 1381 | ||
1155 | /* IGDNG-A : disable cpu panel fitter ? */ | 1382 | if (HAS_eDP) { |
1156 | temp = I915_READ(pf_ctl_reg); | 1383 | igdng_disable_pll_edp(crtc); |
1157 | if ((temp & PF_ENABLE) != 0) { | ||
1158 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
1159 | I915_READ(pf_ctl_reg); | ||
1160 | } | 1384 | } |
1161 | 1385 | ||
1162 | /* disable CPU FDI tx and PCH FDI rx */ | 1386 | /* disable CPU FDI tx and PCH FDI rx */ |
@@ -1168,6 +1392,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1168 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | 1392 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); |
1169 | I915_READ(fdi_rx_reg); | 1393 | I915_READ(fdi_rx_reg); |
1170 | 1394 | ||
1395 | udelay(100); | ||
1396 | |||
1171 | /* still set train pattern 1 */ | 1397 | /* still set train pattern 1 */ |
1172 | temp = I915_READ(fdi_tx_reg); | 1398 | temp = I915_READ(fdi_tx_reg); |
1173 | temp &= ~FDI_LINK_TRAIN_NONE; | 1399 | temp &= ~FDI_LINK_TRAIN_NONE; |
@@ -1179,14 +1405,25 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1179 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 1405 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1180 | I915_WRITE(fdi_rx_reg, temp); | 1406 | I915_WRITE(fdi_rx_reg, temp); |
1181 | 1407 | ||
1408 | udelay(100); | ||
1409 | |||
1182 | /* disable PCH transcoder */ | 1410 | /* disable PCH transcoder */ |
1183 | temp = I915_READ(transconf_reg); | 1411 | temp = I915_READ(transconf_reg); |
1184 | if ((temp & TRANS_ENABLE) != 0) { | 1412 | if ((temp & TRANS_ENABLE) != 0) { |
1185 | I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); | 1413 | I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); |
1186 | I915_READ(transconf_reg); | 1414 | I915_READ(transconf_reg); |
1415 | n = 0; | ||
1187 | /* wait for PCH transcoder off, transcoder state */ | 1416 | /* wait for PCH transcoder off, transcoder state */ |
1188 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) | 1417 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) { |
1189 | ; | 1418 | n++; |
1419 | if (n < 60) { | ||
1420 | udelay(500); | ||
1421 | continue; | ||
1422 | } else { | ||
1423 | DRM_DEBUG("transcoder %d off delay\n", pipe); | ||
1424 | break; | ||
1425 | } | ||
1426 | } | ||
1190 | } | 1427 | } |
1191 | 1428 | ||
1192 | /* disable PCH DPLL */ | 1429 | /* disable PCH DPLL */ |
@@ -1204,6 +1441,22 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1204 | I915_READ(fdi_rx_reg); | 1441 | I915_READ(fdi_rx_reg); |
1205 | } | 1442 | } |
1206 | 1443 | ||
1444 | /* Disable CPU FDI TX PLL */ | ||
1445 | temp = I915_READ(fdi_tx_reg); | ||
1446 | if ((temp & FDI_TX_PLL_ENABLE) != 0) { | ||
1447 | I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); | ||
1448 | I915_READ(fdi_tx_reg); | ||
1449 | udelay(100); | ||
1450 | } | ||
1451 | |||
1452 | /* Disable PF */ | ||
1453 | temp = I915_READ(pf_ctl_reg); | ||
1454 | if ((temp & PF_ENABLE) != 0) { | ||
1455 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
1456 | I915_READ(pf_ctl_reg); | ||
1457 | } | ||
1458 | I915_WRITE(pf_win_size, 0); | ||
1459 | |||
1207 | /* Wait for the clocks to turn off. */ | 1460 | /* Wait for the clocks to turn off. */ |
1208 | udelay(150); | 1461 | udelay(150); |
1209 | break; | 1462 | break; |
@@ -1263,13 +1516,15 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1263 | 1516 | ||
1264 | /* Give the overlay scaler a chance to enable if it's on this pipe */ | 1517 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
1265 | //intel_crtc_dpms_video(crtc, true); TODO | 1518 | //intel_crtc_dpms_video(crtc, true); TODO |
1519 | intel_update_watermarks(dev); | ||
1266 | break; | 1520 | break; |
1267 | case DRM_MODE_DPMS_OFF: | 1521 | case DRM_MODE_DPMS_OFF: |
1522 | intel_update_watermarks(dev); | ||
1268 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | 1523 | /* Give the overlay scaler a chance to disable if it's on this pipe */ |
1269 | //intel_crtc_dpms_video(crtc, FALSE); TODO | 1524 | //intel_crtc_dpms_video(crtc, FALSE); TODO |
1270 | 1525 | ||
1271 | /* Disable the VGA plane that we never use */ | 1526 | /* Disable the VGA plane that we never use */ |
1272 | I915_WRITE(VGACNTRL, VGA_DISP_DISABLE); | 1527 | i915_disable_vga(dev); |
1273 | 1528 | ||
1274 | /* Disable display plane */ | 1529 | /* Disable display plane */ |
1275 | temp = I915_READ(dspcntr_reg); | 1530 | temp = I915_READ(dspcntr_reg); |
@@ -1443,7 +1698,6 @@ static int intel_get_core_clock_speed(struct drm_device *dev) | |||
1443 | return 0; /* Silence gcc warning */ | 1698 | return 0; /* Silence gcc warning */ |
1444 | } | 1699 | } |
1445 | 1700 | ||
1446 | |||
1447 | /** | 1701 | /** |
1448 | * Return the pipe currently connected to the panel fitter, | 1702 | * Return the pipe currently connected to the panel fitter, |
1449 | * or -1 if the panel fitter is not present or not in use | 1703 | * or -1 if the panel fitter is not present or not in use |
@@ -1502,7 +1756,7 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes, | |||
1502 | 1756 | ||
1503 | temp = (u64) DATA_N * pixel_clock; | 1757 | temp = (u64) DATA_N * pixel_clock; |
1504 | temp = div_u64(temp, link_clock); | 1758 | temp = div_u64(temp, link_clock); |
1505 | m_n->gmch_m = (temp * bytes_per_pixel) / nlanes; | 1759 | m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes); |
1506 | m_n->gmch_n = DATA_N; | 1760 | m_n->gmch_n = DATA_N; |
1507 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | 1761 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
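From the lines above, gmch_m/gmch_n reduces (up to integer truncation) to bytes_per_pixel * pixel_clock / (nlanes * link_clock), i.e. the fraction of the FDI link bandwidth the pixel data actually needs, independent of the DATA_N scaling constant. A hedged numeric illustration; the mode, lane count and link clock below are made-up example values, not taken from the driver.

#include <stdio.h>

/* Illustration of the data M/N ratio computed by igdng_compute_m_n():
 *   M/N = bytes_per_pixel * pixel_clock / (nlanes * link_clock)
 * Example numbers are hypothetical.
 */
int main(void)
{
	int bytes_per_pixel = 3;      /* 24 bpp */
	int pixel_clock     = 154000; /* kHz */
	int nlanes          = 4;
	int link_clock      = 270000; /* kHz */

	double ratio = (double)bytes_per_pixel * pixel_clock /
		       ((double)nlanes * link_clock);
	printf("data M/N = %.4f\n", ratio); /* ~0.4278, well under 1.0 */
	return 0;
}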
1508 | 1762 | ||
@@ -1513,6 +1767,464 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes, | |||
1513 | } | 1767 | } |
1514 | 1768 | ||
1515 | 1769 | ||
1770 | struct intel_watermark_params { | ||
1771 | unsigned long fifo_size; | ||
1772 | unsigned long max_wm; | ||
1773 | unsigned long default_wm; | ||
1774 | unsigned long guard_size; | ||
1775 | unsigned long cacheline_size; | ||
1776 | }; | ||
1777 | |||
1778 | /* IGD has different values for various configs */ | ||
1779 | static struct intel_watermark_params igd_display_wm = { | ||
1780 | IGD_DISPLAY_FIFO, | ||
1781 | IGD_MAX_WM, | ||
1782 | IGD_DFT_WM, | ||
1783 | IGD_GUARD_WM, | ||
1784 | IGD_FIFO_LINE_SIZE | ||
1785 | }; | ||
1786 | static struct intel_watermark_params igd_display_hplloff_wm = { | ||
1787 | IGD_DISPLAY_FIFO, | ||
1788 | IGD_MAX_WM, | ||
1789 | IGD_DFT_HPLLOFF_WM, | ||
1790 | IGD_GUARD_WM, | ||
1791 | IGD_FIFO_LINE_SIZE | ||
1792 | }; | ||
1793 | static struct intel_watermark_params igd_cursor_wm = { | ||
1794 | IGD_CURSOR_FIFO, | ||
1795 | IGD_CURSOR_MAX_WM, | ||
1796 | IGD_CURSOR_DFT_WM, | ||
1797 | IGD_CURSOR_GUARD_WM, | ||
1798 | IGD_FIFO_LINE_SIZE, | ||
1799 | }; | ||
1800 | static struct intel_watermark_params igd_cursor_hplloff_wm = { | ||
1801 | IGD_CURSOR_FIFO, | ||
1802 | IGD_CURSOR_MAX_WM, | ||
1803 | IGD_CURSOR_DFT_WM, | ||
1804 | IGD_CURSOR_GUARD_WM, | ||
1805 | IGD_FIFO_LINE_SIZE | ||
1806 | }; | ||
1807 | static struct intel_watermark_params i945_wm_info = { | ||
1808 | I945_FIFO_SIZE, | ||
1809 | I915_MAX_WM, | ||
1810 | 1, | ||
1811 | 2, | ||
1812 | I915_FIFO_LINE_SIZE | ||
1813 | }; | ||
1814 | static struct intel_watermark_params i915_wm_info = { | ||
1815 | I915_FIFO_SIZE, | ||
1816 | I915_MAX_WM, | ||
1817 | 1, | ||
1818 | 2, | ||
1819 | I915_FIFO_LINE_SIZE | ||
1820 | }; | ||
1821 | static struct intel_watermark_params i855_wm_info = { | ||
1822 | I855GM_FIFO_SIZE, | ||
1823 | I915_MAX_WM, | ||
1824 | 1, | ||
1825 | 2, | ||
1826 | I830_FIFO_LINE_SIZE | ||
1827 | }; | ||
1828 | static struct intel_watermark_params i830_wm_info = { | ||
1829 | I830_FIFO_SIZE, | ||
1830 | I915_MAX_WM, | ||
1831 | 1, | ||
1832 | 2, | ||
1833 | I830_FIFO_LINE_SIZE | ||
1834 | }; | ||
1835 | |||
1836 | /** | ||
1837 | * intel_calculate_wm - calculate watermark level | ||
1838 | * @clock_in_khz: pixel clock | ||
1839 | * @wm: chip FIFO params | ||
1840 | * @pixel_size: display pixel size | ||
1841 | * @latency_ns: memory latency for the platform | ||
1842 | * | ||
1843 | * Calculate the watermark level (the level at which the display plane will | ||
1844 | * start fetching from memory again). Each chip has a different display | ||
1845 | * FIFO size and allocation, so the caller needs to figure that out and pass | ||
1846 | * in the correct intel_watermark_params structure. | ||
1847 | * | ||
1848 | * As the pixel clock runs, the FIFO will be drained at a rate that depends | ||
1849 | * on the pixel size. When it reaches the watermark level, it'll start | ||
1850 | * fetching FIFO-line-sized chunks from memory until the FIFO fills | ||
1851 | * past the watermark point. If the FIFO drains completely, a FIFO underrun | ||
1852 | * will occur, and a display engine hang could result. | ||
1853 | */ | ||
1854 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | ||
1855 | struct intel_watermark_params *wm, | ||
1856 | int pixel_size, | ||
1857 | unsigned long latency_ns) | ||
1858 | { | ||
1859 | long entries_required, wm_size; | ||
1860 | |||
1861 | entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; | ||
1862 | entries_required /= wm->cacheline_size; | ||
1863 | |||
1864 | DRM_DEBUG("FIFO entries required for mode: %ld\n", entries_required); | ||
1865 | |||
1866 | wm_size = wm->fifo_size - (entries_required + wm->guard_size); | ||
1867 | |||
1868 | DRM_DEBUG("FIFO watermark level: %ld\n", wm_size); | ||
1869 | |||
1870 | /* Don't promote wm_size to unsigned... */ | ||
1871 | if (wm_size > (long)wm->max_wm) | ||
1872 | wm_size = wm->max_wm; | ||
1873 | if (wm_size <= 0) | ||
1874 | wm_size = wm->default_wm; | ||
1875 | return wm_size; | ||
1876 | } | ||
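As a rough illustration of the arithmetic in intel_calculate_wm() above, here is a standalone userspace sketch. The FIFO parameters (96 entries, 64-byte cachelines, guard of 2, cap of 63) and the 65000 kHz, 32 bpp mode are invented for the example and are not the real chip constants:

#include <stdio.h>

struct wm_params {              /* mirrors intel_watermark_params */
	long fifo_size;         /* FIFO size, in cachelines */
	long max_wm;
	long default_wm;
	long guard_size;
	long cacheline_size;    /* bytes per FIFO entry */
};

static long calc_wm(long clock_khz, const struct wm_params *wm,
		    long pixel_size, long latency_ns)
{
	/* bytes drained from the FIFO during the latency window */
	long entries = (clock_khz * pixel_size * latency_ns) / 1000000;
	long wm_size;

	entries /= wm->cacheline_size;          /* convert to FIFO entries */
	wm_size = wm->fifo_size - (entries + wm->guard_size);
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}

int main(void)
{
	/* invented chip: 96-entry FIFO, 64-byte lines, guard 2, cap 63 */
	struct wm_params wm = { 96, 63, 1, 2, 64 };

	/* 65000 kHz dotclock, 4 bytes/pixel, 3000 ns latency */
	printf("wm = %ld\n", calc_wm(65000, &wm, 4, 3000));
	return 0;
}

With these numbers the FIFO drains 780 bytes (12 cachelines) during the 3000 ns latency window, leaving 96 - (12 + 2) = 82, which is then clamped to the assumed max_wm of 63.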
1877 | |||
1878 | struct cxsr_latency { | ||
1879 | int is_desktop; | ||
1880 | unsigned long fsb_freq; | ||
1881 | unsigned long mem_freq; | ||
1882 | unsigned long display_sr; | ||
1883 | unsigned long display_hpll_disable; | ||
1884 | unsigned long cursor_sr; | ||
1885 | unsigned long cursor_hpll_disable; | ||
1886 | }; | ||
1887 | |||
1888 | static struct cxsr_latency cxsr_latency_table[] = { | ||
1889 | {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ | ||
1890 | {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ | ||
1891 | {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ | ||
1892 | |||
1893 | {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ | ||
1894 | {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ | ||
1895 | {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ | ||
1896 | |||
1897 | {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ | ||
1898 | {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ | ||
1899 | {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ | ||
1900 | |||
1901 | {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ | ||
1902 | {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ | ||
1903 | {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ | ||
1904 | |||
1905 | {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ | ||
1906 | {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ | ||
1907 | {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ | ||
1908 | |||
1909 | {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ | ||
1910 | {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ | ||
1911 | {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ | ||
1912 | }; | ||
1913 | |||
1914 | static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, | ||
1915 | int mem) | ||
1916 | { | ||
1917 | int i; | ||
1918 | struct cxsr_latency *latency; | ||
1919 | |||
1920 | if (fsb == 0 || mem == 0) | ||
1921 | return NULL; | ||
1922 | |||
1923 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { | ||
1924 | latency = &cxsr_latency_table[i]; | ||
1925 | if (is_desktop == latency->is_desktop && | ||
1926 | fsb == latency->fsb_freq && mem == latency->mem_freq) | ||
1927 | break; | ||
1928 | } | ||
1929 | if (i >= ARRAY_SIZE(cxsr_latency_table)) { | ||
1930 | DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); | ||
1931 | return NULL; | ||
1932 | } | ||
1933 | return latency; | ||
1934 | } | ||
1935 | |||
1936 | static void igd_disable_cxsr(struct drm_device *dev) | ||
1937 | { | ||
1938 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1939 | u32 reg; | ||
1940 | |||
1941 | /* deactivate cxsr */ | ||
1942 | reg = I915_READ(DSPFW3); | ||
1943 | reg &= ~(IGD_SELF_REFRESH_EN); | ||
1944 | I915_WRITE(DSPFW3, reg); | ||
1945 | DRM_INFO("Big FIFO is disabled\n"); | ||
1946 | } | ||
1947 | |||
1948 | static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, | ||
1949 | int pixel_size) | ||
1950 | { | ||
1951 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1952 | u32 reg; | ||
1953 | unsigned long wm; | ||
1954 | struct cxsr_latency *latency; | ||
1955 | |||
1956 | latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq, | ||
1957 | dev_priv->mem_freq); | ||
1958 | if (!latency) { | ||
1959 | DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); | ||
1960 | igd_disable_cxsr(dev); | ||
1961 | return; | ||
1962 | } | ||
1963 | |||
1964 | /* Display SR */ | ||
1965 | wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size, | ||
1966 | latency->display_sr); | ||
1967 | reg = I915_READ(DSPFW1); | ||
1968 | reg &= 0x7fffff; | ||
1969 | reg |= wm << 23; | ||
1970 | I915_WRITE(DSPFW1, reg); | ||
1971 | DRM_DEBUG("DSPFW1 register is %x\n", reg); | ||
1972 | |||
1973 | /* cursor SR */ | ||
1974 | wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size, | ||
1975 | latency->cursor_sr); | ||
1976 | reg = I915_READ(DSPFW3); | ||
1977 | reg &= ~(0x3f << 24); | ||
1978 | reg |= (wm & 0x3f) << 24; | ||
1979 | I915_WRITE(DSPFW3, reg); | ||
1980 | |||
1981 | /* Display HPLL off SR */ | ||
1982 | wm = intel_calculate_wm(clock, &igd_display_hplloff_wm, pixel_size, | ||
1983 | latency->display_hpll_disable); | ||
1984 | reg = I915_READ(DSPFW3); | ||
1985 | reg &= 0xfffffe00; | ||
1986 | reg |= wm & 0x1ff; | ||
1987 | I915_WRITE(DSPFW3, reg); | ||
1988 | |||
1989 | /* cursor HPLL off SR */ | ||
1990 | wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size, | ||
1991 | latency->cursor_hpll_disable); | ||
1992 | reg = I915_READ(DSPFW3); | ||
1993 | reg &= ~(0x3f << 16); | ||
1994 | reg |= (wm & 0x3f) << 16; | ||
1995 | I915_WRITE(DSPFW3, reg); | ||
1996 | DRM_DEBUG("DSPFW3 register is %x\n", reg); | ||
1997 | |||
1998 | /* activate cxsr */ | ||
1999 | reg = I915_READ(DSPFW3); | ||
2000 | reg |= IGD_SELF_REFRESH_EN; | ||
2001 | I915_WRITE(DSPFW3, reg); | ||
2002 | |||
2003 | DRM_INFO("Big FIFO is enabled\n"); | ||
2004 | |||
2005 | return; | ||
2006 | } | ||
2007 | |||
2008 | static const int latency_ns = 3000; /* default for non-igd platforms */ | ||
2009 | |||
2010 | static int intel_get_fifo_size(struct drm_device *dev, int plane) | ||
2011 | { | ||
2012 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2013 | uint32_t dsparb = I915_READ(DSPARB); | ||
2014 | int size; | ||
2015 | |||
2016 | if (IS_I9XX(dev)) { | ||
2017 | if (plane == 0) | ||
2018 | size = dsparb & 0x7f; | ||
2019 | else | ||
2020 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - | ||
2021 | (dsparb & 0x7f); | ||
2022 | } else if (IS_I85X(dev)) { | ||
2023 | if (plane == 0) | ||
2024 | size = dsparb & 0x1ff; | ||
2025 | else | ||
2026 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | ||
2027 | (dsparb & 0x1ff); | ||
2028 | size >>= 1; /* Convert to cachelines */ | ||
2029 | } else if (IS_845G(dev)) { | ||
2030 | size = dsparb & 0x7f; | ||
2031 | size >>= 2; /* Convert to cachelines */ | ||
2032 | } else { | ||
2033 | size = dsparb & 0x7f; | ||
2034 | size >>= 1; /* Convert to cachelines */ | ||
2035 | } | ||
2036 | |||
2037 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | ||
2038 | size); | ||
2039 | |||
2040 | return size; | ||
2041 | } | ||
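For illustration, a minimal sketch of the 9xx-style DSPARB decode used in intel_get_fifo_size(). The bit position of the second split field and the register value are assumptions made up for the example, not the documented layout:

#include <stdio.h>
#include <stdint.h>

/* assumed bit position of the second split field, for illustration only */
#define CSTART_SHIFT 7

int main(void)
{
	/* made-up register value: low split field 48, second split field 96 */
	uint32_t dsparb = (96u << CSTART_SHIFT) | 48u;

	int planea = dsparb & 0x7f;                               /* 48 entries */
	int planeb = ((dsparb >> CSTART_SHIFT) & 0x7f) - planea;  /* 48 entries */

	printf("plane A: %d entries, plane B: %d entries\n", planea, planeb);
	return 0;
}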
2042 | |||
2043 | static void i965_update_wm(struct drm_device *dev) | ||
2044 | { | ||
2045 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2046 | |||
2047 | DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); | ||
2048 | |||
2049 | /* 965 has limitations... */ | ||
2050 | I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); | ||
2051 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); | ||
2052 | } | ||
2053 | |||
2054 | static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | ||
2055 | int planeb_clock, int sr_hdisplay, int pixel_size) | ||
2056 | { | ||
2057 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2058 | uint32_t fwater_lo; | ||
2059 | uint32_t fwater_hi; | ||
2060 | int total_size, cacheline_size, cwm, srwm = 1; | ||
2061 | int planea_wm, planeb_wm; | ||
2062 | struct intel_watermark_params planea_params, planeb_params; | ||
2063 | unsigned long line_time_us; | ||
2064 | int sr_clock, sr_entries = 0; | ||
2065 | |||
2066 | /* Create copies of the base settings for each pipe */ | ||
2067 | if (IS_I965GM(dev) || IS_I945GM(dev)) | ||
2068 | planea_params = planeb_params = i945_wm_info; | ||
2069 | else if (IS_I9XX(dev)) | ||
2070 | planea_params = planeb_params = i915_wm_info; | ||
2071 | else | ||
2072 | planea_params = planeb_params = i855_wm_info; | ||
2073 | |||
2074 | /* Grab a couple of global values before we overwrite them */ | ||
2075 | total_size = planea_params.fifo_size; | ||
2076 | cacheline_size = planea_params.cacheline_size; | ||
2077 | |||
2078 | /* Update per-plane FIFO sizes */ | ||
2079 | planea_params.fifo_size = intel_get_fifo_size(dev, 0); | ||
2080 | planeb_params.fifo_size = intel_get_fifo_size(dev, 1); | ||
2081 | |||
2082 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, | ||
2083 | pixel_size, latency_ns); | ||
2084 | planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, | ||
2085 | pixel_size, latency_ns); | ||
2086 | DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | ||
2087 | |||
2088 | /* | ||
2089 | * Overlay gets an aggressive default since video jitter is bad. | ||
2090 | */ | ||
2091 | cwm = 2; | ||
2092 | |||
2093 | /* Calc sr entries for one plane configs */ | ||
2094 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | ||
2095 | /* self-refresh has much higher latency */ | ||
2096 | static const int sr_latency_ns = 6000; | ||
2097 | |||
2098 | sr_clock = planea_clock ? planea_clock : planeb_clock; | ||
2099 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | ||
2100 | |||
2101 | /* Use ns/us then divide to preserve precision */ | ||
2102 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * | ||
2103 | pixel_size * sr_hdisplay) / 1000; | ||
2104 | sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); | ||
2105 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | ||
2106 | srwm = total_size - sr_entries; | ||
2107 | if (srwm < 0) | ||
2108 | srwm = 1; | ||
2109 | if (IS_I9XX(dev)) | ||
2110 | I915_WRITE(FW_BLC_SELF, (srwm & 0x3f)); | ||
2111 | } | ||
2112 | |||
2113 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | ||
2114 | planea_wm, planeb_wm, cwm, srwm); | ||
2115 | |||
2116 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); | ||
2117 | fwater_hi = (cwm & 0x1f); | ||
2118 | |||
2119 | /* Set request length to 8 cachelines per fetch */ | ||
2120 | fwater_lo = fwater_lo | (1 << 24) | (1 << 8); | ||
2121 | fwater_hi = fwater_hi | (1 << 8); | ||
2122 | |||
2123 | I915_WRITE(FW_BLC, fwater_lo); | ||
2124 | I915_WRITE(FW_BLC2, fwater_hi); | ||
2125 | } | ||
2126 | |||
2127 | static void i830_update_wm(struct drm_device *dev, int planea_clock, | ||
2128 | int pixel_size) | ||
2129 | { | ||
2130 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2131 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; | ||
2132 | int planea_wm; | ||
2133 | |||
2134 | i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); | ||
2135 | |||
2136 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, | ||
2137 | pixel_size, latency_ns); | ||
2138 | fwater_lo |= (3<<8) | planea_wm; | ||
2139 | |||
2140 | DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm); | ||
2141 | |||
2142 | I915_WRITE(FW_BLC, fwater_lo); | ||
2143 | } | ||
2144 | |||
2145 | /** | ||
2146 | * intel_update_watermarks - update FIFO watermark values based on current modes | ||
2147 | * | ||
2148 | * Calculate watermark values for the various WM regs based on current mode | ||
2149 | * and plane configuration. | ||
2150 | * | ||
2151 | * There are several cases to deal with here: | ||
2152 | * - normal (i.e. non-self-refresh) | ||
2153 | * - self-refresh (SR) mode | ||
2154 | * - lines are large relative to FIFO size (buffer can hold up to 2) | ||
2155 | * - lines are small relative to FIFO size (buffer can hold more than 2 | ||
2156 | * lines), so need to account for TLB latency | ||
2157 | * | ||
2158 | * The normal calculation is: | ||
2159 | * watermark = dotclock * bytes per pixel * latency | ||
2160 | * where latency is platform & configuration dependent (we assume pessimal | ||
2161 | * values here). | ||
2162 | * | ||
2163 | * The SR calculation is: | ||
2164 | * watermark = (trunc(latency/line time)+1) * surface width * | ||
2165 | * bytes per pixel | ||
2166 | * where | ||
2167 | * line time = htotal / dotclock | ||
2168 | * and latency is assumed to be high, as above. | ||
2169 | * | ||
2170 | * The final value programmed to the register should always be rounded up, | ||
2171 | * and include an extra 2 entries to account for clock crossings. | ||
2172 | * | ||
2173 | * We don't use the sprite, so we can ignore that. And on Crestline we have | ||
2174 | * to set the non-SR watermarks to 8. | ||
2175 | */ | ||
2176 | static void intel_update_watermarks(struct drm_device *dev) | ||
2177 | { | ||
2178 | struct drm_crtc *crtc; | ||
2179 | struct intel_crtc *intel_crtc; | ||
2180 | int sr_hdisplay = 0; | ||
2181 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; | ||
2182 | int enabled = 0, pixel_size = 0; | ||
2183 | |||
2184 | if (DSPARB_HWCONTROL(dev)) | ||
2185 | return; | ||
2186 | |||
2187 | /* Get the clock config from both planes */ | ||
2188 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
2189 | intel_crtc = to_intel_crtc(crtc); | ||
2190 | if (crtc->enabled) { | ||
2191 | enabled++; | ||
2192 | if (intel_crtc->plane == 0) { | ||
2193 | DRM_DEBUG("plane A (pipe %d) clock: %d\n", | ||
2194 | intel_crtc->pipe, crtc->mode.clock); | ||
2195 | planea_clock = crtc->mode.clock; | ||
2196 | } else { | ||
2197 | DRM_DEBUG("plane B (pipe %d) clock: %d\n", | ||
2198 | intel_crtc->pipe, crtc->mode.clock); | ||
2199 | planeb_clock = crtc->mode.clock; | ||
2200 | } | ||
2201 | sr_hdisplay = crtc->mode.hdisplay; | ||
2202 | sr_clock = crtc->mode.clock; | ||
2203 | if (crtc->fb) | ||
2204 | pixel_size = crtc->fb->bits_per_pixel / 8; | ||
2205 | else | ||
2206 | pixel_size = 4; /* by default */ | ||
2207 | } | ||
2208 | } | ||
2209 | |||
2210 | if (enabled <= 0) | ||
2211 | return; | ||
2212 | |||
2213 | /* Single plane configs can enable self refresh */ | ||
2214 | if (enabled == 1 && IS_IGD(dev)) | ||
2215 | igd_enable_cxsr(dev, sr_clock, pixel_size); | ||
2216 | else if (IS_IGD(dev)) | ||
2217 | igd_disable_cxsr(dev); | ||
2218 | |||
2219 | if (IS_I965G(dev)) | ||
2220 | i965_update_wm(dev); | ||
2221 | else if (IS_I9XX(dev) || IS_MOBILE(dev)) | ||
2222 | i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay, | ||
2223 | pixel_size); | ||
2224 | else | ||
2225 | i830_update_wm(dev, planea_clock, pixel_size); | ||
2226 | } | ||
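A rough worked example of the self-refresh branch in i9xx_update_wm() above, with every device parameter invented (a single 1024-wide, 32 bpp plane at a 65000 kHz dotclock, the 6000 ns self-refresh latency, and a hypothetical 96-entry FIFO with 64-byte lines); the byte count is rounded up to whole cachelines, as the comment block above prescribes:

#include <stdio.h>

int main(void)
{
	/* every value below is invented for illustration */
	const long sr_latency_ns = 6000;
	long sr_hdisplay = 1024, sr_clock = 65000;     /* pixels, kHz */
	long pixel_size = 4, total_size = 96, cacheline_size = 64;

	/* line time in microseconds, as in i9xx_update_wm() */
	long line_time_us = (sr_hdisplay * 1000) / sr_clock;          /* 15 us */

	/* ns/us factor carried through the sum, then divided back out */
	long sr_entries = (((sr_latency_ns / line_time_us) + 1) *
			   pixel_size * sr_hdisplay) / 1000;          /* 1642 bytes */

	/* round up to whole cachelines */
	sr_entries = (sr_entries + cacheline_size - 1) / cacheline_size; /* 26 */

	long srwm = total_size - sr_entries;                          /* 70 */
	if (srwm < 0)
		srwm = 1;

	printf("line time %ld us, %ld entries, srwm %ld\n",
	       line_time_us, sr_entries, srwm);
	return 0;
}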
2227 | |||
1516 | static int intel_crtc_mode_set(struct drm_crtc *crtc, | 2228 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
1517 | struct drm_display_mode *mode, | 2229 | struct drm_display_mode *mode, |
1518 | struct drm_display_mode *adjusted_mode, | 2230 | struct drm_display_mode *adjusted_mode, |
@@ -1541,7 +2253,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1541 | intel_clock_t clock; | 2253 | intel_clock_t clock; |
1542 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; | 2254 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; |
1543 | bool ok, is_sdvo = false, is_dvo = false; | 2255 | bool ok, is_sdvo = false, is_dvo = false; |
1544 | bool is_crt = false, is_lvds = false, is_tv = false; | 2256 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
2257 | bool is_edp = false; | ||
1545 | struct drm_mode_config *mode_config = &dev->mode_config; | 2258 | struct drm_mode_config *mode_config = &dev->mode_config; |
1546 | struct drm_connector *connector; | 2259 | struct drm_connector *connector; |
1547 | const intel_limit_t *limit; | 2260 | const intel_limit_t *limit; |
@@ -1557,6 +2270,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1557 | int lvds_reg = LVDS; | 2270 | int lvds_reg = LVDS; |
1558 | u32 temp; | 2271 | u32 temp; |
1559 | int sdvo_pixel_multiply; | 2272 | int sdvo_pixel_multiply; |
2273 | int target_clock; | ||
1560 | 2274 | ||
1561 | drm_vblank_pre_modeset(dev, pipe); | 2275 | drm_vblank_pre_modeset(dev, pipe); |
1562 | 2276 | ||
@@ -1585,6 +2299,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1585 | case INTEL_OUTPUT_ANALOG: | 2299 | case INTEL_OUTPUT_ANALOG: |
1586 | is_crt = true; | 2300 | is_crt = true; |
1587 | break; | 2301 | break; |
2302 | case INTEL_OUTPUT_DISPLAYPORT: | ||
2303 | is_dp = true; | ||
2304 | break; | ||
2305 | case INTEL_OUTPUT_EDP: | ||
2306 | is_edp = true; | ||
2307 | break; | ||
1588 | } | 2308 | } |
1589 | 2309 | ||
1590 | num_outputs++; | 2310 | num_outputs++; |
@@ -1600,6 +2320,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1600 | } else { | 2320 | } else { |
1601 | refclk = 48000; | 2321 | refclk = 48000; |
1602 | } | 2322 | } |
2323 | |||
1603 | 2324 | ||
1604 | /* | 2325 | /* |
1605 | * Returns a set of divisors for the desired target clock with the given | 2326 | * Returns a set of divisors for the desired target clock with the given |
@@ -1635,11 +2356,29 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1635 | } | 2356 | } |
1636 | 2357 | ||
1637 | /* FDI link */ | 2358 | /* FDI link */ |
1638 | if (IS_IGDNG(dev)) | 2359 | if (IS_IGDNG(dev)) { |
1639 | igdng_compute_m_n(3, 4, /* lane num 4 */ | 2360 | int lane, link_bw; |
1640 | adjusted_mode->clock, | 2361 | /* eDP doesn't require FDI link, so just set DP M/N |
1641 | 270000, /* lane clock */ | 2362 | according to current link config */ |
1642 | &m_n); | 2363 | if (is_edp) { |
2364 | struct drm_connector *edp; | ||
2365 | target_clock = mode->clock; | ||
2366 | edp = intel_pipe_get_output(crtc); | ||
2367 | intel_edp_link_config(to_intel_output(edp), | ||
2368 | &lane, &link_bw); | ||
2369 | } else { | ||
2370 | /* DP over FDI requires target mode clock | ||
2371 | instead of link clock */ | ||
2372 | if (is_dp) | ||
2373 | target_clock = mode->clock; | ||
2374 | else | ||
2375 | target_clock = adjusted_mode->clock; | ||
2376 | lane = 4; | ||
2377 | link_bw = 270000; | ||
2378 | } | ||
2379 | igdng_compute_m_n(3, lane, target_clock, | ||
2380 | link_bw, &m_n); | ||
2381 | } | ||
1643 | 2382 | ||
1644 | if (IS_IGD(dev)) | 2383 | if (IS_IGD(dev)) |
1645 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | 2384 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
@@ -1662,6 +2401,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1662 | else if (IS_IGDNG(dev)) | 2401 | else if (IS_IGDNG(dev)) |
1663 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | 2402 | dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
1664 | } | 2403 | } |
2404 | if (is_dp) | ||
2405 | dpll |= DPLL_DVO_HIGH_SPEED; | ||
1665 | 2406 | ||
1666 | /* compute bitmask from p1 value */ | 2407 | /* compute bitmask from p1 value */ |
1667 | if (IS_IGD(dev)) | 2408 | if (IS_IGD(dev)) |
@@ -1758,29 +2499,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1758 | dpll_reg = pch_dpll_reg; | 2499 | dpll_reg = pch_dpll_reg; |
1759 | } | 2500 | } |
1760 | 2501 | ||
1761 | if (dpll & DPLL_VCO_ENABLE) { | 2502 | if (is_edp) { |
2503 | igdng_disable_pll_edp(crtc); | ||
2504 | } else if ((dpll & DPLL_VCO_ENABLE)) { | ||
1762 | I915_WRITE(fp_reg, fp); | 2505 | I915_WRITE(fp_reg, fp); |
1763 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | 2506 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); |
1764 | I915_READ(dpll_reg); | 2507 | I915_READ(dpll_reg); |
1765 | udelay(150); | 2508 | udelay(150); |
1766 | } | 2509 | } |
1767 | 2510 | ||
1768 | if (IS_IGDNG(dev)) { | ||
1769 | /* enable PCH clock reference source */ | ||
1770 | /* XXX need to change the setting for other outputs */ | ||
1771 | u32 temp; | ||
1772 | temp = I915_READ(PCH_DREF_CONTROL); | ||
1773 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
1774 | temp |= DREF_NONSPREAD_CK505_ENABLE; | ||
1775 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
1776 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
1777 | temp &= ~DREF_SSC1_ENABLE; | ||
1778 | /* if no eDP, disable source output to CPU */ | ||
1779 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
1780 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | ||
1781 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
1782 | } | ||
1783 | |||
1784 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 2511 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
1785 | * This is an exception to the general rule that mode_set doesn't turn | 2512 | * This is an exception to the general rule that mode_set doesn't turn |
1786 | * things on. | 2513 | * things on. |
@@ -1809,24 +2536,28 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1809 | I915_WRITE(lvds_reg, lvds); | 2536 | I915_WRITE(lvds_reg, lvds); |
1810 | I915_READ(lvds_reg); | 2537 | I915_READ(lvds_reg); |
1811 | } | 2538 | } |
2539 | if (is_dp) | ||
2540 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | ||
1812 | 2541 | ||
1813 | I915_WRITE(fp_reg, fp); | 2542 | if (!is_edp) { |
1814 | I915_WRITE(dpll_reg, dpll); | 2543 | I915_WRITE(fp_reg, fp); |
1815 | I915_READ(dpll_reg); | ||
1816 | /* Wait for the clocks to stabilize. */ | ||
1817 | udelay(150); | ||
1818 | |||
1819 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { | ||
1820 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
1821 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
1822 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | ||
1823 | } else { | ||
1824 | /* write it again -- the BIOS does, after all */ | ||
1825 | I915_WRITE(dpll_reg, dpll); | 2544 | I915_WRITE(dpll_reg, dpll); |
2545 | I915_READ(dpll_reg); | ||
2546 | /* Wait for the clocks to stabilize. */ | ||
2547 | udelay(150); | ||
2548 | |||
2549 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { | ||
2550 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
2551 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
2552 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | ||
2553 | } else { | ||
2554 | /* write it again -- the BIOS does, after all */ | ||
2555 | I915_WRITE(dpll_reg, dpll); | ||
2556 | } | ||
2557 | I915_READ(dpll_reg); | ||
2558 | /* Wait for the clocks to stabilize. */ | ||
2559 | udelay(150); | ||
1826 | } | 2560 | } |
1827 | I915_READ(dpll_reg); | ||
1828 | /* Wait for the clocks to stabilize. */ | ||
1829 | udelay(150); | ||
1830 | 2561 | ||
1831 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | | 2562 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | |
1832 | ((adjusted_mode->crtc_htotal - 1) << 16)); | 2563 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
@@ -1856,10 +2587,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1856 | I915_WRITE(link_m1_reg, m_n.link_m); | 2587 | I915_WRITE(link_m1_reg, m_n.link_m); |
1857 | I915_WRITE(link_n1_reg, m_n.link_n); | 2588 | I915_WRITE(link_n1_reg, m_n.link_n); |
1858 | 2589 | ||
1859 | /* enable FDI RX PLL too */ | 2590 | if (is_edp) { |
1860 | temp = I915_READ(fdi_rx_reg); | 2591 | igdng_set_pll_edp(crtc, adjusted_mode->clock); |
1861 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | 2592 | } else { |
1862 | udelay(200); | 2593 | /* enable FDI RX PLL too */ |
2594 | temp = I915_READ(fdi_rx_reg); | ||
2595 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | ||
2596 | udelay(200); | ||
2597 | } | ||
1863 | } | 2598 | } |
1864 | 2599 | ||
1865 | I915_WRITE(pipeconf_reg, pipeconf); | 2600 | I915_WRITE(pipeconf_reg, pipeconf); |
@@ -1871,6 +2606,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
1871 | 2606 | ||
1872 | /* Flush the plane changes */ | 2607 | /* Flush the plane changes */ |
1873 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 2608 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
2609 | |||
2610 | intel_update_watermarks(dev); | ||
2611 | |||
1874 | drm_vblank_post_modeset(dev, pipe); | 2612 | drm_vblank_post_modeset(dev, pipe); |
1875 | 2613 | ||
1876 | return ret; | 2614 | return ret; |
@@ -2359,6 +3097,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
2359 | 3097 | ||
2360 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); | 3098 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); |
2361 | intel_crtc->pipe = pipe; | 3099 | intel_crtc->pipe = pipe; |
3100 | intel_crtc->plane = pipe; | ||
2362 | for (i = 0; i < 256; i++) { | 3101 | for (i = 0; i < 256; i++) { |
2363 | intel_crtc->lut_r[i] = i; | 3102 | intel_crtc->lut_r[i] = i; |
2364 | intel_crtc->lut_g[i] = i; | 3103 | intel_crtc->lut_g[i] = i; |
@@ -2453,12 +3192,17 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2453 | if (IS_IGDNG(dev)) { | 3192 | if (IS_IGDNG(dev)) { |
2454 | int found; | 3193 | int found; |
2455 | 3194 | ||
3195 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) | ||
3196 | intel_dp_init(dev, DP_A); | ||
3197 | |||
2456 | if (I915_READ(HDMIB) & PORT_DETECTED) { | 3198 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
2457 | /* check SDVOB */ | 3199 | /* check SDVOB */ |
2458 | /* found = intel_sdvo_init(dev, HDMIB); */ | 3200 | /* found = intel_sdvo_init(dev, HDMIB); */ |
2459 | found = 0; | 3201 | found = 0; |
2460 | if (!found) | 3202 | if (!found) |
2461 | intel_hdmi_init(dev, HDMIB); | 3203 | intel_hdmi_init(dev, HDMIB); |
3204 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) | ||
3205 | intel_dp_init(dev, PCH_DP_B); | ||
2462 | } | 3206 | } |
2463 | 3207 | ||
2464 | if (I915_READ(HDMIC) & PORT_DETECTED) | 3208 | if (I915_READ(HDMIC) & PORT_DETECTED) |
@@ -2467,6 +3211,12 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2467 | if (I915_READ(HDMID) & PORT_DETECTED) | 3211 | if (I915_READ(HDMID) & PORT_DETECTED) |
2468 | intel_hdmi_init(dev, HDMID); | 3212 | intel_hdmi_init(dev, HDMID); |
2469 | 3213 | ||
3214 | if (I915_READ(PCH_DP_C) & DP_DETECTED) | ||
3215 | intel_dp_init(dev, PCH_DP_C); | ||
3216 | |||
3217 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | ||
3218 | intel_dp_init(dev, PCH_DP_D); | ||
3219 | |||
2470 | } else if (IS_I9XX(dev)) { | 3220 | } else if (IS_I9XX(dev)) { |
2471 | int found; | 3221 | int found; |
2472 | u32 reg; | 3222 | u32 reg; |
@@ -2475,6 +3225,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2475 | found = intel_sdvo_init(dev, SDVOB); | 3225 | found = intel_sdvo_init(dev, SDVOB); |
2476 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 3226 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
2477 | intel_hdmi_init(dev, SDVOB); | 3227 | intel_hdmi_init(dev, SDVOB); |
3228 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | ||
3229 | intel_dp_init(dev, DP_B); | ||
2478 | } | 3230 | } |
2479 | 3231 | ||
2480 | /* Before G4X SDVOC doesn't have its own detect register */ | 3232 | /* Before G4X SDVOC doesn't have its own detect register */ |
@@ -2487,7 +3239,11 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2487 | found = intel_sdvo_init(dev, SDVOC); | 3239 | found = intel_sdvo_init(dev, SDVOC); |
2488 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 3240 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
2489 | intel_hdmi_init(dev, SDVOC); | 3241 | intel_hdmi_init(dev, SDVOC); |
3242 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | ||
3243 | intel_dp_init(dev, DP_C); | ||
2490 | } | 3244 | } |
3245 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | ||
3246 | intel_dp_init(dev, DP_D); | ||
2491 | } else | 3247 | } else |
2492 | intel_dvo_init(dev); | 3248 | intel_dvo_init(dev); |
2493 | 3249 | ||
@@ -2530,6 +3286,15 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2530 | (1 << 1)); | 3286 | (1 << 1)); |
2531 | clone_mask = (1 << INTEL_OUTPUT_TVOUT); | 3287 | clone_mask = (1 << INTEL_OUTPUT_TVOUT); |
2532 | break; | 3288 | break; |
3289 | case INTEL_OUTPUT_DISPLAYPORT: | ||
3290 | crtc_mask = ((1 << 0) | | ||
3291 | (1 << 1)); | ||
3292 | clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); | ||
3293 | break; | ||
3294 | case INTEL_OUTPUT_EDP: | ||
3295 | crtc_mask = (1 << 1); | ||
3296 | clone_mask = (1 << INTEL_OUTPUT_EDP); | ||
3297 | break; | ||
2533 | } | 3298 | } |
2534 | encoder->possible_crtcs = crtc_mask; | 3299 | encoder->possible_crtcs = crtc_mask; |
2535 | encoder->possible_clones = intel_connector_clones(dev, clone_mask); | 3300 | encoder->possible_clones = intel_connector_clones(dev, clone_mask); |
@@ -2639,6 +3404,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
2639 | if (IS_I965G(dev)) { | 3404 | if (IS_I965G(dev)) { |
2640 | dev->mode_config.max_width = 8192; | 3405 | dev->mode_config.max_width = 8192; |
2641 | dev->mode_config.max_height = 8192; | 3406 | dev->mode_config.max_height = 8192; |
3407 | } else if (IS_I9XX(dev)) { | ||
3408 | dev->mode_config.max_width = 4096; | ||
3409 | dev->mode_config.max_height = 4096; | ||
2642 | } else { | 3410 | } else { |
2643 | dev->mode_config.max_width = 2048; | 3411 | dev->mode_config.max_width = 2048; |
2644 | dev->mode_config.max_height = 2048; | 3412 | dev->mode_config.max_height = 2048; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c new file mode 100644 index 000000000000..a6ff15ac548a --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -0,0 +1,1318 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Keith Packard <keithp@keithp.com> | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | #include <linux/i2c.h> | ||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "drm_crtc.h" | ||
32 | #include "drm_crtc_helper.h" | ||
33 | #include "intel_drv.h" | ||
34 | #include "i915_drm.h" | ||
35 | #include "i915_drv.h" | ||
36 | #include "intel_dp.h" | ||
37 | |||
38 | #define DP_LINK_STATUS_SIZE 6 | ||
39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | ||
40 | |||
41 | #define DP_LINK_CONFIGURATION_SIZE 9 | ||
42 | |||
43 | #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) | ||
44 | |||
45 | struct intel_dp_priv { | ||
46 | uint32_t output_reg; | ||
47 | uint32_t DP; | ||
48 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
49 | uint32_t save_DP; | ||
50 | uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
51 | bool has_audio; | ||
52 | int dpms_mode; | ||
53 | uint8_t link_bw; | ||
54 | uint8_t lane_count; | ||
55 | uint8_t dpcd[4]; | ||
56 | struct intel_output *intel_output; | ||
57 | struct i2c_adapter adapter; | ||
58 | struct i2c_algo_dp_aux_data algo; | ||
59 | }; | ||
60 | |||
61 | static void | ||
62 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | ||
63 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); | ||
64 | |||
65 | static void | ||
66 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); | ||
67 | |||
68 | void | ||
69 | intel_edp_link_config (struct intel_output *intel_output, | ||
70 | int *lane_num, int *link_bw) | ||
71 | { | ||
72 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
73 | |||
74 | *lane_num = dp_priv->lane_count; | ||
75 | if (dp_priv->link_bw == DP_LINK_BW_1_62) | ||
76 | *link_bw = 162000; | ||
77 | else if (dp_priv->link_bw == DP_LINK_BW_2_7) | ||
78 | *link_bw = 270000; | ||
79 | } | ||
80 | |||
81 | static int | ||
82 | intel_dp_max_lane_count(struct intel_output *intel_output) | ||
83 | { | ||
84 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
85 | int max_lane_count = 4; | ||
86 | |||
87 | if (dp_priv->dpcd[0] >= 0x11) { | ||
88 | max_lane_count = dp_priv->dpcd[2] & 0x1f; | ||
89 | switch (max_lane_count) { | ||
90 | case 1: case 2: case 4: | ||
91 | break; | ||
92 | default: | ||
93 | max_lane_count = 4; | ||
94 | } | ||
95 | } | ||
96 | return max_lane_count; | ||
97 | } | ||
98 | |||
99 | static int | ||
100 | intel_dp_max_link_bw(struct intel_output *intel_output) | ||
101 | { | ||
102 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
103 | int max_link_bw = dp_priv->dpcd[1]; | ||
104 | |||
105 | switch (max_link_bw) { | ||
106 | case DP_LINK_BW_1_62: | ||
107 | case DP_LINK_BW_2_7: | ||
108 | break; | ||
109 | default: | ||
110 | max_link_bw = DP_LINK_BW_1_62; | ||
111 | break; | ||
112 | } | ||
113 | return max_link_bw; | ||
114 | } | ||
115 | |||
116 | static int | ||
117 | intel_dp_link_clock(uint8_t link_bw) | ||
118 | { | ||
119 | if (link_bw == DP_LINK_BW_2_7) | ||
120 | return 270000; | ||
121 | else | ||
122 | return 162000; | ||
123 | } | ||
124 | |||
125 | /* I think this is a fiction */ | ||
126 | static int | ||
127 | intel_dp_link_required(int pixel_clock) | ||
128 | { | ||
129 | return pixel_clock * 3; | ||
130 | } | ||
131 | |||
132 | static int | ||
133 | intel_dp_mode_valid(struct drm_connector *connector, | ||
134 | struct drm_display_mode *mode) | ||
135 | { | ||
136 | struct intel_output *intel_output = to_intel_output(connector); | ||
137 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); | ||
138 | int max_lanes = intel_dp_max_lane_count(intel_output); | ||
139 | |||
140 | if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) | ||
141 | return MODE_CLOCK_HIGH; | ||
142 | |||
143 | if (mode->clock < 10000) | ||
144 | return MODE_CLOCK_LOW; | ||
145 | |||
146 | return MODE_OK; | ||
147 | } | ||
148 | |||
149 | static uint32_t | ||
150 | pack_aux(uint8_t *src, int src_bytes) | ||
151 | { | ||
152 | int i; | ||
153 | uint32_t v = 0; | ||
154 | |||
155 | if (src_bytes > 4) | ||
156 | src_bytes = 4; | ||
157 | for (i = 0; i < src_bytes; i++) | ||
158 | v |= ((uint32_t) src[i]) << ((3-i) * 8); | ||
159 | return v; | ||
160 | } | ||
161 | |||
162 | static void | ||
163 | unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) | ||
164 | { | ||
165 | int i; | ||
166 | if (dst_bytes > 4) | ||
167 | dst_bytes = 4; | ||
168 | for (i = 0; i < dst_bytes; i++) | ||
169 | dst[i] = src >> ((3-i) * 8); | ||
170 | } | ||
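A tiny standalone round-trip of the big-endian packing used by the AUX channel helpers above; pack_aux()/unpack_aux() are reproduced essentially as-is, and the three test bytes are arbitrary:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t)src[i]) << ((3 - i) * 8);   /* MSB first */
	return v;
}

static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

int main(void)
{
	uint8_t in[3] = { 0x90, 0x02, 0x02 };   /* arbitrary test bytes */
	uint8_t out[3];
	uint32_t word = pack_aux(in, 3);        /* 0x90020200 */

	unpack_aux(word, out, 3);
	assert(memcmp(in, out, 3) == 0);        /* round-trips exactly */
	printf("packed word: 0x%08x\n", (unsigned)word);
	return 0;
}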
171 | |||
172 | /* hrawclock is 1/4 the FSB frequency */ | ||
173 | static int | ||
174 | intel_hrawclk(struct drm_device *dev) | ||
175 | { | ||
176 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
177 | uint32_t clkcfg; | ||
178 | |||
179 | clkcfg = I915_READ(CLKCFG); | ||
180 | switch (clkcfg & CLKCFG_FSB_MASK) { | ||
181 | case CLKCFG_FSB_400: | ||
182 | return 100; | ||
183 | case CLKCFG_FSB_533: | ||
184 | return 133; | ||
185 | case CLKCFG_FSB_667: | ||
186 | return 166; | ||
187 | case CLKCFG_FSB_800: | ||
188 | return 200; | ||
189 | case CLKCFG_FSB_1067: | ||
190 | return 266; | ||
191 | case CLKCFG_FSB_1333: | ||
192 | return 333; | ||
193 | /* these two are just a guess; one of them might be right */ | ||
194 | case CLKCFG_FSB_1600: | ||
195 | case CLKCFG_FSB_1600_ALT: | ||
196 | return 400; | ||
197 | default: | ||
198 | return 133; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static int | ||
203 | intel_dp_aux_ch(struct intel_output *intel_output, | ||
204 | uint8_t *send, int send_bytes, | ||
205 | uint8_t *recv, int recv_size) | ||
206 | { | ||
207 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
208 | uint32_t output_reg = dp_priv->output_reg; | ||
209 | struct drm_device *dev = intel_output->base.dev; | ||
210 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
211 | uint32_t ch_ctl = output_reg + 0x10; | ||
212 | uint32_t ch_data = ch_ctl + 4; | ||
213 | int i; | ||
214 | int recv_bytes; | ||
215 | uint32_t ctl; | ||
216 | uint32_t status; | ||
217 | uint32_t aux_clock_divider; | ||
218 | int try; | ||
219 | |||
220 | /* The clock divider is based on the hrawclk, | ||
221 | * and we'd like it to run at about 2MHz, so take | ||
222 | * the hrawclk value and divide by 2 and use that. | ||
223 | */ | ||
224 | if (IS_eDP(intel_output)) | ||
225 | aux_clock_divider = 225; /* eDP input clock at 450MHz */ | ||
226 | else if (IS_IGDNG(dev)) | ||
227 | aux_clock_divider = 62; /* IGDNG: input clock fixed at 125MHz */ | ||
228 | else | ||
229 | aux_clock_divider = intel_hrawclk(dev) / 2; | ||
230 | |||
231 | /* Must try at least 3 times according to DP spec */ | ||
232 | for (try = 0; try < 5; try++) { | ||
233 | /* Load the send data into the aux channel data registers */ | ||
234 | for (i = 0; i < send_bytes; i += 4) { | ||
235 | uint32_t d = pack_aux(send + i, send_bytes - i); | ||
236 | |||
237 | I915_WRITE(ch_data + i, d); | ||
238 | } | ||
239 | |||
240 | ctl = (DP_AUX_CH_CTL_SEND_BUSY | | ||
241 | DP_AUX_CH_CTL_TIME_OUT_400us | | ||
242 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | ||
243 | (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | ||
244 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | | ||
245 | DP_AUX_CH_CTL_DONE | | ||
246 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
247 | DP_AUX_CH_CTL_RECEIVE_ERROR); | ||
248 | |||
249 | /* Send the command and wait for it to complete */ | ||
250 | I915_WRITE(ch_ctl, ctl); | ||
251 | (void) I915_READ(ch_ctl); | ||
252 | for (;;) { | ||
253 | udelay(100); | ||
254 | status = I915_READ(ch_ctl); | ||
255 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) | ||
256 | break; | ||
257 | } | ||
258 | |||
259 | /* Clear done status and any errors */ | ||
260 | I915_WRITE(ch_ctl, (status | | ||
261 | DP_AUX_CH_CTL_DONE | | ||
262 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | ||
263 | DP_AUX_CH_CTL_RECEIVE_ERROR)); | ||
264 | (void) I915_READ(ch_ctl); | ||
265 | if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) | ||
266 | break; | ||
267 | } | ||
268 | |||
269 | if ((status & DP_AUX_CH_CTL_DONE) == 0) { | ||
270 | DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); | ||
271 | return -EBUSY; | ||
272 | } | ||
273 | |||
274 | /* Check for timeout or receive error. | ||
275 | * Timeouts occur when the sink is not connected | ||
276 | */ | ||
277 | if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { | ||
278 | DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); | ||
279 | return -EIO; | ||
280 | } | ||
281 | |||
282 | /* Timeouts occur when the device isn't connected, so they're | ||
283 | * "normal" -- don't fill the kernel log with these */ | ||
284 | if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { | ||
285 | DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status); | ||
286 | return -ETIMEDOUT; | ||
287 | } | ||
288 | |||
289 | /* Unload any bytes sent back from the other side */ | ||
290 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> | ||
291 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); | ||
292 | |||
293 | if (recv_bytes > recv_size) | ||
294 | recv_bytes = recv_size; | ||
295 | |||
296 | for (i = 0; i < recv_bytes; i += 4) { | ||
297 | uint32_t d = I915_READ(ch_data + i); | ||
298 | |||
299 | unpack_aux(d, recv + i, recv_bytes - i); | ||
300 | } | ||
301 | |||
302 | return recv_bytes; | ||
303 | } | ||
304 | |||
305 | /* Write data to the aux channel in native mode */ | ||
306 | static int | ||
307 | intel_dp_aux_native_write(struct intel_output *intel_output, | ||
308 | uint16_t address, uint8_t *send, int send_bytes) | ||
309 | { | ||
310 | int ret; | ||
311 | uint8_t msg[20]; | ||
312 | int msg_bytes; | ||
313 | uint8_t ack; | ||
314 | |||
315 | if (send_bytes > 16) | ||
316 | return -1; | ||
317 | msg[0] = AUX_NATIVE_WRITE << 4; | ||
318 | msg[1] = address >> 8; | ||
319 | msg[2] = address & 0xff; | ||
320 | msg[3] = send_bytes - 1; | ||
321 | memcpy(&msg[4], send, send_bytes); | ||
322 | msg_bytes = send_bytes + 4; | ||
323 | for (;;) { | ||
324 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); | ||
325 | if (ret < 0) | ||
326 | return ret; | ||
327 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | ||
328 | break; | ||
329 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | ||
330 | udelay(100); | ||
331 | else | ||
332 | return -EIO; | ||
333 | } | ||
334 | return send_bytes; | ||
335 | } | ||
336 | |||
337 | /* Write a single byte to the aux channel in native mode */ | ||
338 | static int | ||
339 | intel_dp_aux_native_write_1(struct intel_output *intel_output, | ||
340 | uint16_t address, uint8_t byte) | ||
341 | { | ||
342 | return intel_dp_aux_native_write(intel_output, address, &byte, 1); | ||
343 | } | ||
344 | |||
345 | /* read bytes from a native aux channel */ | ||
346 | static int | ||
347 | intel_dp_aux_native_read(struct intel_output *intel_output, | ||
348 | uint16_t address, uint8_t *recv, int recv_bytes) | ||
349 | { | ||
350 | uint8_t msg[4]; | ||
351 | int msg_bytes; | ||
352 | uint8_t reply[20]; | ||
353 | int reply_bytes; | ||
354 | uint8_t ack; | ||
355 | int ret; | ||
356 | |||
357 | msg[0] = AUX_NATIVE_READ << 4; | ||
358 | msg[1] = address >> 8; | ||
359 | msg[2] = address & 0xff; | ||
360 | msg[3] = recv_bytes - 1; | ||
361 | |||
362 | msg_bytes = 4; | ||
363 | reply_bytes = recv_bytes + 1; | ||
364 | |||
365 | for (;;) { | ||
366 | ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, | ||
367 | reply, reply_bytes); | ||
368 | if (ret == 0) | ||
369 | return -EPROTO; | ||
370 | if (ret < 0) | ||
371 | return ret; | ||
372 | ack = reply[0]; | ||
373 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { | ||
374 | memcpy(recv, reply + 1, ret - 1); | ||
375 | return ret - 1; | ||
376 | } | ||
377 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | ||
378 | udelay(100); | ||
379 | else | ||
380 | return -EIO; | ||
381 | } | ||
382 | } | ||
383 | |||
384 | static int | ||
385 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, | ||
386 | uint8_t *send, int send_bytes, | ||
387 | uint8_t *recv, int recv_bytes) | ||
388 | { | ||
389 | struct intel_dp_priv *dp_priv = container_of(adapter, | ||
390 | struct intel_dp_priv, | ||
391 | adapter); | ||
392 | struct intel_output *intel_output = dp_priv->intel_output; | ||
393 | |||
394 | return intel_dp_aux_ch(intel_output, | ||
395 | send, send_bytes, recv, recv_bytes); | ||
396 | } | ||
397 | |||
398 | static int | ||
399 | intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | ||
400 | { | ||
401 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
402 | |||
403 | DRM_ERROR("i2c_init %s\n", name); | ||
404 | dp_priv->algo.running = false; | ||
405 | dp_priv->algo.address = 0; | ||
406 | dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; | ||
407 | |||
408 | memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); | ||
409 | dp_priv->adapter.owner = THIS_MODULE; | ||
410 | dp_priv->adapter.class = I2C_CLASS_DDC; | ||
411 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); | ||
412 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; | ||
413 | dp_priv->adapter.algo_data = &dp_priv->algo; | ||
414 | dp_priv->adapter.dev.parent = &intel_output->base.kdev; | ||
415 | |||
416 | return i2c_dp_aux_add_bus(&dp_priv->adapter); | ||
417 | } | ||
418 | |||
419 | static bool | ||
420 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
421 | struct drm_display_mode *adjusted_mode) | ||
422 | { | ||
423 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
424 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
425 | int lane_count, clock; | ||
426 | int max_lane_count = intel_dp_max_lane_count(intel_output); | ||
427 | int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; | ||
428 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | ||
429 | |||
430 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | ||
431 | for (clock = 0; clock <= max_clock; clock++) { | ||
432 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | ||
433 | |||
434 | if (intel_dp_link_required(mode->clock) <= link_avail) { | ||
435 | dp_priv->link_bw = bws[clock]; | ||
436 | dp_priv->lane_count = lane_count; | ||
437 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | ||
438 | DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n", | ||
439 | dp_priv->link_bw, dp_priv->lane_count, | ||
440 | adjusted_mode->clock); | ||
441 | return true; | ||
442 | } | ||
443 | } | ||
444 | } | ||
445 | return false; | ||
446 | } | ||
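To make the search in intel_dp_mode_fixup() concrete, here is the same bandwidth walk in standalone form, using the 3-bytes-per-pixel figure from intel_dp_link_required() and a hypothetical 148500 kHz mode; the first (lane count, link clock) pair whose bandwidth covers the requirement wins:

#include <stdio.h>

/* same fiction as intel_dp_link_required(): 3 bytes per pixel */
static int link_required(int pixel_clock_khz)
{
	return pixel_clock_khz * 3;
}

int main(void)
{
	static const int link_clocks[2] = { 162000, 270000 };  /* kHz */
	int mode_clock = 148500;     /* hypothetical 1080p-class mode, kHz */
	int lanes, clock;

	for (lanes = 1; lanes <= 4; lanes <<= 1) {
		for (clock = 0; clock < 2; clock++) {
			int avail = link_clocks[clock] * lanes;

			if (link_required(mode_clock) <= avail) {
				printf("picked %d lane(s) at %d kHz (%d >= %d)\n",
				       lanes, link_clocks[clock], avail,
				       link_required(mode_clock));
				return 0;
			}
		}
	}
	printf("mode does not fit on this link\n");
	return 1;
}

With these numbers the walk rejects one lane at both rates and two lanes at 1.62 GHz, then settles on two lanes at 2.7 GHz (540000 >= 445500).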
447 | |||
448 | struct intel_dp_m_n { | ||
449 | uint32_t tu; | ||
450 | uint32_t gmch_m; | ||
451 | uint32_t gmch_n; | ||
452 | uint32_t link_m; | ||
453 | uint32_t link_n; | ||
454 | }; | ||
455 | |||
456 | static void | ||
457 | intel_reduce_ratio(uint32_t *num, uint32_t *den) | ||
458 | { | ||
459 | while (*num > 0xffffff || *den > 0xffffff) { | ||
460 | *num >>= 1; | ||
461 | *den >>= 1; | ||
462 | } | ||
463 | } | ||
464 | |||
465 | static void | ||
466 | intel_dp_compute_m_n(int bytes_per_pixel, | ||
467 | int nlanes, | ||
468 | int pixel_clock, | ||
469 | int link_clock, | ||
470 | struct intel_dp_m_n *m_n) | ||
471 | { | ||
472 | m_n->tu = 64; | ||
473 | m_n->gmch_m = pixel_clock * bytes_per_pixel; | ||
474 | m_n->gmch_n = link_clock * nlanes; | ||
475 | intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | ||
476 | m_n->link_m = pixel_clock; | ||
477 | m_n->link_n = link_clock; | ||
478 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); | ||
479 | } | ||
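A self-contained version of the M/N arithmetic above, run on a hypothetical 148500 kHz mode over 2 lanes at a 270000 kHz link (3 bytes per pixel, as intel_dp_set_m_n() assumes); the reduction step mirrors intel_reduce_ratio(), and the fixed tu = 64 term is left out of the sketch:

#include <stdio.h>
#include <stdint.h>

struct dp_m_n {
	uint32_t gmch_m, gmch_n;   /* data M/N: payload vs. link capacity */
	uint32_t link_m, link_n;   /* link M/N: pixel clock vs. link clock */
};

/* shift both terms down until they fit in 24 bits */
static void reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock,
			struct dp_m_n *m_n)
{
	m_n->gmch_m = pixel_clock * bpp;
	m_n->gmch_n = link_clock * nlanes;
	reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	reduce_ratio(&m_n->link_m, &m_n->link_n);
}

int main(void)
{
	struct dp_m_n m_n;

	/* 148500 kHz mode, 3 bytes/pixel, 2 lanes at a 270000 kHz link */
	compute_m_n(3, 2, 148500, 270000, &m_n);
	printf("gmch %u/%u, link %u/%u\n",
	       m_n.gmch_m, m_n.gmch_n, m_n.link_m, m_n.link_n);
	return 0;
}

Here neither ratio exceeds 24 bits, so nothing is shifted and the output is simply 445500/540000 for the data M/N and 148500/270000 for the link M/N.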
480 | |||
481 | void | ||
482 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||
483 | struct drm_display_mode *adjusted_mode) | ||
484 | { | ||
485 | struct drm_device *dev = crtc->dev; | ||
486 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
487 | struct drm_connector *connector; | ||
488 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
489 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
490 | int lane_count = 4; | ||
491 | struct intel_dp_m_n m_n; | ||
492 | |||
493 | /* | ||
494 | * Find the lane count in the intel_output private | ||
495 | */ | ||
496 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
497 | struct intel_output *intel_output = to_intel_output(connector); | ||
498 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
499 | |||
500 | if (!connector->encoder || connector->encoder->crtc != crtc) | ||
501 | continue; | ||
502 | |||
503 | if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { | ||
504 | lane_count = dp_priv->lane_count; | ||
505 | break; | ||
506 | } | ||
507 | } | ||
508 | |||
509 | /* | ||
510 | * Compute the GMCH and Link ratios. The '3' here is | ||
511 | * the number of bytes_per_pixel post-LUT, which we always | ||
512 | * set up for 8-bits of R/G/B, or 3 bytes total. | ||
513 | */ | ||
514 | intel_dp_compute_m_n(3, lane_count, | ||
515 | mode->clock, adjusted_mode->clock, &m_n); | ||
516 | |||
517 | if (IS_IGDNG(dev)) { | ||
518 | if (intel_crtc->pipe == 0) { | ||
519 | I915_WRITE(TRANSA_DATA_M1, | ||
520 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
521 | m_n.gmch_m); | ||
522 | I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n); | ||
523 | I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m); | ||
524 | I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n); | ||
525 | } else { | ||
526 | I915_WRITE(TRANSB_DATA_M1, | ||
527 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
528 | m_n.gmch_m); | ||
529 | I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n); | ||
530 | I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m); | ||
531 | I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n); | ||
532 | } | ||
533 | } else { | ||
534 | if (intel_crtc->pipe == 0) { | ||
535 | I915_WRITE(PIPEA_GMCH_DATA_M, | ||
536 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
537 | m_n.gmch_m); | ||
538 | I915_WRITE(PIPEA_GMCH_DATA_N, | ||
539 | m_n.gmch_n); | ||
540 | I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); | ||
541 | I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); | ||
542 | } else { | ||
543 | I915_WRITE(PIPEB_GMCH_DATA_M, | ||
544 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
545 | m_n.gmch_m); | ||
546 | I915_WRITE(PIPEB_GMCH_DATA_N, | ||
547 | m_n.gmch_n); | ||
548 | I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); | ||
549 | I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); | ||
550 | } | ||
551 | } | ||
552 | } | ||
553 | |||
554 | static void | ||
555 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
556 | struct drm_display_mode *adjusted_mode) | ||
557 | { | ||
558 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
559 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
560 | struct drm_crtc *crtc = intel_output->enc.crtc; | ||
561 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
562 | |||
563 | dp_priv->DP = (DP_LINK_TRAIN_OFF | | ||
564 | DP_VOLTAGE_0_4 | | ||
565 | DP_PRE_EMPHASIS_0 | | ||
566 | DP_SYNC_VS_HIGH | | ||
567 | DP_SYNC_HS_HIGH); | ||
568 | |||
569 | switch (dp_priv->lane_count) { | ||
570 | case 1: | ||
571 | dp_priv->DP |= DP_PORT_WIDTH_1; | ||
572 | break; | ||
573 | case 2: | ||
574 | dp_priv->DP |= DP_PORT_WIDTH_2; | ||
575 | break; | ||
576 | case 4: | ||
577 | dp_priv->DP |= DP_PORT_WIDTH_4; | ||
578 | break; | ||
579 | } | ||
580 | if (dp_priv->has_audio) | ||
581 | dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE; | ||
582 | |||
583 | memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | ||
584 | dp_priv->link_configuration[0] = dp_priv->link_bw; | ||
585 | dp_priv->link_configuration[1] = dp_priv->lane_count; | ||
586 | |||
587 | /* | ||
588 | * Check for DPCD version 1.1 or later, | ||
589 | * and enable enhanced framing in that case | ||
590 | */ | ||
591 | if (dp_priv->dpcd[0] >= 0x11) { | ||
592 | dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
593 | dp_priv->DP |= DP_ENHANCED_FRAMING; | ||
594 | } | ||
595 | |||
596 | if (intel_crtc->pipe == 1) | ||
597 | dp_priv->DP |= DP_PIPEB_SELECT; | ||
598 | |||
599 | if (IS_eDP(intel_output)) { | ||
600 | /* don't skip the PLL settings required for eDP */ | ||
601 | dp_priv->DP |= DP_PLL_ENABLE; | ||
602 | if (adjusted_mode->clock < 200000) | ||
603 | dp_priv->DP |= DP_PLL_FREQ_160MHZ; | ||
604 | else | ||
605 | dp_priv->DP |= DP_PLL_FREQ_270MHZ; | ||
606 | } | ||
607 | } | ||
608 | |||
609 | static void igdng_edp_backlight_on (struct drm_device *dev) | ||
610 | { | ||
611 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
612 | u32 pp; | ||
613 | |||
614 | DRM_DEBUG("\n"); | ||
615 | pp = I915_READ(PCH_PP_CONTROL); | ||
616 | pp |= EDP_BLC_ENABLE; | ||
617 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
618 | } | ||
619 | |||
620 | static void igdng_edp_backlight_off (struct drm_device *dev) | ||
621 | { | ||
622 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
623 | u32 pp; | ||
624 | |||
625 | DRM_DEBUG("\n"); | ||
626 | pp = I915_READ(PCH_PP_CONTROL); | ||
627 | pp &= ~EDP_BLC_ENABLE; | ||
628 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
629 | } | ||
630 | |||
631 | static void | ||
632 | intel_dp_dpms(struct drm_encoder *encoder, int mode) | ||
633 | { | ||
634 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
635 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
636 | struct drm_device *dev = intel_output->base.dev; | ||
637 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
638 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); | ||
639 | |||
640 | if (mode != DRM_MODE_DPMS_ON) { | ||
641 | if (dp_reg & DP_PORT_EN) { | ||
642 | intel_dp_link_down(intel_output, dp_priv->DP); | ||
643 | if (IS_eDP(intel_output)) | ||
644 | igdng_edp_backlight_off(dev); | ||
645 | } | ||
646 | } else { | ||
647 | if (!(dp_reg & DP_PORT_EN)) { | ||
648 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | ||
649 | if (IS_eDP(intel_output)) | ||
650 | igdng_edp_backlight_on(dev); | ||
651 | } | ||
652 | } | ||
653 | dp_priv->dpms_mode = mode; | ||
654 | } | ||
655 | |||
656 | /* | ||
657 | * Fetch AUX CH registers 0x202 - 0x207 which contain | ||
658 | * link status information | ||
659 | */ | ||
660 | static bool | ||
661 | intel_dp_get_link_status(struct intel_output *intel_output, | ||
662 | uint8_t link_status[DP_LINK_STATUS_SIZE]) | ||
663 | { | ||
664 | int ret; | ||
665 | |||
666 | ret = intel_dp_aux_native_read(intel_output, | ||
667 | DP_LANE0_1_STATUS, | ||
668 | link_status, DP_LINK_STATUS_SIZE); | ||
669 | if (ret != DP_LINK_STATUS_SIZE) | ||
670 | return false; | ||
671 | return true; | ||
672 | } | ||
673 | |||
674 | static uint8_t | ||
675 | intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
676 | int r) | ||
677 | { | ||
678 | return link_status[r - DP_LANE0_1_STATUS]; | ||
679 | } | ||
680 | |||
681 | static void | ||
682 | intel_dp_save(struct drm_connector *connector) | ||
683 | { | ||
684 | struct intel_output *intel_output = to_intel_output(connector); | ||
685 | struct drm_device *dev = intel_output->base.dev; | ||
686 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
687 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
688 | |||
689 | dp_priv->save_DP = I915_READ(dp_priv->output_reg); | ||
690 | intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, | ||
691 | dp_priv->save_link_configuration, | ||
692 | sizeof (dp_priv->save_link_configuration)); | ||
693 | } | ||
694 | |||
695 | static uint8_t | ||
696 | intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
697 | int lane) | ||
698 | { | ||
699 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
700 | int s = ((lane & 1) ? | ||
701 | DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : | ||
702 | DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); | ||
703 | uint8_t l = intel_dp_link_status(link_status, i); | ||
704 | |||
705 | return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; | ||
706 | } | ||
707 | |||
708 | static uint8_t | ||
709 | intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
710 | int lane) | ||
711 | { | ||
712 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
713 | int s = ((lane & 1) ? | ||
714 | DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : | ||
715 | DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); | ||
716 | uint8_t l = intel_dp_link_status(link_status, i); | ||
717 | |||
718 | return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; | ||
719 | } | ||
720 | |||
721 | |||
722 | #if 0 | ||
723 | static char *voltage_names[] = { | ||
724 | "0.4V", "0.6V", "0.8V", "1.2V" | ||
725 | }; | ||
726 | static char *pre_emph_names[] = { | ||
727 | "0dB", "3.5dB", "6dB", "9.5dB" | ||
728 | }; | ||
729 | static char *link_train_names[] = { | ||
730 | "pattern 1", "pattern 2", "idle", "off" | ||
731 | }; | ||
732 | #endif | ||
733 | |||
734 | /* | ||
735 | * These are source-specific values; current Intel hardware supports | ||
736 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB | ||
737 | */ | ||
738 | #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 | ||
739 | |||
740 | static uint8_t | ||
741 | intel_dp_pre_emphasis_max(uint8_t voltage_swing) | ||
742 | { | ||
743 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | ||
744 | case DP_TRAIN_VOLTAGE_SWING_400: | ||
745 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
746 | case DP_TRAIN_VOLTAGE_SWING_600: | ||
747 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
748 | case DP_TRAIN_VOLTAGE_SWING_800: | ||
749 | return DP_TRAIN_PRE_EMPHASIS_3_5; | ||
750 | case DP_TRAIN_VOLTAGE_SWING_1200: | ||
751 | default: | ||
752 | return DP_TRAIN_PRE_EMPHASIS_0; | ||
753 | } | ||
754 | } | ||
755 | |||
756 | static void | ||
757 | intel_get_adjust_train(struct intel_output *intel_output, | ||
758 | uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
759 | int lane_count, | ||
760 | uint8_t train_set[4]) | ||
761 | { | ||
762 | uint8_t v = 0; | ||
763 | uint8_t p = 0; | ||
764 | int lane; | ||
765 | |||
766 | for (lane = 0; lane < lane_count; lane++) { | ||
767 | uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); | ||
768 | uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); | ||
769 | |||
770 | if (this_v > v) | ||
771 | v = this_v; | ||
772 | if (this_p > p) | ||
773 | p = this_p; | ||
774 | } | ||
775 | |||
776 | if (v >= I830_DP_VOLTAGE_MAX) | ||
777 | v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; | ||
778 | |||
779 | if (p >= intel_dp_pre_emphasis_max(v)) | ||
780 | p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | ||
781 | |||
782 | for (lane = 0; lane < 4; lane++) | ||
783 | train_set[lane] = v | p; | ||
784 | } | ||
785 | |||
786 | static uint32_t | ||
787 | intel_dp_signal_levels(uint8_t train_set, int lane_count) | ||
788 | { | ||
789 | uint32_t signal_levels = 0; | ||
790 | |||
791 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { | ||
792 | case DP_TRAIN_VOLTAGE_SWING_400: | ||
793 | default: | ||
794 | signal_levels |= DP_VOLTAGE_0_4; | ||
795 | break; | ||
796 | case DP_TRAIN_VOLTAGE_SWING_600: | ||
797 | signal_levels |= DP_VOLTAGE_0_6; | ||
798 | break; | ||
799 | case DP_TRAIN_VOLTAGE_SWING_800: | ||
800 | signal_levels |= DP_VOLTAGE_0_8; | ||
801 | break; | ||
802 | case DP_TRAIN_VOLTAGE_SWING_1200: | ||
803 | signal_levels |= DP_VOLTAGE_1_2; | ||
804 | break; | ||
805 | } | ||
806 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { | ||
807 | case DP_TRAIN_PRE_EMPHASIS_0: | ||
808 | default: | ||
809 | signal_levels |= DP_PRE_EMPHASIS_0; | ||
810 | break; | ||
811 | case DP_TRAIN_PRE_EMPHASIS_3_5: | ||
812 | signal_levels |= DP_PRE_EMPHASIS_3_5; | ||
813 | break; | ||
814 | case DP_TRAIN_PRE_EMPHASIS_6: | ||
815 | signal_levels |= DP_PRE_EMPHASIS_6; | ||
816 | break; | ||
817 | case DP_TRAIN_PRE_EMPHASIS_9_5: | ||
818 | signal_levels |= DP_PRE_EMPHASIS_9_5; | ||
819 | break; | ||
820 | } | ||
821 | return signal_levels; | ||
822 | } | ||
823 | |||
824 | static uint8_t | ||
825 | intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
826 | int lane) | ||
827 | { | ||
828 | int i = DP_LANE0_1_STATUS + (lane >> 1); | ||
829 | int s = (lane & 1) * 4; | ||
830 | uint8_t l = intel_dp_link_status(link_status, i); | ||
831 | |||
832 | return (l >> s) & 0xf; | ||
833 | } | ||
834 | |||
835 | /* Check whether clock recovery is done on all channels */ | ||
836 | static bool | ||
837 | intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | ||
838 | { | ||
839 | int lane; | ||
840 | uint8_t lane_status; | ||
841 | |||
842 | for (lane = 0; lane < lane_count; lane++) { | ||
843 | lane_status = intel_get_lane_status(link_status, lane); | ||
844 | if ((lane_status & DP_LANE_CR_DONE) == 0) | ||
845 | return false; | ||
846 | } | ||
847 | return true; | ||
848 | } | ||
849 | |||
850 | /* Check to see if channel eq is done on all channels */ | ||
851 | #define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ | ||
852 | DP_LANE_CHANNEL_EQ_DONE|\ | ||
853 | DP_LANE_SYMBOL_LOCKED) | ||
854 | static bool | ||
855 | intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | ||
856 | { | ||
857 | uint8_t lane_align; | ||
858 | uint8_t lane_status; | ||
859 | int lane; | ||
860 | |||
861 | lane_align = intel_dp_link_status(link_status, | ||
862 | DP_LANE_ALIGN_STATUS_UPDATED); | ||
863 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) | ||
864 | return false; | ||
865 | for (lane = 0; lane < lane_count; lane++) { | ||
866 | lane_status = intel_get_lane_status(link_status, lane); | ||
867 | if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) | ||
868 | return false; | ||
869 | } | ||
870 | return true; | ||
871 | } | ||
872 | |||
873 | static bool | ||
874 | intel_dp_set_link_train(struct intel_output *intel_output, | ||
875 | uint32_t dp_reg_value, | ||
876 | uint8_t dp_train_pat, | ||
877 | uint8_t train_set[4], | ||
878 | bool first) | ||
879 | { | ||
880 | struct drm_device *dev = intel_output->base.dev; | ||
881 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
882 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
883 | int ret; | ||
884 | |||
885 | I915_WRITE(dp_priv->output_reg, dp_reg_value); | ||
886 | POSTING_READ(dp_priv->output_reg); | ||
887 | if (first) | ||
888 | intel_wait_for_vblank(dev); | ||
889 | |||
890 | intel_dp_aux_native_write_1(intel_output, | ||
891 | DP_TRAINING_PATTERN_SET, | ||
892 | dp_train_pat); | ||
893 | |||
894 | ret = intel_dp_aux_native_write(intel_output, | ||
895 | DP_TRAINING_LANE0_SET, train_set, 4); | ||
896 | if (ret != 4) | ||
897 | return false; | ||
898 | |||
899 | return true; | ||
900 | } | ||
901 | |||
902 | static void | ||
903 | intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | ||
904 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) | ||
905 | { | ||
906 | struct drm_device *dev = intel_output->base.dev; | ||
907 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
908 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
909 | uint8_t train_set[4]; | ||
910 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
911 | int i; | ||
912 | uint8_t voltage; | ||
913 | bool clock_recovery = false; | ||
914 | bool channel_eq = false; | ||
915 | bool first = true; | ||
916 | int tries; | ||
917 | |||
918 | /* Write the link configuration data */ | ||
919 | intel_dp_aux_native_write(intel_output, 0x100, | ||
920 | link_configuration, DP_LINK_CONFIGURATION_SIZE); | ||
921 | |||
922 | DP |= DP_PORT_EN; | ||
923 | DP &= ~DP_LINK_TRAIN_MASK; | ||
924 | memset(train_set, 0, 4); | ||
925 | voltage = 0xff; | ||
926 | tries = 0; | ||
927 | clock_recovery = false; | ||
928 | for (;;) { | ||
929 | /* Use train_set[0] to set the voltage and pre emphasis values */ | ||
930 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | ||
931 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | ||
932 | |||
933 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, | ||
934 | DP_TRAINING_PATTERN_1, train_set, first)) | ||
935 | break; | ||
936 | first = false; | ||
937 | /* Set training pattern 1 */ | ||
938 | |||
939 | udelay(100); | ||
940 | if (!intel_dp_get_link_status(intel_output, link_status)) | ||
941 | break; | ||
942 | |||
943 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { | ||
944 | clock_recovery = true; | ||
945 | break; | ||
946 | } | ||
947 | |||
948 | /* Check to see if we've tried the max voltage */ | ||
949 | for (i = 0; i < dp_priv->lane_count; i++) | ||
950 | if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | ||
951 | break; | ||
952 | if (i == dp_priv->lane_count) | ||
953 | break; | ||
954 | |||
955 | /* Check to see if we've tried the same voltage 5 times */ | ||
956 | if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | ||
957 | ++tries; | ||
958 | if (tries == 5) | ||
959 | break; | ||
960 | } else | ||
961 | tries = 0; | ||
962 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
963 | |||
964 | /* Compute new train_set as requested by target */ | ||
965 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | ||
966 | } | ||
967 | |||
968 | /* channel equalization */ | ||
969 | tries = 0; | ||
970 | channel_eq = false; | ||
971 | for (;;) { | ||
972 | /* Use train_set[0] to set the voltage and pre emphasis values */ | ||
973 | uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | ||
974 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | ||
975 | |||
976 | /* channel eq pattern */ | ||
977 | if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, | ||
978 | DP_TRAINING_PATTERN_2, train_set, | ||
979 | false)) | ||
980 | break; | ||
981 | |||
982 | udelay(400); | ||
983 | if (!intel_dp_get_link_status(intel_output, link_status)) | ||
984 | break; | ||
985 | |||
986 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { | ||
987 | channel_eq = true; | ||
988 | break; | ||
989 | } | ||
990 | |||
991 | /* Try 5 times */ | ||
992 | if (tries > 5) | ||
993 | break; | ||
994 | |||
995 | /* Compute new train_set as requested by target */ | ||
996 | intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); | ||
997 | ++tries; | ||
998 | } | ||
999 | |||
1000 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); | ||
1001 | POSTING_READ(dp_priv->output_reg); | ||
1002 | intel_dp_aux_native_write_1(intel_output, | ||
1003 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); | ||
1004 | } | ||
1005 | |||
1006 | static void | ||
1007 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | ||
1008 | { | ||
1009 | struct drm_device *dev = intel_output->base.dev; | ||
1010 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1011 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
1012 | |||
1013 | DRM_DEBUG("\n"); | ||
1014 | |||
1015 | if (IS_eDP(intel_output)) { | ||
1016 | DP &= ~DP_PLL_ENABLE; | ||
1017 | I915_WRITE(dp_priv->output_reg, DP); | ||
1018 | POSTING_READ(dp_priv->output_reg); | ||
1019 | udelay(100); | ||
1020 | } | ||
1021 | |||
1022 | DP &= ~DP_LINK_TRAIN_MASK; | ||
1023 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); | ||
1024 | POSTING_READ(dp_priv->output_reg); | ||
1025 | |||
1026 | udelay(17000); | ||
1027 | |||
1028 | if (IS_eDP(intel_output)) | ||
1029 | DP |= DP_LINK_TRAIN_OFF; | ||
1030 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); | ||
1031 | POSTING_READ(dp_priv->output_reg); | ||
1032 | } | ||
1033 | |||
1034 | static void | ||
1035 | intel_dp_restore(struct drm_connector *connector) | ||
1036 | { | ||
1037 | struct intel_output *intel_output = to_intel_output(connector); | ||
1038 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
1039 | |||
1040 | if (dp_priv->save_DP & DP_PORT_EN) | ||
1041 | intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); | ||
1042 | else | ||
1043 | intel_dp_link_down(intel_output, dp_priv->save_DP); | ||
1044 | } | ||
1045 | |||
1046 | /* | ||
1047 | * According to DP spec | ||
1048 | * 5.1.2: | ||
1049 | * 1. Read DPCD | ||
1050 | * 2. Configure link according to Receiver Capabilities | ||
1051 | * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 | ||
1052 | * 4. Check link status on receipt of hot-plug interrupt | ||
1053 | */ | ||
1054 | |||
1055 | static void | ||
1056 | intel_dp_check_link_status(struct intel_output *intel_output) | ||
1057 | { | ||
1058 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
1059 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | ||
1060 | |||
1061 | if (!intel_output->enc.crtc) | ||
1062 | return; | ||
1063 | |||
1064 | if (!intel_dp_get_link_status(intel_output, link_status)) { | ||
1065 | intel_dp_link_down(intel_output, dp_priv->DP); | ||
1066 | return; | ||
1067 | } | ||
1068 | |||
1069 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) | ||
1070 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | ||
1071 | } | ||
1072 | |||
1073 | static enum drm_connector_status | ||
1074 | igdng_dp_detect(struct drm_connector *connector) | ||
1075 | { | ||
1076 | struct intel_output *intel_output = to_intel_output(connector); | ||
1077 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
1078 | enum drm_connector_status status; | ||
1079 | |||
1080 | status = connector_status_disconnected; | ||
1081 | if (intel_dp_aux_native_read(intel_output, | ||
1082 | 0x000, dp_priv->dpcd, | ||
1083 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | ||
1084 | { | ||
1085 | if (dp_priv->dpcd[0] != 0) | ||
1086 | status = connector_status_connected; | ||
1087 | } | ||
1088 | return status; | ||
1089 | } | ||
1090 | |||
1091 | /** | ||
1092 | * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect DP connection. | ||
1093 | * | ||
1094 | * \return connector_status_connected if a DP sink is detected. | ||
1095 | * \return connector_status_disconnected otherwise. | ||
1096 | */ | ||
1097 | static enum drm_connector_status | ||
1098 | intel_dp_detect(struct drm_connector *connector) | ||
1099 | { | ||
1100 | struct intel_output *intel_output = to_intel_output(connector); | ||
1101 | struct drm_device *dev = intel_output->base.dev; | ||
1102 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1103 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
1104 | uint32_t temp, bit; | ||
1105 | enum drm_connector_status status; | ||
1106 | |||
1107 | dp_priv->has_audio = false; | ||
1108 | |||
1109 | if (IS_IGDNG(dev)) | ||
1110 | return igdng_dp_detect(connector); | ||
1111 | |||
1112 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
1113 | |||
1114 | I915_WRITE(PORT_HOTPLUG_EN, | ||
1115 | temp | | ||
1116 | DPB_HOTPLUG_INT_EN | | ||
1117 | DPC_HOTPLUG_INT_EN | | ||
1118 | DPD_HOTPLUG_INT_EN); | ||
1119 | |||
1120 | POSTING_READ(PORT_HOTPLUG_EN); | ||
1121 | |||
1122 | switch (dp_priv->output_reg) { | ||
1123 | case DP_B: | ||
1124 | bit = DPB_HOTPLUG_INT_STATUS; | ||
1125 | break; | ||
1126 | case DP_C: | ||
1127 | bit = DPC_HOTPLUG_INT_STATUS; | ||
1128 | break; | ||
1129 | case DP_D: | ||
1130 | bit = DPD_HOTPLUG_INT_STATUS; | ||
1131 | break; | ||
1132 | default: | ||
1133 | return connector_status_unknown; | ||
1134 | } | ||
1135 | |||
1136 | temp = I915_READ(PORT_HOTPLUG_STAT); | ||
1137 | |||
1138 | if ((temp & bit) == 0) | ||
1139 | return connector_status_disconnected; | ||
1140 | |||
1141 | status = connector_status_disconnected; | ||
1142 | if (intel_dp_aux_native_read(intel_output, | ||
1143 | 0x000, dp_priv->dpcd, | ||
1144 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | ||
1145 | { | ||
1146 | if (dp_priv->dpcd[0] != 0) | ||
1147 | status = connector_status_connected; | ||
1148 | } | ||
1149 | return status; | ||
1150 | } | ||
1151 | |||
1152 | static int intel_dp_get_modes(struct drm_connector *connector) | ||
1153 | { | ||
1154 | struct intel_output *intel_output = to_intel_output(connector); | ||
1155 | struct drm_device *dev = intel_output->base.dev; | ||
1156 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1157 | int ret; | ||
1158 | |||
1159 | /* We should parse the EDID data and find out if it has an audio sink | ||
1160 | */ | ||
1161 | |||
1162 | ret = intel_ddc_get_modes(intel_output); | ||
1163 | if (ret) | ||
1164 | return ret; | ||
1165 | |||
1166 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | ||
1167 | if (IS_eDP(intel_output)) { | ||
1168 | if (dev_priv->panel_fixed_mode != NULL) { | ||
1169 | struct drm_display_mode *mode; | ||
1170 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | ||
1171 | drm_mode_probed_add(connector, mode); | ||
1172 | return 1; | ||
1173 | } | ||
1174 | } | ||
1175 | return 0; | ||
1176 | } | ||
1177 | |||
1178 | static void | ||
1179 | intel_dp_destroy (struct drm_connector *connector) | ||
1180 | { | ||
1181 | struct intel_output *intel_output = to_intel_output(connector); | ||
1182 | |||
1183 | if (intel_output->i2c_bus) | ||
1184 | intel_i2c_destroy(intel_output->i2c_bus); | ||
1185 | drm_sysfs_connector_remove(connector); | ||
1186 | drm_connector_cleanup(connector); | ||
1187 | kfree(intel_output); | ||
1188 | } | ||
1189 | |||
1190 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | ||
1191 | .dpms = intel_dp_dpms, | ||
1192 | .mode_fixup = intel_dp_mode_fixup, | ||
1193 | .prepare = intel_encoder_prepare, | ||
1194 | .mode_set = intel_dp_mode_set, | ||
1195 | .commit = intel_encoder_commit, | ||
1196 | }; | ||
1197 | |||
1198 | static const struct drm_connector_funcs intel_dp_connector_funcs = { | ||
1199 | .dpms = drm_helper_connector_dpms, | ||
1200 | .save = intel_dp_save, | ||
1201 | .restore = intel_dp_restore, | ||
1202 | .detect = intel_dp_detect, | ||
1203 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
1204 | .destroy = intel_dp_destroy, | ||
1205 | }; | ||
1206 | |||
1207 | static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { | ||
1208 | .get_modes = intel_dp_get_modes, | ||
1209 | .mode_valid = intel_dp_mode_valid, | ||
1210 | .best_encoder = intel_best_encoder, | ||
1211 | }; | ||
1212 | |||
1213 | static void intel_dp_enc_destroy(struct drm_encoder *encoder) | ||
1214 | { | ||
1215 | drm_encoder_cleanup(encoder); | ||
1216 | } | ||
1217 | |||
1218 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { | ||
1219 | .destroy = intel_dp_enc_destroy, | ||
1220 | }; | ||
1221 | |||
1222 | void | ||
1223 | intel_dp_hot_plug(struct intel_output *intel_output) | ||
1224 | { | ||
1225 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
1226 | |||
1227 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | ||
1228 | intel_dp_check_link_status(intel_output); | ||
1229 | } | ||
1230 | |||
1231 | void | ||
1232 | intel_dp_init(struct drm_device *dev, int output_reg) | ||
1233 | { | ||
1234 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1235 | struct drm_connector *connector; | ||
1236 | struct intel_output *intel_output; | ||
1237 | struct intel_dp_priv *dp_priv; | ||
1238 | const char *name = NULL; | ||
1239 | |||
1240 | intel_output = kcalloc(sizeof(struct intel_output) + | ||
1241 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | ||
1242 | if (!intel_output) | ||
1243 | return; | ||
1244 | |||
1245 | dp_priv = (struct intel_dp_priv *)(intel_output + 1); | ||
1246 | |||
1247 | connector = &intel_output->base; | ||
1248 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, | ||
1249 | DRM_MODE_CONNECTOR_DisplayPort); | ||
1250 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | ||
1251 | |||
1252 | if (output_reg == DP_A) | ||
1253 | intel_output->type = INTEL_OUTPUT_EDP; | ||
1254 | else | ||
1255 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | ||
1256 | |||
1257 | connector->interlace_allowed = true; | ||
1258 | connector->doublescan_allowed = 0; | ||
1259 | |||
1260 | dp_priv->intel_output = intel_output; | ||
1261 | dp_priv->output_reg = output_reg; | ||
1262 | dp_priv->has_audio = false; | ||
1263 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; | ||
1264 | intel_output->dev_priv = dp_priv; | ||
1265 | |||
1266 | drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, | ||
1267 | DRM_MODE_ENCODER_TMDS); | ||
1268 | drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); | ||
1269 | |||
1270 | drm_mode_connector_attach_encoder(&intel_output->base, | ||
1271 | &intel_output->enc); | ||
1272 | drm_sysfs_connector_add(connector); | ||
1273 | |||
1274 | /* Set up the DDC bus. */ | ||
1275 | switch (output_reg) { | ||
1276 | case DP_A: | ||
1277 | name = "DPDDC-A"; | ||
1278 | break; | ||
1279 | case DP_B: | ||
1280 | case PCH_DP_B: | ||
1281 | name = "DPDDC-B"; | ||
1282 | break; | ||
1283 | case DP_C: | ||
1284 | case PCH_DP_C: | ||
1285 | name = "DPDDC-C"; | ||
1286 | break; | ||
1287 | case DP_D: | ||
1288 | case PCH_DP_D: | ||
1289 | name = "DPDDC-D"; | ||
1290 | break; | ||
1291 | } | ||
1292 | |||
1293 | intel_dp_i2c_init(intel_output, name); | ||
1294 | |||
1295 | intel_output->ddc_bus = &dp_priv->adapter; | ||
1296 | intel_output->hot_plug = intel_dp_hot_plug; | ||
1297 | |||
1298 | if (output_reg == DP_A) { | ||
1299 | /* initialize panel mode from VBT if available for eDP */ | ||
1300 | if (dev_priv->lfp_lvds_vbt_mode) { | ||
1301 | dev_priv->panel_fixed_mode = | ||
1302 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | ||
1303 | if (dev_priv->panel_fixed_mode) { | ||
1304 | dev_priv->panel_fixed_mode->type |= | ||
1305 | DRM_MODE_TYPE_PREFERRED; | ||
1306 | } | ||
1307 | } | ||
1308 | } | ||
1309 | |||
1310 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | ||
1311 | * 0xd. Failure to do so will result in spurious interrupts being | ||
1312 | * generated on the port when a cable is not attached. | ||
1313 | */ | ||
1314 | if (IS_G4X(dev) && !IS_GM45(dev)) { | ||
1315 | u32 temp = I915_READ(PEG_BAND_GAP_DATA); | ||
1316 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); | ||
1317 | } | ||
1318 | } | ||
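The link-training helpers above all work on the DPCD snapshot that intel_dp_get_link_status() pulls from address 0x202, indexing it as link_status[reg - DP_LANE0_1_STATUS] and packing two lanes per byte. The sketch below is illustrative only and not part of the patch: plain C, with the register values copied from intel_dp.h later in this series, and DP_LINK_STATUS_SIZE assumed to be the six bytes 0x202-0x207 described in the comment on intel_dp_get_link_status().

/*
 * Illustrative sketch: how the helpers above index the DPCD snapshot
 * fetched from address 0x202.  Register values copied from intel_dp.h;
 * a DP_LINK_STATUS_SIZE of 6 (0x202-0x207) is an assumption.
 */
#include <stdbool.h>
#include <stdint.h>

#define DP_LANE0_1_STATUS			0x202
#define DP_ADJUST_REQUEST_LANE0_1		0x206
#define DP_LANE_CR_DONE				(1 << 0)
#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT	4
#define DP_TRAIN_VOLTAGE_SWING_SHIFT		0
#define DP_LINK_STATUS_SIZE			6	/* assumed */

/* Byte holding DPCD address r, mirroring intel_dp_link_status(). */
static uint8_t dpcd_byte(const uint8_t status[DP_LINK_STATUS_SIZE], int r)
{
	return status[r - DP_LANE0_1_STATUS];
}

/* Lane 2 shares DPCD 0x203 with lane 3 and occupies the low nibble. */
static bool lane2_cr_done(const uint8_t status[DP_LINK_STATUS_SIZE])
{
	int lane = 2;
	uint8_t b = dpcd_byte(status, DP_LANE0_1_STATUS + (lane >> 1));

	return (b >> ((lane & 1) * 4)) & DP_LANE_CR_DONE;
}

/* Lane 1's requested voltage swing sits in bits 5:4 of DPCD 0x206. */
static uint8_t lane1_requested_swing(const uint8_t status[DP_LINK_STATUS_SIZE])
{
	uint8_t b = dpcd_byte(status, DP_ADJUST_REQUEST_LANE0_1 + (1 >> 1));

	return ((b >> DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT) & 3)
		<< DP_TRAIN_VOLTAGE_SWING_SHIFT;
}

intel_get_lane_status() and intel_get_adjust_request_voltage() above perform exactly this arithmetic for an arbitrary lane.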
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h new file mode 100644 index 000000000000..2b38054d3b6d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.h | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Keith Packard | ||
3 | * | ||
4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
5 | * documentation for any purpose is hereby granted without fee, provided that | ||
6 | * the above copyright notice appear in all copies and that both that copyright | ||
7 | * notice and this permission notice appear in supporting documentation, and | ||
8 | * that the name of the copyright holders not be used in advertising or | ||
9 | * publicity pertaining to distribution of the software without specific, | ||
10 | * written prior permission. The copyright holders make no representations | ||
11 | * about the suitability of this software for any purpose. It is provided "as | ||
12 | * is" without express or implied warranty. | ||
13 | * | ||
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
20 | * OF THIS SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef _INTEL_DP_H_ | ||
24 | #define _INTEL_DP_H_ | ||
25 | |||
26 | /* From the VESA DisplayPort spec */ | ||
27 | |||
28 | #define AUX_NATIVE_WRITE 0x8 | ||
29 | #define AUX_NATIVE_READ 0x9 | ||
30 | #define AUX_I2C_WRITE 0x0 | ||
31 | #define AUX_I2C_READ 0x1 | ||
32 | #define AUX_I2C_STATUS 0x2 | ||
33 | #define AUX_I2C_MOT 0x4 | ||
34 | |||
35 | #define AUX_NATIVE_REPLY_ACK (0x0 << 4) | ||
36 | #define AUX_NATIVE_REPLY_NACK (0x1 << 4) | ||
37 | #define AUX_NATIVE_REPLY_DEFER (0x2 << 4) | ||
38 | #define AUX_NATIVE_REPLY_MASK (0x3 << 4) | ||
39 | |||
40 | #define AUX_I2C_REPLY_ACK (0x0 << 6) | ||
41 | #define AUX_I2C_REPLY_NACK (0x1 << 6) | ||
42 | #define AUX_I2C_REPLY_DEFER (0x2 << 6) | ||
43 | #define AUX_I2C_REPLY_MASK (0x3 << 6) | ||
44 | |||
45 | /* AUX CH addresses */ | ||
46 | #define DP_LINK_BW_SET 0x100 | ||
47 | # define DP_LINK_BW_1_62 0x06 | ||
48 | # define DP_LINK_BW_2_7 0x0a | ||
49 | |||
50 | #define DP_LANE_COUNT_SET 0x101 | ||
51 | # define DP_LANE_COUNT_MASK 0x0f | ||
52 | # define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) | ||
53 | |||
54 | #define DP_TRAINING_PATTERN_SET 0x102 | ||
55 | |||
56 | # define DP_TRAINING_PATTERN_DISABLE 0 | ||
57 | # define DP_TRAINING_PATTERN_1 1 | ||
58 | # define DP_TRAINING_PATTERN_2 2 | ||
59 | # define DP_TRAINING_PATTERN_MASK 0x3 | ||
60 | |||
61 | # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) | ||
62 | # define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) | ||
63 | # define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) | ||
64 | # define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) | ||
65 | # define DP_LINK_QUAL_PATTERN_MASK (3 << 2) | ||
66 | |||
67 | # define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) | ||
68 | # define DP_LINK_SCRAMBLING_DISABLE (1 << 5) | ||
69 | |||
70 | # define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) | ||
71 | # define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) | ||
72 | # define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) | ||
73 | # define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) | ||
74 | |||
75 | #define DP_TRAINING_LANE0_SET 0x103 | ||
76 | #define DP_TRAINING_LANE1_SET 0x104 | ||
77 | #define DP_TRAINING_LANE2_SET 0x105 | ||
78 | #define DP_TRAINING_LANE3_SET 0x106 | ||
79 | |||
80 | # define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 | ||
81 | # define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 | ||
82 | # define DP_TRAIN_MAX_SWING_REACHED (1 << 2) | ||
83 | # define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) | ||
84 | # define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) | ||
85 | # define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) | ||
86 | # define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) | ||
87 | |||
88 | # define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) | ||
89 | # define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) | ||
90 | # define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) | ||
91 | # define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) | ||
92 | # define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) | ||
93 | |||
94 | # define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 | ||
95 | # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) | ||
96 | |||
97 | #define DP_DOWNSPREAD_CTRL 0x107 | ||
98 | # define DP_SPREAD_AMP_0_5 (1 << 4) | ||
99 | |||
100 | #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 | ||
101 | # define DP_SET_ANSI_8B10B (1 << 0) | ||
102 | |||
103 | #define DP_LANE0_1_STATUS 0x202 | ||
104 | #define DP_LANE2_3_STATUS 0x203 | ||
105 | |||
106 | # define DP_LANE_CR_DONE (1 << 0) | ||
107 | # define DP_LANE_CHANNEL_EQ_DONE (1 << 1) | ||
108 | # define DP_LANE_SYMBOL_LOCKED (1 << 2) | ||
109 | |||
110 | #define DP_LANE_ALIGN_STATUS_UPDATED 0x204 | ||
111 | |||
112 | #define DP_INTERLANE_ALIGN_DONE (1 << 0) | ||
113 | #define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) | ||
114 | #define DP_LINK_STATUS_UPDATED (1 << 7) | ||
115 | |||
116 | #define DP_SINK_STATUS 0x205 | ||
117 | |||
118 | #define DP_RECEIVE_PORT_0_STATUS (1 << 0) | ||
119 | #define DP_RECEIVE_PORT_1_STATUS (1 << 1) | ||
120 | |||
121 | #define DP_ADJUST_REQUEST_LANE0_1 0x206 | ||
122 | #define DP_ADJUST_REQUEST_LANE2_3 0x207 | ||
123 | |||
124 | #define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 | ||
125 | #define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 | ||
126 | #define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c | ||
127 | #define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 | ||
128 | #define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 | ||
129 | #define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 | ||
130 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 | ||
131 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 | ||
132 | |||
133 | struct i2c_algo_dp_aux_data { | ||
134 | bool running; | ||
135 | u16 address; | ||
136 | int (*aux_ch) (struct i2c_adapter *adapter, | ||
137 | uint8_t *send, int send_bytes, | ||
138 | uint8_t *recv, int recv_bytes); | ||
139 | }; | ||
140 | |||
141 | int | ||
142 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter); | ||
143 | |||
144 | #endif /* _INTEL_DP_H_ */ | ||
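The header only declares the glue: a driver fills in the hardware-specific aux_ch callback through i2c_algo_dp_aux_data and hands the adapter to i2c_dp_aux_add_bus(). In this series that wiring lives in intel_dp_i2c_init() in intel_dp.c, which is not shown in this hunk, so the sketch below is only a hedged guess at the shape; my_dp_port, my_dp_i2c_init and my_aux_ch are hypothetical stand-ins, while the i2c_algo_dp_aux_data fields and i2c_dp_aux_add_bus() come from this header and the i2c_adapter fields are the standard Linux i2c ones.

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/string.h>
#include "intel_dp.h"

struct my_dp_port {
	struct i2c_adapter adapter;
	struct i2c_algo_dp_aux_data algo;
};

/* A real driver performs the AUX CH transaction against its registers here. */
static int my_aux_ch(struct i2c_adapter *adapter,
		     uint8_t *send, int send_bytes,
		     uint8_t *recv, int recv_bytes)
{
	return -EIO;	/* placeholder */
}

static int my_dp_i2c_init(struct my_dp_port *port, struct device *parent,
			  const char *name)
{
	port->algo.running = false;
	port->algo.address = 0;
	port->algo.aux_ch = my_aux_ch;

	port->adapter.owner = THIS_MODULE;
	port->adapter.class = I2C_CLASS_DDC;
	strlcpy(port->adapter.name, name, sizeof(port->adapter.name));
	port->adapter.algo_data = &port->algo;
	port->adapter.dev.parent = parent;

	return i2c_dp_aux_add_bus(&port->adapter);
}

i2c_dp_aux_add_bus() then installs i2c_dp_aux_algo from intel_dp_i2c.c and registers the adapter with the i2c core.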
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c new file mode 100644 index 000000000000..a63b6f57d2d4 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_i2c.c | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * Copyright © 2009 Keith Packard | ||
3 | * | ||
4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
5 | * documentation for any purpose is hereby granted without fee, provided that | ||
6 | * the above copyright notice appear in all copies and that both that copyright | ||
7 | * notice and this permission notice appear in supporting documentation, and | ||
8 | * that the name of the copyright holders not be used in advertising or | ||
9 | * publicity pertaining to distribution of the software without specific, | ||
10 | * written prior permission. The copyright holders make no representations | ||
11 | * about the suitability of this software for any purpose. It is provided "as | ||
12 | * is" without express or implied warranty. | ||
13 | * | ||
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
20 | * OF THIS SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/i2c.h> | ||
31 | #include "intel_dp.h" | ||
32 | #include "drmP.h" | ||
33 | |||
34 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ | ||
35 | |||
36 | #define MODE_I2C_START 1 | ||
37 | #define MODE_I2C_WRITE 2 | ||
38 | #define MODE_I2C_READ 4 | ||
39 | #define MODE_I2C_STOP 8 | ||
40 | |||
41 | static int | ||
42 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, | ||
43 | uint8_t write_byte, uint8_t *read_byte) | ||
44 | { | ||
45 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
46 | uint16_t address = algo_data->address; | ||
47 | uint8_t msg[5]; | ||
48 | uint8_t reply[2]; | ||
49 | int msg_bytes; | ||
50 | int reply_bytes; | ||
51 | int ret; | ||
52 | |||
53 | /* Set up the command byte */ | ||
54 | if (mode & MODE_I2C_READ) | ||
55 | msg[0] = AUX_I2C_READ << 4; | ||
56 | else | ||
57 | msg[0] = AUX_I2C_WRITE << 4; | ||
58 | |||
59 | if (!(mode & MODE_I2C_STOP)) | ||
60 | msg[0] |= AUX_I2C_MOT << 4; | ||
61 | |||
62 | msg[1] = address >> 8; | ||
63 | msg[2] = address; | ||
64 | |||
65 | switch (mode) { | ||
66 | case MODE_I2C_WRITE: | ||
67 | msg[3] = 0; | ||
68 | msg[4] = write_byte; | ||
69 | msg_bytes = 5; | ||
70 | reply_bytes = 1; | ||
71 | break; | ||
72 | case MODE_I2C_READ: | ||
73 | msg[3] = 0; | ||
74 | msg_bytes = 4; | ||
75 | reply_bytes = 2; | ||
76 | break; | ||
77 | default: | ||
78 | msg_bytes = 3; | ||
79 | reply_bytes = 1; | ||
80 | break; | ||
81 | } | ||
82 | |||
83 | for (;;) { | ||
84 | ret = (*algo_data->aux_ch)(adapter, | ||
85 | msg, msg_bytes, | ||
86 | reply, reply_bytes); | ||
87 | if (ret < 0) { | ||
88 | DRM_DEBUG("aux_ch failed %d\n", ret); | ||
89 | return ret; | ||
90 | } | ||
91 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | ||
92 | case AUX_I2C_REPLY_ACK: | ||
93 | if (mode == MODE_I2C_READ) { | ||
94 | *read_byte = reply[1]; | ||
95 | } | ||
96 | return reply_bytes - 1; | ||
97 | case AUX_I2C_REPLY_NACK: | ||
98 | DRM_DEBUG("aux_ch nack\n"); | ||
99 | return -EREMOTEIO; | ||
100 | case AUX_I2C_REPLY_DEFER: | ||
101 | DRM_DEBUG("aux_ch defer\n"); | ||
102 | udelay(100); | ||
103 | break; | ||
104 | default: | ||
105 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | ||
106 | return -EREMOTEIO; | ||
107 | } | ||
108 | } | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * I2C over AUX CH | ||
113 | */ | ||
114 | |||
115 | /* | ||
116 | * Send the address. If the I2C link is running, this 'restarts' | ||
117 | * the connection with the new address, this is used for doing | ||
118 | * a write followed by a read (as needed for DDC) | ||
119 | */ | ||
120 | static int | ||
121 | i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading) | ||
122 | { | ||
123 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
124 | int mode = MODE_I2C_START; | ||
125 | int ret; | ||
126 | |||
127 | if (reading) | ||
128 | mode |= MODE_I2C_READ; | ||
129 | else | ||
130 | mode |= MODE_I2C_WRITE; | ||
131 | algo_data->address = address; | ||
132 | algo_data->running = true; | ||
133 | ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * Stop the I2C transaction. This closes out the link, sending | ||
139 | * a bare address packet with the MOT bit turned off | ||
140 | */ | ||
141 | static void | ||
142 | i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading) | ||
143 | { | ||
144 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
145 | int mode = MODE_I2C_STOP; | ||
146 | |||
147 | if (reading) | ||
148 | mode |= MODE_I2C_READ; | ||
149 | else | ||
150 | mode |= MODE_I2C_WRITE; | ||
151 | if (algo_data->running) { | ||
152 | (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); | ||
153 | algo_data->running = false; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Write a single byte to the current I2C address; the | ||
159 | * I2C link must be running or this returns -EIO | ||
160 | */ | ||
161 | static int | ||
162 | i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte) | ||
163 | { | ||
164 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
165 | int ret; | ||
166 | |||
167 | if (!algo_data->running) | ||
168 | return -EIO; | ||
169 | |||
170 | ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL); | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * Read a single byte from the current I2C address, the | ||
176 | * I2C link must be running or this returns -EIO | ||
177 | */ | ||
178 | static int | ||
179 | i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret) | ||
180 | { | ||
181 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
182 | int ret; | ||
183 | |||
184 | if (!algo_data->running) | ||
185 | return -EIO; | ||
186 | |||
187 | ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | static int | ||
192 | i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter, | ||
193 | struct i2c_msg *msgs, | ||
194 | int num) | ||
195 | { | ||
196 | int ret = 0; | ||
197 | bool reading = false; | ||
198 | int m; | ||
199 | int b; | ||
200 | |||
201 | for (m = 0; m < num; m++) { | ||
202 | u16 len = msgs[m].len; | ||
203 | u8 *buf = msgs[m].buf; | ||
204 | reading = (msgs[m].flags & I2C_M_RD) != 0; | ||
205 | ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading); | ||
206 | if (ret < 0) | ||
207 | break; | ||
208 | if (reading) { | ||
209 | for (b = 0; b < len; b++) { | ||
210 | ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]); | ||
211 | if (ret < 0) | ||
212 | break; | ||
213 | } | ||
214 | } else { | ||
215 | for (b = 0; b < len; b++) { | ||
216 | ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]); | ||
217 | if (ret < 0) | ||
218 | break; | ||
219 | } | ||
220 | } | ||
221 | if (ret < 0) | ||
222 | break; | ||
223 | } | ||
224 | if (ret >= 0) | ||
225 | ret = num; | ||
226 | i2c_algo_dp_aux_stop(adapter, reading); | ||
227 | DRM_DEBUG("dp_aux_xfer return %d\n", ret); | ||
228 | return ret; | ||
229 | } | ||
230 | |||
231 | static u32 | ||
232 | i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter) | ||
233 | { | ||
234 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | | ||
235 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | | ||
236 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | | ||
237 | I2C_FUNC_10BIT_ADDR; | ||
238 | } | ||
239 | |||
240 | static const struct i2c_algorithm i2c_dp_aux_algo = { | ||
241 | .master_xfer = i2c_algo_dp_aux_xfer, | ||
242 | .functionality = i2c_algo_dp_aux_functionality, | ||
243 | }; | ||
244 | |||
245 | static void | ||
246 | i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) | ||
247 | { | ||
248 | (void) i2c_algo_dp_aux_address(adapter, 0, false); | ||
249 | (void) i2c_algo_dp_aux_stop(adapter, false); | ||
250 | |||
251 | } | ||
252 | |||
253 | static int | ||
254 | i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) | ||
255 | { | ||
256 | adapter->algo = &i2c_dp_aux_algo; | ||
257 | adapter->retries = 3; | ||
258 | i2c_dp_aux_reset_bus(adapter); | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | int | ||
263 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter) | ||
264 | { | ||
265 | int error; | ||
266 | |||
267 | error = i2c_dp_aux_prepare_bus(adapter); | ||
268 | if (error) | ||
269 | return error; | ||
270 | error = i2c_add_adapter(adapter); | ||
271 | return error; | ||
272 | } | ||
273 | EXPORT_SYMBOL(i2c_dp_aux_add_bus); | ||
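Once registered, the adapter behaves like any other i2c bus, which is how intel_ddc_get_modes() ends up fetching EDID over the AUX channel through intel_output->ddc_bus. A hypothetical caller (illustrative only; read_edid_block and the fixed 128-byte block size are assumptions, not driver code, though 0x50 is the standard EDID slave address) looks like this; because the offset write and the data read arrive as two messages of one i2c_transfer(), the xfer hook above keeps AUX_I2C_MOT set until the final stop packet.

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/kernel.h>

/* Illustrative only: read the first EDID block through the AUX-CH bridge. */
static int read_edid_block(struct i2c_adapter *adapter, u8 *buf)
{
	u8 offset = 0;			/* start of the EDID block */
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf    },
	};
	int ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));

	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}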
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd4b9c5f715e..d6f92ea1b553 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -54,6 +54,8 @@ | |||
54 | #define INTEL_OUTPUT_LVDS 4 | 54 | #define INTEL_OUTPUT_LVDS 4 |
55 | #define INTEL_OUTPUT_TVOUT 5 | 55 | #define INTEL_OUTPUT_TVOUT 5 |
56 | #define INTEL_OUTPUT_HDMI 6 | 56 | #define INTEL_OUTPUT_HDMI 6 |
57 | #define INTEL_OUTPUT_DISPLAYPORT 7 | ||
58 | #define INTEL_OUTPUT_EDP 8 | ||
57 | 59 | ||
58 | #define INTEL_DVO_CHIP_NONE 0 | 60 | #define INTEL_DVO_CHIP_NONE 0 |
59 | #define INTEL_DVO_CHIP_LVDS 1 | 61 | #define INTEL_DVO_CHIP_LVDS 1 |
@@ -65,7 +67,6 @@ struct intel_i2c_chan { | |||
65 | u32 reg; /* GPIO reg */ | 67 | u32 reg; /* GPIO reg */ |
66 | struct i2c_adapter adapter; | 68 | struct i2c_adapter adapter; |
67 | struct i2c_algo_bit_data algo; | 69 | struct i2c_algo_bit_data algo; |
68 | u8 slave_addr; | ||
69 | }; | 70 | }; |
70 | 71 | ||
71 | struct intel_framebuffer { | 72 | struct intel_framebuffer { |
@@ -79,11 +80,12 @@ struct intel_output { | |||
79 | 80 | ||
80 | struct drm_encoder enc; | 81 | struct drm_encoder enc; |
81 | int type; | 82 | int type; |
82 | struct intel_i2c_chan *i2c_bus; /* for control functions */ | 83 | struct i2c_adapter *i2c_bus; |
83 | struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ | 84 | struct i2c_adapter *ddc_bus; |
84 | bool load_detect_temp; | 85 | bool load_detect_temp; |
85 | bool needs_tv_clock; | 86 | bool needs_tv_clock; |
86 | void *dev_priv; | 87 | void *dev_priv; |
88 | void (*hot_plug)(struct intel_output *); | ||
87 | }; | 89 | }; |
88 | 90 | ||
89 | struct intel_crtc { | 91 | struct intel_crtc { |
@@ -104,9 +106,9 @@ struct intel_crtc { | |||
104 | #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) | 106 | #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) |
105 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) | 107 | #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
106 | 108 | ||
107 | struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | 109 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
108 | const char *name); | 110 | const char *name); |
109 | void intel_i2c_destroy(struct intel_i2c_chan *chan); | 111 | void intel_i2c_destroy(struct i2c_adapter *adapter); |
110 | int intel_ddc_get_modes(struct intel_output *intel_output); | 112 | int intel_ddc_get_modes(struct intel_output *intel_output); |
111 | extern bool intel_ddc_probe(struct intel_output *intel_output); | 113 | extern bool intel_ddc_probe(struct intel_output *intel_output); |
112 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | 114 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); |
@@ -116,6 +118,12 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | |||
116 | extern void intel_dvo_init(struct drm_device *dev); | 118 | extern void intel_dvo_init(struct drm_device *dev); |
117 | extern void intel_tv_init(struct drm_device *dev); | 119 | extern void intel_tv_init(struct drm_device *dev); |
118 | extern void intel_lvds_init(struct drm_device *dev); | 120 | extern void intel_lvds_init(struct drm_device *dev); |
121 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); | ||
122 | void | ||
123 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | ||
124 | struct drm_display_mode *adjusted_mode); | ||
125 | extern void intel_edp_link_config (struct intel_output *, int *, int *); | ||
126 | |||
119 | 127 | ||
120 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 128 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
121 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 129 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 1ee3007d6ec0..13bff20930e8 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev) | |||
384 | { | 384 | { |
385 | struct intel_output *intel_output; | 385 | struct intel_output *intel_output; |
386 | struct intel_dvo_device *dvo; | 386 | struct intel_dvo_device *dvo; |
387 | struct intel_i2c_chan *i2cbus = NULL; | 387 | struct i2c_adapter *i2cbus = NULL; |
388 | int ret = 0; | 388 | int ret = 0; |
389 | int i; | 389 | int i; |
390 | int gpio_inited = 0; | ||
391 | int encoder_type = DRM_MODE_ENCODER_NONE; | 390 | int encoder_type = DRM_MODE_ENCODER_NONE; |
392 | intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); | 391 | intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); |
393 | if (!intel_output) | 392 | if (!intel_output) |
@@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev) | |||
420 | * It appears that everything is on GPIOE except for panels | 419 | * It appears that everything is on GPIOE except for panels |
421 | * on i830 laptops, which are on GPIOB (DVOA). | 420 | * on i830 laptops, which are on GPIOB (DVOA). |
422 | */ | 421 | */ |
423 | if (gpio_inited != gpio) { | 422 | if (i2cbus != NULL) |
424 | if (i2cbus != NULL) | 423 | intel_i2c_destroy(i2cbus); |
425 | intel_i2c_destroy(i2cbus); | 424 | if (!(i2cbus = intel_i2c_create(dev, gpio, |
426 | if (!(i2cbus = intel_i2c_create(dev, gpio, | 425 | gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { |
427 | gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { | 426 | continue; |
428 | continue; | ||
429 | } | ||
430 | gpio_inited = gpio; | ||
431 | } | 427 | } |
432 | 428 | ||
433 | if (dvo->dev_ops!= NULL) | 429 | if (dvo->dev_ops!= NULL) |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 1af7d68e3807..1d30802e773e 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -453,7 +453,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
453 | size = ALIGN(size, PAGE_SIZE); | 453 | size = ALIGN(size, PAGE_SIZE); |
454 | fbo = drm_gem_object_alloc(dev, size); | 454 | fbo = drm_gem_object_alloc(dev, size); |
455 | if (!fbo) { | 455 | if (!fbo) { |
456 | printk(KERN_ERR "failed to allocate framebuffer\n"); | 456 | DRM_ERROR("failed to allocate framebuffer\n"); |
457 | ret = -ENOMEM; | 457 | ret = -ENOMEM; |
458 | goto out; | 458 | goto out; |
459 | } | 459 | } |
@@ -610,8 +610,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
610 | par->dev = dev; | 610 | par->dev = dev; |
611 | 611 | ||
612 | /* To allow resizeing without swapping buffers */ | 612 | /* To allow resizeing without swapping buffers */ |
613 | printk("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, | 613 | DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, |
614 | intel_fb->base.height, obj_priv->gtt_offset, fbo); | 614 | intel_fb->base.height, obj_priv->gtt_offset, fbo); |
615 | 615 | ||
616 | mutex_unlock(&dev->struct_mutex); | 616 | mutex_unlock(&dev->struct_mutex); |
617 | return 0; | 617 | return 0; |
@@ -698,13 +698,13 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc * | |||
698 | } else | 698 | } else |
699 | intelfb_set_par(info); | 699 | intelfb_set_par(info); |
700 | 700 | ||
701 | printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, | 701 | DRM_INFO("fb%d: %s frame buffer device\n", info->node, |
702 | info->fix.id); | 702 | info->fix.id); |
703 | 703 | ||
704 | /* Switch back to kernel console on panic */ | 704 | /* Switch back to kernel console on panic */ |
705 | kernelfb_mode = *modeset; | 705 | kernelfb_mode = *modeset; |
706 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); | 706 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); |
707 | printk(KERN_INFO "registered panic notifier\n"); | 707 | DRM_DEBUG("registered panic notifier\n"); |
708 | 708 | ||
709 | return 0; | 709 | return 0; |
710 | } | 710 | } |
@@ -852,13 +852,13 @@ static int intelfb_single_fb_probe(struct drm_device *dev) | |||
852 | } else | 852 | } else |
853 | intelfb_set_par(info); | 853 | intelfb_set_par(info); |
854 | 854 | ||
855 | printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, | 855 | DRM_INFO("fb%d: %s frame buffer device\n", info->node, |
856 | info->fix.id); | 856 | info->fix.id); |
857 | 857 | ||
858 | /* Switch back to kernel console on panic */ | 858 | /* Switch back to kernel console on panic */ |
859 | kernelfb_mode = *modeset; | 859 | kernelfb_mode = *modeset; |
860 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); | 860 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); |
861 | printk(KERN_INFO "registered panic notifier\n"); | 861 | DRM_DEBUG("registered panic notifier\n"); |
862 | 862 | ||
863 | return 0; | 863 | return 0; |
864 | } | 864 | } |
@@ -872,8 +872,8 @@ void intelfb_restore(void) | |||
872 | { | 872 | { |
873 | int ret; | 873 | int ret; |
874 | if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) { | 874 | if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) { |
875 | printk(KERN_ERR "Failed to restore crtc configuration: %d\n", | 875 | DRM_ERROR("Failed to restore crtc configuration: %d\n", |
876 | ret); | 876 | ret); |
877 | } | 877 | } |
878 | } | 878 | } |
879 | 879 | ||
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4ea2a651b92c..1842290cded3 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drmP.h" | 31 | #include "drmP.h" |
32 | #include "drm.h" | 32 | #include "drm.h" |
33 | #include "drm_crtc.h" | 33 | #include "drm_crtc.h" |
34 | #include "drm_edid.h" | ||
34 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
35 | #include "i915_drm.h" | 36 | #include "i915_drm.h" |
36 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
@@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
56 | sdvox = SDVO_ENCODING_HDMI | | 57 | sdvox = SDVO_ENCODING_HDMI | |
57 | SDVO_BORDER_ENABLE | | 58 | SDVO_BORDER_ENABLE | |
58 | SDVO_VSYNC_ACTIVE_HIGH | | 59 | SDVO_VSYNC_ACTIVE_HIGH | |
59 | SDVO_HSYNC_ACTIVE_HIGH | | 60 | SDVO_HSYNC_ACTIVE_HIGH; |
60 | SDVO_NULL_PACKETS_DURING_VSYNC; | ||
61 | 61 | ||
62 | if (hdmi_priv->has_hdmi_sink) | 62 | if (hdmi_priv->has_hdmi_sink) |
63 | sdvox |= SDVO_AUDIO_ENABLE; | 63 | sdvox |= SDVO_AUDIO_ENABLE; |
@@ -129,83 +129,28 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
129 | return true; | 129 | return true; |
130 | } | 130 | } |
131 | 131 | ||
132 | static void | ||
133 | intel_hdmi_sink_detect(struct drm_connector *connector) | ||
134 | { | ||
135 | struct intel_output *intel_output = to_intel_output(connector); | ||
136 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
137 | struct edid *edid = NULL; | ||
138 | |||
139 | edid = drm_get_edid(&intel_output->base, | ||
140 | &intel_output->ddc_bus->adapter); | ||
141 | if (edid != NULL) { | ||
142 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | ||
143 | kfree(edid); | ||
144 | intel_output->base.display_info.raw_edid = NULL; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | static enum drm_connector_status | ||
149 | igdng_hdmi_detect(struct drm_connector *connector) | ||
150 | { | ||
151 | struct intel_output *intel_output = to_intel_output(connector); | ||
152 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
153 | |||
154 | /* FIXME hotplug detect */ | ||
155 | |||
156 | hdmi_priv->has_hdmi_sink = false; | ||
157 | intel_hdmi_sink_detect(connector); | ||
158 | if (hdmi_priv->has_hdmi_sink) | ||
159 | return connector_status_connected; | ||
160 | else | ||
161 | return connector_status_disconnected; | ||
162 | } | ||
163 | |||
164 | static enum drm_connector_status | 132 | static enum drm_connector_status |
165 | intel_hdmi_detect(struct drm_connector *connector) | 133 | intel_hdmi_detect(struct drm_connector *connector) |
166 | { | 134 | { |
167 | struct drm_device *dev = connector->dev; | ||
168 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
169 | struct intel_output *intel_output = to_intel_output(connector); | 135 | struct intel_output *intel_output = to_intel_output(connector); |
170 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 136 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; |
171 | u32 temp, bit; | 137 | struct edid *edid = NULL; |
172 | 138 | enum drm_connector_status status = connector_status_disconnected; | |
173 | if (IS_IGDNG(dev)) | ||
174 | return igdng_hdmi_detect(connector); | ||
175 | |||
176 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
177 | |||
178 | switch (hdmi_priv->sdvox_reg) { | ||
179 | case SDVOB: | ||
180 | temp |= HDMIB_HOTPLUG_INT_EN; | ||
181 | break; | ||
182 | case SDVOC: | ||
183 | temp |= HDMIC_HOTPLUG_INT_EN; | ||
184 | break; | ||
185 | default: | ||
186 | return connector_status_unknown; | ||
187 | } | ||
188 | |||
189 | I915_WRITE(PORT_HOTPLUG_EN, temp); | ||
190 | 139 | ||
191 | POSTING_READ(PORT_HOTPLUG_EN); | 140 | hdmi_priv->has_hdmi_sink = false; |
141 | edid = drm_get_edid(&intel_output->base, | ||
142 | intel_output->ddc_bus); | ||
192 | 143 | ||
193 | switch (hdmi_priv->sdvox_reg) { | 144 | if (edid) { |
194 | case SDVOB: | 145 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
195 | bit = HDMIB_HOTPLUG_INT_STATUS; | 146 | status = connector_status_connected; |
196 | break; | 147 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); |
197 | case SDVOC: | 148 | } |
198 | bit = HDMIC_HOTPLUG_INT_STATUS; | 149 | intel_output->base.display_info.raw_edid = NULL; |
199 | break; | 150 | kfree(edid); |
200 | default: | ||
201 | return connector_status_unknown; | ||
202 | } | 151 | } |
203 | 152 | ||
204 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { | 153 | return status; |
205 | intel_hdmi_sink_detect(connector); | ||
206 | return connector_status_connected; | ||
207 | } else | ||
208 | return connector_status_disconnected; | ||
209 | } | 154 | } |
210 | 155 | ||
211 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 156 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index f7061f68d050..62b8bead7652 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -124,6 +124,7 @@ static void set_data(void *data, int state_high) | |||
124 | * @output: driver specific output device | 124 | * @output: driver specific output device |
125 | * @reg: GPIO reg to use | 125 | * @reg: GPIO reg to use |
126 | * @name: name for this bus | 126 | * @name: name for this bus |
127 | * @slave_addr: slave address (if fixed) | ||
127 | * | 128 | * |
128 | * Creates and registers a new i2c bus with the Linux i2c layer, for use | 129 | * Creates and registers a new i2c bus with the Linux i2c layer, for use |
129 | * in output probing and control (e.g. DDC or SDVO control functions). | 130 | * in output probing and control (e.g. DDC or SDVO control functions). |
@@ -139,8 +140,8 @@ static void set_data(void *data, int state_high) | |||
139 | * %GPIOH | 140 | * %GPIOH |
140 | * see PRM for details on how these different busses are used. | 141 | * see PRM for details on how these different busses are used. |
141 | */ | 142 | */ |
142 | struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | 143 | struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, |
143 | const char *name) | 144 | const char *name) |
144 | { | 145 | { |
145 | struct intel_i2c_chan *chan; | 146 | struct intel_i2c_chan *chan; |
146 | 147 | ||
@@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, | |||
174 | intel_i2c_quirk_set(dev, false); | 175 | intel_i2c_quirk_set(dev, false); |
175 | udelay(20); | 176 | udelay(20); |
176 | 177 | ||
177 | return chan; | 178 | return &chan->adapter; |
178 | 179 | ||
179 | out_free: | 180 | out_free: |
180 | kfree(chan); | 181 | kfree(chan); |
@@ -187,11 +188,16 @@ out_free: | |||
187 | * | 188 | * |
188 | * Unregister the adapter from the i2c layer, then free the structure. | 189 | * Unregister the adapter from the i2c layer, then free the structure. |
189 | */ | 190 | */ |
190 | void intel_i2c_destroy(struct intel_i2c_chan *chan) | 191 | void intel_i2c_destroy(struct i2c_adapter *adapter) |
191 | { | 192 | { |
192 | if (!chan) | 193 | struct intel_i2c_chan *chan; |
194 | |||
195 | if (!adapter) | ||
193 | return; | 196 | return; |
194 | 197 | ||
198 | chan = container_of(adapter, | ||
199 | struct intel_i2c_chan, | ||
200 | adapter); | ||
195 | i2c_del_adapter(&chan->adapter); | 201 | i2c_del_adapter(&chan->adapter); |
196 | kfree(chan); | 202 | kfree(chan); |
197 | } | 203 | } |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f073ed8432e8..3f445a80c552 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -36,9 +36,25 @@ | |||
36 | #include "intel_drv.h" | 36 | #include "intel_drv.h" |
37 | #include "i915_drm.h" | 37 | #include "i915_drm.h" |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | #include <linux/acpi.h> | ||
39 | 40 | ||
40 | #define I915_LVDS "i915_lvds" | 41 | #define I915_LVDS "i915_lvds" |
41 | 42 | ||
43 | /* | ||
44 | * The following four scaling options are defined: | ||
45 | * #define DRM_MODE_SCALE_NON_GPU 0 | ||
46 | * #define DRM_MODE_SCALE_FULLSCREEN 1 | ||
47 | * #define DRM_MODE_SCALE_NO_SCALE 2 | ||
48 | * #define DRM_MODE_SCALE_ASPECT 3 | ||
49 | */ | ||
50 | |||
51 | /* Private structure for the integrated LVDS support */ | ||
52 | struct intel_lvds_priv { | ||
53 | int fitting_mode; | ||
54 | u32 pfit_control; | ||
55 | u32 pfit_pgm_ratios; | ||
56 | }; | ||
57 | |||
42 | /** | 58 | /** |
43 | * Sets the backlight level. | 59 | * Sets the backlight level. |
44 | * | 60 | * |
@@ -213,26 +229,45 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
213 | struct drm_display_mode *mode, | 229 | struct drm_display_mode *mode, |
214 | struct drm_display_mode *adjusted_mode) | 230 | struct drm_display_mode *adjusted_mode) |
215 | { | 231 | { |
232 | /* | ||
233 | * Floating point operations are not supported, so PANEL_RATIO_FACTOR | ||
234 | * is defined to avoid floating point computation when calculating | ||
235 | * the panel ratio. | ||
236 | */ | ||
237 | #define PANEL_RATIO_FACTOR 8192 | ||
216 | struct drm_device *dev = encoder->dev; | 238 | struct drm_device *dev = encoder->dev; |
217 | struct drm_i915_private *dev_priv = dev->dev_private; | 239 | struct drm_i915_private *dev_priv = dev->dev_private; |
218 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 240 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
219 | struct drm_encoder *tmp_encoder; | 241 | struct drm_encoder *tmp_encoder; |
242 | struct intel_output *intel_output = enc_to_intel_output(encoder); | ||
243 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | ||
244 | u32 pfit_control = 0, pfit_pgm_ratios = 0; | ||
245 | int left_border = 0, right_border = 0, top_border = 0; | ||
246 | int bottom_border = 0; | ||
247 | bool border = 0; | ||
248 | int panel_ratio, desired_ratio, vert_scale, horiz_scale; | ||
249 | int horiz_ratio, vert_ratio; | ||
250 | u32 hsync_width, vsync_width; | ||
251 | u32 hblank_width, vblank_width; | ||
252 | u32 hsync_pos, vsync_pos; | ||
220 | 253 | ||
221 | /* Should never happen!! */ | 254 | /* Should never happen!! */ |
222 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { | 255 | if (!IS_I965G(dev) && intel_crtc->pipe == 0) { |
223 | printk(KERN_ERR "Can't support LVDS on pipe A\n"); | 256 | DRM_ERROR("Can't support LVDS on pipe A\n"); |
224 | return false; | 257 | return false; |
225 | } | 258 | } |
226 | 259 | ||
227 | /* Should never happen!! */ | 260 | /* Should never happen!! */ |
228 | list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { | 261 | list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { |
229 | if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { | 262 | if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { |
230 | printk(KERN_ERR "Can't enable LVDS and another " | 263 | DRM_ERROR("Can't enable LVDS and another " |
231 | "encoder on the same pipe\n"); | 264 | "encoder on the same pipe\n"); |
232 | return false; | 265 | return false; |
233 | } | 266 | } |
234 | } | 267 | } |
235 | 268 | /* If we don't have a panel mode, there is nothing we can do */ | |
269 | if (dev_priv->panel_fixed_mode == NULL) | ||
270 | return true; | ||
236 | /* | 271 | /* |
237 | * If we have timings from the BIOS for the panel, put them in | 272 | * If we have timings from the BIOS for the panel, put them in |
238 | * to the adjusted mode. The CRTC will be set up for this mode, | 273 | * to the adjusted mode. The CRTC will be set up for this mode, |
@@ -256,6 +291,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
256 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | 291 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); |
257 | } | 292 | } |
258 | 293 | ||
294 | /* Make sure pre-965s set dither correctly */ | ||
295 | if (!IS_I965G(dev)) { | ||
296 | if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) | ||
297 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
298 | } | ||
299 | |||
300 | /* Native modes don't need fitting */ | ||
301 | if (adjusted_mode->hdisplay == mode->hdisplay && | ||
302 | adjusted_mode->vdisplay == mode->vdisplay) { | ||
303 | pfit_pgm_ratios = 0; | ||
304 | border = 0; | ||
305 | goto out; | ||
306 | } | ||
307 | |||
308 | /* 965+ wants fuzzy fitting */ | ||
309 | if (IS_I965G(dev)) | ||
310 | pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | | ||
311 | PFIT_FILTER_FUZZY; | ||
312 | |||
313 | hsync_width = adjusted_mode->crtc_hsync_end - | ||
314 | adjusted_mode->crtc_hsync_start; | ||
315 | vsync_width = adjusted_mode->crtc_vsync_end - | ||
316 | adjusted_mode->crtc_vsync_start; | ||
317 | hblank_width = adjusted_mode->crtc_hblank_end - | ||
318 | adjusted_mode->crtc_hblank_start; | ||
319 | vblank_width = adjusted_mode->crtc_vblank_end - | ||
320 | adjusted_mode->crtc_vblank_start; | ||
321 | /* | ||
322 | * Deal with panel fitting options. Figure out how to stretch the | ||
323 | * image based on its aspect ratio & the current panel fitting mode. | ||
324 | */ | ||
325 | panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR / | ||
326 | adjusted_mode->vdisplay; | ||
327 | desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR / | ||
328 | mode->vdisplay; | ||
329 | /* | ||
330 | * Enable automatic panel scaling for non-native modes so that they fill | ||
331 | * the screen. Should be enabled before the pipe is enabled, according | ||
332 | * to register description and PRM. | ||
333 | * Change the values written here to make the borders visible for debugging. | ||
334 | */ | ||
335 | I915_WRITE(BCLRPAT_A, 0); | ||
336 | I915_WRITE(BCLRPAT_B, 0); | ||
337 | |||
338 | switch (lvds_priv->fitting_mode) { | ||
339 | case DRM_MODE_SCALE_NO_SCALE: | ||
340 | /* | ||
341 | * For centered modes, we have to calculate border widths & | ||
342 | * heights and modify the values programmed into the CRTC. | ||
343 | */ | ||
344 | left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; | ||
345 | right_border = left_border; | ||
346 | if (mode->hdisplay & 1) | ||
347 | right_border++; | ||
348 | top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2; | ||
349 | bottom_border = top_border; | ||
350 | if (mode->vdisplay & 1) | ||
351 | bottom_border++; | ||
352 | /* Set active & border values */ | ||
353 | adjusted_mode->crtc_hdisplay = mode->hdisplay; | ||
354 | /* Keep the border even */ | ||
355 | if (right_border & 1) | ||
356 | right_border++; | ||
357 | /* use the border directly instead of border minus one */ | ||
358 | adjusted_mode->crtc_hblank_start = mode->hdisplay + | ||
359 | right_border; | ||
360 | /* keep the blank width constant */ | ||
361 | adjusted_mode->crtc_hblank_end = | ||
362 | adjusted_mode->crtc_hblank_start + hblank_width; | ||
363 | /* get the hsync pos relative to hblank start */ | ||
364 | hsync_pos = (hblank_width - hsync_width) / 2; | ||
365 | /* keep the hsync pos even */ | ||
366 | if (hsync_pos & 1) | ||
367 | hsync_pos++; | ||
368 | adjusted_mode->crtc_hsync_start = | ||
369 | adjusted_mode->crtc_hblank_start + hsync_pos; | ||
370 | /* keep the hsync width constant */ | ||
371 | adjusted_mode->crtc_hsync_end = | ||
372 | adjusted_mode->crtc_hsync_start + hsync_width; | ||
373 | adjusted_mode->crtc_vdisplay = mode->vdisplay; | ||
374 | /* use the border instead of border minus one */ | ||
375 | adjusted_mode->crtc_vblank_start = mode->vdisplay + | ||
376 | bottom_border; | ||
377 | /* keep the vblank width constant */ | ||
378 | adjusted_mode->crtc_vblank_end = | ||
379 | adjusted_mode->crtc_vblank_start + vblank_width; | ||
380 | /* get the vsync start position relative to vblank start */ | ||
381 | vsync_pos = (vblank_width - vsync_width) / 2; | ||
382 | adjusted_mode->crtc_vsync_start = | ||
383 | adjusted_mode->crtc_vblank_start + vsync_pos; | ||
384 | /* keep the vsync width constant */ | ||
385 | adjusted_mode->crtc_vsync_end = | ||
386 | adjusted_mode->crtc_vblank_start + vsync_width; | ||
387 | border = 1; | ||
388 | break; | ||
389 | case DRM_MODE_SCALE_ASPECT: | ||
390 | /* Scale but preserve the aspect ratio */ | ||
391 | pfit_control |= PFIT_ENABLE; | ||
392 | if (IS_I965G(dev)) { | ||
393 | /* 965+ is easy, it does everything in hw */ | ||
394 | if (panel_ratio > desired_ratio) | ||
395 | pfit_control |= PFIT_SCALING_PILLAR; | ||
396 | else if (panel_ratio < desired_ratio) | ||
397 | pfit_control |= PFIT_SCALING_LETTER; | ||
398 | else | ||
399 | pfit_control |= PFIT_SCALING_AUTO; | ||
400 | } else { | ||
401 | /* | ||
402 | * For earlier chips we have to calculate the scaling | ||
403 | * ratio by hand and program it into the | ||
404 | * PFIT_PGM_RATIO register | ||
405 | */ | ||
406 | u32 horiz_bits, vert_bits, bits = 12; | ||
407 | horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ | ||
408 | adjusted_mode->hdisplay; | ||
409 | vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ | ||
410 | adjusted_mode->vdisplay; | ||
411 | horiz_scale = adjusted_mode->hdisplay * | ||
412 | PANEL_RATIO_FACTOR / mode->hdisplay; | ||
413 | vert_scale = adjusted_mode->vdisplay * | ||
414 | PANEL_RATIO_FACTOR / mode->vdisplay; | ||
415 | |||
416 | /* retain aspect ratio */ | ||
417 | if (panel_ratio > desired_ratio) { /* Pillar */ | ||
418 | u32 scaled_width; | ||
419 | scaled_width = mode->hdisplay * vert_scale / | ||
420 | PANEL_RATIO_FACTOR; | ||
421 | horiz_ratio = vert_ratio; | ||
422 | pfit_control |= (VERT_AUTO_SCALE | | ||
423 | VERT_INTERP_BILINEAR | | ||
424 | HORIZ_INTERP_BILINEAR); | ||
425 | /* Pillar will have left/right borders */ | ||
426 | left_border = (adjusted_mode->hdisplay - | ||
427 | scaled_width) / 2; | ||
428 | right_border = left_border; | ||
429 | if (mode->hdisplay & 1) /* odd resolutions */ | ||
430 | right_border++; | ||
431 | /* keep the border even */ | ||
432 | if (right_border & 1) | ||
433 | right_border++; | ||
434 | adjusted_mode->crtc_hdisplay = scaled_width; | ||
435 | /* use border instead of border minus one */ | ||
436 | adjusted_mode->crtc_hblank_start = | ||
437 | scaled_width + right_border; | ||
438 | /* keep the hblank width constant */ | ||
439 | adjusted_mode->crtc_hblank_end = | ||
440 | adjusted_mode->crtc_hblank_start + | ||
441 | hblank_width; | ||
442 | /* | ||
443 | * get the hsync start pos relative to | ||
444 | * hblank start | ||
445 | */ | ||
446 | hsync_pos = (hblank_width - hsync_width) / 2; | ||
447 | /* keep the hsync_pos even */ | ||
448 | if (hsync_pos & 1) | ||
449 | hsync_pos++; | ||
450 | adjusted_mode->crtc_hsync_start = | ||
451 | adjusted_mode->crtc_hblank_start + | ||
452 | hsync_pos; | ||
453 | /* keep the hsync width constant */ | ||
454 | adjusted_mode->crtc_hsync_end = | ||
455 | adjusted_mode->crtc_hsync_start + | ||
456 | hsync_width; | ||
457 | border = 1; | ||
458 | } else if (panel_ratio < desired_ratio) { /* letter */ | ||
459 | u32 scaled_height = mode->vdisplay * | ||
460 | horiz_scale / PANEL_RATIO_FACTOR; | ||
461 | vert_ratio = horiz_ratio; | ||
462 | pfit_control |= (HORIZ_AUTO_SCALE | | ||
463 | VERT_INTERP_BILINEAR | | ||
464 | HORIZ_INTERP_BILINEAR); | ||
465 | /* Letterbox will have top/bottom border */ | ||
466 | top_border = (adjusted_mode->vdisplay - | ||
467 | scaled_height) / 2; | ||
468 | bottom_border = top_border; | ||
469 | if (mode->vdisplay & 1) | ||
470 | bottom_border++; | ||
471 | adjusted_mode->crtc_vdisplay = scaled_height; | ||
472 | /* use border instead of border minus one */ | ||
473 | adjusted_mode->crtc_vblank_start = | ||
474 | scaled_height + bottom_border; | ||
475 | /* keep the vblank width constant */ | ||
476 | adjusted_mode->crtc_vblank_end = | ||
477 | adjusted_mode->crtc_vblank_start + | ||
478 | vblank_width; | ||
479 | /* | ||
480 | * get the vsync start pos relative to | ||
481 | * vblank start | ||
482 | */ | ||
483 | vsync_pos = (vblank_width - vsync_width) / 2; | ||
484 | adjusted_mode->crtc_vsync_start = | ||
485 | adjusted_mode->crtc_vblank_start + | ||
486 | vsync_pos; | ||
487 | /* keep the vsync width constant */ | ||
488 | adjusted_mode->crtc_vsync_end = | ||
489 | adjusted_mode->crtc_vsync_start + | ||
490 | vsync_width; | ||
491 | border = 1; | ||
492 | } else { | ||
493 | /* Aspect ratios match, let hw scale both directions */ | ||
494 | pfit_control |= (VERT_AUTO_SCALE | | ||
495 | HORIZ_AUTO_SCALE | | ||
496 | VERT_INTERP_BILINEAR | | ||
497 | HORIZ_INTERP_BILINEAR); | ||
498 | } | ||
499 | horiz_bits = (1 << bits) * horiz_ratio / | ||
500 | PANEL_RATIO_FACTOR; | ||
501 | vert_bits = (1 << bits) * vert_ratio / | ||
502 | PANEL_RATIO_FACTOR; | ||
503 | pfit_pgm_ratios = | ||
504 | ((vert_bits << PFIT_VERT_SCALE_SHIFT) & | ||
505 | PFIT_VERT_SCALE_MASK) | | ||
506 | ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & | ||
507 | PFIT_HORIZ_SCALE_MASK); | ||
508 | } | ||
509 | break; | ||
510 | |||
511 | case DRM_MODE_SCALE_FULLSCREEN: | ||
512 | /* | ||
513 | * Full scaling, even if it changes the aspect ratio. | ||
514 | * Fortunately this is all done for us in hw. | ||
515 | */ | ||
516 | pfit_control |= PFIT_ENABLE; | ||
517 | if (IS_I965G(dev)) | ||
518 | pfit_control |= PFIT_SCALING_AUTO; | ||
519 | else | ||
520 | pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | | ||
521 | VERT_INTERP_BILINEAR | | ||
522 | HORIZ_INTERP_BILINEAR); | ||
523 | break; | ||
524 | default: | ||
525 | break; | ||
526 | } | ||
527 | |||
528 | out: | ||
529 | lvds_priv->pfit_control = pfit_control; | ||
530 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; | ||
259 | /* | 531 | /* |
260 | * XXX: It would be nice to support lower refresh rates on the | 532 | * XXX: It would be nice to support lower refresh rates on the |
261 | * panels to reduce power consumption, and perhaps match the | 533 | * panels to reduce power consumption, and perhaps match the |
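
The centering and aspect-ratio arithmetic in the hunk above is done entirely in fixed point: PANEL_RATIO_FACTOR scales the width/height quotients so they can be compared as integers. A small self-contained sketch (hypothetical fit_kind() helper, same constant) shows how the pillarbox/letterbox decision falls out of those integer ratios:

    #include <stdio.h>

    #define PANEL_RATIO_FACTOR 8192         /* fixed-point scale, as in the patch */

    /* Classify how a requested mode maps onto the panel's native mode. */
    static const char *fit_kind(int panel_w, int panel_h, int mode_w, int mode_h)
    {
            int panel_ratio = panel_w * PANEL_RATIO_FACTOR / panel_h;
            int desired_ratio = mode_w * PANEL_RATIO_FACTOR / mode_h;

            if (panel_ratio > desired_ratio)
                    return "pillarbox: scale vertically, add left/right borders";
            if (panel_ratio < desired_ratio)
                    return "letterbox: scale horizontally, add top/bottom borders";
            return "aspect ratios match: scale both directions";
    }

    int main(void)
    {
            /* 1280x1024 on a 1400x1050 panel: the panel is relatively wider */
            printf("%s\n", fit_kind(1400, 1050, 1280, 1024));
            return 0;
    }

On pre-965 hardware the same scaled ratios are then packed into the 12-bit horiz_bits/vert_bits fields of PFIT_PGM_RATIOS, while 965+ only needs the PFIT_SCALING_* mode bits.
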
@@ -301,8 +573,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
301 | { | 573 | { |
302 | struct drm_device *dev = encoder->dev; | 574 | struct drm_device *dev = encoder->dev; |
303 | struct drm_i915_private *dev_priv = dev->dev_private; | 575 | struct drm_i915_private *dev_priv = dev->dev_private; |
304 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 576 | struct intel_output *intel_output = enc_to_intel_output(encoder); |
305 | u32 pfit_control; | 577 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; |
306 | 578 | ||
307 | /* | 579 | /* |
308 | * The LVDS pin pair will already have been turned on in the | 580 | * The LVDS pin pair will already have been turned on in the |
@@ -319,22 +591,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
319 | * screen. Should be enabled before the pipe is enabled, according to | 591 | * screen. Should be enabled before the pipe is enabled, according to |
320 | * register description and PRM. | 592 | * register description and PRM. |
321 | */ | 593 | */ |
322 | if (mode->hdisplay != adjusted_mode->hdisplay || | 594 | I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios); |
323 | mode->vdisplay != adjusted_mode->vdisplay) | 595 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); |
324 | pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | | ||
325 | HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | | ||
326 | HORIZ_INTERP_BILINEAR); | ||
327 | else | ||
328 | pfit_control = 0; | ||
329 | |||
330 | if (!IS_I965G(dev)) { | ||
331 | if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) | ||
332 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | ||
333 | } | ||
334 | else | ||
335 | pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; | ||
336 | |||
337 | I915_WRITE(PFIT_CONTROL, pfit_control); | ||
338 | } | 596 | } |
339 | 597 | ||
340 | /** | 598 | /** |
@@ -406,6 +664,34 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
406 | struct drm_property *property, | 664 | struct drm_property *property, |
407 | uint64_t value) | 665 | uint64_t value) |
408 | { | 666 | { |
667 | struct drm_device *dev = connector->dev; | ||
668 | struct intel_output *intel_output = | ||
669 | to_intel_output(connector); | ||
670 | |||
671 | if (property == dev->mode_config.scaling_mode_property && | ||
672 | connector->encoder) { | ||
673 | struct drm_crtc *crtc = connector->encoder->crtc; | ||
674 | struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; | ||
675 | if (value == DRM_MODE_SCALE_NON_GPU) { | ||
676 | DRM_DEBUG_KMS(I915_LVDS, | ||
677 | "non_GPU property is unsupported\n"); | ||
678 | return 0; | ||
679 | } | ||
680 | if (lvds_priv->fitting_mode == value) { | ||
681 | /* the LVDS scaling property is not changed */ | ||
682 | return 0; | ||
683 | } | ||
684 | lvds_priv->fitting_mode = value; | ||
685 | if (crtc && crtc->enabled) { | ||
686 | /* | ||
687 | * If the CRTC is enabled, the display will be changed | ||
688 | * according to the new panel fitting mode. | ||
689 | */ | ||
690 | drm_crtc_helper_set_mode(crtc, &crtc->mode, | ||
691 | crtc->x, crtc->y, crtc->fb); | ||
692 | } | ||
693 | } | ||
694 | |||
409 | return 0; | 695 | return 0; |
410 | } | 696 | } |
411 | 697 | ||
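
Because the panel fitting mode is exposed as the standard connector "scaling mode" property, user space can change it with the stock libdrm call, and the handler above then forces a fresh mode set on an enabled CRTC. A rough user-space sketch, assuming connector_id and prop_id have already been looked up with drmModeGetConnector()/drmModeGetProperty():

    #include <stdint.h>
    #include <xf86drmMode.h>

    /* Value 3 corresponds to DRM_MODE_SCALE_ASPECT in the list quoted
     * near the top of intel_lvds.c above.
     */
    #define SCALE_ASPECT 3

    static int set_aspect_scaling(int fd, uint32_t connector_id, uint32_t prop_id)
    {
            /* fd is an open DRM device node, e.g. /dev/dri/card0 */
            return drmModeConnectorSetProperty(fd, connector_id, prop_id,
                                               SCALE_ASPECT);
    }
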
@@ -456,7 +742,7 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
456 | .callback = intel_no_lvds_dmi_callback, | 742 | .callback = intel_no_lvds_dmi_callback, |
457 | .ident = "Apple Mac Mini (Core series)", | 743 | .ident = "Apple Mac Mini (Core series)", |
458 | .matches = { | 744 | .matches = { |
459 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | 745 | DMI_MATCH(DMI_SYS_VENDOR, "Apple"), |
460 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), | 746 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), |
461 | }, | 747 | }, |
462 | }, | 748 | }, |
@@ -464,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
464 | .callback = intel_no_lvds_dmi_callback, | 750 | .callback = intel_no_lvds_dmi_callback, |
465 | .ident = "Apple Mac Mini (Core 2 series)", | 751 | .ident = "Apple Mac Mini (Core 2 series)", |
466 | .matches = { | 752 | .matches = { |
467 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | 753 | DMI_MATCH(DMI_SYS_VENDOR, "Apple"), |
468 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), | 754 | DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), |
469 | }, | 755 | }, |
470 | }, | 756 | }, |
@@ -494,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
494 | }, | 780 | }, |
495 | { | 781 | { |
496 | .callback = intel_no_lvds_dmi_callback, | 782 | .callback = intel_no_lvds_dmi_callback, |
783 | .ident = "AOpen Mini PC MP915", | ||
784 | .matches = { | ||
785 | DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), | ||
786 | DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"), | ||
787 | }, | ||
788 | }, | ||
789 | { | ||
790 | .callback = intel_no_lvds_dmi_callback, | ||
497 | .ident = "Aopen i945GTt-VFA", | 791 | .ident = "Aopen i945GTt-VFA", |
498 | .matches = { | 792 | .matches = { |
499 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), | 793 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), |
@@ -503,6 +797,65 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
503 | { } /* terminating entry */ | 797 | { } /* terminating entry */ |
504 | }; | 798 | }; |
505 | 799 | ||
800 | #ifdef CONFIG_ACPI | ||
801 | /* | ||
802 | * check_lid_device -- check whether @handle is an ACPI LID device. | ||
803 | * @handle: ACPI device handle | ||
804 | * @level: depth in the ACPI namespace tree | ||
805 | * @context: the lid_present flag, set to 1 when a LID device is found | ||
806 | * @rv: a return value to fill if desired (not used) | ||
807 | */ | ||
808 | static acpi_status | ||
809 | check_lid_device(acpi_handle handle, u32 level, void *context, | ||
810 | void **return_value) | ||
811 | { | ||
812 | struct acpi_device *acpi_dev; | ||
813 | int *lid_present = context; | ||
814 | |||
815 | acpi_dev = NULL; | ||
816 | /* Get the acpi device for device handle */ | ||
817 | if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { | ||
818 | /* If there is no ACPI device for handle, return */ | ||
819 | return AE_OK; | ||
820 | } | ||
821 | |||
822 | if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) | ||
823 | *lid_present = 1; | ||
824 | |||
825 | return AE_OK; | ||
826 | } | ||
827 | |||
828 | /** | ||
829 | * Check whether an ACPI LID device exists by enumerating the ACPI | ||
830 | * device tree. | ||
831 | */ | ||
832 | static int intel_lid_present(void) | ||
833 | { | ||
834 | int lid_present = 0; | ||
835 | |||
836 | if (acpi_disabled) { | ||
837 | /* If ACPI is disabled, there is no ACPI device tree to | ||
838 | * check, so assume the LID device would have been present. | ||
839 | */ | ||
840 | return 1; | ||
841 | } | ||
842 | |||
843 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
844 | ACPI_UINT32_MAX, | ||
845 | check_lid_device, &lid_present, NULL); | ||
846 | |||
847 | return lid_present; | ||
848 | } | ||
849 | #else | ||
850 | static int intel_lid_present(void) | ||
851 | { | ||
852 | /* In the absence of ACPI built in, assume that the LID device would | ||
853 | * have been present. | ||
854 | */ | ||
855 | return 1; | ||
856 | } | ||
857 | #endif | ||
858 | |||
506 | /** | 859 | /** |
507 | * intel_lvds_init - setup LVDS connectors on this device | 860 | * intel_lvds_init - setup LVDS connectors on this device |
508 | * @dev: drm device | 861 | * @dev: drm device |
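
The lid check added above walks the whole ACPI namespace and matches the PNP0C0D hardware id by hand in its callback. For comparison only (an alternative formulation, not what the patch does), ACPICA can also do the HID filtering itself via acpi_get_devices(), which reduces the callback to setting a flag; this assumes the usual acpi_get_devices()/AE_CTRL_TERMINATE semantics:

    #ifdef CONFIG_ACPI
    #include <linux/acpi.h>

    static acpi_status lid_found(acpi_handle handle, u32 level,
                                 void *context, void **return_value)
    {
            int *lid_present = context;

            *lid_present = 1;               /* any PNP0C0D device is enough */
            return AE_CTRL_TERMINATE;       /* stop the walk early */
    }

    static int lid_present_alt(void)
    {
            int lid_present = 0;

            if (acpi_disabled)
                    return 1;               /* mirror the patch: assume a lid */

            /* Only devices whose _HID matches "PNP0C0D" reach the callback. */
            acpi_get_devices("PNP0C0D", lid_found, &lid_present, NULL);
            return lid_present;
    }
    #endif
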
@@ -518,6 +871,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
518 | struct drm_encoder *encoder; | 871 | struct drm_encoder *encoder; |
519 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 872 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
520 | struct drm_crtc *crtc; | 873 | struct drm_crtc *crtc; |
874 | struct intel_lvds_priv *lvds_priv; | ||
521 | u32 lvds; | 875 | u32 lvds; |
522 | int pipe, gpio = GPIOC; | 876 | int pipe, gpio = GPIOC; |
523 | 877 | ||
@@ -525,13 +879,28 @@ void intel_lvds_init(struct drm_device *dev) | |||
525 | if (dmi_check_system(intel_no_lvds)) | 879 | if (dmi_check_system(intel_no_lvds)) |
526 | return; | 880 | return; |
527 | 881 | ||
882 | /* Assume that any device without an ACPI LID device also doesn't | ||
883 | * have an integrated LVDS. We would be better off parsing the BIOS | ||
884 | * to get a reliable indicator, but that code isn't written yet. | ||
885 | * | ||
886 | * In the case of all-in-one desktops using LVDS that we've seen, | ||
887 | * they're using SDVO LVDS. | ||
888 | */ | ||
889 | if (!intel_lid_present()) | ||
890 | return; | ||
891 | |||
528 | if (IS_IGDNG(dev)) { | 892 | if (IS_IGDNG(dev)) { |
529 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) | 893 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) |
530 | return; | 894 | return; |
895 | if (dev_priv->edp_support) { | ||
896 | DRM_DEBUG("disable LVDS for eDP support\n"); | ||
897 | return; | ||
898 | } | ||
531 | gpio = PCH_GPIOC; | 899 | gpio = PCH_GPIOC; |
532 | } | 900 | } |
533 | 901 | ||
534 | intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); | 902 | intel_output = kzalloc(sizeof(struct intel_output) + |
903 | sizeof(struct intel_lvds_priv), GFP_KERNEL); | ||
535 | if (!intel_output) { | 904 | if (!intel_output) { |
536 | return; | 905 | return; |
537 | } | 906 | } |
@@ -553,7 +922,18 @@ void intel_lvds_init(struct drm_device *dev) | |||
553 | connector->interlace_allowed = false; | 922 | connector->interlace_allowed = false; |
554 | connector->doublescan_allowed = false; | 923 | connector->doublescan_allowed = false; |
555 | 924 | ||
925 | lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); | ||
926 | intel_output->dev_priv = lvds_priv; | ||
927 | /* create the scaling mode property */ | ||
928 | drm_mode_create_scaling_mode_property(dev); | ||
929 | /* | ||
930 | * the initial panel fitting mode will be FULL_SCREEN. | ||
931 | */ | ||
556 | 932 | ||
933 | drm_connector_attach_property(&intel_output->base, | ||
934 | dev->mode_config.scaling_mode_property, | ||
935 | DRM_MODE_SCALE_FULLSCREEN); | ||
936 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | ||
557 | /* | 937 | /* |
558 | * LVDS discovery: | 938 | * LVDS discovery: |
559 | * 1) check for EDID on DDC | 939 | * 1) check for EDID on DDC |
@@ -649,5 +1029,5 @@ failed: | |||
649 | if (intel_output->ddc_bus) | 1029 | if (intel_output->ddc_bus) |
650 | intel_i2c_destroy(intel_output->ddc_bus); | 1030 | intel_i2c_destroy(intel_output->ddc_bus); |
651 | drm_connector_cleanup(connector); | 1031 | drm_connector_cleanup(connector); |
652 | kfree(connector); | 1032 | kfree(intel_output); |
653 | } | 1033 | } |
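
Both the LVDS path above and the SDVO path below now make a single allocation that holds struct intel_output immediately followed by its private data, which is why the failure path switched from kfree(connector) to kfree(intel_output): the connector is embedded and the whole block came from one kzalloc/kcalloc. The layout trick in isolation, with generic names, is:

    #include <linux/slab.h>

    struct output {
            int type;
            void *dev_priv;         /* points into the same allocation */
    };

    struct lvds_priv {
            int fitting_mode;
    };

    static struct output *output_alloc(void)
    {
            struct output *out;

            /* struct output immediately followed by its private data */
            out = kzalloc(sizeof(struct output) + sizeof(struct lvds_priv),
                          GFP_KERNEL);
            if (!out)
                    return NULL;

            out->dev_priv = (struct lvds_priv *)(out + 1);
            return out;
    }
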
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index e0910fefce87..67e2f4632a24 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) | |||
53 | } | 53 | } |
54 | }; | 54 | }; |
55 | 55 | ||
56 | intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); | 56 | intel_i2c_quirk_set(intel_output->base.dev, true); |
57 | ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); | 57 | ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); |
58 | intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); | 58 | intel_i2c_quirk_set(intel_output->base.dev, false); |
59 | |||
60 | if (ret == 2) | 59 | if (ret == 2) |
61 | return true; | 60 | return true; |
62 | 61 | ||
@@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output) | |||
74 | struct edid *edid; | 73 | struct edid *edid; |
75 | int ret = 0; | 74 | int ret = 0; |
76 | 75 | ||
77 | intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); | 76 | intel_i2c_quirk_set(intel_output->base.dev, true); |
78 | edid = drm_get_edid(&intel_output->base, | 77 | edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); |
79 | &intel_output->ddc_bus->adapter); | 78 | intel_i2c_quirk_set(intel_output->base.dev, false); |
80 | intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); | ||
81 | if (edid) { | 79 | if (edid) { |
82 | drm_mode_connector_update_edid_property(&intel_output->base, | 80 | drm_mode_connector_update_edid_property(&intel_output->base, |
83 | edid); | 81 | edid); |
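
With ddc_bus now a plain struct i2c_adapter pointer, the DDC probe above is an ordinary two-message transfer at the EDID address 0x50: write a zero offset, then read one byte back. Stripped of the driver context it is roughly:

    #include <linux/i2c.h>

    static bool ddc_probe(struct i2c_adapter *adapter)
    {
            u8 out_buf[] = { 0x0, 0x0 };
            u8 buf[2];
            struct i2c_msg msgs[] = {
                    {
                            .addr = 0x50,   /* standard DDC/EDID address */
                            .flags = 0,
                            .len = 1,
                            .buf = out_buf,
                    },
                    {
                            .addr = 0x50,
                            .flags = I2C_M_RD,
                            .len = 1,
                            .buf = buf,
                    }
            };

            /* i2c_transfer() returns the number of messages completed. */
            return i2c_transfer(adapter, msgs, 2) == 2;
    }
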
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 9a00adb3a508..5371d9332554 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drm.h" | 31 | #include "drm.h" |
32 | #include "drm_crtc.h" | 32 | #include "drm_crtc.h" |
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include "drm_edid.h" | ||
34 | #include "i915_drm.h" | 35 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
36 | #include "intel_sdvo_regs.h" | 37 | #include "intel_sdvo_regs.h" |
@@ -38,8 +39,7 @@ | |||
38 | #undef SDVO_DEBUG | 39 | #undef SDVO_DEBUG |
39 | #define I915_SDVO "i915_sdvo" | 40 | #define I915_SDVO "i915_sdvo" |
40 | struct intel_sdvo_priv { | 41 | struct intel_sdvo_priv { |
41 | struct intel_i2c_chan *i2c_bus; | 42 | u8 slave_addr; |
42 | int slaveaddr; | ||
43 | 43 | ||
44 | /* Register for the SDVO device: SDVOB or SDVOC */ | 44 | /* Register for the SDVO device: SDVOB or SDVOC */ |
45 | int output_device; | 45 | int output_device; |
@@ -56,6 +56,12 @@ struct intel_sdvo_priv { | |||
56 | /* Pixel clock limitations reported by the SDVO device, in kHz */ | 56 | /* Pixel clock limitations reported by the SDVO device, in kHz */ |
57 | int pixel_clock_min, pixel_clock_max; | 57 | int pixel_clock_min, pixel_clock_max; |
58 | 58 | ||
59 | /* | ||
60 | * For a multifunction SDVO device, | ||
61 | * this tracks the currently attached outputs. | ||
62 | */ | ||
63 | uint16_t attached_output; | ||
64 | |||
59 | /** | 65 | /** |
60 | * This is set if we're going to treat the device as TV-out. | 66 | * This is set if we're going to treat the device as TV-out. |
61 | * | 67 | * |
@@ -69,12 +75,23 @@ struct intel_sdvo_priv { | |||
69 | * This is set if we treat the device as HDMI, instead of DVI. | 75 | * This is set if we treat the device as HDMI, instead of DVI. |
70 | */ | 76 | */ |
71 | bool is_hdmi; | 77 | bool is_hdmi; |
78 | |||
72 | /** | 79 | /** |
73 | * This is set if we detect output of sdvo device as LVDS. | 80 | * This is set if we detect output of sdvo device as LVDS. |
74 | */ | 81 | */ |
75 | bool is_lvds; | 82 | bool is_lvds; |
76 | 83 | ||
77 | /** | 84 | /** |
85 | * This is sdvo flags for input timing. | ||
86 | */ | ||
87 | uint8_t sdvo_flags; | ||
88 | |||
89 | /** | ||
90 | * This is the SDVO fixed panel mode pointer | ||
91 | */ | ||
92 | struct drm_display_mode *sdvo_lvds_fixed_mode; | ||
93 | |||
94 | /** | ||
78 | * Returned SDTV resolutions allowed for the current format, if the | 95 | * Returned SDTV resolutions allowed for the current format, if the |
79 | * device reported it. | 96 | * device reported it. |
80 | */ | 97 | */ |
@@ -104,6 +121,9 @@ struct intel_sdvo_priv { | |||
104 | u32 save_SDVOX; | 121 | u32 save_SDVOX; |
105 | }; | 122 | }; |
106 | 123 | ||
124 | static bool | ||
125 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); | ||
126 | |||
107 | /** | 127 | /** |
108 | * Writes the SDVOB or SDVOC with the given value, but always writes both | 128 | * Writes the SDVOB or SDVOC with the given value, but always writes both |
109 | * SDVOB and SDVOC to work around apparent hardware issues (according to | 129 | * SDVOB and SDVOC to work around apparent hardware issues (according to |
@@ -146,13 +166,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
146 | 166 | ||
147 | struct i2c_msg msgs[] = { | 167 | struct i2c_msg msgs[] = { |
148 | { | 168 | { |
149 | .addr = sdvo_priv->i2c_bus->slave_addr, | 169 | .addr = sdvo_priv->slave_addr >> 1, |
150 | .flags = 0, | 170 | .flags = 0, |
151 | .len = 1, | 171 | .len = 1, |
152 | .buf = out_buf, | 172 | .buf = out_buf, |
153 | }, | 173 | }, |
154 | { | 174 | { |
155 | .addr = sdvo_priv->i2c_bus->slave_addr, | 175 | .addr = sdvo_priv->slave_addr >> 1, |
156 | .flags = I2C_M_RD, | 176 | .flags = I2C_M_RD, |
157 | .len = 1, | 177 | .len = 1, |
158 | .buf = buf, | 178 | .buf = buf, |
@@ -162,7 +182,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
162 | out_buf[0] = addr; | 182 | out_buf[0] = addr; |
163 | out_buf[1] = 0; | 183 | out_buf[1] = 0; |
164 | 184 | ||
165 | if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2) | 185 | if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) |
166 | { | 186 | { |
167 | *ch = buf[0]; | 187 | *ch = buf[0]; |
168 | return true; | 188 | return true; |
@@ -175,10 +195,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, | |||
175 | static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | 195 | static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, |
176 | u8 ch) | 196 | u8 ch) |
177 | { | 197 | { |
198 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
178 | u8 out_buf[2]; | 199 | u8 out_buf[2]; |
179 | struct i2c_msg msgs[] = { | 200 | struct i2c_msg msgs[] = { |
180 | { | 201 | { |
181 | .addr = intel_output->i2c_bus->slave_addr, | 202 | .addr = sdvo_priv->slave_addr >> 1, |
182 | .flags = 0, | 203 | .flags = 0, |
183 | .len = 2, | 204 | .len = 2, |
184 | .buf = out_buf, | 205 | .buf = out_buf, |
@@ -188,7 +209,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, | |||
188 | out_buf[0] = addr; | 209 | out_buf[0] = addr; |
189 | out_buf[1] = ch; | 210 | out_buf[1] = ch; |
190 | 211 | ||
191 | if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1) | 212 | if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) |
192 | { | 213 | { |
193 | return true; | 214 | return true; |
194 | } | 215 | } |
@@ -592,6 +613,7 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, | |||
592 | uint16_t height) | 613 | uint16_t height) |
593 | { | 614 | { |
594 | struct intel_sdvo_preferred_input_timing_args args; | 615 | struct intel_sdvo_preferred_input_timing_args args; |
616 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | ||
595 | uint8_t status; | 617 | uint8_t status; |
596 | 618 | ||
597 | memset(&args, 0, sizeof(args)); | 619 | memset(&args, 0, sizeof(args)); |
@@ -599,7 +621,12 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, | |||
599 | args.width = width; | 621 | args.width = width; |
600 | args.height = height; | 622 | args.height = height; |
601 | args.interlace = 0; | 623 | args.interlace = 0; |
602 | args.scaled = 0; | 624 | |
625 | if (sdvo_priv->is_lvds && | ||
626 | (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width || | ||
627 | sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) | ||
628 | args.scaled = 1; | ||
629 | |||
603 | intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | 630 | intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, |
604 | &args, sizeof(args)); | 631 | &args, sizeof(args)); |
605 | status = intel_sdvo_read_response(output, NULL, 0); | 632 | status = intel_sdvo_read_response(output, NULL, 0); |
@@ -944,12 +971,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
944 | struct intel_output *output = enc_to_intel_output(encoder); | 971 | struct intel_output *output = enc_to_intel_output(encoder); |
945 | struct intel_sdvo_priv *dev_priv = output->dev_priv; | 972 | struct intel_sdvo_priv *dev_priv = output->dev_priv; |
946 | 973 | ||
947 | if (!dev_priv->is_tv) { | 974 | if (dev_priv->is_tv) { |
948 | /* Make the CRTC code factor in the SDVO pixel multiplier. The | ||
949 | * SDVO device will be told of the multiplier during mode_set. | ||
950 | */ | ||
951 | adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); | ||
952 | } else { | ||
953 | struct intel_sdvo_dtd output_dtd; | 975 | struct intel_sdvo_dtd output_dtd; |
954 | bool success; | 976 | bool success; |
955 | 977 | ||
@@ -980,6 +1002,47 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
980 | intel_sdvo_get_preferred_input_timing(output, | 1002 | intel_sdvo_get_preferred_input_timing(output, |
981 | &input_dtd); | 1003 | &input_dtd); |
982 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | 1004 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
1005 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | ||
1006 | |||
1007 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
1008 | |||
1009 | mode->clock = adjusted_mode->clock; | ||
1010 | |||
1011 | adjusted_mode->clock *= | ||
1012 | intel_sdvo_get_pixel_multiplier(mode); | ||
1013 | } else { | ||
1014 | return false; | ||
1015 | } | ||
1016 | } else if (dev_priv->is_lvds) { | ||
1017 | struct intel_sdvo_dtd output_dtd; | ||
1018 | bool success; | ||
1019 | |||
1020 | drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0); | ||
1021 | /* Set output timings */ | ||
1022 | intel_sdvo_get_dtd_from_mode(&output_dtd, | ||
1023 | dev_priv->sdvo_lvds_fixed_mode); | ||
1024 | |||
1025 | intel_sdvo_set_target_output(output, | ||
1026 | dev_priv->controlled_output); | ||
1027 | intel_sdvo_set_output_timing(output, &output_dtd); | ||
1028 | |||
1029 | /* Set the input timing to the screen. Assume always input 0. */ | ||
1030 | intel_sdvo_set_target_input(output, true, false); | ||
1031 | |||
1032 | |||
1033 | success = intel_sdvo_create_preferred_input_timing( | ||
1034 | output, | ||
1035 | mode->clock / 10, | ||
1036 | mode->hdisplay, | ||
1037 | mode->vdisplay); | ||
1038 | |||
1039 | if (success) { | ||
1040 | struct intel_sdvo_dtd input_dtd; | ||
1041 | |||
1042 | intel_sdvo_get_preferred_input_timing(output, | ||
1043 | &input_dtd); | ||
1044 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | ||
1045 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | ||
983 | 1046 | ||
984 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 1047 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
985 | 1048 | ||
@@ -990,6 +1053,12 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
990 | } else { | 1053 | } else { |
991 | return false; | 1054 | return false; |
992 | } | 1055 | } |
1056 | |||
1057 | } else { | ||
1058 | /* Make the CRTC code factor in the SDVO pixel multiplier. The | ||
1059 | * SDVO device will be told of the multiplier during mode_set. | ||
1060 | */ | ||
1061 | adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); | ||
993 | } | 1062 | } |
994 | return true; | 1063 | return true; |
995 | } | 1064 | } |
@@ -1033,15 +1102,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1033 | 1102 | ||
1034 | /* We have tried to get input timing in mode_fixup, and filled into | 1103 | /* We have tried to get input timing in mode_fixup, and filled into |
1035 | adjusted_mode */ | 1104 | adjusted_mode */ |
1036 | if (sdvo_priv->is_tv) | 1105 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { |
1037 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | 1106 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); |
1038 | else | 1107 | input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags; |
1108 | } else | ||
1039 | intel_sdvo_get_dtd_from_mode(&input_dtd, mode); | 1109 | intel_sdvo_get_dtd_from_mode(&input_dtd, mode); |
1040 | 1110 | ||
1041 | /* If it's a TV, we already set the output timing in mode_fixup. | 1111 | /* If it's a TV, we already set the output timing in mode_fixup. |
1042 | * Otherwise, the output timing is equal to the input timing. | 1112 | * Otherwise, the output timing is equal to the input timing. |
1043 | */ | 1113 | */ |
1044 | if (!sdvo_priv->is_tv) { | 1114 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { |
1045 | /* Set the output timing to the screen */ | 1115 | /* Set the output timing to the screen */ |
1046 | intel_sdvo_set_target_output(output, | 1116 | intel_sdvo_set_target_output(output, |
1047 | sdvo_priv->controlled_output); | 1117 | sdvo_priv->controlled_output); |
@@ -1116,6 +1186,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1116 | sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; | 1186 | sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; |
1117 | } | 1187 | } |
1118 | 1188 | ||
1189 | if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) | ||
1190 | sdvox |= SDVO_STALL_SELECT; | ||
1119 | intel_sdvo_write_sdvox(output, sdvox); | 1191 | intel_sdvo_write_sdvox(output, sdvox); |
1120 | } | 1192 | } |
1121 | 1193 | ||
@@ -1276,6 +1348,17 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, | |||
1276 | if (sdvo_priv->pixel_clock_max < mode->clock) | 1348 | if (sdvo_priv->pixel_clock_max < mode->clock) |
1277 | return MODE_CLOCK_HIGH; | 1349 | return MODE_CLOCK_HIGH; |
1278 | 1350 | ||
1351 | if (sdvo_priv->is_lvds == true) { | ||
1352 | if (sdvo_priv->sdvo_lvds_fixed_mode == NULL) | ||
1353 | return MODE_PANEL; | ||
1354 | |||
1355 | if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay) | ||
1356 | return MODE_PANEL; | ||
1357 | |||
1358 | if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay) | ||
1359 | return MODE_PANEL; | ||
1360 | } | ||
1361 | |||
1279 | return MODE_OK; | 1362 | return MODE_OK; |
1280 | } | 1363 | } |
1281 | 1364 | ||
@@ -1362,42 +1445,96 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
1362 | intel_sdvo_read_response(intel_output, &response, 2); | 1445 | intel_sdvo_read_response(intel_output, &response, 2); |
1363 | } | 1446 | } |
1364 | 1447 | ||
1365 | static void | 1448 | static bool |
1366 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | 1449 | intel_sdvo_multifunc_encoder(struct intel_output *intel_output) |
1450 | { | ||
1451 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1452 | int caps = 0; | ||
1453 | |||
1454 | if (sdvo_priv->caps.output_flags & | ||
1455 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | ||
1456 | caps++; | ||
1457 | if (sdvo_priv->caps.output_flags & | ||
1458 | (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) | ||
1459 | caps++; | ||
1460 | if (sdvo_priv->caps.output_flags & | ||
1461 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) | ||
1462 | caps++; | ||
1463 | if (sdvo_priv->caps.output_flags & | ||
1464 | (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) | ||
1465 | caps++; | ||
1466 | if (sdvo_priv->caps.output_flags & | ||
1467 | (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) | ||
1468 | caps++; | ||
1469 | |||
1470 | if (sdvo_priv->caps.output_flags & | ||
1471 | (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) | ||
1472 | caps++; | ||
1473 | |||
1474 | if (sdvo_priv->caps.output_flags & | ||
1475 | (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) | ||
1476 | caps++; | ||
1477 | |||
1478 | return (caps > 1); | ||
1479 | } | ||
1480 | |||
1481 | enum drm_connector_status | ||
1482 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | ||
1367 | { | 1483 | { |
1368 | struct intel_output *intel_output = to_intel_output(connector); | 1484 | struct intel_output *intel_output = to_intel_output(connector); |
1369 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1485 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; |
1486 | enum drm_connector_status status = connector_status_connected; | ||
1370 | struct edid *edid = NULL; | 1487 | struct edid *edid = NULL; |
1371 | 1488 | ||
1372 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | ||
1373 | edid = drm_get_edid(&intel_output->base, | 1489 | edid = drm_get_edid(&intel_output->base, |
1374 | &intel_output->ddc_bus->adapter); | 1490 | intel_output->ddc_bus); |
1375 | if (edid != NULL) { | 1491 | if (edid != NULL) { |
1376 | sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); | 1492 | /* Don't report the output as connected if it's a DVI-I |
1493 | * connector with a non-digital EDID coming out. | ||
1494 | */ | ||
1495 | if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | ||
1496 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | ||
1497 | sdvo_priv->is_hdmi = | ||
1498 | drm_detect_hdmi_monitor(edid); | ||
1499 | else | ||
1500 | status = connector_status_disconnected; | ||
1501 | } | ||
1502 | |||
1377 | kfree(edid); | 1503 | kfree(edid); |
1378 | intel_output->base.display_info.raw_edid = NULL; | 1504 | intel_output->base.display_info.raw_edid = NULL; |
1379 | } | 1505 | |
1506 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | ||
1507 | status = connector_status_disconnected; | ||
1508 | |||
1509 | return status; | ||
1380 | } | 1510 | } |
1381 | 1511 | ||
1382 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) | 1512 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) |
1383 | { | 1513 | { |
1384 | u8 response[2]; | 1514 | uint16_t response; |
1385 | u8 status; | 1515 | u8 status; |
1386 | struct intel_output *intel_output = to_intel_output(connector); | 1516 | struct intel_output *intel_output = to_intel_output(connector); |
1517 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1387 | 1518 | ||
1388 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | 1519 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); |
1389 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1520 | status = intel_sdvo_read_response(intel_output, &response, 2); |
1390 | 1521 | ||
1391 | DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); | 1522 | DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8); |
1392 | 1523 | ||
1393 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1524 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1394 | return connector_status_unknown; | 1525 | return connector_status_unknown; |
1395 | 1526 | ||
1396 | if ((response[0] != 0) || (response[1] != 0)) { | 1527 | if (response == 0) |
1397 | intel_sdvo_hdmi_sink_detect(connector); | ||
1398 | return connector_status_connected; | ||
1399 | } else | ||
1400 | return connector_status_disconnected; | 1528 | return connector_status_disconnected; |
1529 | |||
1530 | if (intel_sdvo_multifunc_encoder(intel_output) && | ||
1531 | sdvo_priv->attached_output != response) { | ||
1532 | if (sdvo_priv->controlled_output != response && | ||
1533 | intel_sdvo_output_setup(intel_output, response) != true) | ||
1534 | return connector_status_unknown; | ||
1535 | sdvo_priv->attached_output = response; | ||
1536 | } | ||
1537 | return intel_sdvo_hdmi_sink_detect(connector, response); | ||
1401 | } | 1538 | } |
1402 | 1539 | ||
1403 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | 1540 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
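
The DVI-I handling in intel_sdvo_hdmi_sink_detect() above keys off the EDID input descriptor: a TMDS response only counts as connected when the monitor itself reports a digital interface. The check in isolation, using the drm_edid.h definitions:

    #include "drm_edid.h"

    /* Bit 7 of the EDID input byte flags a digital display interface. */
    static bool edid_is_digital(const struct edid *edid)
    {
            return (edid->input & DRM_EDID_INPUT_DIGITAL) != 0;
    }

An analog EDID seen on the TMDS leg therefore falls through to connector_status_disconnected, so the analog side of the connector can claim it instead.
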
@@ -1549,23 +1686,21 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | |||
1549 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | 1686 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) |
1550 | { | 1687 | { |
1551 | struct intel_output *intel_output = to_intel_output(connector); | 1688 | struct intel_output *intel_output = to_intel_output(connector); |
1552 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1553 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1689 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1690 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1691 | struct drm_display_mode *newmode; | ||
1554 | 1692 | ||
1555 | /* | 1693 | /* |
1556 | * Attempt to get the mode list from DDC. | 1694 | * Attempt to get the mode list from DDC. |
1557 | * Assume that the preferred modes are | 1695 | * Assume that the preferred modes are |
1558 | * arranged in priority order. | 1696 | * arranged in priority order. |
1559 | */ | 1697 | */ |
1560 | /* set the bus switch and get the modes */ | ||
1561 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | ||
1562 | intel_ddc_get_modes(intel_output); | 1698 | intel_ddc_get_modes(intel_output); |
1563 | if (list_empty(&connector->probed_modes) == false) | 1699 | if (list_empty(&connector->probed_modes) == false) |
1564 | return; | 1700 | goto end; |
1565 | 1701 | ||
1566 | /* Fetch modes from VBT */ | 1702 | /* Fetch modes from VBT */ |
1567 | if (dev_priv->sdvo_lvds_vbt_mode != NULL) { | 1703 | if (dev_priv->sdvo_lvds_vbt_mode != NULL) { |
1568 | struct drm_display_mode *newmode; | ||
1569 | newmode = drm_mode_duplicate(connector->dev, | 1704 | newmode = drm_mode_duplicate(connector->dev, |
1570 | dev_priv->sdvo_lvds_vbt_mode); | 1705 | dev_priv->sdvo_lvds_vbt_mode); |
1571 | if (newmode != NULL) { | 1706 | if (newmode != NULL) { |
@@ -1575,6 +1710,16 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
1575 | drm_mode_probed_add(connector, newmode); | 1710 | drm_mode_probed_add(connector, newmode); |
1576 | } | 1711 | } |
1577 | } | 1712 | } |
1713 | |||
1714 | end: | ||
1715 | list_for_each_entry(newmode, &connector->probed_modes, head) { | ||
1716 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { | ||
1717 | sdvo_priv->sdvo_lvds_fixed_mode = | ||
1718 | drm_mode_duplicate(connector->dev, newmode); | ||
1719 | break; | ||
1720 | } | ||
1721 | } | ||
1722 | |||
1578 | } | 1723 | } |
1579 | 1724 | ||
1580 | static int intel_sdvo_get_modes(struct drm_connector *connector) | 1725 | static int intel_sdvo_get_modes(struct drm_connector *connector) |
@@ -1597,14 +1742,20 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) | |||
1597 | static void intel_sdvo_destroy(struct drm_connector *connector) | 1742 | static void intel_sdvo_destroy(struct drm_connector *connector) |
1598 | { | 1743 | { |
1599 | struct intel_output *intel_output = to_intel_output(connector); | 1744 | struct intel_output *intel_output = to_intel_output(connector); |
1745 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1600 | 1746 | ||
1601 | if (intel_output->i2c_bus) | 1747 | if (intel_output->i2c_bus) |
1602 | intel_i2c_destroy(intel_output->i2c_bus); | 1748 | intel_i2c_destroy(intel_output->i2c_bus); |
1603 | if (intel_output->ddc_bus) | 1749 | if (intel_output->ddc_bus) |
1604 | intel_i2c_destroy(intel_output->ddc_bus); | 1750 | intel_i2c_destroy(intel_output->ddc_bus); |
1605 | 1751 | ||
1752 | if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) | ||
1753 | drm_mode_destroy(connector->dev, | ||
1754 | sdvo_priv->sdvo_lvds_fixed_mode); | ||
1755 | |||
1606 | drm_sysfs_connector_remove(connector); | 1756 | drm_sysfs_connector_remove(connector); |
1607 | drm_connector_cleanup(connector); | 1757 | drm_connector_cleanup(connector); |
1758 | |||
1608 | kfree(intel_output); | 1759 | kfree(intel_output); |
1609 | } | 1760 | } |
1610 | 1761 | ||
@@ -1709,7 +1860,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) | |||
1709 | 1860 | ||
1710 | list_for_each_entry(connector, | 1861 | list_for_each_entry(connector, |
1711 | &dev->mode_config.connector_list, head) { | 1862 | &dev->mode_config.connector_list, head) { |
1712 | if (to_intel_output(connector)->ddc_bus == chan) { | 1863 | if (to_intel_output(connector)->ddc_bus == &chan->adapter) { |
1713 | intel_output = to_intel_output(connector); | 1864 | intel_output = to_intel_output(connector); |
1714 | break; | 1865 | break; |
1715 | } | 1866 | } |
@@ -1723,7 +1874,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | |||
1723 | struct intel_output *intel_output; | 1874 | struct intel_output *intel_output; |
1724 | struct intel_sdvo_priv *sdvo_priv; | 1875 | struct intel_sdvo_priv *sdvo_priv; |
1725 | struct i2c_algo_bit_data *algo_data; | 1876 | struct i2c_algo_bit_data *algo_data; |
1726 | struct i2c_algorithm *algo; | 1877 | const struct i2c_algorithm *algo; |
1727 | 1878 | ||
1728 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; | 1879 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; |
1729 | intel_output = | 1880 | intel_output = |
@@ -1733,7 +1884,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | |||
1733 | return -EINVAL; | 1884 | return -EINVAL; |
1734 | 1885 | ||
1735 | sdvo_priv = intel_output->dev_priv; | 1886 | sdvo_priv = intel_output->dev_priv; |
1736 | algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo; | 1887 | algo = intel_output->i2c_bus->algo; |
1737 | 1888 | ||
1738 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); | 1889 | intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); |
1739 | return algo->master_xfer(i2c_adap, msgs, num); | 1890 | return algo->master_xfer(i2c_adap, msgs, num); |
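
intel_sdvo_master_xfer() above is installed by copying the bit-banging algorithm's functionality() hook into intel_sdvo_i2c_bit_algo and pointing the DDC adapter's algo at the wrapper, so every DDC transaction first flips the SDVO control-bus switch. The shape of that wrapper, reduced to its essentials (select_ddc_bus() is a stand-in for the real bus-switch command):

    #include <linux/i2c.h>

    /* Stand-in for the bus-switch command sent to the SDVO encoder. */
    static void select_ddc_bus(struct i2c_adapter *adap)
    {
    }

    /* Saved algorithm of the underlying GPIO bit-banging bus. */
    static const struct i2c_algorithm *bit_algo;

    static int ddc_wrapper_xfer(struct i2c_adapter *adap,
                                struct i2c_msg *msgs, int num)
    {
            select_ddc_bus(adap);           /* switch the mux to DDC first */
            return bit_algo->master_xfer(adap, msgs, num);
    }

    static struct i2c_algorithm ddc_wrapper_algo = {
            .master_xfer = ddc_wrapper_xfer,
            /* .functionality is copied from bit_algo before installing */
    };

    static void install_ddc_wrapper(struct i2c_adapter *ddc_bus)
    {
            bit_algo = ddc_bus->algo;
            ddc_wrapper_algo.functionality = bit_algo->functionality;
            ddc_bus->algo = &ddc_wrapper_algo;
    }
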
@@ -1780,18 +1931,101 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) | |||
1780 | return 0x72; | 1931 | return 0x72; |
1781 | } | 1932 | } |
1782 | 1933 | ||
1934 | static bool | ||
1935 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | ||
1936 | { | ||
1937 | struct drm_connector *connector = &intel_output->base; | ||
1938 | struct drm_encoder *encoder = &intel_output->enc; | ||
1939 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1940 | bool ret = true, registered = false; | ||
1941 | |||
1942 | sdvo_priv->is_tv = false; | ||
1943 | intel_output->needs_tv_clock = false; | ||
1944 | sdvo_priv->is_lvds = false; | ||
1945 | |||
1946 | if (device_is_registered(&connector->kdev)) { | ||
1947 | drm_sysfs_connector_remove(connector); | ||
1948 | registered = true; | ||
1949 | } | ||
1950 | |||
1951 | if (flags & | ||
1952 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | ||
1953 | if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) | ||
1954 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; | ||
1955 | else | ||
1956 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; | ||
1957 | |||
1958 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | ||
1959 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | ||
1960 | |||
1961 | if (intel_sdvo_get_supp_encode(intel_output, | ||
1962 | &sdvo_priv->encode) && | ||
1963 | intel_sdvo_get_digital_encoding_mode(intel_output) && | ||
1964 | sdvo_priv->is_hdmi) { | ||
1965 | /* enable hdmi encoding mode if supported */ | ||
1966 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | ||
1967 | intel_sdvo_set_colorimetry(intel_output, | ||
1968 | SDVO_COLORIMETRY_RGB256); | ||
1969 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | ||
1970 | } | ||
1971 | } else if (flags & SDVO_OUTPUT_SVID0) { | ||
1972 | |||
1973 | sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; | ||
1974 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
1975 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
1976 | sdvo_priv->is_tv = true; | ||
1977 | intel_output->needs_tv_clock = true; | ||
1978 | } else if (flags & SDVO_OUTPUT_RGB0) { | ||
1979 | |||
1980 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | ||
1981 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | ||
1982 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1983 | } else if (flags & SDVO_OUTPUT_RGB1) { | ||
1984 | |||
1985 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | ||
1986 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | ||
1987 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1988 | } else if (flags & SDVO_OUTPUT_LVDS0) { | ||
1989 | |||
1990 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | ||
1991 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1992 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1993 | sdvo_priv->is_lvds = true; | ||
1994 | } else if (flags & SDVO_OUTPUT_LVDS1) { | ||
1995 | |||
1996 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; | ||
1997 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1998 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1999 | sdvo_priv->is_lvds = true; | ||
2000 | } else { | ||
2001 | |||
2002 | unsigned char bytes[2]; | ||
2003 | |||
2004 | sdvo_priv->controlled_output = 0; | ||
2005 | memcpy(bytes, &sdvo_priv->caps.output_flags, 2); | ||
2006 | DRM_DEBUG_KMS(I915_SDVO, | ||
2007 | "%s: Unknown SDVO output type (0x%02x%02x)\n", | ||
2008 | SDVO_NAME(sdvo_priv), | ||
2009 | bytes[0], bytes[1]); | ||
2010 | ret = false; | ||
2011 | } | ||
2012 | |||
2013 | if (ret && registered) | ||
2014 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; | ||
2015 | |||
2016 | |||
2017 | return ret; | ||
2018 | |||
2019 | } | ||
2020 | |||
1783 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2021 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
1784 | { | 2022 | { |
1785 | struct drm_connector *connector; | 2023 | struct drm_connector *connector; |
1786 | struct intel_output *intel_output; | 2024 | struct intel_output *intel_output; |
1787 | struct intel_sdvo_priv *sdvo_priv; | 2025 | struct intel_sdvo_priv *sdvo_priv; |
1788 | struct intel_i2c_chan *i2cbus = NULL; | 2026 | |
1789 | struct intel_i2c_chan *ddcbus = NULL; | ||
1790 | int connector_type; | ||
1791 | u8 ch[0x40]; | 2027 | u8 ch[0x40]; |
1792 | int i; | 2028 | int i; |
1793 | int encoder_type, output_id; | ||
1794 | u8 slave_addr; | ||
1795 | 2029 | ||
1796 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | 2030 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); |
1797 | if (!intel_output) { | 2031 | if (!intel_output) { |
@@ -1799,29 +2033,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1799 | } | 2033 | } |
1800 | 2034 | ||
1801 | sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); | 2035 | sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); |
2036 | sdvo_priv->output_device = output_device; | ||
2037 | |||
2038 | intel_output->dev_priv = sdvo_priv; | ||
1802 | intel_output->type = INTEL_OUTPUT_SDVO; | 2039 | intel_output->type = INTEL_OUTPUT_SDVO; |
1803 | 2040 | ||
1804 | /* setup the DDC bus. */ | 2041 | /* setup the DDC bus. */ |
1805 | if (output_device == SDVOB) | 2042 | if (output_device == SDVOB) |
1806 | i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); | 2043 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); |
1807 | else | 2044 | else |
1808 | i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); | 2045 | intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); |
1809 | 2046 | ||
1810 | if (!i2cbus) | 2047 | if (!intel_output->i2c_bus) |
1811 | goto err_inteloutput; | 2048 | goto err_inteloutput; |
1812 | 2049 | ||
1813 | slave_addr = intel_sdvo_get_slave_addr(dev, output_device); | 2050 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); |
1814 | sdvo_priv->i2c_bus = i2cbus; | ||
1815 | 2051 | ||
1816 | if (output_device == SDVOB) { | 2052 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ |
1817 | output_id = 1; | 2053 | intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; |
1818 | } else { | ||
1819 | output_id = 2; | ||
1820 | } | ||
1821 | sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1; | ||
1822 | sdvo_priv->output_device = output_device; | ||
1823 | intel_output->i2c_bus = i2cbus; | ||
1824 | intel_output->dev_priv = sdvo_priv; | ||
1825 | 2054 | ||
1826 | /* Read the regs to test if we can talk to the device */ | 2055 | /* Read the regs to test if we can talk to the device */ |
1827 | for (i = 0; i < 0x40; i++) { | 2056 | for (i = 0; i < 0x40; i++) { |
@@ -1835,101 +2064,39 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1835 | 2064 | ||
1836 | /* setup the DDC bus. */ | 2065 | /* setup the DDC bus. */ |
1837 | if (output_device == SDVOB) | 2066 | if (output_device == SDVOB) |
1838 | ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2067 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
1839 | else | 2068 | else |
1840 | ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2069 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
1841 | 2070 | ||
1842 | if (ddcbus == NULL) | 2071 | if (intel_output->ddc_bus == NULL) |
1843 | goto err_i2c; | 2072 | goto err_i2c; |
1844 | 2073 | ||
1845 | intel_sdvo_i2c_bit_algo.functionality = | 2074 | /* Wrap with our custom algo which switches to DDC mode */ |
1846 | intel_output->i2c_bus->adapter.algo->functionality; | 2075 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; |
1847 | ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo; | ||
1848 | intel_output->ddc_bus = ddcbus; | ||
1849 | 2076 | ||
1850 | /* In default case sdvo lvds is false */ | 2077 | /* In default case sdvo lvds is false */ |
1851 | sdvo_priv->is_lvds = false; | ||
1852 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); | 2078 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); |
1853 | 2079 | ||
1854 | if (sdvo_priv->caps.output_flags & | 2080 | if (intel_sdvo_output_setup(intel_output, |
1855 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | 2081 | sdvo_priv->caps.output_flags) != true) { |
1856 | if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) | 2082 | DRM_DEBUG("SDVO output failed to setup on SDVO%c\n", |
1857 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; | 2083 | output_device == SDVOB ? 'B' : 'C'); |
1858 | else | ||
1859 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; | ||
1860 | |||
1861 | encoder_type = DRM_MODE_ENCODER_TMDS; | ||
1862 | connector_type = DRM_MODE_CONNECTOR_DVID; | ||
1863 | |||
1864 | if (intel_sdvo_get_supp_encode(intel_output, | ||
1865 | &sdvo_priv->encode) && | ||
1866 | intel_sdvo_get_digital_encoding_mode(intel_output) && | ||
1867 | sdvo_priv->is_hdmi) { | ||
1868 | /* enable hdmi encoding mode if supported */ | ||
1869 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | ||
1870 | intel_sdvo_set_colorimetry(intel_output, | ||
1871 | SDVO_COLORIMETRY_RGB256); | ||
1872 | connector_type = DRM_MODE_CONNECTOR_HDMIA; | ||
1873 | } | ||
1874 | } | ||
1875 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) | ||
1876 | { | ||
1877 | sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; | ||
1878 | encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
1879 | connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
1880 | sdvo_priv->is_tv = true; | ||
1881 | intel_output->needs_tv_clock = true; | ||
1882 | } | ||
1883 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) | ||
1884 | { | ||
1885 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | ||
1886 | encoder_type = DRM_MODE_ENCODER_DAC; | ||
1887 | connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1888 | } | ||
1889 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) | ||
1890 | { | ||
1891 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | ||
1892 | encoder_type = DRM_MODE_ENCODER_DAC; | ||
1893 | connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1894 | } | ||
1895 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) | ||
1896 | { | ||
1897 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | ||
1898 | encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1899 | connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1900 | sdvo_priv->is_lvds = true; | ||
1901 | } | ||
1902 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) | ||
1903 | { | ||
1904 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; | ||
1905 | encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1906 | connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1907 | sdvo_priv->is_lvds = true; | ||
1908 | } | ||
1909 | else | ||
1910 | { | ||
1911 | unsigned char bytes[2]; | ||
1912 | |||
1913 | sdvo_priv->controlled_output = 0; | ||
1914 | memcpy (bytes, &sdvo_priv->caps.output_flags, 2); | ||
1915 | DRM_DEBUG_KMS(I915_SDVO, | ||
1916 | "%s: Unknown SDVO output type (0x%02x%02x)\n", | ||
1917 | SDVO_NAME(sdvo_priv), | ||
1918 | bytes[0], bytes[1]); | ||
1919 | encoder_type = DRM_MODE_ENCODER_NONE; | ||
1920 | connector_type = DRM_MODE_CONNECTOR_Unknown; | ||
1921 | goto err_i2c; | 2084 | goto err_i2c; |
1922 | } | 2085 | } |
1923 | 2086 | ||
2087 | |||
1924 | connector = &intel_output->base; | 2088 | connector = &intel_output->base; |
1925 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, | 2089 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, |
1926 | connector_type); | 2090 | connector->connector_type); |
2091 | |||
1927 | drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); | 2092 | drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); |
1928 | connector->interlace_allowed = 0; | 2093 | connector->interlace_allowed = 0; |
1929 | connector->doublescan_allowed = 0; | 2094 | connector->doublescan_allowed = 0; |
1930 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 2095 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
1931 | 2096 | ||
1932 | drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); | 2097 | drm_encoder_init(dev, &intel_output->enc, |
2098 | &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); | ||
2099 | |||
1933 | drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); | 2100 | drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); |
1934 | 2101 | ||
1935 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 2102 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); |
@@ -1965,9 +2132,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1965 | return true; | 2132 | return true; |
1966 | 2133 | ||
1967 | err_i2c: | 2134 | err_i2c: |
1968 | if (ddcbus != NULL) | 2135 | if (intel_output->ddc_bus != NULL) |
1969 | intel_i2c_destroy(intel_output->ddc_bus); | 2136 | intel_i2c_destroy(intel_output->ddc_bus); |
1970 | intel_i2c_destroy(intel_output->i2c_bus); | 2137 | if (intel_output->i2c_bus != NULL) |
2138 | intel_i2c_destroy(intel_output->i2c_bus); | ||
1971 | err_inteloutput: | 2139 | err_inteloutput: |
1972 | kfree(intel_output); | 2140 | kfree(intel_output); |
1973 | 2141 | ||
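The rework above drops the local i2cbus/ddcbus temporaries, stores both adapters directly in intel_output, and reuses the bit-banging functionality() hook for the custom DDC algorithm. A minimal sketch of that wrapper pattern, assuming the custom transfer hook switches the SDVO control bus into DDC mode before delegating to the saved bit-bang implementation; the helper and variable names here are placeholders, not the driver's actual symbols:

#include <linux/i2c.h>

/* Illustrative wrapper: every DDC transfer first flips the SDVO control
 * bus into DDC mode, then hands the message list to the GPIO bit-bang
 * algorithm whose pointer was saved at init time. */
static const struct i2c_algorithm *saved_bit_algo;

static void sdvo_switch_to_ddc(struct i2c_adapter *adap)
{
	/* would issue the SDVO control-bus-switch command here (assumed) */
}

static int sdvo_ddc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	sdvo_switch_to_ddc(adap);
	return saved_bit_algo->master_xfer(adap, msgs, num);
}

static struct i2c_algorithm sdvo_ddc_algo = {
	.master_xfer = sdvo_ddc_xfer,
	/* .functionality is copied from the bit-bang algo at init time, which
	 * is what the hunk above does for intel_sdvo_i2c_bit_algo. */
};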
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 193938b7d7f9..ba5cdf8ae40b 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h | |||
@@ -715,6 +715,7 @@ struct intel_sdvo_enhancements_arg { | |||
715 | #define SDVO_HBUF_TX_ONCE (2 << 6) | 715 | #define SDVO_HBUF_TX_ONCE (2 << 6) |
716 | #define SDVO_HBUF_TX_VSYNC (3 << 6) | 716 | #define SDVO_HBUF_TX_VSYNC (3 << 6) |
717 | #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c | 717 | #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c |
718 | #define SDVO_NEED_TO_STALL (1 << 7) | ||
718 | 719 | ||
719 | struct intel_sdvo_encode{ | 720 | struct intel_sdvo_encode{ |
720 | u8 dvi_rev; | 721 | u8 dvi_rev; |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index ea68992e4416..da4ab4dc1630 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | |||
1383 | /* | 1383 | /* |
1384 | * Detect TV by polling | 1384 | * Detect TV by polling |
1385 | */ | 1385 | */ |
1386 | if (intel_output->load_detect_temp) { | 1386 | save_tv_dac = tv_dac; |
1387 | /* TV not currently running, prod it with destructive detect */ | 1387 | tv_ctl = I915_READ(TV_CTL); |
1388 | save_tv_dac = tv_dac; | 1388 | save_tv_ctl = tv_ctl; |
1389 | tv_ctl = I915_READ(TV_CTL); | 1389 | tv_ctl &= ~TV_ENC_ENABLE; |
1390 | save_tv_ctl = tv_ctl; | 1390 | tv_ctl &= ~TV_TEST_MODE_MASK; |
1391 | tv_ctl &= ~TV_ENC_ENABLE; | 1391 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; |
1392 | tv_ctl &= ~TV_TEST_MODE_MASK; | 1392 | tv_dac &= ~TVDAC_SENSE_MASK; |
1393 | tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; | 1393 | tv_dac &= ~DAC_A_MASK; |
1394 | tv_dac &= ~TVDAC_SENSE_MASK; | 1394 | tv_dac &= ~DAC_B_MASK; |
1395 | tv_dac &= ~DAC_A_MASK; | 1395 | tv_dac &= ~DAC_C_MASK; |
1396 | tv_dac &= ~DAC_B_MASK; | 1396 | tv_dac |= (TVDAC_STATE_CHG_EN | |
1397 | tv_dac &= ~DAC_C_MASK; | 1397 | TVDAC_A_SENSE_CTL | |
1398 | tv_dac |= (TVDAC_STATE_CHG_EN | | 1398 | TVDAC_B_SENSE_CTL | |
1399 | TVDAC_A_SENSE_CTL | | 1399 | TVDAC_C_SENSE_CTL | |
1400 | TVDAC_B_SENSE_CTL | | 1400 | DAC_CTL_OVERRIDE | |
1401 | TVDAC_C_SENSE_CTL | | 1401 | DAC_A_0_7_V | |
1402 | DAC_CTL_OVERRIDE | | 1402 | DAC_B_0_7_V | |
1403 | DAC_A_0_7_V | | 1403 | DAC_C_0_7_V); |
1404 | DAC_B_0_7_V | | 1404 | I915_WRITE(TV_CTL, tv_ctl); |
1405 | DAC_C_0_7_V); | 1405 | I915_WRITE(TV_DAC, tv_dac); |
1406 | I915_WRITE(TV_CTL, tv_ctl); | 1406 | intel_wait_for_vblank(dev); |
1407 | I915_WRITE(TV_DAC, tv_dac); | 1407 | tv_dac = I915_READ(TV_DAC); |
1408 | intel_wait_for_vblank(dev); | 1408 | I915_WRITE(TV_DAC, save_tv_dac); |
1409 | tv_dac = I915_READ(TV_DAC); | 1409 | I915_WRITE(TV_CTL, save_tv_ctl); |
1410 | I915_WRITE(TV_DAC, save_tv_dac); | 1410 | intel_wait_for_vblank(dev); |
1411 | I915_WRITE(TV_CTL, save_tv_ctl); | ||
1412 | intel_wait_for_vblank(dev); | ||
1413 | } | ||
1414 | /* | 1411 | /* |
1415 | * A B C | 1412 | * A B C |
1416 | * 0 1 1 Composite | 1413 | * 0 1 1 Composite |
@@ -1493,6 +1490,27 @@ static struct input_res { | |||
1493 | {"1920x1080", 1920, 1080}, | 1490 | {"1920x1080", 1920, 1080}, |
1494 | }; | 1491 | }; |
1495 | 1492 | ||
1493 | /* | ||
1494 | * Choose the preferred mode according to the line count of the TV format | ||
1495 | */ | ||
1496 | static void | ||
1497 | intel_tv_chose_preferred_modes(struct drm_connector *connector, | ||
1498 | struct drm_display_mode *mode_ptr) | ||
1499 | { | ||
1500 | struct intel_output *intel_output = to_intel_output(connector); | ||
1501 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | ||
1502 | |||
1503 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) | ||
1504 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | ||
1505 | else if (tv_mode->nbr_end > 480) { | ||
1506 | if (tv_mode->progressive == true && tv_mode->nbr_end < 720) { | ||
1507 | if (mode_ptr->vdisplay == 720) | ||
1508 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | ||
1509 | } else if (mode_ptr->vdisplay == 1080) | ||
1510 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | ||
1511 | } | ||
1512 | } | ||
1513 | |||
1496 | /** | 1514 | /** |
1497 | * Stub get_modes function. | 1515 | * Stub get_modes function. |
1498 | * | 1516 | * |
@@ -1547,6 +1565,7 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1547 | mode_ptr->clock = (int) tmp; | 1565 | mode_ptr->clock = (int) tmp; |
1548 | 1566 | ||
1549 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; | 1567 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; |
1568 | intel_tv_chose_preferred_modes(connector, mode_ptr); | ||
1550 | drm_mode_probed_add(connector, mode_ptr); | 1569 | drm_mode_probed_add(connector, mode_ptr); |
1551 | count++; | 1570 | count++; |
1552 | } | 1571 | } |
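The new intel_tv_chose_preferred_modes() helper above marks one probed mode as preferred based on the active line count of the current TV format. A standalone restatement of that rule, with nbr_end values that are assumptions picked for illustration (roughly 479 for 480i, 719 for 720p, 1079 for 1080i):

#include <stdbool.h>
#include <stdio.h>

/* Same decision as the helper above, outside the driver. */
static bool is_preferred(int nbr_end, bool progressive, int vdisplay)
{
	if (nbr_end < 480)
		return vdisplay == 480;
	if (nbr_end > 480) {
		if (progressive && nbr_end < 720)
			return vdisplay == 720;
		return vdisplay == 1080;
	}
	return false;
}

int main(void)
{
	printf("480i  prefers  480: %d\n", is_preferred(479, false, 480));
	printf("720p  prefers  720: %d\n", is_preferred(719, true, 720));
	printf("1080i prefers 1080: %d\n", is_preferred(1079, false, 1080));
	return 0;
}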
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 5fae1e074b4b..013d38059943 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \ | |||
13 | radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ | 13 | radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ |
14 | radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ | 14 | radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ |
15 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ | 15 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ |
16 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o | 16 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \ |
17 | radeon_test.o | ||
17 | 18 | ||
18 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 19 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
19 | 20 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c0080cc9bf8d..74d034f77c6b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -31,6 +31,132 @@ | |||
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include "atom-bits.h" | 32 | #include "atom-bits.h" |
33 | 33 | ||
34 | static void atombios_overscan_setup(struct drm_crtc *crtc, | ||
35 | struct drm_display_mode *mode, | ||
36 | struct drm_display_mode *adjusted_mode) | ||
37 | { | ||
38 | struct drm_device *dev = crtc->dev; | ||
39 | struct radeon_device *rdev = dev->dev_private; | ||
40 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
41 | SET_CRTC_OVERSCAN_PS_ALLOCATION args; | ||
42 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); | ||
43 | int a1, a2; | ||
44 | |||
45 | memset(&args, 0, sizeof(args)); | ||
46 | |||
47 | args.usOverscanRight = 0; | ||
48 | args.usOverscanLeft = 0; | ||
49 | args.usOverscanBottom = 0; | ||
50 | args.usOverscanTop = 0; | ||
51 | args.ucCRTC = radeon_crtc->crtc_id; | ||
52 | |||
53 | switch (radeon_crtc->rmx_type) { | ||
54 | case RMX_CENTER: | ||
55 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
56 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
57 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
58 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
59 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
60 | break; | ||
61 | case RMX_ASPECT: | ||
62 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | ||
63 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | ||
64 | |||
65 | if (a1 > a2) { | ||
66 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
67 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
68 | } else if (a2 > a1) { | ||
69 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
70 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
71 | } | ||
72 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
73 | break; | ||
74 | case RMX_FULL: | ||
75 | default: | ||
76 | args.usOverscanRight = 0; | ||
77 | args.usOverscanLeft = 0; | ||
78 | args.usOverscanBottom = 0; | ||
79 | args.usOverscanTop = 0; | ||
80 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
81 | break; | ||
82 | } | ||
83 | } | ||
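For RMX_ASPECT the borders come from cross-multiplying the requested mode against the panel timing: a1 > a2 means the source is narrower than the panel (pillarbox), a2 > a1 means it is wider (letterbox). The a2 > a1 branch above appears to store a vertical border in the horizontal fields; the self-contained sketch below assumes the symmetric intent, and the resolutions are arbitrary examples:

#include <stdio.h>

/* Centred borders that preserve the aspect ratio of a src_w x src_h mode
 * shown inside a dst_w x dst_h panel timing. */
static void aspect_borders(int src_w, int src_h, int dst_w, int dst_h)
{
	long a1 = (long)src_h * dst_w;
	long a2 = (long)dst_h * src_w;
	int lr = 0, tb = 0;

	if (a1 > a2)		/* source narrower: pillarbox, bars left/right */
		lr = (dst_w - (int)(a2 / src_h)) / 2;
	else if (a2 > a1)	/* source wider: letterbox, bars top/bottom */
		tb = (dst_h - (int)(a1 / src_w)) / 2;

	printf("%dx%d on %dx%d -> left/right %d, top/bottom %d\n",
	       src_w, src_h, dst_w, dst_h, lr, tb);
}

int main(void)
{
	aspect_borders(1280, 720, 1280, 1024);	/* 152-line bars top/bottom */
	aspect_borders(1024, 768, 1920, 1080);	/* 240-pixel bars left/right */
	return 0;
}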
84 | |||
85 | static void atombios_scaler_setup(struct drm_crtc *crtc) | ||
86 | { | ||
87 | struct drm_device *dev = crtc->dev; | ||
88 | struct radeon_device *rdev = dev->dev_private; | ||
89 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
90 | ENABLE_SCALER_PS_ALLOCATION args; | ||
91 | int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); | ||
92 | /* fixme - fill in enc_priv for atom dac */ | ||
93 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
94 | |||
95 | if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) | ||
96 | return; | ||
97 | |||
98 | memset(&args, 0, sizeof(args)); | ||
99 | |||
100 | args.ucScaler = radeon_crtc->crtc_id; | ||
101 | |||
102 | if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) { | ||
103 | switch (tv_std) { | ||
104 | case TV_STD_NTSC: | ||
105 | default: | ||
106 | args.ucTVStandard = ATOM_TV_NTSC; | ||
107 | break; | ||
108 | case TV_STD_PAL: | ||
109 | args.ucTVStandard = ATOM_TV_PAL; | ||
110 | break; | ||
111 | case TV_STD_PAL_M: | ||
112 | args.ucTVStandard = ATOM_TV_PALM; | ||
113 | break; | ||
114 | case TV_STD_PAL_60: | ||
115 | args.ucTVStandard = ATOM_TV_PAL60; | ||
116 | break; | ||
117 | case TV_STD_NTSC_J: | ||
118 | args.ucTVStandard = ATOM_TV_NTSCJ; | ||
119 | break; | ||
120 | case TV_STD_SCART_PAL: | ||
121 | args.ucTVStandard = ATOM_TV_PAL; /* ??? */ | ||
122 | break; | ||
123 | case TV_STD_SECAM: | ||
124 | args.ucTVStandard = ATOM_TV_SECAM; | ||
125 | break; | ||
126 | case TV_STD_PAL_CN: | ||
127 | args.ucTVStandard = ATOM_TV_PALCN; | ||
128 | break; | ||
129 | } | ||
130 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
131 | } else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) { | ||
132 | args.ucTVStandard = ATOM_TV_CV; | ||
133 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
134 | } else { | ||
135 | switch (radeon_crtc->rmx_type) { | ||
136 | case RMX_FULL: | ||
137 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
138 | break; | ||
139 | case RMX_CENTER: | ||
140 | args.ucEnable = ATOM_SCALER_CENTER; | ||
141 | break; | ||
142 | case RMX_ASPECT: | ||
143 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
144 | break; | ||
145 | default: | ||
146 | if (ASIC_IS_AVIVO(rdev)) | ||
147 | args.ucEnable = ATOM_SCALER_DISABLE; | ||
148 | else | ||
149 | args.ucEnable = ATOM_SCALER_CENTER; | ||
150 | break; | ||
151 | } | ||
152 | } | ||
153 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
154 | if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) | ||
155 | && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { | ||
156 | atom_rv515_force_tv_scaler(rdev); | ||
157 | } | ||
158 | } | ||
159 | |||
34 | static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) | 160 | static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) |
35 | { | 161 | { |
36 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 162 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
@@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
203 | if (ASIC_IS_AVIVO(rdev)) { | 329 | if (ASIC_IS_AVIVO(rdev)) { |
204 | uint32_t ss_cntl; | 330 | uint32_t ss_cntl; |
205 | 331 | ||
332 | if ((rdev->family == CHIP_RS600) || | ||
333 | (rdev->family == CHIP_RS690) || | ||
334 | (rdev->family == CHIP_RS740)) | ||
335 | pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | | ||
336 | RADEON_PLL_PREFER_CLOSEST_LOWER); | ||
337 | |||
206 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | 338 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
207 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 339 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
208 | else | 340 | else |
@@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
321 | struct drm_gem_object *obj; | 453 | struct drm_gem_object *obj; |
322 | struct drm_radeon_gem_object *obj_priv; | 454 | struct drm_radeon_gem_object *obj_priv; |
323 | uint64_t fb_location; | 455 | uint64_t fb_location; |
324 | uint32_t fb_format, fb_pitch_pixels; | 456 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
325 | 457 | ||
326 | if (!crtc->fb) | 458 | if (!crtc->fb) |
327 | return -EINVAL; | 459 | return -EINVAL; |
@@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
358 | return -EINVAL; | 490 | return -EINVAL; |
359 | } | 491 | } |
360 | 492 | ||
361 | /* TODO tiling */ | 493 | radeon_object_get_tiling_flags(obj->driver_private, |
494 | &tiling_flags, NULL); | ||
495 | if (tiling_flags & RADEON_TILING_MACRO) | ||
496 | fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; | ||
497 | |||
498 | if (tiling_flags & RADEON_TILING_MICRO) | ||
499 | fb_format |= AVIVO_D1GRPH_TILED; | ||
500 | |||
362 | if (radeon_crtc->crtc_id == 0) | 501 | if (radeon_crtc->crtc_id == 0) |
363 | WREG32(AVIVO_D1VGA_CONTROL, 0); | 502 | WREG32(AVIVO_D1VGA_CONTROL, 0); |
364 | else | 503 | else |
@@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
509 | radeon_crtc_set_base(crtc, x, y, old_fb); | 648 | radeon_crtc_set_base(crtc, x, y, old_fb); |
510 | radeon_legacy_atom_set_surface(crtc); | 649 | radeon_legacy_atom_set_surface(crtc); |
511 | } | 650 | } |
651 | atombios_overscan_setup(crtc, mode, adjusted_mode); | ||
652 | atombios_scaler_setup(crtc); | ||
653 | radeon_bandwidth_update(rdev); | ||
512 | return 0; | 654 | return 0; |
513 | } | 655 | } |
514 | 656 | ||
@@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
516 | struct drm_display_mode *mode, | 658 | struct drm_display_mode *mode, |
517 | struct drm_display_mode *adjusted_mode) | 659 | struct drm_display_mode *adjusted_mode) |
518 | { | 660 | { |
661 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | ||
662 | return false; | ||
519 | return true; | 663 | return true; |
520 | } | 664 | } |
521 | 665 | ||
@@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev, | |||
548 | AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; | 692 | AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; |
549 | drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); | 693 | drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); |
550 | } | 694 | } |
551 | |||
552 | void radeon_init_disp_bw_avivo(struct drm_device *dev, | ||
553 | struct drm_display_mode *mode1, | ||
554 | uint32_t pixel_bytes1, | ||
555 | struct drm_display_mode *mode2, | ||
556 | uint32_t pixel_bytes2) | ||
557 | { | ||
558 | struct radeon_device *rdev = dev->dev_private; | ||
559 | fixed20_12 min_mem_eff; | ||
560 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff; | ||
561 | fixed20_12 sclk_ff, mclk_ff; | ||
562 | uint32_t dc_lb_memory_split, temp; | ||
563 | |||
564 | min_mem_eff.full = rfixed_const_8(0); | ||
565 | if (rdev->disp_priority == 2) { | ||
566 | uint32_t mc_init_misc_lat_timer = 0; | ||
567 | if (rdev->family == CHIP_RV515) | ||
568 | mc_init_misc_lat_timer = | ||
569 | RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER); | ||
570 | else if (rdev->family == CHIP_RS690) | ||
571 | mc_init_misc_lat_timer = | ||
572 | RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER); | ||
573 | |||
574 | mc_init_misc_lat_timer &= | ||
575 | ~(R300_MC_DISP1R_INIT_LAT_MASK << | ||
576 | R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
577 | mc_init_misc_lat_timer &= | ||
578 | ~(R300_MC_DISP0R_INIT_LAT_MASK << | ||
579 | R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
580 | |||
581 | if (mode2) | ||
582 | mc_init_misc_lat_timer |= | ||
583 | (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
584 | if (mode1) | ||
585 | mc_init_misc_lat_timer |= | ||
586 | (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
587 | |||
588 | if (rdev->family == CHIP_RV515) | ||
589 | WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER, | ||
590 | mc_init_misc_lat_timer); | ||
591 | else if (rdev->family == CHIP_RS690) | ||
592 | WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER, | ||
593 | mc_init_misc_lat_timer); | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * determine if there is enough bw for current mode | ||
598 | */ | ||
599 | temp_ff.full = rfixed_const(100); | ||
600 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
601 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
602 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
603 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
604 | |||
605 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
606 | temp_ff.full = rfixed_const(temp); | ||
607 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
608 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
609 | |||
610 | pix_clk.full = 0; | ||
611 | pix_clk2.full = 0; | ||
612 | peak_disp_bw.full = 0; | ||
613 | if (mode1) { | ||
614 | temp_ff.full = rfixed_const(1000); | ||
615 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
616 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
617 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
618 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
619 | } | ||
620 | if (mode2) { | ||
621 | temp_ff.full = rfixed_const(1000); | ||
622 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
623 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
624 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
625 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
626 | } | ||
627 | |||
628 | if (peak_disp_bw.full >= mem_bw.full) { | ||
629 | DRM_ERROR | ||
630 | ("You may not have enough display bandwidth for current mode\n" | ||
631 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | ||
632 | printk("peak disp bw %d, mem_bw %d\n", | ||
633 | rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw)); | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * Line Buffer Setup | ||
638 | * There is a single line buffer shared by both display controllers. | ||
639 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display | ||
640 | * controllers. The partitioning can either be done manually or via one of four | ||
641 | * preset allocations specified in bits 1:0: | ||
642 | * 0 - line buffer is divided in half and shared between each display controller | ||
643 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | ||
644 | * 2 - D1 gets the whole buffer | ||
645 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | ||
646 | * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual allocation mode. | ||
647 | * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits | ||
648 | * 14:4; D2 allocation follows D1. | ||
649 | */ | ||
650 | |||
651 | /* is auto or manual better ? */ | ||
652 | dc_lb_memory_split = | ||
653 | RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK; | ||
654 | dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
655 | #if 1 | ||
656 | /* auto */ | ||
657 | if (mode1 && mode2) { | ||
658 | if (mode1->hdisplay > mode2->hdisplay) { | ||
659 | if (mode1->hdisplay > 2560) | ||
660 | dc_lb_memory_split |= | ||
661 | AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | ||
662 | else | ||
663 | dc_lb_memory_split |= | ||
664 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
665 | } else if (mode2->hdisplay > mode1->hdisplay) { | ||
666 | if (mode2->hdisplay > 2560) | ||
667 | dc_lb_memory_split |= | ||
668 | AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
669 | else | ||
670 | dc_lb_memory_split |= | ||
671 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
672 | } else | ||
673 | dc_lb_memory_split |= | ||
674 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
675 | } else if (mode1) { | ||
676 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY; | ||
677 | } else if (mode2) { | ||
678 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
679 | } | ||
680 | #else | ||
681 | /* manual */ | ||
682 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
683 | dc_lb_memory_split &= | ||
684 | ~(AVIVO_DC_LB_DISP1_END_ADR_MASK << | ||
685 | AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
686 | if (mode1) { | ||
687 | dc_lb_memory_split |= | ||
688 | ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK) | ||
689 | << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
690 | } else if (mode2) { | ||
691 | dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
692 | } | ||
693 | #endif | ||
694 | WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split); | ||
695 | } | ||
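Both the AVIVO bandwidth code removed above and the r100_bandwidth_update() added further down lean on the driver's 20.12 fixed-point type (fixed20_12 with rfixed_const, rfixed_mul, rfixed_div, rfixed_trunc). A rough standalone approximation of that arithmetic for following the calculations; the real helpers are defined elsewhere in the radeon driver and may round differently, and the 10 kHz clock units are inferred from the /100 divisions, not confirmed:

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point: upper 20 bits integer part, lower 12 bits fraction. */
typedef struct { uint32_t full; } fx20_12;

static fx20_12 fx_const(uint32_t i) { return (fx20_12){ i << 12 }; }
static fx20_12 fx_mul(fx20_12 a, fx20_12 b)
{
	return (fx20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) };
}
static fx20_12 fx_div(fx20_12 a, fx20_12 b)
{
	return (fx20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}
static uint32_t fx_trunc(fx20_12 a) { return a.full >> 12; }

int main(void)
{
	/* default_mclk = 40000 (assumed 10 kHz units) / 100 -> 400 MHz */
	fx20_12 mclk = fx_div(fx_const(40000), fx_const(100));
	/* 128-bit DDR bus: bytes per memory clock = (128 / 8) * 2 = 32 */
	fx20_12 mem_bw = fx_mul(mclk, fx_const((128 / 8) * 2));
	printf("mclk %u MHz, raw memory bandwidth ~%u MB/s\n",
	       (unsigned)fx_trunc(mclk), (unsigned)fx_trunc(mem_bw));
	return 0;
}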
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c550932a108f..f1ba8ff41130 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -110,7 +110,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
110 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 110 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
111 | return -EINVAL; | 111 | return -EINVAL; |
112 | } | 112 | } |
113 | rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr); | 113 | rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); |
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
116 | 116 | ||
@@ -173,8 +173,12 @@ void r100_mc_setup(struct radeon_device *rdev) | |||
173 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); | 173 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); |
174 | } | 174 | } |
175 | /* Write VRAM size in case we are limiting it */ | 175 | /* Write VRAM size in case we are limiting it */ |
176 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 176 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
177 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 177 | /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, |
178 | * if the aperture is 64MB but we have 32MB VRAM | ||
179 | * we report only 32MB VRAM but we have to set MC_FB_LOCATION | ||
180 | * to 64MB, otherwise the gpu accidentally dies */ | ||
181 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
178 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | 182 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
179 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | 183 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
180 | WREG32(RADEON_MC_FB_LOCATION, tmp); | 184 | WREG32(RADEON_MC_FB_LOCATION, tmp); |
@@ -215,7 +219,6 @@ int r100_mc_init(struct radeon_device *rdev) | |||
215 | r100_pci_gart_disable(rdev); | 219 | r100_pci_gart_disable(rdev); |
216 | 220 | ||
217 | /* Setup GPU memory space */ | 221 | /* Setup GPU memory space */ |
218 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
219 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | 222 | rdev->mc.gtt_location = 0xFFFFFFFFUL; |
220 | if (rdev->flags & RADEON_IS_AGP) { | 223 | if (rdev->flags & RADEON_IS_AGP) { |
221 | r = radeon_agp_init(rdev); | 224 | r = radeon_agp_init(rdev); |
@@ -719,13 +722,14 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
719 | unsigned idx) | 722 | unsigned idx) |
720 | { | 723 | { |
721 | struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; | 724 | struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; |
722 | uint32_t header = ib_chunk->kdata[idx]; | 725 | uint32_t header; |
723 | 726 | ||
724 | if (idx >= ib_chunk->length_dw) { | 727 | if (idx >= ib_chunk->length_dw) { |
725 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", | 728 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", |
726 | idx, ib_chunk->length_dw); | 729 | idx, ib_chunk->length_dw); |
727 | return -EINVAL; | 730 | return -EINVAL; |
728 | } | 731 | } |
732 | header = ib_chunk->kdata[idx]; | ||
729 | pkt->idx = idx; | 733 | pkt->idx = idx; |
730 | pkt->type = CP_PACKET_GET_TYPE(header); | 734 | pkt->type = CP_PACKET_GET_TYPE(header); |
731 | pkt->count = CP_PACKET_GET_COUNT(header); | 735 | pkt->count = CP_PACKET_GET_COUNT(header); |
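The change above defers reading ib_chunk->kdata[idx] until after the bounds check, so a malformed command stream can no longer index past the chunk before being rejected. The same validate-before-use shape in miniature (illustrative only):

#include <stdint.h>

/* Return 0 and the dword at idx, or a failure code; kdata[idx] is never
 * touched until idx has been checked against length_dw. */
static int read_dword_checked(const uint32_t *kdata, unsigned length_dw,
			      unsigned idx, uint32_t *out)
{
	if (idx >= length_dw)
		return -1;
	*out = kdata[idx];
	return 0;
}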
@@ -753,6 +757,102 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
753 | } | 757 | } |
754 | 758 | ||
755 | /** | 759 | /** |
760 | * r100_cs_packet_parse_vline() - parse userspace VLINE packet | ||
761 | * @parser: parser structure holding parsing context. | ||
762 | * | ||
763 | * Userspace sends a special sequence for VLINE waits. | ||
764 | * PACKET0 - VLINE_START_END + value | ||
765 | * PACKET0 - WAIT_UNTIL + value | ||
766 | * RELOC (P3) - crtc_id in reloc. | ||
767 | * | ||
768 | * This function parses this and relocates the VLINE START END | ||
769 | * and WAIT UNTIL packets to the correct crtc. | ||
770 | * It also detects a switched off crtc and nulls out the | ||
771 | * wait in that case. | ||
772 | */ | ||
773 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | ||
774 | { | ||
775 | struct radeon_cs_chunk *ib_chunk; | ||
776 | struct drm_mode_object *obj; | ||
777 | struct drm_crtc *crtc; | ||
778 | struct radeon_crtc *radeon_crtc; | ||
779 | struct radeon_cs_packet p3reloc, waitreloc; | ||
780 | int crtc_id; | ||
781 | int r; | ||
782 | uint32_t header, h_idx, reg; | ||
783 | |||
784 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
785 | |||
786 | /* parse the wait until */ | ||
787 | r = r100_cs_packet_parse(p, &waitreloc, p->idx); | ||
788 | if (r) | ||
789 | return r; | ||
790 | |||
791 | /* check it's a wait until with only 1 count */ | ||
792 | if (waitreloc.reg != RADEON_WAIT_UNTIL || | ||
793 | waitreloc.count != 0) { | ||
794 | DRM_ERROR("vline wait had illegal wait until segment\n"); | ||
795 | r = -EINVAL; | ||
796 | return r; | ||
797 | } | ||
798 | |||
799 | if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { | ||
800 | DRM_ERROR("vline wait had illegal wait until\n"); | ||
801 | r = -EINVAL; | ||
802 | return r; | ||
803 | } | ||
804 | |||
805 | /* jump over the NOP */ | ||
806 | r = r100_cs_packet_parse(p, &p3reloc, p->idx); | ||
807 | if (r) | ||
808 | return r; | ||
809 | |||
810 | h_idx = p->idx - 2; | ||
811 | p->idx += waitreloc.count; | ||
812 | p->idx += p3reloc.count; | ||
813 | |||
814 | header = ib_chunk->kdata[h_idx]; | ||
815 | crtc_id = ib_chunk->kdata[h_idx + 5]; | ||
816 | reg = ib_chunk->kdata[h_idx] >> 2; | ||
817 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | ||
818 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | ||
819 | if (!obj) { | ||
820 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | ||
821 | r = -EINVAL; | ||
822 | goto out; | ||
823 | } | ||
824 | crtc = obj_to_crtc(obj); | ||
825 | radeon_crtc = to_radeon_crtc(crtc); | ||
826 | crtc_id = radeon_crtc->crtc_id; | ||
827 | |||
828 | if (!crtc->enabled) { | ||
829 | /* if the CRTC isn't enabled - we need to nop out the wait until */ | ||
830 | ib_chunk->kdata[h_idx + 2] = PACKET2(0); | ||
831 | ib_chunk->kdata[h_idx + 3] = PACKET2(0); | ||
832 | } else if (crtc_id == 1) { | ||
833 | switch (reg) { | ||
834 | case AVIVO_D1MODE_VLINE_START_END: | ||
835 | header &= R300_CP_PACKET0_REG_MASK; | ||
836 | header |= AVIVO_D2MODE_VLINE_START_END >> 2; | ||
837 | break; | ||
838 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
839 | header &= R300_CP_PACKET0_REG_MASK; | ||
840 | header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; | ||
841 | break; | ||
842 | default: | ||
843 | DRM_ERROR("unknown crtc reloc\n"); | ||
844 | r = -EINVAL; | ||
845 | goto out; | ||
846 | } | ||
847 | ib_chunk->kdata[h_idx] = header; | ||
848 | ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; | ||
849 | } | ||
850 | out: | ||
851 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
852 | return r; | ||
853 | } | ||
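A PACKET0 header carries its target register as a dword offset (reg >> 2) in the header's register field, which is why the function above can re-point the VLINE write at the CRTC2 copy of the register by patching only that field (via R300_CP_PACKET0_REG_MASK). A hedged sketch of the idea; the mask value and its polarity below are placeholders, not the driver's definitions:

#include <stdint.h>

#define PKT0_REG_FIELD 0x1fffu		/* assumed width of the register field */

/* Swap the register a PACKET0 write lands on, leaving type/count intact. */
static uint32_t retarget_packet0(uint32_t header, uint32_t new_reg)
{
	header &= ~PKT0_REG_FIELD;
	header |= (new_reg >> 2) & PKT0_REG_FIELD;
	return header;
}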
854 | |||
855 | /** | ||
756 | * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 | 856 | * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 |
757 | * @parser: parser structure holding parsing context. | 857 | * @parser: parser structure holding parsing context. |
758 | * @data: pointer to relocation data | 858 | * @data: pointer to relocation data |
@@ -814,6 +914,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
814 | unsigned idx; | 914 | unsigned idx; |
815 | bool onereg; | 915 | bool onereg; |
816 | int r; | 916 | int r; |
917 | u32 tile_flags = 0; | ||
817 | 918 | ||
818 | ib = p->ib->ptr; | 919 | ib = p->ib->ptr; |
819 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | 920 | ib_chunk = &p->chunks[p->chunk_ib_idx]; |
@@ -825,6 +926,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
825 | } | 926 | } |
826 | for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { | 927 | for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { |
827 | switch (reg) { | 928 | switch (reg) { |
929 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
930 | r = r100_cs_packet_parse_vline(p); | ||
931 | if (r) { | ||
932 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
933 | idx, reg); | ||
934 | r100_cs_dump_packet(p, pkt); | ||
935 | return r; | ||
936 | } | ||
937 | break; | ||
828 | /* FIXME: only allow PACKET3 blit? easier to check for out of | 938 | /* FIXME: only allow PACKET3 blit? easier to check for out of |
829 | * range access */ | 939 | * range access */ |
830 | case RADEON_DST_PITCH_OFFSET: | 940 | case RADEON_DST_PITCH_OFFSET: |
@@ -838,7 +948,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
838 | } | 948 | } |
839 | tmp = ib_chunk->kdata[idx] & 0x003fffff; | 949 | tmp = ib_chunk->kdata[idx] & 0x003fffff; |
840 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 950 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
841 | ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; | 951 | |
952 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
953 | tile_flags |= RADEON_DST_TILE_MACRO; | ||
954 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
955 | if (reg == RADEON_SRC_PITCH_OFFSET) { | ||
956 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | ||
957 | r100_cs_dump_packet(p, pkt); | ||
958 | return -EINVAL; | ||
959 | } | ||
960 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
961 | } | ||
962 | |||
963 | tmp |= tile_flags; | ||
964 | ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; | ||
842 | break; | 965 | break; |
843 | case RADEON_RB3D_DEPTHOFFSET: | 966 | case RADEON_RB3D_DEPTHOFFSET: |
844 | case RADEON_RB3D_COLOROFFSET: | 967 | case RADEON_RB3D_COLOROFFSET: |
@@ -869,6 +992,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
869 | case R300_TX_OFFSET_0+52: | 992 | case R300_TX_OFFSET_0+52: |
870 | case R300_TX_OFFSET_0+56: | 993 | case R300_TX_OFFSET_0+56: |
871 | case R300_TX_OFFSET_0+60: | 994 | case R300_TX_OFFSET_0+60: |
995 | /* rn50 has no 3D engine so fail on any 3d setup */ | ||
996 | if (ASIC_IS_RN50(p->rdev)) { | ||
997 | DRM_ERROR("attempt to use RN50 3D engine failed\n"); | ||
998 | return -EINVAL; | ||
999 | } | ||
872 | r = r100_cs_packet_next_reloc(p, &reloc); | 1000 | r = r100_cs_packet_next_reloc(p, &reloc); |
873 | if (r) { | 1001 | if (r) { |
874 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | 1002 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
@@ -878,6 +1006,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
878 | } | 1006 | } |
879 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1007 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
880 | break; | 1008 | break; |
1009 | case R300_RB3D_COLORPITCH0: | ||
1010 | case RADEON_RB3D_COLORPITCH: | ||
1011 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1012 | if (r) { | ||
1013 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1014 | idx, reg); | ||
1015 | r100_cs_dump_packet(p, pkt); | ||
1016 | return r; | ||
1017 | } | ||
1018 | |||
1019 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1020 | tile_flags |= RADEON_COLOR_TILE_ENABLE; | ||
1021 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1022 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | ||
1023 | |||
1024 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1025 | tmp |= tile_flags; | ||
1026 | ib[idx] = tmp; | ||
1027 | break; | ||
881 | default: | 1028 | default: |
882 | /* FIXME: we don't want to allow any other packets */ | 1029 | /* FIXME: we don't want to allow any other packets */ |
883 | break; | 1030 | break; |
@@ -1256,29 +1403,100 @@ static void r100_vram_get_type(struct radeon_device *rdev) | |||
1256 | } | 1403 | } |
1257 | } | 1404 | } |
1258 | 1405 | ||
1259 | void r100_vram_info(struct radeon_device *rdev) | 1406 | static u32 r100_get_accessible_vram(struct radeon_device *rdev) |
1260 | { | 1407 | { |
1261 | r100_vram_get_type(rdev); | 1408 | u32 aper_size; |
1409 | u8 byte; | ||
1410 | |||
1411 | aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | ||
1412 | |||
1413 | /* Set HDP_APER_CNTL only on cards that are known not to be broken, | ||
1414 | * that is, ones that have the 2nd generation multifunction PCI interface | ||
1415 | */ | ||
1416 | if (rdev->family == CHIP_RV280 || | ||
1417 | rdev->family >= CHIP_RV350) { | ||
1418 | WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, | ||
1419 | ~RADEON_HDP_APER_CNTL); | ||
1420 | DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); | ||
1421 | return aper_size * 2; | ||
1422 | } | ||
1423 | |||
1424 | /* Older cards have all sorts of funny issues to deal with. First | ||
1425 | * check if it's a multifunction card by reading the PCI config | ||
1426 | * header type... Limit those to one aperture size | ||
1427 | */ | ||
1428 | pci_read_config_byte(rdev->pdev, 0xe, &byte); | ||
1429 | if (byte & 0x80) { | ||
1430 | DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); | ||
1431 | DRM_INFO("Limiting VRAM to one aperture\n"); | ||
1432 | return aper_size; | ||
1433 | } | ||
1434 | |||
1435 | /* Single function older card. We read HDP_APER_CNTL to see how the BIOS | ||
1436 | * has set it up. We don't write this as it's broken on some ASICs but | ||
1437 | * we expect the BIOS to have done the right thing (might be too optimistic...) | ||
1438 | */ | ||
1439 | if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) | ||
1440 | return aper_size * 2; | ||
1441 | return aper_size; | ||
1442 | } | ||
1443 | |||
1444 | void r100_vram_init_sizes(struct radeon_device *rdev) | ||
1445 | { | ||
1446 | u64 config_aper_size; | ||
1447 | u32 accessible; | ||
1448 | |||
1449 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | ||
1262 | 1450 | ||
1263 | if (rdev->flags & RADEON_IS_IGP) { | 1451 | if (rdev->flags & RADEON_IS_IGP) { |
1264 | uint32_t tom; | 1452 | uint32_t tom; |
1265 | /* read NB_TOM to get the amount of ram stolen for the GPU */ | 1453 | /* read NB_TOM to get the amount of ram stolen for the GPU */ |
1266 | tom = RREG32(RADEON_NB_TOM); | 1454 | tom = RREG32(RADEON_NB_TOM); |
1267 | rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); | 1455 | rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
1268 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 1456 | /* for IGPs we need to keep VRAM where it was put by the BIOS */ |
1457 | rdev->mc.vram_location = (tom & 0xffff) << 16; | ||
1458 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
1459 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
1269 | } else { | 1460 | } else { |
1270 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 1461 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
1271 | /* Some production boards of m6 will report 0 | 1462 | /* Some production boards of m6 will report 0 |
1272 | * if it's 8 MB | 1463 | * if it's 8 MB |
1273 | */ | 1464 | */ |
1274 | if (rdev->mc.vram_size == 0) { | 1465 | if (rdev->mc.real_vram_size == 0) { |
1275 | rdev->mc.vram_size = 8192 * 1024; | 1466 | rdev->mc.real_vram_size = 8192 * 1024; |
1276 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 1467 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
1277 | } | 1468 | } |
1469 | /* let driver place VRAM */ | ||
1470 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
1471 | /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - | ||
1472 | * Novell bug 204882 + along with lots of ubuntu ones */ | ||
1473 | if (config_aper_size > rdev->mc.real_vram_size) | ||
1474 | rdev->mc.mc_vram_size = config_aper_size; | ||
1475 | else | ||
1476 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
1278 | } | 1477 | } |
1279 | 1478 | ||
1479 | /* work out accessible VRAM */ | ||
1480 | accessible = r100_get_accessible_vram(rdev); | ||
1481 | |||
1280 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 1482 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
1281 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 1483 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
1484 | |||
1485 | if (accessible > rdev->mc.aper_size) | ||
1486 | accessible = rdev->mc.aper_size; | ||
1487 | |||
1488 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
1489 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
1490 | |||
1491 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
1492 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
1493 | } | ||
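r100_vram_init_sizes() deliberately tracks two numbers: real_vram_size (what the board actually has) and mc_vram_size (what MC_FB_LOCATION must span so the GPU does not hang). A worked example with the figures from the Novell-bug comment above, a 32MB part behind a 64MB aperture; the values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t config_aper_size = 64ull << 20;	/* CONFIG_APER_SIZE   */
	uint64_t real_vram_size   = 32ull << 20;	/* CONFIG_MEMSIZE     */
	uint64_t aper_size        = 64ull << 20;	/* PCI aperture (BAR) */

	/* MC_FB_LOCATION must cover the larger of the two */
	uint64_t mc_vram_size = config_aper_size > real_vram_size ?
				config_aper_size : real_vram_size;

	/* both sizes are then clamped to what the CPU aperture can reach */
	if (mc_vram_size > aper_size)
		mc_vram_size = aper_size;
	if (real_vram_size > aper_size)
		real_vram_size = aper_size;

	printf("real %lluMB, mc %lluMB\n",
	       (unsigned long long)(real_vram_size >> 20),
	       (unsigned long long)(mc_vram_size >> 20));
	return 0;
}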
1494 | |||
1495 | void r100_vram_info(struct radeon_device *rdev) | ||
1496 | { | ||
1497 | r100_vram_get_type(rdev); | ||
1498 | |||
1499 | r100_vram_init_sizes(rdev); | ||
1282 | } | 1500 | } |
1283 | 1501 | ||
1284 | 1502 | ||
@@ -1533,3 +1751,530 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev) | |||
1533 | return 0; | 1751 | return 0; |
1534 | #endif | 1752 | #endif |
1535 | } | 1753 | } |
1754 | |||
1755 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | ||
1756 | uint32_t tiling_flags, uint32_t pitch, | ||
1757 | uint32_t offset, uint32_t obj_size) | ||
1758 | { | ||
1759 | int surf_index = reg * 16; | ||
1760 | int flags = 0; | ||
1761 | |||
1762 | /* r100/r200 divide by 16 */ | ||
1763 | if (rdev->family < CHIP_R300) | ||
1764 | flags = pitch / 16; | ||
1765 | else | ||
1766 | flags = pitch / 8; | ||
1767 | |||
1768 | if (rdev->family <= CHIP_RS200) { | ||
1769 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | ||
1770 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | ||
1771 | flags |= RADEON_SURF_TILE_COLOR_BOTH; | ||
1772 | if (tiling_flags & RADEON_TILING_MACRO) | ||
1773 | flags |= RADEON_SURF_TILE_COLOR_MACRO; | ||
1774 | } else if (rdev->family <= CHIP_RV280) { | ||
1775 | if (tiling_flags & (RADEON_TILING_MACRO)) | ||
1776 | flags |= R200_SURF_TILE_COLOR_MACRO; | ||
1777 | if (tiling_flags & RADEON_TILING_MICRO) | ||
1778 | flags |= R200_SURF_TILE_COLOR_MICRO; | ||
1779 | } else { | ||
1780 | if (tiling_flags & RADEON_TILING_MACRO) | ||
1781 | flags |= R300_SURF_TILE_MACRO; | ||
1782 | if (tiling_flags & RADEON_TILING_MICRO) | ||
1783 | flags |= R300_SURF_TILE_MICRO; | ||
1784 | } | ||
1785 | |||
1786 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); | ||
1787 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); | ||
1788 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); | ||
1789 | WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); | ||
1790 | return 0; | ||
1791 | } | ||
1792 | |||
1793 | void r100_clear_surface_reg(struct radeon_device *rdev, int reg) | ||
1794 | { | ||
1795 | int surf_index = reg * 16; | ||
1796 | WREG32(RADEON_SURFACE0_INFO + surf_index, 0); | ||
1797 | } | ||
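The pitch packing in r100_set_surface_reg() differs by family: r100/r200 store pitch/16 in the SURFACE*_INFO flags word, r300 and newer store pitch/8, with the tiling bits OR'd on top. A one-line illustration, assuming a 1024-pixel-wide 32bpp scanout (pitch 4096 bytes), which packs as 256 on r100/r200 and 512 on r300+:

/* illustrative only: mirrors the pitch division in the function above */
static int surface_pitch_field(int is_r300_plus, int pitch_bytes)
{
	return is_r300_plus ? pitch_bytes / 8 : pitch_bytes / 16;
}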
1798 | |||
1799 | void r100_bandwidth_update(struct radeon_device *rdev) | ||
1800 | { | ||
1801 | fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; | ||
1802 | fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; | ||
1803 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; | ||
1804 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; | ||
1805 | fixed20_12 memtcas_ff[8] = { | ||
1806 | fixed_init(1), | ||
1807 | fixed_init(2), | ||
1808 | fixed_init(3), | ||
1809 | fixed_init(0), | ||
1810 | fixed_init_half(1), | ||
1811 | fixed_init_half(2), | ||
1812 | fixed_init(0), | ||
1813 | }; | ||
1814 | fixed20_12 memtcas_rs480_ff[8] = { | ||
1815 | fixed_init(0), | ||
1816 | fixed_init(1), | ||
1817 | fixed_init(2), | ||
1818 | fixed_init(3), | ||
1819 | fixed_init(0), | ||
1820 | fixed_init_half(1), | ||
1821 | fixed_init_half(2), | ||
1822 | fixed_init_half(3), | ||
1823 | }; | ||
1824 | fixed20_12 memtcas2_ff[8] = { | ||
1825 | fixed_init(0), | ||
1826 | fixed_init(1), | ||
1827 | fixed_init(2), | ||
1828 | fixed_init(3), | ||
1829 | fixed_init(4), | ||
1830 | fixed_init(5), | ||
1831 | fixed_init(6), | ||
1832 | fixed_init(7), | ||
1833 | }; | ||
1834 | fixed20_12 memtrbs[8] = { | ||
1835 | fixed_init(1), | ||
1836 | fixed_init_half(1), | ||
1837 | fixed_init(2), | ||
1838 | fixed_init_half(2), | ||
1839 | fixed_init(3), | ||
1840 | fixed_init_half(3), | ||
1841 | fixed_init(4), | ||
1842 | fixed_init_half(4) | ||
1843 | }; | ||
1844 | fixed20_12 memtrbs_r4xx[8] = { | ||
1845 | fixed_init(4), | ||
1846 | fixed_init(5), | ||
1847 | fixed_init(6), | ||
1848 | fixed_init(7), | ||
1849 | fixed_init(8), | ||
1850 | fixed_init(9), | ||
1851 | fixed_init(10), | ||
1852 | fixed_init(11) | ||
1853 | }; | ||
1854 | fixed20_12 min_mem_eff; | ||
1855 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; | ||
1856 | fixed20_12 cur_latency_mclk, cur_latency_sclk; | ||
1857 | fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, | ||
1858 | disp_drain_rate2, read_return_rate; | ||
1859 | fixed20_12 time_disp1_drop_priority; | ||
1860 | int c; | ||
1861 | int cur_size = 16; /* in octawords */ | ||
1862 | int critical_point = 0, critical_point2; | ||
1863 | /* uint32_t read_return_rate, time_disp1_drop_priority; */ | ||
1864 | int stop_req, max_stop_req; | ||
1865 | struct drm_display_mode *mode1 = NULL; | ||
1866 | struct drm_display_mode *mode2 = NULL; | ||
1867 | uint32_t pixel_bytes1 = 0; | ||
1868 | uint32_t pixel_bytes2 = 0; | ||
1869 | |||
1870 | if (rdev->mode_info.crtcs[0]->base.enabled) { | ||
1871 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; | ||
1872 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; | ||
1873 | } | ||
1874 | if (rdev->mode_info.crtcs[1]->base.enabled) { | ||
1875 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; | ||
1876 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | ||
1877 | } | ||
1878 | |||
1879 | min_mem_eff.full = rfixed_const_8(0); | ||
1880 | /* get modes */ | ||
1881 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { | ||
1882 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); | ||
1883 | mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
1884 | mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
1885 | /* check crtc enables */ | ||
1886 | if (mode2) | ||
1887 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
1888 | if (mode1) | ||
1889 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
1890 | WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); | ||
1891 | } | ||
1892 | |||
1893 | /* | ||
1894 | * determine if there is enough bw for current mode | ||
1895 | */ | ||
1896 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
1897 | temp_ff.full = rfixed_const(100); | ||
1898 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
1899 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
1900 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
1901 | |||
1902 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
1903 | temp_ff.full = rfixed_const(temp); | ||
1904 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
1905 | |||
1906 | pix_clk.full = 0; | ||
1907 | pix_clk2.full = 0; | ||
1908 | peak_disp_bw.full = 0; | ||
1909 | if (mode1) { | ||
1910 | temp_ff.full = rfixed_const(1000); | ||
1911 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
1912 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
1913 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
1914 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
1915 | } | ||
1916 | if (mode2) { | ||
1917 | temp_ff.full = rfixed_const(1000); | ||
1918 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
1919 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
1920 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
1921 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
1922 | } | ||
1923 | |||
1924 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
1925 | if (peak_disp_bw.full >= mem_bw.full) { | ||
1926 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" | ||
1927 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | ||
1928 | } | ||
1929 | |||
1930 | /* Get values from the EXT_MEM_CNTL register...converting its contents. */ | ||
1931 | temp = RREG32(RADEON_MEM_TIMING_CNTL); | ||
1932 | if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ | ||
1933 | mem_trcd = ((temp >> 2) & 0x3) + 1; | ||
1934 | mem_trp = ((temp & 0x3)) + 1; | ||
1935 | mem_tras = ((temp & 0x70) >> 4) + 1; | ||
1936 | } else if (rdev->family == CHIP_R300 || | ||
1937 | rdev->family == CHIP_R350) { /* r300, r350 */ | ||
1938 | mem_trcd = (temp & 0x7) + 1; | ||
1939 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
1940 | mem_tras = ((temp >> 11) & 0xf) + 4; | ||
1941 | } else if (rdev->family == CHIP_RV350 || | ||
1942 | rdev->family <= CHIP_RV380) { | ||
1943 | /* rv3x0 */ | ||
1944 | mem_trcd = (temp & 0x7) + 3; | ||
1945 | mem_trp = ((temp >> 8) & 0x7) + 3; | ||
1946 | mem_tras = ((temp >> 11) & 0xf) + 6; | ||
1947 | } else if (rdev->family == CHIP_R420 || | ||
1948 | rdev->family == CHIP_R423 || | ||
1949 | rdev->family == CHIP_RV410) { | ||
1950 | /* r4xx */ | ||
1951 | mem_trcd = (temp & 0xf) + 3; | ||
1952 | if (mem_trcd > 15) | ||
1953 | mem_trcd = 15; | ||
1954 | mem_trp = ((temp >> 8) & 0xf) + 3; | ||
1955 | if (mem_trp > 15) | ||
1956 | mem_trp = 15; | ||
1957 | mem_tras = ((temp >> 12) & 0x1f) + 6; | ||
1958 | if (mem_tras > 31) | ||
1959 | mem_tras = 31; | ||
1960 | } else { /* RV200, R200 */ | ||
1961 | mem_trcd = (temp & 0x7) + 1; | ||
1962 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
1963 | mem_tras = ((temp >> 12) & 0xf) + 4; | ||
1964 | } | ||
1965 | /* convert to FF */ | ||
1966 | trcd_ff.full = rfixed_const(mem_trcd); | ||
1967 | trp_ff.full = rfixed_const(mem_trp); | ||
1968 | tras_ff.full = rfixed_const(mem_tras); | ||
1969 | |||
1970 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ | ||
1971 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); | ||
1972 | data = (temp & (7 << 20)) >> 20; | ||
1973 | if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { | ||
1974 | if (rdev->family == CHIP_RS480) /* don't think rs400 */ | ||
1975 | tcas_ff = memtcas_rs480_ff[data]; | ||
1976 | else | ||
1977 | tcas_ff = memtcas_ff[data]; | ||
1978 | } else | ||
1979 | tcas_ff = memtcas2_ff[data]; | ||
1980 | |||
1981 | if (rdev->family == CHIP_RS400 || | ||
1982 | rdev->family == CHIP_RS480) { | ||
1983 | /* extra cas latency stored in bits 23-25 0-4 clocks */ | ||
1984 | data = (temp >> 23) & 0x7; | ||
1985 | if (data < 5) | ||
1986 | tcas_ff.full += rfixed_const(data); | ||
1987 | } | ||
1988 | |||
1989 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { | ||
1990 | /* on the R300, Tcas is included in Trbs. | ||
1991 | */ | ||
1992 | temp = RREG32(RADEON_MEM_CNTL); | ||
1993 | data = (R300_MEM_NUM_CHANNELS_MASK & temp); | ||
1994 | if (data == 1) { | ||
1995 | if (R300_MEM_USE_CD_CH_ONLY & temp) { | ||
1996 | temp = RREG32(R300_MC_IND_INDEX); | ||
1997 | temp &= ~R300_MC_IND_ADDR_MASK; | ||
1998 | temp |= R300_MC_READ_CNTL_CD_mcind; | ||
1999 | WREG32(R300_MC_IND_INDEX, temp); | ||
2000 | temp = RREG32(R300_MC_IND_DATA); | ||
2001 | data = (R300_MEM_RBS_POSITION_C_MASK & temp); | ||
2002 | } else { | ||
2003 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
2004 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
2005 | } | ||
2006 | } else { | ||
2007 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
2008 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
2009 | } | ||
2010 | if (rdev->family == CHIP_RV410 || | ||
2011 | rdev->family == CHIP_R420 || | ||
2012 | rdev->family == CHIP_R423) | ||
2013 | trbs_ff = memtrbs_r4xx[data]; | ||
2014 | else | ||
2015 | trbs_ff = memtrbs[data]; | ||
2016 | tcas_ff.full += trbs_ff.full; | ||
2017 | } | ||
2018 | |||
2019 | sclk_eff_ff.full = sclk_ff.full; | ||
2020 | |||
2021 | if (rdev->flags & RADEON_IS_AGP) { | ||
2022 | fixed20_12 agpmode_ff; | ||
2023 | agpmode_ff.full = rfixed_const(radeon_agpmode); | ||
2024 | temp_ff.full = rfixed_const_666(16); | ||
2025 | sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); | ||
2026 | } | ||
2027 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ | ||
2028 | |||
2029 | if (ASIC_IS_R300(rdev)) { | ||
2030 | sclk_delay_ff.full = rfixed_const(250); | ||
2031 | } else { | ||
2032 | if ((rdev->family == CHIP_RV100) || | ||
2033 | rdev->flags & RADEON_IS_IGP) { | ||
2034 | if (rdev->mc.vram_is_ddr) | ||
2035 | sclk_delay_ff.full = rfixed_const(41); | ||
2036 | else | ||
2037 | sclk_delay_ff.full = rfixed_const(33); | ||
2038 | } else { | ||
2039 | if (rdev->mc.vram_width == 128) | ||
2040 | sclk_delay_ff.full = rfixed_const(57); | ||
2041 | else | ||
2042 | sclk_delay_ff.full = rfixed_const(41); | ||
2043 | } | ||
2044 | } | ||
2045 | |||
2046 | mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); | ||
2047 | |||
2048 | if (rdev->mc.vram_is_ddr) { | ||
2049 | if (rdev->mc.vram_width == 32) { | ||
2050 | k1.full = rfixed_const(40); | ||
2051 | c = 3; | ||
2052 | } else { | ||
2053 | k1.full = rfixed_const(20); | ||
2054 | c = 1; | ||
2055 | } | ||
2056 | } else { | ||
2057 | k1.full = rfixed_const(40); | ||
2058 | c = 3; | ||
2059 | } | ||
2060 | |||
2061 | temp_ff.full = rfixed_const(2); | ||
2062 | mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); | ||
2063 | temp_ff.full = rfixed_const(c); | ||
2064 | mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); | ||
2065 | temp_ff.full = rfixed_const(4); | ||
2066 | mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); | ||
2067 | mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); | ||
2068 | mc_latency_mclk.full += k1.full; | ||
2069 | |||
2070 | mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); | ||
2071 | mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); | ||
2072 | |||
2073 | /* | ||
2074 | HW cursor time assuming worst case of full size colour cursor. | ||
2075 | */ | ||
2076 | temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); | ||
2077 | temp_ff.full += trcd_ff.full; | ||
2078 | if (temp_ff.full < tras_ff.full) | ||
2079 | temp_ff.full = tras_ff.full; | ||
2080 | cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); | ||
2081 | |||
2082 | temp_ff.full = rfixed_const(cur_size); | ||
2083 | cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); | ||
2084 | /* | ||
2085 | Find the total latency for the display data. | ||
2086 | */ | ||
2087 | disp_latency_overhead.full = rfixed_const(80); | ||
2088 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | ||
2089 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | ||
2090 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | ||
2091 | |||
2092 | if (mc_latency_mclk.full > mc_latency_sclk.full) | ||
2093 | disp_latency.full = mc_latency_mclk.full; | ||
2094 | else | ||
2095 | disp_latency.full = mc_latency_sclk.full; | ||
2096 | |||
2097 | /* setup Max GRPH_STOP_REQ default value */ | ||
2098 | if (ASIC_IS_RV100(rdev)) | ||
2099 | max_stop_req = 0x5c; | ||
2100 | else | ||
2101 | max_stop_req = 0x7c; | ||
2102 | |||
2103 | if (mode1) { | ||
2104 | /* CRTC1 | ||
2105 | Set GRPH_BUFFER_CNTL register using h/w defined optimal values. | ||
2106 | GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] | ||
2107 | */ | ||
2108 | stop_req = mode1->hdisplay * pixel_bytes1 / 16; | ||
2109 | |||
2110 | if (stop_req > max_stop_req) | ||
2111 | stop_req = max_stop_req; | ||
2112 | |||
2113 | /* | ||
2114 | Find the drain rate of the display buffer. | ||
2115 | */ | ||
2116 | temp_ff.full = rfixed_const((16/pixel_bytes1)); | ||
2117 | disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); | ||
2118 | |||
2119 | /* | ||
2120 | Find the critical point of the display buffer. | ||
2121 | */ | ||
2122 | crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); | ||
2123 | crit_point_ff.full += rfixed_const_half(0); | ||
2124 | |||
2125 | critical_point = rfixed_trunc(crit_point_ff); | ||
2126 | |||
2127 | if (rdev->disp_priority == 2) { | ||
2128 | critical_point = 0; | ||
2129 | } | ||
2130 | |||
2131 | /* | ||
2132 | The critical point should never be above max_stop_req-4. Setting | ||
2133 | GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. | ||
2134 | */ | ||
2135 | if (max_stop_req - critical_point < 4) | ||
2136 | critical_point = 0; | ||
2137 | |||
2138 | if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { | ||
2139 | /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */ | ||
2140 | critical_point = 0x10; | ||
2141 | } | ||
2142 | |||
2143 | temp = RREG32(RADEON_GRPH_BUFFER_CNTL); | ||
2144 | temp &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
2145 | temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
2146 | temp &= ~(RADEON_GRPH_START_REQ_MASK); | ||
2147 | if ((rdev->family == CHIP_R350) && | ||
2148 | (stop_req > 0x15)) { | ||
2149 | stop_req -= 0x10; | ||
2150 | } | ||
2151 | temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
2152 | temp |= RADEON_GRPH_BUFFER_SIZE; | ||
2153 | temp &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
2154 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
2155 | RADEON_GRPH_STOP_CNTL); | ||
2156 | /* | ||
2157 | Write the result into the register. | ||
2158 | */ | ||
2159 | WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
2160 | (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
2161 | |||
2162 | #if 0 | ||
2163 | if ((rdev->family == CHIP_RS400) || | ||
2164 | (rdev->family == CHIP_RS480)) { | ||
2165 | /* attempt to program RS400 disp regs correctly ??? */ | ||
2166 | temp = RREG32(RS400_DISP1_REG_CNTL); | ||
2167 | temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | | ||
2168 | RS400_DISP1_STOP_REQ_LEVEL_MASK); | ||
2169 | WREG32(RS400_DISP1_REQ_CNTL1, (temp | | ||
2170 | (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
2171 | (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
2172 | temp = RREG32(RS400_DMIF_MEM_CNTL1); | ||
2173 | temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | | ||
2174 | RS400_DISP1_CRITICAL_POINT_STOP_MASK); | ||
2175 | WREG32(RS400_DMIF_MEM_CNTL1, (temp | | ||
2176 | (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | | ||
2177 | (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); | ||
2178 | } | ||
2179 | #endif | ||
2180 | |||
2181 | DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", | ||
2182 | /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ | ||
2183 | (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); | ||
2184 | } | ||
2185 | |||
2186 | if (mode2) { | ||
2187 | u32 grph2_cntl; | ||
2188 | stop_req = mode2->hdisplay * pixel_bytes2 / 16; | ||
2189 | |||
2190 | if (stop_req > max_stop_req) | ||
2191 | stop_req = max_stop_req; | ||
2192 | |||
2193 | /* | ||
2194 | Find the drain rate of the display buffer. | ||
2195 | */ | ||
2196 | temp_ff.full = rfixed_const((16/pixel_bytes2)); | ||
2197 | disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); | ||
2198 | |||
2199 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); | ||
2200 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
2201 | grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
2202 | grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); | ||
2203 | if ((rdev->family == CHIP_R350) && | ||
2204 | (stop_req > 0x15)) { | ||
2205 | stop_req -= 0x10; | ||
2206 | } | ||
2207 | grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
2208 | grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; | ||
2209 | grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
2210 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
2211 | RADEON_GRPH_STOP_CNTL); | ||
2212 | |||
2213 | if ((rdev->family == CHIP_RS100) || | ||
2214 | (rdev->family == CHIP_RS200)) | ||
2215 | critical_point2 = 0; | ||
2216 | else { | ||
2217 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; | ||
2218 | temp_ff.full = rfixed_const(temp); | ||
2219 | temp_ff.full = rfixed_mul(mclk_ff, temp_ff); | ||
2220 | if (sclk_ff.full < temp_ff.full) | ||
2221 | temp_ff.full = sclk_ff.full; | ||
2222 | |||
2223 | read_return_rate.full = temp_ff.full; | ||
2224 | |||
2225 | if (mode1) { | ||
2226 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; | ||
2227 | time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); | ||
2228 | } else { | ||
2229 | time_disp1_drop_priority.full = 0; | ||
2230 | } | ||
2231 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; | ||
2232 | crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); | ||
2233 | crit_point_ff.full += rfixed_const_half(0); | ||
2234 | |||
2235 | critical_point2 = rfixed_trunc(crit_point_ff); | ||
2236 | |||
2237 | if (rdev->disp_priority == 2) { | ||
2238 | critical_point2 = 0; | ||
2239 | } | ||
2240 | |||
2241 | if (max_stop_req - critical_point2 < 4) | ||
2242 | critical_point2 = 0; | ||
2243 | |||
2244 | } | ||
2245 | |||
2246 | if (critical_point2 == 0 && rdev->family == CHIP_R300) { | ||
2247 | /* some R300 cards have a problem with this set to 0 */ | ||
2248 | critical_point2 = 0x10; | ||
2249 | } | ||
2250 | |||
2251 | WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
2252 | (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
2253 | |||
2254 | if ((rdev->family == CHIP_RS400) || | ||
2255 | (rdev->family == CHIP_RS480)) { | ||
2256 | #if 0 | ||
2257 | /* attempt to program RS400 disp2 regs correctly ??? */ | ||
2258 | temp = RREG32(RS400_DISP2_REQ_CNTL1); | ||
2259 | temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | | ||
2260 | RS400_DISP2_STOP_REQ_LEVEL_MASK); | ||
2261 | WREG32(RS400_DISP2_REQ_CNTL1, (temp | | ||
2262 | (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
2263 | (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
2264 | temp = RREG32(RS400_DISP2_REQ_CNTL2); | ||
2265 | temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | | ||
2266 | RS400_DISP2_CRITICAL_POINT_STOP_MASK); | ||
2267 | WREG32(RS400_DISP2_REQ_CNTL2, (temp | | ||
2268 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | | ||
2269 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); | ||
2270 | #endif | ||
2271 | WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); | ||
2272 | WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); | ||
2273 | WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); | ||
2274 | WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); | ||
2275 | } | ||
2276 | |||
2277 | DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", | ||
2278 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); | ||
2279 | } | ||
2280 | } | ||
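
All of the watermark math in the block above is carried out in 20.12 fixed point (the fixed20_12 type), and the final GRPH_CRITICAL_POINT value is essentially drain rate times total display latency, rounded by half and truncated. The snippet below is a minimal, self-contained model of those helpers and of that calculation; the rounding behaviour is assumed to mirror the driver's rfixed_const/rfixed_mul/rfixed_div/rfixed_trunc helpers, and the input numbers are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Minimal 20.12 fixed-point model (assumed to mirror the driver's helpers). */
typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 rfx_const(uint32_t v)      { return (fixed20_12){ v << 12 }; }
static fixed20_12 rfx_const_half(uint32_t v) { return (fixed20_12){ (v << 12) + 2048 }; }
static fixed20_12 rfx_mul(fixed20_12 a, fixed20_12 b)
{
	return (fixed20_12){ (uint32_t)(((uint64_t)a.full * b.full + 2048) >> 12) };
}
static fixed20_12 rfx_div(fixed20_12 a, fixed20_12 b)
{
	uint64_t t = ((uint64_t)a.full << 13) / b.full;
	return (fixed20_12){ (uint32_t)((t + 1) / 2) };
}
static uint32_t rfx_trunc(fixed20_12 a)      { return a.full >> 12; }

int main(void)
{
	unsigned bytes_per_pixel = 4;                 /* illustrative 32 bpp mode */
	fixed20_12 pix_clk = rfx_const(100);          /* 100 MHz pixel clock      */
	/* pretend the combined mclk/sclk latency came out to 0.9 us */
	fixed20_12 disp_latency = rfx_div(rfx_const(9), rfx_const(10));

	/* drain rate = pix_clk / (16 / bytes_per_pixel), as in the code above */
	fixed20_12 drain_rate = rfx_div(pix_clk, rfx_const(16 / bytes_per_pixel));

	/* critical point = drain_rate * latency + 0.5, then truncated */
	fixed20_12 crit = rfx_mul(drain_rate, disp_latency);
	crit.full += rfx_const_half(0).full;
	printf("critical_point = %u\n", (unsigned)rfx_trunc(crit));  /* prints 23 */
	return 0;
}

The driver then clamps this value against max_stop_req - 4, forces it to 0 when display priority is fixed high, and bumps it to 0x10 for the R300 dual-CRTC workaround, as the hunks above show.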
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index e2ed5bc08170..9c8d41534a5d 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
32 | #include "radeon.h" | 32 | #include "radeon.h" |
33 | #include "radeon_drm.h" | ||
34 | #include "radeon_share.h" | ||
33 | 35 | ||
34 | /* r300,r350,rv350,rv370,rv380 depends on : */ | 36 | /* r300,r350,rv350,rv370,rv380 depends on : */ |
35 | void r100_hdp_reset(struct radeon_device *rdev); | 37 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev); | |||
44 | int r100_cs_packet_parse(struct radeon_cs_parser *p, | 46 | int r100_cs_packet_parse(struct radeon_cs_parser *p, |
45 | struct radeon_cs_packet *pkt, | 47 | struct radeon_cs_packet *pkt, |
46 | unsigned idx); | 48 | unsigned idx); |
49 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); | ||
47 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | 50 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, |
48 | struct radeon_cs_reloc **cs_reloc); | 51 | struct radeon_cs_reloc **cs_reloc); |
49 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, | 52 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
@@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
150 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 153 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
151 | return -EINVAL; | 154 | return -EINVAL; |
152 | } | 155 | } |
153 | addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; | 156 | addr = (lower_32_bits(addr) >> 8) | |
154 | writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4)); | 157 | ((upper_32_bits(addr) & 0xff) << 24) | |
158 | 0xc; | ||
159 | /* on x86 we want this to be CPU endian; on powerpc | ||
160 | * without HW swappers, it'll get swapped on the way | ||
161 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | ||
162 | writel(addr, ((void __iomem *)ptr) + (i * 4)); | ||
155 | return 0; | 163 | return 0; |
156 | } | 164 | } |
157 | 165 | ||
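
The corrected entry packing above turns a GPU address of up to 40 bits into a 32-bit PCIE GART page-table entry: address bits 31:8 fill entry bits 23:0, address bits 39:32 now land in entry bits 31:24 (the old code shifted them by only 4), and the low nibble 0xc carries flag bits whose exact meaning the patch does not spell out. A stand-alone sketch of the packing:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone model of the RV370 PCIE GART entry packing from the hunk above.
 * The 0xc flag bits are taken verbatim from the patch; their meaning is assumed. */
static uint32_t rv370_gart_entry(uint64_t addr)
{
	uint32_t lo = (uint32_t)addr;          /* lower_32_bits(addr) */
	uint32_t hi = (uint32_t)(addr >> 32);  /* upper_32_bits(addr) */

	return (lo >> 8) | ((hi & 0xff) << 24) | 0xc;
}

int main(void)
{
	/* a 4 KiB page just above 4 GiB: 0x123456000 -> 0x0123456c */
	printf("entry = 0x%08x\n", rv370_gart_entry(0x123456000ULL));
	return 0;
}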
@@ -579,10 +587,8 @@ void r300_vram_info(struct radeon_device *rdev) | |||
579 | } else { | 587 | } else { |
580 | rdev->mc.vram_width = 64; | 588 | rdev->mc.vram_width = 64; |
581 | } | 589 | } |
582 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
583 | 590 | ||
584 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 591 | r100_vram_init_sizes(rdev); |
585 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
586 | } | 592 | } |
587 | 593 | ||
588 | 594 | ||
@@ -970,7 +976,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track) | |||
970 | 976 | ||
971 | static const unsigned r300_reg_safe_bm[159] = { | 977 | static const unsigned r300_reg_safe_bm[159] = { |
972 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 978 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
973 | 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, | 979 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
974 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 980 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
975 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 981 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
976 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 982 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
@@ -1019,7 +1025,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1019 | struct radeon_cs_reloc *reloc; | 1025 | struct radeon_cs_reloc *reloc; |
1020 | struct r300_cs_track *track; | 1026 | struct r300_cs_track *track; |
1021 | volatile uint32_t *ib; | 1027 | volatile uint32_t *ib; |
1022 | uint32_t tmp; | 1028 | uint32_t tmp, tile_flags = 0; |
1023 | unsigned i; | 1029 | unsigned i; |
1024 | int r; | 1030 | int r; |
1025 | 1031 | ||
@@ -1027,6 +1033,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1027 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | 1033 | ib_chunk = &p->chunks[p->chunk_ib_idx]; |
1028 | track = (struct r300_cs_track*)p->track; | 1034 | track = (struct r300_cs_track*)p->track; |
1029 | switch(reg) { | 1035 | switch(reg) { |
1036 | case AVIVO_D1MODE_VLINE_START_END: | ||
1037 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
1038 | r = r100_cs_packet_parse_vline(p); | ||
1039 | if (r) { | ||
1040 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1041 | idx, reg); | ||
1042 | r100_cs_dump_packet(p, pkt); | ||
1043 | return r; | ||
1044 | } | ||
1045 | break; | ||
1030 | case RADEON_DST_PITCH_OFFSET: | 1046 | case RADEON_DST_PITCH_OFFSET: |
1031 | case RADEON_SRC_PITCH_OFFSET: | 1047 | case RADEON_SRC_PITCH_OFFSET: |
1032 | r = r100_cs_packet_next_reloc(p, &reloc); | 1048 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1038,7 +1054,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1038 | } | 1054 | } |
1039 | tmp = ib_chunk->kdata[idx] & 0x003fffff; | 1055 | tmp = ib_chunk->kdata[idx] & 0x003fffff; |
1040 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 1056 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
1041 | ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; | 1057 | |
1058 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1059 | tile_flags |= RADEON_DST_TILE_MACRO; | ||
1060 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
1061 | if (reg == RADEON_SRC_PITCH_OFFSET) { | ||
1062 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | ||
1063 | r100_cs_dump_packet(p, pkt); | ||
1064 | return -EINVAL; | ||
1065 | } | ||
1066 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
1067 | } | ||
1068 | tmp |= tile_flags; | ||
1069 | ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; | ||
1042 | break; | 1070 | break; |
1043 | case R300_RB3D_COLOROFFSET0: | 1071 | case R300_RB3D_COLOROFFSET0: |
1044 | case R300_RB3D_COLOROFFSET1: | 1072 | case R300_RB3D_COLOROFFSET1: |
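
The fix-up above rebuilds the DST/SRC_PITCH_OFFSET dword from the relocation: the 22-bit offset field gets the packet's offset plus gpu_offset >> 10, macro/micro tiling bits taken from the buffer object are OR'd in, and only the pitch bits of the original value are kept (the mask shrank from 0xffc00000 to 0x3fc00000, so the command stream can no longer supply its own tile bits). The same idea, with the tile bits sitting in bits 18:16 instead, is applied to RB3D_COLORPITCH* and ZB_DEPTHPITCH in the next two hunks. A minimal sketch; the RADEON_DST_TILE_* values (bits 31:30) are assumed, not given in the patch:

#include <stdint.h>
#include <stdio.h>

#define DST_TILE_MACRO (1u << 30)   /* assumed value of RADEON_DST_TILE_MACRO */
#define DST_TILE_MICRO (2u << 30)   /* assumed value of RADEON_DST_TILE_MICRO */

/* Rebuild a pitch/offset dword the way the CS checker above does. */
static uint32_t patch_pitch_offset(uint32_t dword, uint64_t gpu_offset,
                                   int macro_tiled, int micro_tiled)
{
	uint32_t tmp = dword & 0x003fffff;      /* offset field, 1 KiB units */
	tmp += (uint32_t)(gpu_offset >> 10);    /* relocate against the BO   */
	if (macro_tiled)
		tmp |= DST_TILE_MACRO;
	if (micro_tiled)
		tmp |= DST_TILE_MICRO;
	/* keep only the pitch bits (29:22) of the original dword */
	return (dword & 0x3fc00000) | tmp;
}

int main(void)
{
	printf("0x%08x\n", patch_pitch_offset(0x04000100, 0x00400000ULL, 1, 0));
	return 0;
}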
@@ -1127,6 +1155,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1127 | /* RB3D_COLORPITCH1 */ | 1155 | /* RB3D_COLORPITCH1 */ |
1128 | /* RB3D_COLORPITCH2 */ | 1156 | /* RB3D_COLORPITCH2 */ |
1129 | /* RB3D_COLORPITCH3 */ | 1157 | /* RB3D_COLORPITCH3 */ |
1158 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1159 | if (r) { | ||
1160 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1161 | idx, reg); | ||
1162 | r100_cs_dump_packet(p, pkt); | ||
1163 | return r; | ||
1164 | } | ||
1165 | |||
1166 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1167 | tile_flags |= R300_COLOR_TILE_ENABLE; | ||
1168 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1169 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; | ||
1170 | |||
1171 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1172 | tmp |= tile_flags; | ||
1173 | ib[idx] = tmp; | ||
1174 | |||
1130 | i = (reg - 0x4E38) >> 2; | 1175 | i = (reg - 0x4E38) >> 2; |
1131 | track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; | 1176 | track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; |
1132 | switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { | 1177 | switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { |
@@ -1182,6 +1227,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1182 | break; | 1227 | break; |
1183 | case 0x4F24: | 1228 | case 0x4F24: |
1184 | /* ZB_DEPTHPITCH */ | 1229 | /* ZB_DEPTHPITCH */ |
1230 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1231 | if (r) { | ||
1232 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1233 | idx, reg); | ||
1234 | r100_cs_dump_packet(p, pkt); | ||
1235 | return r; | ||
1236 | } | ||
1237 | |||
1238 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1239 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; | ||
1240 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1241 | tile_flags |= R300_DEPTHMICROTILE_TILED; | ||
1242 | |||
1243 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1244 | tmp |= tile_flags; | ||
1245 | ib[idx] = tmp; | ||
1246 | |||
1185 | track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; | 1247 | track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; |
1186 | break; | 1248 | break; |
1187 | case 0x4104: | 1249 | case 0x4104: |
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 70f48609515e..4b7afef35a65 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h | |||
@@ -27,7 +27,9 @@ | |||
27 | #ifndef _R300_REG_H_ | 27 | #ifndef _R300_REG_H_ |
28 | #define _R300_REG_H_ | 28 | #define _R300_REG_H_ |
29 | 29 | ||
30 | 30 | #define R300_SURF_TILE_MACRO (1<<16) | |
31 | #define R300_SURF_TILE_MICRO (2<<16) | ||
32 | #define R300_SURF_TILE_BOTH (3<<16) | ||
31 | 33 | ||
32 | 34 | ||
33 | #define R300_MC_INIT_MISC_LAT_TIMER 0x180 | 35 | #define R300_MC_INIT_MISC_LAT_TIMER 0x180 |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 9070a1c2ce23..036691b38cb7 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -445,6 +445,7 @@ | |||
445 | #define AVIVO_D1MODE_DATA_FORMAT 0x6528 | 445 | #define AVIVO_D1MODE_DATA_FORMAT 0x6528 |
446 | # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) | 446 | # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) |
447 | #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C | 447 | #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C |
448 | #define AVIVO_D1MODE_VLINE_START_END 0x6538 | ||
448 | #define AVIVO_D1MODE_VIEWPORT_START 0x6580 | 449 | #define AVIVO_D1MODE_VIEWPORT_START 0x6580 |
449 | #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 | 450 | #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 |
450 | #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 | 451 | #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 |
@@ -496,6 +497,7 @@ | |||
496 | #define AVIVO_D2CUR_SIZE 0x6c10 | 497 | #define AVIVO_D2CUR_SIZE 0x6c10 |
497 | #define AVIVO_D2CUR_POSITION 0x6c14 | 498 | #define AVIVO_D2CUR_POSITION 0x6c14 |
498 | 499 | ||
500 | #define AVIVO_D2MODE_VLINE_START_END 0x6d38 | ||
499 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 | 501 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 |
500 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 | 502 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 |
501 | #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 | 503 | #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 570a244bd88b..09fb0b6ec7dd 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_share.h" | ||
31 | 32 | ||
32 | /* r520,rv530,rv560,rv570,r580 depends on : */ | 33 | /* r520,rv530,rv560,rv570,r580 depends on : */ |
33 | void r100_hdp_reset(struct radeon_device *rdev); | 34 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -94,8 +95,8 @@ int r520_mc_init(struct radeon_device *rdev) | |||
94 | "programming pipes. Bad things might happen.\n"); | 95 | "programming pipes. Bad things might happen.\n"); |
95 | } | 96 | } |
96 | /* Write VRAM size in case we are limiting it */ | 97 | /* Write VRAM size in case we are limiting it */ |
97 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 98 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
98 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 99 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
99 | tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); | 100 | tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); |
100 | tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); | 101 | tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); |
101 | WREG32_MC(R520_MC_FB_LOCATION, tmp); | 102 | WREG32_MC(R520_MC_FB_LOCATION, tmp); |
@@ -226,9 +227,20 @@ static void r520_vram_get_type(struct radeon_device *rdev) | |||
226 | 227 | ||
227 | void r520_vram_info(struct radeon_device *rdev) | 228 | void r520_vram_info(struct radeon_device *rdev) |
228 | { | 229 | { |
230 | fixed20_12 a; | ||
231 | |||
229 | r520_vram_get_type(rdev); | 232 | r520_vram_get_type(rdev); |
230 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
231 | 233 | ||
232 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 234 | r100_vram_init_sizes(rdev); |
233 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 235 | /* FIXME: we should enforce default clock in case GPU is not in |
236 | * default setup | ||
237 | */ | ||
238 | a.full = rfixed_const(100); | ||
239 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
240 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
241 | } | ||
242 | |||
243 | void r520_bandwidth_update(struct radeon_device *rdev) | ||
244 | { | ||
245 | rv515_bandwidth_avivo_update(rdev); | ||
234 | } | 246 | } |
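
The new sclk setup feeds the bandwidth code: the BIOS default engine clock is divided by 100 and stored as a 20.12 fixed-point value. The divisor suggests the clock is kept in 10 kHz units, which is an assumption here rather than something the patch states; a quick sanity check:

#include <stdio.h>

int main(void)
{
	/* hypothetical BIOS value: 50000 in 10 kHz units == 500 MHz */
	unsigned default_sclk = 50000;
	printf("pm.sclk = %u MHz (stored as 20.12 in the driver)\n",
	       default_sclk / 100);
	return 0;
}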
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c45559fc97fd..538cd907df69 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -67,7 +67,7 @@ int r600_mc_init(struct radeon_device *rdev) | |||
67 | "programming pipes. Bad things might happen.\n"); | 67 | "programming pipes. Bad things might happen.\n"); |
68 | } | 68 | } |
69 | 69 | ||
70 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 70 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
71 | tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); | 71 | tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); |
72 | tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); | 72 | tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); |
73 | WREG32(R600_MC_VM_FB_LOCATION, tmp); | 73 | WREG32(R600_MC_VM_FB_LOCATION, tmp); |
@@ -140,7 +140,8 @@ void r600_vram_get_type(struct radeon_device *rdev) | |||
140 | void r600_vram_info(struct radeon_device *rdev) | 140 | void r600_vram_info(struct radeon_device *rdev) |
141 | { | 141 | { |
142 | r600_vram_get_type(rdev); | 142 | r600_vram_get_type(rdev); |
143 | rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE); | 143 | rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE); |
144 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
144 | 145 | ||
145 | /* Could aper size report 0 ? */ | 146 | /* Could aper size report 0 ? */ |
146 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 147 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 146f3570af8e..20f17908b036 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -384,8 +384,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
384 | DRM_INFO("Loading RV670 PFP Microcode\n"); | 384 | DRM_INFO("Loading RV670 PFP Microcode\n"); |
385 | for (i = 0; i < PFP_UCODE_SIZE; i++) | 385 | for (i = 0; i < PFP_UCODE_SIZE; i++) |
386 | RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]); | 386 | RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]); |
387 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { | 387 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
388 | DRM_INFO("Loading RS780 CP Microcode\n"); | 388 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { |
389 | DRM_INFO("Loading RS780/RS880 CP Microcode\n"); | ||
389 | for (i = 0; i < PM4_UCODE_SIZE; i++) { | 390 | for (i = 0; i < PM4_UCODE_SIZE; i++) { |
390 | RADEON_WRITE(R600_CP_ME_RAM_DATA, | 391 | RADEON_WRITE(R600_CP_ME_RAM_DATA, |
391 | RS780_cp_microcode[i][0]); | 392 | RS780_cp_microcode[i][0]); |
@@ -396,7 +397,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
396 | } | 397 | } |
397 | 398 | ||
398 | RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); | 399 | RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); |
399 | DRM_INFO("Loading RS780 PFP Microcode\n"); | 400 | DRM_INFO("Loading RS780/RS880 PFP Microcode\n"); |
400 | for (i = 0; i < PFP_UCODE_SIZE; i++) | 401 | for (i = 0; i < PFP_UCODE_SIZE; i++) |
401 | RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]); | 402 | RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]); |
402 | } | 403 | } |
@@ -783,6 +784,7 @@ static void r600_gfx_init(struct drm_device *dev, | |||
783 | break; | 784 | break; |
784 | case CHIP_RV610: | 785 | case CHIP_RV610: |
785 | case CHIP_RS780: | 786 | case CHIP_RS780: |
787 | case CHIP_RS880: | ||
786 | case CHIP_RV620: | 788 | case CHIP_RV620: |
787 | dev_priv->r600_max_pipes = 1; | 789 | dev_priv->r600_max_pipes = 1; |
788 | dev_priv->r600_max_tile_pipes = 1; | 790 | dev_priv->r600_max_tile_pipes = 1; |
@@ -917,7 +919,8 @@ static void r600_gfx_init(struct drm_device *dev, | |||
917 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || | 919 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || |
918 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || | 920 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || |
919 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || | 921 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || |
920 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) | 922 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
923 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) | ||
921 | RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE); | 924 | RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE); |
922 | else | 925 | else |
923 | RADEON_WRITE(R600_DB_DEBUG, 0); | 926 | RADEON_WRITE(R600_DB_DEBUG, 0); |
@@ -935,7 +938,8 @@ static void r600_gfx_init(struct drm_device *dev, | |||
935 | sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES); | 938 | sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES); |
936 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || | 939 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || |
937 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || | 940 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || |
938 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { | 941 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
942 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { | ||
939 | sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) | | 943 | sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) | |
940 | R600_FETCH_FIFO_HIWATER(0xa) | | 944 | R600_FETCH_FIFO_HIWATER(0xa) | |
941 | R600_DONE_FIFO_HIWATER(0xe0) | | 945 | R600_DONE_FIFO_HIWATER(0xe0) | |
@@ -978,7 +982,8 @@ static void r600_gfx_init(struct drm_device *dev, | |||
978 | R600_NUM_ES_STACK_ENTRIES(0)); | 982 | R600_NUM_ES_STACK_ENTRIES(0)); |
979 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || | 983 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || |
980 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || | 984 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || |
981 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { | 985 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
986 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { | ||
982 | /* no vertex cache */ | 987 | /* no vertex cache */ |
983 | sq_config &= ~R600_VC_ENABLE; | 988 | sq_config &= ~R600_VC_ENABLE; |
984 | 989 | ||
@@ -1035,7 +1040,8 @@ static void r600_gfx_init(struct drm_device *dev, | |||
1035 | 1040 | ||
1036 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || | 1041 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || |
1037 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || | 1042 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || |
1038 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) | 1043 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
1044 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) | ||
1039 | RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY)); | 1045 | RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY)); |
1040 | else | 1046 | else |
1041 | RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC)); | 1047 | RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC)); |
@@ -1078,6 +1084,7 @@ static void r600_gfx_init(struct drm_device *dev, | |||
1078 | break; | 1084 | break; |
1079 | case CHIP_RV610: | 1085 | case CHIP_RV610: |
1080 | case CHIP_RS780: | 1086 | case CHIP_RS780: |
1087 | case CHIP_RS880: | ||
1081 | case CHIP_RV620: | 1088 | case CHIP_RV620: |
1082 | gs_prim_buffer_depth = 32; | 1089 | gs_prim_buffer_depth = 32; |
1083 | break; | 1090 | break; |
@@ -1123,6 +1130,7 @@ static void r600_gfx_init(struct drm_device *dev, | |||
1123 | switch (dev_priv->flags & RADEON_FAMILY_MASK) { | 1130 | switch (dev_priv->flags & RADEON_FAMILY_MASK) { |
1124 | case CHIP_RV610: | 1131 | case CHIP_RV610: |
1125 | case CHIP_RS780: | 1132 | case CHIP_RS780: |
1133 | case CHIP_RS880: | ||
1126 | case CHIP_RV620: | 1134 | case CHIP_RV620: |
1127 | tc_cntl = R600_TC_L2_SIZE(8); | 1135 | tc_cntl = R600_TC_L2_SIZE(8); |
1128 | break; | 1136 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d61f2fc61df5..b1d945b8ed6c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -64,6 +64,7 @@ extern int radeon_agpmode; | |||
64 | extern int radeon_vram_limit; | 64 | extern int radeon_vram_limit; |
65 | extern int radeon_gart_size; | 65 | extern int radeon_gart_size; |
66 | extern int radeon_benchmarking; | 66 | extern int radeon_benchmarking; |
67 | extern int radeon_testing; | ||
67 | extern int radeon_connector_table; | 68 | extern int radeon_connector_table; |
68 | 69 | ||
69 | /* | 70 | /* |
@@ -113,6 +114,7 @@ enum radeon_family { | |||
113 | CHIP_RV770, | 114 | CHIP_RV770, |
114 | CHIP_RV730, | 115 | CHIP_RV730, |
115 | CHIP_RV710, | 116 | CHIP_RV710, |
117 | CHIP_RS880, | ||
116 | CHIP_LAST, | 118 | CHIP_LAST, |
117 | }; | 119 | }; |
118 | 120 | ||
@@ -201,6 +203,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev); | |||
201 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); | 203 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); |
202 | void radeon_fence_unref(struct radeon_fence **fence); | 204 | void radeon_fence_unref(struct radeon_fence **fence); |
203 | 205 | ||
206 | /* | ||
207 | * Tiling registers | ||
208 | */ | ||
209 | struct radeon_surface_reg { | ||
210 | struct radeon_object *robj; | ||
211 | }; | ||
212 | |||
213 | #define RADEON_GEM_MAX_SURFACES 8 | ||
204 | 214 | ||
205 | /* | 215 | /* |
206 | * Radeon buffer. | 216 | * Radeon buffer. |
@@ -213,6 +223,7 @@ struct radeon_object_list { | |||
213 | uint64_t gpu_offset; | 223 | uint64_t gpu_offset; |
214 | unsigned rdomain; | 224 | unsigned rdomain; |
215 | unsigned wdomain; | 225 | unsigned wdomain; |
226 | uint32_t tiling_flags; | ||
216 | }; | 227 | }; |
217 | 228 | ||
218 | int radeon_object_init(struct radeon_device *rdev); | 229 | int radeon_object_init(struct radeon_device *rdev); |
@@ -242,8 +253,15 @@ void radeon_object_list_clean(struct list_head *head); | |||
242 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | 253 | int radeon_object_fbdev_mmap(struct radeon_object *robj, |
243 | struct vm_area_struct *vma); | 254 | struct vm_area_struct *vma); |
244 | unsigned long radeon_object_size(struct radeon_object *robj); | 255 | unsigned long radeon_object_size(struct radeon_object *robj); |
245 | 256 | void radeon_object_clear_surface_reg(struct radeon_object *robj); | |
246 | 257 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | |
258 | bool force_drop); | ||
259 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | ||
260 | uint32_t tiling_flags, uint32_t pitch); | ||
261 | void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); | ||
262 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
263 | struct ttm_mem_reg *mem); | ||
264 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | ||
247 | /* | 265 | /* |
248 | * GEM objects. | 266 | * GEM objects. |
249 | */ | 267 | */ |
@@ -315,8 +333,11 @@ struct radeon_mc { | |||
315 | unsigned gtt_location; | 333 | unsigned gtt_location; |
316 | unsigned gtt_size; | 334 | unsigned gtt_size; |
317 | unsigned vram_location; | 335 | unsigned vram_location; |
318 | unsigned vram_size; | 336 | /* for some chips with <= 32MB we need to lie |
337 | * about vram size near mc fb location */ | ||
338 | unsigned mc_vram_size; | ||
319 | unsigned vram_width; | 339 | unsigned vram_width; |
340 | unsigned real_vram_size; | ||
320 | int vram_mtrr; | 341 | int vram_mtrr; |
321 | bool vram_is_ddr; | 342 | bool vram_is_ddr; |
322 | }; | 343 | }; |
@@ -474,6 +495,39 @@ struct radeon_wb { | |||
474 | uint64_t gpu_addr; | 495 | uint64_t gpu_addr; |
475 | }; | 496 | }; |
476 | 497 | ||
498 | /** | ||
499 | * struct radeon_pm - power management data | ||
500 | * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) | ||
501 | * @igp_sideport_mclk: sideport memory clock MHz (rs690,rs740,rs780,rs880) | ||
502 | * @igp_system_mclk: system clock MHz (rs690,rs740,rs780,rs880) | ||
503 | * @igp_ht_link_clk: ht link clock MHz (rs690,rs740,rs780,rs880) | ||
504 | * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) | ||
505 | * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) | ||
506 | * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) | ||
507 | * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) | ||
508 | * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) | ||
509 | * @sclk: GPU clock MHz (core bandwidth depends on this clock) | ||
510 | * @needed_bandwidth: current bandwidth needs | ||
511 | * | ||
512 | * It keeps track of various data needed to make power management decisions. | ||
513 | * Bandwidth need is used to determine the minimum clock of the GPU and memory. | ||
514 | * The relation between gpu/memory clock and available bandwidth is hw dependent | ||
515 | * (type of memory, bus size, efficiency, ...) | ||
516 | */ | ||
517 | struct radeon_pm { | ||
518 | fixed20_12 max_bandwidth; | ||
519 | fixed20_12 igp_sideport_mclk; | ||
520 | fixed20_12 igp_system_mclk; | ||
521 | fixed20_12 igp_ht_link_clk; | ||
522 | fixed20_12 igp_ht_link_width; | ||
523 | fixed20_12 k8_bandwidth; | ||
524 | fixed20_12 sideport_bandwidth; | ||
525 | fixed20_12 ht_bandwidth; | ||
526 | fixed20_12 core_bandwidth; | ||
527 | fixed20_12 sclk; | ||
528 | fixed20_12 needed_bandwidth; | ||
529 | }; | ||
530 | |||
477 | 531 | ||
478 | /* | 532 | /* |
479 | * Benchmarking | 533 | * Benchmarking |
@@ -482,6 +536,12 @@ void radeon_benchmark(struct radeon_device *rdev); | |||
482 | 536 | ||
483 | 537 | ||
484 | /* | 538 | /* |
539 | * Testing | ||
540 | */ | ||
541 | void radeon_test_moves(struct radeon_device *rdev); | ||
542 | |||
543 | |||
544 | /* | ||
485 | * Debugfs | 545 | * Debugfs |
486 | */ | 546 | */ |
487 | int radeon_debugfs_add_files(struct radeon_device *rdev, | 547 | int radeon_debugfs_add_files(struct radeon_device *rdev, |
@@ -535,6 +595,11 @@ struct radeon_asic { | |||
535 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); | 595 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); |
536 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); | 596 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); |
537 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); | 597 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); |
598 | int (*set_surface_reg)(struct radeon_device *rdev, int reg, | ||
599 | uint32_t tiling_flags, uint32_t pitch, | ||
600 | uint32_t offset, uint32_t obj_size); | ||
601 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); | ||
602 | void (*bandwidth_update)(struct radeon_device *rdev); | ||
538 | }; | 603 | }; |
539 | 604 | ||
540 | union radeon_asic_config { | 605 | union radeon_asic_config { |
@@ -566,6 +631,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
566 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | 631 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
567 | struct drm_file *filp); | 632 | struct drm_file *filp); |
568 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 633 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
634 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | ||
635 | struct drm_file *filp); | ||
636 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | ||
637 | struct drm_file *filp); | ||
569 | 638 | ||
570 | 639 | ||
571 | /* | 640 | /* |
@@ -594,8 +663,8 @@ struct radeon_device { | |||
594 | struct radeon_object *fbdev_robj; | 663 | struct radeon_object *fbdev_robj; |
595 | struct radeon_framebuffer *fbdev_rfb; | 664 | struct radeon_framebuffer *fbdev_rfb; |
596 | /* Register mmio */ | 665 | /* Register mmio */ |
597 | unsigned long rmmio_base; | 666 | resource_size_t rmmio_base; |
598 | unsigned long rmmio_size; | 667 | resource_size_t rmmio_size; |
599 | void *rmmio; | 668 | void *rmmio; |
600 | radeon_rreg_t mm_rreg; | 669 | radeon_rreg_t mm_rreg; |
601 | radeon_wreg_t mm_wreg; | 670 | radeon_wreg_t mm_wreg; |
@@ -619,11 +688,14 @@ struct radeon_device { | |||
619 | struct radeon_irq irq; | 688 | struct radeon_irq irq; |
620 | struct radeon_asic *asic; | 689 | struct radeon_asic *asic; |
621 | struct radeon_gem gem; | 690 | struct radeon_gem gem; |
691 | struct radeon_pm pm; | ||
622 | struct mutex cs_mutex; | 692 | struct mutex cs_mutex; |
623 | struct radeon_wb wb; | 693 | struct radeon_wb wb; |
624 | bool gpu_lockup; | 694 | bool gpu_lockup; |
625 | bool shutdown; | 695 | bool shutdown; |
626 | bool suspend; | 696 | bool suspend; |
697 | bool need_dma32; | ||
698 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; | ||
627 | }; | 699 | }; |
628 | 700 | ||
629 | int radeon_device_init(struct radeon_device *rdev, | 701 | int radeon_device_init(struct radeon_device *rdev, |
@@ -670,6 +742,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
670 | /* | 742 | /* |
671 | * ASICs helpers. | 743 | * ASICs helpers. |
672 | */ | 744 | */ |
745 | #define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \ | ||
746 | (rdev->pdev->device == 0x5969)) | ||
673 | #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ | 747 | #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ |
674 | (rdev->family == CHIP_RV200) || \ | 748 | (rdev->family == CHIP_RV200) || \ |
675 | (rdev->family == CHIP_RS100) || \ | 749 | (rdev->family == CHIP_RS100) || \ |
@@ -796,5 +870,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
796 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 870 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) |
797 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) | 871 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) |
798 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) | 872 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) |
873 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) | ||
874 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) | ||
875 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) | ||
799 | 876 | ||
800 | #endif | 877 | #endif |
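
The three new wrapper macros above are the only way callers reach the new hooks; each chip family supplies the actual implementations through its struct radeon_asic table in radeon_asic.h (next file). A stripped-down, hypothetical model of that dispatch pattern, with simplified names that are not the real driver structures:

#include <stdio.h>

/* Hypothetical, stripped-down model of the ASIC function-pointer dispatch;
 * the real tables live in radeon_asic.h and carry many more hooks. */
struct toy_device;

struct toy_asic {
	void (*bandwidth_update)(struct toy_device *dev);
	int  (*set_surface_reg)(struct toy_device *dev, int reg,
				unsigned flags, unsigned pitch,
				unsigned offset, unsigned size);
};

struct toy_device {
	const struct toy_asic *asic;
};

/* wrapper in the spirit of radeon_bandwidth_update(rdev) */
#define toy_bandwidth_update(d) ((d)->asic->bandwidth_update((d)))

static void r100_like_bandwidth_update(struct toy_device *dev)
{
	(void)dev;
	printf("recomputing display watermarks\n");
}

static const struct toy_asic r100_like_asic = {
	.bandwidth_update = r100_like_bandwidth_update,
	/* .set_surface_reg left NULL in this sketch */
};

int main(void)
{
	struct toy_device dev = { .asic = &r100_like_asic };
	toy_bandwidth_update(&dev);
	return 0;
}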
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e2e567395df8..9a75876e0c3b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -71,6 +71,11 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
71 | uint64_t dst_offset, | 71 | uint64_t dst_offset, |
72 | unsigned num_pages, | 72 | unsigned num_pages, |
73 | struct radeon_fence *fence); | 73 | struct radeon_fence *fence); |
74 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | ||
75 | uint32_t tiling_flags, uint32_t pitch, | ||
76 | uint32_t offset, uint32_t obj_size); | ||
77 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | ||
78 | void r100_bandwidth_update(struct radeon_device *rdev); | ||
74 | 79 | ||
75 | static struct radeon_asic r100_asic = { | 80 | static struct radeon_asic r100_asic = { |
76 | .init = &r100_init, | 81 | .init = &r100_init, |
@@ -100,6 +105,9 @@ static struct radeon_asic r100_asic = { | |||
100 | .set_memory_clock = NULL, | 105 | .set_memory_clock = NULL, |
101 | .set_pcie_lanes = NULL, | 106 | .set_pcie_lanes = NULL, |
102 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 107 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
108 | .set_surface_reg = r100_set_surface_reg, | ||
109 | .clear_surface_reg = r100_clear_surface_reg, | ||
110 | .bandwidth_update = &r100_bandwidth_update, | ||
103 | }; | 111 | }; |
104 | 112 | ||
105 | 113 | ||
@@ -128,6 +136,7 @@ int r300_copy_dma(struct radeon_device *rdev, | |||
128 | uint64_t dst_offset, | 136 | uint64_t dst_offset, |
129 | unsigned num_pages, | 137 | unsigned num_pages, |
130 | struct radeon_fence *fence); | 138 | struct radeon_fence *fence); |
139 | |||
131 | static struct radeon_asic r300_asic = { | 140 | static struct radeon_asic r300_asic = { |
132 | .init = &r300_init, | 141 | .init = &r300_init, |
133 | .errata = &r300_errata, | 142 | .errata = &r300_errata, |
@@ -156,6 +165,9 @@ static struct radeon_asic r300_asic = { | |||
156 | .set_memory_clock = NULL, | 165 | .set_memory_clock = NULL, |
157 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 166 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
158 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 167 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
168 | .set_surface_reg = r100_set_surface_reg, | ||
169 | .clear_surface_reg = r100_clear_surface_reg, | ||
170 | .bandwidth_update = &r100_bandwidth_update, | ||
159 | }; | 171 | }; |
160 | 172 | ||
161 | /* | 173 | /* |
@@ -193,6 +205,9 @@ static struct radeon_asic r420_asic = { | |||
193 | .set_memory_clock = &radeon_atom_set_memory_clock, | 205 | .set_memory_clock = &radeon_atom_set_memory_clock, |
194 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 206 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
195 | .set_clock_gating = &radeon_atom_set_clock_gating, | 207 | .set_clock_gating = &radeon_atom_set_clock_gating, |
208 | .set_surface_reg = r100_set_surface_reg, | ||
209 | .clear_surface_reg = r100_clear_surface_reg, | ||
210 | .bandwidth_update = &r100_bandwidth_update, | ||
196 | }; | 211 | }; |
197 | 212 | ||
198 | 213 | ||
@@ -237,6 +252,9 @@ static struct radeon_asic rs400_asic = { | |||
237 | .set_memory_clock = NULL, | 252 | .set_memory_clock = NULL, |
238 | .set_pcie_lanes = NULL, | 253 | .set_pcie_lanes = NULL, |
239 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 254 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
255 | .set_surface_reg = r100_set_surface_reg, | ||
256 | .clear_surface_reg = r100_clear_surface_reg, | ||
257 | .bandwidth_update = &r100_bandwidth_update, | ||
240 | }; | 258 | }; |
241 | 259 | ||
242 | 260 | ||
@@ -254,6 +272,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev); | |||
254 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 272 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
255 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 273 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
256 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 274 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
275 | void rs600_bandwidth_update(struct radeon_device *rdev); | ||
257 | static struct radeon_asic rs600_asic = { | 276 | static struct radeon_asic rs600_asic = { |
258 | .init = &r300_init, | 277 | .init = &r300_init, |
259 | .errata = &rs600_errata, | 278 | .errata = &rs600_errata, |
@@ -282,6 +301,7 @@ static struct radeon_asic rs600_asic = { | |||
282 | .set_memory_clock = &radeon_atom_set_memory_clock, | 301 | .set_memory_clock = &radeon_atom_set_memory_clock, |
283 | .set_pcie_lanes = NULL, | 302 | .set_pcie_lanes = NULL, |
284 | .set_clock_gating = &radeon_atom_set_clock_gating, | 303 | .set_clock_gating = &radeon_atom_set_clock_gating, |
304 | .bandwidth_update = &rs600_bandwidth_update, | ||
285 | }; | 305 | }; |
286 | 306 | ||
287 | 307 | ||
@@ -294,6 +314,7 @@ int rs690_mc_init(struct radeon_device *rdev); | |||
294 | void rs690_mc_fini(struct radeon_device *rdev); | 314 | void rs690_mc_fini(struct radeon_device *rdev); |
295 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 315 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
296 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 316 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
317 | void rs690_bandwidth_update(struct radeon_device *rdev); | ||
297 | static struct radeon_asic rs690_asic = { | 318 | static struct radeon_asic rs690_asic = { |
298 | .init = &r300_init, | 319 | .init = &r300_init, |
299 | .errata = &rs690_errata, | 320 | .errata = &rs690_errata, |
@@ -322,6 +343,9 @@ static struct radeon_asic rs690_asic = { | |||
322 | .set_memory_clock = &radeon_atom_set_memory_clock, | 343 | .set_memory_clock = &radeon_atom_set_memory_clock, |
323 | .set_pcie_lanes = NULL, | 344 | .set_pcie_lanes = NULL, |
324 | .set_clock_gating = &radeon_atom_set_clock_gating, | 345 | .set_clock_gating = &radeon_atom_set_clock_gating, |
346 | .set_surface_reg = r100_set_surface_reg, | ||
347 | .clear_surface_reg = r100_clear_surface_reg, | ||
348 | .bandwidth_update = &rs690_bandwidth_update, | ||
325 | }; | 349 | }; |
326 | 350 | ||
327 | 351 | ||
@@ -339,6 +363,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
339 | void rv515_ring_start(struct radeon_device *rdev); | 363 | void rv515_ring_start(struct radeon_device *rdev); |
340 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | 364 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
341 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 365 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
366 | void rv515_bandwidth_update(struct radeon_device *rdev); | ||
342 | static struct radeon_asic rv515_asic = { | 367 | static struct radeon_asic rv515_asic = { |
343 | .init = &rv515_init, | 368 | .init = &rv515_init, |
344 | .errata = &rv515_errata, | 369 | .errata = &rv515_errata, |
@@ -367,6 +392,9 @@ static struct radeon_asic rv515_asic = { | |||
367 | .set_memory_clock = &radeon_atom_set_memory_clock, | 392 | .set_memory_clock = &radeon_atom_set_memory_clock, |
368 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 393 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
369 | .set_clock_gating = &radeon_atom_set_clock_gating, | 394 | .set_clock_gating = &radeon_atom_set_clock_gating, |
395 | .set_surface_reg = r100_set_surface_reg, | ||
396 | .clear_surface_reg = r100_clear_surface_reg, | ||
397 | .bandwidth_update = &rv515_bandwidth_update, | ||
370 | }; | 398 | }; |
371 | 399 | ||
372 | 400 | ||
@@ -377,6 +405,7 @@ void r520_errata(struct radeon_device *rdev); | |||
377 | void r520_vram_info(struct radeon_device *rdev); | 405 | void r520_vram_info(struct radeon_device *rdev); |
378 | int r520_mc_init(struct radeon_device *rdev); | 406 | int r520_mc_init(struct radeon_device *rdev); |
379 | void r520_mc_fini(struct radeon_device *rdev); | 407 | void r520_mc_fini(struct radeon_device *rdev); |
408 | void r520_bandwidth_update(struct radeon_device *rdev); | ||
380 | static struct radeon_asic r520_asic = { | 409 | static struct radeon_asic r520_asic = { |
381 | .init = &rv515_init, | 410 | .init = &rv515_init, |
382 | .errata = &r520_errata, | 411 | .errata = &r520_errata, |
@@ -405,6 +434,9 @@ static struct radeon_asic r520_asic = { | |||
405 | .set_memory_clock = &radeon_atom_set_memory_clock, | 434 | .set_memory_clock = &radeon_atom_set_memory_clock, |
406 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 435 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
407 | .set_clock_gating = &radeon_atom_set_clock_gating, | 436 | .set_clock_gating = &radeon_atom_set_clock_gating, |
437 | .set_surface_reg = r100_set_surface_reg, | ||
438 | .clear_surface_reg = r100_clear_surface_reg, | ||
439 | .bandwidth_update = &r520_bandwidth_update, | ||
408 | }; | 440 | }; |
409 | 441 | ||
410 | /* | 442 | /* |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1f5a1a490984..fcfe5c02d744 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -103,7 +103,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device | |||
103 | static bool radeon_atom_apply_quirks(struct drm_device *dev, | 103 | static bool radeon_atom_apply_quirks(struct drm_device *dev, |
104 | uint32_t supported_device, | 104 | uint32_t supported_device, |
105 | int *connector_type, | 105 | int *connector_type, |
106 | struct radeon_i2c_bus_rec *i2c_bus) | 106 | struct radeon_i2c_bus_rec *i2c_bus, |
107 | uint8_t *line_mux) | ||
107 | { | 108 | { |
108 | 109 | ||
109 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ | 110 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ |
@@ -127,8 +128,10 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
127 | if ((dev->pdev->device == 0x5653) && | 128 | if ((dev->pdev->device == 0x5653) && |
128 | (dev->pdev->subsystem_vendor == 0x1462) && | 129 | (dev->pdev->subsystem_vendor == 0x1462) && |
129 | (dev->pdev->subsystem_device == 0x0291)) { | 130 | (dev->pdev->subsystem_device == 0x0291)) { |
130 | if (*connector_type == DRM_MODE_CONNECTOR_LVDS) | 131 | if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { |
131 | i2c_bus->valid = false; | 132 | i2c_bus->valid = false; |
133 | *line_mux = 53; | ||
134 | } | ||
132 | } | 135 | } |
133 | 136 | ||
134 | /* Funky macbooks */ | 137 | /* Funky macbooks */ |
@@ -526,7 +529,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
526 | 529 | ||
527 | if (!radeon_atom_apply_quirks | 530 | if (!radeon_atom_apply_quirks |
528 | (dev, (1 << i), &bios_connectors[i].connector_type, | 531 | (dev, (1 << i), &bios_connectors[i].connector_type, |
529 | &bios_connectors[i].ddc_bus)) | 532 | &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux)) |
530 | continue; | 533 | continue; |
531 | 534 | ||
532 | bios_connectors[i].valid = true; | 535 | bios_connectors[i].valid = true; |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index c44403a2ca76..2e938f7496fb 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
63 | if (r) { | 63 | if (r) { |
64 | goto out_cleanup; | 64 | goto out_cleanup; |
65 | } | 65 | } |
66 | r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence); | 66 | r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence); |
67 | if (r) { | 67 | if (r) { |
68 | goto out_cleanup; | 68 | goto out_cleanup; |
69 | } | 69 | } |
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
88 | if (r) { | 88 | if (r) { |
89 | goto out_cleanup; | 89 | goto out_cleanup; |
90 | } | 90 | } |
91 | r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence); | 91 | r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence); |
92 | if (r) { | 92 | if (r) { |
93 | goto out_cleanup; | 93 | goto out_cleanup; |
94 | } | 94 | } |
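
Both copy calls take a page count, and the divisor implies those are 4 KiB GPU pages (an assumption based on the value used). The old right shift by 14 divided by 16 KiB instead, so the benchmark only moved a quarter of the intended data; for example:

#include <stdio.h>

int main(void)
{
	unsigned bsize = 1024 * 1024;                        /* 1 MiB test buffer */
	printf("pages (new, /4096): %u\n", bsize / 4096);    /* 256 */
	printf("pages (old, >>14):  %u\n", bsize >> 14);     /* 64, 4x too small */
	return 0;
}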
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index b843f9bdfb14..a169067efc4e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -127,17 +127,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
127 | sizeof(struct drm_radeon_cs_chunk))) { | 127 | sizeof(struct drm_radeon_cs_chunk))) { |
128 | return -EFAULT; | 128 | return -EFAULT; |
129 | } | 129 | } |
130 | p->chunks[i].length_dw = user_chunk.length_dw; | ||
131 | p->chunks[i].kdata = NULL; | ||
130 | p->chunks[i].chunk_id = user_chunk.chunk_id; | 132 | p->chunks[i].chunk_id = user_chunk.chunk_id; |
133 | |||
131 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { | 134 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { |
132 | p->chunk_relocs_idx = i; | 135 | p->chunk_relocs_idx = i; |
133 | } | 136 | } |
134 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { | 137 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { |
135 | p->chunk_ib_idx = i; | 138 | p->chunk_ib_idx = i; |
139 | /* zero length IB isn't useful */ | ||
140 | if (p->chunks[i].length_dw == 0) | ||
141 | return -EINVAL; | ||
136 | } | 142 | } |
143 | |||
137 | p->chunks[i].length_dw = user_chunk.length_dw; | 144 | p->chunks[i].length_dw = user_chunk.length_dw; |
138 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; | 145 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; |
139 | 146 | ||
140 | p->chunks[i].kdata = NULL; | ||
141 | size = p->chunks[i].length_dw * sizeof(uint32_t); | 147 | size = p->chunks[i].length_dw * sizeof(uint32_t); |
142 | p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); | 148 | p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); |
143 | if (p->chunks[i].kdata == NULL) { | 149 | if (p->chunks[i].kdata == NULL) { |
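
The reordering matters because the new zero-length test reads length_dw, which the old code only filled in after the chunk-id checks; the header fields have to be captured from the user copy before they are validated. A hypothetical, simplified model of that ordering (the chunk-id value is made up for the sketch):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified model of the chunk-header handling above:
 * record chunk_id and length_dw from the copied header first, then reject
 * a zero-length IB chunk before anything is sized from length_dw. */
struct chunk_hdr { uint32_t chunk_id; uint32_t length_dw; };
enum { CHUNK_ID_IB = 2 };   /* value assumed for the sketch */

static int check_chunk(const struct chunk_hdr *hdr)
{
	if (hdr->chunk_id == CHUNK_ID_IB && hdr->length_dw == 0)
		return -EINVAL;          /* zero length IB isn't useful */
	return 0;
}

int main(void)
{
	struct chunk_hdr bad = { .chunk_id = CHUNK_ID_IB, .length_dw = 0 };
	printf("check_chunk -> %d\n", check_chunk(&bad));
	return 0;
}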
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5232441f119b..b13c79e38bc0 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -111,9 +111,11 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, | |||
111 | 111 | ||
112 | if (ASIC_IS_AVIVO(rdev)) | 112 | if (ASIC_IS_AVIVO(rdev)) |
113 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); | 113 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); |
114 | else | 114 | else { |
115 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; | ||
115 | /* offset is from DISP(2)_BASE_ADDRESS */ | 116 | /* offset is from DISP(2)_BASE_ADDRESS */ |
116 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr); | 117 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); |
118 | } | ||
117 | } | 119 | } |
118 | 120 | ||
119 | int radeon_crtc_cursor_set(struct drm_crtc *crtc, | 121 | int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
@@ -245,6 +247,9 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
245 | (RADEON_CUR_LOCK | 247 | (RADEON_CUR_LOCK |
246 | | ((xorigin ? 0 : x) << 16) | 248 | | ((xorigin ? 0 : x) << 16) |
247 | | (yorigin ? 0 : y))); | 249 | | (yorigin ? 0 : y))); |
250 | /* offset is from DISP(2)_BASE_ADDRESS */ | ||
251 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + | ||
252 | (yorigin * 256))); | ||
248 | } | 253 | } |
249 | radeon_lock_cursor(crtc, false); | 254 | radeon_lock_cursor(crtc, false); |
250 | 255 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f30aa7274a54..9ff6dcb97f9d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -35,6 +35,25 @@ | |||
35 | #include "atom.h" | 35 | #include "atom.h" |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Clear GPU surface registers. | ||
39 | */ | ||
40 | static void radeon_surface_init(struct radeon_device *rdev) | ||
41 | { | ||
42 | /* FIXME: check this out */ | ||
43 | if (rdev->family < CHIP_R600) { | ||
44 | int i; | ||
45 | |||
46 | for (i = 0; i < 8; i++) { | ||
47 | WREG32(RADEON_SURFACE0_INFO + | ||
48 | i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), | ||
49 | 0); | ||
50 | } | ||
51 | /* enable surfaces */ | ||
52 | WREG32(RADEON_SURFACE_CNTL, 0); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | /* | ||
38 | * GPU scratch registers helpers function. | 57 | * GPU scratch registers helpers function. |
39 | */ | 58 | */ |
40 | static void radeon_scratch_init(struct radeon_device *rdev) | 59 | static void radeon_scratch_init(struct radeon_device *rdev) |
@@ -102,7 +121,7 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
102 | if (rdev->mc.vram_location != 0xFFFFFFFFUL) { | 121 | if (rdev->mc.vram_location != 0xFFFFFFFFUL) { |
103 | /* vram location was already setup try to put gtt after | 122 | /* vram location was already setup try to put gtt after |
104 | * if it fits */ | 123 | * if it fits */ |
105 | tmp = rdev->mc.vram_location + rdev->mc.vram_size; | 124 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; |
106 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); | 125 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
107 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { | 126 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { |
108 | rdev->mc.gtt_location = tmp; | 127 | rdev->mc.gtt_location = tmp; |
@@ -117,13 +136,13 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
117 | } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { | 136 | } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { |
118 | /* gtt location was already setup try to put vram before | 137 | /* gtt location was already setup try to put vram before |
119 | * if it fits */ | 138 | * if it fits */ |
120 | if (rdev->mc.vram_size < rdev->mc.gtt_location) { | 139 | if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { |
121 | rdev->mc.vram_location = 0; | 140 | rdev->mc.vram_location = 0; |
122 | } else { | 141 | } else { |
123 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; | 142 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; |
124 | tmp += (rdev->mc.vram_size - 1); | 143 | tmp += (rdev->mc.mc_vram_size - 1); |
125 | tmp &= ~(rdev->mc.vram_size - 1); | 144 | tmp &= ~(rdev->mc.mc_vram_size - 1); |
126 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) { | 145 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { |
127 | rdev->mc.vram_location = tmp; | 146 | rdev->mc.vram_location = tmp; |
128 | } else { | 147 | } else { |
129 | printk(KERN_ERR "[drm] vram too big to fit " | 148 | printk(KERN_ERR "[drm] vram too big to fit " |
@@ -133,12 +152,16 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
133 | } | 152 | } |
134 | } else { | 153 | } else { |
135 | rdev->mc.vram_location = 0; | 154 | rdev->mc.vram_location = 0; |
136 | rdev->mc.gtt_location = rdev->mc.vram_size; | 155 | tmp = rdev->mc.mc_vram_size; |
156 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); | ||
157 | rdev->mc.gtt_location = tmp; | ||
137 | } | 158 | } |
138 | DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20); | 159 | DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20); |
139 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", | 160 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", |
140 | rdev->mc.vram_location, | 161 | rdev->mc.vram_location, |
141 | rdev->mc.vram_location + rdev->mc.vram_size - 1); | 162 | rdev->mc.vram_location + rdev->mc.mc_vram_size - 1); |
163 | if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size) | ||
164 | DRM_INFO("radeon: VRAM less than aperture workaround enabled\n"); | ||
142 | DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); | 165 | DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); |
143 | DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", | 166 | DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", |
144 | rdev->mc.gtt_location, | 167 | rdev->mc.gtt_location, |
@@ -433,6 +456,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
433 | uint32_t flags) | 456 | uint32_t flags) |
434 | { | 457 | { |
435 | int r, ret; | 458 | int r, ret; |
459 | int dma_bits; | ||
436 | 460 | ||
437 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); | 461 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); |
438 | rdev->shutdown = false; | 462 | rdev->shutdown = false; |
@@ -475,8 +499,20 @@ int radeon_device_init(struct radeon_device *rdev, | |||
475 | return r; | 499 | return r; |
476 | } | 500 | } |
477 | 501 | ||
478 | /* Report DMA addressing limitation */ | 502 | /* set DMA mask + need_dma32 flags. |
479 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); | 503 | * PCIE - can handle 40-bits. |
504 | * IGP - can handle 40-bits (in theory) | ||
505 | * AGP - generally dma32 is safest | ||
506 | * PCI - only dma32 | ||
507 | */ | ||
508 | rdev->need_dma32 = false; | ||
509 | if (rdev->flags & RADEON_IS_AGP) | ||
510 | rdev->need_dma32 = true; | ||
511 | if (rdev->flags & RADEON_IS_PCI) | ||
512 | rdev->need_dma32 = true; | ||
513 | |||
514 | dma_bits = rdev->need_dma32 ? 32 : 40; | ||
515 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); | ||
480 | if (r) { | 516 | if (r) { |
481 | printk(KERN_WARNING "radeon: No suitable DMA available.\n"); | 517 | printk(KERN_WARNING "radeon: No suitable DMA available.\n"); |
482 | } | 518 | } |
@@ -496,6 +532,8 @@ int radeon_device_init(struct radeon_device *rdev, | |||
496 | radeon_errata(rdev); | 532 | radeon_errata(rdev); |
497 | /* Initialize scratch registers */ | 533 | /* Initialize scratch registers */ |
498 | radeon_scratch_init(rdev); | 534 | radeon_scratch_init(rdev); |
535 | /* Initialize surface registers */ | ||
536 | radeon_surface_init(rdev); | ||
499 | 537 | ||
500 | /* TODO: disable VGA need to use VGA request */ | 538 | /* TODO: disable VGA need to use VGA request */ |
501 | /* BIOS*/ | 539 | /* BIOS*/ |
@@ -527,27 +565,22 @@ int radeon_device_init(struct radeon_device *rdev, | |||
527 | radeon_combios_asic_init(rdev->ddev); | 565 | radeon_combios_asic_init(rdev->ddev); |
528 | } | 566 | } |
529 | } | 567 | } |
568 | /* Initialize clocks */ | ||
569 | r = radeon_clocks_init(rdev); | ||
570 | if (r) { | ||
571 | return r; | ||
572 | } | ||
530 | /* Get vram informations */ | 573 | /* Get vram informations */ |
531 | radeon_vram_info(rdev); | 574 | radeon_vram_info(rdev); |
532 | /* Device is severly broken if aper size > vram size. | 575 | |
533 | * for RN50/M6/M7 - Novell bug 204882 ? | ||
534 | */ | ||
535 | if (rdev->mc.vram_size < rdev->mc.aper_size) { | ||
536 | rdev->mc.aper_size = rdev->mc.vram_size; | ||
537 | } | ||
538 | /* Add an MTRR for the VRAM */ | 576 | /* Add an MTRR for the VRAM */ |
539 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, | 577 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, |
540 | MTRR_TYPE_WRCOMB, 1); | 578 | MTRR_TYPE_WRCOMB, 1); |
541 | DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", | 579 | DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", |
542 | rdev->mc.vram_size >> 20, | 580 | rdev->mc.real_vram_size >> 20, |
543 | (unsigned)rdev->mc.aper_size >> 20); | 581 | (unsigned)rdev->mc.aper_size >> 20); |
544 | DRM_INFO("RAM width %dbits %cDR\n", | 582 | DRM_INFO("RAM width %dbits %cDR\n", |
545 | rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); | 583 | rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); |
546 | /* Initialize clocks */ | ||
547 | r = radeon_clocks_init(rdev); | ||
548 | if (r) { | ||
549 | return r; | ||
550 | } | ||
551 | /* Initialize memory controller (also test AGP) */ | 584 | /* Initialize memory controller (also test AGP) */ |
552 | r = radeon_mc_init(rdev); | 585 | r = radeon_mc_init(rdev); |
553 | if (r) { | 586 | if (r) { |
@@ -604,12 +637,12 @@ int radeon_device_init(struct radeon_device *rdev, | |||
604 | if (r) { | 637 | if (r) { |
605 | return r; | 638 | return r; |
606 | } | 639 | } |
607 | if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) { | ||
608 | rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private; | ||
609 | } | ||
610 | if (!ret) { | 640 | if (!ret) { |
611 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); | 641 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); |
612 | } | 642 | } |
643 | if (radeon_testing) { | ||
644 | radeon_test_moves(rdev); | ||
645 | } | ||
613 | if (radeon_benchmarking) { | 646 | if (radeon_benchmarking) { |
614 | radeon_benchmark(rdev); | 647 | radeon_benchmark(rdev); |
615 | } | 648 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3efcf1a526be..a8fa1bb84cf7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -187,6 +187,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) | |||
187 | 187 | ||
188 | drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); | 188 | drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); |
189 | radeon_crtc->crtc_id = index; | 189 | radeon_crtc->crtc_id = index; |
190 | rdev->mode_info.crtcs[index] = radeon_crtc; | ||
190 | 191 | ||
191 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; | 192 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; |
192 | radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); | 193 | radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); |
@@ -491,7 +492,11 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
491 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; | 492 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; |
492 | current_freq = radeon_div(tmp, ref_div * post_div); | 493 | current_freq = radeon_div(tmp, ref_div * post_div); |
493 | 494 | ||
494 | error = abs(current_freq - freq); | 495 | if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { |
496 | error = freq - current_freq; | ||
497 | error = error < 0 ? 0xffffffff : error; | ||
498 | } else | ||
499 | error = abs(current_freq - freq); | ||
495 | vco_diff = abs(vco - best_vco); | 500 | vco_diff = abs(vco - best_vco); |
496 | 501 | ||
497 | if ((best_vco == 0 && error < best_error) || | 502 | if ((best_vco == 0 && error < best_error) || |
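
With RADEON_PLL_PREFER_CLOSEST_LOWER the error metric becomes one-sided: a candidate above the target is forced to the maximum error, so only frequencies at or below the target can win the comparison that follows. A standalone sketch of that metric (the flag value is an assumption):

#include <stdint.h>

#define PLL_PREFER_CLOSEST_LOWER (1u << 0)   /* illustrative flag value */

static uint32_t pll_error(uint32_t target, uint32_t candidate, uint32_t flags)
{
    if (flags & PLL_PREFER_CLOSEST_LOWER) {
        if (candidate > target)
            return 0xffffffffu;   /* overshoot: worst possible error */
        return target - candidate;
    }
    return candidate > target ? candidate - target : target - candidate;
}
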
@@ -657,36 +662,51 @@ void radeon_modeset_fini(struct radeon_device *rdev) | |||
657 | } | 662 | } |
658 | } | 663 | } |
659 | 664 | ||
660 | void radeon_init_disp_bandwidth(struct drm_device *dev) | 665 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
666 | struct drm_display_mode *mode, | ||
667 | struct drm_display_mode *adjusted_mode) | ||
661 | { | 668 | { |
662 | struct radeon_device *rdev = dev->dev_private; | 669 | struct drm_device *dev = crtc->dev; |
663 | struct drm_display_mode *modes[2]; | 670 | struct drm_encoder *encoder; |
664 | int pixel_bytes[2]; | 671 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
665 | struct drm_crtc *crtc; | 672 | struct radeon_encoder *radeon_encoder; |
666 | 673 | bool first = true; | |
667 | pixel_bytes[0] = pixel_bytes[1] = 0; | ||
668 | modes[0] = modes[1] = NULL; | ||
669 | |||
670 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
671 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
672 | 674 | ||
673 | if (crtc->enabled && crtc->fb) { | 675 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
674 | modes[radeon_crtc->crtc_id] = &crtc->mode; | 676 | radeon_encoder = to_radeon_encoder(encoder); |
675 | pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8; | 677 | if (encoder->crtc != crtc) |
678 | continue; | ||
679 | if (first) { | ||
680 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | ||
681 | radeon_crtc->devices = radeon_encoder->devices; | ||
682 | memcpy(&radeon_crtc->native_mode, | ||
683 | &radeon_encoder->native_mode, | ||
684 | sizeof(struct radeon_native_mode)); | ||
685 | first = false; | ||
686 | } else { | ||
687 | if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { | ||
688 | /* WARNING: Right now this can't happen but | ||
689 | * in the future we need to check that the scaling | ||
690 | * is consistent across different encoders | ||
691 | * (i.e. all encoders can work with the same | ||
692 | * scaling). | ||
693 | */ | ||
694 | DRM_ERROR("Scaling not consistent across encoders.\n"); | ||
695 | return false; | ||
696 | } | ||
676 | } | 697 | } |
677 | } | 698 | } |
678 | 699 | if (radeon_crtc->rmx_type != RMX_OFF) { | |
679 | if (ASIC_IS_AVIVO(rdev)) { | 700 | fixed20_12 a, b; |
680 | radeon_init_disp_bw_avivo(dev, | 701 | a.full = rfixed_const(crtc->mode.vdisplay); |
681 | modes[0], | 702 | b.full = rfixed_const(radeon_crtc->native_mode.panel_yres);
682 | pixel_bytes[0], | 703 | radeon_crtc->vsc.full = rfixed_div(a, b);
683 | modes[1], | 704 | a.full = rfixed_const(crtc->mode.hdisplay);
684 | pixel_bytes[1]); | 705 | b.full = rfixed_const(radeon_crtc->native_mode.panel_xres);
706 | radeon_crtc->hsc.full = rfixed_div(a, b); | ||
685 | } else { | 707 | } else { |
686 | radeon_init_disp_bw_legacy(dev, | 708 | radeon_crtc->vsc.full = rfixed_const(1); |
687 | modes[0], | 709 | radeon_crtc->hsc.full = rfixed_const(1); |
688 | pixel_bytes[0], | ||
689 | modes[1], | ||
690 | pixel_bytes[1]); | ||
691 | } | 710 | } |
711 | return true; | ||
692 | } | 712 | } |
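
The new fixup stores the scaling ratios in 20.12 fixed point so later bandwidth code can avoid floating point. A minimal 20.12 sketch; the helpers below are reimplemented for illustration and are not the fixed20_12 routines from radeon_fixed.h:

#include <stdint.h>

#define FIX_SHIFT 12   /* 20.12 fixed point, like fixed20_12 */

static uint32_t fx_const(uint32_t v) { return v << FIX_SHIFT; }

static uint32_t fx_div(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a << FIX_SHIFT) / b);
}

/* Ratio between a mode dimension and the panel's native dimension, e.g.
 * 1024 active lines on a 768-line panel -> 1024/768 ~= 1.333 -> 0x1555. */
static uint32_t scale_ratio(uint32_t mode_pixels, uint32_t native_pixels)
{
    return fx_div(fx_const(mode_pixels), fx_const(native_pixels));
}
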
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 09c9fb9f6210..0bd5879a4957 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -89,6 +89,7 @@ int radeon_agpmode = 0; | |||
89 | int radeon_vram_limit = 0; | 89 | int radeon_vram_limit = 0; |
90 | int radeon_gart_size = 512; /* default gart size */ | 90 | int radeon_gart_size = 512; /* default gart size */ |
91 | int radeon_benchmarking = 0; | 91 | int radeon_benchmarking = 0; |
92 | int radeon_testing = 0; | ||
92 | int radeon_connector_table = 0; | 93 | int radeon_connector_table = 0; |
93 | #endif | 94 | #endif |
94 | 95 | ||
@@ -117,6 +118,9 @@ module_param_named(gartsize, radeon_gart_size, int, 0600); | |||
117 | MODULE_PARM_DESC(benchmark, "Run benchmark"); | 118 | MODULE_PARM_DESC(benchmark, "Run benchmark"); |
118 | module_param_named(benchmark, radeon_benchmarking, int, 0444); | 119 | module_param_named(benchmark, radeon_benchmarking, int, 0444); |
119 | 120 | ||
121 | MODULE_PARM_DESC(test, "Run tests"); | ||
122 | module_param_named(test, radeon_testing, int, 0444); | ||
123 | |||
120 | MODULE_PARM_DESC(connector_table, "Force connector table"); | 124 | MODULE_PARM_DESC(connector_table, "Force connector table"); |
121 | module_param_named(connector_table, radeon_connector_table, int, 0444); | 125 | module_param_named(connector_table, radeon_connector_table, int, 0444); |
122 | #endif | 126 | #endif |
@@ -314,6 +318,14 @@ static int __init radeon_init(void) | |||
314 | driver = &driver_old; | 318 | driver = &driver_old; |
315 | driver->num_ioctls = radeon_max_ioctl; | 319 | driver->num_ioctls = radeon_max_ioctl; |
316 | #if defined(CONFIG_DRM_RADEON_KMS) | 320 | #if defined(CONFIG_DRM_RADEON_KMS) |
321 | #ifdef CONFIG_VGA_CONSOLE | ||
322 | if (vgacon_text_force() && radeon_modeset == -1) { | ||
323 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); | ||
324 | driver = &driver_old; | ||
325 | driver->driver_features &= ~DRIVER_MODESET; | ||
326 | radeon_modeset = 0; | ||
327 | } | ||
328 | #endif | ||
317 | /* if enabled by default */ | 329 | /* if enabled by default */ |
318 | if (radeon_modeset == -1) { | 330 | if (radeon_modeset == -1) { |
319 | DRM_INFO("radeon default to kernel modesetting.\n"); | 331 | DRM_INFO("radeon default to kernel modesetting.\n"); |
@@ -325,17 +337,8 @@ static int __init radeon_init(void) | |||
325 | driver->driver_features |= DRIVER_MODESET; | 337 | driver->driver_features |= DRIVER_MODESET; |
326 | driver->num_ioctls = radeon_max_kms_ioctl; | 338 | driver->num_ioctls = radeon_max_kms_ioctl; |
327 | } | 339 | } |
328 | |||
329 | /* if the vga console setting is enabled still | 340 | /* if the vga console setting is enabled still |
330 | * let modprobe override it */ | 341 | * let modprobe override it */ |
331 | #ifdef CONFIG_VGA_CONSOLE | ||
332 | if (vgacon_text_force() && radeon_modeset == -1) { | ||
333 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); | ||
334 | driver = &driver_old; | ||
335 | driver->driver_features &= ~DRIVER_MODESET; | ||
336 | radeon_modeset = 0; | ||
337 | } | ||
338 | #endif | ||
339 | #endif | 342 | #endif |
340 | return drm_init(driver); | 343 | return drm_init(driver); |
341 | } | 344 | } |
@@ -345,7 +348,7 @@ static void __exit radeon_exit(void) | |||
345 | drm_exit(driver); | 348 | drm_exit(driver); |
346 | } | 349 | } |
347 | 350 | ||
348 | late_initcall(radeon_init); | 351 | module_init(radeon_init); |
349 | module_exit(radeon_exit); | 352 | module_exit(radeon_exit); |
350 | 353 | ||
351 | MODULE_AUTHOR(DRIVER_AUTHOR); | 354 | MODULE_AUTHOR(DRIVER_AUTHOR); |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 127d0456f628..3933f8216a34 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -143,6 +143,7 @@ enum radeon_family { | |||
143 | CHIP_RV635, | 143 | CHIP_RV635, |
144 | CHIP_RV670, | 144 | CHIP_RV670, |
145 | CHIP_RS780, | 145 | CHIP_RS780, |
146 | CHIP_RS880, | ||
146 | CHIP_RV770, | 147 | CHIP_RV770, |
147 | CHIP_RV730, | 148 | CHIP_RV730, |
148 | CHIP_RV710, | 149 | CHIP_RV710, |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c8ef0d14ffab..0a92706eac19 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -154,7 +154,6 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | |||
154 | 154 | ||
155 | if (mode->hdisplay < native_mode->panel_xres || | 155 | if (mode->hdisplay < native_mode->panel_xres || |
156 | mode->vdisplay < native_mode->panel_yres) { | 156 | mode->vdisplay < native_mode->panel_yres) { |
157 | radeon_encoder->flags |= RADEON_USE_RMX; | ||
158 | if (ASIC_IS_AVIVO(rdev)) { | 157 | if (ASIC_IS_AVIVO(rdev)) { |
159 | adjusted_mode->hdisplay = native_mode->panel_xres; | 158 | adjusted_mode->hdisplay = native_mode->panel_xres; |
160 | adjusted_mode->vdisplay = native_mode->panel_yres; | 159 | adjusted_mode->vdisplay = native_mode->panel_yres; |
@@ -197,15 +196,13 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | |||
197 | } | 196 | } |
198 | } | 197 | } |
199 | 198 | ||
199 | |||
200 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | 200 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, |
201 | struct drm_display_mode *mode, | 201 | struct drm_display_mode *mode, |
202 | struct drm_display_mode *adjusted_mode) | 202 | struct drm_display_mode *adjusted_mode) |
203 | { | 203 | { |
204 | |||
205 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 204 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
206 | 205 | ||
207 | radeon_encoder->flags &= ~RADEON_USE_RMX; | ||
208 | |||
209 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 206 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
210 | 207 | ||
211 | if (radeon_encoder->rmx_type != RMX_OFF) | 208 | if (radeon_encoder->rmx_type != RMX_OFF) |
@@ -808,234 +805,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
808 | 805 | ||
809 | } | 806 | } |
810 | 807 | ||
811 | static void atom_rv515_force_tv_scaler(struct radeon_device *rdev) | ||
812 | { | ||
813 | |||
814 | WREG32(0x659C, 0x0); | ||
815 | WREG32(0x6594, 0x705); | ||
816 | WREG32(0x65A4, 0x10001); | ||
817 | WREG32(0x65D8, 0x0); | ||
818 | WREG32(0x65B0, 0x0); | ||
819 | WREG32(0x65C0, 0x0); | ||
820 | WREG32(0x65D4, 0x0); | ||
821 | WREG32(0x6578, 0x0); | ||
822 | WREG32(0x657C, 0x841880A8); | ||
823 | WREG32(0x6578, 0x1); | ||
824 | WREG32(0x657C, 0x84208680); | ||
825 | WREG32(0x6578, 0x2); | ||
826 | WREG32(0x657C, 0xBFF880B0); | ||
827 | WREG32(0x6578, 0x100); | ||
828 | WREG32(0x657C, 0x83D88088); | ||
829 | WREG32(0x6578, 0x101); | ||
830 | WREG32(0x657C, 0x84608680); | ||
831 | WREG32(0x6578, 0x102); | ||
832 | WREG32(0x657C, 0xBFF080D0); | ||
833 | WREG32(0x6578, 0x200); | ||
834 | WREG32(0x657C, 0x83988068); | ||
835 | WREG32(0x6578, 0x201); | ||
836 | WREG32(0x657C, 0x84A08680); | ||
837 | WREG32(0x6578, 0x202); | ||
838 | WREG32(0x657C, 0xBFF080F8); | ||
839 | WREG32(0x6578, 0x300); | ||
840 | WREG32(0x657C, 0x83588058); | ||
841 | WREG32(0x6578, 0x301); | ||
842 | WREG32(0x657C, 0x84E08660); | ||
843 | WREG32(0x6578, 0x302); | ||
844 | WREG32(0x657C, 0xBFF88120); | ||
845 | WREG32(0x6578, 0x400); | ||
846 | WREG32(0x657C, 0x83188040); | ||
847 | WREG32(0x6578, 0x401); | ||
848 | WREG32(0x657C, 0x85008660); | ||
849 | WREG32(0x6578, 0x402); | ||
850 | WREG32(0x657C, 0xBFF88150); | ||
851 | WREG32(0x6578, 0x500); | ||
852 | WREG32(0x657C, 0x82D88030); | ||
853 | WREG32(0x6578, 0x501); | ||
854 | WREG32(0x657C, 0x85408640); | ||
855 | WREG32(0x6578, 0x502); | ||
856 | WREG32(0x657C, 0xBFF88180); | ||
857 | WREG32(0x6578, 0x600); | ||
858 | WREG32(0x657C, 0x82A08018); | ||
859 | WREG32(0x6578, 0x601); | ||
860 | WREG32(0x657C, 0x85808620); | ||
861 | WREG32(0x6578, 0x602); | ||
862 | WREG32(0x657C, 0xBFF081B8); | ||
863 | WREG32(0x6578, 0x700); | ||
864 | WREG32(0x657C, 0x82608010); | ||
865 | WREG32(0x6578, 0x701); | ||
866 | WREG32(0x657C, 0x85A08600); | ||
867 | WREG32(0x6578, 0x702); | ||
868 | WREG32(0x657C, 0x800081F0); | ||
869 | WREG32(0x6578, 0x800); | ||
870 | WREG32(0x657C, 0x8228BFF8); | ||
871 | WREG32(0x6578, 0x801); | ||
872 | WREG32(0x657C, 0x85E085E0); | ||
873 | WREG32(0x6578, 0x802); | ||
874 | WREG32(0x657C, 0xBFF88228); | ||
875 | WREG32(0x6578, 0x10000); | ||
876 | WREG32(0x657C, 0x82A8BF00); | ||
877 | WREG32(0x6578, 0x10001); | ||
878 | WREG32(0x657C, 0x82A08CC0); | ||
879 | WREG32(0x6578, 0x10002); | ||
880 | WREG32(0x657C, 0x8008BEF8); | ||
881 | WREG32(0x6578, 0x10100); | ||
882 | WREG32(0x657C, 0x81F0BF28); | ||
883 | WREG32(0x6578, 0x10101); | ||
884 | WREG32(0x657C, 0x83608CA0); | ||
885 | WREG32(0x6578, 0x10102); | ||
886 | WREG32(0x657C, 0x8018BED0); | ||
887 | WREG32(0x6578, 0x10200); | ||
888 | WREG32(0x657C, 0x8148BF38); | ||
889 | WREG32(0x6578, 0x10201); | ||
890 | WREG32(0x657C, 0x84408C80); | ||
891 | WREG32(0x6578, 0x10202); | ||
892 | WREG32(0x657C, 0x8008BEB8); | ||
893 | WREG32(0x6578, 0x10300); | ||
894 | WREG32(0x657C, 0x80B0BF78); | ||
895 | WREG32(0x6578, 0x10301); | ||
896 | WREG32(0x657C, 0x85008C20); | ||
897 | WREG32(0x6578, 0x10302); | ||
898 | WREG32(0x657C, 0x8020BEA0); | ||
899 | WREG32(0x6578, 0x10400); | ||
900 | WREG32(0x657C, 0x8028BF90); | ||
901 | WREG32(0x6578, 0x10401); | ||
902 | WREG32(0x657C, 0x85E08BC0); | ||
903 | WREG32(0x6578, 0x10402); | ||
904 | WREG32(0x657C, 0x8018BE90); | ||
905 | WREG32(0x6578, 0x10500); | ||
906 | WREG32(0x657C, 0xBFB8BFB0); | ||
907 | WREG32(0x6578, 0x10501); | ||
908 | WREG32(0x657C, 0x86C08B40); | ||
909 | WREG32(0x6578, 0x10502); | ||
910 | WREG32(0x657C, 0x8010BE90); | ||
911 | WREG32(0x6578, 0x10600); | ||
912 | WREG32(0x657C, 0xBF58BFC8); | ||
913 | WREG32(0x6578, 0x10601); | ||
914 | WREG32(0x657C, 0x87A08AA0); | ||
915 | WREG32(0x6578, 0x10602); | ||
916 | WREG32(0x657C, 0x8010BE98); | ||
917 | WREG32(0x6578, 0x10700); | ||
918 | WREG32(0x657C, 0xBF10BFF0); | ||
919 | WREG32(0x6578, 0x10701); | ||
920 | WREG32(0x657C, 0x886089E0); | ||
921 | WREG32(0x6578, 0x10702); | ||
922 | WREG32(0x657C, 0x8018BEB0); | ||
923 | WREG32(0x6578, 0x10800); | ||
924 | WREG32(0x657C, 0xBED8BFE8); | ||
925 | WREG32(0x6578, 0x10801); | ||
926 | WREG32(0x657C, 0x89408940); | ||
927 | WREG32(0x6578, 0x10802); | ||
928 | WREG32(0x657C, 0xBFE8BED8); | ||
929 | WREG32(0x6578, 0x20000); | ||
930 | WREG32(0x657C, 0x80008000); | ||
931 | WREG32(0x6578, 0x20001); | ||
932 | WREG32(0x657C, 0x90008000); | ||
933 | WREG32(0x6578, 0x20002); | ||
934 | WREG32(0x657C, 0x80008000); | ||
935 | WREG32(0x6578, 0x20003); | ||
936 | WREG32(0x657C, 0x80008000); | ||
937 | WREG32(0x6578, 0x20100); | ||
938 | WREG32(0x657C, 0x80108000); | ||
939 | WREG32(0x6578, 0x20101); | ||
940 | WREG32(0x657C, 0x8FE0BF70); | ||
941 | WREG32(0x6578, 0x20102); | ||
942 | WREG32(0x657C, 0xBFE880C0); | ||
943 | WREG32(0x6578, 0x20103); | ||
944 | WREG32(0x657C, 0x80008000); | ||
945 | WREG32(0x6578, 0x20200); | ||
946 | WREG32(0x657C, 0x8018BFF8); | ||
947 | WREG32(0x6578, 0x20201); | ||
948 | WREG32(0x657C, 0x8F80BF08); | ||
949 | WREG32(0x6578, 0x20202); | ||
950 | WREG32(0x657C, 0xBFD081A0); | ||
951 | WREG32(0x6578, 0x20203); | ||
952 | WREG32(0x657C, 0xBFF88000); | ||
953 | WREG32(0x6578, 0x20300); | ||
954 | WREG32(0x657C, 0x80188000); | ||
955 | WREG32(0x6578, 0x20301); | ||
956 | WREG32(0x657C, 0x8EE0BEC0); | ||
957 | WREG32(0x6578, 0x20302); | ||
958 | WREG32(0x657C, 0xBFB082A0); | ||
959 | WREG32(0x6578, 0x20303); | ||
960 | WREG32(0x657C, 0x80008000); | ||
961 | WREG32(0x6578, 0x20400); | ||
962 | WREG32(0x657C, 0x80188000); | ||
963 | WREG32(0x6578, 0x20401); | ||
964 | WREG32(0x657C, 0x8E00BEA0); | ||
965 | WREG32(0x6578, 0x20402); | ||
966 | WREG32(0x657C, 0xBF8883C0); | ||
967 | WREG32(0x6578, 0x20403); | ||
968 | WREG32(0x657C, 0x80008000); | ||
969 | WREG32(0x6578, 0x20500); | ||
970 | WREG32(0x657C, 0x80188000); | ||
971 | WREG32(0x6578, 0x20501); | ||
972 | WREG32(0x657C, 0x8D00BE90); | ||
973 | WREG32(0x6578, 0x20502); | ||
974 | WREG32(0x657C, 0xBF588500); | ||
975 | WREG32(0x6578, 0x20503); | ||
976 | WREG32(0x657C, 0x80008008); | ||
977 | WREG32(0x6578, 0x20600); | ||
978 | WREG32(0x657C, 0x80188000); | ||
979 | WREG32(0x6578, 0x20601); | ||
980 | WREG32(0x657C, 0x8BC0BE98); | ||
981 | WREG32(0x6578, 0x20602); | ||
982 | WREG32(0x657C, 0xBF308660); | ||
983 | WREG32(0x6578, 0x20603); | ||
984 | WREG32(0x657C, 0x80008008); | ||
985 | WREG32(0x6578, 0x20700); | ||
986 | WREG32(0x657C, 0x80108000); | ||
987 | WREG32(0x6578, 0x20701); | ||
988 | WREG32(0x657C, 0x8A80BEB0); | ||
989 | WREG32(0x6578, 0x20702); | ||
990 | WREG32(0x657C, 0xBF0087C0); | ||
991 | WREG32(0x6578, 0x20703); | ||
992 | WREG32(0x657C, 0x80008008); | ||
993 | WREG32(0x6578, 0x20800); | ||
994 | WREG32(0x657C, 0x80108000); | ||
995 | WREG32(0x6578, 0x20801); | ||
996 | WREG32(0x657C, 0x8920BED0); | ||
997 | WREG32(0x6578, 0x20802); | ||
998 | WREG32(0x657C, 0xBED08920); | ||
999 | WREG32(0x6578, 0x20803); | ||
1000 | WREG32(0x657C, 0x80008010); | ||
1001 | WREG32(0x6578, 0x30000); | ||
1002 | WREG32(0x657C, 0x90008000); | ||
1003 | WREG32(0x6578, 0x30001); | ||
1004 | WREG32(0x657C, 0x80008000); | ||
1005 | WREG32(0x6578, 0x30100); | ||
1006 | WREG32(0x657C, 0x8FE0BF90); | ||
1007 | WREG32(0x6578, 0x30101); | ||
1008 | WREG32(0x657C, 0xBFF880A0); | ||
1009 | WREG32(0x6578, 0x30200); | ||
1010 | WREG32(0x657C, 0x8F60BF40); | ||
1011 | WREG32(0x6578, 0x30201); | ||
1012 | WREG32(0x657C, 0xBFE88180); | ||
1013 | WREG32(0x6578, 0x30300); | ||
1014 | WREG32(0x657C, 0x8EC0BF00); | ||
1015 | WREG32(0x6578, 0x30301); | ||
1016 | WREG32(0x657C, 0xBFC88280); | ||
1017 | WREG32(0x6578, 0x30400); | ||
1018 | WREG32(0x657C, 0x8DE0BEE0); | ||
1019 | WREG32(0x6578, 0x30401); | ||
1020 | WREG32(0x657C, 0xBFA083A0); | ||
1021 | WREG32(0x6578, 0x30500); | ||
1022 | WREG32(0x657C, 0x8CE0BED0); | ||
1023 | WREG32(0x6578, 0x30501); | ||
1024 | WREG32(0x657C, 0xBF7884E0); | ||
1025 | WREG32(0x6578, 0x30600); | ||
1026 | WREG32(0x657C, 0x8BA0BED8); | ||
1027 | WREG32(0x6578, 0x30601); | ||
1028 | WREG32(0x657C, 0xBF508640); | ||
1029 | WREG32(0x6578, 0x30700); | ||
1030 | WREG32(0x657C, 0x8A60BEE8); | ||
1031 | WREG32(0x6578, 0x30701); | ||
1032 | WREG32(0x657C, 0xBF2087A0); | ||
1033 | WREG32(0x6578, 0x30800); | ||
1034 | WREG32(0x657C, 0x8900BF00); | ||
1035 | WREG32(0x6578, 0x30801); | ||
1036 | WREG32(0x657C, 0xBF008900); | ||
1037 | } | ||
1038 | |||
1039 | static void | 808 | static void |
1040 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | 809 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) |
1041 | { | 810 | { |
@@ -1074,129 +843,6 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | |||
1074 | } | 843 | } |
1075 | 844 | ||
1076 | static void | 845 | static void |
1077 | atombios_overscan_setup(struct drm_encoder *encoder, | ||
1078 | struct drm_display_mode *mode, | ||
1079 | struct drm_display_mode *adjusted_mode) | ||
1080 | { | ||
1081 | struct drm_device *dev = encoder->dev; | ||
1082 | struct radeon_device *rdev = dev->dev_private; | ||
1083 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1084 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1085 | SET_CRTC_OVERSCAN_PS_ALLOCATION args; | ||
1086 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); | ||
1087 | |||
1088 | memset(&args, 0, sizeof(args)); | ||
1089 | |||
1090 | args.usOverscanRight = 0; | ||
1091 | args.usOverscanLeft = 0; | ||
1092 | args.usOverscanBottom = 0; | ||
1093 | args.usOverscanTop = 0; | ||
1094 | args.ucCRTC = radeon_crtc->crtc_id; | ||
1095 | |||
1096 | if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
1097 | if (radeon_encoder->rmx_type == RMX_FULL) { | ||
1098 | args.usOverscanRight = 0; | ||
1099 | args.usOverscanLeft = 0; | ||
1100 | args.usOverscanBottom = 0; | ||
1101 | args.usOverscanTop = 0; | ||
1102 | } else if (radeon_encoder->rmx_type == RMX_CENTER) { | ||
1103 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
1104 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
1105 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
1106 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
1107 | } else if (radeon_encoder->rmx_type == RMX_ASPECT) { | ||
1108 | int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | ||
1109 | int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | ||
1110 | |||
1111 | if (a1 > a2) { | ||
1112 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
1113 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
1114 | } else if (a2 > a1) { | ||
1115 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
1116 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
1117 | } | ||
1118 | } | ||
1119 | } | ||
1120 | |||
1121 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1122 | |||
1123 | } | ||
1124 | |||
1125 | static void | ||
1126 | atombios_scaler_setup(struct drm_encoder *encoder) | ||
1127 | { | ||
1128 | struct drm_device *dev = encoder->dev; | ||
1129 | struct radeon_device *rdev = dev->dev_private; | ||
1130 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1131 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1132 | ENABLE_SCALER_PS_ALLOCATION args; | ||
1133 | int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); | ||
1134 | /* fixme - fill in enc_priv for atom dac */ | ||
1135 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
1136 | |||
1137 | if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) | ||
1138 | return; | ||
1139 | |||
1140 | memset(&args, 0, sizeof(args)); | ||
1141 | |||
1142 | args.ucScaler = radeon_crtc->crtc_id; | ||
1143 | |||
1144 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { | ||
1145 | switch (tv_std) { | ||
1146 | case TV_STD_NTSC: | ||
1147 | default: | ||
1148 | args.ucTVStandard = ATOM_TV_NTSC; | ||
1149 | break; | ||
1150 | case TV_STD_PAL: | ||
1151 | args.ucTVStandard = ATOM_TV_PAL; | ||
1152 | break; | ||
1153 | case TV_STD_PAL_M: | ||
1154 | args.ucTVStandard = ATOM_TV_PALM; | ||
1155 | break; | ||
1156 | case TV_STD_PAL_60: | ||
1157 | args.ucTVStandard = ATOM_TV_PAL60; | ||
1158 | break; | ||
1159 | case TV_STD_NTSC_J: | ||
1160 | args.ucTVStandard = ATOM_TV_NTSCJ; | ||
1161 | break; | ||
1162 | case TV_STD_SCART_PAL: | ||
1163 | args.ucTVStandard = ATOM_TV_PAL; /* ??? */ | ||
1164 | break; | ||
1165 | case TV_STD_SECAM: | ||
1166 | args.ucTVStandard = ATOM_TV_SECAM; | ||
1167 | break; | ||
1168 | case TV_STD_PAL_CN: | ||
1169 | args.ucTVStandard = ATOM_TV_PALCN; | ||
1170 | break; | ||
1171 | } | ||
1172 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
1173 | } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) { | ||
1174 | args.ucTVStandard = ATOM_TV_CV; | ||
1175 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
1176 | } else if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
1177 | if (radeon_encoder->rmx_type == RMX_FULL) | ||
1178 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
1179 | else if (radeon_encoder->rmx_type == RMX_CENTER) | ||
1180 | args.ucEnable = ATOM_SCALER_CENTER; | ||
1181 | else if (radeon_encoder->rmx_type == RMX_ASPECT) | ||
1182 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
1183 | } else { | ||
1184 | if (ASIC_IS_AVIVO(rdev)) | ||
1185 | args.ucEnable = ATOM_SCALER_DISABLE; | ||
1186 | else | ||
1187 | args.ucEnable = ATOM_SCALER_CENTER; | ||
1188 | } | ||
1189 | |||
1190 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1191 | |||
1192 | if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) | ||
1193 | && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { | ||
1194 | atom_rv515_force_tv_scaler(rdev); | ||
1195 | } | ||
1196 | |||
1197 | } | ||
1198 | |||
1199 | static void | ||
1200 | radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | 846 | radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) |
1201 | { | 847 | { |
1202 | struct drm_device *dev = encoder->dev; | 848 | struct drm_device *dev = encoder->dev; |
@@ -1448,8 +1094,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1448 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1094 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1449 | 1095 | ||
1450 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | 1096 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
1451 | atombios_overscan_setup(encoder, mode, adjusted_mode); | ||
1452 | atombios_scaler_setup(encoder); | ||
1453 | atombios_set_encoder_crtc_source(encoder); | 1097 | atombios_set_encoder_crtc_source(encoder); |
1454 | 1098 | ||
1455 | if (ASIC_IS_AVIVO(rdev)) { | 1099 | if (ASIC_IS_AVIVO(rdev)) { |
@@ -1667,6 +1311,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1667 | 1311 | ||
1668 | radeon_encoder->encoder_id = encoder_id; | 1312 | radeon_encoder->encoder_id = encoder_id; |
1669 | radeon_encoder->devices = supported_device; | 1313 | radeon_encoder->devices = supported_device; |
1314 | radeon_encoder->rmx_type = RMX_OFF; | ||
1670 | 1315 | ||
1671 | switch (radeon_encoder->encoder_id) { | 1316 | switch (radeon_encoder->encoder_id) { |
1672 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1317 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fa86d398945e..3206c0ad7b6c 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -101,9 +101,10 @@ static int radeonfb_setcolreg(unsigned regno, | |||
101 | break; | 101 | break; |
102 | case 24: | 102 | case 24: |
103 | case 32: | 103 | case 32: |
104 | fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | | 104 | fb->pseudo_palette[regno] = |
105 | (green & 0xff00) | | 105 | (((red >> 8) & 0xff) << info->var.red.offset) | |
106 | ((blue & 0xff00) >> 8); | 106 | (((green >> 8) & 0xff) << info->var.green.offset) | |
107 | (((blue >> 8) & 0xff) << info->var.blue.offset); | ||
107 | break; | 108 | break; |
108 | } | 109 | } |
109 | } | 110 | } |
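
The setcolreg change derives the pseudo-palette entry from the per-channel bit offsets in fb_var_screeninfo instead of a hard-coded layout, so byte-swapped (big-endian) framebuffer formats come out right. A small sketch of that packing in plain C, without the fbdev structs:

#include <stdint.h>

struct chan { unsigned int offset; };   /* stand-in for struct fb_bitfield */

/* fbdev colour components are 16 bits wide; keep the top 8 bits of each
 * and shift them to wherever the current pixel format places the channel. */
static uint32_t pack_pixel(uint16_t red, uint16_t green, uint16_t blue,
                           struct chan r, struct chan g, struct chan b)
{
    return (((uint32_t)(red   >> 8) & 0xff) << r.offset) |
           (((uint32_t)(green >> 8) & 0xff) << g.offset) |
           (((uint32_t)(blue  >> 8) & 0xff) << b.offset);
}
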
@@ -154,6 +155,7 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, | |||
154 | var->transp.length = 0; | 155 | var->transp.length = 0; |
155 | var->transp.offset = 0; | 156 | var->transp.offset = 0; |
156 | break; | 157 | break; |
158 | #ifdef __LITTLE_ENDIAN | ||
157 | case 15: | 159 | case 15: |
158 | var->red.offset = 10; | 160 | var->red.offset = 10; |
159 | var->green.offset = 5; | 161 | var->green.offset = 5; |
@@ -194,6 +196,28 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, | |||
194 | var->transp.length = 8; | 196 | var->transp.length = 8; |
195 | var->transp.offset = 24; | 197 | var->transp.offset = 24; |
196 | break; | 198 | break; |
199 | #else | ||
200 | case 24: | ||
201 | var->red.offset = 8; | ||
202 | var->green.offset = 16; | ||
203 | var->blue.offset = 24; | ||
204 | var->red.length = 8; | ||
205 | var->green.length = 8; | ||
206 | var->blue.length = 8; | ||
207 | var->transp.length = 0; | ||
208 | var->transp.offset = 0; | ||
209 | break; | ||
210 | case 32: | ||
211 | var->red.offset = 8; | ||
212 | var->green.offset = 16; | ||
213 | var->blue.offset = 24; | ||
214 | var->red.length = 8; | ||
215 | var->green.length = 8; | ||
216 | var->blue.length = 8; | ||
217 | var->transp.length = 8; | ||
218 | var->transp.offset = 0; | ||
219 | break; | ||
220 | #endif | ||
197 | default: | 221 | default: |
198 | return -EINVAL; | 222 | return -EINVAL; |
199 | } | 223 | } |
@@ -447,10 +471,10 @@ static struct notifier_block paniced = { | |||
447 | .notifier_call = radeonfb_panic, | 471 | .notifier_call = radeonfb_panic, |
448 | }; | 472 | }; |
449 | 473 | ||
450 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp) | 474 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
451 | { | 475 | { |
452 | int aligned = width; | 476 | int aligned = width; |
453 | int align_large = (ASIC_IS_AVIVO(rdev)); | 477 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
454 | int pitch_mask = 0; | 478 | int pitch_mask = 0; |
455 | 479 | ||
456 | switch (bpp / 8) { | 480 | switch (bpp / 8) { |
@@ -478,36 +502,42 @@ int radeonfb_create(struct radeon_device *rdev, | |||
478 | { | 502 | { |
479 | struct fb_info *info; | 503 | struct fb_info *info; |
480 | struct radeon_fb_device *rfbdev; | 504 | struct radeon_fb_device *rfbdev; |
481 | struct drm_framebuffer *fb; | 505 | struct drm_framebuffer *fb = NULL; |
482 | struct radeon_framebuffer *rfb; | 506 | struct radeon_framebuffer *rfb; |
483 | struct drm_mode_fb_cmd mode_cmd; | 507 | struct drm_mode_fb_cmd mode_cmd; |
484 | struct drm_gem_object *gobj = NULL; | 508 | struct drm_gem_object *gobj = NULL; |
485 | struct radeon_object *robj = NULL; | 509 | struct radeon_object *robj = NULL; |
486 | struct device *device = &rdev->pdev->dev; | 510 | struct device *device = &rdev->pdev->dev; |
487 | int size, aligned_size, ret; | 511 | int size, aligned_size, ret; |
512 | u64 fb_gpuaddr; | ||
488 | void *fbptr = NULL; | 513 | void *fbptr = NULL; |
514 | unsigned long tmp; | ||
515 | bool fb_tiled = false; /* useful for testing */ | ||
489 | 516 | ||
490 | mode_cmd.width = surface_width; | 517 | mode_cmd.width = surface_width; |
491 | mode_cmd.height = surface_height; | 518 | mode_cmd.height = surface_height; |
492 | mode_cmd.bpp = 32; | 519 | mode_cmd.bpp = 32; |
493 | /* need to align pitch with crtc limits */ | 520 | /* need to align pitch with crtc limits */ |
494 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); | 521 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
495 | mode_cmd.depth = 24; | 522 | mode_cmd.depth = 24; |
496 | 523 | ||
497 | size = mode_cmd.pitch * mode_cmd.height; | 524 | size = mode_cmd.pitch * mode_cmd.height; |
498 | aligned_size = ALIGN(size, PAGE_SIZE); | 525 | aligned_size = ALIGN(size, PAGE_SIZE); |
499 | 526 | ||
500 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 527 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
501 | RADEON_GEM_DOMAIN_VRAM, | 528 | RADEON_GEM_DOMAIN_VRAM, |
502 | false, ttm_bo_type_kernel, | 529 | false, ttm_bo_type_kernel, |
503 | false, &gobj); | 530 | false, &gobj); |
504 | if (ret) { | 531 | if (ret) { |
505 | printk(KERN_ERR "failed to allocate framebuffer\n"); | 532 | printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", |
533 | surface_width, surface_height); | ||
506 | ret = -ENOMEM; | 534 | ret = -ENOMEM; |
507 | goto out; | 535 | goto out; |
508 | } | 536 | } |
509 | robj = gobj->driver_private; | 537 | robj = gobj->driver_private; |
510 | 538 | ||
539 | if (fb_tiled) | ||
540 | radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch); | ||
511 | mutex_lock(&rdev->ddev->struct_mutex); | 541 | mutex_lock(&rdev->ddev->struct_mutex); |
512 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); | 542 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); |
513 | if (fb == NULL) { | 543 | if (fb == NULL) { |
@@ -515,12 +545,19 @@ int radeonfb_create(struct radeon_device *rdev, | |||
515 | ret = -ENOMEM; | 545 | ret = -ENOMEM; |
516 | goto out_unref; | 546 | goto out_unref; |
517 | } | 547 | } |
548 | ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); | ||
549 | if (ret) { | ||
550 | printk(KERN_ERR "failed to pin framebuffer\n"); | ||
551 | ret = -ENOMEM; | ||
552 | goto out_unref; | ||
553 | } | ||
518 | 554 | ||
519 | list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); | 555 | list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); |
520 | 556 | ||
521 | rfb = to_radeon_framebuffer(fb); | 557 | rfb = to_radeon_framebuffer(fb); |
522 | *rfb_p = rfb; | 558 | *rfb_p = rfb; |
523 | rdev->fbdev_rfb = rfb; | 559 | rdev->fbdev_rfb = rfb; |
560 | rdev->fbdev_robj = robj; | ||
524 | 561 | ||
525 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); | 562 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); |
526 | if (info == NULL) { | 563 | if (info == NULL) { |
@@ -529,6 +566,9 @@ int radeonfb_create(struct radeon_device *rdev, | |||
529 | } | 566 | } |
530 | rfbdev = info->par; | 567 | rfbdev = info->par; |
531 | 568 | ||
569 | if (fb_tiled) | ||
570 | radeon_object_check_tiling(robj, 0, 0); | ||
571 | |||
532 | ret = radeon_object_kmap(robj, &fbptr); | 572 | ret = radeon_object_kmap(robj, &fbptr); |
533 | if (ret) { | 573 | if (ret) { |
534 | goto out_unref; | 574 | goto out_unref; |
@@ -541,13 +581,13 @@ int radeonfb_create(struct radeon_device *rdev, | |||
541 | info->fix.xpanstep = 1; /* doing it in hw */ | 581 | info->fix.xpanstep = 1; /* doing it in hw */ |
542 | info->fix.ypanstep = 1; /* doing it in hw */ | 582 | info->fix.ypanstep = 1; /* doing it in hw */ |
543 | info->fix.ywrapstep = 0; | 583 | info->fix.ywrapstep = 0; |
544 | info->fix.accel = FB_ACCEL_I830; | 584 | info->fix.accel = FB_ACCEL_NONE; |
545 | info->fix.type_aux = 0; | 585 | info->fix.type_aux = 0; |
546 | info->flags = FBINFO_DEFAULT; | 586 | info->flags = FBINFO_DEFAULT; |
547 | info->fbops = &radeonfb_ops; | 587 | info->fbops = &radeonfb_ops; |
548 | info->fix.line_length = fb->pitch; | 588 | info->fix.line_length = fb->pitch; |
549 | info->screen_base = fbptr; | 589 | tmp = fb_gpuaddr - rdev->mc.vram_location; |
550 | info->fix.smem_start = (unsigned long)fbptr; | 590 | info->fix.smem_start = rdev->mc.aper_base + tmp; |
551 | info->fix.smem_len = size; | 591 | info->fix.smem_len = size; |
552 | info->screen_base = fbptr; | 592 | info->screen_base = fbptr; |
553 | info->screen_size = size; | 593 | info->screen_size = size; |
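
smem_start is now the CPU-visible address of the pinned framebuffer: the object's GPU address minus the start of VRAM gives its offset inside VRAM, and the same offset inside the PCI aperture is where the CPU can reach it. A sketch of that translation with illustrative names:

#include <stdint.h>

/* A VRAM object pinned at fb_gpuaddr in the GPU address space is visible
 * to the CPU at the same offset inside the VRAM BAR. */
static uint64_t fb_bus_addr(uint64_t fb_gpuaddr,
                            uint64_t vram_location,   /* GPU base of VRAM */
                            uint64_t aper_base)       /* PCI base of the VRAM BAR */
{
    return aper_base + (fb_gpuaddr - vram_location);
}
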
@@ -562,8 +602,13 @@ int radeonfb_create(struct radeon_device *rdev, | |||
562 | info->var.width = -1; | 602 | info->var.width = -1; |
563 | info->var.xres = fb_width; | 603 | info->var.xres = fb_width; |
564 | info->var.yres = fb_height; | 604 | info->var.yres = fb_height; |
565 | info->fix.mmio_start = pci_resource_start(rdev->pdev, 2); | 605 | |
566 | info->fix.mmio_len = pci_resource_len(rdev->pdev, 2); | 606 | /* setup aperture base/size for vesafb takeover */ |
607 | info->aperture_base = rdev->ddev->mode_config.fb_base; | ||
608 | info->aperture_size = rdev->mc.real_vram_size; | ||
609 | |||
610 | info->fix.mmio_start = 0; | ||
611 | info->fix.mmio_len = 0; | ||
567 | info->pixmap.size = 64*1024; | 612 | info->pixmap.size = 64*1024; |
568 | info->pixmap.buf_align = 8; | 613 | info->pixmap.buf_align = 8; |
569 | info->pixmap.access_align = 32; | 614 | info->pixmap.access_align = 32; |
@@ -590,6 +635,7 @@ int radeonfb_create(struct radeon_device *rdev, | |||
590 | info->var.transp.offset = 0; | 635 | info->var.transp.offset = 0; |
591 | info->var.transp.length = 0; | 636 | info->var.transp.length = 0; |
592 | break; | 637 | break; |
638 | #ifdef __LITTLE_ENDIAN | ||
593 | case 15: | 639 | case 15: |
594 | info->var.red.offset = 10; | 640 | info->var.red.offset = 10; |
595 | info->var.green.offset = 5; | 641 | info->var.green.offset = 5; |
@@ -629,7 +675,29 @@ int radeonfb_create(struct radeon_device *rdev, | |||
629 | info->var.transp.offset = 24; | 675 | info->var.transp.offset = 24; |
630 | info->var.transp.length = 8; | 676 | info->var.transp.length = 8; |
631 | break; | 677 | break; |
678 | #else | ||
679 | case 24: | ||
680 | info->var.red.offset = 8; | ||
681 | info->var.green.offset = 16; | ||
682 | info->var.blue.offset = 24; | ||
683 | info->var.red.length = 8; | ||
684 | info->var.green.length = 8; | ||
685 | info->var.blue.length = 8; | ||
686 | info->var.transp.offset = 0; | ||
687 | info->var.transp.length = 0; | ||
688 | break; | ||
689 | case 32: | ||
690 | info->var.red.offset = 8; | ||
691 | info->var.green.offset = 16; | ||
692 | info->var.blue.offset = 24; | ||
693 | info->var.red.length = 8; | ||
694 | info->var.green.length = 8; | ||
695 | info->var.blue.length = 8; | ||
696 | info->var.transp.offset = 0; | ||
697 | info->var.transp.length = 8; | ||
698 | break; | ||
632 | default: | 699 | default: |
700 | #endif | ||
633 | break; | 701 | break; |
634 | } | 702 | } |
635 | 703 | ||
@@ -644,7 +712,7 @@ out_unref: | |||
644 | if (robj) { | 712 | if (robj) { |
645 | radeon_object_kunmap(robj); | 713 | radeon_object_kunmap(robj); |
646 | } | 714 | } |
647 | if (ret) { | 715 | if (fb && ret) { |
648 | list_del(&fb->filp_head); | 716 | list_del(&fb->filp_head); |
649 | drm_gem_object_unreference(gobj); | 717 | drm_gem_object_unreference(gobj); |
650 | drm_framebuffer_cleanup(fb); | 718 | drm_framebuffer_cleanup(fb); |
@@ -813,6 +881,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | |||
813 | robj = rfb->obj->driver_private; | 881 | robj = rfb->obj->driver_private; |
814 | unregister_framebuffer(info); | 882 | unregister_framebuffer(info); |
815 | radeon_object_kunmap(robj); | 883 | radeon_object_kunmap(robj); |
884 | radeon_object_unpin(robj); | ||
816 | framebuffer_release(info); | 885 | framebuffer_release(info); |
817 | } | 886 | } |
818 | 887 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 96afbf5ae2ad..b4e48dd2e859 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -195,7 +195,7 @@ retry: | |||
195 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, | 195 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, |
196 | radeon_fence_signaled(fence), timeout); | 196 | radeon_fence_signaled(fence), timeout); |
197 | if (unlikely(r == -ERESTARTSYS)) { | 197 | if (unlikely(r == -ERESTARTSYS)) { |
198 | return -ERESTART; | 198 | return -EBUSY; |
199 | } | 199 | } |
200 | } else { | 200 | } else { |
201 | r = wait_event_timeout(rdev->fence_drv.queue, | 201 | r = wait_event_timeout(rdev->fence_drv.queue, |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index d343a15316ec..2977539880fb 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -177,7 +177,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
177 | return -ENOMEM; | 177 | return -ENOMEM; |
178 | } | 178 | } |
179 | rdev->gart.pages[p] = pagelist[i]; | 179 | rdev->gart.pages[p] = pagelist[i]; |
180 | page_base = (uint32_t)rdev->gart.pages_addr[p]; | 180 | page_base = rdev->gart.pages_addr[p]; |
181 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { | 181 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { |
182 | radeon_gart_set_page(rdev, t, page_base); | 182 | radeon_gart_set_page(rdev, t, page_base); |
183 | page_base += 4096; | 183 | page_base += 4096; |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index eb516034235d..cded5180c752 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -157,9 +157,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
157 | struct radeon_device *rdev = dev->dev_private; | 157 | struct radeon_device *rdev = dev->dev_private; |
158 | struct drm_radeon_gem_info *args = data; | 158 | struct drm_radeon_gem_info *args = data; |
159 | 159 | ||
160 | args->vram_size = rdev->mc.vram_size; | 160 | args->vram_size = rdev->mc.real_vram_size; |
161 | /* FIXME: report somethings that makes sense */ | 161 | /* FIXME: report somethings that makes sense */ |
162 | args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024); | 162 | args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); |
163 | args->gart_size = rdev->mc.gtt_size; | 163 | args->gart_size = rdev->mc.gtt_size; |
164 | return 0; | 164 | return 0; |
165 | } | 165 | } |
@@ -285,3 +285,44 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
285 | mutex_unlock(&dev->struct_mutex); | 285 | mutex_unlock(&dev->struct_mutex); |
286 | return r; | 286 | return r; |
287 | } | 287 | } |
288 | |||
289 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | ||
290 | struct drm_file *filp) | ||
291 | { | ||
292 | struct drm_radeon_gem_set_tiling *args = data; | ||
293 | struct drm_gem_object *gobj; | ||
294 | struct radeon_object *robj; | ||
295 | int r = 0; | ||
296 | |||
297 | DRM_DEBUG("%d \n", args->handle); | ||
298 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
299 | if (gobj == NULL) | ||
300 | return -EINVAL; | ||
301 | robj = gobj->driver_private; | ||
302 | radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); | ||
303 | mutex_lock(&dev->struct_mutex); | ||
304 | drm_gem_object_unreference(gobj); | ||
305 | mutex_unlock(&dev->struct_mutex); | ||
306 | return r; | ||
307 | } | ||
308 | |||
309 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | ||
310 | struct drm_file *filp) | ||
311 | { | ||
312 | struct drm_radeon_gem_get_tiling *args = data; | ||
313 | struct drm_gem_object *gobj; | ||
314 | struct radeon_object *robj; | ||
315 | int r = 0; | ||
316 | |||
317 | DRM_DEBUG("\n"); | ||
318 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
319 | if (gobj == NULL) | ||
320 | return -EINVAL; | ||
321 | robj = gobj->driver_private; | ||
322 | radeon_object_get_tiling_flags(robj, &args->tiling_flags, | ||
323 | &args->pitch); | ||
324 | mutex_lock(&dev->struct_mutex); | ||
325 | drm_gem_object_unreference(gobj); | ||
326 | mutex_unlock(&dev->struct_mutex); | ||
327 | return r; | ||
328 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4612a7c146d1..3357110e30ce 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -58,6 +58,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
58 | if (r) { | 58 | if (r) { |
59 | DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); | 59 | DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); |
60 | radeon_device_fini(rdev); | 60 | radeon_device_fini(rdev); |
61 | kfree(rdev); | ||
62 | dev->dev_private = NULL; | ||
61 | return r; | 63 | return r; |
62 | } | 64 | } |
63 | return 0; | 65 | return 0; |
@@ -291,5 +293,7 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = { | |||
291 | DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), | 293 | DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), |
292 | DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), | 294 | DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), |
293 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), | 295 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), |
296 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), | ||
297 | DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), | ||
294 | }; | 298 | }; |
295 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); | 299 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8086ecf7f03d..7d06dc98a42a 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -29,6 +29,171 @@ | |||
29 | #include "radeon_fixed.h" | 29 | #include "radeon_fixed.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | 31 | ||
32 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | ||
33 | struct drm_display_mode *mode, | ||
34 | struct drm_display_mode *adjusted_mode) | ||
35 | { | ||
36 | struct drm_device *dev = crtc->dev; | ||
37 | struct radeon_device *rdev = dev->dev_private; | ||
38 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
39 | int xres = mode->hdisplay; | ||
40 | int yres = mode->vdisplay; | ||
41 | bool hscale = true, vscale = true; | ||
42 | int hsync_wid; | ||
43 | int vsync_wid; | ||
44 | int hsync_start; | ||
45 | int blank_width; | ||
46 | u32 scale, inc, crtc_more_cntl; | ||
47 | u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; | ||
48 | u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; | ||
49 | u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; | ||
50 | struct radeon_native_mode *native_mode = &radeon_crtc->native_mode; | ||
51 | |||
52 | fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & | ||
53 | (RADEON_VERT_STRETCH_RESERVED | | ||
54 | RADEON_VERT_AUTO_RATIO_INC); | ||
55 | fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & | ||
56 | (RADEON_HORZ_FP_LOOP_STRETCH | | ||
57 | RADEON_HORZ_AUTO_RATIO_INC); | ||
58 | |||
59 | crtc_more_cntl = 0; | ||
60 | if ((rdev->family == CHIP_RS100) || | ||
61 | (rdev->family == CHIP_RS200)) { | ||
62 | /* This is to workaround the asic bug for RMX, some versions | ||
63 | of BIOS dosen't have this register initialized correctly. */ | ||
64 | crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; | ||
65 | } | ||
66 | |||
67 | |||
68 | fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) | ||
69 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
70 | |||
71 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
72 | if (!hsync_wid) | ||
73 | hsync_wid = 1; | ||
74 | hsync_start = mode->crtc_hsync_start - 8; | ||
75 | |||
76 | fp_h_sync_strt_wid = ((hsync_start & 0x1fff) | ||
77 | | ((hsync_wid & 0x3f) << 16) | ||
78 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
79 | ? RADEON_CRTC_H_SYNC_POL | ||
80 | : 0)); | ||
81 | |||
82 | fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) | ||
83 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
84 | |||
85 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
86 | if (!vsync_wid) | ||
87 | vsync_wid = 1; | ||
88 | |||
89 | fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) | ||
90 | | ((vsync_wid & 0x1f) << 16) | ||
91 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
92 | ? RADEON_CRTC_V_SYNC_POL | ||
93 | : 0)); | ||
94 | |||
95 | fp_horz_vert_active = 0; | ||
96 | |||
97 | if (native_mode->panel_xres == 0 || | ||
98 | native_mode->panel_yres == 0) { | ||
99 | hscale = false; | ||
100 | vscale = false; | ||
101 | } else { | ||
102 | if (xres > native_mode->panel_xres) | ||
103 | xres = native_mode->panel_xres; | ||
104 | if (yres > native_mode->panel_yres) | ||
105 | yres = native_mode->panel_yres; | ||
106 | |||
107 | if (xres == native_mode->panel_xres) | ||
108 | hscale = false; | ||
109 | if (yres == native_mode->panel_yres) | ||
110 | vscale = false; | ||
111 | } | ||
112 | |||
113 | switch (radeon_crtc->rmx_type) { | ||
114 | case RMX_FULL: | ||
115 | case RMX_ASPECT: | ||
116 | if (!hscale) | ||
117 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
118 | else { | ||
119 | inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; | ||
120 | scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) | ||
121 | / native_mode->panel_xres + 1; | ||
122 | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | | ||
123 | RADEON_HORZ_STRETCH_BLEND | | ||
124 | RADEON_HORZ_STRETCH_ENABLE | | ||
125 | ((native_mode->panel_xres/8-1) << 16)); | ||
126 | } | ||
127 | |||
128 | if (!vscale) | ||
129 | fp_vert_stretch |= ((yres-1) << 12); | ||
130 | else { | ||
131 | inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; | ||
132 | scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) | ||
133 | / native_mode->panel_yres + 1; | ||
134 | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | | ||
135 | RADEON_VERT_STRETCH_ENABLE | | ||
136 | RADEON_VERT_STRETCH_BLEND | | ||
137 | ((native_mode->panel_yres-1) << 12)); | ||
138 | } | ||
139 | break; | ||
140 | case RMX_CENTER: | ||
141 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
142 | fp_vert_stretch |= ((yres-1) << 12); | ||
143 | |||
144 | crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | | ||
145 | RADEON_CRTC_AUTO_VERT_CENTER_EN); | ||
146 | |||
147 | blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; | ||
148 | if (blank_width > 110) | ||
149 | blank_width = 110; | ||
150 | |||
151 | fp_crtc_h_total_disp = (((blank_width) & 0x3ff) | ||
152 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
153 | |||
154 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
155 | if (!hsync_wid) | ||
156 | hsync_wid = 1; | ||
157 | |||
158 | fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) | ||
159 | | ((hsync_wid & 0x3f) << 16) | ||
160 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
161 | ? RADEON_CRTC_H_SYNC_POL | ||
162 | : 0)); | ||
163 | |||
164 | fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) | ||
165 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
166 | |||
167 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
168 | if (!vsync_wid) | ||
169 | vsync_wid = 1; | ||
170 | |||
171 | fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) | ||
172 | | ((vsync_wid & 0x1f) << 16) | ||
173 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
174 | ? RADEON_CRTC_V_SYNC_POL | ||
175 | : 0))); | ||
176 | |||
177 | fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | | ||
178 | (((native_mode->panel_xres / 8) & 0x1ff) << 16)); | ||
179 | break; | ||
180 | case RMX_OFF: | ||
181 | default: | ||
182 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
183 | fp_vert_stretch |= ((yres-1) << 12); | ||
184 | break; | ||
185 | } | ||
186 | |||
187 | WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); | ||
188 | WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); | ||
189 | WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); | ||
190 | WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); | ||
191 | WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); | ||
192 | WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); | ||
193 | WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); | ||
194 | WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); | ||
195 | } | ||
196 | |||
32 | void radeon_restore_common_regs(struct drm_device *dev) | 197 | void radeon_restore_common_regs(struct drm_device *dev) |
33 | { | 198 | { |
34 | /* don't need this yet */ | 199 | /* don't need this yet */ |
@@ -235,6 +400,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
235 | uint64_t base; | 400 | uint64_t base; |
236 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; | 401 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; |
237 | uint32_t crtc_pitch, pitch_pixels; | 402 | uint32_t crtc_pitch, pitch_pixels; |
403 | uint32_t tiling_flags; | ||
238 | 404 | ||
239 | DRM_DEBUG("\n"); | 405 | DRM_DEBUG("\n"); |
240 | 406 | ||
@@ -244,7 +410,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
244 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { | 410 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { |
245 | return -EINVAL; | 411 | return -EINVAL; |
246 | } | 412 | } |
247 | crtc_offset = (u32)base; | 413 | /* if scanout was in GTT this really wouldn't work */ |
414 | /* crtc offset is from display base addr not FB location */ | ||
415 | radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; | ||
416 | |||
417 | base -= radeon_crtc->legacy_display_base_addr; | ||
418 | |||
248 | crtc_offset_cntl = 0; | 419 | crtc_offset_cntl = 0; |
249 | 420 | ||
250 | pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); | 421 | pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); |
@@ -253,8 +424,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
253 | (crtc->fb->bits_per_pixel * 8)); | 424 | (crtc->fb->bits_per_pixel * 8)); |
254 | crtc_pitch |= crtc_pitch << 16; | 425 | crtc_pitch |= crtc_pitch << 16; |
255 | 426 | ||
256 | /* TODO tiling */ | 427 | radeon_object_get_tiling_flags(obj->driver_private, |
257 | if (0) { | 428 | &tiling_flags, NULL); |
429 | if (tiling_flags & RADEON_TILING_MICRO) | ||
430 | DRM_ERROR("trying to scanout microtiled buffer\n"); | ||
431 | |||
432 | if (tiling_flags & RADEON_TILING_MACRO) { | ||
258 | if (ASIC_IS_R300(rdev)) | 433 | if (ASIC_IS_R300(rdev)) |
259 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | | 434 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | |
260 | R300_CRTC_MICRO_TILE_BUFFER_DIS | | 435 | R300_CRTC_MICRO_TILE_BUFFER_DIS | |
@@ -270,15 +445,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
270 | crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; | 445 | crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; |
271 | } | 446 | } |
272 | 447 | ||
273 | 448 | if (tiling_flags & RADEON_TILING_MACRO) { | |
274 | /* TODO more tiling */ | ||
275 | if (0) { | ||
276 | if (ASIC_IS_R300(rdev)) { | 449 | if (ASIC_IS_R300(rdev)) { |
277 | crtc_tile_x0_y0 = x | (y << 16); | 450 | crtc_tile_x0_y0 = x | (y << 16); |
278 | base &= ~0x7ff; | 451 | base &= ~0x7ff; |
279 | } else { | 452 | } else { |
280 | int byteshift = crtc->fb->bits_per_pixel >> 4; | 453 | int byteshift = crtc->fb->bits_per_pixel >> 4; |
281 | int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; | 454 | int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; |
282 | base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); | 455 | base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); |
283 | crtc_offset_cntl |= (y % 16); | 456 | crtc_offset_cntl |= (y % 16); |
284 | } | 457 | } |
@@ -303,11 +476,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
303 | 476 | ||
304 | base &= ~7; | 477 | base &= ~7; |
305 | 478 | ||
306 | /* update sarea TODO */ | ||
307 | |||
308 | crtc_offset = (u32)base; | 479 | crtc_offset = (u32)base; |
309 | 480 | ||
310 | WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location); | 481 | WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr); |
311 | 482 | ||
312 | if (ASIC_IS_R300(rdev)) { | 483 | if (ASIC_IS_R300(rdev)) { |
313 | if (radeon_crtc->crtc_id) | 484 | if (radeon_crtc->crtc_id) |
@@ -751,6 +922,8 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, | |||
751 | struct drm_display_mode *mode, | 922 | struct drm_display_mode *mode, |
752 | struct drm_display_mode *adjusted_mode) | 923 | struct drm_display_mode *adjusted_mode) |
753 | { | 924 | { |
925 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | ||
926 | return false; | ||
754 | return true; | 927 | return true; |
755 | } | 928 | } |
756 | 929 | ||
@@ -759,16 +932,25 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
759 | struct drm_display_mode *adjusted_mode, | 932 | struct drm_display_mode *adjusted_mode, |
760 | int x, int y, struct drm_framebuffer *old_fb) | 933 | int x, int y, struct drm_framebuffer *old_fb) |
761 | { | 934 | { |
762 | 935 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | |
763 | DRM_DEBUG("\n"); | 936 | struct drm_device *dev = crtc->dev; |
937 | struct radeon_device *rdev = dev->dev_private; | ||
764 | 938 | ||
765 | /* TODO TV */ | 939 | /* TODO TV */ |
766 | |||
767 | radeon_crtc_set_base(crtc, x, y, old_fb); | 940 | radeon_crtc_set_base(crtc, x, y, old_fb); |
768 | radeon_set_crtc_timing(crtc, adjusted_mode); | 941 | radeon_set_crtc_timing(crtc, adjusted_mode); |
769 | radeon_set_pll(crtc, adjusted_mode); | 942 | radeon_set_pll(crtc, adjusted_mode); |
770 | radeon_init_disp_bandwidth(crtc->dev); | 943 | radeon_bandwidth_update(rdev); |
771 | 944 | if (radeon_crtc->crtc_id == 0) { | |
945 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); | ||
946 | } else { | ||
947 | if (radeon_crtc->rmx_type != RMX_OFF) { | ||
948 | /* FIXME: only the first crtc has RMX; what should | ||
949 | * we do here? | ||
950 | */ | ||
951 | DRM_ERROR("Mode needs scaling but only the first crtc can do that.\n"); | ||
952 | } | ||
953 | } | ||
772 | return 0; | 954 | return 0; |
773 | } | 955 | } |
774 | 956 | ||
@@ -799,478 +981,3 @@ void radeon_legacy_init_crtc(struct drm_device *dev, | |||
799 | radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; | 981 | radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; |
800 | drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); | 982 | drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); |
801 | } | 983 | } |
802 | |||
803 | void radeon_init_disp_bw_legacy(struct drm_device *dev, | ||
804 | struct drm_display_mode *mode1, | ||
805 | uint32_t pixel_bytes1, | ||
806 | struct drm_display_mode *mode2, | ||
807 | uint32_t pixel_bytes2) | ||
808 | { | ||
809 | struct radeon_device *rdev = dev->dev_private; | ||
810 | fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; | ||
811 | fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; | ||
812 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; | ||
813 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; | ||
814 | fixed20_12 memtcas_ff[8] = { | ||
815 | fixed_init(1), | ||
816 | fixed_init(2), | ||
817 | fixed_init(3), | ||
818 | fixed_init(0), | ||
819 | fixed_init_half(1), | ||
820 | fixed_init_half(2), | ||
821 | fixed_init(0), | ||
822 | }; | ||
823 | fixed20_12 memtcas_rs480_ff[8] = { | ||
824 | fixed_init(0), | ||
825 | fixed_init(1), | ||
826 | fixed_init(2), | ||
827 | fixed_init(3), | ||
828 | fixed_init(0), | ||
829 | fixed_init_half(1), | ||
830 | fixed_init_half(2), | ||
831 | fixed_init_half(3), | ||
832 | }; | ||
833 | fixed20_12 memtcas2_ff[8] = { | ||
834 | fixed_init(0), | ||
835 | fixed_init(1), | ||
836 | fixed_init(2), | ||
837 | fixed_init(3), | ||
838 | fixed_init(4), | ||
839 | fixed_init(5), | ||
840 | fixed_init(6), | ||
841 | fixed_init(7), | ||
842 | }; | ||
843 | fixed20_12 memtrbs[8] = { | ||
844 | fixed_init(1), | ||
845 | fixed_init_half(1), | ||
846 | fixed_init(2), | ||
847 | fixed_init_half(2), | ||
848 | fixed_init(3), | ||
849 | fixed_init_half(3), | ||
850 | fixed_init(4), | ||
851 | fixed_init_half(4) | ||
852 | }; | ||
853 | fixed20_12 memtrbs_r4xx[8] = { | ||
854 | fixed_init(4), | ||
855 | fixed_init(5), | ||
856 | fixed_init(6), | ||
857 | fixed_init(7), | ||
858 | fixed_init(8), | ||
859 | fixed_init(9), | ||
860 | fixed_init(10), | ||
861 | fixed_init(11) | ||
862 | }; | ||
863 | fixed20_12 min_mem_eff; | ||
864 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; | ||
865 | fixed20_12 cur_latency_mclk, cur_latency_sclk; | ||
866 | fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, | ||
867 | disp_drain_rate2, read_return_rate; | ||
868 | fixed20_12 time_disp1_drop_priority; | ||
869 | int c; | ||
870 | int cur_size = 16; /* in octawords */ | ||
871 | int critical_point = 0, critical_point2; | ||
872 | /* uint32_t read_return_rate, time_disp1_drop_priority; */ | ||
873 | int stop_req, max_stop_req; | ||
874 | |||
875 | min_mem_eff.full = rfixed_const_8(0); | ||
876 | /* get modes */ | ||
877 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { | ||
878 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); | ||
879 | mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
880 | mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
881 | /* check crtc enables */ | ||
882 | if (mode2) | ||
883 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
884 | if (mode1) | ||
885 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
886 | WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); | ||
887 | } | ||
888 | |||
889 | /* | ||
890 | * determine if there is enough bandwidth for the current mode | ||
891 | */ | ||
892 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
893 | temp_ff.full = rfixed_const(100); | ||
894 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
895 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
896 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
897 | |||
898 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
899 | temp_ff.full = rfixed_const(temp); | ||
900 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
901 | |||
902 | pix_clk.full = 0; | ||
903 | pix_clk2.full = 0; | ||
904 | peak_disp_bw.full = 0; | ||
905 | if (mode1) { | ||
906 | temp_ff.full = rfixed_const(1000); | ||
907 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
908 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
909 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
910 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
911 | } | ||
912 | if (mode2) { | ||
913 | temp_ff.full = rfixed_const(1000); | ||
914 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
915 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
916 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
917 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
918 | } | ||
919 | |||
920 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
921 | if (peak_disp_bw.full >= mem_bw.full) { | ||
922 | DRM_ERROR("You may not have enough display bandwidth for current mode\n" | ||
923 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | ||
924 | } | ||
925 | |||
926 | /* Get values from the EXT_MEM_CNTL register...converting its contents. */ | ||
927 | temp = RREG32(RADEON_MEM_TIMING_CNTL); | ||
928 | if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ | ||
929 | mem_trcd = ((temp >> 2) & 0x3) + 1; | ||
930 | mem_trp = ((temp & 0x3)) + 1; | ||
931 | mem_tras = ((temp & 0x70) >> 4) + 1; | ||
932 | } else if (rdev->family == CHIP_R300 || | ||
933 | rdev->family == CHIP_R350) { /* r300, r350 */ | ||
934 | mem_trcd = (temp & 0x7) + 1; | ||
935 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
936 | mem_tras = ((temp >> 11) & 0xf) + 4; | ||
937 | } else if (rdev->family == CHIP_RV350 || | ||
938 | rdev->family <= CHIP_RV380) { | ||
939 | /* rv3x0 */ | ||
940 | mem_trcd = (temp & 0x7) + 3; | ||
941 | mem_trp = ((temp >> 8) & 0x7) + 3; | ||
942 | mem_tras = ((temp >> 11) & 0xf) + 6; | ||
943 | } else if (rdev->family == CHIP_R420 || | ||
944 | rdev->family == CHIP_R423 || | ||
945 | rdev->family == CHIP_RV410) { | ||
946 | /* r4xx */ | ||
947 | mem_trcd = (temp & 0xf) + 3; | ||
948 | if (mem_trcd > 15) | ||
949 | mem_trcd = 15; | ||
950 | mem_trp = ((temp >> 8) & 0xf) + 3; | ||
951 | if (mem_trp > 15) | ||
952 | mem_trp = 15; | ||
953 | mem_tras = ((temp >> 12) & 0x1f) + 6; | ||
954 | if (mem_tras > 31) | ||
955 | mem_tras = 31; | ||
956 | } else { /* RV200, R200 */ | ||
957 | mem_trcd = (temp & 0x7) + 1; | ||
958 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
959 | mem_tras = ((temp >> 12) & 0xf) + 4; | ||
960 | } | ||
961 | /* convert to FF */ | ||
962 | trcd_ff.full = rfixed_const(mem_trcd); | ||
963 | trp_ff.full = rfixed_const(mem_trp); | ||
964 | tras_ff.full = rfixed_const(mem_tras); | ||
965 | |||
966 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */ | ||
967 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); | ||
968 | data = (temp & (7 << 20)) >> 20; | ||
969 | if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { | ||
970 | if (rdev->family == CHIP_RS480) /* don't think rs400 */ | ||
971 | tcas_ff = memtcas_rs480_ff[data]; | ||
972 | else | ||
973 | tcas_ff = memtcas_ff[data]; | ||
974 | } else | ||
975 | tcas_ff = memtcas2_ff[data]; | ||
976 | |||
977 | if (rdev->family == CHIP_RS400 || | ||
978 | rdev->family == CHIP_RS480) { | ||
979 | /* extra cas latency stored in bits 23-25 0-4 clocks */ | ||
980 | data = (temp >> 23) & 0x7; | ||
981 | if (data < 5) | ||
982 | tcas_ff.full += rfixed_const(data); | ||
983 | } | ||
984 | |||
985 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { | ||
986 | /* on the R300, Tcas is included in Trbs. | ||
987 | */ | ||
988 | temp = RREG32(RADEON_MEM_CNTL); | ||
989 | data = (R300_MEM_NUM_CHANNELS_MASK & temp); | ||
990 | if (data == 1) { | ||
991 | if (R300_MEM_USE_CD_CH_ONLY & temp) { | ||
992 | temp = RREG32(R300_MC_IND_INDEX); | ||
993 | temp &= ~R300_MC_IND_ADDR_MASK; | ||
994 | temp |= R300_MC_READ_CNTL_CD_mcind; | ||
995 | WREG32(R300_MC_IND_INDEX, temp); | ||
996 | temp = RREG32(R300_MC_IND_DATA); | ||
997 | data = (R300_MEM_RBS_POSITION_C_MASK & temp); | ||
998 | } else { | ||
999 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
1000 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
1001 | } | ||
1002 | } else { | ||
1003 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
1004 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
1005 | } | ||
1006 | if (rdev->family == CHIP_RV410 || | ||
1007 | rdev->family == CHIP_R420 || | ||
1008 | rdev->family == CHIP_R423) | ||
1009 | trbs_ff = memtrbs_r4xx[data]; | ||
1010 | else | ||
1011 | trbs_ff = memtrbs[data]; | ||
1012 | tcas_ff.full += trbs_ff.full; | ||
1013 | } | ||
1014 | |||
1015 | sclk_eff_ff.full = sclk_ff.full; | ||
1016 | |||
1017 | if (rdev->flags & RADEON_IS_AGP) { | ||
1018 | fixed20_12 agpmode_ff; | ||
1019 | agpmode_ff.full = rfixed_const(radeon_agpmode); | ||
1020 | temp_ff.full = rfixed_const_666(16); | ||
1021 | sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); | ||
1022 | } | ||
1023 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ | ||
1024 | |||
1025 | if (ASIC_IS_R300(rdev)) { | ||
1026 | sclk_delay_ff.full = rfixed_const(250); | ||
1027 | } else { | ||
1028 | if ((rdev->family == CHIP_RV100) || | ||
1029 | rdev->flags & RADEON_IS_IGP) { | ||
1030 | if (rdev->mc.vram_is_ddr) | ||
1031 | sclk_delay_ff.full = rfixed_const(41); | ||
1032 | else | ||
1033 | sclk_delay_ff.full = rfixed_const(33); | ||
1034 | } else { | ||
1035 | if (rdev->mc.vram_width == 128) | ||
1036 | sclk_delay_ff.full = rfixed_const(57); | ||
1037 | else | ||
1038 | sclk_delay_ff.full = rfixed_const(41); | ||
1039 | } | ||
1040 | } | ||
1041 | |||
1042 | mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); | ||
1043 | |||
1044 | if (rdev->mc.vram_is_ddr) { | ||
1045 | if (rdev->mc.vram_width == 32) { | ||
1046 | k1.full = rfixed_const(40); | ||
1047 | c = 3; | ||
1048 | } else { | ||
1049 | k1.full = rfixed_const(20); | ||
1050 | c = 1; | ||
1051 | } | ||
1052 | } else { | ||
1053 | k1.full = rfixed_const(40); | ||
1054 | c = 3; | ||
1055 | } | ||
1056 | |||
1057 | temp_ff.full = rfixed_const(2); | ||
1058 | mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); | ||
1059 | temp_ff.full = rfixed_const(c); | ||
1060 | mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); | ||
1061 | temp_ff.full = rfixed_const(4); | ||
1062 | mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); | ||
1063 | mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); | ||
1064 | mc_latency_mclk.full += k1.full; | ||
1065 | |||
1066 | mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); | ||
1067 | mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); | ||
1068 | |||
1069 | /* | ||
1070 | HW cursor time, assuming the worst case of a full-size colour cursor. | ||
1071 | */ | ||
1072 | temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); | ||
1073 | temp_ff.full += trcd_ff.full; | ||
1074 | if (temp_ff.full < tras_ff.full) | ||
1075 | temp_ff.full = tras_ff.full; | ||
1076 | cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); | ||
1077 | |||
1078 | temp_ff.full = rfixed_const(cur_size); | ||
1079 | cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); | ||
1080 | /* | ||
1081 | Find the total latency for the display data. | ||
1082 | */ | ||
1083 | disp_latency_overhead.full = rfixed_const(80); | ||
1084 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | ||
1085 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | ||
1086 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | ||
1087 | |||
1088 | if (mc_latency_mclk.full > mc_latency_sclk.full) | ||
1089 | disp_latency.full = mc_latency_mclk.full; | ||
1090 | else | ||
1091 | disp_latency.full = mc_latency_sclk.full; | ||
1092 | |||
1093 | /* setup Max GRPH_STOP_REQ default value */ | ||
1094 | if (ASIC_IS_RV100(rdev)) | ||
1095 | max_stop_req = 0x5c; | ||
1096 | else | ||
1097 | max_stop_req = 0x7c; | ||
1098 | |||
1099 | if (mode1) { | ||
1100 | /* CRTC1 | ||
1101 | Set GRPH_BUFFER_CNTL register using h/w defined optimal values. | ||
1102 | GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] | ||
1103 | */ | ||
1104 | stop_req = mode1->hdisplay * pixel_bytes1 / 16; | ||
1105 | |||
1106 | if (stop_req > max_stop_req) | ||
1107 | stop_req = max_stop_req; | ||
1108 | |||
1109 | /* | ||
1110 | Find the drain rate of the display buffer. | ||
1111 | */ | ||
1112 | temp_ff.full = rfixed_const((16/pixel_bytes1)); | ||
1113 | disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); | ||
1114 | |||
1115 | /* | ||
1116 | Find the critical point of the display buffer. | ||
1117 | */ | ||
1118 | crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); | ||
1119 | crit_point_ff.full += rfixed_const_half(0); | ||
1120 | |||
1121 | critical_point = rfixed_trunc(crit_point_ff); | ||
1122 | |||
1123 | if (rdev->disp_priority == 2) { | ||
1124 | critical_point = 0; | ||
1125 | } | ||
1126 | |||
1127 | /* | ||
1128 | The critical point should never be above max_stop_req-4. Setting | ||
1129 | GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. | ||
1130 | */ | ||
1131 | if (max_stop_req - critical_point < 4) | ||
1132 | critical_point = 0; | ||
1133 | |||
1134 | if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { | ||
1135 | /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */ | ||
1136 | critical_point = 0x10; | ||
1137 | } | ||
1138 | |||
1139 | temp = RREG32(RADEON_GRPH_BUFFER_CNTL); | ||
1140 | temp &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
1141 | temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
1142 | temp &= ~(RADEON_GRPH_START_REQ_MASK); | ||
1143 | if ((rdev->family == CHIP_R350) && | ||
1144 | (stop_req > 0x15)) { | ||
1145 | stop_req -= 0x10; | ||
1146 | } | ||
1147 | temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
1148 | temp |= RADEON_GRPH_BUFFER_SIZE; | ||
1149 | temp &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
1150 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
1151 | RADEON_GRPH_STOP_CNTL); | ||
1152 | /* | ||
1153 | Write the result into the register. | ||
1154 | */ | ||
1155 | WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
1156 | (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
1157 | |||
1158 | #if 0 | ||
1159 | if ((rdev->family == CHIP_RS400) || | ||
1160 | (rdev->family == CHIP_RS480)) { | ||
1161 | /* attempt to program RS400 disp regs correctly ??? */ | ||
1162 | temp = RREG32(RS400_DISP1_REG_CNTL); | ||
1163 | temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | | ||
1164 | RS400_DISP1_STOP_REQ_LEVEL_MASK); | ||
1165 | WREG32(RS400_DISP1_REQ_CNTL1, (temp | | ||
1166 | (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
1167 | (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
1168 | temp = RREG32(RS400_DMIF_MEM_CNTL1); | ||
1169 | temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | | ||
1170 | RS400_DISP1_CRITICAL_POINT_STOP_MASK); | ||
1171 | WREG32(RS400_DMIF_MEM_CNTL1, (temp | | ||
1172 | (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | | ||
1173 | (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); | ||
1174 | } | ||
1175 | #endif | ||
1176 | |||
1177 | DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", | ||
1178 | /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ | ||
1179 | (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); | ||
1180 | } | ||
1181 | |||
1182 | if (mode2) { | ||
1183 | u32 grph2_cntl; | ||
1184 | stop_req = mode2->hdisplay * pixel_bytes2 / 16; | ||
1185 | |||
1186 | if (stop_req > max_stop_req) | ||
1187 | stop_req = max_stop_req; | ||
1188 | |||
1189 | /* | ||
1190 | Find the drain rate of the display buffer. | ||
1191 | */ | ||
1192 | temp_ff.full = rfixed_const((16/pixel_bytes2)); | ||
1193 | disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); | ||
1194 | |||
1195 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); | ||
1196 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
1197 | grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
1198 | grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); | ||
1199 | if ((rdev->family == CHIP_R350) && | ||
1200 | (stop_req > 0x15)) { | ||
1201 | stop_req -= 0x10; | ||
1202 | } | ||
1203 | grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
1204 | grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; | ||
1205 | grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
1206 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
1207 | RADEON_GRPH_STOP_CNTL); | ||
1208 | |||
1209 | if ((rdev->family == CHIP_RS100) || | ||
1210 | (rdev->family == CHIP_RS200)) | ||
1211 | critical_point2 = 0; | ||
1212 | else { | ||
1213 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; | ||
1214 | temp_ff.full = rfixed_const(temp); | ||
1215 | temp_ff.full = rfixed_mul(mclk_ff, temp_ff); | ||
1216 | if (sclk_ff.full < temp_ff.full) | ||
1217 | temp_ff.full = sclk_ff.full; | ||
1218 | |||
1219 | read_return_rate.full = temp_ff.full; | ||
1220 | |||
1221 | if (mode1) { | ||
1222 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; | ||
1223 | time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); | ||
1224 | } else { | ||
1225 | time_disp1_drop_priority.full = 0; | ||
1226 | } | ||
1227 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; | ||
1228 | crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); | ||
1229 | crit_point_ff.full += rfixed_const_half(0); | ||
1230 | |||
1231 | critical_point2 = rfixed_trunc(crit_point_ff); | ||
1232 | |||
1233 | if (rdev->disp_priority == 2) { | ||
1234 | critical_point2 = 0; | ||
1235 | } | ||
1236 | |||
1237 | if (max_stop_req - critical_point2 < 4) | ||
1238 | critical_point2 = 0; | ||
1239 | |||
1240 | } | ||
1241 | |||
1242 | if (critical_point2 == 0 && rdev->family == CHIP_R300) { | ||
1243 | /* some R300 cards have a problem with this set to 0 */ | ||
1244 | critical_point2 = 0x10; | ||
1245 | } | ||
1246 | |||
1247 | WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
1248 | (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
1249 | |||
1250 | if ((rdev->family == CHIP_RS400) || | ||
1251 | (rdev->family == CHIP_RS480)) { | ||
1252 | #if 0 | ||
1253 | /* attempt to program RS400 disp2 regs correctly ??? */ | ||
1254 | temp = RREG32(RS400_DISP2_REQ_CNTL1); | ||
1255 | temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | | ||
1256 | RS400_DISP2_STOP_REQ_LEVEL_MASK); | ||
1257 | WREG32(RS400_DISP2_REQ_CNTL1, (temp | | ||
1258 | (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
1259 | (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
1260 | temp = RREG32(RS400_DISP2_REQ_CNTL2); | ||
1261 | temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | | ||
1262 | RS400_DISP2_CRITICAL_POINT_STOP_MASK); | ||
1263 | WREG32(RS400_DISP2_REQ_CNTL2, (temp | | ||
1264 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | | ||
1265 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); | ||
1266 | #endif | ||
1267 | WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); | ||
1268 | WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); | ||
1269 | WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); | ||
1270 | WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); | ||
1271 | } | ||
1272 | |||
1273 | DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", | ||
1274 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); | ||
1275 | } | ||
1276 | } | ||
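Note on the RMX path added above: the panel stretch is programmed as a fixed-point ratio, scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) / panel_xres + 1, which is then masked into FP_HORZ_STRETCH. The stand-alone sketch below only illustrates that arithmetic; the constant value and helper name are illustrative assumptions, not taken from the driver, and the "no stretch" cases are folded into a single return for brevity.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for RADEON_HORZ_STRETCH_RATIO_MAX, assumed to act
     * as the fixed-point denominator of the stretch hardware. */
    #define STRETCH_RATIO_MAX 4096

    /* Horizontal stretch ratio as computed in the RMX code above: the active
     * width scaled against the panel's native width, biased by 'inc' when the
     * auto-ratio-increment bit is set, then rounded up by one. */
    static uint32_t rmx_horz_scale(uint32_t xres, uint32_t panel_xres, int inc)
    {
        if (xres > panel_xres)
            xres = panel_xres;      /* never scale beyond the native width */
        if (xres == panel_xres)
            return 0;               /* no stretch needed */
        return ((xres + inc) * STRETCH_RATIO_MAX) / panel_xres + 1;
    }

    int main(void)
    {
        /* e.g. a 1024-wide mode displayed on a 1400-wide panel */
        printf("scale = %u / %u\n", rmx_horz_scale(1024, 1400, 0),
               STRETCH_RATIO_MAX);
        return 0;
    }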
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 2c2f42de1d4c..34d0f58eb944 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -30,170 +30,6 @@ | |||
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | 31 | ||
32 | 32 | ||
33 | static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder, | ||
34 | struct drm_display_mode *mode, | ||
35 | struct drm_display_mode *adjusted_mode) | ||
36 | { | ||
37 | struct drm_device *dev = encoder->dev; | ||
38 | struct radeon_device *rdev = dev->dev_private; | ||
39 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
40 | int xres = mode->hdisplay; | ||
41 | int yres = mode->vdisplay; | ||
42 | bool hscale = true, vscale = true; | ||
43 | int hsync_wid; | ||
44 | int vsync_wid; | ||
45 | int hsync_start; | ||
46 | uint32_t scale, inc; | ||
47 | uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active; | ||
48 | uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp; | ||
49 | struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; | ||
50 | |||
51 | DRM_DEBUG("\n"); | ||
52 | |||
53 | fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & | ||
54 | (RADEON_VERT_STRETCH_RESERVED | | ||
55 | RADEON_VERT_AUTO_RATIO_INC); | ||
56 | fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & | ||
57 | (RADEON_HORZ_FP_LOOP_STRETCH | | ||
58 | RADEON_HORZ_AUTO_RATIO_INC); | ||
59 | |||
60 | crtc_more_cntl = 0; | ||
61 | if ((rdev->family == CHIP_RS100) || | ||
62 | (rdev->family == CHIP_RS200)) { | ||
63 | /* This is to work around the ASIC bug for RMX; some versions | ||
64 | of the BIOS don't have this register initialized correctly. */ | ||
65 | crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; | ||
66 | } | ||
67 | |||
68 | |||
69 | fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) | ||
70 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
71 | |||
72 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
73 | if (!hsync_wid) | ||
74 | hsync_wid = 1; | ||
75 | hsync_start = mode->crtc_hsync_start - 8; | ||
76 | |||
77 | fp_h_sync_strt_wid = ((hsync_start & 0x1fff) | ||
78 | | ((hsync_wid & 0x3f) << 16) | ||
79 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
80 | ? RADEON_CRTC_H_SYNC_POL | ||
81 | : 0)); | ||
82 | |||
83 | fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) | ||
84 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
85 | |||
86 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
87 | if (!vsync_wid) | ||
88 | vsync_wid = 1; | ||
89 | |||
90 | fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) | ||
91 | | ((vsync_wid & 0x1f) << 16) | ||
92 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
93 | ? RADEON_CRTC_V_SYNC_POL | ||
94 | : 0)); | ||
95 | |||
96 | fp_horz_vert_active = 0; | ||
97 | |||
98 | if (native_mode->panel_xres == 0 || | ||
99 | native_mode->panel_yres == 0) { | ||
100 | hscale = false; | ||
101 | vscale = false; | ||
102 | } else { | ||
103 | if (xres > native_mode->panel_xres) | ||
104 | xres = native_mode->panel_xres; | ||
105 | if (yres > native_mode->panel_yres) | ||
106 | yres = native_mode->panel_yres; | ||
107 | |||
108 | if (xres == native_mode->panel_xres) | ||
109 | hscale = false; | ||
110 | if (yres == native_mode->panel_yres) | ||
111 | vscale = false; | ||
112 | } | ||
113 | |||
114 | if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
115 | if (radeon_encoder->rmx_type != RMX_CENTER) { | ||
116 | if (!hscale) | ||
117 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
118 | else { | ||
119 | inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; | ||
120 | scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) | ||
121 | / native_mode->panel_xres + 1; | ||
122 | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | | ||
123 | RADEON_HORZ_STRETCH_BLEND | | ||
124 | RADEON_HORZ_STRETCH_ENABLE | | ||
125 | ((native_mode->panel_xres/8-1) << 16)); | ||
126 | } | ||
127 | |||
128 | if (!vscale) | ||
129 | fp_vert_stretch |= ((yres-1) << 12); | ||
130 | else { | ||
131 | inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; | ||
132 | scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) | ||
133 | / native_mode->panel_yres + 1; | ||
134 | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | | ||
135 | RADEON_VERT_STRETCH_ENABLE | | ||
136 | RADEON_VERT_STRETCH_BLEND | | ||
137 | ((native_mode->panel_yres-1) << 12)); | ||
138 | } | ||
139 | } else if (radeon_encoder->rmx_type == RMX_CENTER) { | ||
140 | int blank_width; | ||
141 | |||
142 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
143 | fp_vert_stretch |= ((yres-1) << 12); | ||
144 | |||
145 | crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | | ||
146 | RADEON_CRTC_AUTO_VERT_CENTER_EN); | ||
147 | |||
148 | blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; | ||
149 | if (blank_width > 110) | ||
150 | blank_width = 110; | ||
151 | |||
152 | fp_crtc_h_total_disp = (((blank_width) & 0x3ff) | ||
153 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
154 | |||
155 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
156 | if (!hsync_wid) | ||
157 | hsync_wid = 1; | ||
158 | |||
159 | fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) | ||
160 | | ((hsync_wid & 0x3f) << 16) | ||
161 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
162 | ? RADEON_CRTC_H_SYNC_POL | ||
163 | : 0)); | ||
164 | |||
165 | fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) | ||
166 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
167 | |||
168 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
169 | if (!vsync_wid) | ||
170 | vsync_wid = 1; | ||
171 | |||
172 | fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) | ||
173 | | ((vsync_wid & 0x1f) << 16) | ||
174 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
175 | ? RADEON_CRTC_V_SYNC_POL | ||
176 | : 0))); | ||
177 | |||
178 | fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | | ||
179 | (((native_mode->panel_xres / 8) & 0x1ff) << 16)); | ||
180 | } | ||
181 | } else { | ||
182 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
183 | fp_vert_stretch |= ((yres-1) << 12); | ||
184 | } | ||
185 | |||
186 | WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); | ||
187 | WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); | ||
188 | WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); | ||
189 | WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); | ||
190 | WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); | ||
191 | WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); | ||
192 | WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); | ||
193 | WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); | ||
194 | |||
195 | } | ||
196 | |||
197 | static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | 33 | static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) |
198 | { | 34 | { |
199 | struct drm_device *dev = encoder->dev; | 35 | struct drm_device *dev = encoder->dev; |
@@ -287,9 +123,6 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
287 | 123 | ||
288 | DRM_DEBUG("\n"); | 124 | DRM_DEBUG("\n"); |
289 | 125 | ||
290 | if (radeon_crtc->crtc_id == 0) | ||
291 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
292 | |||
293 | lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); | 126 | lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); |
294 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; | 127 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; |
295 | 128 | ||
@@ -318,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
318 | 151 | ||
319 | if (radeon_crtc->crtc_id == 0) { | 152 | if (radeon_crtc->crtc_id == 0) { |
320 | if (ASIC_IS_R300(rdev)) { | 153 | if (ASIC_IS_R300(rdev)) { |
321 | if (radeon_encoder->flags & RADEON_USE_RMX) | 154 | if (radeon_encoder->rmx_type != RMX_OFF) |
322 | lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; | 155 | lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; |
323 | } else | 156 | } else |
324 | lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; | 157 | lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; |
@@ -350,8 +183,6 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, | |||
350 | 183 | ||
351 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 184 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
352 | 185 | ||
353 | radeon_encoder->flags &= ~RADEON_USE_RMX; | ||
354 | |||
355 | if (radeon_encoder->rmx_type != RMX_OFF) | 186 | if (radeon_encoder->rmx_type != RMX_OFF) |
356 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); | 187 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); |
357 | 188 | ||
@@ -455,9 +286,6 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, | |||
455 | 286 | ||
456 | DRM_DEBUG("\n"); | 287 | DRM_DEBUG("\n"); |
457 | 288 | ||
458 | if (radeon_crtc->crtc_id == 0) | ||
459 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
460 | |||
461 | if (radeon_crtc->crtc_id == 0) { | 289 | if (radeon_crtc->crtc_id == 0) { |
462 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { | 290 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { |
463 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & | 291 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & |
@@ -653,9 +481,6 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
653 | 481 | ||
654 | DRM_DEBUG("\n"); | 482 | DRM_DEBUG("\n"); |
655 | 483 | ||
656 | if (radeon_crtc->crtc_id == 0) | ||
657 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
658 | |||
659 | tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); | 484 | tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); |
660 | tmp &= 0xfffff; | 485 | tmp &= 0xfffff; |
661 | if (rdev->family == CHIP_RV280) { | 486 | if (rdev->family == CHIP_RV280) { |
@@ -711,7 +536,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
711 | if (radeon_crtc->crtc_id == 0) { | 536 | if (radeon_crtc->crtc_id == 0) { |
712 | if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { | 537 | if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
713 | fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; | 538 | fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
714 | if (radeon_encoder->flags & RADEON_USE_RMX) | 539 | if (radeon_encoder->rmx_type != RMX_OFF) |
715 | fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; | 540 | fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; |
716 | else | 541 | else |
717 | fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; | 542 | fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; |
@@ -820,9 +645,6 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
820 | 645 | ||
821 | DRM_DEBUG("\n"); | 646 | DRM_DEBUG("\n"); |
822 | 647 | ||
823 | if (radeon_crtc->crtc_id == 0) | ||
824 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
825 | |||
826 | if (rdev->is_atom_bios) { | 648 | if (rdev->is_atom_bios) { |
827 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 649 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
828 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 650 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); |
@@ -856,7 +678,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
856 | if (radeon_crtc->crtc_id == 0) { | 678 | if (radeon_crtc->crtc_id == 0) { |
857 | if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { | 679 | if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { |
858 | fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; | 680 | fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; |
859 | if (radeon_encoder->flags & RADEON_USE_RMX) | 681 | if (radeon_encoder->rmx_type != RMX_OFF) |
860 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; | 682 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; |
861 | else | 683 | else |
862 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; | 684 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; |
@@ -1014,9 +836,6 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
1014 | 836 | ||
1015 | DRM_DEBUG("\n"); | 837 | DRM_DEBUG("\n"); |
1016 | 838 | ||
1017 | if (radeon_crtc->crtc_id == 0) | ||
1018 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
1019 | |||
1020 | if (rdev->family != CHIP_R200) { | 839 | if (rdev->family != CHIP_R200) { |
1021 | tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); | 840 | tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); |
1022 | if (rdev->family == CHIP_R420 || | 841 | if (rdev->family == CHIP_R420 || |
@@ -1243,6 +1062,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1243 | 1062 | ||
1244 | radeon_encoder->encoder_id = encoder_id; | 1063 | radeon_encoder->encoder_id = encoder_id; |
1245 | radeon_encoder->devices = supported_device; | 1064 | radeon_encoder->devices = supported_device; |
1065 | radeon_encoder->rmx_type = RMX_OFF; | ||
1246 | 1066 | ||
1247 | switch (radeon_encoder->encoder_id) { | 1067 | switch (radeon_encoder->encoder_id) { |
1248 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1068 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 9173b687462b..3b09a1f2d8f9 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -36,6 +36,9 @@ | |||
36 | #include <linux/i2c.h> | 36 | #include <linux/i2c.h> |
37 | #include <linux/i2c-id.h> | 37 | #include <linux/i2c-id.h> |
38 | #include <linux/i2c-algo-bit.h> | 38 | #include <linux/i2c-algo-bit.h> |
39 | #include "radeon_fixed.h" | ||
40 | |||
41 | struct radeon_device; | ||
39 | 42 | ||
40 | #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) | 43 | #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) |
41 | #define to_radeon_connector(x) container_of(x, struct radeon_connector, base) | 44 | #define to_radeon_connector(x) container_of(x, struct radeon_connector, base) |
@@ -124,6 +127,7 @@ struct radeon_tmds_pll { | |||
124 | #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) | 127 | #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) |
125 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) | 128 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) |
126 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) | 129 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) |
130 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | ||
127 | 131 | ||
128 | struct radeon_pll { | 132 | struct radeon_pll { |
129 | uint16_t reference_freq; | 133 | uint16_t reference_freq; |
@@ -170,6 +174,18 @@ struct radeon_mode_info { | |||
170 | struct atom_context *atom_context; | 174 | struct atom_context *atom_context; |
171 | enum radeon_connector_table connector_table; | 175 | enum radeon_connector_table connector_table; |
172 | bool mode_config_initialized; | 176 | bool mode_config_initialized; |
177 | struct radeon_crtc *crtcs[2]; | ||
178 | }; | ||
179 | |||
180 | struct radeon_native_mode { | ||
181 | /* preferred mode */ | ||
182 | uint32_t panel_xres, panel_yres; | ||
183 | uint32_t hoverplus, hsync_width; | ||
184 | uint32_t hblank; | ||
185 | uint32_t voverplus, vsync_width; | ||
186 | uint32_t vblank; | ||
187 | uint32_t dotclock; | ||
188 | uint32_t flags; | ||
173 | }; | 189 | }; |
174 | 190 | ||
175 | struct radeon_crtc { | 191 | struct radeon_crtc { |
@@ -185,19 +201,13 @@ struct radeon_crtc { | |||
185 | uint64_t cursor_addr; | 201 | uint64_t cursor_addr; |
186 | int cursor_width; | 202 | int cursor_width; |
187 | int cursor_height; | 203 | int cursor_height; |
188 | }; | 204 | uint32_t legacy_display_base_addr; |
189 | 205 | uint32_t legacy_cursor_offset; | |
190 | #define RADEON_USE_RMX 1 | 206 | enum radeon_rmx_type rmx_type; |
191 | 207 | uint32_t devices; | |
192 | struct radeon_native_mode { | 208 | fixed20_12 vsc; |
193 | /* preferred mode */ | 209 | fixed20_12 hsc; |
194 | uint32_t panel_xres, panel_yres; | 210 | struct radeon_native_mode native_mode; |
195 | uint32_t hoverplus, hsync_width; | ||
196 | uint32_t hblank; | ||
197 | uint32_t voverplus, vsync_width; | ||
198 | uint32_t vblank; | ||
199 | uint32_t dotclock; | ||
200 | uint32_t flags; | ||
201 | }; | 211 | }; |
202 | 212 | ||
203 | struct radeon_encoder_primary_dac { | 213 | struct radeon_encoder_primary_dac { |
@@ -383,16 +393,9 @@ void radeon_enc_destroy(struct drm_encoder *encoder); | |||
383 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | 393 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
384 | void radeon_combios_asic_init(struct drm_device *dev); | 394 | void radeon_combios_asic_init(struct drm_device *dev); |
385 | extern int radeon_static_clocks_init(struct drm_device *dev); | 395 | extern int radeon_static_clocks_init(struct drm_device *dev); |
386 | void radeon_init_disp_bw_legacy(struct drm_device *dev, | 396 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
387 | struct drm_display_mode *mode1, | 397 | struct drm_display_mode *mode, |
388 | uint32_t pixel_bytes1, | 398 | struct drm_display_mode *adjusted_mode); |
389 | struct drm_display_mode *mode2, | 399 | void atom_rv515_force_tv_scaler(struct radeon_device *rdev); |
390 | uint32_t pixel_bytes2); | ||
391 | void radeon_init_disp_bw_avivo(struct drm_device *dev, | ||
392 | struct drm_display_mode *mode1, | ||
393 | uint32_t pixel_bytes1, | ||
394 | struct drm_display_mode *mode2, | ||
395 | uint32_t pixel_bytes2); | ||
396 | void radeon_init_disp_bandwidth(struct drm_device *dev); | ||
397 | 400 | ||
398 | #endif | 401 | #endif |
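The new vsc/hsc members of struct radeon_crtc use the fixed20_12 type pulled in through radeon_fixed.h, which this diff only includes and does not show. The sketch below merely illustrates the usual 20.12 fixed-point scheme such a type implies (20 integer bits, 12 fractional bits); the type and helper names are local stand-ins, not the driver's API.

    #include <stdint.h>

    typedef struct { uint32_t full; } fix20_12;      /* 20.12 fixed point */

    static fix20_12 fix_const(uint32_t v)             /* integer -> fixed */
    {
        fix20_12 f = { v << 12 };
        return f;
    }

    static fix20_12 fix_mul(fix20_12 a, fix20_12 b)   /* fixed * fixed */
    {
        fix20_12 r = { (uint32_t)(((uint64_t)a.full * b.full) >> 12) };
        return r;
    }

    static fix20_12 fix_div(fix20_12 a, fix20_12 b)   /* fixed / fixed, b != 0 */
    {
        fix20_12 r = { (uint32_t)(((uint64_t)a.full << 12) / b.full) };
        return r;
    }

A scale factor such as hsc would then be built as fix_div(fix_const(mode_width), fix_const(native_width)), keeping twelve bits of fraction through the bandwidth math.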
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 983e8df5e000..e98cae3bf4a6 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -44,6 +44,9 @@ struct radeon_object { | |||
44 | uint64_t gpu_addr; | 44 | uint64_t gpu_addr; |
45 | void *kptr; | 45 | void *kptr; |
46 | bool is_iomem; | 46 | bool is_iomem; |
47 | uint32_t tiling_flags; | ||
48 | uint32_t pitch; | ||
49 | int surface_reg; | ||
47 | }; | 50 | }; |
48 | 51 | ||
49 | int radeon_ttm_init(struct radeon_device *rdev); | 52 | int radeon_ttm_init(struct radeon_device *rdev); |
@@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) | |||
70 | 73 | ||
71 | robj = container_of(tobj, struct radeon_object, tobj); | 74 | robj = container_of(tobj, struct radeon_object, tobj); |
72 | list_del_init(&robj->list); | 75 | list_del_init(&robj->list); |
76 | radeon_object_clear_surface_reg(robj); | ||
73 | kfree(robj); | 77 | kfree(robj); |
74 | } | 78 | } |
75 | 79 | ||
@@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | |||
99 | { | 103 | { |
100 | uint32_t flags = 0; | 104 | uint32_t flags = 0; |
101 | if (domain & RADEON_GEM_DOMAIN_VRAM) { | 105 | if (domain & RADEON_GEM_DOMAIN_VRAM) { |
102 | flags |= TTM_PL_FLAG_VRAM; | 106 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; |
103 | } | 107 | } |
104 | if (domain & RADEON_GEM_DOMAIN_GTT) { | 108 | if (domain & RADEON_GEM_DOMAIN_GTT) { |
105 | flags |= TTM_PL_FLAG_TT; | 109 | flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; |
106 | } | 110 | } |
107 | if (domain & RADEON_GEM_DOMAIN_CPU) { | 111 | if (domain & RADEON_GEM_DOMAIN_CPU) { |
108 | flags |= TTM_PL_FLAG_SYSTEM; | 112 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; |
109 | } | 113 | } |
110 | if (!flags) { | 114 | if (!flags) { |
111 | flags |= TTM_PL_FLAG_SYSTEM; | 115 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; |
112 | } | 116 | } |
113 | return flags; | 117 | return flags; |
114 | } | 118 | } |
@@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev, | |||
141 | } | 145 | } |
142 | robj->rdev = rdev; | 146 | robj->rdev = rdev; |
143 | robj->gobj = gobj; | 147 | robj->gobj = gobj; |
148 | robj->surface_reg = -1; | ||
144 | INIT_LIST_HEAD(&robj->list); | 149 | INIT_LIST_HEAD(&robj->list); |
145 | 150 | ||
146 | flags = radeon_object_flags_from_domain(domain); | 151 | flags = radeon_object_flags_from_domain(domain); |
@@ -223,7 +228,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | |||
223 | { | 228 | { |
224 | uint32_t flags; | 229 | uint32_t flags; |
225 | uint32_t tmp; | 230 | uint32_t tmp; |
226 | void *fbptr; | ||
227 | int r; | 231 | int r; |
228 | 232 | ||
229 | flags = radeon_object_flags_from_domain(domain); | 233 | flags = radeon_object_flags_from_domain(domain); |
@@ -242,10 +246,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | |||
242 | DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); | 246 | DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); |
243 | return r; | 247 | return r; |
244 | } | 248 | } |
245 | if (robj->rdev->fbdev_robj == robj) { | ||
246 | mutex_lock(&robj->rdev->fbdev_info->lock); | ||
247 | radeon_object_kunmap(robj); | ||
248 | } | ||
249 | tmp = robj->tobj.mem.placement; | 249 | tmp = robj->tobj.mem.placement; |
250 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); | 250 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); |
251 | robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; | 251 | robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; |
@@ -261,23 +261,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | |||
261 | DRM_ERROR("radeon: failed to pin object.\n"); | 261 | DRM_ERROR("radeon: failed to pin object.\n"); |
262 | } | 262 | } |
263 | radeon_object_unreserve(robj); | 263 | radeon_object_unreserve(robj); |
264 | if (robj->rdev->fbdev_robj == robj) { | ||
265 | if (!r) { | ||
266 | r = radeon_object_kmap(robj, &fbptr); | ||
267 | } | ||
268 | if (!r) { | ||
269 | robj->rdev->fbdev_info->screen_base = fbptr; | ||
270 | robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; | ||
271 | } | ||
272 | mutex_unlock(&robj->rdev->fbdev_info->lock); | ||
273 | } | ||
274 | return r; | 264 | return r; |
275 | } | 265 | } |
276 | 266 | ||
277 | void radeon_object_unpin(struct radeon_object *robj) | 267 | void radeon_object_unpin(struct radeon_object *robj) |
278 | { | 268 | { |
279 | uint32_t flags; | 269 | uint32_t flags; |
280 | void *fbptr; | ||
281 | int r; | 270 | int r; |
282 | 271 | ||
283 | spin_lock(&robj->tobj.lock); | 272 | spin_lock(&robj->tobj.lock); |
@@ -297,10 +286,6 @@ void radeon_object_unpin(struct radeon_object *robj) | |||
297 | DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); | 286 | DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); |
298 | return; | 287 | return; |
299 | } | 288 | } |
300 | if (robj->rdev->fbdev_robj == robj) { | ||
301 | mutex_lock(&robj->rdev->fbdev_info->lock); | ||
302 | radeon_object_kunmap(robj); | ||
303 | } | ||
304 | flags = robj->tobj.mem.placement; | 289 | flags = robj->tobj.mem.placement; |
305 | robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; | 290 | robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; |
306 | r = ttm_buffer_object_validate(&robj->tobj, | 291 | r = ttm_buffer_object_validate(&robj->tobj, |
@@ -310,16 +295,6 @@ void radeon_object_unpin(struct radeon_object *robj) | |||
310 | DRM_ERROR("radeon: failed to unpin buffer.\n"); | 295 | DRM_ERROR("radeon: failed to unpin buffer.\n"); |
311 | } | 296 | } |
312 | radeon_object_unreserve(robj); | 297 | radeon_object_unreserve(robj); |
313 | if (robj->rdev->fbdev_robj == robj) { | ||
314 | if (!r) { | ||
315 | r = radeon_object_kmap(robj, &fbptr); | ||
316 | } | ||
317 | if (!r) { | ||
318 | robj->rdev->fbdev_info->screen_base = fbptr; | ||
319 | robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; | ||
320 | } | ||
321 | mutex_unlock(&robj->rdev->fbdev_info->lock); | ||
322 | } | ||
323 | } | 298 | } |
324 | 299 | ||
325 | int radeon_object_wait(struct radeon_object *robj) | 300 | int radeon_object_wait(struct radeon_object *robj) |
@@ -334,7 +309,7 @@ int radeon_object_wait(struct radeon_object *robj) | |||
334 | } | 309 | } |
335 | spin_lock(&robj->tobj.lock); | 310 | spin_lock(&robj->tobj.lock); |
336 | if (robj->tobj.sync_obj) { | 311 | if (robj->tobj.sync_obj) { |
337 | r = ttm_bo_wait(&robj->tobj, true, false, false); | 312 | r = ttm_bo_wait(&robj->tobj, true, true, false); |
338 | } | 313 | } |
339 | spin_unlock(&robj->tobj.lock); | 314 | spin_unlock(&robj->tobj.lock); |
340 | radeon_object_unreserve(robj); | 315 | radeon_object_unreserve(robj); |
@@ -433,7 +408,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
433 | struct radeon_object *robj; | 408 | struct radeon_object *robj; |
434 | struct radeon_fence *old_fence = NULL; | 409 | struct radeon_fence *old_fence = NULL; |
435 | struct list_head *i; | 410 | struct list_head *i; |
436 | uint32_t flags; | ||
437 | int r; | 411 | int r; |
438 | 412 | ||
439 | r = radeon_object_list_reserve(head); | 413 | r = radeon_object_list_reserve(head); |
@@ -444,27 +418,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
444 | list_for_each(i, head) { | 418 | list_for_each(i, head) { |
445 | lobj = list_entry(i, struct radeon_object_list, list); | 419 | lobj = list_entry(i, struct radeon_object_list, list); |
446 | robj = lobj->robj; | 420 | robj = lobj->robj; |
447 | if (lobj->wdomain) { | ||
448 | flags = radeon_object_flags_from_domain(lobj->wdomain); | ||
449 | flags |= TTM_PL_FLAG_TT; | ||
450 | } else { | ||
451 | flags = radeon_object_flags_from_domain(lobj->rdomain); | ||
452 | flags |= TTM_PL_FLAG_TT; | ||
453 | flags |= TTM_PL_FLAG_VRAM; | ||
454 | } | ||
455 | if (!robj->pin_count) { | 421 | if (!robj->pin_count) { |
456 | robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; | 422 | if (lobj->wdomain) { |
423 | robj->tobj.proposed_placement = | ||
424 | radeon_object_flags_from_domain(lobj->wdomain); | ||
425 | } else { | ||
426 | robj->tobj.proposed_placement = | ||
427 | radeon_object_flags_from_domain(lobj->rdomain); | ||
428 | } | ||
457 | r = ttm_buffer_object_validate(&robj->tobj, | 429 | r = ttm_buffer_object_validate(&robj->tobj, |
458 | robj->tobj.proposed_placement, | 430 | robj->tobj.proposed_placement, |
459 | true, false); | 431 | true, false); |
460 | if (unlikely(r)) { | 432 | if (unlikely(r)) { |
461 | radeon_object_list_unreserve(head); | ||
462 | DRM_ERROR("radeon: failed to validate.\n"); | 433 | DRM_ERROR("radeon: failed to validate.\n"); |
463 | return r; | 434 | return r; |
464 | } | 435 | } |
465 | radeon_object_gpu_addr(robj); | 436 | radeon_object_gpu_addr(robj); |
466 | } | 437 | } |
467 | lobj->gpu_offset = robj->gpu_addr; | 438 | lobj->gpu_offset = robj->gpu_addr; |
439 | lobj->tiling_flags = robj->tiling_flags; | ||
468 | if (fence) { | 440 | if (fence) { |
469 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; | 441 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; |
470 | robj->tobj.sync_obj = radeon_fence_ref(fence); | 442 | robj->tobj.sync_obj = radeon_fence_ref(fence); |
@@ -509,3 +481,127 @@ unsigned long radeon_object_size(struct radeon_object *robj) | |||
509 | { | 481 | { |
510 | return robj->tobj.num_pages << PAGE_SHIFT; | 482 | return robj->tobj.num_pages << PAGE_SHIFT; |
511 | } | 483 | } |
484 | |||
485 | int radeon_object_get_surface_reg(struct radeon_object *robj) | ||
486 | { | ||
487 | struct radeon_device *rdev = robj->rdev; | ||
488 | struct radeon_surface_reg *reg; | ||
489 | struct radeon_object *old_object; | ||
490 | int steal; | ||
491 | int i; | ||
492 | |||
493 | if (!robj->tiling_flags) | ||
494 | return 0; | ||
495 | |||
496 | if (robj->surface_reg >= 0) { | ||
497 | reg = &rdev->surface_regs[robj->surface_reg]; | ||
498 | i = robj->surface_reg; | ||
499 | goto out; | ||
500 | } | ||
501 | |||
502 | steal = -1; | ||
503 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { | ||
504 | |||
505 | reg = &rdev->surface_regs[i]; | ||
506 | if (!reg->robj) | ||
507 | break; | ||
508 | |||
509 | old_object = reg->robj; | ||
510 | if (old_object->pin_count == 0) | ||
511 | steal = i; | ||
512 | } | ||
513 | |||
514 | /* if we are all out */ | ||
515 | if (i == RADEON_GEM_MAX_SURFACES) { | ||
516 | if (steal == -1) | ||
517 | return -ENOMEM; | ||
518 | /* find someone with a surface reg and nuke their BO */ | ||
519 | reg = &rdev->surface_regs[steal]; | ||
520 | old_object = reg->robj; | ||
521 | /* blow away the mapping */ | ||
522 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); | ||
523 | ttm_bo_unmap_virtual(&old_object->tobj); | ||
524 | old_object->surface_reg = -1; | ||
525 | i = steal; | ||
526 | } | ||
527 | |||
528 | robj->surface_reg = i; | ||
529 | reg->robj = robj; | ||
530 | |||
531 | out: | ||
532 | radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, | ||
533 | robj->tobj.mem.mm_node->start << PAGE_SHIFT, | ||
534 | robj->tobj.num_pages << PAGE_SHIFT); | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | void radeon_object_clear_surface_reg(struct radeon_object *robj) | ||
539 | { | ||
540 | struct radeon_device *rdev = robj->rdev; | ||
541 | struct radeon_surface_reg *reg; | ||
542 | |||
543 | if (robj->surface_reg == -1) | ||
544 | return; | ||
545 | |||
546 | reg = &rdev->surface_regs[robj->surface_reg]; | ||
547 | radeon_clear_surface_reg(rdev, robj->surface_reg); | ||
548 | |||
549 | reg->robj = NULL; | ||
550 | robj->surface_reg = -1; | ||
551 | } | ||
552 | |||
553 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | ||
554 | uint32_t tiling_flags, uint32_t pitch) | ||
555 | { | ||
556 | robj->tiling_flags = tiling_flags; | ||
557 | robj->pitch = pitch; | ||
558 | } | ||
559 | |||
560 | void radeon_object_get_tiling_flags(struct radeon_object *robj, | ||
561 | uint32_t *tiling_flags, | ||
562 | uint32_t *pitch) | ||
563 | { | ||
564 | if (tiling_flags) | ||
565 | *tiling_flags = robj->tiling_flags; | ||
566 | if (pitch) | ||
567 | *pitch = robj->pitch; | ||
568 | } | ||
569 | |||
570 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | ||
571 | bool force_drop) | ||
572 | { | ||
573 | if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) | ||
574 | return 0; | ||
575 | |||
576 | if (force_drop) { | ||
577 | radeon_object_clear_surface_reg(robj); | ||
578 | return 0; | ||
579 | } | ||
580 | |||
581 | if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { | ||
582 | if (!has_moved) | ||
583 | return 0; | ||
584 | |||
585 | if (robj->surface_reg >= 0) | ||
586 | radeon_object_clear_surface_reg(robj); | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | if ((robj->surface_reg >= 0) && !has_moved) | ||
591 | return 0; | ||
592 | |||
593 | return radeon_object_get_surface_reg(robj); | ||
594 | } | ||
595 | |||
596 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
597 | struct ttm_mem_reg *mem) | ||
598 | { | ||
599 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | ||
600 | radeon_object_check_tiling(robj, 0, 1); | ||
601 | } | ||
602 | |||
603 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | ||
604 | { | ||
605 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | ||
606 | radeon_object_check_tiling(robj, 0, 0); | ||
607 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index a853261d1881..60d159308b88 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -126,32 +126,19 @@ static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib) | |||
126 | } | 126 | } |
127 | } | 127 | } |
128 | 128 | ||
129 | static void radeon_ib_cpu_flush(struct radeon_device *rdev, | ||
130 | struct radeon_ib *ib) | ||
131 | { | ||
132 | unsigned long tmp; | ||
133 | unsigned i; | ||
134 | |||
135 | /* To force CPU cache flush ugly but seems reliable */ | ||
136 | for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) { | ||
137 | tmp = readl(&ib->ptr[i]); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | 129 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) |
142 | { | 130 | { |
143 | int r = 0; | 131 | int r = 0; |
144 | 132 | ||
145 | mutex_lock(&rdev->ib_pool.mutex); | 133 | mutex_lock(&rdev->ib_pool.mutex); |
146 | radeon_ib_align(rdev, ib); | 134 | radeon_ib_align(rdev, ib); |
147 | radeon_ib_cpu_flush(rdev, ib); | ||
148 | if (!ib->length_dw || !rdev->cp.ready) { | 135 | if (!ib->length_dw || !rdev->cp.ready) { |
150 | /* TODO: Nothing in the IB we should report. */ | 137 | /* TODO: Nothing in the IB we should report. */ |
150 | mutex_unlock(&rdev->ib_pool.mutex); | 137 | mutex_unlock(&rdev->ib_pool.mutex); |
151 | DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); | 138 | DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); |
152 | return -EINVAL; | 139 | return -EINVAL; |
153 | } | 140 | } |
154 | /* 64 dwords should be enought for fence too */ | 141 | /* 64 dwords should be enough for fence too */ |
155 | r = radeon_ring_lock(rdev, 64); | 142 | r = radeon_ring_lock(rdev, 64); |
156 | if (r) { | 143 | if (r) { |
157 | DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); | 144 | DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); |
diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h new file mode 100644 index 000000000000..63a773578f17 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_share.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RADEON_SHARE_H__ | ||
29 | #define __RADEON_SHARE_H__ | ||
30 | |||
31 | void r100_vram_init_sizes(struct radeon_device *rdev); | ||
32 | |||
33 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
34 | struct drm_display_mode *mode1, | ||
35 | struct drm_display_mode *mode2); | ||
36 | |||
37 | void rv515_bandwidth_avivo_update(struct radeon_device *rdev); | ||
38 | |||
39 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c new file mode 100644 index 000000000000..03c33cf4e14c --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * Copyright 2009 VMware, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Michel Dänzer | ||
23 | */ | ||
24 | #include <drm/drmP.h> | ||
25 | #include <drm/radeon_drm.h> | ||
26 | #include "radeon_reg.h" | ||
27 | #include "radeon.h" | ||
28 | |||
29 | |||
30 | /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ | ||
31 | void radeon_test_moves(struct radeon_device *rdev) | ||
32 | { | ||
33 | struct radeon_object *vram_obj = NULL; | ||
34 | struct radeon_object **gtt_obj = NULL; | ||
35 | struct radeon_fence *fence = NULL; | ||
36 | uint64_t gtt_addr, vram_addr; | ||
37 | unsigned i, n, size; | ||
38 | int r; | ||
39 | |||
40 | size = 1024 * 1024; | ||
41 | |||
42 | /* Number of tests = | ||
43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | ||
44 | */ | ||
45 | n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 - | ||
46 | rdev->cp.ring_size) / size; | ||
47 | |||
48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | ||
49 | if (!gtt_obj) { | ||
50 | DRM_ERROR("Failed to allocate %d pointers\n", n); | ||
51 | r = 1; | ||
52 | goto out_cleanup; | ||
53 | } | ||
54 | |||
55 | r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, | ||
56 | false, &vram_obj); | ||
57 | if (r) { | ||
58 | DRM_ERROR("Failed to create VRAM object\n"); | ||
59 | goto out_cleanup; | ||
60 | } | ||
61 | |||
62 | r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); | ||
63 | if (r) { | ||
64 | DRM_ERROR("Failed to pin VRAM object\n"); | ||
65 | goto out_cleanup; | ||
66 | } | ||
67 | |||
68 | for (i = 0; i < n; i++) { | ||
69 | void *gtt_map, *vram_map; | ||
70 | void **gtt_start, **gtt_end; | ||
71 | void **vram_start, **vram_end; | ||
72 | |||
73 | r = radeon_object_create(rdev, NULL, size, true, | ||
74 | RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); | ||
75 | if (r) { | ||
76 | DRM_ERROR("Failed to create GTT object %d\n", i); | ||
77 | goto out_cleanup; | ||
78 | } | ||
79 | |||
80 | r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr); | ||
81 | if (r) { | ||
82 | DRM_ERROR("Failed to pin GTT object %d\n", i); | ||
83 | goto out_cleanup; | ||
84 | } | ||
85 | |||
86 | r = radeon_object_kmap(gtt_obj[i], &gtt_map); | ||
87 | if (r) { | ||
88 | DRM_ERROR("Failed to map GTT object %d\n", i); | ||
89 | goto out_cleanup; | ||
90 | } | ||
91 | |||
92 | for (gtt_start = gtt_map, gtt_end = gtt_map + size; | ||
93 | gtt_start < gtt_end; | ||
94 | gtt_start++) | ||
95 | *gtt_start = gtt_start; | ||
96 | |||
97 | radeon_object_kunmap(gtt_obj[i]); | ||
98 | |||
99 | r = radeon_fence_create(rdev, &fence); | ||
100 | if (r) { | ||
101 | DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i); | ||
102 | goto out_cleanup; | ||
103 | } | ||
104 | |||
105 | r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence); | ||
106 | if (r) { | ||
107 | DRM_ERROR("Failed GTT->VRAM copy %d\n", i); | ||
108 | goto out_cleanup; | ||
109 | } | ||
110 | |||
111 | r = radeon_fence_wait(fence, false); | ||
112 | if (r) { | ||
113 | DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); | ||
114 | goto out_cleanup; | ||
115 | } | ||
116 | |||
117 | radeon_fence_unref(&fence); | ||
118 | |||
119 | r = radeon_object_kmap(vram_obj, &vram_map); | ||
120 | if (r) { | ||
121 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); | ||
122 | goto out_cleanup; | ||
123 | } | ||
124 | |||
125 | for (gtt_start = gtt_map, gtt_end = gtt_map + size, | ||
126 | vram_start = vram_map, vram_end = vram_map + size; | ||
127 | vram_start < vram_end; | ||
128 | gtt_start++, vram_start++) { | ||
129 | if (*vram_start != gtt_start) { | ||
130 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " | ||
131 | "expected 0x%p (GTT map 0x%p-0x%p)\n", | ||
132 | i, *vram_start, gtt_start, gtt_map, | ||
133 | gtt_end); | ||
134 | radeon_object_kunmap(vram_obj); | ||
135 | goto out_cleanup; | ||
136 | } | ||
137 | *vram_start = vram_start; | ||
138 | } | ||
139 | |||
140 | radeon_object_kunmap(vram_obj); | ||
141 | |||
142 | r = radeon_fence_create(rdev, &fence); | ||
143 | if (r) { | ||
144 | DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i); | ||
145 | goto out_cleanup; | ||
146 | } | ||
147 | |||
148 | r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence); | ||
149 | if (r) { | ||
150 | DRM_ERROR("Failed VRAM->GTT copy %d\n", i); | ||
151 | goto out_cleanup; | ||
152 | } | ||
153 | |||
154 | r = radeon_fence_wait(fence, false); | ||
155 | if (r) { | ||
156 | DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); | ||
157 | goto out_cleanup; | ||
158 | } | ||
159 | |||
160 | radeon_fence_unref(&fence); | ||
161 | |||
162 | r = radeon_object_kmap(gtt_obj[i], &gtt_map); | ||
163 | if (r) { | ||
164 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); | ||
165 | goto out_cleanup; | ||
166 | } | ||
167 | |||
168 | for (gtt_start = gtt_map, gtt_end = gtt_map + size, | ||
169 | vram_start = vram_map, vram_end = vram_map + size; | ||
170 | gtt_start < gtt_end; | ||
171 | gtt_start++, vram_start++) { | ||
172 | if (*gtt_start != vram_start) { | ||
173 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " | ||
174 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", | ||
175 | i, *gtt_start, vram_start, vram_map, | ||
176 | vram_end); | ||
177 | radeon_object_kunmap(gtt_obj[i]); | ||
178 | goto out_cleanup; | ||
179 | } | ||
180 | } | ||
181 | |||
182 | radeon_object_kunmap(gtt_obj[i]); | ||
183 | |||
184 | DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", | ||
185 | gtt_addr - rdev->mc.gtt_location); | ||
186 | } | ||
187 | |||
188 | out_cleanup: | ||
189 | if (vram_obj) { | ||
190 | radeon_object_unpin(vram_obj); | ||
191 | radeon_object_unref(&vram_obj); | ||
192 | } | ||
193 | if (gtt_obj) { | ||
194 | for (i = 0; i < n; i++) { | ||
195 | if (gtt_obj[i]) { | ||
196 | radeon_object_unpin(gtt_obj[i]); | ||
197 | radeon_object_unref(&gtt_obj[i]); | ||
198 | } | ||
199 | } | ||
200 | kfree(gtt_obj); | ||
201 | } | ||
202 | if (fence) { | ||
203 | radeon_fence_unref(&fence); | ||
204 | } | ||
205 | if (r) { | ||
206 | printk(KERN_WARNING "Error while testing BO move.\n"); | ||
207 | } | ||
208 | } | ||
209 | |||
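A quick back-of-envelope check of the test-count formula at the top of radeon_test_moves() above, as a standalone userspace C sketch (not driver code). The concrete sizes are assumptions chosen only for illustration: a 32 MiB GTT aperture, an IB pool of 16 entries of 64 KiB each, one 4 KiB writeback page and a 1 MiB ring buffer; the driver reads the real values from rdev.

#include <stdio.h>

int main(void)
{
        unsigned long long gtt_size  = 32ULL << 20;        /* assumed GTT aperture */
        unsigned long long ib_pool   = 16ULL * 64 * 1024;  /* assumed IB pool: 16 x 64 KiB */
        unsigned long long writeback = 4096;               /* one writeback page */
        unsigned long long ring_size = 1ULL << 20;         /* assumed ring buffer size */
        unsigned long long test_size = 1024 * 1024;        /* matches "size" in the test */

        unsigned long long n = (gtt_size - ib_pool - writeback - ring_size) / test_size;
        printf("%llu test buffers of 1 MiB each\n", n);    /* 29 with these numbers */
        return 0;
}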
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1227a97f5169..15c3531377ed 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -355,23 +355,26 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
355 | if (!rdev->cp.ready) { | 355 | if (!rdev->cp.ready) { |
356 | /* use memcpy */ | 356 | /* use memcpy */ |
357 | DRM_ERROR("CP is not ready use memcpy.\n"); | 357 | DRM_ERROR("CP is not ready use memcpy.\n"); |
358 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | 358 | goto memcpy; |
359 | } | 359 | } |
360 | 360 | ||
361 | if (old_mem->mem_type == TTM_PL_VRAM && | 361 | if (old_mem->mem_type == TTM_PL_VRAM && |
362 | new_mem->mem_type == TTM_PL_SYSTEM) { | 362 | new_mem->mem_type == TTM_PL_SYSTEM) { |
363 | return radeon_move_vram_ram(bo, evict, interruptible, | 363 | r = radeon_move_vram_ram(bo, evict, interruptible, |
364 | no_wait, new_mem); | 364 | no_wait, new_mem); |
365 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && | 365 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
366 | new_mem->mem_type == TTM_PL_VRAM) { | 366 | new_mem->mem_type == TTM_PL_VRAM) { |
367 | return radeon_move_ram_vram(bo, evict, interruptible, | 367 | r = radeon_move_ram_vram(bo, evict, interruptible, |
368 | no_wait, new_mem); | 368 | no_wait, new_mem); |
369 | } else { | 369 | } else { |
370 | r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); | 370 | r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); |
371 | if (unlikely(r)) { | ||
372 | return r; | ||
373 | } | ||
374 | } | 371 | } |
372 | |||
373 | if (r) { | ||
374 | memcpy: | ||
375 | r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | ||
376 | } | ||
377 | |||
375 | return r; | 378 | return r; |
376 | } | 379 | } |
377 | 380 | ||
@@ -429,6 +432,8 @@ static struct ttm_bo_driver radeon_bo_driver = { | |||
429 | .sync_obj_flush = &radeon_sync_obj_flush, | 432 | .sync_obj_flush = &radeon_sync_obj_flush, |
430 | .sync_obj_unref = &radeon_sync_obj_unref, | 433 | .sync_obj_unref = &radeon_sync_obj_unref, |
431 | .sync_obj_ref = &radeon_sync_obj_ref, | 434 | .sync_obj_ref = &radeon_sync_obj_ref, |
435 | .move_notify = &radeon_bo_move_notify, | ||
436 | .fault_reserve_notify = &radeon_bo_fault_reserve_notify, | ||
432 | }; | 437 | }; |
433 | 438 | ||
434 | int radeon_ttm_init(struct radeon_device *rdev) | 439 | int radeon_ttm_init(struct radeon_device *rdev) |
@@ -442,13 +447,14 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
442 | /* No other users of the address space so set it to 0 */ | 447 | /* No other users of the address space so set it to 0 */ |
443 | r = ttm_bo_device_init(&rdev->mman.bdev, | 448 | r = ttm_bo_device_init(&rdev->mman.bdev, |
444 | rdev->mman.mem_global_ref.object, | 449 | rdev->mman.mem_global_ref.object, |
445 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET); | 450 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, |
451 | rdev->need_dma32); | ||
446 | if (r) { | 452 | if (r) { |
447 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | 453 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
448 | return r; | 454 | return r; |
449 | } | 455 | } |
450 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, | 456 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, |
451 | ((rdev->mc.aper_size) >> PAGE_SHIFT)); | 457 | ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); |
452 | if (r) { | 458 | if (r) { |
453 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 459 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
454 | return r; | 460 | return r; |
@@ -465,7 +471,7 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
465 | return r; | 471 | return r; |
466 | } | 472 | } |
467 | DRM_INFO("radeon: %uM of VRAM memory ready\n", | 473 | DRM_INFO("radeon: %uM of VRAM memory ready\n", |
468 | rdev->mc.vram_size / (1024 * 1024)); | 474 | rdev->mc.real_vram_size / (1024 * 1024)); |
469 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, | 475 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, |
470 | ((rdev->mc.gtt_size) >> PAGE_SHIFT)); | 476 | ((rdev->mc.gtt_size) >> PAGE_SHIFT)); |
471 | if (r) { | 477 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index cc074b5a8f74..b29affd9c5d8 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_share.h" | ||
32 | 33 | ||
33 | /* rs400,rs480 depends on : */ | 34 | /* rs400,rs480 depends on : */ |
34 | void r100_hdp_reset(struct radeon_device *rdev); | 35 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -164,7 +165,9 @@ int rs400_gart_enable(struct radeon_device *rdev) | |||
164 | WREG32(RADEON_BUS_CNTL, tmp); | 165 | WREG32(RADEON_BUS_CNTL, tmp); |
165 | } | 166 | } |
166 | /* Table should be in 32bits address space so ignore bits above. */ | 167 | /* Table should be in 32bits address space so ignore bits above. */ |
167 | tmp = rdev->gart.table_addr & 0xfffff000; | 168 | tmp = (u32)rdev->gart.table_addr & 0xfffff000; |
169 | tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4; | ||
170 | |||
168 | WREG32_MC(RS480_GART_BASE, tmp); | 171 | WREG32_MC(RS480_GART_BASE, tmp); |
169 | /* TODO: more tweaking here */ | 172 | /* TODO: more tweaking here */ |
170 | WREG32_MC(RS480_GART_FEATURE_ID, | 173 | WREG32_MC(RS480_GART_FEATURE_ID, |
@@ -201,10 +204,17 @@ void rs400_gart_disable(struct radeon_device *rdev) | |||
201 | 204 | ||
202 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 205 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
203 | { | 206 | { |
207 | uint32_t entry; | ||
208 | |||
204 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 209 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
205 | return -EINVAL; | 210 | return -EINVAL; |
206 | } | 211 | } |
207 | rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC); | 212 | |
213 | entry = (lower_32_bits(addr) & PAGE_MASK) | | ||
214 | ((upper_32_bits(addr) & 0xff) << 4) | | ||
215 | 0xc; | ||
216 | entry = cpu_to_le32(entry); | ||
217 | rdev->gart.table.ram.ptr[i] = entry; | ||
208 | return 0; | 218 | return 0; |
209 | } | 219 | } |
210 | 220 | ||
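For reference, the entry layout that the new rs400_gart_set_page() builds above can be exercised in isolation. This is a minimal userspace sketch, not kernel code; SKETCH_PAGE_MASK assumes 4 KiB pages and the example address is made up.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_MASK 0xfffff000u   /* stand-in for the kernel's PAGE_MASK, 4 KiB pages */

static uint32_t rs400_gart_entry(uint64_t addr)
{
        uint32_t entry;

        entry  = (uint32_t)addr & SKETCH_PAGE_MASK;      /* lower_32_bits(addr) & PAGE_MASK */
        entry |= ((uint32_t)(addr >> 32) & 0xff) << 4;   /* address bits 39:32 into entry bits 11:4 */
        entry |= 0xc;                                    /* valid/cache bits, as in the patch */
        return entry;                                    /* the driver stores this little endian */
}

int main(void)
{
        /* 40-bit example address 0x1_2345_6000 -> entry 0x2345601c */
        printf("0x%08x\n", rs400_gart_entry(0x123456000ULL));
        return 0;
}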
@@ -223,10 +233,9 @@ int rs400_mc_init(struct radeon_device *rdev) | |||
223 | 233 | ||
224 | rs400_gpu_init(rdev); | 234 | rs400_gpu_init(rdev); |
225 | rs400_gart_disable(rdev); | 235 | rs400_gart_disable(rdev); |
226 | rdev->mc.gtt_location = rdev->mc.vram_size; | 236 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
227 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | 237 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); |
228 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | 238 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); |
229 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
230 | r = radeon_mc_setup(rdev); | 239 | r = radeon_mc_setup(rdev); |
231 | if (r) { | 240 | if (r) { |
232 | return r; | 241 | return r; |
@@ -238,7 +247,7 @@ int rs400_mc_init(struct radeon_device *rdev) | |||
238 | "programming pipes. Bad things might happen.\n"); | 247 | "programming pipes. Bad things might happen.\n"); |
239 | } | 248 | } |
240 | 249 | ||
241 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 250 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
242 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | 251 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
243 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | 252 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
244 | WREG32(RADEON_MC_FB_LOCATION, tmp); | 253 | WREG32(RADEON_MC_FB_LOCATION, tmp); |
@@ -284,21 +293,12 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
284 | */ | 293 | */ |
285 | void rs400_vram_info(struct radeon_device *rdev) | 294 | void rs400_vram_info(struct radeon_device *rdev) |
286 | { | 295 | { |
287 | uint32_t tom; | ||
288 | |||
289 | rs400_gart_adjust_size(rdev); | 296 | rs400_gart_adjust_size(rdev); |
290 | /* DDR for all card after R300 & IGP */ | 297 | /* DDR for all card after R300 & IGP */ |
291 | rdev->mc.vram_is_ddr = true; | 298 | rdev->mc.vram_is_ddr = true; |
292 | rdev->mc.vram_width = 128; | 299 | rdev->mc.vram_width = 128; |
293 | 300 | ||
294 | /* read NB_TOM to get the amount of ram stolen for the GPU */ | 301 | r100_vram_init_sizes(rdev); |
295 | tom = RREG32(RADEON_NB_TOM); | ||
296 | rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); | ||
297 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | ||
298 | |||
299 | /* Could aper size report 0 ? */ | ||
300 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | ||
301 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
302 | } | 302 | } |
303 | 303 | ||
304 | 304 | ||
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index ab0c967553e6..bbea6dee4a94 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -223,7 +223,7 @@ int rs600_mc_init(struct radeon_device *rdev) | |||
223 | printk(KERN_WARNING "Failed to wait MC idle while " | 223 | printk(KERN_WARNING "Failed to wait MC idle while " |
224 | "programming pipes. Bad things might happen.\n"); | 224 | "programming pipes. Bad things might happen.\n"); |
225 | } | 225 | } |
226 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 226 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
227 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); | 227 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); |
228 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); | 228 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); |
229 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); | 229 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); |
@@ -301,6 +301,11 @@ void rs600_vram_info(struct radeon_device *rdev) | |||
301 | rdev->mc.vram_width = 128; | 301 | rdev->mc.vram_width = 128; |
302 | } | 302 | } |
303 | 303 | ||
304 | void rs600_bandwidth_update(struct radeon_device *rdev) | ||
305 | { | ||
306 | /* FIXME: implement, should this be like rs690 ? */ | ||
307 | } | ||
308 | |||
304 | 309 | ||
305 | /* | 310 | /* |
306 | * Indirect registers accessor | 311 | * Indirect registers accessor |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 79ba85042b5f..839595b00728 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -28,6 +28,9 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "rs690r.h" | ||
32 | #include "atom.h" | ||
33 | #include "atom-bits.h" | ||
31 | 34 | ||
32 | /* rs690,rs740 depends on : */ | 35 | /* rs690,rs740 depends on : */ |
33 | void r100_hdp_reset(struct radeon_device *rdev); | 36 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -64,7 +67,7 @@ int rs690_mc_init(struct radeon_device *rdev) | |||
64 | rs400_gart_disable(rdev); | 67 | rs400_gart_disable(rdev); |
65 | 68 | ||
66 | /* Setup GPU memory space */ | 69 | /* Setup GPU memory space */ |
67 | rdev->mc.gtt_location = rdev->mc.vram_size; | 70 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
68 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | 71 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); |
69 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | 72 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); |
70 | rdev->mc.vram_location = 0xFFFFFFFFUL; | 73 | rdev->mc.vram_location = 0xFFFFFFFFUL; |
@@ -79,7 +82,7 @@ int rs690_mc_init(struct radeon_device *rdev) | |||
79 | printk(KERN_WARNING "Failed to wait MC idle while " | 82 | printk(KERN_WARNING "Failed to wait MC idle while " |
80 | "programming pipes. Bad things might happen.\n"); | 83 | "programming pipes. Bad things might happen.\n"); |
81 | } | 84 | } |
82 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 85 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
83 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); | 86 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); |
84 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); | 87 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); |
85 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); | 88 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); |
@@ -138,9 +141,82 @@ void rs690_gpu_init(struct radeon_device *rdev) | |||
138 | /* | 141 | /* |
139 | * VRAM info. | 142 | * VRAM info. |
140 | */ | 143 | */ |
144 | void rs690_pm_info(struct radeon_device *rdev) | ||
145 | { | ||
146 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
147 | struct _ATOM_INTEGRATED_SYSTEM_INFO *info; | ||
148 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; | ||
149 | void *ptr; | ||
150 | uint16_t data_offset; | ||
151 | uint8_t frev, crev; | ||
152 | fixed20_12 tmp; | ||
153 | |||
154 | atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, | ||
155 | &frev, &crev, &data_offset); | ||
156 | ptr = rdev->mode_info.atom_context->bios + data_offset; | ||
157 | info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; | ||
158 | info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; | ||
159 | /* Get various system information from the BIOS */ | ||
160 | switch (crev) { | ||
161 | case 1: | ||
162 | tmp.full = rfixed_const(100); | ||
163 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); | ||
164 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | ||
165 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); | ||
166 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); | ||
167 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); | ||
168 | break; | ||
169 | case 2: | ||
170 | tmp.full = rfixed_const(100); | ||
171 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); | ||
172 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | ||
173 | rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); | ||
174 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
175 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); | ||
176 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); | ||
177 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); | ||
178 | break; | ||
179 | default: | ||
180 | tmp.full = rfixed_const(100); | ||
181 | /* We assume the slowest possible clock, i.e. worst case */ | ||
182 | /* DDR 333 MHz */ | ||
183 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | ||
184 | /* FIXME: system clock ? */ | ||
185 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | ||
186 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
187 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | ||
188 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | ||
189 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | ||
190 | break; | ||
191 | } | ||
192 | /* Compute the various bandwidths */ | ||
193 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ | ||
194 | tmp.full = rfixed_const(4); | ||
195 | rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); | ||
196 | /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 | ||
197 | * = ht_clk * ht_width / 5 | ||
198 | */ | ||
199 | tmp.full = rfixed_const(5); | ||
200 | rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, | ||
201 | rdev->pm.igp_ht_link_width); | ||
202 | rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); | ||
203 | if (rdev->pm.ht_bandwidth.full < rdev->pm.max_bandwidth.full) { | ||
204 | /* HT link is a limiting factor */ | ||
205 | rdev->pm.max_bandwidth.full = rdev->pm.ht_bandwidth.full; | ||
206 | } | ||
207 | /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 | ||
208 | * = (sideport_clk * 14) / 10 | ||
209 | */ | ||
210 | tmp.full = rfixed_const(14); | ||
211 | rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); | ||
212 | tmp.full = rfixed_const(10); | ||
213 | rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); | ||
214 | } | ||
215 | |||
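The arithmetic in rs690_pm_info() above is done in 20.12 fixed point through the rfixed_const/rfixed_mul/rfixed_div helpers. Below is a rough standalone sketch of such helpers together with the three bandwidth formulas from the comments; the clock and link-width values in main() are made-up inputs, and the driver's own fixed-point helpers may round differently.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;   /* 20 integer bits, 12 fractional bits */

static fixed20_12 fx_const(uint32_t v) { fixed20_12 f = { v << 12 }; return f; }
static fixed20_12 fx_mul(fixed20_12 a, fixed20_12 b)
{
        fixed20_12 r = { (uint32_t)(((uint64_t)a.full * b.full) >> 12) };
        return r;
}
static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
        fixed20_12 r = { (uint32_t)(((uint64_t)a.full << 12) / b.full) };
        return r;
}
static uint32_t fx_trunc(fixed20_12 a) { return a.full >> 12; }

int main(void)
{
        fixed20_12 system_mclk = fx_const(400);   /* assumed UMA memory clock, MHz */
        fixed20_12 ht_clk      = fx_const(1000);  /* assumed HT link clock, MHz */
        fixed20_12 ht_width    = fx_const(16);    /* assumed HT link width, bits */
        fixed20_12 sideport    = fx_const(333);   /* assumed sideport clock, MHz */

        /* k8_bandwidth = memory_clk * 4 */
        fixed20_12 k8 = fx_mul(system_mclk, fx_const(4));
        /* ht_bandwidth = ht_clk * ht_width / 5 */
        fixed20_12 ht = fx_div(fx_mul(ht_clk, ht_width), fx_const(5));
        /* sideport_bandwidth = sideport_clk * 14 / 10 */
        fixed20_12 sp = fx_div(fx_mul(sideport, fx_const(14)), fx_const(10));

        printf("k8 %u, ht %u, sideport %u\n", fx_trunc(k8), fx_trunc(ht), fx_trunc(sp));
        return 0;
}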
141 | void rs690_vram_info(struct radeon_device *rdev) | 216 | void rs690_vram_info(struct radeon_device *rdev) |
142 | { | 217 | { |
143 | uint32_t tmp; | 218 | uint32_t tmp; |
219 | fixed20_12 a; | ||
144 | 220 | ||
145 | rs400_gart_adjust_size(rdev); | 221 | rs400_gart_adjust_size(rdev); |
146 | /* DDR for all card after R300 & IGP */ | 222 | /* DDR for all card after R300 & IGP */ |
@@ -152,12 +228,409 @@ void rs690_vram_info(struct radeon_device *rdev) | |||
152 | } else { | 228 | } else { |
153 | rdev->mc.vram_width = 64; | 229 | rdev->mc.vram_width = 64; |
154 | } | 230 | } |
155 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 231 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
232 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
156 | 233 | ||
157 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 234 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
158 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 235 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
236 | rs690_pm_info(rdev); | ||
237 | /* FIXME: we should enforce default clock in case GPU is not in | ||
238 | * default setup | ||
239 | */ | ||
240 | a.full = rfixed_const(100); | ||
241 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
242 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
243 | a.full = rfixed_const(16); | ||
244 | /* core_bandwidth = sclk(Mhz) * 16 */ | ||
245 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | ||
246 | } | ||
247 | |||
248 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
249 | struct drm_display_mode *mode1, | ||
250 | struct drm_display_mode *mode2) | ||
251 | { | ||
252 | u32 tmp; | ||
253 | |||
254 | /* | ||
255 | * Line Buffer Setup | ||
256 | * There is a single line buffer shared by both display controllers. | ||
257 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | ||
258 | * the display controllers. The partitioning can either be done | ||
259 | * manually or via one of four preset allocations specified in bits 1:0: | ||
260 | * 0 - line buffer is divided in half and shared between crtc | ||
261 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | ||
262 | * 2 - D1 gets the whole buffer | ||
263 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | ||
264 | * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual | ||
265 | * allocation mode. In manual allocation mode, D1 always starts at 0, | ||
266 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. | ||
267 | */ | ||
268 | tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; | ||
269 | tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
270 | /* auto */ | ||
271 | if (mode1 && mode2) { | ||
272 | if (mode1->hdisplay > mode2->hdisplay) { | ||
273 | if (mode1->hdisplay > 2560) | ||
274 | tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | ||
275 | else | ||
276 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
277 | } else if (mode2->hdisplay > mode1->hdisplay) { | ||
278 | if (mode2->hdisplay > 2560) | ||
279 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
280 | else | ||
281 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
282 | } else | ||
283 | tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
284 | } else if (mode1) { | ||
285 | tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; | ||
286 | } else if (mode2) { | ||
287 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
288 | } | ||
289 | WREG32(DC_LB_MEMORY_SPLIT, tmp); | ||
159 | } | 290 | } |
160 | 291 | ||
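The preset selection in rs690_line_buffer_adjust() above boils down to a small decision over the two CRTC widths. A standalone userspace sketch of just that decision, using the same encoding as the DC_LB_MEMORY_SPLIT_* presets in rs690r.h (a width of 0 stands in for a disabled CRTC):

#include <stdio.h>

enum lb_split {                  /* mirrors DC_LB_MEMORY_SPLIT_* above */
        LB_D1HALF_D2HALF = 0,
        LB_D1_3Q_D2_1Q   = 1,
        LB_D1_ONLY       = 2,
        LB_D1_1Q_D2_3Q   = 3,
};

static enum lb_split pick_split(int hdisplay1, int hdisplay2)
{
        if (hdisplay1 && hdisplay2) {
                if (hdisplay1 > hdisplay2)
                        return hdisplay1 > 2560 ? LB_D1_3Q_D2_1Q : LB_D1HALF_D2HALF;
                if (hdisplay2 > hdisplay1)
                        return hdisplay2 > 2560 ? LB_D1_1Q_D2_3Q : LB_D1HALF_D2HALF;
                return LB_D1HALF_D2HALF;          /* equal widths: split in half */
        }
        if (hdisplay1)
                return LB_D1_ONLY;                /* only D1 active: give it everything */
        if (hdisplay2)
                return LB_D1_1Q_D2_3Q;            /* only D2 active: give it most of the buffer */
        return LB_D1HALF_D2HALF;                  /* both heads off: keep the default split */
}

int main(void)
{
        printf("%d\n", pick_split(2560, 1920));   /* 0: split in half */
        printf("%d\n", pick_split(3840, 1920));   /* 1: D1 gets 3/4 */
        return 0;
}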
292 | struct rs690_watermark { | ||
293 | u32 lb_request_fifo_depth; | ||
294 | fixed20_12 num_line_pair; | ||
295 | fixed20_12 estimated_width; | ||
296 | fixed20_12 worst_case_latency; | ||
297 | fixed20_12 consumption_rate; | ||
298 | fixed20_12 active_time; | ||
299 | fixed20_12 dbpp; | ||
300 | fixed20_12 priority_mark_max; | ||
301 | fixed20_12 priority_mark; | ||
302 | fixed20_12 sclk; | ||
303 | }; | ||
304 | |||
305 | void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | ||
306 | struct radeon_crtc *crtc, | ||
307 | struct rs690_watermark *wm) | ||
308 | { | ||
309 | struct drm_display_mode *mode = &crtc->base.mode; | ||
310 | fixed20_12 a, b, c; | ||
311 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | ||
312 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | ||
313 | /* FIXME: detect IGP with sideport memory, i don't think there is any | ||
314 | * such product available | ||
315 | */ | ||
316 | bool sideport = false; | ||
317 | |||
318 | if (!crtc->base.enabled) { | ||
319 | /* FIXME: wouldn't it be better to set the priority mark to maximum */ | ||
320 | wm->lb_request_fifo_depth = 4; | ||
321 | return; | ||
322 | } | ||
323 | |||
324 | if (crtc->vsc.full > rfixed_const(2)) | ||
325 | wm->num_line_pair.full = rfixed_const(2); | ||
326 | else | ||
327 | wm->num_line_pair.full = rfixed_const(1); | ||
328 | |||
329 | b.full = rfixed_const(mode->crtc_hdisplay); | ||
330 | c.full = rfixed_const(256); | ||
331 | a.full = rfixed_mul(wm->num_line_pair, b); | ||
332 | request_fifo_depth.full = rfixed_div(a, c); | ||
333 | if (a.full < rfixed_const(4)) { | ||
334 | wm->lb_request_fifo_depth = 4; | ||
335 | } else { | ||
336 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | ||
337 | } | ||
338 | |||
339 | /* Determine consumption rate | ||
340 | * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) | ||
341 | * vtaps = number of vertical taps, | ||
342 | * vsc = vertical scaling ratio, defined as source/destination | ||
343 | * hsc = horizontal scaling ratio, defined as source/destination | ||
344 | */ | ||
345 | a.full = rfixed_const(mode->clock); | ||
346 | b.full = rfixed_const(1000); | ||
347 | a.full = rfixed_div(a, b); | ||
348 | pclk.full = rfixed_div(b, a); | ||
349 | if (crtc->rmx_type != RMX_OFF) { | ||
350 | b.full = rfixed_const(2); | ||
351 | if (crtc->vsc.full > b.full) | ||
352 | b.full = crtc->vsc.full; | ||
353 | b.full = rfixed_mul(b, crtc->hsc); | ||
354 | c.full = rfixed_const(2); | ||
355 | b.full = rfixed_div(b, c); | ||
356 | consumption_time.full = rfixed_div(pclk, b); | ||
357 | } else { | ||
358 | consumption_time.full = pclk.full; | ||
359 | } | ||
360 | a.full = rfixed_const(1); | ||
361 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | ||
362 | |||
363 | |||
364 | /* Determine line time | ||
365 | * LineTime = total time for one line of display | ||
366 | * htotal = total number of horizontal pixels | ||
367 | * pclk = pixel clock period(ns) | ||
368 | */ | ||
369 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
370 | line_time.full = rfixed_mul(a, pclk); | ||
371 | |||
372 | /* Determine active time | ||
373 | * ActiveTime = time of active region of display within one line, | ||
374 | * hactive = total number of horizontal active pixels | ||
375 | * htotal = total number of horizontal pixels | ||
376 | */ | ||
377 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
378 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
379 | wm->active_time.full = rfixed_mul(line_time, b); | ||
380 | wm->active_time.full = rfixed_div(wm->active_time, a); | ||
381 | |||
382 | /* Maximum bandwidth is the minimum bandwidth of all components */ | ||
383 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; | ||
384 | if (sideport) { | ||
385 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && | ||
386 | rdev->pm.sideport_bandwidth.full) | ||
387 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; | ||
388 | read_delay_latency.full = rfixed_const(370 * 800 * 1000); | ||
389 | read_delay_latency.full = rfixed_div(read_delay_latency, | ||
390 | rdev->pm.igp_sideport_mclk); | ||
391 | } else { | ||
392 | if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && | ||
393 | rdev->pm.k8_bandwidth.full) | ||
394 | rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; | ||
395 | if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && | ||
396 | rdev->pm.ht_bandwidth.full) | ||
397 | rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; | ||
398 | read_delay_latency.full = rfixed_const(5000); | ||
399 | } | ||
400 | |||
401 | /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ | ||
402 | a.full = rfixed_const(16); | ||
403 | rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); | ||
404 | a.full = rfixed_const(1000); | ||
405 | rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); | ||
406 | /* Determine chunk time | ||
407 | * ChunkTime = the time it takes the DCP to send one chunk of data | ||
408 | * to the LB which consists of pipeline delay and inter chunk gap | ||
409 | * sclk = system clock(ns) | ||
410 | */ | ||
411 | a.full = rfixed_const(256 * 13); | ||
412 | chunk_time.full = rfixed_mul(rdev->pm.sclk, a); | ||
413 | a.full = rfixed_const(10); | ||
414 | chunk_time.full = rfixed_div(chunk_time, a); | ||
415 | |||
416 | /* Determine the worst case latency | ||
417 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | ||
418 | * WorstCaseLatency = worst case time from urgent to when the MC starts | ||
419 | * to return data | ||
420 | * READ_DELAY_IDLE_MAX = constant of 1us | ||
421 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | ||
422 | * which consists of pipeline delay and inter chunk gap | ||
423 | */ | ||
424 | if (rfixed_trunc(wm->num_line_pair) > 1) { | ||
425 | a.full = rfixed_const(3); | ||
426 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
427 | wm->worst_case_latency.full += read_delay_latency.full; | ||
428 | } else { | ||
429 | a.full = rfixed_const(2); | ||
430 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
431 | wm->worst_case_latency.full += read_delay_latency.full; | ||
432 | } | ||
433 | |||
434 | /* Determine the tolerable latency | ||
435 | * TolerableLatency = Any given request has only 1 line time | ||
436 | * for the data to be returned | ||
437 | * LBRequestFifoDepth = Number of chunk requests the LB can | ||
438 | * put into the request FIFO for a display | ||
439 | * LineTime = total time for one line of display | ||
440 | * ChunkTime = the time it takes the DCP to send one chunk | ||
441 | * of data to the LB which consists of | ||
442 | * pipeline delay and inter chunk gap | ||
443 | */ | ||
444 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | ||
445 | tolerable_latency.full = line_time.full; | ||
446 | } else { | ||
447 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | ||
448 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | ||
449 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | ||
450 | tolerable_latency.full = line_time.full - tolerable_latency.full; | ||
451 | } | ||
452 | /* We assume worst case 32bits (4 bytes) */ | ||
453 | wm->dbpp.full = rfixed_const(4 * 8); | ||
454 | |||
455 | /* Determine the maximum priority mark | ||
456 | * width = viewport width in pixels | ||
457 | */ | ||
458 | a.full = rfixed_const(16); | ||
459 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
460 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | ||
461 | |||
462 | /* Determine estimated width */ | ||
463 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | ||
464 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | ||
465 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | ||
466 | wm->priority_mark.full = rfixed_const(10); | ||
467 | } else { | ||
468 | a.full = rfixed_const(16); | ||
469 | wm->priority_mark.full = rfixed_div(estimated_width, a); | ||
470 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | ||
471 | } | ||
472 | } | ||
473 | |||
474 | void rs690_bandwidth_update(struct radeon_device *rdev) | ||
475 | { | ||
476 | struct drm_display_mode *mode0 = NULL; | ||
477 | struct drm_display_mode *mode1 = NULL; | ||
478 | struct rs690_watermark wm0; | ||
479 | struct rs690_watermark wm1; | ||
480 | u32 tmp; | ||
481 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | ||
482 | fixed20_12 a, b; | ||
483 | |||
484 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
485 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
486 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
487 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
488 | /* | ||
489 | * Set display0/1 priority up in the memory controller for | ||
490 | * modes if the user specifies HIGH for displaypriority | ||
491 | * option. | ||
492 | */ | ||
493 | if (rdev->disp_priority == 2) { | ||
494 | tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); | ||
495 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | ||
496 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | ||
497 | if (mode1) | ||
498 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
499 | if (mode0) | ||
500 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | ||
501 | WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); | ||
502 | } | ||
503 | rs690_line_buffer_adjust(rdev, mode0, mode1); | ||
504 | |||
505 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) | ||
506 | WREG32(DCP_CONTROL, 0); | ||
507 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) | ||
508 | WREG32(DCP_CONTROL, 2); | ||
509 | |||
510 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | ||
511 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | ||
512 | |||
513 | tmp = (wm0.lb_request_fifo_depth - 1); | ||
514 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; | ||
515 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | ||
516 | |||
517 | if (mode0 && mode1) { | ||
518 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
519 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | ||
520 | else | ||
521 | a.full = wm0.num_line_pair.full; | ||
522 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
523 | b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | ||
524 | else | ||
525 | b.full = wm1.num_line_pair.full; | ||
526 | a.full += b.full; | ||
527 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
528 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
529 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
530 | b.full = rfixed_mul(b, wm0.active_time); | ||
531 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
532 | wm0.consumption_rate); | ||
533 | a.full = a.full + b.full; | ||
534 | b.full = rfixed_const(16 * 1000); | ||
535 | priority_mark02.full = rfixed_div(a, b); | ||
536 | } else { | ||
537 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
538 | wm0.consumption_rate); | ||
539 | b.full = rfixed_const(16 * 1000); | ||
540 | priority_mark02.full = rfixed_div(a, b); | ||
541 | } | ||
542 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
543 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
544 | b.full = rfixed_mul(b, wm1.active_time); | ||
545 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
546 | wm1.consumption_rate); | ||
547 | a.full = a.full + b.full; | ||
548 | b.full = rfixed_const(16 * 1000); | ||
549 | priority_mark12.full = rfixed_div(a, b); | ||
550 | } else { | ||
551 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
552 | wm1.consumption_rate); | ||
553 | b.full = rfixed_const(16 * 1000); | ||
554 | priority_mark12.full = rfixed_div(a, b); | ||
555 | } | ||
556 | if (wm0.priority_mark.full > priority_mark02.full) | ||
557 | priority_mark02.full = wm0.priority_mark.full; | ||
558 | if (rfixed_trunc(priority_mark02) < 0) | ||
559 | priority_mark02.full = 0; | ||
560 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
561 | priority_mark02.full = wm0.priority_mark_max.full; | ||
562 | if (wm1.priority_mark.full > priority_mark12.full) | ||
563 | priority_mark12.full = wm1.priority_mark.full; | ||
564 | if (rfixed_trunc(priority_mark12) < 0) | ||
565 | priority_mark12.full = 0; | ||
566 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
567 | priority_mark12.full = wm1.priority_mark_max.full; | ||
568 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
569 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
570 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
571 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
572 | } else if (mode0) { | ||
573 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
574 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | ||
575 | else | ||
576 | a.full = wm0.num_line_pair.full; | ||
577 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
578 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
579 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
580 | b.full = rfixed_mul(b, wm0.active_time); | ||
581 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
582 | wm0.consumption_rate); | ||
583 | a.full = a.full + b.full; | ||
584 | b.full = rfixed_const(16 * 1000); | ||
585 | priority_mark02.full = rfixed_div(a, b); | ||
586 | } else { | ||
587 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
588 | wm0.consumption_rate); | ||
589 | b.full = rfixed_const(16 * 1000); | ||
590 | priority_mark02.full = rfixed_div(a, b); | ||
591 | } | ||
592 | if (wm0.priority_mark.full > priority_mark02.full) | ||
593 | priority_mark02.full = wm0.priority_mark.full; | ||
594 | if (rfixed_trunc(priority_mark02) < 0) | ||
595 | priority_mark02.full = 0; | ||
596 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
597 | priority_mark02.full = wm0.priority_mark_max.full; | ||
598 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
599 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
600 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
601 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
602 | } else { | ||
603 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
604 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | ||
605 | else | ||
606 | a.full = wm1.num_line_pair.full; | ||
607 | fill_rate.full = rfixed_div(wm1.sclk, a); | ||
608 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
609 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
610 | b.full = rfixed_mul(b, wm1.active_time); | ||
611 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
612 | wm1.consumption_rate); | ||
613 | a.full = a.full + b.full; | ||
614 | b.full = rfixed_const(16 * 1000); | ||
615 | priority_mark12.full = rfixed_div(a, b); | ||
616 | } else { | ||
617 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
618 | wm1.consumption_rate); | ||
619 | b.full = rfixed_const(16 * 1000); | ||
620 | priority_mark12.full = rfixed_div(a, b); | ||
621 | } | ||
622 | if (wm1.priority_mark.full > priority_mark12.full) | ||
623 | priority_mark12.full = wm1.priority_mark.full; | ||
624 | if (rfixed_trunc(priority_mark12) < 0) | ||
625 | priority_mark12.full = 0; | ||
626 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
627 | priority_mark12.full = wm1.priority_mark_max.full; | ||
628 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
629 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
630 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
631 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
632 | } | ||
633 | } | ||
161 | 634 | ||
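Stripped of the fixed-point plumbing, the per-CRTC priority mark that rs690_bandwidth_update() above programs into D1MODE/D2MODE_PRIORITY_*_CNT follows one formula. The floating-point sketch below restates it with made-up inputs; the driver does the same computation in 20.12 fixed point and additionally clamps the result between 0 and priority_mark_max.

#include <stdio.h>

/* mark = (worst_case_latency * consumption_rate
 *         + max(0, consumption_rate - fill_rate) * active_time) / (16 * 1000)
 */
static double priority_mark(double worst_case_latency_ns,
                            double consumption_rate,     /* pixels per ns */
                            double fill_rate,            /* pixels per ns */
                            double active_time_ns)
{
        double extra = 0.0;

        if (consumption_rate > fill_rate)
                extra = (consumption_rate - fill_rate) * active_time_ns;
        return (worst_case_latency_ns * consumption_rate + extra) / (16.0 * 1000.0);
}

int main(void)
{
        /* assumed inputs: 5 us worst-case latency, 0.15 pixel/ns consumption,
         * 0.10 pixel/ns fill rate, 10 us of active time per line */
        printf("priority mark ~ %.3f\n", priority_mark(5000.0, 0.15, 0.10, 10000.0));
        return 0;
}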
162 | /* | 635 | /* |
163 | * Indirect registers accessor | 636 | * Indirect registers accessor |
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h new file mode 100644 index 000000000000..c0d9faa2175b --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690r.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef RS690R_H | ||
29 | #define RS690R_H | ||
30 | |||
31 | /* RS690/RS740 registers */ | ||
32 | #define MC_INDEX 0x0078 | ||
33 | # define MC_INDEX_MASK 0x1FF | ||
34 | # define MC_INDEX_WR_EN (1 << 9) | ||
35 | # define MC_INDEX_WR_ACK 0x7F | ||
36 | #define MC_DATA 0x007C | ||
37 | #define HDP_FB_LOCATION 0x0134 | ||
38 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
39 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
40 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
41 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
42 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
43 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
44 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
45 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
46 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
47 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
48 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
49 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
50 | #define MODE_PRIORITY_OFF (1 << 16) | ||
51 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
52 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
53 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
54 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
55 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
56 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
57 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
58 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
59 | #define DCP_CONTROL 0x6C9C | ||
60 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
61 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
62 | |||
63 | /* MC indirect registers */ | ||
64 | #define MC_STATUS_IDLE (1 << 0) | ||
65 | #define MC_MISC_CNTL 0x18 | ||
66 | #define DISABLE_GTW (1 << 1) | ||
67 | #define GART_INDEX_REG_EN (1 << 12) | ||
68 | #define BLOCK_GFX_D3_EN (1 << 14) | ||
69 | #define GART_FEATURE_ID 0x2B | ||
70 | #define HANG_EN (1 << 11) | ||
71 | #define TLB_ENABLE (1 << 18) | ||
72 | #define P2P_ENABLE (1 << 19) | ||
73 | #define GTW_LAC_EN (1 << 25) | ||
74 | #define LEVEL2_GART (0 << 30) | ||
75 | #define LEVEL1_GART (1 << 30) | ||
76 | #define PDC_EN (1 << 31) | ||
77 | #define GART_BASE 0x2C | ||
78 | #define GART_CACHE_CNTRL 0x2E | ||
79 | # define GART_CACHE_INVALIDATE (1 << 0) | ||
80 | #define MC_STATUS 0x90 | ||
81 | #define MCCFG_FB_LOCATION 0x100 | ||
82 | #define MC_FB_START_MASK 0x0000FFFF | ||
83 | #define MC_FB_START_SHIFT 0 | ||
84 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
85 | #define MC_FB_TOP_SHIFT 16 | ||
86 | #define MCCFG_AGP_LOCATION 0x101 | ||
87 | #define MC_AGP_START_MASK 0x0000FFFF | ||
88 | #define MC_AGP_START_SHIFT 0 | ||
89 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
90 | #define MC_AGP_TOP_SHIFT 16 | ||
91 | #define MCCFG_AGP_BASE 0x102 | ||
92 | #define MCCFG_AGP_BASE_2 0x103 | ||
93 | #define MC_INIT_MISC_LAT_TIMER 0x104 | ||
94 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
95 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
96 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
97 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
98 | |||
99 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ffea37b1b3e2..fd8f3ca716ea 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -27,8 +27,9 @@ | |||
27 | */ | 27 | */ |
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "radeon_reg.h" | 30 | #include "rv515r.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_share.h" | ||
32 | 33 | ||
33 | /* rv515 depends on : */ | 34 | /* rv515 depends on : */ |
34 | void r100_hdp_reset(struct radeon_device *rdev); | 35 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -99,26 +100,26 @@ int rv515_mc_init(struct radeon_device *rdev) | |||
99 | "programming pipes. Bad things might happen.\n"); | 100 | "programming pipes. Bad things might happen.\n"); |
100 | } | 101 | } |
101 | /* Write VRAM size in case we are limiting it */ | 102 | /* Write VRAM size in case we are limiting it */ |
102 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 103 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
103 | tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); | 104 | tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); |
104 | WREG32(0x134, tmp); | 105 | WREG32(0x134, tmp); |
105 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 106 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
106 | tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16); | 107 | tmp = REG_SET(MC_FB_TOP, tmp >> 16); |
107 | tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); | 108 | tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); |
108 | WREG32_MC(RV515_MC_FB_LOCATION, tmp); | 109 | WREG32_MC(MC_FB_LOCATION, tmp); |
109 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | 110 | WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16); |
110 | WREG32(0x310, rdev->mc.vram_location); | 111 | WREG32(0x310, rdev->mc.vram_location); |
111 | if (rdev->flags & RADEON_IS_AGP) { | 112 | if (rdev->flags & RADEON_IS_AGP) { |
112 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 113 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
113 | tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16); | 114 | tmp = REG_SET(MC_AGP_TOP, tmp >> 16); |
114 | tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16); | 115 | tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16); |
115 | WREG32_MC(RV515_MC_AGP_LOCATION, tmp); | 116 | WREG32_MC(MC_AGP_LOCATION, tmp); |
116 | WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base); | 117 | WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base); |
117 | WREG32_MC(RV515_MC_AGP_BASE_2, 0); | 118 | WREG32_MC(MC_AGP_BASE_2, 0); |
118 | } else { | 119 | } else { |
119 | WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF); | 120 | WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF); |
120 | WREG32_MC(RV515_MC_AGP_BASE, 0); | 121 | WREG32_MC(MC_AGP_BASE, 0); |
121 | WREG32_MC(RV515_MC_AGP_BASE_2, 0); | 122 | WREG32_MC(MC_AGP_BASE_2, 0); |
122 | } | 123 | } |
123 | return 0; | 124 | return 0; |
124 | } | 125 | } |
@@ -136,95 +137,67 @@ void rv515_mc_fini(struct radeon_device *rdev) | |||
136 | */ | 137 | */ |
137 | void rv515_ring_start(struct radeon_device *rdev) | 138 | void rv515_ring_start(struct radeon_device *rdev) |
138 | { | 139 | { |
139 | unsigned gb_tile_config; | ||
140 | int r; | 140 | int r; |
141 | 141 | ||
142 | /* Sub pixel 1/12 so we can have 4K rendering according to doc */ | ||
143 | gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16; | ||
144 | switch (rdev->num_gb_pipes) { | ||
145 | case 2: | ||
146 | gb_tile_config |= R300_PIPE_COUNT_R300; | ||
147 | break; | ||
148 | case 3: | ||
149 | gb_tile_config |= R300_PIPE_COUNT_R420_3P; | ||
150 | break; | ||
151 | case 4: | ||
152 | gb_tile_config |= R300_PIPE_COUNT_R420; | ||
153 | break; | ||
154 | case 1: | ||
155 | default: | ||
156 | gb_tile_config |= R300_PIPE_COUNT_RV350; | ||
157 | break; | ||
158 | } | ||
159 | |||
160 | r = radeon_ring_lock(rdev, 64); | 142 | r = radeon_ring_lock(rdev, 64); |
161 | if (r) { | 143 | if (r) { |
162 | return; | 144 | return; |
163 | } | 145 | } |
164 | radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); | 146 | radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0)); |
165 | radeon_ring_write(rdev, | ||
166 | RADEON_ISYNC_ANY2D_IDLE3D | | ||
167 | RADEON_ISYNC_ANY3D_IDLE2D | | ||
168 | RADEON_ISYNC_WAIT_IDLEGUI | | ||
169 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); | ||
170 | radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0)); | ||
171 | radeon_ring_write(rdev, gb_tile_config); | ||
172 | radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); | ||
173 | radeon_ring_write(rdev, | 147 | radeon_ring_write(rdev, |
174 | RADEON_WAIT_2D_IDLECLEAN | | 148 | ISYNC_ANY2D_IDLE3D | |
175 | RADEON_WAIT_3D_IDLECLEAN); | 149 | ISYNC_ANY3D_IDLE2D | |
150 | ISYNC_WAIT_IDLEGUI | | ||
151 | ISYNC_CPSCRATCH_IDLEGUI); | ||
152 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); | ||
153 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); | ||
176 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); | 154 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); |
177 | radeon_ring_write(rdev, 1 << 31); | 155 | radeon_ring_write(rdev, 1 << 31); |
178 | radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); | 156 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); |
179 | radeon_ring_write(rdev, 0); | 157 | radeon_ring_write(rdev, 0); |
180 | radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); | 158 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); |
181 | radeon_ring_write(rdev, 0); | 159 | radeon_ring_write(rdev, 0); |
182 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); | 160 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); |
183 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); | 161 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); |
184 | radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0)); | 162 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); |
185 | radeon_ring_write(rdev, 0); | 163 | radeon_ring_write(rdev, 0); |
186 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 164 | radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
187 | radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); | 165 | radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); |
188 | radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 166 | radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
189 | radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); | 167 | radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); |
190 | radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); | 168 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); |
191 | radeon_ring_write(rdev, | 169 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
192 | RADEON_WAIT_2D_IDLECLEAN | | 170 | radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0)); |
193 | RADEON_WAIT_3D_IDLECLEAN); | ||
194 | radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0)); | ||
195 | radeon_ring_write(rdev, 0); | 171 | radeon_ring_write(rdev, 0); |
196 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 172 | radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
197 | radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); | 173 | radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); |
198 | radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 174 | radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
199 | radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); | 175 | radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); |
200 | radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); | 176 | radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0)); |
201 | radeon_ring_write(rdev, | ||
202 | ((6 << R300_MS_X0_SHIFT) | | ||
203 | (6 << R300_MS_Y0_SHIFT) | | ||
204 | (6 << R300_MS_X1_SHIFT) | | ||
205 | (6 << R300_MS_Y1_SHIFT) | | ||
206 | (6 << R300_MS_X2_SHIFT) | | ||
207 | (6 << R300_MS_Y2_SHIFT) | | ||
208 | (6 << R300_MSBD0_Y_SHIFT) | | ||
209 | (6 << R300_MSBD0_X_SHIFT))); | ||
210 | radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0)); | ||
211 | radeon_ring_write(rdev, | 177 | radeon_ring_write(rdev, |
212 | ((6 << R300_MS_X3_SHIFT) | | 178 | ((6 << MS_X0_SHIFT) | |
213 | (6 << R300_MS_Y3_SHIFT) | | 179 | (6 << MS_Y0_SHIFT) | |
214 | (6 << R300_MS_X4_SHIFT) | | 180 | (6 << MS_X1_SHIFT) | |
215 | (6 << R300_MS_Y4_SHIFT) | | 181 | (6 << MS_Y1_SHIFT) | |
216 | (6 << R300_MS_X5_SHIFT) | | 182 | (6 << MS_X2_SHIFT) | |
217 | (6 << R300_MS_Y5_SHIFT) | | 183 | (6 << MS_Y2_SHIFT) | |
218 | (6 << R300_MSBD1_SHIFT))); | 184 | (6 << MSBD0_Y_SHIFT) | |
219 | radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); | 185 | (6 << MSBD0_X_SHIFT))); |
220 | radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); | 186 | radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0)); |
221 | radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0)); | ||
222 | radeon_ring_write(rdev, | 187 | radeon_ring_write(rdev, |
223 | R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); | 188 | ((6 << MS_X3_SHIFT) | |
224 | radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); | 189 | (6 << MS_Y3_SHIFT) | |
225 | radeon_ring_write(rdev, | 190 | (6 << MS_X4_SHIFT) | |
226 | R300_GEOMETRY_ROUND_NEAREST | | 191 | (6 << MS_Y4_SHIFT) | |
227 | R300_COLOR_ROUND_NEAREST); | 192 | (6 << MS_X5_SHIFT) | |
193 | (6 << MS_Y5_SHIFT) | | ||
194 | (6 << MSBD1_SHIFT))); | ||
195 | radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0)); | ||
196 | radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL); | ||
197 | radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0)); | ||
198 | radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE); | ||
199 | radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0)); | ||
200 | radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); | ||
228 | radeon_ring_write(rdev, PACKET0(0x20C8, 0)); | 201 | radeon_ring_write(rdev, PACKET0(0x20C8, 0)); |
229 | radeon_ring_write(rdev, 0); | 202 | radeon_ring_write(rdev, 0); |
230 | radeon_ring_unlock_commit(rdev); | 203 | radeon_ring_unlock_commit(rdev); |
@@ -242,8 +215,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) | |||
242 | 215 | ||
243 | for (i = 0; i < rdev->usec_timeout; i++) { | 216 | for (i = 0; i < rdev->usec_timeout; i++) { |
244 | /* read MC_STATUS */ | 217 | /* read MC_STATUS */ |
245 | tmp = RREG32_MC(RV515_MC_STATUS); | 218 | tmp = RREG32_MC(MC_STATUS); |
246 | if (tmp & RV515_MC_STATUS_IDLE) { | 219 | if (tmp & MC_STATUS_IDLE) { |
247 | return 0; | 220 | return 0; |
248 | } | 221 | } |
249 | DRM_UDELAY(1); | 222 | DRM_UDELAY(1); |
@@ -291,33 +264,33 @@ int rv515_ga_reset(struct radeon_device *rdev) | |||
291 | reinit_cp = rdev->cp.ready; | 264 | reinit_cp = rdev->cp.ready; |
292 | rdev->cp.ready = false; | 265 | rdev->cp.ready = false; |
293 | for (i = 0; i < rdev->usec_timeout; i++) { | 266 | for (i = 0; i < rdev->usec_timeout; i++) { |
294 | WREG32(RADEON_CP_CSQ_MODE, 0); | 267 | WREG32(CP_CSQ_MODE, 0); |
295 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 268 | WREG32(CP_CSQ_CNTL, 0); |
296 | WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); | 269 | WREG32(RBBM_SOFT_RESET, 0x32005); |
297 | (void)RREG32(RADEON_RBBM_SOFT_RESET); | 270 | (void)RREG32(RBBM_SOFT_RESET); |
298 | udelay(200); | 271 | udelay(200); |
299 | WREG32(RADEON_RBBM_SOFT_RESET, 0); | 272 | WREG32(RBBM_SOFT_RESET, 0); |
300 | /* Wait to prevent race in RBBM_STATUS */ | 273 | /* Wait to prevent race in RBBM_STATUS */ |
301 | mdelay(1); | 274 | mdelay(1); |
302 | tmp = RREG32(RADEON_RBBM_STATUS); | 275 | tmp = RREG32(RBBM_STATUS); |
303 | if (tmp & ((1 << 20) | (1 << 26))) { | 276 | if (tmp & ((1 << 20) | (1 << 26))) { |
304 | DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); | 277 | DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); |
305 | /* GA still busy, soft reset it */ | 278 | /* GA still busy, soft reset it */ |
306 | WREG32(0x429C, 0x200); | 279 | WREG32(0x429C, 0x200); |
307 | WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); | 280 | WREG32(VAP_PVS_STATE_FLUSH_REG, 0); |
308 | WREG32(0x43E0, 0); | 281 | WREG32(0x43E0, 0); |
309 | WREG32(0x43E4, 0); | 282 | WREG32(0x43E4, 0); |
310 | WREG32(0x24AC, 0); | 283 | WREG32(0x24AC, 0); |
311 | } | 284 | } |
312 | /* Wait to prevent race in RBBM_STATUS */ | 285 | /* Wait to prevent race in RBBM_STATUS */ |
313 | mdelay(1); | 286 | mdelay(1); |
314 | tmp = RREG32(RADEON_RBBM_STATUS); | 287 | tmp = RREG32(RBBM_STATUS); |
315 | if (!(tmp & ((1 << 20) | (1 << 26)))) { | 288 | if (!(tmp & ((1 << 20) | (1 << 26)))) { |
316 | break; | 289 | break; |
317 | } | 290 | } |
318 | } | 291 | } |
319 | for (i = 0; i < rdev->usec_timeout; i++) { | 292 | for (i = 0; i < rdev->usec_timeout; i++) { |
320 | tmp = RREG32(RADEON_RBBM_STATUS); | 293 | tmp = RREG32(RBBM_STATUS); |
321 | if (!(tmp & ((1 << 20) | (1 << 26)))) { | 294 | if (!(tmp & ((1 << 20) | (1 << 26)))) { |
322 | DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n", | 295 | DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n", |
323 | tmp); | 296 | tmp); |
@@ -331,7 +304,7 @@ int rv515_ga_reset(struct radeon_device *rdev) | |||
331 | } | 304 | } |
332 | DRM_UDELAY(1); | 305 | DRM_UDELAY(1); |
333 | } | 306 | } |
334 | tmp = RREG32(RADEON_RBBM_STATUS); | 307 | tmp = RREG32(RBBM_STATUS); |
335 | DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); | 308 | DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); |
336 | return -1; | 309 | return -1; |
337 | } | 310 | } |
@@ -341,7 +314,7 @@ int rv515_gpu_reset(struct radeon_device *rdev) | |||
341 | uint32_t status; | 314 | uint32_t status; |
342 | 315 | ||
343 | /* reset order likely matters */ | 316 | /* reset order likely matters */ |
344 | status = RREG32(RADEON_RBBM_STATUS); | 317 | status = RREG32(RBBM_STATUS); |
345 | /* reset HDP */ | 318 | /* reset HDP */ |
346 | r100_hdp_reset(rdev); | 319 | r100_hdp_reset(rdev); |
347 | /* reset rb2d */ | 320 | /* reset rb2d */ |
@@ -353,12 +326,12 @@ int rv515_gpu_reset(struct radeon_device *rdev) | |||
353 | rv515_ga_reset(rdev); | 326 | rv515_ga_reset(rdev); |
354 | } | 327 | } |
355 | /* reset CP */ | 328 | /* reset CP */ |
356 | status = RREG32(RADEON_RBBM_STATUS); | 329 | status = RREG32(RBBM_STATUS); |
357 | if (status & (1 << 16)) { | 330 | if (status & (1 << 16)) { |
358 | r100_cp_reset(rdev); | 331 | r100_cp_reset(rdev); |
359 | } | 332 | } |
360 | /* Check if GPU is idle */ | 333 | /* Check if GPU is idle */ |
361 | status = RREG32(RADEON_RBBM_STATUS); | 334 | status = RREG32(RBBM_STATUS); |
362 | if (status & (1 << 31)) { | 335 | if (status & (1 << 31)) { |
363 | DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); | 336 | DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); |
364 | return -1; | 337 | return -1; |
@@ -377,8 +350,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev) | |||
377 | 350 | ||
378 | rdev->mc.vram_width = 128; | 351 | rdev->mc.vram_width = 128; |
379 | rdev->mc.vram_is_ddr = true; | 352 | rdev->mc.vram_is_ddr = true; |
380 | tmp = RREG32_MC(RV515_MC_CNTL); | 353 | tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK; |
381 | tmp &= RV515_MEM_NUM_CHANNELS_MASK; | ||
382 | switch (tmp) { | 354 | switch (tmp) { |
383 | case 0: | 355 | case 0: |
384 | rdev->mc.vram_width = 64; | 356 | rdev->mc.vram_width = 64; |
@@ -394,11 +366,17 @@ static void rv515_vram_get_type(struct radeon_device *rdev) | |||
394 | 366 | ||
395 | void rv515_vram_info(struct radeon_device *rdev) | 367 | void rv515_vram_info(struct radeon_device *rdev) |
396 | { | 368 | { |
369 | fixed20_12 a; | ||
370 | |||
397 | rv515_vram_get_type(rdev); | 371 | rv515_vram_get_type(rdev); |
398 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
399 | 372 | ||
400 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 373 | r100_vram_init_sizes(rdev); |
401 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 374 | /* FIXME: we should enforce default clock in case GPU is not in |
375 | * default setup | ||
376 | */ | ||
377 | a.full = rfixed_const(100); | ||
378 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
379 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
402 | } | 380 | } |
403 | 381 | ||
404 | 382 | ||
@@ -409,35 +387,35 @@ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) | |||
409 | { | 387 | { |
410 | uint32_t r; | 388 | uint32_t r; |
411 | 389 | ||
412 | WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); | 390 | WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); |
413 | r = RREG32(R520_MC_IND_DATA); | 391 | r = RREG32(MC_IND_DATA); |
414 | WREG32(R520_MC_IND_INDEX, 0); | 392 | WREG32(MC_IND_INDEX, 0); |
415 | return r; | 393 | return r; |
416 | } | 394 | } |
417 | 395 | ||
418 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 396 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
419 | { | 397 | { |
420 | WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); | 398 | WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); |
421 | WREG32(R520_MC_IND_DATA, (v)); | 399 | WREG32(MC_IND_DATA, (v)); |
422 | WREG32(R520_MC_IND_INDEX, 0); | 400 | WREG32(MC_IND_INDEX, 0); |
423 | } | 401 | } |
424 | 402 | ||
425 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) | 403 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
426 | { | 404 | { |
427 | uint32_t r; | 405 | uint32_t r; |
428 | 406 | ||
429 | WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); | 407 | WREG32(PCIE_INDEX, ((reg) & 0x7ff)); |
430 | (void)RREG32(RADEON_PCIE_INDEX); | 408 | (void)RREG32(PCIE_INDEX); |
431 | r = RREG32(RADEON_PCIE_DATA); | 409 | r = RREG32(PCIE_DATA); |
432 | return r; | 410 | return r; |
433 | } | 411 | } |
434 | 412 | ||
435 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 413 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
436 | { | 414 | { |
437 | WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); | 415 | WREG32(PCIE_INDEX, ((reg) & 0x7ff)); |
438 | (void)RREG32(RADEON_PCIE_INDEX); | 416 | (void)RREG32(PCIE_INDEX); |
439 | WREG32(RADEON_PCIE_DATA, (v)); | 417 | WREG32(PCIE_DATA, (v)); |
440 | (void)RREG32(RADEON_PCIE_DATA); | 418 | (void)RREG32(PCIE_DATA); |
441 | } | 419 | } |
442 | 420 | ||
443 | 421 | ||
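(Editor's note.) rv515_mc_rreg()/rv515_mc_wreg() follow the usual index/data protocol: the target register number is written to MC_IND_INDEX, the value is transferred through MC_IND_DATA, and the index is cleared afterwards. A small usage sketch built on the RREG32_MC() wrapper already used in this file (the helper name is made up; the fields come from rv515r.h):

        static void rv515_mc_get_fb_range(struct radeon_device *rdev,
                                          u32 *start, u32 *top)
        {
                /* Goes through MC_IND_INDEX/MC_IND_DATA via rv515_mc_rreg(). */
                u32 tmp = RREG32_MC(MC_FB_LOCATION);

                *start = (tmp & MC_FB_START_MASK) >> MC_FB_START_SHIFT;
                *top = (tmp & MC_FB_TOP_MASK) >> MC_FB_TOP_SHIFT;
        }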
@@ -452,13 +430,13 @@ static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) | |||
452 | struct radeon_device *rdev = dev->dev_private; | 430 | struct radeon_device *rdev = dev->dev_private; |
453 | uint32_t tmp; | 431 | uint32_t tmp; |
454 | 432 | ||
455 | tmp = RREG32(R400_GB_PIPE_SELECT); | 433 | tmp = RREG32(GB_PIPE_SELECT); |
456 | seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); | 434 | seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); |
457 | tmp = RREG32(R500_SU_REG_DEST); | 435 | tmp = RREG32(SU_REG_DEST); |
458 | seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); | 436 | seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); |
459 | tmp = RREG32(R300_GB_TILE_CONFIG); | 437 | tmp = RREG32(GB_TILE_CONFIG); |
460 | seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); | 438 | seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); |
461 | tmp = RREG32(R300_DST_PIPE_CONFIG); | 439 | tmp = RREG32(DST_PIPE_CONFIG); |
462 | seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); | 440 | seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); |
463 | return 0; | 441 | return 0; |
464 | } | 442 | } |
@@ -509,9 +487,9 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) | |||
509 | /* | 487 | /* |
510 | * Asic initialization | 488 | * Asic initialization |
511 | */ | 489 | */ |
512 | static const unsigned r500_reg_safe_bm[159] = { | 490 | static const unsigned r500_reg_safe_bm[219] = { |
491 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
513 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 492 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
514 | 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, | ||
515 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 493 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
516 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 494 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
517 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 495 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
@@ -549,14 +527,575 @@ static const unsigned r500_reg_safe_bm[159] = { | |||
549 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 527 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
550 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, | 528 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, |
551 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 529 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
552 | 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, | 530 | 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF, |
531 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
532 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
533 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
534 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
535 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
536 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
537 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
538 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
539 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
540 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
541 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
542 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
543 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
544 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
545 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
553 | }; | 546 | }; |
554 | 547 | ||
555 | |||
556 | |||
557 | int rv515_init(struct radeon_device *rdev) | 548 | int rv515_init(struct radeon_device *rdev) |
558 | { | 549 | { |
559 | rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; | 550 | rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; |
560 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); | 551 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); |
561 | return 0; | 552 | return 0; |
562 | } | 553 | } |
554 | |||
555 | void atom_rv515_force_tv_scaler(struct radeon_device *rdev) | ||
556 | { | ||
557 | |||
558 | WREG32(0x659C, 0x0); | ||
559 | WREG32(0x6594, 0x705); | ||
560 | WREG32(0x65A4, 0x10001); | ||
561 | WREG32(0x65D8, 0x0); | ||
562 | WREG32(0x65B0, 0x0); | ||
563 | WREG32(0x65C0, 0x0); | ||
564 | WREG32(0x65D4, 0x0); | ||
565 | WREG32(0x6578, 0x0); | ||
566 | WREG32(0x657C, 0x841880A8); | ||
567 | WREG32(0x6578, 0x1); | ||
568 | WREG32(0x657C, 0x84208680); | ||
569 | WREG32(0x6578, 0x2); | ||
570 | WREG32(0x657C, 0xBFF880B0); | ||
571 | WREG32(0x6578, 0x100); | ||
572 | WREG32(0x657C, 0x83D88088); | ||
573 | WREG32(0x6578, 0x101); | ||
574 | WREG32(0x657C, 0x84608680); | ||
575 | WREG32(0x6578, 0x102); | ||
576 | WREG32(0x657C, 0xBFF080D0); | ||
577 | WREG32(0x6578, 0x200); | ||
578 | WREG32(0x657C, 0x83988068); | ||
579 | WREG32(0x6578, 0x201); | ||
580 | WREG32(0x657C, 0x84A08680); | ||
581 | WREG32(0x6578, 0x202); | ||
582 | WREG32(0x657C, 0xBFF080F8); | ||
583 | WREG32(0x6578, 0x300); | ||
584 | WREG32(0x657C, 0x83588058); | ||
585 | WREG32(0x6578, 0x301); | ||
586 | WREG32(0x657C, 0x84E08660); | ||
587 | WREG32(0x6578, 0x302); | ||
588 | WREG32(0x657C, 0xBFF88120); | ||
589 | WREG32(0x6578, 0x400); | ||
590 | WREG32(0x657C, 0x83188040); | ||
591 | WREG32(0x6578, 0x401); | ||
592 | WREG32(0x657C, 0x85008660); | ||
593 | WREG32(0x6578, 0x402); | ||
594 | WREG32(0x657C, 0xBFF88150); | ||
595 | WREG32(0x6578, 0x500); | ||
596 | WREG32(0x657C, 0x82D88030); | ||
597 | WREG32(0x6578, 0x501); | ||
598 | WREG32(0x657C, 0x85408640); | ||
599 | WREG32(0x6578, 0x502); | ||
600 | WREG32(0x657C, 0xBFF88180); | ||
601 | WREG32(0x6578, 0x600); | ||
602 | WREG32(0x657C, 0x82A08018); | ||
603 | WREG32(0x6578, 0x601); | ||
604 | WREG32(0x657C, 0x85808620); | ||
605 | WREG32(0x6578, 0x602); | ||
606 | WREG32(0x657C, 0xBFF081B8); | ||
607 | WREG32(0x6578, 0x700); | ||
608 | WREG32(0x657C, 0x82608010); | ||
609 | WREG32(0x6578, 0x701); | ||
610 | WREG32(0x657C, 0x85A08600); | ||
611 | WREG32(0x6578, 0x702); | ||
612 | WREG32(0x657C, 0x800081F0); | ||
613 | WREG32(0x6578, 0x800); | ||
614 | WREG32(0x657C, 0x8228BFF8); | ||
615 | WREG32(0x6578, 0x801); | ||
616 | WREG32(0x657C, 0x85E085E0); | ||
617 | WREG32(0x6578, 0x802); | ||
618 | WREG32(0x657C, 0xBFF88228); | ||
619 | WREG32(0x6578, 0x10000); | ||
620 | WREG32(0x657C, 0x82A8BF00); | ||
621 | WREG32(0x6578, 0x10001); | ||
622 | WREG32(0x657C, 0x82A08CC0); | ||
623 | WREG32(0x6578, 0x10002); | ||
624 | WREG32(0x657C, 0x8008BEF8); | ||
625 | WREG32(0x6578, 0x10100); | ||
626 | WREG32(0x657C, 0x81F0BF28); | ||
627 | WREG32(0x6578, 0x10101); | ||
628 | WREG32(0x657C, 0x83608CA0); | ||
629 | WREG32(0x6578, 0x10102); | ||
630 | WREG32(0x657C, 0x8018BED0); | ||
631 | WREG32(0x6578, 0x10200); | ||
632 | WREG32(0x657C, 0x8148BF38); | ||
633 | WREG32(0x6578, 0x10201); | ||
634 | WREG32(0x657C, 0x84408C80); | ||
635 | WREG32(0x6578, 0x10202); | ||
636 | WREG32(0x657C, 0x8008BEB8); | ||
637 | WREG32(0x6578, 0x10300); | ||
638 | WREG32(0x657C, 0x80B0BF78); | ||
639 | WREG32(0x6578, 0x10301); | ||
640 | WREG32(0x657C, 0x85008C20); | ||
641 | WREG32(0x6578, 0x10302); | ||
642 | WREG32(0x657C, 0x8020BEA0); | ||
643 | WREG32(0x6578, 0x10400); | ||
644 | WREG32(0x657C, 0x8028BF90); | ||
645 | WREG32(0x6578, 0x10401); | ||
646 | WREG32(0x657C, 0x85E08BC0); | ||
647 | WREG32(0x6578, 0x10402); | ||
648 | WREG32(0x657C, 0x8018BE90); | ||
649 | WREG32(0x6578, 0x10500); | ||
650 | WREG32(0x657C, 0xBFB8BFB0); | ||
651 | WREG32(0x6578, 0x10501); | ||
652 | WREG32(0x657C, 0x86C08B40); | ||
653 | WREG32(0x6578, 0x10502); | ||
654 | WREG32(0x657C, 0x8010BE90); | ||
655 | WREG32(0x6578, 0x10600); | ||
656 | WREG32(0x657C, 0xBF58BFC8); | ||
657 | WREG32(0x6578, 0x10601); | ||
658 | WREG32(0x657C, 0x87A08AA0); | ||
659 | WREG32(0x6578, 0x10602); | ||
660 | WREG32(0x657C, 0x8010BE98); | ||
661 | WREG32(0x6578, 0x10700); | ||
662 | WREG32(0x657C, 0xBF10BFF0); | ||
663 | WREG32(0x6578, 0x10701); | ||
664 | WREG32(0x657C, 0x886089E0); | ||
665 | WREG32(0x6578, 0x10702); | ||
666 | WREG32(0x657C, 0x8018BEB0); | ||
667 | WREG32(0x6578, 0x10800); | ||
668 | WREG32(0x657C, 0xBED8BFE8); | ||
669 | WREG32(0x6578, 0x10801); | ||
670 | WREG32(0x657C, 0x89408940); | ||
671 | WREG32(0x6578, 0x10802); | ||
672 | WREG32(0x657C, 0xBFE8BED8); | ||
673 | WREG32(0x6578, 0x20000); | ||
674 | WREG32(0x657C, 0x80008000); | ||
675 | WREG32(0x6578, 0x20001); | ||
676 | WREG32(0x657C, 0x90008000); | ||
677 | WREG32(0x6578, 0x20002); | ||
678 | WREG32(0x657C, 0x80008000); | ||
679 | WREG32(0x6578, 0x20003); | ||
680 | WREG32(0x657C, 0x80008000); | ||
681 | WREG32(0x6578, 0x20100); | ||
682 | WREG32(0x657C, 0x80108000); | ||
683 | WREG32(0x6578, 0x20101); | ||
684 | WREG32(0x657C, 0x8FE0BF70); | ||
685 | WREG32(0x6578, 0x20102); | ||
686 | WREG32(0x657C, 0xBFE880C0); | ||
687 | WREG32(0x6578, 0x20103); | ||
688 | WREG32(0x657C, 0x80008000); | ||
689 | WREG32(0x6578, 0x20200); | ||
690 | WREG32(0x657C, 0x8018BFF8); | ||
691 | WREG32(0x6578, 0x20201); | ||
692 | WREG32(0x657C, 0x8F80BF08); | ||
693 | WREG32(0x6578, 0x20202); | ||
694 | WREG32(0x657C, 0xBFD081A0); | ||
695 | WREG32(0x6578, 0x20203); | ||
696 | WREG32(0x657C, 0xBFF88000); | ||
697 | WREG32(0x6578, 0x20300); | ||
698 | WREG32(0x657C, 0x80188000); | ||
699 | WREG32(0x6578, 0x20301); | ||
700 | WREG32(0x657C, 0x8EE0BEC0); | ||
701 | WREG32(0x6578, 0x20302); | ||
702 | WREG32(0x657C, 0xBFB082A0); | ||
703 | WREG32(0x6578, 0x20303); | ||
704 | WREG32(0x657C, 0x80008000); | ||
705 | WREG32(0x6578, 0x20400); | ||
706 | WREG32(0x657C, 0x80188000); | ||
707 | WREG32(0x6578, 0x20401); | ||
708 | WREG32(0x657C, 0x8E00BEA0); | ||
709 | WREG32(0x6578, 0x20402); | ||
710 | WREG32(0x657C, 0xBF8883C0); | ||
711 | WREG32(0x6578, 0x20403); | ||
712 | WREG32(0x657C, 0x80008000); | ||
713 | WREG32(0x6578, 0x20500); | ||
714 | WREG32(0x657C, 0x80188000); | ||
715 | WREG32(0x6578, 0x20501); | ||
716 | WREG32(0x657C, 0x8D00BE90); | ||
717 | WREG32(0x6578, 0x20502); | ||
718 | WREG32(0x657C, 0xBF588500); | ||
719 | WREG32(0x6578, 0x20503); | ||
720 | WREG32(0x657C, 0x80008008); | ||
721 | WREG32(0x6578, 0x20600); | ||
722 | WREG32(0x657C, 0x80188000); | ||
723 | WREG32(0x6578, 0x20601); | ||
724 | WREG32(0x657C, 0x8BC0BE98); | ||
725 | WREG32(0x6578, 0x20602); | ||
726 | WREG32(0x657C, 0xBF308660); | ||
727 | WREG32(0x6578, 0x20603); | ||
728 | WREG32(0x657C, 0x80008008); | ||
729 | WREG32(0x6578, 0x20700); | ||
730 | WREG32(0x657C, 0x80108000); | ||
731 | WREG32(0x6578, 0x20701); | ||
732 | WREG32(0x657C, 0x8A80BEB0); | ||
733 | WREG32(0x6578, 0x20702); | ||
734 | WREG32(0x657C, 0xBF0087C0); | ||
735 | WREG32(0x6578, 0x20703); | ||
736 | WREG32(0x657C, 0x80008008); | ||
737 | WREG32(0x6578, 0x20800); | ||
738 | WREG32(0x657C, 0x80108000); | ||
739 | WREG32(0x6578, 0x20801); | ||
740 | WREG32(0x657C, 0x8920BED0); | ||
741 | WREG32(0x6578, 0x20802); | ||
742 | WREG32(0x657C, 0xBED08920); | ||
743 | WREG32(0x6578, 0x20803); | ||
744 | WREG32(0x657C, 0x80008010); | ||
745 | WREG32(0x6578, 0x30000); | ||
746 | WREG32(0x657C, 0x90008000); | ||
747 | WREG32(0x6578, 0x30001); | ||
748 | WREG32(0x657C, 0x80008000); | ||
749 | WREG32(0x6578, 0x30100); | ||
750 | WREG32(0x657C, 0x8FE0BF90); | ||
751 | WREG32(0x6578, 0x30101); | ||
752 | WREG32(0x657C, 0xBFF880A0); | ||
753 | WREG32(0x6578, 0x30200); | ||
754 | WREG32(0x657C, 0x8F60BF40); | ||
755 | WREG32(0x6578, 0x30201); | ||
756 | WREG32(0x657C, 0xBFE88180); | ||
757 | WREG32(0x6578, 0x30300); | ||
758 | WREG32(0x657C, 0x8EC0BF00); | ||
759 | WREG32(0x6578, 0x30301); | ||
760 | WREG32(0x657C, 0xBFC88280); | ||
761 | WREG32(0x6578, 0x30400); | ||
762 | WREG32(0x657C, 0x8DE0BEE0); | ||
763 | WREG32(0x6578, 0x30401); | ||
764 | WREG32(0x657C, 0xBFA083A0); | ||
765 | WREG32(0x6578, 0x30500); | ||
766 | WREG32(0x657C, 0x8CE0BED0); | ||
767 | WREG32(0x6578, 0x30501); | ||
768 | WREG32(0x657C, 0xBF7884E0); | ||
769 | WREG32(0x6578, 0x30600); | ||
770 | WREG32(0x657C, 0x8BA0BED8); | ||
771 | WREG32(0x6578, 0x30601); | ||
772 | WREG32(0x657C, 0xBF508640); | ||
773 | WREG32(0x6578, 0x30700); | ||
774 | WREG32(0x657C, 0x8A60BEE8); | ||
775 | WREG32(0x6578, 0x30701); | ||
776 | WREG32(0x657C, 0xBF2087A0); | ||
777 | WREG32(0x6578, 0x30800); | ||
778 | WREG32(0x657C, 0x8900BF00); | ||
779 | WREG32(0x6578, 0x30801); | ||
780 | WREG32(0x657C, 0xBF008900); | ||
781 | } | ||
782 | |||
783 | struct rv515_watermark { | ||
784 | u32 lb_request_fifo_depth; | ||
785 | fixed20_12 num_line_pair; | ||
786 | fixed20_12 estimated_width; | ||
787 | fixed20_12 worst_case_latency; | ||
788 | fixed20_12 consumption_rate; | ||
789 | fixed20_12 active_time; | ||
790 | fixed20_12 dbpp; | ||
791 | fixed20_12 priority_mark_max; | ||
792 | fixed20_12 priority_mark; | ||
793 | fixed20_12 sclk; | ||
794 | }; | ||
795 | |||
796 | void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | ||
797 | struct radeon_crtc *crtc, | ||
798 | struct rv515_watermark *wm) | ||
799 | { | ||
800 | struct drm_display_mode *mode = &crtc->base.mode; | ||
801 | fixed20_12 a, b, c; | ||
802 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | ||
803 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | ||
804 | |||
805 | if (!crtc->base.enabled) { | ||
806 | /* FIXME: wouldn't it be better to set priority mark to maximum? */ | ||
807 | wm->lb_request_fifo_depth = 4; | ||
808 | return; | ||
809 | } | ||
810 | |||
811 | if (crtc->vsc.full > rfixed_const(2)) | ||
812 | wm->num_line_pair.full = rfixed_const(2); | ||
813 | else | ||
814 | wm->num_line_pair.full = rfixed_const(1); | ||
815 | |||
816 | b.full = rfixed_const(mode->crtc_hdisplay); | ||
817 | c.full = rfixed_const(256); | ||
818 | a.full = rfixed_mul(wm->num_line_pair, b); | ||
819 | request_fifo_depth.full = rfixed_div(a, c); | ||
820 | if (a.full < rfixed_const(4)) { | ||
821 | wm->lb_request_fifo_depth = 4; | ||
822 | } else { | ||
823 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | ||
824 | } | ||
825 | |||
826 | /* Determine consumption rate | ||
827 | * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) | ||
828 | * vtaps = number of vertical taps, | ||
829 | * vsc = vertical scaling ratio, defined as source/destination | ||
830 | * hsc = horizontal scaling ratio, defined as source/destination | ||
831 | */ | ||
832 | a.full = rfixed_const(mode->clock); | ||
833 | b.full = rfixed_const(1000); | ||
834 | a.full = rfixed_div(a, b); | ||
835 | pclk.full = rfixed_div(b, a); | ||
836 | if (crtc->rmx_type != RMX_OFF) { | ||
837 | b.full = rfixed_const(2); | ||
838 | if (crtc->vsc.full > b.full) | ||
839 | b.full = crtc->vsc.full; | ||
840 | b.full = rfixed_mul(b, crtc->hsc); | ||
841 | c.full = rfixed_const(2); | ||
842 | b.full = rfixed_div(b, c); | ||
843 | consumption_time.full = rfixed_div(pclk, b); | ||
844 | } else { | ||
845 | consumption_time.full = pclk.full; | ||
846 | } | ||
847 | a.full = rfixed_const(1); | ||
848 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | ||
849 | |||
850 | |||
851 | /* Determine line time | ||
852 | * LineTime = total time for one line of display | ||
853 | * htotal = total number of horizontal pixels | ||
854 | * pclk = pixel clock period(ns) | ||
855 | */ | ||
856 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
857 | line_time.full = rfixed_mul(a, pclk); | ||
858 | |||
859 | /* Determine active time | ||
860 | * ActiveTime = time of active region of display within one line, | ||
861 | * hactive = total number of horizontal active pixels | ||
862 | * htotal = total number of horizontal pixels | ||
863 | */ | ||
864 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
865 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
866 | wm->active_time.full = rfixed_mul(line_time, b); | ||
867 | wm->active_time.full = rfixed_div(wm->active_time, a); | ||
868 | |||
869 | /* Determine chunk time | ||
870 | * ChunkTime = the time it takes the DCP to send one chunk of data | ||
871 | * to the LB which consists of pipeline delay and inter chunk gap | ||
872 | * sclk = system clock (MHz) | ||
873 | */ | ||
874 | a.full = rfixed_const(600 * 1000); | ||
875 | chunk_time.full = rfixed_div(a, rdev->pm.sclk); | ||
876 | read_delay_latency.full = rfixed_const(1000); | ||
877 | |||
878 | /* Determine the worst case latency | ||
879 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | ||
880 | * WorstCaseLatency = worst case time from urgent to when the MC starts | ||
881 | * to return data | ||
882 | * READ_DELAY_IDLE_MAX = constant of 1us | ||
883 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | ||
884 | * which consists of pipeline delay and inter chunk gap | ||
885 | */ | ||
886 | if (rfixed_trunc(wm->num_line_pair) > 1) { | ||
887 | a.full = rfixed_const(3); | ||
888 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
889 | wm->worst_case_latency.full += read_delay_latency.full; | ||
890 | } else { | ||
891 | wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; | ||
892 | } | ||
893 | |||
894 | /* Determine the tolerable latency | ||
895 | * TolerableLatency = Any given request has only 1 line time | ||
896 | * for the data to be returned | ||
897 | * LBRequestFifoDepth = Number of chunk requests the LB can | ||
898 | * put into the request FIFO for a display | ||
899 | * LineTime = total time for one line of display | ||
900 | * ChunkTime = the time it takes the DCP to send one chunk | ||
901 | * of data to the LB which consists of | ||
902 | * pipeline delay and inter chunk gap | ||
903 | */ | ||
904 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | ||
905 | tolerable_latency.full = line_time.full; | ||
906 | } else { | ||
907 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | ||
908 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | ||
909 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | ||
910 | tolerable_latency.full = line_time.full - tolerable_latency.full; | ||
911 | } | ||
912 | /* We assume worst case 32 bits (4 bytes) */ | ||
913 | wm->dbpp.full = rfixed_const(2 * 16); | ||
914 | |||
915 | /* Determine the maximum priority mark | ||
916 | * width = viewport width in pixels | ||
917 | */ | ||
918 | a.full = rfixed_const(16); | ||
919 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
920 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | ||
921 | |||
922 | /* Determine estimated width */ | ||
923 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | ||
924 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | ||
925 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | ||
926 | wm->priority_mark.full = rfixed_const(10); | ||
927 | } else { | ||
928 | a.full = rfixed_const(16); | ||
929 | wm->priority_mark.full = rfixed_div(estimated_width, a); | ||
930 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | ||
931 | } | ||
932 | } | ||
933 | |||
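(Editor's note, worked example with made-up numbers.) To make the fixed-point math in rv515_crtc_bandwidth_compute() concrete: for a hypothetical mode->clock of 154000 kHz with no scaling, pclk = 1000 / (154000 / 1000) is roughly 6.49 ns per pixel, so the display consumes roughly 0.154 pixels per nanosecond. The same computation using the rfixed_* helpers already used above:

        static fixed20_12 example_consumption_rate(void)
        {
                fixed20_12 a, b, pclk, rate;

                a.full = rfixed_const(154000);  /* mode->clock in kHz (example) */
                b.full = rfixed_const(1000);
                a.full = rfixed_div(a, b);      /* 154 */
                pclk.full = rfixed_div(b, a);   /* ~6.49 ns per pixel */
                a.full = rfixed_const(1);
                rate.full = rfixed_div(a, pclk);        /* ~0.154 pixels per ns */
                return rate;
        }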
934 | void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | ||
935 | { | ||
936 | struct drm_display_mode *mode0 = NULL; | ||
937 | struct drm_display_mode *mode1 = NULL; | ||
938 | struct rv515_watermark wm0; | ||
939 | struct rv515_watermark wm1; | ||
940 | u32 tmp; | ||
941 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | ||
942 | fixed20_12 a, b; | ||
943 | |||
944 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
945 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
946 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
947 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
948 | rs690_line_buffer_adjust(rdev, mode0, mode1); | ||
949 | |||
950 | rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | ||
951 | rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | ||
952 | |||
953 | tmp = wm0.lb_request_fifo_depth; | ||
954 | tmp |= wm1.lb_request_fifo_depth << 16; | ||
955 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | ||
956 | |||
957 | if (mode0 && mode1) { | ||
958 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
959 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | ||
960 | else | ||
961 | a.full = wm0.num_line_pair.full; | ||
962 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
963 | b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | ||
964 | else | ||
965 | b.full = wm1.num_line_pair.full; | ||
966 | a.full += b.full; | ||
967 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
968 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
969 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
970 | b.full = rfixed_mul(b, wm0.active_time); | ||
971 | a.full = rfixed_const(16); | ||
972 | b.full = rfixed_div(b, a); | ||
973 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
974 | wm0.consumption_rate); | ||
975 | priority_mark02.full = a.full + b.full; | ||
976 | } else { | ||
977 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
978 | wm0.consumption_rate); | ||
979 | b.full = rfixed_const(16 * 1000); | ||
980 | priority_mark02.full = rfixed_div(a, b); | ||
981 | } | ||
982 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
983 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
984 | b.full = rfixed_mul(b, wm1.active_time); | ||
985 | a.full = rfixed_const(16); | ||
986 | b.full = rfixed_div(b, a); | ||
987 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
988 | wm1.consumption_rate); | ||
989 | priority_mark12.full = a.full + b.full; | ||
990 | } else { | ||
991 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
992 | wm1.consumption_rate); | ||
993 | b.full = rfixed_const(16 * 1000); | ||
994 | priority_mark12.full = rfixed_div(a, b); | ||
995 | } | ||
996 | if (wm0.priority_mark.full > priority_mark02.full) | ||
997 | priority_mark02.full = wm0.priority_mark.full; | ||
998 | if (rfixed_trunc(priority_mark02) < 0) | ||
999 | priority_mark02.full = 0; | ||
1000 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
1001 | priority_mark02.full = wm0.priority_mark_max.full; | ||
1002 | if (wm1.priority_mark.full > priority_mark12.full) | ||
1003 | priority_mark12.full = wm1.priority_mark.full; | ||
1004 | if (rfixed_trunc(priority_mark12) < 0) | ||
1005 | priority_mark12.full = 0; | ||
1006 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
1007 | priority_mark12.full = wm1.priority_mark_max.full; | ||
1008 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
1009 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
1010 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
1011 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
1012 | } else if (mode0) { | ||
1013 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
1014 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | ||
1015 | else | ||
1016 | a.full = wm0.num_line_pair.full; | ||
1017 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
1018 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
1019 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
1020 | b.full = rfixed_mul(b, wm0.active_time); | ||
1021 | a.full = rfixed_const(16); | ||
1022 | b.full = rfixed_div(b, a); | ||
1023 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
1024 | wm0.consumption_rate); | ||
1025 | priority_mark02.full = a.full + b.full; | ||
1026 | } else { | ||
1027 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
1028 | wm0.consumption_rate); | ||
1029 | b.full = rfixed_const(16); | ||
1030 | priority_mark02.full = rfixed_div(a, b); | ||
1031 | } | ||
1032 | if (wm0.priority_mark.full > priority_mark02.full) | ||
1033 | priority_mark02.full = wm0.priority_mark.full; | ||
1034 | if (rfixed_trunc(priority_mark02) < 0) | ||
1035 | priority_mark02.full = 0; | ||
1036 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
1037 | priority_mark02.full = wm0.priority_mark_max.full; | ||
1038 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
1039 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
1040 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
1041 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
1042 | } else { | ||
1043 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
1044 | a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | ||
1045 | else | ||
1046 | a.full = wm1.num_line_pair.full; | ||
1047 | fill_rate.full = rfixed_div(wm1.sclk, a); | ||
1048 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
1049 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
1050 | b.full = rfixed_mul(b, wm1.active_time); | ||
1051 | a.full = rfixed_const(16); | ||
1052 | b.full = rfixed_div(b, a); | ||
1053 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
1054 | wm1.consumption_rate); | ||
1055 | priority_mark12.full = a.full + b.full; | ||
1056 | } else { | ||
1057 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
1058 | wm1.consumption_rate); | ||
1059 | b.full = rfixed_const(16 * 1000); | ||
1060 | priority_mark12.full = rfixed_div(a, b); | ||
1061 | } | ||
1062 | if (wm1.priority_mark.full > priority_mark12.full) | ||
1063 | priority_mark12.full = wm1.priority_mark.full; | ||
1064 | if (rfixed_trunc(priority_mark12) < 0) | ||
1065 | priority_mark12.full = 0; | ||
1066 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
1067 | priority_mark12.full = wm1.priority_mark_max.full; | ||
1068 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
1069 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
1070 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
1071 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
1072 | } | ||
1073 | } | ||
1074 | |||
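(Editor's note, illustrative only.) The two per-display request-FIFO depths computed by rv515_crtc_bandwidth_compute() sit side by side in LB_MAX_REQ_OUTSTANDING, as programmed above. A sketch of the packing, using the field definitions from rv515r.h (the function name is hypothetical):

        static u32 rv515_pack_lb_max_req(u32 d1_depth, u32 d2_depth)
        {
                u32 tmp;

                tmp = (d1_depth << LB_D1_MAX_REQ_OUTSTANDING_SHIFT) &
                      LB_D1_MAX_REQ_OUTSTANDING_MASK;
                tmp |= (d2_depth << LB_D2_MAX_REQ_OUTSTANDING_SHIFT) &
                       LB_D2_MAX_REQ_OUTSTANDING_MASK;
                return tmp;
        }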
1075 | void rv515_bandwidth_update(struct radeon_device *rdev) | ||
1076 | { | ||
1077 | uint32_t tmp; | ||
1078 | struct drm_display_mode *mode0 = NULL; | ||
1079 | struct drm_display_mode *mode1 = NULL; | ||
1080 | |||
1081 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
1082 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
1083 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
1084 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
1085 | /* | ||
1086 | * Set display0/1 priority up in the memory controller for | ||
1087 | * these modes if the user specifies HIGH for the | ||
1088 | * displaypriority option. | ||
1089 | */ | ||
1090 | if (rdev->disp_priority == 2) { | ||
1091 | tmp = RREG32_MC(MC_MISC_LAT_TIMER); | ||
1092 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | ||
1093 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | ||
1094 | if (mode1) | ||
1095 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
1096 | if (mode0) | ||
1097 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | ||
1098 | WREG32_MC(MC_MISC_LAT_TIMER, tmp); | ||
1099 | } | ||
1100 | rv515_bandwidth_avivo_update(rdev); | ||
1101 | } | ||
diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515r.h new file mode 100644 index 000000000000..f3cf84039906 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv515r.h | |||
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef RV515R_H | ||
29 | #define RV515R_H | ||
30 | |||
31 | /* RV515 registers */ | ||
32 | #define PCIE_INDEX 0x0030 | ||
33 | #define PCIE_DATA 0x0034 | ||
34 | #define MC_IND_INDEX 0x0070 | ||
35 | #define MC_IND_WR_EN (1 << 24) | ||
36 | #define MC_IND_DATA 0x0074 | ||
37 | #define RBBM_SOFT_RESET 0x00F0 | ||
38 | #define CONFIG_MEMSIZE 0x00F8 | ||
39 | #define HDP_FB_LOCATION 0x0134 | ||
40 | #define CP_CSQ_CNTL 0x0740 | ||
41 | #define CP_CSQ_MODE 0x0744 | ||
42 | #define CP_CSQ_ADDR 0x07F0 | ||
43 | #define CP_CSQ_DATA 0x07F4 | ||
44 | #define CP_CSQ_STAT 0x07F8 | ||
45 | #define CP_CSQ2_STAT 0x07FC | ||
46 | #define RBBM_STATUS 0x0E40 | ||
47 | #define DST_PIPE_CONFIG 0x170C | ||
48 | #define WAIT_UNTIL 0x1720 | ||
49 | #define WAIT_2D_IDLE (1 << 14) | ||
50 | #define WAIT_3D_IDLE (1 << 15) | ||
51 | #define WAIT_2D_IDLECLEAN (1 << 16) | ||
52 | #define WAIT_3D_IDLECLEAN (1 << 17) | ||
53 | #define ISYNC_CNTL 0x1724 | ||
54 | #define ISYNC_ANY2D_IDLE3D (1 << 0) | ||
55 | #define ISYNC_ANY3D_IDLE2D (1 << 1) | ||
56 | #define ISYNC_TRIG2D_IDLE3D (1 << 2) | ||
57 | #define ISYNC_TRIG3D_IDLE2D (1 << 3) | ||
58 | #define ISYNC_WAIT_IDLEGUI (1 << 4) | ||
59 | #define ISYNC_CPSCRATCH_IDLEGUI (1 << 5) | ||
60 | #define VAP_INDEX_OFFSET 0x208C | ||
61 | #define VAP_PVS_STATE_FLUSH_REG 0x2284 | ||
62 | #define GB_ENABLE 0x4008 | ||
63 | #define GB_MSPOS0 0x4010 | ||
64 | #define MS_X0_SHIFT 0 | ||
65 | #define MS_Y0_SHIFT 4 | ||
66 | #define MS_X1_SHIFT 8 | ||
67 | #define MS_Y1_SHIFT 12 | ||
68 | #define MS_X2_SHIFT 16 | ||
69 | #define MS_Y2_SHIFT 20 | ||
70 | #define MSBD0_Y_SHIFT 24 | ||
71 | #define MSBD0_X_SHIFT 28 | ||
72 | #define GB_MSPOS1 0x4014 | ||
73 | #define MS_X3_SHIFT 0 | ||
74 | #define MS_Y3_SHIFT 4 | ||
75 | #define MS_X4_SHIFT 8 | ||
76 | #define MS_Y4_SHIFT 12 | ||
77 | #define MS_X5_SHIFT 16 | ||
78 | #define MS_Y5_SHIFT 20 | ||
79 | #define MSBD1_SHIFT 24 | ||
80 | #define GB_TILE_CONFIG 0x4018 | ||
81 | #define ENABLE_TILING (1 << 0) | ||
82 | #define PIPE_COUNT_MASK 0x0000000E | ||
83 | #define PIPE_COUNT_SHIFT 1 | ||
84 | #define TILE_SIZE_8 (0 << 4) | ||
85 | #define TILE_SIZE_16 (1 << 4) | ||
86 | #define TILE_SIZE_32 (2 << 4) | ||
87 | #define SUBPIXEL_1_12 (0 << 16) | ||
88 | #define SUBPIXEL_1_16 (1 << 16) | ||
89 | #define GB_SELECT 0x401C | ||
90 | #define GB_AA_CONFIG 0x4020 | ||
91 | #define GB_PIPE_SELECT 0x402C | ||
92 | #define GA_ENHANCE 0x4274 | ||
93 | #define GA_DEADLOCK_CNTL (1 << 0) | ||
94 | #define GA_FASTSYNC_CNTL (1 << 1) | ||
95 | #define GA_POLY_MODE 0x4288 | ||
96 | #define FRONT_PTYPE_POINT (0 << 4) | ||
97 | #define FRONT_PTYPE_LINE (1 << 4) | ||
98 | #define FRONT_PTYPE_TRIANGE (2 << 4) | ||
99 | #define BACK_PTYPE_POINT (0 << 7) | ||
100 | #define BACK_PTYPE_LINE (1 << 7) | ||
101 | #define BACK_PTYPE_TRIANGE (2 << 7) | ||
102 | #define GA_ROUND_MODE 0x428C | ||
103 | #define GEOMETRY_ROUND_TRUNC (0 << 0) | ||
104 | #define GEOMETRY_ROUND_NEAREST (1 << 0) | ||
105 | #define COLOR_ROUND_TRUNC (0 << 2) | ||
106 | #define COLOR_ROUND_NEAREST (1 << 2) | ||
107 | #define SU_REG_DEST 0x42C8 | ||
108 | #define RB3D_DSTCACHE_CTLSTAT 0x4E4C | ||
109 | #define RB3D_DC_FLUSH (2 << 0) | ||
110 | #define RB3D_DC_FREE (2 << 2) | ||
111 | #define RB3D_DC_FINISH (1 << 4) | ||
112 | #define ZB_ZCACHE_CTLSTAT 0x4F18 | ||
113 | #define ZC_FLUSH (1 << 0) | ||
114 | #define ZC_FREE (1 << 1) | ||
115 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
116 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
117 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
118 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
119 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
120 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
121 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
122 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
123 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
124 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
125 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
126 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
127 | #define MODE_PRIORITY_OFF (1 << 16) | ||
128 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
129 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
130 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
131 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
132 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
133 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
134 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
135 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
136 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
137 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
138 | |||
139 | /* ix[MC] registers */ | ||
140 | #define MC_FB_LOCATION 0x01 | ||
141 | #define MC_FB_START_MASK 0x0000FFFF | ||
142 | #define MC_FB_START_SHIFT 0 | ||
143 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
144 | #define MC_FB_TOP_SHIFT 16 | ||
145 | #define MC_AGP_LOCATION 0x02 | ||
146 | #define MC_AGP_START_MASK 0x0000FFFF | ||
147 | #define MC_AGP_START_SHIFT 0 | ||
148 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
149 | #define MC_AGP_TOP_SHIFT 16 | ||
150 | #define MC_AGP_BASE 0x03 | ||
151 | #define MC_AGP_BASE_2 0x04 | ||
152 | #define MC_CNTL 0x5 | ||
153 | #define MEM_NUM_CHANNELS_MASK 0x00000003 | ||
154 | #define MC_STATUS 0x08 | ||
155 | #define MC_STATUS_IDLE (1 << 4) | ||
156 | #define MC_MISC_LAT_TIMER 0x09 | ||
157 | #define MC_CPR_INIT_LAT_MASK 0x0000000F | ||
158 | #define MC_VF_INIT_LAT_MASK 0x000000F0 | ||
159 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
160 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
161 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
162 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
163 | #define MC_FIXED_INIT_LAT_MASK 0x000F0000 | ||
164 | #define MC_E2R_INIT_LAT_MASK 0x00F00000 | ||
165 | #define SAME_PAGE_PRIO_MASK 0x0F000000 | ||
166 | #define MC_GLOBW_INIT_LAT_MASK 0xF0000000 | ||
167 | |||
168 | |||
169 | #endif | ||
170 | |||
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index da50cc51ede3..21d8ffd57308 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -67,7 +67,7 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
67 | "programming pipes. Bad things might happen.\n"); | 67 | "programming pipes. Bad things might happen.\n"); |
68 | } | 68 | } |
69 | 69 | ||
70 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 70 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
71 | tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); | 71 | tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); |
72 | tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); | 72 | tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); |
73 | WREG32(R700_MC_VM_FB_LOCATION, tmp); | 73 | WREG32(R700_MC_VM_FB_LOCATION, tmp); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c1c407f7cca3..c2b0d710d10f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #define TTM_BO_HASH_ORDER 13 | 43 | #define TTM_BO_HASH_ORDER 13 |
44 | 44 | ||
45 | static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); | 45 | static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); |
46 | static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | ||
47 | static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); | 46 | static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); |
48 | 47 | ||
49 | static inline uint32_t ttm_bo_type_flags(unsigned type) | 48 | static inline uint32_t ttm_bo_type_flags(unsigned type) |
@@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
224 | TTM_ASSERT_LOCKED(&bo->mutex); | 223 | TTM_ASSERT_LOCKED(&bo->mutex); |
225 | bo->ttm = NULL; | 224 | bo->ttm = NULL; |
226 | 225 | ||
226 | if (bdev->need_dma32) | ||
227 | page_flags |= TTM_PAGE_FLAG_DMA32; | ||
228 | |||
227 | switch (bo->type) { | 229 | switch (bo->type) { |
228 | case ttm_bo_type_device: | 230 | case ttm_bo_type_device: |
229 | if (zero_alloc) | 231 | if (zero_alloc) |
@@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
304 | 306 | ||
305 | } | 307 | } |
306 | 308 | ||
309 | if (bdev->driver->move_notify) | ||
310 | bdev->driver->move_notify(bo, mem); | ||
311 | |||
307 | if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && | 312 | if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && |
308 | !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) | 313 | !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) |
309 | ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); | 314 | ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); |
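(Editor's note.) The new move_notify hook gives a driver a chance to react before a buffer's backing store changes placement; the call site above passes the buffer object and its proposed memory region. A hypothetical driver callback matching that call (names and body are illustrative, not taken from an existing driver):

        static void my_bo_move_notify(struct ttm_buffer_object *bo,
                                      struct ttm_mem_reg *new_mem)
        {
                /* e.g. drop GPU mappings that still point at the old location */
        }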
@@ -655,31 +660,52 @@ retry_pre_get: | |||
655 | return 0; | 660 | return 0; |
656 | } | 661 | } |
657 | 662 | ||
663 | static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, | ||
664 | uint32_t cur_placement, | ||
665 | uint32_t proposed_placement) | ||
666 | { | ||
667 | uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; | ||
668 | uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; | ||
669 | |||
670 | /** | ||
671 | * Keep current caching if possible. | ||
672 | */ | ||
673 | |||
674 | if ((cur_placement & caching) != 0) | ||
675 | result |= (cur_placement & caching); | ||
676 | else if ((man->default_caching & caching) != 0) | ||
677 | result |= man->default_caching; | ||
678 | else if ((TTM_PL_FLAG_CACHED & caching) != 0) | ||
679 | result |= TTM_PL_FLAG_CACHED; | ||
680 | else if ((TTM_PL_FLAG_WC & caching) != 0) | ||
681 | result |= TTM_PL_FLAG_WC; | ||
682 | else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) | ||
683 | result |= TTM_PL_FLAG_UNCACHED; | ||
684 | |||
685 | return result; | ||
686 | } | ||
687 | |||
688 | |||
658 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, | 689 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, |
659 | bool disallow_fixed, | 690 | bool disallow_fixed, |
660 | uint32_t mem_type, | 691 | uint32_t mem_type, |
661 | uint32_t mask, uint32_t *res_mask) | 692 | uint32_t proposed_placement, |
693 | uint32_t *masked_placement) | ||
662 | { | 694 | { |
663 | uint32_t cur_flags = ttm_bo_type_flags(mem_type); | 695 | uint32_t cur_flags = ttm_bo_type_flags(mem_type); |
664 | 696 | ||
665 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) | 697 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) |
666 | return false; | 698 | return false; |
667 | 699 | ||
668 | if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) | 700 | if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) |
669 | return false; | 701 | return false; |
670 | 702 | ||
671 | if ((mask & man->available_caching) == 0) | 703 | if ((proposed_placement & man->available_caching) == 0) |
672 | return false; | 704 | return false; |
673 | if (mask & man->default_caching) | ||
674 | cur_flags |= man->default_caching; | ||
675 | else if (mask & TTM_PL_FLAG_CACHED) | ||
676 | cur_flags |= TTM_PL_FLAG_CACHED; | ||
677 | else if (mask & TTM_PL_FLAG_WC) | ||
678 | cur_flags |= TTM_PL_FLAG_WC; | ||
679 | else | ||
680 | cur_flags |= TTM_PL_FLAG_UNCACHED; | ||
681 | 705 | ||
682 | *res_mask = cur_flags; | 706 | cur_flags |= (proposed_placement & man->available_caching); |
707 | |||
708 | *masked_placement = cur_flags; | ||
683 | return true; | 709 | return true; |
684 | } | 710 | } |
685 | 711 | ||
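ttm_bo_select_caching() replaces the old inline fallback chain in ttm_bo_mt_compatible() and prefers, in order: the buffer's current caching, the manager's default, then CACHED, WC and finally UNCACHED. A stand-alone sketch of the same priority logic that compiles and runs in userspace; the flag values below are stubs chosen for the test, not the kernel's TTM_PL_FLAG_* values, and the manager default is passed directly instead of through a ttm_mem_type_manager:

#include <stdio.h>
#include <stdint.h>

/* Stub values for the test only; the real flags live in ttm_placement.h. */
#define TTM_PL_FLAG_CACHED   (1u << 16)
#define TTM_PL_FLAG_UNCACHED (1u << 17)
#define TTM_PL_FLAG_WC       (1u << 18)
#define TTM_PL_MASK_CACHING  (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC)

/* Same priority order as ttm_bo_select_caching() above. */
static uint32_t select_caching(uint32_t default_caching,
			       uint32_t cur_placement,
			       uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	if (cur_placement & caching)		/* keep what we already have */
		result |= cur_placement & caching;
	else if (default_caching & caching)	/* fall back to the manager default */
		result |= default_caching;
	else if (TTM_PL_FLAG_CACHED & caching)
		result |= TTM_PL_FLAG_CACHED;
	else if (TTM_PL_FLAG_WC & caching)
		result |= TTM_PL_FLAG_WC;
	else if (TTM_PL_FLAG_UNCACHED & caching)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

int main(void)
{
	/* Buffer is currently WC, the proposal allows WC or UNCACHED: WC wins. */
	uint32_t r = select_caching(TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_WC,
				    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED);
	printf("selected caching: %s\n", (r & TTM_PL_FLAG_WC) ? "WC" : "other");
	return 0;
}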
@@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
723 | if (!type_ok) | 749 | if (!type_ok) |
724 | continue; | 750 | continue; |
725 | 751 | ||
752 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | ||
753 | cur_flags); | ||
754 | |||
726 | if (mem_type == TTM_PL_SYSTEM) | 755 | if (mem_type == TTM_PL_SYSTEM) |
727 | break; | 756 | break; |
728 | 757 | ||
@@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
779 | proposed_placement, &cur_flags)) | 808 | proposed_placement, &cur_flags)) |
780 | continue; | 809 | continue; |
781 | 810 | ||
811 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | ||
812 | cur_flags); | ||
813 | |||
782 | ret = ttm_bo_mem_force_space(bdev, mem, mem_type, | 814 | ret = ttm_bo_mem_force_space(bdev, mem, mem_type, |
783 | interruptible, no_wait); | 815 | interruptible, no_wait); |
784 | 816 | ||
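Note that ttm_bo_mt_compatible() no longer picks a single caching mode itself; it now ORs every proposed caching bit the manager supports into cur_flags, and both call sites in ttm_bo_mem_space() (the free-space pass and the force/evict pass) immediately narrow that down with ttm_bo_select_caching(). Reusing the stand-alone select_caching() sketch above, the two-step combination looks roughly like this, with the same stub flags and masked_placement computed the way the new mt_compatible does:

	/* Step 1: what the new ttm_bo_mt_compatible() hands back -
	 * possibly more than one caching bit set. */
	uint32_t available_caching = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
	uint32_t proposed = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_CACHED;
	uint32_t cur_flags = 0;				/* type flags omitted */
	cur_flags |= proposed & available_caching;	/* WC and UNCACHED both set */

	/* Step 2: what ttm_bo_mem_space() now does right after -
	 * collapse to exactly one caching mode. */
	cur_flags = select_caching(TTM_PL_FLAG_WC /* manager default */,
				   0 /* current placement */, cur_flags);
	/* -> only TTM_PL_FLAG_WC remains set. */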
@@ -1150,13 +1182,14 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | |||
1150 | 1182 | ||
1151 | int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) | 1183 | int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) |
1152 | { | 1184 | { |
1153 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 1185 | struct ttm_mem_type_manager *man; |
1154 | int ret = -EINVAL; | 1186 | int ret = -EINVAL; |
1155 | 1187 | ||
1156 | if (mem_type >= TTM_NUM_MEM_TYPES) { | 1188 | if (mem_type >= TTM_NUM_MEM_TYPES) { |
1157 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); | 1189 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); |
1158 | return ret; | 1190 | return ret; |
1159 | } | 1191 | } |
1192 | man = &bdev->man[mem_type]; | ||
1160 | 1193 | ||
1161 | if (!man->has_type) { | 1194 | if (!man->has_type) { |
1162 | printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " | 1195 | printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " |
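The ttm_bo_clean_mm() change is small but worth calling out: the old code computed &bdev->man[mem_type] before validating mem_type, so an illegal index already produced an out-of-bounds address even though it was never dereferenced. The fixed ordering generalises to any table lookup; a minimal stand-alone illustration, where lookup_table and NUM_ENTRIES are made up for the example:

#include <stdio.h>

#define NUM_ENTRIES 8

static int lookup_table[NUM_ENTRIES];

static int *lookup(unsigned idx)
{
	/* Validate first; only then form the pointer. Even taking the
	 * address &lookup_table[idx] for an out-of-range idx is already
	 * undefined behaviour in C, without any dereference. */
	if (idx >= NUM_ENTRIES) {
		fprintf(stderr, "illegal index %u\n", idx);
		return NULL;
	}
	return &lookup_table[idx];
}

int main(void)
{
	printf("valid:   %p\n", (void *)lookup(3));
	printf("invalid: %p\n", (void *)lookup(42));
	return 0;
}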
@@ -1305,7 +1338,8 @@ EXPORT_SYMBOL(ttm_bo_device_release); | |||
1305 | 1338 | ||
1306 | int ttm_bo_device_init(struct ttm_bo_device *bdev, | 1339 | int ttm_bo_device_init(struct ttm_bo_device *bdev, |
1307 | struct ttm_mem_global *mem_glob, | 1340 | struct ttm_mem_global *mem_glob, |
1308 | struct ttm_bo_driver *driver, uint64_t file_page_offset) | 1341 | struct ttm_bo_driver *driver, uint64_t file_page_offset, |
1342 | bool need_dma32) | ||
1309 | { | 1343 | { |
1310 | int ret = -EINVAL; | 1344 | int ret = -EINVAL; |
1311 | 1345 | ||
@@ -1342,6 +1376,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1342 | INIT_LIST_HEAD(&bdev->ddestroy); | 1376 | INIT_LIST_HEAD(&bdev->ddestroy); |
1343 | INIT_LIST_HEAD(&bdev->swap_lru); | 1377 | INIT_LIST_HEAD(&bdev->swap_lru); |
1344 | bdev->dev_mapping = NULL; | 1378 | bdev->dev_mapping = NULL; |
1379 | bdev->need_dma32 = need_dma32; | ||
1345 | ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); | 1380 | ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); |
1346 | ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); | 1381 | ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); |
1347 | if (unlikely(ret != 0)) { | 1382 | if (unlikely(ret != 0)) { |
@@ -1419,6 +1454,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | |||
1419 | 1454 | ||
1420 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); | 1455 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); |
1421 | } | 1456 | } |
1457 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); | ||
1422 | 1458 | ||
1423 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) | 1459 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) |
1424 | { | 1460 | { |
@@ -1540,6 +1576,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1540 | driver->sync_obj_unref(&sync_obj); | 1576 | driver->sync_obj_unref(&sync_obj); |
1541 | driver->sync_obj_unref(&tmp_obj); | 1577 | driver->sync_obj_unref(&tmp_obj); |
1542 | spin_lock(&bo->lock); | 1578 | spin_lock(&bo->lock); |
1579 | } else { | ||
1580 | spin_unlock(&bo->lock); | ||
1581 | driver->sync_obj_unref(&sync_obj); | ||
1582 | spin_lock(&bo->lock); | ||
1543 | } | 1583 | } |
1544 | } | 1584 | } |
1545 | return 0; | 1585 | return 0; |
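The new else branch in ttm_bo_wait() closes a reference leak: the sync-object reference taken earlier was only dropped on the signalled path. Note that the unref happens with bo->lock released and re-taken afterwards, because dropping the last reference may sleep. That "drop the lock around a potentially sleeping release" pattern in a stand-alone form, with a pthread mutex standing in for the spinlock and release_ref() standing in for sync_obj_unref(); all names here are invented for the illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;	/* protected by 'lock' in this toy model */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* May block (free memory, wait on I/O, ...) - must not run under a
 * spinlock in kernel code. */
static void release_ref(struct obj **p)
{
	struct obj *o = *p;

	*p = NULL;
	if (--o->refcount == 0)
		free(o);
}

static void wait_done(struct obj **sync_obj)
{
	pthread_mutex_lock(&lock);
	if (*sync_obj) {
		struct obj *tmp = *sync_obj;	/* local copy under the lock */
		*sync_obj = NULL;

		pthread_mutex_unlock(&lock);	/* drop the lock ... */
		release_ref(&tmp);		/* ... before the sleeping release */
		pthread_mutex_lock(&lock);	/* re-take it, as ttm_bo_wait() does */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 1;
	wait_done(&o);
	printf("done, no reference leaked\n");
	return 0;
}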
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 517c84559633..ad4ada07c6cf 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/highmem.h> | 34 | #include <linux/highmem.h> |
35 | #include <linux/wait.h> | 35 | #include <linux/wait.h> |
36 | #include <linux/vmalloc.h> | 36 | #include <linux/vmalloc.h> |
37 | #include <linux/version.h> | ||
38 | #include <linux/module.h> | 37 | #include <linux/module.h> |
39 | 38 | ||
40 | void ttm_bo_free_old_node(struct ttm_buffer_object *bo) | 39 | void ttm_bo_free_old_node(struct ttm_buffer_object *bo) |
@@ -137,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page) | |||
137 | } | 136 | } |
138 | 137 | ||
139 | static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | 138 | static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, |
140 | unsigned long page) | 139 | unsigned long page, |
140 | pgprot_t prot) | ||
141 | { | 141 | { |
142 | struct page *d = ttm_tt_get_page(ttm, page); | 142 | struct page *d = ttm_tt_get_page(ttm, page); |
143 | void *dst; | 143 | void *dst; |
@@ -146,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | |||
146 | return -ENOMEM; | 146 | return -ENOMEM; |
147 | 147 | ||
148 | src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); | 148 | src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); |
149 | dst = kmap(d); | 149 | |
150 | #ifdef CONFIG_X86 | ||
151 | dst = kmap_atomic_prot(d, KM_USER0, prot); | ||
152 | #else | ||
153 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) | ||
154 | dst = vmap(&d, 1, 0, prot); | ||
155 | else | ||
156 | dst = kmap(d); | ||
157 | #endif | ||
150 | if (!dst) | 158 | if (!dst) |
151 | return -ENOMEM; | 159 | return -ENOMEM; |
152 | 160 | ||
153 | memcpy_fromio(dst, src, PAGE_SIZE); | 161 | memcpy_fromio(dst, src, PAGE_SIZE); |
154 | kunmap(d); | 162 | |
163 | #ifdef CONFIG_X86 | ||
164 | kunmap_atomic(dst, KM_USER0); | ||
165 | #else | ||
166 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) | ||
167 | vunmap(dst); | ||
168 | else | ||
169 | kunmap(d); | ||
170 | #endif | ||
171 | |||
155 | return 0; | 172 | return 0; |
156 | } | 173 | } |
157 | 174 | ||
158 | static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | 175 | static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, |
159 | unsigned long page) | 176 | unsigned long page, |
177 | pgprot_t prot) | ||
160 | { | 178 | { |
161 | struct page *s = ttm_tt_get_page(ttm, page); | 179 | struct page *s = ttm_tt_get_page(ttm, page); |
162 | void *src; | 180 | void *src; |
@@ -165,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | |||
165 | return -ENOMEM; | 183 | return -ENOMEM; |
166 | 184 | ||
167 | dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); | 185 | dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); |
168 | src = kmap(s); | 186 | #ifdef CONFIG_X86 |
187 | src = kmap_atomic_prot(s, KM_USER0, prot); | ||
188 | #else | ||
189 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) | ||
190 | src = vmap(&s, 1, 0, prot); | ||
191 | else | ||
192 | src = kmap(s); | ||
193 | #endif | ||
169 | if (!src) | 194 | if (!src) |
170 | return -ENOMEM; | 195 | return -ENOMEM; |
171 | 196 | ||
172 | memcpy_toio(dst, src, PAGE_SIZE); | 197 | memcpy_toio(dst, src, PAGE_SIZE); |
173 | kunmap(s); | 198 | |
199 | #ifdef CONFIG_X86 | ||
200 | kunmap_atomic(src, KM_USER0); | ||
201 | #else | ||
202 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) | ||
203 | vunmap(src); | ||
204 | else | ||
205 | kunmap(s); | ||
206 | #endif | ||
207 | |||
174 | return 0; | 208 | return 0; |
175 | } | 209 | } |
176 | 210 | ||
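The copy helpers now map the TTM page with the caching attributes of the I/O side: kmap_atomic_prot() on x86, a single-page vmap() elsewhere, and plain kmap() when the protection is just PAGE_KERNEL. Since the same #ifdef block appears in both directions, one way to read it is as an implicit pair of helpers; a hedged sketch of that factoring, using only the calls the patch itself uses (ttm_kmap_prot/ttm_kunmap_prot are invented names, not part of the patch):

static void *ttm_kmap_prot(struct page *p, pgprot_t prot)
{
#ifdef CONFIG_X86
	return kmap_atomic_prot(p, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		return vmap(&p, 1, 0, prot);	/* single-page mapping with 'prot' */
	return kmap(p);
#endif
}

static void ttm_kunmap_prot(struct page *p, void *virtual, pgprot_t prot)
{
#ifdef CONFIG_X86
	kunmap_atomic(virtual, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(virtual);
	else
		kunmap(p);
#endif
}

With helpers like these, ttm_copy_io_ttm_page() collapses to map, memcpy_fromio(), unmap, and the write direction mirrors it with memcpy_toio().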
@@ -215,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
215 | 249 | ||
216 | for (i = 0; i < new_mem->num_pages; ++i) { | 250 | for (i = 0; i < new_mem->num_pages; ++i) { |
217 | page = i * dir + add; | 251 | page = i * dir + add; |
218 | if (old_iomap == NULL) | 252 | if (old_iomap == NULL) { |
219 | ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); | 253 | pgprot_t prot = ttm_io_prot(old_mem->placement, |
220 | else if (new_iomap == NULL) | 254 | PAGE_KERNEL); |
221 | ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); | 255 | ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, |
222 | else | 256 | prot); |
257 | } else if (new_iomap == NULL) { | ||
258 | pgprot_t prot = ttm_io_prot(new_mem->placement, | ||
259 | PAGE_KERNEL); | ||
260 | ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, | ||
261 | prot); | ||
262 | } else | ||
223 | ret = ttm_copy_io_page(new_iomap, old_iomap, page); | 263 | ret = ttm_copy_io_page(new_iomap, old_iomap, page); |
224 | if (ret) | 264 | if (ret) |
225 | goto out1; | 265 | goto out1; |
@@ -510,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
510 | if (evict) { | 550 | if (evict) { |
511 | ret = ttm_bo_wait(bo, false, false, false); | 551 | ret = ttm_bo_wait(bo, false, false, false); |
512 | spin_unlock(&bo->lock); | 552 | spin_unlock(&bo->lock); |
513 | driver->sync_obj_unref(&bo->sync_obj); | 553 | if (tmp_obj) |
514 | 554 | driver->sync_obj_unref(&tmp_obj); | |
515 | if (ret) | 555 | if (ret) |
516 | return ret; | 556 | return ret; |
517 | 557 | ||
@@ -533,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
533 | 573 | ||
534 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); | 574 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
535 | spin_unlock(&bo->lock); | 575 | spin_unlock(&bo->lock); |
576 | if (tmp_obj) | ||
577 | driver->sync_obj_unref(&tmp_obj); | ||
536 | 578 | ||
537 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | 579 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); |
538 | if (ret) | 580 | if (ret) |
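Both fixed paths in ttm_bo_move_accel_cleanup() follow the same rule: the previous fence (tmp_obj) is snapshotted while bo->lock is held, bo->sync_obj is repointed at the new fence, and the old reference is dropped only after the spinlock is released, and only if there actually was one. A stand-alone model of that hand-over with a toy refcounted "fence"; every name below is invented for the illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
	int id;
};

static struct fence *fence_get(struct fence *f) { f->refcount++; return f; }

static void fence_put(struct fence **f)
{
	if (--(*f)->refcount == 0)
		free(*f);
	*f = NULL;
}

static pthread_mutex_t bo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fence *bo_sync_obj;	/* protected by bo_lock */

/* Attach a new fence to the buffer and retire the previous one, if any. */
static void attach_fence(struct fence *new_fence)
{
	struct fence *tmp_obj;

	pthread_mutex_lock(&bo_lock);
	tmp_obj = bo_sync_obj;			/* remember the old fence ... */
	bo_sync_obj = fence_get(new_fence);	/* ... and install the new one */
	pthread_mutex_unlock(&bo_lock);

	if (tmp_obj)				/* first attach has nothing to drop */
		fence_put(&tmp_obj);		/* release outside the lock */
}

int main(void)
{
	struct fence *a = calloc(1, sizeof(*a));
	struct fence *b = calloc(1, sizeof(*b));

	a->refcount = b->refcount = 1;
	a->id = 1;
	b->id = 2;

	attach_fence(a);
	attach_fence(b);	/* drops the buffer's reference on 'a' */
	printf("buffer now fenced by id %d\n", bo_sync_obj->id);
	return 0;
}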
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 27b146c54fbc..33de7637c0c6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <ttm/ttm_bo_driver.h> | 32 | #include <ttm/ttm_bo_driver.h> |
33 | #include <ttm/ttm_placement.h> | 33 | #include <ttm/ttm_placement.h> |
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | #include <linux/version.h> | ||
36 | #include <linux/rbtree.h> | 35 | #include <linux/rbtree.h> |
37 | #include <linux/module.h> | 36 | #include <linux/module.h> |
38 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
@@ -102,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
102 | return VM_FAULT_NOPAGE; | 101 | return VM_FAULT_NOPAGE; |
103 | } | 102 | } |
104 | 103 | ||
104 | if (bdev->driver->fault_reserve_notify) | ||
105 | bdev->driver->fault_reserve_notify(bo); | ||
106 | |||
105 | /* | 107 | /* |
106 | * Wait for buffer data in transit, due to a pipelined | 108 | * Wait for buffer data in transit, due to a pipelined |
107 | * move. | 109 | * move. |
@@ -328,7 +330,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, | |||
328 | goto out_unref; | 330 | goto out_unref; |
329 | 331 | ||
330 | kmap_offset = dev_offset - bo->vm_node->start; | 332 | kmap_offset = dev_offset - bo->vm_node->start; |
331 | if (unlikely(kmap_offset) >= bo->num_pages) { | 333 | if (unlikely(kmap_offset >= bo->num_pages)) { |
332 | ret = -EFBIG; | 334 | ret = -EFBIG; |
333 | goto out_unref; | 335 | goto out_unref; |
334 | } | 336 | } |
@@ -402,7 +404,7 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, | |||
402 | bool dummy; | 404 | bool dummy; |
403 | 405 | ||
404 | kmap_offset = (*f_pos >> PAGE_SHIFT); | 406 | kmap_offset = (*f_pos >> PAGE_SHIFT); |
405 | if (unlikely(kmap_offset) >= bo->num_pages) | 407 | if (unlikely(kmap_offset >= bo->num_pages)) |
406 | return -EFBIG; | 408 | return -EFBIG; |
407 | 409 | ||
408 | page_offset = *f_pos & ~PAGE_MASK; | 410 | page_offset = *f_pos & ~PAGE_MASK; |
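The two ttm_bo_vm.c hunks above fix more than a style issue. unlikely(x) expands to __builtin_expect(!!(x), 0), which evaluates to 0 or 1, so "unlikely(kmap_offset) >= bo->num_pages" compared that 0-or-1 value against num_pages and, for any object larger than one page, never fired the -EFBIG check. A small stand-alone demonstration, with the macros written in their usual kernel form:

#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
	unsigned long kmap_offset = 1000;	/* well past the end of the object */
	unsigned long num_pages = 16;

	/* Broken form: unlikely() collapses kmap_offset to 1 before the compare. */
	if (unlikely(kmap_offset) >= num_pages)
		printf("broken check fired\n");
	else
		printf("broken check missed an out-of-range offset\n");

	/* Fixed form: the whole condition goes inside unlikely(). */
	if (unlikely(kmap_offset >= num_pages))
		printf("fixed check fired\n");
	return 0;
}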
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 0331fa74cd3f..b8b6c4a5f983 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -28,7 +28,6 @@ | |||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/version.h> | ||
32 | #include <linux/vmalloc.h> | 31 | #include <linux/vmalloc.h> |
33 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
34 | #include <linux/highmem.h> | 33 | #include <linux/highmem.h> |
@@ -87,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages) | |||
87 | unsigned long i; | 86 | unsigned long i; |
88 | 87 | ||
89 | for (i = 0; i < num_pages; ++i) { | 88 | for (i = 0; i < num_pages; ++i) { |
90 | if (pages[i]) { | 89 | struct page *page = pages[i]; |
91 | unsigned long start = (unsigned long)page_address(pages[i]); | 90 | void *page_virtual; |
92 | flush_dcache_range(start, start + PAGE_SIZE); | 91 | |
93 | } | 92 | if (unlikely(page == NULL)) |
93 | continue; | ||
94 | |||
95 | page_virtual = kmap_atomic(page, KM_USER0); | ||
96 | flush_dcache_range((unsigned long) page_virtual, | ||
97 | (unsigned long) page_virtual + PAGE_SIZE); | ||
98 | kunmap_atomic(page_virtual, KM_USER0); | ||
94 | } | 99 | } |
95 | #else | 100 | #else |
96 | if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) | 101 | if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) |
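The rewritten flush loop matters on configurations with highmem: page_address() is only meaningful for lowmem pages (or pages that happen to be kmapped), so the old code could compute a bogus virtual address for a highmem page. Mapping each page with kmap_atomic() for the duration of the flush works for every page, and the same pattern suits any short per-page CPU access. A hedged kernel-style sketch of that pattern; ttm_pages_apply and fn are invented names, while the map/unmap calls are the ones the patch uses:

/* Hypothetical helper: run 'fn' on the kernel mapping of each page,
 * highmem-safe. */
static void ttm_pages_apply(struct page *pages[], unsigned long num_pages,
			    void (*fn)(void *virtual, unsigned long len))
{
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		void *virtual;

		if (unlikely(pages[i] == NULL))
			continue;

		/* page_address() is only valid for lowmem; kmap_atomic()
		 * gives a temporary mapping that also covers highmem. */
		virtual = kmap_atomic(pages[i], KM_USER0);
		fn(virtual, PAGE_SIZE);		/* must not sleep */
		kunmap_atomic(virtual, KM_USER0);
	}
}

ttm_tt_cache_flush() above is exactly this shape, with a flush_dcache_range() call in place of fn().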
@@ -132,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm) | |||
132 | 137 | ||
133 | static struct page *ttm_tt_alloc_page(unsigned page_flags) | 138 | static struct page *ttm_tt_alloc_page(unsigned page_flags) |
134 | { | 139 | { |
140 | gfp_t gfp_flags = GFP_USER; | ||
141 | |||
135 | if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) | 142 | if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) |
136 | return alloc_page(GFP_HIGHUSER | __GFP_ZERO); | 143 | gfp_flags |= __GFP_ZERO; |
144 | |||
145 | if (page_flags & TTM_PAGE_FLAG_DMA32) | ||
146 | gfp_flags |= __GFP_DMA32; | ||
147 | else | ||
148 | gfp_flags |= __GFP_HIGHMEM; | ||
137 | 149 | ||
138 | return alloc_page(GFP_HIGHUSER); | 150 | return alloc_page(gfp_flags); |
139 | } | 151 | } |
140 | 152 | ||
141 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) | 153 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) |
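ttm_tt_alloc_page() now builds its gfp mask incrementally: GFP_USER as the base, __GFP_ZERO when the caller asked for zeroed pages, and then exactly one of __GFP_DMA32 or __GFP_HIGHMEM depending on the new page flag, since a device that needs 32-bit addressable pages cannot be handed arbitrary highmem pages. A compile-and-run table of the four flag combinations, with the kernel constants stubbed out so it builds in userspace; the numeric values are placeholders, not the real <linux/gfp.h> definitions:

#include <stdio.h>
#include <stdint.h>

/* Userspace stubs; real values come from <linux/gfp.h> and the TTM headers. */
#define GFP_USER		 0x000000d0u
#define __GFP_ZERO		 0x00008000u
#define __GFP_DMA32		 0x00000004u
#define __GFP_HIGHMEM		 0x00000002u
#define TTM_PAGE_FLAG_ZERO_ALLOC (1u << 6)
#define TTM_PAGE_FLAG_DMA32	 (1u << 7)

/* Same decision chain as the new ttm_tt_alloc_page(). */
static uint32_t gfp_for_page_flags(unsigned page_flags)
{
	uint32_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;	/* keep the page below 4GB */
	else
		gfp_flags |= __GFP_HIGHMEM;	/* anywhere, highmem included */

	return gfp_flags;
}

int main(void)
{
	unsigned flags[] = { 0, TTM_PAGE_FLAG_ZERO_ALLOC, TTM_PAGE_FLAG_DMA32,
			     TTM_PAGE_FLAG_ZERO_ALLOC | TTM_PAGE_FLAG_DMA32 };
	unsigned i;

	for (i = 0; i < 4; ++i)
		printf("page_flags=%#04x -> gfp=%#010x\n", flags[i],
		       (unsigned)gfp_for_page_flags(flags[i]));
	return 0;
}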
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index c248c1d37268..5935b8842e86 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c | |||
@@ -183,7 +183,7 @@ int via_enable_vblank(struct drm_device *dev, int crtc) | |||
183 | } | 183 | } |
184 | 184 | ||
185 | status = VIA_READ(VIA_REG_INTERRUPT); | 185 | status = VIA_READ(VIA_REG_INTERRUPT); |
186 | VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE); | 186 | VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE); |
187 | 187 | ||
188 | VIA_WRITE8(0x83d4, 0x11); | 188 | VIA_WRITE8(0x83d4, 0x11); |
189 | VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); | 189 | VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); |
@@ -194,6 +194,10 @@ int via_enable_vblank(struct drm_device *dev, int crtc) | |||
194 | void via_disable_vblank(struct drm_device *dev, int crtc) | 194 | void via_disable_vblank(struct drm_device *dev, int crtc) |
195 | { | 195 | { |
196 | drm_via_private_t *dev_priv = dev->dev_private; | 196 | drm_via_private_t *dev_priv = dev->dev_private; |
197 | u32 status; | ||
198 | |||
199 | status = VIA_READ(VIA_REG_INTERRUPT); | ||
200 | VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE); | ||
197 | 201 | ||
198 | VIA_WRITE8(0x83d4, 0x11); | 202 | VIA_WRITE8(0x83d4, 0x11); |
199 | VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); | 203 | VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); |
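The via_irq.c change is a classic read-modify-write fix: enabling the vblank interrupt must OR the enable bit into the current register value ("status | VIA_IRQ_VBLANK_ENABLE"), whereas the old "status & VIA_IRQ_VBLANK_ENABLE" cleared every other enable bit and did not even set this one unless it was already set; disabling now symmetrically clears just that bit with "& ~". The same idiom in a small stand-alone form, with a plain variable standing in for VIA_READ/VIA_WRITE and a made-up bit position:

#include <stdio.h>
#include <stdint.h>

#define IRQ_VBLANK_ENABLE (1u << 19)	/* made-up bit position */

static uint32_t irq_reg = 0x00500011;	/* pretend other enables are already set */

static void enable_vblank(void)
{
	uint32_t status = irq_reg;		/* VIA_READ()  */
	irq_reg = status | IRQ_VBLANK_ENABLE;	/* VIA_WRITE(): set one bit */
}

static void disable_vblank(void)
{
	uint32_t status = irq_reg;
	irq_reg = status & ~IRQ_VBLANK_ENABLE;	/* clear one bit, keep the rest */
}

int main(void)
{
	printf("before:  %#010x\n", (unsigned)irq_reg);
	enable_vblank();
	printf("enable:  %#010x\n", (unsigned)irq_reg);
	disable_vblank();
	printf("disable: %#010x\n", (unsigned)irq_reg);

	/* The old buggy form for comparison: wipes the other enable bits. */
	printf("buggy:   %#010x\n", (unsigned)(irq_reg & IRQ_VBLANK_ENABLE));
	return 0;
}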