52 files changed, 2267 insertions, 1426 deletions
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5dda07cf7097..fadcd44ff196 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -395,13 +395,14 @@ out:
  * \param adapter : i2c device adaptor
  * \return 1 on success
  */
-static bool
+bool
 drm_probe_ddc(struct i2c_adapter *adapter)
 {
         unsigned char out;

         return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
 }
+EXPORT_SYMBOL(drm_probe_ddc);

 /**
  * drm_get_edid - get EDID data, if available
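With the export above, drm_probe_ddc() becomes callable from other DRM modules to check whether anything answers on a DDC/I2C bus without fetching a full EDID. A hedged sketch of how a connector's detect() callback might use it; the example_get_ddc_adapter() helper and the connector name are illustrative, not part of this patch:

/* Hedged sketch only: probe the DDC bus from a connector's detect() hook. */
static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
        /*
         * example_get_ddc_adapter() is a hypothetical helper that returns
         * the i2c adapter wired to this connector's DDC lines.
         */
        struct i2c_adapter *ddc = example_get_ddc_adapter(connector);

        if (ddc && drm_probe_ddc(ddc))
                return connector_status_connected;

        return connector_status_disconnected;
}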
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 961a1806a246..37e6ec704e1d 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -26,29 +26,41 @@ static int s5p_ddc_probe(struct i2c_client *client,
 {
         hdmi_attach_ddc_client(client);

-        dev_info(&client->adapter->dev, "attached s5p_ddc "
-                "into i2c adapter successfully\n");
+        dev_info(&client->adapter->dev,
+                "attached %s into i2c adapter successfully\n",
+                client->name);

         return 0;
 }

 static int s5p_ddc_remove(struct i2c_client *client)
 {
-        dev_info(&client->adapter->dev, "detached s5p_ddc "
-                "from i2c adapter successfully\n");
+        dev_info(&client->adapter->dev,
+                "detached %s from i2c adapter successfully\n",
+                client->name);

         return 0;
 }

 static struct i2c_device_id ddc_idtable[] = {
         {"s5p_ddc", 0},
+        {"exynos5-hdmiddc", 0},
         { },
 };

+static struct of_device_id hdmiddc_match_types[] = {
+        {
+                .compatible = "samsung,exynos5-hdmiddc",
+        }, {
+                /* end node */
+        }
+};
+
 struct i2c_driver ddc_driver = {
         .driver = {
-                .name = "s5p_ddc",
+                .name = "exynos-hdmiddc",
                 .owner = THIS_MODULE,
+                .of_match_table = hdmiddc_match_types,
         },
         .id_table = ddc_idtable,
         .probe = s5p_ddc_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index c2b1b1441ed0..18c271862ca8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -40,6 +40,7 @@ struct exynos_drm_connector {
         struct drm_connector    drm_connector;
         uint32_t                encoder_id;
         struct exynos_drm_manager *manager;
+        uint32_t                dpms;
 };

 /* convert exynos_video_timings to drm_display_mode */
@@ -149,8 +150,12 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                 count = drm_add_edid_modes(connector, edid);
                 kfree(edid);
         } else {
-                struct drm_display_mode *mode = drm_mode_create(connector->dev);
                 struct exynos_drm_panel_info *panel;
+                struct drm_display_mode *mode = drm_mode_create(connector->dev);
+                if (!mode) {
+                        DRM_ERROR("failed to create a new display mode.\n");
+                        return 0;
+                }

                 if (display_ops->get_panel)
                         panel = display_ops->get_panel(manager->dev);
@@ -194,8 +199,7 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
         return ret;
 }

-static struct drm_encoder *exynos_drm_best_encoder(
-                struct drm_connector *connector)
+struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
 {
         struct drm_device *dev = connector->dev;
         struct exynos_drm_connector *exynos_connector =
@@ -224,6 +228,43 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
         .best_encoder   = exynos_drm_best_encoder,
 };

+void exynos_drm_display_power(struct drm_connector *connector, int mode)
+{
+        struct drm_encoder *encoder = exynos_drm_best_encoder(connector);
+        struct exynos_drm_connector *exynos_connector;
+        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+        struct exynos_drm_display_ops *display_ops = manager->display_ops;
+
+        exynos_connector = to_exynos_connector(connector);
+
+        if (exynos_connector->dpms == mode) {
+                DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+                return;
+        }
+
+        if (display_ops && display_ops->power_on)
+                display_ops->power_on(manager->dev, mode);
+
+        exynos_connector->dpms = mode;
+}
+
+static void exynos_drm_connector_dpms(struct drm_connector *connector,
+                                        int mode)
+{
+        DRM_DEBUG_KMS("%s\n", __FILE__);
+
+        /*
+         * in case that drm_crtc_helper_set_mode() is called,
+         * encoder/crtc->funcs->dpms() will be just returned
+         * because they already were DRM_MODE_DPMS_ON so only
+         * exynos_drm_display_power() will be called.
+         */
+        drm_helper_connector_dpms(connector, mode);
+
+        exynos_drm_display_power(connector, mode);
+
+}
+
 static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
                         unsigned int max_width, unsigned int max_height)
 {
@@ -283,7 +324,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
 }

 static struct drm_connector_funcs exynos_connector_funcs = {
-        .dpms           = drm_helper_connector_dpms,
+        .dpms           = exynos_drm_connector_dpms,
         .fill_modes     = exynos_drm_connector_fill_modes,
         .detect         = exynos_drm_connector_detect,
         .destroy        = exynos_drm_connector_destroy,
@@ -332,6 +373,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,

         exynos_connector->encoder_id = encoder->base.id;
         exynos_connector->manager = manager;
+        exynos_connector->dpms = DRM_MODE_DPMS_OFF;
         connector->encoder = encoder;

         err = drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
index 1c7b2b5b579c..22f6cc442c3d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -31,4 +31,8 @@
 struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
                                                    struct drm_encoder *encoder);

+struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector);
+
+void exynos_drm_display_power(struct drm_connector *connector, int mode);
+
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 19bdf0a194eb..94026ad76a77 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -34,33 +34,15 @@

 static LIST_HEAD(exynos_drm_subdrv_list);

-static int exynos_drm_subdrv_probe(struct drm_device *dev,
+static int exynos_drm_create_enc_conn(struct drm_device *dev,
                                         struct exynos_drm_subdrv *subdrv)
 {
         struct drm_encoder *encoder;
         struct drm_connector *connector;
+        int ret;

         DRM_DEBUG_DRIVER("%s\n", __FILE__);

-        if (subdrv->probe) {
-                int ret;
-
-                /*
-                 * this probe callback would be called by sub driver
-                 * after setting of all resources to this sub driver,
-                 * such as clock, irq and register map are done or by load()
-                 * of exynos drm driver.
-                 *
-                 * P.S. note that this driver is considered for modularization.
-                 */
-                ret = subdrv->probe(dev, subdrv->dev);
-                if (ret)
-                        return ret;
-        }
-
-        if (!subdrv->manager)
-                return 0;
-
         subdrv->manager->dev = subdrv->dev;

         /* create and initialize a encoder for this sub driver. */
@@ -78,24 +60,22 @@ static int exynos_drm_subdrv_probe(struct drm_device *dev,
         connector = exynos_drm_connector_create(dev, encoder);
         if (!connector) {
                 DRM_ERROR("failed to create connector\n");
-                encoder->funcs->destroy(encoder);
-                return -EFAULT;
+                ret = -EFAULT;
+                goto err_destroy_encoder;
         }

         subdrv->encoder = encoder;
         subdrv->connector = connector;

         return 0;
+
+err_destroy_encoder:
+        encoder->funcs->destroy(encoder);
+        return ret;
 }

-static void exynos_drm_subdrv_remove(struct drm_device *dev,
-                                        struct exynos_drm_subdrv *subdrv)
+static void exynos_drm_destroy_enc_conn(struct exynos_drm_subdrv *subdrv)
 {
-        DRM_DEBUG_DRIVER("%s\n", __FILE__);
-
-        if (subdrv->remove)
-                subdrv->remove(dev);
-
         if (subdrv->encoder) {
                 struct drm_encoder *encoder = subdrv->encoder;
                 encoder->funcs->destroy(encoder);
@@ -109,9 +89,43 @@ static void exynos_drm_subdrv_remove(struct drm_device *dev,
         }
 }

+static int exynos_drm_subdrv_probe(struct drm_device *dev,
+                                        struct exynos_drm_subdrv *subdrv)
+{
+        if (subdrv->probe) {
+                int ret;
+
+                subdrv->drm_dev = dev;
+
+                /*
+                 * this probe callback would be called by sub driver
+                 * after setting of all resources to this sub driver,
+                 * such as clock, irq and register map are done or by load()
+                 * of exynos drm driver.
+                 *
+                 * P.S. note that this driver is considered for modularization.
+                 */
+                ret = subdrv->probe(dev, subdrv->dev);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
+static void exynos_drm_subdrv_remove(struct drm_device *dev,
+                                        struct exynos_drm_subdrv *subdrv)
+{
+        DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+        if (subdrv->remove)
+                subdrv->remove(dev, subdrv->dev);
+}
+
 int exynos_drm_device_register(struct drm_device *dev)
 {
         struct exynos_drm_subdrv *subdrv, *n;
+        unsigned int fine_cnt = 0;
         int err;

         DRM_DEBUG_DRIVER("%s\n", __FILE__);
@@ -120,14 +134,36 @@ int exynos_drm_device_register(struct drm_device *dev)
                 return -EINVAL;

         list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
-                subdrv->drm_dev = dev;
                 err = exynos_drm_subdrv_probe(dev, subdrv);
                 if (err) {
                         DRM_DEBUG("exynos drm subdrv probe failed.\n");
                         list_del(&subdrv->list);
+                        continue;
+                }
+
+                /*
+                 * if manager is null then it means that this sub driver
+                 * doesn't need encoder and connector.
+                 */
+                if (!subdrv->manager) {
+                        fine_cnt++;
+                        continue;
+                }
+
+                err = exynos_drm_create_enc_conn(dev, subdrv);
+                if (err) {
+                        DRM_DEBUG("failed to create encoder and connector.\n");
+                        exynos_drm_subdrv_remove(dev, subdrv);
+                        list_del(&subdrv->list);
+                        continue;
                 }
+
+                fine_cnt++;
         }

+        if (!fine_cnt)
+                return -EINVAL;
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(exynos_drm_device_register);
@@ -143,8 +179,10 @@ int exynos_drm_device_unregister(struct drm_device *dev)
                 return -EINVAL;
         }

-        list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
+        list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
                 exynos_drm_subdrv_remove(dev, subdrv);
+                exynos_drm_destroy_enc_conn(subdrv);
+        }

         return 0;
 }
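For orientation, a hedged sketch of a sub driver that opts out of the encoder/connector path by leaving manager NULL (the G2D sub driver takes this route); the probe/remove signatures follow struct exynos_drm_subdrv as shown in exynos_drm_drv.h below, while the names and the registration note are illustrative of how such a driver would typically hook in:

/* Illustrative only: a minimal sub driver with no encoder/connector. */
static int example_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
        /* clocks, irqs and register mappings are assumed ready by now */
        return 0;
}

static void example_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
        /* undo whatever probe set up */
}

static struct exynos_drm_subdrv example_subdrv = {
        .probe  = example_subdrv_probe,
        .remove = example_subdrv_remove,
        /*
         * .manager left NULL: exynos_drm_device_register() only bumps
         * fine_cnt for it and skips exynos_drm_create_enc_conn().
         */
};

/*
 * The owning platform driver would set example_subdrv.dev to its own device
 * and register it (exynos_drm_subdrv_register() in this driver) so that it
 * sits on exynos_drm_subdrv_list before exynos_drm_device_register() runs.
 */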
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index df1e34f0f091..fce245f64c4f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -66,7 +66,6 @@ struct exynos_drm_crtc {

 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
-        struct drm_device *dev = crtc->dev;
         struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

         DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
@@ -76,12 +75,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
                 return;
         }

-        mutex_lock(&dev->struct_mutex);
-
         exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
         exynos_crtc->dpms = mode;
-
-        mutex_unlock(&dev->struct_mutex);
 }

 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -97,6 +92,7 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)

         DRM_DEBUG_KMS("%s\n", __FILE__);

+        exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
         exynos_plane_commit(exynos_crtc->plane);
         exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
 }
@@ -126,8 +122,6 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,

         DRM_DEBUG_KMS("%s\n", __FILE__);

-        exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-
         /*
          * copy the mode data adjusted by mode_fixup() into crtc->mode
          * so that hardware can be seet to proper mode.
@@ -161,6 +155,12 @@ static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,

         DRM_DEBUG_KMS("%s\n", __FILE__);

+        /* when framebuffer changing is requested, crtc's dpms should be on */
+        if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
+                DRM_ERROR("failed framebuffer changing request.\n");
+                return -EPERM;
+        }
+
         crtc_w = crtc->fb->width - x;
         crtc_h = crtc->fb->height - y;

@@ -213,6 +213,12 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,

         DRM_DEBUG_KMS("%s\n", __FILE__);

+        /* when the page flip is requested, crtc's dpms should be on */
+        if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
+                DRM_ERROR("failed page flip request.\n");
+                return -EINVAL;
+        }
+
         mutex_lock(&dev->struct_mutex);

         if (event) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a4ab98b52dd8..a34231036496 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -36,6 +36,20 @@
 #define MAX_FB_BUFFER   4
 #define DEFAULT_ZPOS    -1

+#define _wait_for(COND, MS) ({ \
+        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+        int ret__ = 0;                                                  \
+        while (!(COND)) {                                               \
+                if (time_after(jiffies, timeout__)) {                   \
+                        ret__ = -ETIMEDOUT;                             \
+                        break;                                          \
+                }                                                       \
+        }                                                               \
+        ret__;                                                          \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS)
+
 struct drm_device;
 struct exynos_drm_overlay;
 struct drm_connector;
@@ -60,6 +74,8 @@ enum exynos_drm_output_type {
  * @commit: apply hardware specific overlay data to registers.
  * @enable: enable hardware specific overlay.
  * @disable: disable hardware specific overlay.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ *      hardware overlay is disabled.
  */
 struct exynos_drm_overlay_ops {
         void (*mode_set)(struct device *subdrv_dev,
@@ -67,6 +83,7 @@ struct exynos_drm_overlay_ops {
         void (*commit)(struct device *subdrv_dev, int zpos);
         void (*enable)(struct device *subdrv_dev, int zpos);
         void (*disable)(struct device *subdrv_dev, int zpos);
+        void (*wait_for_vblank)(struct device *subdrv_dev);
 };

 /*
@@ -265,7 +282,7 @@ struct exynos_drm_subdrv {
         struct exynos_drm_manager *manager;

         int (*probe)(struct drm_device *drm_dev, struct device *dev);
-        void (*remove)(struct drm_device *dev);
+        void (*remove)(struct drm_device *drm_dev, struct device *dev);
         int (*open)(struct drm_device *drm_dev, struct device *dev,
                         struct drm_file *file);
         void (*close)(struct drm_device *drm_dev, struct device *dev,
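The wait_for() macro added above polls a condition until it becomes true or the millisecond timeout expires, returning 0 or -ETIMEDOUT; note that the loop spins without sleeping, so callers should keep the timeout short. A hedged usage sketch (the register offset and bit names are invented for illustration; fimd_wait_for_vblank() further down in this series applies the same pattern to VIDCON1):

/* Illustrative only: busy-wait up to 50 ms for a hypothetical status bit. */
static int example_wait_ready(void __iomem *regs)
{
        int ret;

        ret = wait_for(__raw_readl(regs + EXAMPLE_STATUS) & EXAMPLE_READY, 50);
        if (ret < 0)
                DRM_DEBUG_KMS("timed out waiting for EXAMPLE_READY\n");

        return ret;
}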
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 39bd8abff3f1..e51503fbaf2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -31,6 +31,7 @@

 #include "exynos_drm_drv.h"
 #include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"

 #define to_exynos_encoder(x)    container_of(x, struct exynos_drm_encoder,\
                                 drm_encoder)
@@ -44,26 +45,23 @@
  * @dpms: store the encoder dpms value.
  */
 struct exynos_drm_encoder {
+        struct drm_crtc                 *old_crtc;
         struct drm_encoder              drm_encoder;
         struct exynos_drm_manager       *manager;
         int                             dpms;
 };

-static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
+static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
 {
         struct drm_device *dev = encoder->dev;
         struct drm_connector *connector;
-        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);

         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                if (connector->encoder == encoder) {
-                        struct exynos_drm_display_ops *display_ops =
-                                manager->display_ops;
-
+                if (exynos_drm_best_encoder(connector) == encoder) {
                         DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
                                         connector->base.id, mode);
-                        if (display_ops && display_ops->power_on)
-                                display_ops->power_on(manager->dev, mode);
+
+                        exynos_drm_display_power(connector, mode);
                 }
         }
 }
@@ -88,13 +86,13 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
         case DRM_MODE_DPMS_ON:
                 if (manager_ops && manager_ops->apply)
                         manager_ops->apply(manager->dev);
-                exynos_drm_display_power(encoder, mode);
+                exynos_drm_connector_power(encoder, mode);
                 exynos_encoder->dpms = mode;
                 break;
         case DRM_MODE_DPMS_STANDBY:
         case DRM_MODE_DPMS_SUSPEND:
         case DRM_MODE_DPMS_OFF:
-                exynos_drm_display_power(encoder, mode);
+                exynos_drm_connector_power(encoder, mode);
                 exynos_encoder->dpms = mode;
                 break;
         default:
@@ -127,24 +125,74 @@ exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
         return true;
 }

+static void disable_plane_to_crtc(struct drm_device *dev,
+                                struct drm_crtc *old_crtc,
+                                struct drm_crtc *new_crtc)
+{
+        struct drm_plane *plane;
+
+        /*
+         * if old_crtc isn't same as encoder->crtc then it means that
+         * user changed crtc id to another one so the plane to old_crtc
+         * should be disabled and plane->crtc should be set to new_crtc
+         * (encoder->crtc)
+         */
+        list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+                if (plane->crtc == old_crtc) {
+                        /*
+                         * do not change below call order.
+                         *
+                         * plane->funcs->disable_plane call checks
+                         * if encoder->crtc is same as plane->crtc and if same
+                         * then overlay_ops->disable callback will be called
+                         * to diasble current hw overlay so plane->crtc should
+                         * have new_crtc because new_crtc was set to
+                         * encoder->crtc in advance.
+                         */
+                        plane->crtc = new_crtc;
+                        plane->funcs->disable_plane(plane);
+                }
+        }
+}
+
 static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
                                          struct drm_display_mode *mode,
                                          struct drm_display_mode *adjusted_mode)
 {
         struct drm_device *dev = encoder->dev;
         struct drm_connector *connector;
-        struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
-        struct exynos_drm_manager_ops *manager_ops = manager->ops;
+        struct exynos_drm_manager *manager;
+        struct exynos_drm_manager_ops *manager_ops;

         DRM_DEBUG_KMS("%s\n", __FILE__);

-        exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-
         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                if (connector->encoder == encoder)
+                if (connector->encoder == encoder) {
+                        struct exynos_drm_encoder *exynos_encoder;
+
+                        exynos_encoder = to_exynos_encoder(encoder);
+
+                        if (exynos_encoder->old_crtc != encoder->crtc &&
+                                        exynos_encoder->old_crtc) {
+
+                                /*
+                                 * disable a plane to old crtc and change
+                                 * crtc of the plane to new one.
+                                 */
+                                disable_plane_to_crtc(dev,
+                                                exynos_encoder->old_crtc,
+                                                encoder->crtc);
+                        }
+
+                        manager = exynos_drm_get_manager(encoder);
+                        manager_ops = manager->ops;
+
                         if (manager_ops && manager_ops->mode_set)
                                 manager_ops->mode_set(manager->dev,
                                                         adjusted_mode);
+
+                        exynos_encoder->old_crtc = encoder->crtc;
+                }
         }
 }

@@ -166,12 +214,27 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
                 manager_ops->commit(manager->dev);
 }

+static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
+{
+        struct drm_plane *plane;
+        struct drm_device *dev = encoder->dev;
+
+        exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+        /* all planes connected to this encoder should be also disabled. */
+        list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+                if (plane->crtc == encoder->crtc)
+                        plane->funcs->disable_plane(plane);
+        }
+}
+
 static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
         .dpms           = exynos_drm_encoder_dpms,
         .mode_fixup     = exynos_drm_encoder_mode_fixup,
         .mode_set       = exynos_drm_encoder_mode_set,
         .prepare        = exynos_drm_encoder_prepare,
         .commit         = exynos_drm_encoder_commit,
+        .disable        = exynos_drm_encoder_disable,
 };

 static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
@@ -338,6 +401,19 @@ void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
                 manager_ops->dpms(manager->dev, mode);

         /*
+         * set current mode to new one so that data aren't updated into
+         * registers by drm_helper_connector_dpms two times.
+         *
+         * in case that drm_crtc_helper_set_mode() is called,
+         * overlay_ops->commit() and manager_ops->commit() callbacks
+         * can be called two times, first at drm_crtc_helper_set_mode()
+         * and second at drm_helper_connector_dpms().
+         * so with this setting, when drm_helper_connector_dpms() is called
+         * encoder->funcs->dpms() will be ignored.
+         */
+        exynos_encoder->dpms = mode;
+
+        /*
          * if this condition is ok then it means that the crtc is already
          * detached from encoder and last function for detaching is properly
          * done, so clear pipe from manager to prevent repeated call.
@@ -422,4 +498,14 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)

         if (overlay_ops && overlay_ops->disable)
                 overlay_ops->disable(manager->dev, zpos);
+
+        /*
+         * wait for vblank interrupt
+         * - this makes sure that hardware overlay is disabled to avoid
+         * for the dma accesses to memory after gem buffer was released
+         * because the setting for disabling the overlay will be updated
+         * at vsync.
+         */
+        if (overlay_ops->wait_for_vblank)
+                overlay_ops->wait_for_vblank(manager->dev);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 53afcc5f0945..4ef4cd3f9936 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -41,10 +41,12 @@
  * exynos specific framebuffer structure.
  *
  * @fb: drm framebuffer obejct.
+ * @buf_cnt: a buffer count to drm framebuffer.
  * @exynos_gem_obj: array of exynos specific gem object containing a gem object.
  */
 struct exynos_drm_fb {
         struct drm_framebuffer          fb;
+        unsigned int                    buf_cnt;
         struct exynos_drm_gem_obj       *exynos_gem_obj[MAX_FB_BUFFER];
 };

@@ -101,6 +103,25 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
         .dirty          = exynos_drm_fb_dirty,
 };

+void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
+                                                unsigned int cnt)
+{
+        struct exynos_drm_fb *exynos_fb;
+
+        exynos_fb = to_exynos_fb(fb);
+
+        exynos_fb->buf_cnt = cnt;
+}
+
+unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
+{
+        struct exynos_drm_fb *exynos_fb;
+
+        exynos_fb = to_exynos_fb(fb);
+
+        return exynos_fb->buf_cnt;
+}
+
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
                             struct drm_mode_fb_cmd2 *mode_cmd,
@@ -127,6 +148,43 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
         return &exynos_fb->fb;
 }

+static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
+{
+        unsigned int cnt = 0;
+
+        if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
+                return drm_format_num_planes(mode_cmd->pixel_format);
+
+        while (cnt != MAX_FB_BUFFER) {
+                if (!mode_cmd->handles[cnt])
+                        break;
+                cnt++;
+        }
+
+        /*
+         * check if NV12 or NV12M.
+         *
+         * NV12
+         * handles[0] = base1, offsets[0] = 0
+         * handles[1] = base1, offsets[1] = Y_size
+         *
+         * NV12M
+         * handles[0] = base1, offsets[0] = 0
+         * handles[1] = base2, offsets[1] = 0
+         */
+        if (cnt == 2) {
+                /*
+                 * in case of NV12 format, offsets[1] is not 0 and
+                 * handles[0] is same as handles[1].
+                 */
+                if (mode_cmd->offsets[1] &&
+                                mode_cmd->handles[0] == mode_cmd->handles[1])
+                        cnt = 1;
+        }
+
+        return cnt;
+}
+
 static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                       struct drm_mode_fb_cmd2 *mode_cmd)
@@ -134,7 +192,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
         struct drm_gem_object *obj;
         struct drm_framebuffer *fb;
         struct exynos_drm_fb *exynos_fb;
-        int nr;
         int i;

         DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -152,9 +209,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
         }

         exynos_fb = to_exynos_fb(fb);
-        nr = exynos_drm_format_num_buffers(fb->pixel_format);
+        exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
+
+        DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);

-        for (i = 1; i < nr; i++) {
+        for (i = 1; i < exynos_fb->buf_cnt; i++) {
                 obj = drm_gem_object_lookup(dev, file_priv,
                                 mode_cmd->handles[i]);
                 if (!obj) {
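The NV12/NV12M distinction drawn in exynos_drm_format_num_buffers() above comes down to how userspace fills struct drm_mode_fb_cmd2. A hedged sketch of the two layouts; the handle values and frame size are made up, and width/height/pitches are omitted for brevity:

/* Illustrative only: same fourcc, different buffer layouts for 640x480. */
struct drm_mode_fb_cmd2 nv12_single = {
        .pixel_format   = DRM_FORMAT_NV12,
        .handles        = { 7, 7 },             /* one GEM object used twice */
        .offsets        = { 0, 640 * 480 },     /* chroma follows the Y plane */
};      /* exynos_drm_format_num_buffers() returns 1 */

struct drm_mode_fb_cmd2 nv12m_dual = {
        .pixel_format   = DRM_FORMAT_NV12,
        .handles        = { 7, 8 },             /* one GEM object per plane */
        .offsets        = { 0, 0 },
};      /* exynos_drm_format_num_buffers() returns 2 */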
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 50823756cdea..96262e54f76d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -28,19 +28,6 @@
 #ifndef _EXYNOS_DRM_FB_H_
 #define _EXYNOS_DRM_FB_H

-static inline int exynos_drm_format_num_buffers(uint32_t format)
-{
-        switch (format) {
-        case DRM_FORMAT_NV12:
-        case DRM_FORMAT_NV12MT:
-                return 2;
-        case DRM_FORMAT_YUV420:
-                return 3;
-        default:
-                return 1;
-        }
-}
-
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
                             struct drm_mode_fb_cmd2 *mode_cmd,
@@ -52,4 +39,11 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,

 void exynos_drm_mode_config_init(struct drm_device *dev);

+/* set a buffer count to drm framebuffer. */
+void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
+                                unsigned int cnt);
+
+/* get a buffer count to drm framebuffer. */
+unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
+
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index bd4ff6348239..67eb6ba56edf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -79,6 +79,9 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
                 return -EFAULT;
         }

+        /* buffer count to framebuffer always is 1 at booting time. */
+        exynos_drm_fb_set_buf_cnt(fb, 1);
+
         offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
         offset += fbi->var.yoffset * fb->pitches[0];

diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 58d50e368a58..a32837951dd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -57,6 +57,18 @@

 #define get_fimd_context(dev)   platform_get_drvdata(to_platform_device(dev))

+struct fimd_driver_data {
+        unsigned int timing_base;
+};
+
+struct fimd_driver_data exynos4_fimd_driver_data = {
+        .timing_base = 0x0,
+};
+
+struct fimd_driver_data exynos5_fimd_driver_data = {
+        .timing_base = 0x20000,
+};
+
 struct fimd_win_data {
         unsigned int            offset_x;
         unsigned int            offset_y;
@@ -91,6 +103,13 @@ struct fimd_context {
         struct exynos_drm_panel_info *panel;
 };

+static inline struct fimd_driver_data *drm_fimd_get_driver_data(
+        struct platform_device *pdev)
+{
+        return (struct fimd_driver_data *)
+                platform_get_device_id(pdev)->driver_data;
+}
+
 static bool fimd_display_is_connected(struct device *dev)
 {
         DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -194,32 +213,35 @@ static void fimd_commit(struct device *dev)
         struct fimd_context *ctx = get_fimd_context(dev);
         struct exynos_drm_panel_info *panel = ctx->panel;
         struct fb_videomode *timing = &panel->timing;
+        struct fimd_driver_data *driver_data;
+        struct platform_device *pdev = to_platform_device(dev);
         u32 val;

+        driver_data = drm_fimd_get_driver_data(pdev);
         if (ctx->suspended)
                 return;

         DRM_DEBUG_KMS("%s\n", __FILE__);

         /* setup polarity values from machine code. */
-        writel(ctx->vidcon1, ctx->regs + VIDCON1);
+        writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);

         /* setup vertical timing values. */
         val = VIDTCON0_VBPD(timing->upper_margin - 1) |
                 VIDTCON0_VFPD(timing->lower_margin - 1) |
                 VIDTCON0_VSPW(timing->vsync_len - 1);
-        writel(val, ctx->regs + VIDTCON0);
+        writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);

         /* setup horizontal timing values. */
         val = VIDTCON1_HBPD(timing->left_margin - 1) |
                 VIDTCON1_HFPD(timing->right_margin - 1) |
                 VIDTCON1_HSPW(timing->hsync_len - 1);
-        writel(val, ctx->regs + VIDTCON1);
+        writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);

         /* setup horizontal and vertical display size. */
         val = VIDTCON2_LINEVAL(timing->yres - 1) |
                 VIDTCON2_HOZVAL(timing->xres - 1);
-        writel(val, ctx->regs + VIDTCON2);
+        writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);

         /* setup clock source, clock divider, enable dma. */
         val = ctx->vidcon0;
@@ -570,10 +592,22 @@ static void fimd_win_disable(struct device *dev, int zpos)
         win_data->enabled = false;
 }

+static void fimd_wait_for_vblank(struct device *dev)
+{
+        struct fimd_context *ctx = get_fimd_context(dev);
+        int ret;
+
+        ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
+                                        VIDCON1_VSTATUS_VSYNC), 50);
+        if (ret < 0)
+                DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
         .mode_set = fimd_win_mode_set,
         .commit = fimd_win_commit,
         .disable = fimd_win_disable,
+        .wait_for_vblank = fimd_wait_for_vblank,
 };

 static struct exynos_drm_manager fimd_manager = {
@@ -678,7 +712,7 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
         return 0;
 }

-static void fimd_subdrv_remove(struct drm_device *drm_dev)
+static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
         DRM_DEBUG_KMS("%s\n", __FILE__);

@@ -747,16 +781,10 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
         writel(val, ctx->regs + SHADOWCON);
 }

-static int fimd_power_on(struct fimd_context *ctx, bool enable)
+static int fimd_clock(struct fimd_context *ctx, bool enable)
 {
-        struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-        struct device *dev = subdrv->dev;
-
         DRM_DEBUG_KMS("%s\n", __FILE__);

-        if (enable != false && enable != true)
-                return -EINVAL;
-
         if (enable) {
                 int ret;

@@ -769,18 +797,31 @@ static int fimd_power_on(struct fimd_context *ctx, bool enable)
                         clk_disable(ctx->bus_clk);
                         return ret;
                 }
+        } else {
+                clk_disable(ctx->lcd_clk);
+                clk_disable(ctx->bus_clk);
+        }
+
+        return 0;
+}
+
+static int fimd_activate(struct fimd_context *ctx, bool enable)
+{
+        if (enable) {
+                int ret;
+                struct device *dev = ctx->subdrv.dev;
+
+                ret = fimd_clock(ctx, true);
+                if (ret < 0)
+                        return ret;

                 ctx->suspended = false;

                 /* if vblank was enabled status, enable it again. */
                 if (test_and_clear_bit(0, &ctx->irq_flags))
                         fimd_enable_vblank(dev);
-
-                fimd_apply(dev);
         } else {
-                clk_disable(ctx->lcd_clk);
-                clk_disable(ctx->bus_clk);
-
+                fimd_clock(ctx, false);
                 ctx->suspended = true;
         }

@@ -930,15 +971,15 @@ static int fimd_suspend(struct device *dev)
 {
         struct fimd_context *ctx = get_fimd_context(dev);

-        if (pm_runtime_suspended(dev))
-                return 0;
-
         /*
          * do not use pm_runtime_suspend(). if pm_runtime_suspend() is
          * called here, an error would be returned by that interface
          * because the usage_count of pm runtime is more than 1.
          */
-        return fimd_power_on(ctx, false);
+        if (!pm_runtime_suspended(dev))
+                return fimd_activate(ctx, false);
+
+        return 0;
 }

 static int fimd_resume(struct device *dev)
@@ -950,8 +991,21 @@ static int fimd_resume(struct device *dev)
          * of pm runtime would still be 1 so in this case, fimd driver
          * should be on directly not drawing on pm runtime interface.
          */
-        if (!pm_runtime_suspended(dev))
-                return fimd_power_on(ctx, true);
+        if (pm_runtime_suspended(dev)) {
+                int ret;
+
+                ret = fimd_activate(ctx, true);
+                if (ret < 0)
+                        return ret;
+
+                /*
+                 * in case of dpms on(standby), fimd_apply function will
+                 * be called by encoder's dpms callback to update fimd's
+                 * registers but in case of sleep wakeup, it's not.
+                 * so fimd_apply function should be called at here.
+                 */
+                fimd_apply(dev);
+        }

         return 0;
 }
@@ -964,7 +1018,7 @@ static int fimd_runtime_suspend(struct device *dev)

         DRM_DEBUG_KMS("%s\n", __FILE__);

-        return fimd_power_on(ctx, false);
+        return fimd_activate(ctx, false);
 }

 static int fimd_runtime_resume(struct device *dev)
@@ -973,10 +1027,22 @@ static int fimd_runtime_resume(struct device *dev)

         DRM_DEBUG_KMS("%s\n", __FILE__);

-        return fimd_power_on(ctx, true);
+        return fimd_activate(ctx, true);
 }
 #endif

+static struct platform_device_id fimd_driver_ids[] = {
+        {
+                .name           = "exynos4-fb",
+                .driver_data    = (unsigned long)&exynos4_fimd_driver_data,
+        }, {
+                .name           = "exynos5-fb",
+                .driver_data    = (unsigned long)&exynos5_fimd_driver_data,
+        },
+        {},
+};
+MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
+
 static const struct dev_pm_ops fimd_pm_ops = {
         SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
         SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
@@ -985,6 +1051,7 @@ static const struct dev_pm_ops fimd_pm_ops = {
 struct platform_driver fimd_driver = {
         .probe          = fimd_probe,
         .remove         = __devexit_p(fimd_remove),
+        .id_table       = fimd_driver_ids,
         .driver         = {
                 .name   = "exynos4-fb",
                 .owner  = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index bc2a2e9be8eb..f7aab24ea46c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -122,6 +122,7 @@ struct g2d_runqueue_node {
         struct list_head        list;
         struct list_head        run_cmdlist;
         struct list_head        event_list;
+        pid_t                   pid;
         struct completion       complete;
         int                     async;
 };
@@ -164,8 +165,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
                 return -ENOMEM;
         }

-        node = kcalloc(G2D_CMDLIST_NUM, G2D_CMDLIST_NUM * sizeof(*node),
-                        GFP_KERNEL);
+        node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
         if (!node) {
                 dev_err(dev, "failed to allocate memory\n");
                 ret = -ENOMEM;
@@ -679,6 +679,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
         }

         mutex_lock(&g2d->runqueue_mutex);
+        runqueue_node->pid = current->pid;
         list_add_tail(&runqueue_node->list, &g2d->runqueue);
         if (!g2d->runqueue_node)
                 g2d_exec_runqueue(g2d);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index c3d3a5e4f109..c3b9e2b45185 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c | |||
| @@ -29,6 +29,11 @@ | |||
| 29 | #define get_ctx_from_subdrv(subdrv) container_of(subdrv,\ | 29 | #define get_ctx_from_subdrv(subdrv) container_of(subdrv,\ |
| 30 | struct drm_hdmi_context, subdrv); | 30 | struct drm_hdmi_context, subdrv); |
| 31 | 31 | ||
| 32 | /* Common hdmi subdrv needs to access the hdmi and mixer though context. | ||
| 33 | * These should be initialied by the repective drivers */ | ||
| 34 | static struct exynos_drm_hdmi_context *hdmi_ctx; | ||
| 35 | static struct exynos_drm_hdmi_context *mixer_ctx; | ||
| 36 | |||
| 32 | /* these callback points should be set by specific drivers. */ | 37 | /* these callback points should be set by specific drivers. */ |
| 33 | static struct exynos_hdmi_ops *hdmi_ops; | 38 | static struct exynos_hdmi_ops *hdmi_ops; |
| 34 | static struct exynos_mixer_ops *mixer_ops; | 39 | static struct exynos_mixer_ops *mixer_ops; |
| @@ -41,6 +46,18 @@ struct drm_hdmi_context { | |||
| 41 | bool enabled[MIXER_WIN_NR]; | 46 | bool enabled[MIXER_WIN_NR]; |
| 42 | }; | 47 | }; |
| 43 | 48 | ||
| 49 | void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx) | ||
| 50 | { | ||
| 51 | if (ctx) | ||
| 52 | hdmi_ctx = ctx; | ||
| 53 | } | ||
| 54 | |||
| 55 | void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx) | ||
| 56 | { | ||
| 57 | if (ctx) | ||
| 58 | mixer_ctx = ctx; | ||
| 59 | } | ||
| 60 | |||
| 44 | void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops) | 61 | void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops) |
| 45 | { | 62 | { |
| 46 | DRM_DEBUG_KMS("%s\n", __FILE__); | 63 | DRM_DEBUG_KMS("%s\n", __FILE__); |
| @@ -274,10 +291,21 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos) | |||
| 274 | ctx->enabled[win] = false; | 291 | ctx->enabled[win] = false; |
| 275 | } | 292 | } |
| 276 | 293 | ||
| 294 | static void drm_mixer_wait_for_vblank(struct device *subdrv_dev) | ||
| 295 | { | ||
| 296 | struct drm_hdmi_context *ctx = to_context(subdrv_dev); | ||
| 297 | |||
| 298 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
| 299 | |||
| 300 | if (mixer_ops && mixer_ops->wait_for_vblank) | ||
| 301 | mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx); | ||
| 302 | } | ||
| 303 | |||
| 277 | static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { | 304 | static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { |
| 278 | .mode_set = drm_mixer_mode_set, | 305 | .mode_set = drm_mixer_mode_set, |
| 279 | .commit = drm_mixer_commit, | 306 | .commit = drm_mixer_commit, |
| 280 | .disable = drm_mixer_disable, | 307 | .disable = drm_mixer_disable, |
| 308 | .wait_for_vblank = drm_mixer_wait_for_vblank, | ||
| 281 | }; | 309 | }; |
| 282 | 310 | ||
| 283 | static struct exynos_drm_manager hdmi_manager = { | 311 | static struct exynos_drm_manager hdmi_manager = { |
| @@ -292,46 +320,30 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev, | |||
| 292 | { | 320 | { |
| 293 | struct exynos_drm_subdrv *subdrv = to_subdrv(dev); | 321 | struct exynos_drm_subdrv *subdrv = to_subdrv(dev); |
| 294 | struct drm_hdmi_context *ctx; | 322 | struct drm_hdmi_context *ctx; |
| 295 | struct platform_device *pdev = to_platform_device(dev); | ||
| 296 | struct exynos_drm_common_hdmi_pd *pd; | ||
| 297 | 323 | ||
| 298 | DRM_DEBUG_KMS("%s\n", __FILE__); | 324 | DRM_DEBUG_KMS("%s\n", __FILE__); |
| 299 | 325 | ||
| 300 | pd = pdev->dev.platform_data; | 326 | if (!hdmi_ctx) { |
| 301 | 327 | DRM_ERROR("hdmi context not initialized.\n"); | |
| 302 | if (!pd) { | ||
| 303 | DRM_DEBUG_KMS("platform data is null.\n"); | ||
| 304 | return -EFAULT; | ||
| 305 | } | ||
| 306 | |||
| 307 | if (!pd->hdmi_dev) { | ||
| 308 | DRM_DEBUG_KMS("hdmi device is null.\n"); | ||
| 309 | return -EFAULT; | 328 | return -EFAULT; |
| 310 | } | 329 | } |
| 311 | 330 | ||
| 312 | if (!pd->mixer_dev) { | 331 | if (!mixer_ctx) { |
| 313 | DRM_DEBUG_KMS("mixer device is null.\n"); | 332 | DRM_ERROR("mixer context not initialized.\n"); |
| 314 | return -EFAULT; | 333 | return -EFAULT; |
| 315 | } | 334 | } |
| 316 | 335 | ||
| 317 | ctx = get_ctx_from_subdrv(subdrv); | 336 | ctx = get_ctx_from_subdrv(subdrv); |
| 318 | 337 | ||
| 319 | ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *) | 338 | if (!ctx) { |
| 320 | to_context(pd->hdmi_dev); | 339 | DRM_ERROR("no drm hdmi context.\n"); |
| 321 | if (!ctx->hdmi_ctx) { | ||
| 322 | DRM_DEBUG_KMS("hdmi context is null.\n"); | ||
| 323 | return -EFAULT; | 340 | return -EFAULT; |
| 324 | } | 341 | } |
| 325 | 342 | ||
| 326 | ctx->hdmi_ctx->drm_dev = drm_dev; | 343 | ctx->hdmi_ctx = hdmi_ctx; |
| 327 | 344 | ctx->mixer_ctx = mixer_ctx; | |
| 328 | ctx->mixer_ctx = (struct exynos_drm_hdmi_context *) | ||
| 329 | to_context(pd->mixer_dev); | ||
| 330 | if (!ctx->mixer_ctx) { | ||
| 331 | DRM_DEBUG_KMS("mixer context is null.\n"); | ||
| 332 | return -EFAULT; | ||
| 333 | } | ||
| 334 | 345 | ||
| 346 | ctx->hdmi_ctx->drm_dev = drm_dev; | ||
| 335 | ctx->mixer_ctx->drm_dev = drm_dev; | 347 | ctx->mixer_ctx->drm_dev = drm_dev; |
| 336 | 348 | ||
| 337 | return 0; | 349 | return 0; |
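
The exynos_drm_hdmi.c change above replaces platform-data lookups with explicit attach calls: the hdmi and mixer drivers now hand their contexts to the common sub-driver, which only checks that both were registered before binding. A small driver-free sketch of that registration pattern (types and names here are illustrative, not the driver's own):

#include <stdio.h>

struct hdmi_context { const char *name; };

static struct hdmi_context *hdmi_ctx;
static struct hdmi_context *mixer_ctx;

/* sub-drivers register their contexts with the common layer */
static void hdmi_drv_attach(struct hdmi_context *ctx)
{
	if (ctx)
		hdmi_ctx = ctx;
}

static void mixer_drv_attach(struct hdmi_context *ctx)
{
	if (ctx)
		mixer_ctx = ctx;
}

/* the common layer only checks that both contexts were attached */
static int common_probe(void)
{
	if (!hdmi_ctx || !mixer_ctx) {
		fprintf(stderr, "contexts not initialized\n");
		return -1;
	}
	printf("binding %s + %s\n", hdmi_ctx->name, mixer_ctx->name);
	return 0;
}

int main(void)
{
	static struct hdmi_context hdmi = { "hdmi" };
	static struct hdmi_context mixer = { "mixer" };

	hdmi_drv_attach(&hdmi);
	mixer_drv_attach(&mixer);
	return common_probe() ? 1 : 0;
}

The design choice this models is visible in the hunk: the common sub-driver no longer digs the contexts out of board platform data, which is what lets the later device-tree conversions drop exynos_drm_common_hdmi_pd entirely.
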
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h index a91c42088e42..2da5ffd3a059 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h | |||
| @@ -67,11 +67,14 @@ struct exynos_mixer_ops { | |||
| 67 | void (*dpms)(void *ctx, int mode); | 67 | void (*dpms)(void *ctx, int mode); |
| 68 | 68 | ||
| 69 | /* overlay */ | 69 | /* overlay */ |
| 70 | void (*wait_for_vblank)(void *ctx); | ||
| 70 | void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); | 71 | void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); |
| 71 | void (*win_commit)(void *ctx, int zpos); | 72 | void (*win_commit)(void *ctx, int zpos); |
| 72 | void (*win_disable)(void *ctx, int zpos); | 73 | void (*win_disable)(void *ctx, int zpos); |
| 73 | }; | 74 | }; |
| 74 | 75 | ||
| 76 | void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx); | ||
| 77 | void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx); | ||
| 75 | void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops); | 78 | void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops); |
| 76 | void exynos_mixer_ops_register(struct exynos_mixer_ops *ops); | 79 | void exynos_mixer_ops_register(struct exynos_mixer_ops *ops); |
| 77 | #endif | 80 | #endif |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 03b472b43013..60b877a388c2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
| @@ -32,6 +32,42 @@ static const uint32_t formats[] = { | |||
| 32 | DRM_FORMAT_NV12MT, | 32 | DRM_FORMAT_NV12MT, |
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | /* | ||
| 36 | * This function returns the X or Y size actually visible on the screen, given | ||
| 37 | * the start position and length of the CRTC span. | ||
| 38 | * | ||
| 39 | * <--- length ---> | ||
| 40 | * CRTC ---------------- | ||
| 41 | * ^ start ^ end | ||
| 42 | * | ||
| 43 | * There are six cases, a to f. | ||
| 44 | * | ||
| 45 | * <----- SCREEN -----> | ||
| 46 | * 0 last | ||
| 47 | * ----------|------------------|---------- | ||
| 48 | * CRTCs | ||
| 49 | * a ------- | ||
| 50 | * b ------- | ||
| 51 | * c -------------------------- | ||
| 52 | * d -------- | ||
| 53 | * e ------- | ||
| 54 | * f ------- | ||
| 55 | */ | ||
| 56 | static int exynos_plane_get_size(int start, unsigned length, unsigned last) | ||
| 57 | { | ||
| 58 | int end = start + length; | ||
| 59 | int size = 0; | ||
| 60 | |||
| 61 | if (start <= 0) { | ||
| 62 | if (end > 0) | ||
| 63 | size = min_t(unsigned, end, last); | ||
| 64 | } else if (start <= last) { | ||
| 65 | size = min_t(unsigned, last - start, length); | ||
| 66 | } | ||
| 67 | |||
| 68 | return size; | ||
| 69 | } | ||
| 70 | |||
| 35 | int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, | 71 | int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, |
| 36 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | 72 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, |
| 37 | unsigned int crtc_w, unsigned int crtc_h, | 73 | unsigned int crtc_w, unsigned int crtc_h, |
| @@ -47,7 +83,7 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 47 | 83 | ||
| 48 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); | 84 | DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); |
| 49 | 85 | ||
| 50 | nr = exynos_drm_format_num_buffers(fb->pixel_format); | 86 | nr = exynos_drm_fb_get_buf_cnt(fb); |
| 51 | for (i = 0; i < nr; i++) { | 87 | for (i = 0; i < nr; i++) { |
| 52 | struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i); | 88 | struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i); |
| 53 | 89 | ||
| @@ -64,8 +100,24 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 64 | (unsigned long)overlay->dma_addr[i]); | 100 | (unsigned long)overlay->dma_addr[i]); |
| 65 | } | 101 | } |
| 66 | 102 | ||
| 67 | actual_w = min((unsigned)(crtc->mode.hdisplay - crtc_x), crtc_w); | 103 | actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay); |
| 68 | actual_h = min((unsigned)(crtc->mode.vdisplay - crtc_y), crtc_h); | 104 | actual_h = exynos_plane_get_size(crtc_y, crtc_h, crtc->mode.vdisplay); |
| 105 | |||
| 106 | if (crtc_x < 0) { | ||
| 107 | if (actual_w) | ||
| 108 | src_x -= crtc_x; | ||
| 109 | else | ||
| 110 | src_x += crtc_w; | ||
| 111 | crtc_x = 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | if (crtc_y < 0) { | ||
| 115 | if (actual_h) | ||
| 116 | src_y -= crtc_y; | ||
| 117 | else | ||
| 118 | src_y += crtc_h; | ||
| 119 | crtc_y = 0; | ||
| 120 | } | ||
| 69 | 121 | ||
| 70 | /* set drm framebuffer data. */ | 122 | /* set drm framebuffer data. */ |
| 71 | overlay->fb_x = src_x; | 123 | overlay->fb_x = src_x; |
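
exynos_plane_get_size(), added above, clips a plane span against the visible screen along one axis; the cases a through f in its comment are spans that fall wholly or partly outside the visible range. A standalone sketch of the same rule with a few made-up spans (screen width is assumed to be 1920 here):

#include <stdio.h>

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/*
 * Mirrors the logic of exynos_plane_get_size(): how many pixels of the
 * span [start, start + length) fall inside the visible range [0, last).
 */
static int get_visible_size(int start, unsigned int length, unsigned int last)
{
	int end = start + (int)length;
	int size = 0;

	if (start <= 0) {
		if (end > 0)
			size = umin(end, last);
	} else if (start <= (int)last) {
		size = umin(last - start, length);
	}

	return size;
}

int main(void)
{
	unsigned int screen = 1920;	/* assumed hdisplay */

	printf("fully left of screen : %d\n", get_visible_size(-200, 100, screen));
	printf("overlaps left edge   : %d\n", get_visible_size(-50, 100, screen));
	printf("spans whole screen   : %d\n", get_visible_size(-10, 2000, screen));
	printf("fully inside         : %d\n", get_visible_size(100, 100, screen));
	printf("overlaps right edge  : %d\n", get_visible_size(1900, 100, screen));
	printf("fully right of screen: %d\n", get_visible_size(2000, 100, screen));
	return 0;
}

The hunk then adjusts src_x/src_y in the same spirit: when the CRTC position is negative, the framebuffer origin is shifted by the clipped amount so the visible part of the plane still shows the right pixels.
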
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 8fe431ae537b..e4b8a8f741f7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c | |||
| @@ -56,6 +56,7 @@ struct vidi_context { | |||
| 56 | unsigned int connected; | 56 | unsigned int connected; |
| 57 | bool vblank_on; | 57 | bool vblank_on; |
| 58 | bool suspended; | 58 | bool suspended; |
| 59 | bool direct_vblank; | ||
| 59 | struct work_struct work; | 60 | struct work_struct work; |
| 60 | struct mutex lock; | 61 | struct mutex lock; |
| 61 | }; | 62 | }; |
| @@ -224,6 +225,15 @@ static int vidi_enable_vblank(struct device *dev) | |||
| 224 | if (!test_and_set_bit(0, &ctx->irq_flags)) | 225 | if (!test_and_set_bit(0, &ctx->irq_flags)) |
| 225 | ctx->vblank_on = true; | 226 | ctx->vblank_on = true; |
| 226 | 227 | ||
| 228 | ctx->direct_vblank = true; | ||
| 229 | |||
| 230 | /* | ||
| 231 | * in case of a page flip request, vidi_finish_pageflip() will not | ||
| 232 | * be called here because direct_vblank is true; it will be called | ||
| 233 | * from the overlay_ops->commit callback instead. | ||
| 234 | */ | ||
| 235 | schedule_work(&ctx->work); | ||
| 236 | |||
| 227 | return 0; | 237 | return 0; |
| 228 | } | 238 | } |
| 229 | 239 | ||
| @@ -425,7 +435,17 @@ static void vidi_fake_vblank_handler(struct work_struct *work) | |||
| 425 | /* refresh rate is about 50Hz. */ | 435 | /* refresh rate is about 50Hz. */ |
| 426 | usleep_range(16000, 20000); | 436 | usleep_range(16000, 20000); |
| 427 | 437 | ||
| 428 | drm_handle_vblank(subdrv->drm_dev, manager->pipe); | 438 | mutex_lock(&ctx->lock); |
| 439 | |||
| 440 | if (ctx->direct_vblank) { | ||
| 441 | drm_handle_vblank(subdrv->drm_dev, manager->pipe); | ||
| 442 | ctx->direct_vblank = false; | ||
| 443 | mutex_unlock(&ctx->lock); | ||
| 444 | return; | ||
| 445 | } | ||
| 446 | |||
| 447 | mutex_unlock(&ctx->lock); | ||
| 448 | |||
| 429 | vidi_finish_pageflip(subdrv->drm_dev, manager->pipe); | 449 | vidi_finish_pageflip(subdrv->drm_dev, manager->pipe); |
| 430 | } | 450 | } |
| 431 | 451 | ||
| @@ -453,7 +473,7 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | |||
| 453 | return 0; | 473 | return 0; |
| 454 | } | 474 | } |
| 455 | 475 | ||
| 456 | static void vidi_subdrv_remove(struct drm_device *drm_dev) | 476 | static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev) |
| 457 | { | 477 | { |
| 458 | DRM_DEBUG_KMS("%s\n", __FILE__); | 478 | DRM_DEBUG_KMS("%s\n", __FILE__); |
| 459 | 479 | ||
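
The vidi changes above make vidi_enable_vblank() schedule the fake-vblank worker itself and mark the request with direct_vblank, so the worker can tell a plain vblank request apart from a pending page flip. A rough pthread model of that flag-under-mutex handshake (names, structure and timing are illustrative only, not the driver's code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool direct_vblank;

static void *fake_vblank_worker(void *arg)
{
	(void)arg;
	usleep(16000);			/* roughly one frame period */

	pthread_mutex_lock(&lock);
	if (direct_vblank) {
		/* plain vblank request: report it and clear the flag */
		direct_vblank = false;
		pthread_mutex_unlock(&lock);
		printf("handled as plain vblank\n");
		return NULL;
	}
	pthread_mutex_unlock(&lock);

	/* otherwise treat the event as a page-flip completion */
	printf("handled as page-flip completion\n");
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_mutex_lock(&lock);
	direct_vblank = true;		/* the vidi_enable_vblank() path */
	pthread_mutex_unlock(&lock);

	pthread_create(&worker, NULL, fake_vblank_worker, NULL);
	pthread_join(worker, NULL);
	return 0;
}
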
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index e1c53956aa27..2c115f8a62a3 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
| @@ -32,6 +32,9 @@ | |||
| 32 | #include <linux/pm_runtime.h> | 32 | #include <linux/pm_runtime.h> |
| 33 | #include <linux/clk.h> | 33 | #include <linux/clk.h> |
| 34 | #include <linux/regulator/consumer.h> | 34 | #include <linux/regulator/consumer.h> |
| 35 | #include <linux/io.h> | ||
| 36 | #include <linux/of_gpio.h> | ||
| 37 | #include <plat/gpio-cfg.h> | ||
| 35 | 38 | ||
| 36 | #include <drm/exynos_drm.h> | 39 | #include <drm/exynos_drm.h> |
| 37 | 40 | ||
| @@ -40,10 +43,18 @@ | |||
| 40 | 43 | ||
| 41 | #include "exynos_hdmi.h" | 44 | #include "exynos_hdmi.h" |
| 42 | 45 | ||
| 46 | #include <linux/gpio.h> | ||
| 47 | #include <media/s5p_hdmi.h> | ||
| 48 | |||
| 43 | #define MAX_WIDTH 1920 | 49 | #define MAX_WIDTH 1920 |
| 44 | #define MAX_HEIGHT 1080 | 50 | #define MAX_HEIGHT 1080 |
| 45 | #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) | 51 | #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) |
| 46 | 52 | ||
| 53 | enum hdmi_type { | ||
| 54 | HDMI_TYPE13, | ||
| 55 | HDMI_TYPE14, | ||
| 56 | }; | ||
| 57 | |||
| 47 | struct hdmi_resources { | 58 | struct hdmi_resources { |
| 48 | struct clk *hdmi; | 59 | struct clk *hdmi; |
| 49 | struct clk *sclk_hdmi; | 60 | struct clk *sclk_hdmi; |
| @@ -59,13 +70,12 @@ struct hdmi_context { | |||
| 59 | struct drm_device *drm_dev; | 70 | struct drm_device *drm_dev; |
| 60 | bool hpd; | 71 | bool hpd; |
| 61 | bool powered; | 72 | bool powered; |
| 62 | bool is_v13; | ||
| 63 | bool dvi_mode; | 73 | bool dvi_mode; |
| 64 | struct mutex hdmi_mutex; | 74 | struct mutex hdmi_mutex; |
| 65 | 75 | ||
| 66 | void __iomem *regs; | 76 | void __iomem *regs; |
| 67 | unsigned int external_irq; | 77 | int external_irq; |
| 68 | unsigned int internal_irq; | 78 | int internal_irq; |
| 69 | 79 | ||
| 70 | struct i2c_client *ddc_port; | 80 | struct i2c_client *ddc_port; |
| 71 | struct i2c_client *hdmiphy_port; | 81 | struct i2c_client *hdmiphy_port; |
| @@ -76,8 +86,9 @@ struct hdmi_context { | |||
| 76 | struct hdmi_resources res; | 86 | struct hdmi_resources res; |
| 77 | void *parent_ctx; | 87 | void *parent_ctx; |
| 78 | 88 | ||
| 79 | void (*cfg_hpd)(bool external); | 89 | int hpd_gpio; |
| 80 | int (*get_hpd)(void); | 90 | |
| 91 | enum hdmi_type type; | ||
| 81 | }; | 92 | }; |
| 82 | 93 | ||
| 83 | /* HDMI Version 1.3 */ | 94 | /* HDMI Version 1.3 */ |
| @@ -1209,7 +1220,7 @@ static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix) | |||
| 1209 | 1220 | ||
| 1210 | static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) | 1221 | static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) |
| 1211 | { | 1222 | { |
| 1212 | if (hdata->is_v13) | 1223 | if (hdata->type == HDMI_TYPE13) |
| 1213 | hdmi_v13_regs_dump(hdata, prefix); | 1224 | hdmi_v13_regs_dump(hdata, prefix); |
| 1214 | else | 1225 | else |
| 1215 | hdmi_v14_regs_dump(hdata, prefix); | 1226 | hdmi_v14_regs_dump(hdata, prefix); |
| @@ -1250,7 +1261,7 @@ static int hdmi_v14_conf_index(struct drm_display_mode *mode) | |||
| 1250 | static int hdmi_conf_index(struct hdmi_context *hdata, | 1261 | static int hdmi_conf_index(struct hdmi_context *hdata, |
| 1251 | struct drm_display_mode *mode) | 1262 | struct drm_display_mode *mode) |
| 1252 | { | 1263 | { |
| 1253 | if (hdata->is_v13) | 1264 | if (hdata->type == HDMI_TYPE13) |
| 1254 | return hdmi_v13_conf_index(mode); | 1265 | return hdmi_v13_conf_index(mode); |
| 1255 | 1266 | ||
| 1256 | return hdmi_v14_conf_index(mode); | 1267 | return hdmi_v14_conf_index(mode); |
| @@ -1346,7 +1357,7 @@ static int hdmi_check_timing(void *ctx, void *timing) | |||
| 1346 | check_timing->yres, check_timing->refresh, | 1357 | check_timing->yres, check_timing->refresh, |
| 1347 | check_timing->vmode); | 1358 | check_timing->vmode); |
| 1348 | 1359 | ||
| 1349 | if (hdata->is_v13) | 1360 | if (hdata->type == HDMI_TYPE13) |
| 1350 | return hdmi_v13_check_timing(check_timing); | 1361 | return hdmi_v13_check_timing(check_timing); |
| 1351 | else | 1362 | else |
| 1352 | return hdmi_v14_check_timing(check_timing); | 1363 | return hdmi_v14_check_timing(check_timing); |
| @@ -1412,7 +1423,7 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr) | |||
| 1412 | hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); | 1423 | hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); |
| 1413 | hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); | 1424 | hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); |
| 1414 | 1425 | ||
| 1415 | if (hdata->is_v13) | 1426 | if (hdata->type == HDMI_TYPE13) |
| 1416 | hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); | 1427 | hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); |
| 1417 | else | 1428 | else |
| 1418 | hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); | 1429 | hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); |
| @@ -1516,7 +1527,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata) | |||
| 1516 | { | 1527 | { |
| 1517 | u32 reg; | 1528 | u32 reg; |
| 1518 | 1529 | ||
| 1519 | if (hdata->is_v13) | 1530 | if (hdata->type == HDMI_TYPE13) |
| 1520 | reg = HDMI_V13_CORE_RSTOUT; | 1531 | reg = HDMI_V13_CORE_RSTOUT; |
| 1521 | else | 1532 | else |
| 1522 | reg = HDMI_CORE_RSTOUT; | 1533 | reg = HDMI_CORE_RSTOUT; |
| @@ -1530,12 +1541,9 @@ static void hdmi_conf_reset(struct hdmi_context *hdata) | |||
| 1530 | 1541 | ||
| 1531 | static void hdmi_conf_init(struct hdmi_context *hdata) | 1542 | static void hdmi_conf_init(struct hdmi_context *hdata) |
| 1532 | { | 1543 | { |
| 1533 | /* enable HPD interrupts */ | 1544 | /* disable HPD interrupts */ |
| 1534 | hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | | 1545 | hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | |
| 1535 | HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); | 1546 | HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); |
| 1536 | mdelay(10); | ||
| 1537 | hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL | | ||
| 1538 | HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); | ||
| 1539 | 1547 | ||
| 1540 | /* choose HDMI mode */ | 1548 | /* choose HDMI mode */ |
| 1541 | hdmi_reg_writemask(hdata, HDMI_MODE_SEL, | 1549 | hdmi_reg_writemask(hdata, HDMI_MODE_SEL, |
| @@ -1551,7 +1559,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata) | |||
| 1551 | HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS); | 1559 | HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS); |
| 1552 | } | 1560 | } |
| 1553 | 1561 | ||
| 1554 | if (hdata->is_v13) { | 1562 | if (hdata->type == HDMI_TYPE13) { |
| 1555 | /* choose bluescreen (fecal) color */ | 1563 | /* choose bluescreen (fecal) color */ |
| 1556 | hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); | 1564 | hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); |
| 1557 | hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); | 1565 | hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); |
| @@ -1833,7 +1841,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata) | |||
| 1833 | 1841 | ||
| 1834 | static void hdmi_timing_apply(struct hdmi_context *hdata) | 1842 | static void hdmi_timing_apply(struct hdmi_context *hdata) |
| 1835 | { | 1843 | { |
| 1836 | if (hdata->is_v13) | 1844 | if (hdata->type == HDMI_TYPE13) |
| 1837 | hdmi_v13_timing_apply(hdata); | 1845 | hdmi_v13_timing_apply(hdata); |
| 1838 | else | 1846 | else |
| 1839 | hdmi_v14_timing_apply(hdata); | 1847 | hdmi_v14_timing_apply(hdata); |
| @@ -1855,7 +1863,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata) | |||
| 1855 | if (hdata->hdmiphy_port) | 1863 | if (hdata->hdmiphy_port) |
| 1856 | i2c_master_send(hdata->hdmiphy_port, buffer, 2); | 1864 | i2c_master_send(hdata->hdmiphy_port, buffer, 2); |
| 1857 | 1865 | ||
| 1858 | if (hdata->is_v13) | 1866 | if (hdata->type == HDMI_TYPE13) |
| 1859 | reg = HDMI_V13_PHY_RSTOUT; | 1867 | reg = HDMI_V13_PHY_RSTOUT; |
| 1860 | else | 1868 | else |
| 1861 | reg = HDMI_PHY_RSTOUT; | 1869 | reg = HDMI_PHY_RSTOUT; |
| @@ -1882,7 +1890,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata) | |||
| 1882 | } | 1890 | } |
| 1883 | 1891 | ||
| 1884 | /* pixel clock */ | 1892 | /* pixel clock */ |
| 1885 | if (hdata->is_v13) | 1893 | if (hdata->type == HDMI_TYPE13) |
| 1886 | hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data; | 1894 | hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data; |
| 1887 | else | 1895 | else |
| 1888 | hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data; | 1896 | hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data; |
| @@ -1950,7 +1958,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, | |||
| 1950 | 1958 | ||
| 1951 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 1959 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
| 1952 | 1960 | ||
| 1953 | if (hdata->is_v13) | 1961 | if (hdata->type == HDMI_TYPE13) |
| 1954 | index = hdmi_v13_conf_index(adjusted_mode); | 1962 | index = hdmi_v13_conf_index(adjusted_mode); |
| 1955 | else | 1963 | else |
| 1956 | index = hdmi_v14_conf_index(adjusted_mode); | 1964 | index = hdmi_v14_conf_index(adjusted_mode); |
| @@ -1964,7 +1972,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, | |||
| 1964 | * to adjusted_mode. | 1972 | * to adjusted_mode. |
| 1965 | */ | 1973 | */ |
| 1966 | list_for_each_entry(m, &connector->modes, head) { | 1974 | list_for_each_entry(m, &connector->modes, head) { |
| 1967 | if (hdata->is_v13) | 1975 | if (hdata->type == HDMI_TYPE13) |
| 1968 | index = hdmi_v13_conf_index(m); | 1976 | index = hdmi_v13_conf_index(m); |
| 1969 | else | 1977 | else |
| 1970 | index = hdmi_v14_conf_index(m); | 1978 | index = hdmi_v14_conf_index(m); |
| @@ -2024,8 +2032,6 @@ static void hdmi_poweron(struct hdmi_context *hdata) | |||
| 2024 | 2032 | ||
| 2025 | hdata->powered = true; | 2033 | hdata->powered = true; |
| 2026 | 2034 | ||
| 2027 | if (hdata->cfg_hpd) | ||
| 2028 | hdata->cfg_hpd(true); | ||
| 2029 | mutex_unlock(&hdata->hdmi_mutex); | 2035 | mutex_unlock(&hdata->hdmi_mutex); |
| 2030 | 2036 | ||
| 2031 | pm_runtime_get_sync(hdata->dev); | 2037 | pm_runtime_get_sync(hdata->dev); |
| @@ -2061,8 +2067,6 @@ static void hdmi_poweroff(struct hdmi_context *hdata) | |||
| 2061 | pm_runtime_put_sync(hdata->dev); | 2067 | pm_runtime_put_sync(hdata->dev); |
| 2062 | 2068 | ||
| 2063 | mutex_lock(&hdata->hdmi_mutex); | 2069 | mutex_lock(&hdata->hdmi_mutex); |
| 2064 | if (hdata->cfg_hpd) | ||
| 2065 | hdata->cfg_hpd(false); | ||
| 2066 | 2070 | ||
| 2067 | hdata->powered = false; | 2071 | hdata->powered = false; |
| 2068 | 2072 | ||
| @@ -2110,17 +2114,13 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg) | |||
| 2110 | struct exynos_drm_hdmi_context *ctx = arg; | 2114 | struct exynos_drm_hdmi_context *ctx = arg; |
| 2111 | struct hdmi_context *hdata = ctx->ctx; | 2115 | struct hdmi_context *hdata = ctx->ctx; |
| 2112 | 2116 | ||
| 2113 | if (!hdata->get_hpd) | ||
| 2114 | goto out; | ||
| 2115 | |||
| 2116 | mutex_lock(&hdata->hdmi_mutex); | 2117 | mutex_lock(&hdata->hdmi_mutex); |
| 2117 | hdata->hpd = hdata->get_hpd(); | 2118 | hdata->hpd = gpio_get_value(hdata->hpd_gpio); |
| 2118 | mutex_unlock(&hdata->hdmi_mutex); | 2119 | mutex_unlock(&hdata->hdmi_mutex); |
| 2119 | 2120 | ||
| 2120 | if (ctx->drm_dev) | 2121 | if (ctx->drm_dev) |
| 2121 | drm_helper_hpd_irq_event(ctx->drm_dev); | 2122 | drm_helper_hpd_irq_event(ctx->drm_dev); |
| 2122 | 2123 | ||
| 2123 | out: | ||
| 2124 | return IRQ_HANDLED; | 2124 | return IRQ_HANDLED; |
| 2125 | } | 2125 | } |
| 2126 | 2126 | ||
| @@ -2143,18 +2143,9 @@ static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg) | |||
| 2143 | HDMI_INTC_FLAG_HPD_PLUG); | 2143 | HDMI_INTC_FLAG_HPD_PLUG); |
| 2144 | } | 2144 | } |
| 2145 | 2145 | ||
| 2146 | mutex_lock(&hdata->hdmi_mutex); | ||
| 2147 | hdata->hpd = hdmi_reg_read(hdata, HDMI_HPD_STATUS); | ||
| 2148 | if (hdata->powered && hdata->hpd) { | ||
| 2149 | mutex_unlock(&hdata->hdmi_mutex); | ||
| 2150 | goto out; | ||
| 2151 | } | ||
| 2152 | mutex_unlock(&hdata->hdmi_mutex); | ||
| 2153 | |||
| 2154 | if (ctx->drm_dev) | 2146 | if (ctx->drm_dev) |
| 2155 | drm_helper_hpd_irq_event(ctx->drm_dev); | 2147 | drm_helper_hpd_irq_event(ctx->drm_dev); |
| 2156 | 2148 | ||
| 2157 | out: | ||
| 2158 | return IRQ_HANDLED; | 2149 | return IRQ_HANDLED; |
| 2159 | } | 2150 | } |
| 2160 | 2151 | ||
| @@ -2262,18 +2253,89 @@ void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy) | |||
| 2262 | hdmi_hdmiphy = hdmiphy; | 2253 | hdmi_hdmiphy = hdmiphy; |
| 2263 | } | 2254 | } |
| 2264 | 2255 | ||
| 2256 | #ifdef CONFIG_OF | ||
| 2257 | static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata | ||
| 2258 | (struct device *dev) | ||
| 2259 | { | ||
| 2260 | struct device_node *np = dev->of_node; | ||
| 2261 | struct s5p_hdmi_platform_data *pd; | ||
| 2262 | enum of_gpio_flags flags; | ||
| 2263 | u32 value; | ||
| 2264 | |||
| 2265 | pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); | ||
| 2266 | if (!pd) { | ||
| 2267 | DRM_ERROR("memory allocation for pdata failed\n"); | ||
| 2268 | goto err_data; | ||
| 2269 | } | ||
| 2270 | |||
| 2271 | if (!of_find_property(np, "hpd-gpio", &value)) { | ||
| 2272 | DRM_ERROR("no hpd gpio property found\n"); | ||
| 2273 | goto err_data; | ||
| 2274 | } | ||
| 2275 | |||
| 2276 | pd->hpd_gpio = of_get_named_gpio_flags(np, "hpd-gpio", 0, &flags); | ||
| 2277 | |||
| 2278 | return pd; | ||
| 2279 | |||
| 2280 | err_data: | ||
| 2281 | return NULL; | ||
| 2282 | } | ||
| 2283 | #else | ||
| 2284 | static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata | ||
| 2285 | (struct device *dev) | ||
| 2286 | { | ||
| 2287 | return NULL; | ||
| 2288 | } | ||
| 2289 | #endif | ||
| 2290 | |||
| 2291 | static struct platform_device_id hdmi_driver_types[] = { | ||
| 2292 | { | ||
| 2293 | .name = "s5pv210-hdmi", | ||
| 2294 | .driver_data = HDMI_TYPE13, | ||
| 2295 | }, { | ||
| 2296 | .name = "exynos4-hdmi", | ||
| 2297 | .driver_data = HDMI_TYPE13, | ||
| 2298 | }, { | ||
| 2299 | .name = "exynos4-hdmi14", | ||
| 2300 | .driver_data = HDMI_TYPE14, | ||
| 2301 | }, { | ||
| 2302 | .name = "exynos5-hdmi", | ||
| 2303 | .driver_data = HDMI_TYPE14, | ||
| 2304 | }, { | ||
| 2305 | /* end node */ | ||
| 2306 | } | ||
| 2307 | }; | ||
| 2308 | |||
| 2309 | static struct of_device_id hdmi_match_types[] = { | ||
| 2310 | { | ||
| 2311 | .compatible = "samsung,exynos5-hdmi", | ||
| 2312 | .data = (void *)HDMI_TYPE14, | ||
| 2313 | }, { | ||
| 2314 | /* end node */ | ||
| 2315 | } | ||
| 2316 | }; | ||
| 2317 | |||
| 2265 | static int __devinit hdmi_probe(struct platform_device *pdev) | 2318 | static int __devinit hdmi_probe(struct platform_device *pdev) |
| 2266 | { | 2319 | { |
| 2267 | struct device *dev = &pdev->dev; | 2320 | struct device *dev = &pdev->dev; |
| 2268 | struct exynos_drm_hdmi_context *drm_hdmi_ctx; | 2321 | struct exynos_drm_hdmi_context *drm_hdmi_ctx; |
| 2269 | struct hdmi_context *hdata; | 2322 | struct hdmi_context *hdata; |
| 2270 | struct exynos_drm_hdmi_pdata *pdata; | 2323 | struct s5p_hdmi_platform_data *pdata; |
| 2271 | struct resource *res; | 2324 | struct resource *res; |
| 2272 | int ret; | 2325 | int ret; |
| 2273 | 2326 | ||
| 2274 | DRM_DEBUG_KMS("[%d]\n", __LINE__); | 2327 | DRM_DEBUG_KMS("[%d]\n", __LINE__); |
| 2275 | 2328 | ||
| 2276 | pdata = pdev->dev.platform_data; | 2329 | if (pdev->dev.of_node) { |
| 2330 | pdata = drm_hdmi_dt_parse_pdata(dev); | ||
| 2331 | if (IS_ERR(pdata)) { | ||
| 2332 | DRM_ERROR("failed to parse dt\n"); | ||
| 2333 | return PTR_ERR(pdata); | ||
| 2334 | } | ||
| 2335 | } else { | ||
| 2336 | pdata = pdev->dev.platform_data; | ||
| 2337 | } | ||
| 2338 | |||
| 2277 | if (!pdata) { | 2339 | if (!pdata) { |
| 2278 | DRM_ERROR("no platform data specified\n"); | 2340 | DRM_ERROR("no platform data specified\n"); |
| 2279 | return -EINVAL; | 2341 | return -EINVAL; |
| @@ -2300,18 +2362,33 @@ static int __devinit hdmi_probe(struct platform_device *pdev) | |||
| 2300 | 2362 | ||
| 2301 | platform_set_drvdata(pdev, drm_hdmi_ctx); | 2363 | platform_set_drvdata(pdev, drm_hdmi_ctx); |
| 2302 | 2364 | ||
| 2303 | hdata->is_v13 = pdata->is_v13; | 2365 | if (dev->of_node) { |
| 2304 | hdata->cfg_hpd = pdata->cfg_hpd; | 2366 | const struct of_device_id *match; |
| 2305 | hdata->get_hpd = pdata->get_hpd; | 2367 | match = of_match_node(of_match_ptr(hdmi_match_types), |
| 2368 | pdev->dev.of_node); | ||
| 2369 | hdata->type = (enum hdmi_type)match->data; | ||
| 2370 | } else { | ||
| 2371 | hdata->type = (enum hdmi_type)platform_get_device_id | ||
| 2372 | (pdev)->driver_data; | ||
| 2373 | } | ||
| 2374 | |||
| 2375 | hdata->hpd_gpio = pdata->hpd_gpio; | ||
| 2306 | hdata->dev = dev; | 2376 | hdata->dev = dev; |
| 2307 | 2377 | ||
| 2308 | ret = hdmi_resources_init(hdata); | 2378 | ret = hdmi_resources_init(hdata); |
| 2379 | |||
| 2309 | if (ret) { | 2380 | if (ret) { |
| 2310 | ret = -EINVAL; | 2381 | ret = -EINVAL; |
| 2382 | DRM_ERROR("hdmi_resources_init failed\n"); | ||
| 2311 | goto err_data; | 2383 | goto err_data; |
| 2312 | } | 2384 | } |
| 2313 | 2385 | ||
| 2314 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2386 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 2387 | if (!res) { | ||
| 2388 | DRM_ERROR("failed to find registers\n"); | ||
| 2389 | ret = -ENOENT; | ||
| 2390 | goto err_resource; | ||
| 2391 | } | ||
| 2315 | 2392 | ||
| 2316 | hdata->regs = devm_request_and_ioremap(&pdev->dev, res); | 2393 | hdata->regs = devm_request_and_ioremap(&pdev->dev, res); |
| 2317 | if (!hdata->regs) { | 2394 | if (!hdata->regs) { |
| @@ -2320,11 +2397,17 @@ static int __devinit hdmi_probe(struct platform_device *pdev) | |||
| 2320 | goto err_resource; | 2397 | goto err_resource; |
| 2321 | } | 2398 | } |
| 2322 | 2399 | ||
| 2400 | ret = gpio_request(hdata->hpd_gpio, "HPD"); | ||
| 2401 | if (ret) { | ||
| 2402 | DRM_ERROR("failed to request HPD gpio\n"); | ||
| 2403 | goto err_resource; | ||
| 2404 | } | ||
| 2405 | |||
| 2323 | /* DDC i2c driver */ | 2406 | /* DDC i2c driver */ |
| 2324 | if (i2c_add_driver(&ddc_driver)) { | 2407 | if (i2c_add_driver(&ddc_driver)) { |
| 2325 | DRM_ERROR("failed to register ddc i2c driver\n"); | 2408 | DRM_ERROR("failed to register ddc i2c driver\n"); |
| 2326 | ret = -ENOENT; | 2409 | ret = -ENOENT; |
| 2327 | goto err_resource; | 2410 | goto err_gpio; |
| 2328 | } | 2411 | } |
| 2329 | 2412 | ||
| 2330 | hdata->ddc_port = hdmi_ddc; | 2413 | hdata->ddc_port = hdmi_ddc; |
| @@ -2338,32 +2421,31 @@ static int __devinit hdmi_probe(struct platform_device *pdev) | |||
| 2338 | 2421 | ||
| 2339 | hdata->hdmiphy_port = hdmi_hdmiphy; | 2422 | hdata->hdmiphy_port = hdmi_hdmiphy; |
| 2340 | 2423 | ||
| 2341 | hdata->external_irq = platform_get_irq_byname(pdev, "external_irq"); | 2424 | hdata->external_irq = gpio_to_irq(hdata->hpd_gpio); |
| 2342 | if (hdata->external_irq < 0) { | 2425 | if (hdata->external_irq < 0) { |
| 2343 | DRM_ERROR("failed to get platform irq\n"); | 2426 | DRM_ERROR("failed to get GPIO external irq\n"); |
| 2344 | ret = hdata->external_irq; | 2427 | ret = hdata->external_irq; |
| 2345 | goto err_hdmiphy; | 2428 | goto err_hdmiphy; |
| 2346 | } | 2429 | } |
| 2347 | 2430 | ||
| 2348 | hdata->internal_irq = platform_get_irq_byname(pdev, "internal_irq"); | 2431 | hdata->internal_irq = platform_get_irq(pdev, 0); |
| 2349 | if (hdata->internal_irq < 0) { | 2432 | if (hdata->internal_irq < 0) { |
| 2350 | DRM_ERROR("failed to get platform internal irq\n"); | 2433 | DRM_ERROR("failed to get platform internal irq\n"); |
| 2351 | ret = hdata->internal_irq; | 2434 | ret = hdata->internal_irq; |
| 2352 | goto err_hdmiphy; | 2435 | goto err_hdmiphy; |
| 2353 | } | 2436 | } |
| 2354 | 2437 | ||
| 2438 | hdata->hpd = gpio_get_value(hdata->hpd_gpio); | ||
| 2439 | |||
| 2355 | ret = request_threaded_irq(hdata->external_irq, NULL, | 2440 | ret = request_threaded_irq(hdata->external_irq, NULL, |
| 2356 | hdmi_external_irq_thread, IRQF_TRIGGER_RISING | | 2441 | hdmi_external_irq_thread, IRQF_TRIGGER_RISING | |
| 2357 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 2442 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, |
| 2358 | "hdmi_external", drm_hdmi_ctx); | 2443 | "hdmi_external", drm_hdmi_ctx); |
| 2359 | if (ret) { | 2444 | if (ret) { |
| 2360 | DRM_ERROR("failed to register hdmi internal interrupt\n"); | 2445 | DRM_ERROR("failed to register hdmi external interrupt\n"); |
| 2361 | goto err_hdmiphy; | 2446 | goto err_hdmiphy; |
| 2362 | } | 2447 | } |
| 2363 | 2448 | ||
| 2364 | if (hdata->cfg_hpd) | ||
| 2365 | hdata->cfg_hpd(false); | ||
| 2366 | |||
| 2367 | ret = request_threaded_irq(hdata->internal_irq, NULL, | 2449 | ret = request_threaded_irq(hdata->internal_irq, NULL, |
| 2368 | hdmi_internal_irq_thread, IRQF_ONESHOT, | 2450 | hdmi_internal_irq_thread, IRQF_ONESHOT, |
| 2369 | "hdmi_internal", drm_hdmi_ctx); | 2451 | "hdmi_internal", drm_hdmi_ctx); |
| @@ -2372,6 +2454,9 @@ static int __devinit hdmi_probe(struct platform_device *pdev) | |||
| 2372 | goto err_free_irq; | 2454 | goto err_free_irq; |
| 2373 | } | 2455 | } |
| 2374 | 2456 | ||
| 2457 | /* Attach HDMI Driver to common hdmi. */ | ||
| 2458 | exynos_hdmi_drv_attach(drm_hdmi_ctx); | ||
| 2459 | |||
| 2375 | /* register specific callbacks to common hdmi. */ | 2460 | /* register specific callbacks to common hdmi. */ |
| 2376 | exynos_hdmi_ops_register(&hdmi_ops); | 2461 | exynos_hdmi_ops_register(&hdmi_ops); |
| 2377 | 2462 | ||
| @@ -2385,6 +2470,8 @@ err_hdmiphy: | |||
| 2385 | i2c_del_driver(&hdmiphy_driver); | 2470 | i2c_del_driver(&hdmiphy_driver); |
| 2386 | err_ddc: | 2471 | err_ddc: |
| 2387 | i2c_del_driver(&ddc_driver); | 2472 | i2c_del_driver(&ddc_driver); |
| 2473 | err_gpio: | ||
| 2474 | gpio_free(hdata->hpd_gpio); | ||
| 2388 | err_resource: | 2475 | err_resource: |
| 2389 | hdmi_resources_cleanup(hdata); | 2476 | hdmi_resources_cleanup(hdata); |
| 2390 | err_data: | 2477 | err_data: |
| @@ -2402,6 +2489,9 @@ static int __devexit hdmi_remove(struct platform_device *pdev) | |||
| 2402 | pm_runtime_disable(dev); | 2489 | pm_runtime_disable(dev); |
| 2403 | 2490 | ||
| 2404 | free_irq(hdata->internal_irq, hdata); | 2491 | free_irq(hdata->internal_irq, hdata); |
| 2492 | free_irq(hdata->external_irq, hdata); | ||
| 2493 | |||
| 2494 | gpio_free(hdata->hpd_gpio); | ||
| 2405 | 2495 | ||
| 2406 | hdmi_resources_cleanup(hdata); | 2496 | hdmi_resources_cleanup(hdata); |
| 2407 | 2497 | ||
| @@ -2447,9 +2537,11 @@ static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume); | |||
| 2447 | struct platform_driver hdmi_driver = { | 2537 | struct platform_driver hdmi_driver = { |
| 2448 | .probe = hdmi_probe, | 2538 | .probe = hdmi_probe, |
| 2449 | .remove = __devexit_p(hdmi_remove), | 2539 | .remove = __devexit_p(hdmi_remove), |
| 2540 | .id_table = hdmi_driver_types, | ||
| 2450 | .driver = { | 2541 | .driver = { |
| 2451 | .name = "exynos4-hdmi", | 2542 | .name = "exynos-hdmi", |
| 2452 | .owner = THIS_MODULE, | 2543 | .owner = THIS_MODULE, |
| 2453 | .pm = &hdmi_pm_ops, | 2544 | .pm = &hdmi_pm_ops, |
| 2545 | .of_match_table = hdmi_match_types, | ||
| 2454 | }, | 2546 | }, |
| 2455 | }; | 2547 | }; |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c index 0a8162b7de3d..27d1720f1bbd 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c +++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c | |||
| @@ -42,13 +42,23 @@ static int hdmiphy_remove(struct i2c_client *client) | |||
| 42 | 42 | ||
| 43 | static const struct i2c_device_id hdmiphy_id[] = { | 43 | static const struct i2c_device_id hdmiphy_id[] = { |
| 44 | { "s5p_hdmiphy", 0 }, | 44 | { "s5p_hdmiphy", 0 }, |
| 45 | { "exynos5-hdmiphy", 0 }, | ||
| 45 | { }, | 46 | { }, |
| 46 | }; | 47 | }; |
| 47 | 48 | ||
| 49 | static struct of_device_id hdmiphy_match_types[] = { | ||
| 50 | { | ||
| 51 | .compatible = "samsung,exynos5-hdmiphy", | ||
| 52 | }, { | ||
| 53 | /* end node */ | ||
| 54 | } | ||
| 55 | }; | ||
| 56 | |||
| 48 | struct i2c_driver hdmiphy_driver = { | 57 | struct i2c_driver hdmiphy_driver = { |
| 49 | .driver = { | 58 | .driver = { |
| 50 | .name = "s5p-hdmiphy", | 59 | .name = "exynos-hdmiphy", |
| 51 | .owner = THIS_MODULE, | 60 | .owner = THIS_MODULE, |
| 61 | .of_match_table = hdmiphy_match_types, | ||
| 52 | }, | 62 | }, |
| 53 | .id_table = hdmiphy_id, | 63 | .id_table = hdmiphy_id, |
| 54 | .probe = hdmiphy_probe, | 64 | .probe = hdmiphy_probe, |
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index e6098f247a5d..614b2e9ac462 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
| @@ -73,16 +73,28 @@ struct mixer_resources { | |||
| 73 | struct clk *sclk_dac; | 73 | struct clk *sclk_dac; |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | enum mixer_version_id { | ||
| 77 | MXR_VER_0_0_0_16, | ||
| 78 | MXR_VER_16_0_33_0, | ||
| 79 | }; | ||
| 80 | |||
| 76 | struct mixer_context { | 81 | struct mixer_context { |
| 77 | struct device *dev; | 82 | struct device *dev; |
| 78 | int pipe; | 83 | int pipe; |
| 79 | bool interlace; | 84 | bool interlace; |
| 80 | bool powered; | 85 | bool powered; |
| 86 | bool vp_enabled; | ||
| 81 | u32 int_en; | 87 | u32 int_en; |
| 82 | 88 | ||
| 83 | struct mutex mixer_mutex; | 89 | struct mutex mixer_mutex; |
| 84 | struct mixer_resources mixer_res; | 90 | struct mixer_resources mixer_res; |
| 85 | struct hdmi_win_data win_data[MIXER_WIN_NR]; | 91 | struct hdmi_win_data win_data[MIXER_WIN_NR]; |
| 92 | enum mixer_version_id mxr_ver; | ||
| 93 | }; | ||
| 94 | |||
| 95 | struct mixer_drv_data { | ||
| 96 | enum mixer_version_id version; | ||
| 97 | bool is_vp_enabled; | ||
| 86 | }; | 98 | }; |
| 87 | 99 | ||
| 88 | static const u8 filter_y_horiz_tap8[] = { | 100 | static const u8 filter_y_horiz_tap8[] = { |
| @@ -251,7 +263,8 @@ static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) | |||
| 251 | mixer_reg_writemask(res, MXR_STATUS, enable ? | 263 | mixer_reg_writemask(res, MXR_STATUS, enable ? |
| 252 | MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); | 264 | MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); |
| 253 | 265 | ||
| 254 | vp_reg_write(res, VP_SHADOW_UPDATE, enable ? | 266 | if (ctx->vp_enabled) |
| 267 | vp_reg_write(res, VP_SHADOW_UPDATE, enable ? | ||
| 255 | VP_SHADOW_UPDATE_ENABLE : 0); | 268 | VP_SHADOW_UPDATE_ENABLE : 0); |
| 256 | } | 269 | } |
| 257 | 270 | ||
| @@ -333,8 +346,11 @@ static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable) | |||
| 333 | mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE); | 346 | mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE); |
| 334 | break; | 347 | break; |
| 335 | case 2: | 348 | case 2: |
| 336 | vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); | 349 | if (ctx->vp_enabled) { |
| 337 | mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE); | 350 | vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); |
| 351 | mixer_reg_writemask(res, MXR_CFG, val, | ||
| 352 | MXR_CFG_VP_ENABLE); | ||
| 353 | } | ||
| 338 | break; | 354 | break; |
| 339 | } | 355 | } |
| 340 | } | 356 | } |
| @@ -465,6 +481,18 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
| 465 | vp_regs_dump(ctx); | 481 | vp_regs_dump(ctx); |
| 466 | } | 482 | } |
| 467 | 483 | ||
| 484 | static void mixer_layer_update(struct mixer_context *ctx) | ||
| 485 | { | ||
| 486 | struct mixer_resources *res = &ctx->mixer_res; | ||
| 487 | u32 val; | ||
| 488 | |||
| 489 | val = mixer_reg_read(res, MXR_CFG); | ||
| 490 | |||
| 491 | /* allow one update per vsync only */ | ||
| 492 | if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK)) | ||
| 493 | mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); | ||
| 494 | } | ||
| 495 | |||
| 468 | static void mixer_graph_buffer(struct mixer_context *ctx, int win) | 496 | static void mixer_graph_buffer(struct mixer_context *ctx, int win) |
| 469 | { | 497 | { |
| 470 | struct mixer_resources *res = &ctx->mixer_res; | 498 | struct mixer_resources *res = &ctx->mixer_res; |
| @@ -545,6 +573,11 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) | |||
| 545 | mixer_cfg_scan(ctx, win_data->mode_height); | 573 | mixer_cfg_scan(ctx, win_data->mode_height); |
| 546 | mixer_cfg_rgb_fmt(ctx, win_data->mode_height); | 574 | mixer_cfg_rgb_fmt(ctx, win_data->mode_height); |
| 547 | mixer_cfg_layer(ctx, win, true); | 575 | mixer_cfg_layer(ctx, win, true); |
| 576 | |||
| 577 | /* layer update mandatory for mixer 16.0.33.0 */ | ||
| 578 | if (ctx->mxr_ver == MXR_VER_16_0_33_0) | ||
| 579 | mixer_layer_update(ctx); | ||
| 580 | |||
| 548 | mixer_run(ctx); | 581 | mixer_run(ctx); |
| 549 | 582 | ||
| 550 | mixer_vsync_set_update(ctx, true); | 583 | mixer_vsync_set_update(ctx, true); |
| @@ -592,7 +625,8 @@ static void mixer_win_reset(struct mixer_context *ctx) | |||
| 592 | */ | 625 | */ |
| 593 | val = MXR_LAYER_CFG_GRP1_VAL(3); | 626 | val = MXR_LAYER_CFG_GRP1_VAL(3); |
| 594 | val |= MXR_LAYER_CFG_GRP0_VAL(2); | 627 | val |= MXR_LAYER_CFG_GRP0_VAL(2); |
| 595 | val |= MXR_LAYER_CFG_VP_VAL(1); | 628 | if (ctx->vp_enabled) |
| 629 | val |= MXR_LAYER_CFG_VP_VAL(1); | ||
| 596 | mixer_reg_write(res, MXR_LAYER_CFG, val); | 630 | mixer_reg_write(res, MXR_LAYER_CFG, val); |
| 597 | 631 | ||
| 598 | /* setting background color */ | 632 | /* setting background color */ |
| @@ -615,14 +649,17 @@ static void mixer_win_reset(struct mixer_context *ctx) | |||
| 615 | val = MXR_GRP_CFG_ALPHA_VAL(0); | 649 | val = MXR_GRP_CFG_ALPHA_VAL(0); |
| 616 | mixer_reg_write(res, MXR_VIDEO_CFG, val); | 650 | mixer_reg_write(res, MXR_VIDEO_CFG, val); |
| 617 | 651 | ||
| 618 | /* configuration of Video Processor Registers */ | 652 | if (ctx->vp_enabled) { |
| 619 | vp_win_reset(ctx); | 653 | /* configuration of Video Processor Registers */ |
| 620 | vp_default_filter(res); | 654 | vp_win_reset(ctx); |
| 655 | vp_default_filter(res); | ||
| 656 | } | ||
| 621 | 657 | ||
| 622 | /* disable all layers */ | 658 | /* disable all layers */ |
| 623 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); | 659 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); |
| 624 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); | 660 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); |
| 625 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); | 661 | if (ctx->vp_enabled) |
| 662 | mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); | ||
| 626 | 663 | ||
| 627 | mixer_vsync_set_update(ctx, true); | 664 | mixer_vsync_set_update(ctx, true); |
| 628 | spin_unlock_irqrestore(&res->reg_slock, flags); | 665 | spin_unlock_irqrestore(&res->reg_slock, flags); |
| @@ -645,8 +682,10 @@ static void mixer_poweron(struct mixer_context *ctx) | |||
| 645 | pm_runtime_get_sync(ctx->dev); | 682 | pm_runtime_get_sync(ctx->dev); |
| 646 | 683 | ||
| 647 | clk_enable(res->mixer); | 684 | clk_enable(res->mixer); |
| 648 | clk_enable(res->vp); | 685 | if (ctx->vp_enabled) { |
| 649 | clk_enable(res->sclk_mixer); | 686 | clk_enable(res->vp); |
| 687 | clk_enable(res->sclk_mixer); | ||
| 688 | } | ||
| 650 | 689 | ||
| 651 | mixer_reg_write(res, MXR_INT_EN, ctx->int_en); | 690 | mixer_reg_write(res, MXR_INT_EN, ctx->int_en); |
| 652 | mixer_win_reset(ctx); | 691 | mixer_win_reset(ctx); |
| @@ -666,8 +705,10 @@ static void mixer_poweroff(struct mixer_context *ctx) | |||
| 666 | ctx->int_en = mixer_reg_read(res, MXR_INT_EN); | 705 | ctx->int_en = mixer_reg_read(res, MXR_INT_EN); |
| 667 | 706 | ||
| 668 | clk_disable(res->mixer); | 707 | clk_disable(res->mixer); |
| 669 | clk_disable(res->vp); | 708 | if (ctx->vp_enabled) { |
| 670 | clk_disable(res->sclk_mixer); | 709 | clk_disable(res->vp); |
| 710 | clk_disable(res->sclk_mixer); | ||
| 711 | } | ||
| 671 | 712 | ||
| 672 | pm_runtime_put_sync(ctx->dev); | 713 | pm_runtime_put_sync(ctx->dev); |
| 673 | 714 | ||
| @@ -726,6 +767,18 @@ static void mixer_dpms(void *ctx, int mode) | |||
| 726 | } | 767 | } |
| 727 | } | 768 | } |
| 728 | 769 | ||
| 770 | static void mixer_wait_for_vblank(void *ctx) | ||
| 771 | { | ||
| 772 | struct mixer_context *mixer_ctx = ctx; | ||
| 773 | struct mixer_resources *res = &mixer_ctx->mixer_res; | ||
| 774 | int ret; | ||
| 775 | |||
| 776 | ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) & | ||
| 777 | MXR_INT_STATUS_VSYNC), 50); | ||
| 778 | if (ret < 0) | ||
| 779 | DRM_DEBUG_KMS("vblank wait timed out.\n"); | ||
| 780 | } | ||
| 781 | |||
| 729 | static void mixer_win_mode_set(void *ctx, | 782 | static void mixer_win_mode_set(void *ctx, |
| 730 | struct exynos_drm_overlay *overlay) | 783 | struct exynos_drm_overlay *overlay) |
| 731 | { | 784 | { |
| @@ -788,7 +841,7 @@ static void mixer_win_commit(void *ctx, int win) | |||
| 788 | 841 | ||
| 789 | DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); | 842 | DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); |
| 790 | 843 | ||
| 791 | if (win > 1) | 844 | if (win > 1 && mixer_ctx->vp_enabled) |
| 792 | vp_video_buffer(mixer_ctx, win); | 845 | vp_video_buffer(mixer_ctx, win); |
| 793 | else | 846 | else |
| 794 | mixer_graph_buffer(mixer_ctx, win); | 847 | mixer_graph_buffer(mixer_ctx, win); |
| @@ -818,6 +871,7 @@ static struct exynos_mixer_ops mixer_ops = { | |||
| 818 | .dpms = mixer_dpms, | 871 | .dpms = mixer_dpms, |
| 819 | 872 | ||
| 820 | /* overlay */ | 873 | /* overlay */ |
| 874 | .wait_for_vblank = mixer_wait_for_vblank, | ||
| 821 | .win_mode_set = mixer_win_mode_set, | 875 | .win_mode_set = mixer_win_mode_set, |
| 822 | .win_commit = mixer_win_commit, | 876 | .win_commit = mixer_win_commit, |
| 823 | .win_disable = mixer_win_disable, | 877 | .win_disable = mixer_win_disable, |
| @@ -923,39 +977,20 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, | |||
| 923 | ret = -ENODEV; | 977 | ret = -ENODEV; |
| 924 | goto fail; | 978 | goto fail; |
| 925 | } | 979 | } |
| 926 | mixer_res->vp = clk_get(dev, "vp"); | 980 | |
| 927 | if (IS_ERR_OR_NULL(mixer_res->vp)) { | ||
| 928 | dev_err(dev, "failed to get clock 'vp'\n"); | ||
| 929 | ret = -ENODEV; | ||
| 930 | goto fail; | ||
| 931 | } | ||
| 932 | mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); | ||
| 933 | if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { | ||
| 934 | dev_err(dev, "failed to get clock 'sclk_mixer'\n"); | ||
| 935 | ret = -ENODEV; | ||
| 936 | goto fail; | ||
| 937 | } | ||
| 938 | mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); | 981 | mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); |
| 939 | if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { | 982 | if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { |
| 940 | dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); | 983 | dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); |
| 941 | ret = -ENODEV; | 984 | ret = -ENODEV; |
| 942 | goto fail; | 985 | goto fail; |
| 943 | } | 986 | } |
| 944 | mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); | 987 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 945 | if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { | ||
| 946 | dev_err(dev, "failed to get clock 'sclk_dac'\n"); | ||
| 947 | ret = -ENODEV; | ||
| 948 | goto fail; | ||
| 949 | } | ||
| 950 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr"); | ||
| 951 | if (res == NULL) { | 988 | if (res == NULL) { |
| 952 | dev_err(dev, "get memory resource failed.\n"); | 989 | dev_err(dev, "get memory resource failed.\n"); |
| 953 | ret = -ENXIO; | 990 | ret = -ENXIO; |
| 954 | goto fail; | 991 | goto fail; |
| 955 | } | 992 | } |
| 956 | 993 | ||
| 957 | clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi); | ||
| 958 | |||
| 959 | mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, | 994 | mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, |
| 960 | resource_size(res)); | 995 | resource_size(res)); |
| 961 | if (mixer_res->mixer_regs == NULL) { | 996 | if (mixer_res->mixer_regs == NULL) { |
| @@ -964,57 +999,126 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, | |||
| 964 | goto fail; | 999 | goto fail; |
| 965 | } | 1000 | } |
| 966 | 1001 | ||
| 967 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp"); | 1002 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
| 968 | if (res == NULL) { | 1003 | if (res == NULL) { |
| 969 | dev_err(dev, "get memory resource failed.\n"); | 1004 | dev_err(dev, "get interrupt resource failed.\n"); |
| 970 | ret = -ENXIO; | 1005 | ret = -ENXIO; |
| 971 | goto fail; | 1006 | goto fail; |
| 972 | } | 1007 | } |
| 973 | 1008 | ||
| 974 | mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, | 1009 | ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, |
| 975 | resource_size(res)); | 1010 | 0, "drm_mixer", ctx); |
| 976 | if (mixer_res->vp_regs == NULL) { | 1011 | if (ret) { |
| 977 | dev_err(dev, "register mapping failed.\n"); | 1012 | dev_err(dev, "request interrupt failed.\n"); |
| 978 | ret = -ENXIO; | ||
| 979 | goto fail; | 1013 | goto fail; |
| 980 | } | 1014 | } |
| 1015 | mixer_res->irq = res->start; | ||
| 1016 | |||
| 1017 | return 0; | ||
| 981 | 1018 | ||
| 982 | res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq"); | 1019 | fail: |
| 1020 | if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) | ||
| 1021 | clk_put(mixer_res->sclk_hdmi); | ||
| 1022 | if (!IS_ERR_OR_NULL(mixer_res->mixer)) | ||
| 1023 | clk_put(mixer_res->mixer); | ||
| 1024 | return ret; | ||
| 1025 | } | ||
| 1026 | |||
| 1027 | static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, | ||
| 1028 | struct platform_device *pdev) | ||
| 1029 | { | ||
| 1030 | struct mixer_context *mixer_ctx = ctx->ctx; | ||
| 1031 | struct device *dev = &pdev->dev; | ||
| 1032 | struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; | ||
| 1033 | struct resource *res; | ||
| 1034 | int ret; | ||
| 1035 | |||
| 1036 | mixer_res->vp = clk_get(dev, "vp"); | ||
| 1037 | if (IS_ERR_OR_NULL(mixer_res->vp)) { | ||
| 1038 | dev_err(dev, "failed to get clock 'vp'\n"); | ||
| 1039 | ret = -ENODEV; | ||
| 1040 | goto fail; | ||
| 1041 | } | ||
| 1042 | mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); | ||
| 1043 | if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { | ||
| 1044 | dev_err(dev, "failed to get clock 'sclk_mixer'\n"); | ||
| 1045 | ret = -ENODEV; | ||
| 1046 | goto fail; | ||
| 1047 | } | ||
| 1048 | mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); | ||
| 1049 | if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { | ||
| 1050 | dev_err(dev, "failed to get clock 'sclk_dac'\n"); | ||
| 1051 | ret = -ENODEV; | ||
| 1052 | goto fail; | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | if (mixer_res->sclk_hdmi) | ||
| 1056 | clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi); | ||
| 1057 | |||
| 1058 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 983 | if (res == NULL) { | 1059 | if (res == NULL) { |
| 984 | dev_err(dev, "get interrupt resource failed.\n"); | 1060 | dev_err(dev, "get memory resource failed.\n"); |
| 985 | ret = -ENXIO; | 1061 | ret = -ENXIO; |
| 986 | goto fail; | 1062 | goto fail; |
| 987 | } | 1063 | } |
| 988 | 1064 | ||
| 989 | ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, | 1065 | mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, |
| 990 | 0, "drm_mixer", ctx); | 1066 | resource_size(res)); |
| 991 | if (ret) { | 1067 | if (mixer_res->vp_regs == NULL) { |
| 992 | dev_err(dev, "request interrupt failed.\n"); | 1068 | dev_err(dev, "register mapping failed.\n"); |
| 1069 | ret = -ENXIO; | ||
| 993 | goto fail; | 1070 | goto fail; |
| 994 | } | 1071 | } |
| 995 | mixer_res->irq = res->start; | ||
| 996 | 1072 | ||
| 997 | return 0; | 1073 | return 0; |
| 998 | 1074 | ||
| 999 | fail: | 1075 | fail: |
| 1000 | if (!IS_ERR_OR_NULL(mixer_res->sclk_dac)) | 1076 | if (!IS_ERR_OR_NULL(mixer_res->sclk_dac)) |
| 1001 | clk_put(mixer_res->sclk_dac); | 1077 | clk_put(mixer_res->sclk_dac); |
| 1002 | if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) | ||
| 1003 | clk_put(mixer_res->sclk_hdmi); | ||
| 1004 | if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer)) | 1078 | if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer)) |
| 1005 | clk_put(mixer_res->sclk_mixer); | 1079 | clk_put(mixer_res->sclk_mixer); |
| 1006 | if (!IS_ERR_OR_NULL(mixer_res->vp)) | 1080 | if (!IS_ERR_OR_NULL(mixer_res->vp)) |
| 1007 | clk_put(mixer_res->vp); | 1081 | clk_put(mixer_res->vp); |
| 1008 | if (!IS_ERR_OR_NULL(mixer_res->mixer)) | ||
| 1009 | clk_put(mixer_res->mixer); | ||
| 1010 | return ret; | 1082 | return ret; |
| 1011 | } | 1083 | } |
| 1012 | 1084 | ||
| 1085 | static struct mixer_drv_data exynos5_mxr_drv_data = { | ||
| 1086 | .version = MXR_VER_16_0_33_0, | ||
| 1087 | .is_vp_enabled = 0, | ||
| 1088 | }; | ||
| 1089 | |||
| 1090 | static struct mixer_drv_data exynos4_mxr_drv_data = { | ||
| 1091 | .version = MXR_VER_0_0_0_16, | ||
| 1092 | .is_vp_enabled = 1, | ||
| 1093 | }; | ||
| 1094 | |||
| 1095 | static struct platform_device_id mixer_driver_types[] = { | ||
| 1096 | { | ||
| 1097 | .name = "s5p-mixer", | ||
| 1098 | .driver_data = (unsigned long)&exynos4_mxr_drv_data, | ||
| 1099 | }, { | ||
| 1100 | .name = "exynos5-mixer", | ||
| 1101 | .driver_data = (unsigned long)&exynos5_mxr_drv_data, | ||
| 1102 | }, { | ||
| 1103 | /* end node */ | ||
| 1104 | } | ||
| 1105 | }; | ||
| 1106 | |||
| 1107 | static struct of_device_id mixer_match_types[] = { | ||
| 1108 | { | ||
| 1109 | .compatible = "samsung,exynos5-mixer", | ||
| 1110 | .data = &exynos5_mxr_drv_data, | ||
| 1111 | }, { | ||
| 1112 | /* end node */ | ||
| 1113 | } | ||
| 1114 | }; | ||
| 1115 | |||
| 1013 | static int __devinit mixer_probe(struct platform_device *pdev) | 1116 | static int __devinit mixer_probe(struct platform_device *pdev) |
| 1014 | { | 1117 | { |
| 1015 | struct device *dev = &pdev->dev; | 1118 | struct device *dev = &pdev->dev; |
| 1016 | struct exynos_drm_hdmi_context *drm_hdmi_ctx; | 1119 | struct exynos_drm_hdmi_context *drm_hdmi_ctx; |
| 1017 | struct mixer_context *ctx; | 1120 | struct mixer_context *ctx; |
| 1121 | struct mixer_drv_data *drv; | ||
| 1018 | int ret; | 1122 | int ret; |
| 1019 | 1123 | ||
| 1020 | dev_info(dev, "probe start\n"); | 1124 | dev_info(dev, "probe start\n"); |
| @@ -1034,15 +1138,41 @@ static int __devinit mixer_probe(struct platform_device *pdev) | |||
| 1034 | 1138 | ||
| 1035 | mutex_init(&ctx->mixer_mutex); | 1139 | mutex_init(&ctx->mixer_mutex); |
| 1036 | 1140 | ||
| 1141 | if (dev->of_node) { | ||
| 1142 | const struct of_device_id *match; | ||
| 1143 | match = of_match_node(of_match_ptr(mixer_match_types), | ||
| 1144 | pdev->dev.of_node); | ||
| 1145 | drv = match->data; | ||
| 1146 | } else { | ||
| 1147 | drv = (struct mixer_drv_data *) | ||
| 1148 | platform_get_device_id(pdev)->driver_data; | ||
| 1149 | } | ||
| 1150 | |||
| 1037 | ctx->dev = &pdev->dev; | 1151 | ctx->dev = &pdev->dev; |
| 1038 | drm_hdmi_ctx->ctx = (void *)ctx; | 1152 | drm_hdmi_ctx->ctx = (void *)ctx; |
| 1153 | ctx->vp_enabled = drv->is_vp_enabled; | ||
| 1154 | ctx->mxr_ver = drv->version; | ||
| 1039 | 1155 | ||
| 1040 | platform_set_drvdata(pdev, drm_hdmi_ctx); | 1156 | platform_set_drvdata(pdev, drm_hdmi_ctx); |
| 1041 | 1157 | ||
| 1042 | /* acquire resources: regs, irqs, clocks */ | 1158 | /* acquire resources: regs, irqs, clocks */ |
| 1043 | ret = mixer_resources_init(drm_hdmi_ctx, pdev); | 1159 | ret = mixer_resources_init(drm_hdmi_ctx, pdev); |
| 1044 | if (ret) | 1160 | if (ret) { |
| 1161 | DRM_ERROR("mixer_resources_init failed\n"); | ||
| 1045 | goto fail; | 1162 | goto fail; |
| 1163 | } | ||
| 1164 | |||
| 1165 | if (ctx->vp_enabled) { | ||
| 1166 | /* acquire vp resources: regs, irqs, clocks */ | ||
| 1167 | ret = vp_resources_init(drm_hdmi_ctx, pdev); | ||
| 1168 | if (ret) { | ||
| 1169 | DRM_ERROR("vp_resources_init failed\n"); | ||
| 1170 | goto fail; | ||
| 1171 | } | ||
| 1172 | } | ||
| 1173 | |||
| 1174 | /* attach mixer driver to common hdmi. */ | ||
| 1175 | exynos_mixer_drv_attach(drm_hdmi_ctx); | ||
| 1046 | 1176 | ||
| 1047 | /* register specific callback point to common hdmi. */ | 1177 | /* register specific callback point to common hdmi. */ |
| 1048 | exynos_mixer_ops_register(&mixer_ops); | 1178 | exynos_mixer_ops_register(&mixer_ops); |
| @@ -1082,10 +1212,12 @@ static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL); | |||
| 1082 | 1212 | ||
| 1083 | struct platform_driver mixer_driver = { | 1213 | struct platform_driver mixer_driver = { |
| 1084 | .driver = { | 1214 | .driver = { |
| 1085 | .name = "s5p-mixer", | 1215 | .name = "exynos-mixer", |
| 1086 | .owner = THIS_MODULE, | 1216 | .owner = THIS_MODULE, |
| 1087 | .pm = &mixer_pm_ops, | 1217 | .pm = &mixer_pm_ops, |
| 1218 | .of_match_table = mixer_match_types, | ||
| 1088 | }, | 1219 | }, |
| 1089 | .probe = mixer_probe, | 1220 | .probe = mixer_probe, |
| 1090 | .remove = __devexit_p(mixer_remove), | 1221 | .remove = __devexit_p(mixer_remove), |
| 1222 | .id_table = mixer_driver_types, | ||
| 1091 | }; | 1223 | }; |
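The mixer probe above now resolves its per-variant driver data through two paths: the OF match table when the device was instantiated from a device tree node, and the platform id table otherwise, with both .of_match_table and .id_table wired into mixer_driver. Below is a minimal user-space model of that dual-path lookup, for illustration only; the struct fields mirror what the patch actually reads (version, is_vp_enabled), but the values and the lookup helper are invented for the sketch.

/*
 * User-space model of the DT-first, id-table-fallback lookup used in
 * mixer_probe().  Names mirror the kernel ones; the data values are
 * placeholders, not the real exynos5_mxr_drv_data.
 */
#include <stdio.h>
#include <string.h>

struct mixer_drv_data { int version; int is_vp_enabled; };

static const struct mixer_drv_data exynos5_mxr_drv_data = { 16, 0 };

struct of_device_id_model { const char *compatible; const void *data; };
struct platform_device_id_model { const char *name; unsigned long driver_data; };

static const struct of_device_id_model mixer_match_types[] = {
	{ "samsung,exynos5-mixer", &exynos5_mxr_drv_data },
	{ NULL, NULL }	/* end node */
};

static const struct platform_device_id_model mixer_driver_types[] = {
	{ "exynos5-mixer", (unsigned long)&exynos5_mxr_drv_data },
	{ NULL, 0 }
};

static const struct mixer_drv_data *
lookup_drv_data(const char *of_compatible, const char *pdev_name)
{
	int i;

	if (of_compatible) {		/* booted with a device tree node */
		for (i = 0; mixer_match_types[i].compatible; i++)
			if (!strcmp(mixer_match_types[i].compatible, of_compatible))
				return mixer_match_types[i].data;
	} else {			/* legacy board-file probe */
		for (i = 0; mixer_driver_types[i].name; i++)
			if (!strcmp(mixer_driver_types[i].name, pdev_name))
				return (const struct mixer_drv_data *)
					mixer_driver_types[i].driver_data;
	}
	return NULL;
}

int main(void)
{
	const struct mixer_drv_data *drv =
		lookup_drv_data("samsung,exynos5-mixer", NULL);
	if (drv)
		printf("mixer version %d, vp %sabled\n",
		       drv->version, drv->is_vp_enabled ? "en" : "dis");
	return 0;
}

The point of the shape is only that the device tree path is preferred when a node exists, with the id table kept as the fallback for non-DT boots.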
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h index fd2f4d14cf6d..5d8dbc0301e6 100644 --- a/drivers/gpu/drm/exynos/regs-mixer.h +++ b/drivers/gpu/drm/exynos/regs-mixer.h | |||
| @@ -69,6 +69,7 @@ | |||
| 69 | (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit)) | 69 | (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit)) |
| 70 | 70 | ||
| 71 | /* bits for MXR_STATUS */ | 71 | /* bits for MXR_STATUS */ |
| 72 | #define MXR_STATUS_SOFT_RESET (1 << 8) | ||
| 72 | #define MXR_STATUS_16_BURST (1 << 7) | 73 | #define MXR_STATUS_16_BURST (1 << 7) |
| 73 | #define MXR_STATUS_BURST_MASK (1 << 7) | 74 | #define MXR_STATUS_BURST_MASK (1 << 7) |
| 74 | #define MXR_STATUS_BIG_ENDIAN (1 << 3) | 75 | #define MXR_STATUS_BIG_ENDIAN (1 << 3) |
| @@ -77,6 +78,8 @@ | |||
| 77 | #define MXR_STATUS_REG_RUN (1 << 0) | 78 | #define MXR_STATUS_REG_RUN (1 << 0) |
| 78 | 79 | ||
| 79 | /* bits for MXR_CFG */ | 80 | /* bits for MXR_CFG */ |
| 81 | #define MXR_CFG_LAYER_UPDATE (1 << 31) | ||
| 82 | #define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29) | ||
| 80 | #define MXR_CFG_RGB601_0_255 (0 << 9) | 83 | #define MXR_CFG_RGB601_0_255 (0 << 9) |
| 81 | #define MXR_CFG_RGB601_16_235 (1 << 9) | 84 | #define MXR_CFG_RGB601_16_235 (1 << 9) |
| 82 | #define MXR_CFG_RGB709_0_255 (2 << 9) | 85 | #define MXR_CFG_RGB709_0_255 (2 << 9) |
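The two new single-bit definitions (MXR_STATUS_SOFT_RESET and MXR_CFG_LAYER_UPDATE) are plain read-modify-write flags next to the existing burst and colorimetry fields. A tiny user-space demo of setting such bits without disturbing neighbouring fields follows; mxr_write_mask() here is a stand-in, not the driver's register helper.

/*
 * Demo of the new mixer register bits; the register values live in plain
 * variables instead of MMIO, so this is only a model of the bit math.
 */
#include <stdio.h>

#define MXR_STATUS_SOFT_RESET		(1u << 8)
#define MXR_STATUS_16_BURST		(1u << 7)
#define MXR_CFG_LAYER_UPDATE		(1u << 31)
#define MXR_CFG_LAYER_UPDATE_COUNT_MASK	(3u << 29)

/* emulate a masked read-modify-write on a shadowed register value */
static unsigned int mxr_write_mask(unsigned int reg, unsigned int val,
				   unsigned int mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int status = 0, cfg = 0;

	/* request a soft reset and 16-beat bursts */
	status = mxr_write_mask(status, ~0u,
				MXR_STATUS_SOFT_RESET | MXR_STATUS_16_BURST);
	/* arm a layer update; the 2-bit pending-update counter is untouched */
	cfg = mxr_write_mask(cfg, ~0u, MXR_CFG_LAYER_UPDATE);

	printf("MXR_STATUS=0x%08x MXR_CFG=0x%08x (pending updates=%u)\n",
	       status, cfg,
	       (cfg & MXR_CFG_LAYER_UPDATE_COUNT_MASK) >> 29);
	return 0;
}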
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e957f3740f68..19dbdd7dd564 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1399,10 +1399,16 @@ out: | |||
| 1399 | case 0: | 1399 | case 0: |
| 1400 | case -ERESTARTSYS: | 1400 | case -ERESTARTSYS: |
| 1401 | case -EINTR: | 1401 | case -EINTR: |
| 1402 | case -EBUSY: | ||
| 1403 | /* | ||
| 1404 | * EBUSY is ok: this just means that another thread | ||
| 1405 | * already did the job. | ||
| 1406 | */ | ||
| 1402 | return VM_FAULT_NOPAGE; | 1407 | return VM_FAULT_NOPAGE; |
| 1403 | case -ENOMEM: | 1408 | case -ENOMEM: |
| 1404 | return VM_FAULT_OOM; | 1409 | return VM_FAULT_OOM; |
| 1405 | default: | 1410 | default: |
| 1411 | WARN_ON_ONCE(ret); | ||
| 1406 | return VM_FAULT_SIGBUS; | 1412 | return VM_FAULT_SIGBUS; |
| 1407 | } | 1413 | } |
| 1408 | } | 1414 | } |
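The fault-path change above adds -EBUSY to the set of benign results (another thread already serviced the fault) and warns once on anything unexpected before signalling SIGBUS. A stand-alone model of that translation, with placeholder VM_FAULT_* values and a printf standing in for WARN_ON_ONCE:

/*
 * errno -> VM_FAULT_* translation after this hunk.  The VM_FAULT_* values
 * below are illustrative, and ERESTARTSYS is defined locally because it is
 * kernel-internal (512) and not exposed by glibc's errno.h.
 */
#include <stdio.h>
#include <errno.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512
#endif

enum { VM_FAULT_NOPAGE = 1, VM_FAULT_OOM = 2, VM_FAULT_SIGBUS = 4 };

static int gem_fault_to_vmfault(int ret)
{
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/* EBUSY is ok: another thread already did the job */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		fprintf(stderr, "unexpected error %d\n", ret); /* WARN_ON_ONCE stand-in */
		return VM_FAULT_SIGBUS;
	}
}

int main(void)
{
	printf("-EBUSY  -> %d\n", gem_fault_to_vmfault(-EBUSY));
	printf("-ENOMEM -> %d\n", gem_fault_to_vmfault(-ENOMEM));
	printf("-EIO    -> %d\n", gem_fault_to_vmfault(-EIO));
	return 0;
}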
| @@ -3217,10 +3223,6 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | |||
| 3217 | enum i915_cache_level level; | 3223 | enum i915_cache_level level; |
| 3218 | int ret; | 3224 | int ret; |
| 3219 | 3225 | ||
| 3220 | ret = i915_mutex_lock_interruptible(dev); | ||
| 3221 | if (ret) | ||
| 3222 | return ret; | ||
| 3223 | |||
| 3224 | switch (args->caching) { | 3226 | switch (args->caching) { |
| 3225 | case I915_CACHING_NONE: | 3227 | case I915_CACHING_NONE: |
| 3226 | level = I915_CACHE_NONE; | 3228 | level = I915_CACHE_NONE; |
| @@ -3232,6 +3234,10 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | |||
| 3232 | return -EINVAL; | 3234 | return -EINVAL; |
| 3233 | } | 3235 | } |
| 3234 | 3236 | ||
| 3237 | ret = i915_mutex_lock_interruptible(dev); | ||
| 3238 | if (ret) | ||
| 3239 | return ret; | ||
| 3240 | |||
| 3235 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 3241 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
| 3236 | if (&obj->base == NULL) { | 3242 | if (&obj->base == NULL) { |
| 3237 | ret = -ENOENT; | 3243 | ret = -ENOENT; |
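The second i915_gem.c hunk reorders set_caching so args->caching is validated before i915_mutex_lock_interruptible() is taken; invalid requests therefore never touch struct_mutex. A minimal pthread sketch of the same validate-then-lock ordering (all names invented for the sketch):

/*
 * Validate cheap user input first, only then take the contended lock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static int object_cache_level;

enum { CACHING_NONE = 0, CACHING_CACHED = 1 };

static int set_caching(int caching)
{
	int level;

	/* 1. validate without holding any lock */
	switch (caching) {
	case CACHING_NONE:	level = 0; break;
	case CACHING_CACHED:	level = 1; break;
	default:		return -EINVAL;
	}

	/* 2. only now acquire the (potentially contended) lock */
	if (pthread_mutex_lock(&struct_mutex))
		return -EINTR;
	object_cache_level = level;
	pthread_mutex_unlock(&struct_mutex);
	return 0;
}

int main(void)
{
	printf("valid:   %d\n", set_caching(CACHING_CACHED));
	printf("invalid: %d\n", set_caching(42));
	return 0;
}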
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 1eb48faf741b..05ed42f203d7 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -328,7 +328,7 @@ mi_set_context(struct intel_ring_buffer *ring, | |||
| 328 | * itlb_before_ctx_switch. | 328 | * itlb_before_ctx_switch. |
| 329 | */ | 329 | */ |
| 330 | if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) { | 330 | if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) { |
| 331 | ret = ring->flush(ring, 0, 0); | 331 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0); |
| 332 | if (ret) | 332 | if (ret) |
| 333 | return ret; | 333 | return ret; |
| 334 | } | 334 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 3208650a235c..cedbfd7b3dfa 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
| @@ -91,7 +91,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
| 91 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 91 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
| 92 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 92 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
| 93 | 93 | ||
| 94 | if (INTEL_INFO(dev)->gen >= 6) { | 94 | if (IS_VALLEYVIEW(dev)) { |
| 95 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
| 96 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
| 97 | } else if (INTEL_INFO(dev)->gen >= 6) { | ||
| 95 | uint32_t dimm_c0, dimm_c1; | 98 | uint32_t dimm_c0, dimm_c1; |
| 96 | dimm_c0 = I915_READ(MAD_DIMM_C0); | 99 | dimm_c0 = I915_READ(MAD_DIMM_C0); |
| 97 | dimm_c1 = I915_READ(MAD_DIMM_C1); | 100 | dimm_c1 = I915_READ(MAD_DIMM_C1); |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4e9888388c0c..32e1bda865b8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -697,12 +697,12 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | |||
| 697 | intel_opregion_gse_intr(dev); | 697 | intel_opregion_gse_intr(dev); |
| 698 | 698 | ||
| 699 | for (i = 0; i < 3; i++) { | 699 | for (i = 0; i < 3; i++) { |
| 700 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) | ||
| 701 | drm_handle_vblank(dev, i); | ||
| 700 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { | 702 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { |
| 701 | intel_prepare_page_flip(dev, i); | 703 | intel_prepare_page_flip(dev, i); |
| 702 | intel_finish_page_flip_plane(dev, i); | 704 | intel_finish_page_flip_plane(dev, i); |
| 703 | } | 705 | } |
| 704 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) | ||
| 705 | drm_handle_vblank(dev, i); | ||
| 706 | } | 706 | } |
| 707 | 707 | ||
| 708 | /* check event from PCH */ | 708 | /* check event from PCH */ |
| @@ -784,6 +784,12 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
| 784 | if (de_iir & DE_GSE) | 784 | if (de_iir & DE_GSE) |
| 785 | intel_opregion_gse_intr(dev); | 785 | intel_opregion_gse_intr(dev); |
| 786 | 786 | ||
| 787 | if (de_iir & DE_PIPEA_VBLANK) | ||
| 788 | drm_handle_vblank(dev, 0); | ||
| 789 | |||
| 790 | if (de_iir & DE_PIPEB_VBLANK) | ||
| 791 | drm_handle_vblank(dev, 1); | ||
| 792 | |||
| 787 | if (de_iir & DE_PLANEA_FLIP_DONE) { | 793 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
| 788 | intel_prepare_page_flip(dev, 0); | 794 | intel_prepare_page_flip(dev, 0); |
| 789 | intel_finish_page_flip_plane(dev, 0); | 795 | intel_finish_page_flip_plane(dev, 0); |
| @@ -794,12 +800,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
| 794 | intel_finish_page_flip_plane(dev, 1); | 800 | intel_finish_page_flip_plane(dev, 1); |
| 795 | } | 801 | } |
| 796 | 802 | ||
| 797 | if (de_iir & DE_PIPEA_VBLANK) | ||
| 798 | drm_handle_vblank(dev, 0); | ||
| 799 | |||
| 800 | if (de_iir & DE_PIPEB_VBLANK) | ||
| 801 | drm_handle_vblank(dev, 1); | ||
| 802 | |||
| 803 | /* check event from PCH */ | 803 | /* check event from PCH */ |
| 804 | if (de_iir & DE_PCH_EVENT) { | 804 | if (de_iir & DE_PCH_EVENT) { |
| 805 | if (pch_iir & hotplug_mask) | 805 | if (pch_iir & hotplug_mask) |
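Both IRQ handlers now account the vblank before completing any page flip, so drm_vblank_count_and_time() called from the flip-completion path sees a freshly updated count and timestamp; that is also what lets the intel_display.c hunk further down drop its "early vblank irq" correction. The stub program below only illustrates that ordering, nothing more:

/*
 * Ordering sketch: vblank bookkeeping runs before flip completion in the
 * same interrupt.  The handlers are stubs; the IIR bit values are stand-ins.
 */
#include <stdio.h>

static unsigned int vblank_count;

static void handle_vblank(int pipe)
{
	vblank_count++;			/* updates count + timestamp */
	printf("pipe %d: vblank %u\n", pipe, vblank_count);
}

static void finish_page_flip(int pipe)
{
	/* flip completion stamps its event with the *current* count */
	printf("pipe %d: flip completed at vblank %u\n", pipe, vblank_count);
}

int main(void)
{
	int pipe = 0;
	unsigned int de_iir = 0x3;	/* pretend both bits are pending */

	if (de_iir & 0x1)		/* DE_PIPEA_VBLANK_IVB stand-in */
		handle_vblank(pipe);
	if (de_iir & 0x2)		/* DE_PLANEA_FLIP_DONE_IVB stand-in */
		finish_page_flip(pipe);
	return 0;
}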
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7637824c6a7d..64c1be0a9cfd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -527,6 +527,9 @@ | |||
| 527 | # define VS_TIMER_DISPATCH (1 << 6) | 527 | # define VS_TIMER_DISPATCH (1 << 6) |
| 528 | # define MI_FLUSH_ENABLE (1 << 12) | 528 | # define MI_FLUSH_ENABLE (1 << 12) |
| 529 | 529 | ||
| 530 | #define GEN6_GT_MODE 0x20d0 | ||
| 531 | #define GEN6_GT_MODE_HI (1 << 9) | ||
| 532 | |||
| 530 | #define GFX_MODE 0x02520 | 533 | #define GFX_MODE 0x02520 |
| 531 | #define GFX_MODE_GEN7 0x0229c | 534 | #define GFX_MODE_GEN7 0x0229c |
| 532 | #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) | 535 | #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e3c02655d36f..2b6ce9b2674a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2806,13 +2806,34 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) | |||
| 2806 | udelay(100); | 2806 | udelay(100); |
| 2807 | } | 2807 | } |
| 2808 | 2808 | ||
| 2809 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) | ||
| 2810 | { | ||
| 2811 | struct drm_device *dev = crtc->dev; | ||
| 2812 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2813 | unsigned long flags; | ||
| 2814 | bool pending; | ||
| 2815 | |||
| 2816 | if (atomic_read(&dev_priv->mm.wedged)) | ||
| 2817 | return false; | ||
| 2818 | |||
| 2819 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 2820 | pending = to_intel_crtc(crtc)->unpin_work != NULL; | ||
| 2821 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 2822 | |||
| 2823 | return pending; | ||
| 2824 | } | ||
| 2825 | |||
| 2809 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | 2826 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
| 2810 | { | 2827 | { |
| 2811 | struct drm_device *dev = crtc->dev; | 2828 | struct drm_device *dev = crtc->dev; |
| 2829 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 2812 | 2830 | ||
| 2813 | if (crtc->fb == NULL) | 2831 | if (crtc->fb == NULL) |
| 2814 | return; | 2832 | return; |
| 2815 | 2833 | ||
| 2834 | wait_event(dev_priv->pending_flip_queue, | ||
| 2835 | !intel_crtc_has_pending_flip(crtc)); | ||
| 2836 | |||
| 2816 | mutex_lock(&dev->struct_mutex); | 2837 | mutex_lock(&dev->struct_mutex); |
| 2817 | intel_finish_fb(crtc->fb); | 2838 | intel_finish_fb(crtc->fb); |
| 2818 | mutex_unlock(&dev->struct_mutex); | 2839 | mutex_unlock(&dev->struct_mutex); |
| @@ -4370,7 +4391,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
| 4370 | /* default to 8bpc */ | 4391 | /* default to 8bpc */ |
| 4371 | pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); | 4392 | pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); |
| 4372 | if (is_dp) { | 4393 | if (is_dp) { |
| 4373 | if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { | 4394 | if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
| 4374 | pipeconf |= PIPECONF_BPP_6 | | 4395 | pipeconf |= PIPECONF_BPP_6 | |
| 4375 | PIPECONF_DITHER_EN | | 4396 | PIPECONF_DITHER_EN | |
| 4376 | PIPECONF_DITHER_TYPE_SP; | 4397 | PIPECONF_DITHER_TYPE_SP; |
| @@ -4802,7 +4823,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
| 4802 | target_clock = adjusted_mode->clock; | 4823 | target_clock = adjusted_mode->clock; |
| 4803 | 4824 | ||
| 4804 | /* determine panel color depth */ | 4825 | /* determine panel color depth */ |
| 4805 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode); | 4826 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, |
| 4827 | adjusted_mode); | ||
| 4806 | if (is_lvds && dev_priv->lvds_dither) | 4828 | if (is_lvds && dev_priv->lvds_dither) |
| 4807 | dither = true; | 4829 | dither = true; |
| 4808 | 4830 | ||
| @@ -6159,15 +6181,13 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
| 6159 | struct intel_unpin_work *work; | 6181 | struct intel_unpin_work *work; |
| 6160 | struct drm_i915_gem_object *obj; | 6182 | struct drm_i915_gem_object *obj; |
| 6161 | struct drm_pending_vblank_event *e; | 6183 | struct drm_pending_vblank_event *e; |
| 6162 | struct timeval tnow, tvbl; | 6184 | struct timeval tvbl; |
| 6163 | unsigned long flags; | 6185 | unsigned long flags; |
| 6164 | 6186 | ||
| 6165 | /* Ignore early vblank irqs */ | 6187 | /* Ignore early vblank irqs */ |
| 6166 | if (intel_crtc == NULL) | 6188 | if (intel_crtc == NULL) |
| 6167 | return; | 6189 | return; |
| 6168 | 6190 | ||
| 6169 | do_gettimeofday(&tnow); | ||
| 6170 | |||
| 6171 | spin_lock_irqsave(&dev->event_lock, flags); | 6191 | spin_lock_irqsave(&dev->event_lock, flags); |
| 6172 | work = intel_crtc->unpin_work; | 6192 | work = intel_crtc->unpin_work; |
| 6173 | if (work == NULL || !work->pending) { | 6193 | if (work == NULL || !work->pending) { |
| @@ -6181,25 +6201,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
| 6181 | e = work->event; | 6201 | e = work->event; |
| 6182 | e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); | 6202 | e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); |
| 6183 | 6203 | ||
| 6184 | /* Called before vblank count and timestamps have | ||
| 6185 | * been updated for the vblank interval of flip | ||
| 6186 | * completion? Need to increment vblank count and | ||
| 6187 | * add one videorefresh duration to returned timestamp | ||
| 6188 | * to account for this. We assume this happened if we | ||
| 6189 | * get called over 0.9 frame durations after the last | ||
| 6190 | * timestamped vblank. | ||
| 6191 | * | ||
| 6192 | * This calculation can not be used with vrefresh rates | ||
| 6193 | * below 5Hz (10Hz to be on the safe side) without | ||
| 6194 | * promoting to 64 integers. | ||
| 6195 | */ | ||
| 6196 | if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > | ||
| 6197 | 9 * crtc->framedur_ns) { | ||
| 6198 | e->event.sequence++; | ||
| 6199 | tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + | ||
| 6200 | crtc->framedur_ns); | ||
| 6201 | } | ||
| 6202 | |||
| 6203 | e->event.tv_sec = tvbl.tv_sec; | 6204 | e->event.tv_sec = tvbl.tv_sec; |
| 6204 | e->event.tv_usec = tvbl.tv_usec; | 6205 | e->event.tv_usec = tvbl.tv_usec; |
| 6205 | 6206 | ||
| @@ -6216,9 +6217,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
| 6216 | 6217 | ||
| 6217 | atomic_clear_mask(1 << intel_crtc->plane, | 6218 | atomic_clear_mask(1 << intel_crtc->plane, |
| 6218 | &obj->pending_flip.counter); | 6219 | &obj->pending_flip.counter); |
| 6219 | if (atomic_read(&obj->pending_flip) == 0) | ||
| 6220 | wake_up(&dev_priv->pending_flip_queue); | ||
| 6221 | 6220 | ||
| 6221 | wake_up(&dev_priv->pending_flip_queue); | ||
| 6222 | schedule_work(&work->work); | 6222 | schedule_work(&work->work); |
| 6223 | 6223 | ||
| 6224 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); | 6224 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); |
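intel_crtc_wait_for_pending_flips() now sleeps on pending_flip_queue until intel_crtc_has_pending_flip() (checked under dev->event_lock) reports no unpin_work, and do_intel_finish_page_flip() wakes the queue unconditionally. The program below models that wait/wake pairing; pthread condition variables stand in for wait_event()/wake_up(), and everything else is a sketch rather than driver code.

/*
 * Wait for a pending flip: check the predicate under the lock, sleep until
 * the completion path flips it and broadcasts.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  flip_queue = PTHREAD_COND_INITIALIZER;
static int unpin_work_pending = 1;	/* a flip is in flight */

static void wait_for_pending_flips(void)
{
	pthread_mutex_lock(&event_lock);
	while (unpin_work_pending)		/* predicate under the lock */
		pthread_cond_wait(&flip_queue, &event_lock);
	pthread_mutex_unlock(&event_lock);
}

static void *finish_page_flip(void *arg)
{
	(void)arg;
	usleep(10000);				/* pretend the flip completes */
	pthread_mutex_lock(&event_lock);
	unpin_work_pending = 0;
	pthread_mutex_unlock(&event_lock);
	pthread_cond_broadcast(&flip_queue);	/* wake_up(&pending_flip_queue) */
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, finish_page_flip, NULL);
	wait_for_pending_flips();
	printf("no flips pending, safe to touch the framebuffer\n");
	pthread_join(irq, NULL);
	return 0;
}

Compile with -lpthread.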
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6c8746c030c7..d1e8ddb2d6c0 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <drm/i915_drm.h> | 36 | #include <drm/i915_drm.h> |
| 37 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
| 38 | 38 | ||
| 39 | #define DP_RECEIVER_CAP_SIZE 0xf | ||
| 39 | #define DP_LINK_STATUS_SIZE 6 | 40 | #define DP_LINK_STATUS_SIZE 6 |
| 40 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 41 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
| 41 | 42 | ||
| @@ -1796,8 +1797,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
| 1796 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1797 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
| 1797 | break; | 1798 | break; |
| 1798 | if (i == intel_dp->lane_count && voltage_tries == 5) { | 1799 | if (i == intel_dp->lane_count && voltage_tries == 5) { |
| 1799 | ++loop_tries; | 1800 | if (++loop_tries == 5) { |
| 1800 | if (loop_tries == 5) { | ||
| 1801 | DRM_DEBUG_KMS("too many full retries, give up\n"); | 1801 | DRM_DEBUG_KMS("too many full retries, give up\n"); |
| 1802 | break; | 1802 | break; |
| 1803 | } | 1803 | } |
| @@ -1807,15 +1807,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
| 1807 | } | 1807 | } |
| 1808 | 1808 | ||
| 1809 | /* Check to see if we've tried the same voltage 5 times */ | 1809 | /* Check to see if we've tried the same voltage 5 times */ |
| 1810 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 1810 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { |
| 1811 | ++voltage_tries; | 1811 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
| 1812 | if (voltage_tries == 5) { | ||
| 1813 | DRM_DEBUG_KMS("too many voltage retries, give up\n"); | ||
| 1814 | break; | ||
| 1815 | } | ||
| 1816 | } else | ||
| 1817 | voltage_tries = 0; | 1812 | voltage_tries = 0; |
| 1818 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1813 | } else |
| 1814 | ++voltage_tries; | ||
| 1819 | 1815 | ||
| 1820 | /* Compute new intel_dp->train_set as requested by target */ | 1816 | /* Compute new intel_dp->train_set as requested by target */ |
| 1821 | intel_get_adjust_train(intel_dp, link_status); | 1817 | intel_get_adjust_train(intel_dp, link_status); |
| @@ -1963,12 +1959,25 @@ static bool | |||
| 1963 | intel_dp_get_dpcd(struct intel_dp *intel_dp) | 1959 | intel_dp_get_dpcd(struct intel_dp *intel_dp) |
| 1964 | { | 1960 | { |
| 1965 | if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, | 1961 | if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, |
| 1966 | sizeof(intel_dp->dpcd)) && | 1962 | sizeof(intel_dp->dpcd)) == 0) |
| 1967 | (intel_dp->dpcd[DP_DPCD_REV] != 0)) { | 1963 | return false; /* aux transfer failed */ |
| 1968 | return true; | ||
| 1969 | } | ||
| 1970 | 1964 | ||
| 1971 | return false; | 1965 | if (intel_dp->dpcd[DP_DPCD_REV] == 0) |
| 1966 | return false; /* DPCD not present */ | ||
| 1967 | |||
| 1968 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | ||
| 1969 | DP_DWN_STRM_PORT_PRESENT)) | ||
| 1970 | return true; /* native DP sink */ | ||
| 1971 | |||
| 1972 | if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) | ||
| 1973 | return true; /* no per-port downstream info */ | ||
| 1974 | |||
| 1975 | if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, | ||
| 1976 | intel_dp->downstream_ports, | ||
| 1977 | DP_MAX_DOWNSTREAM_PORTS) == 0) | ||
| 1978 | return false; /* downstream port status fetch failed */ | ||
| 1979 | |||
| 1980 | return true; | ||
| 1972 | } | 1981 | } |
| 1973 | 1982 | ||
| 1974 | static void | 1983 | static void |
| @@ -2068,11 +2077,43 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
| 2068 | } | 2077 | } |
| 2069 | } | 2078 | } |
| 2070 | 2079 | ||
| 2080 | /* XXX this is probably wrong for multiple downstream ports */ | ||
| 2071 | static enum drm_connector_status | 2081 | static enum drm_connector_status |
| 2072 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) | 2082 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) |
| 2073 | { | 2083 | { |
| 2074 | if (intel_dp_get_dpcd(intel_dp)) | 2084 | uint8_t *dpcd = intel_dp->dpcd; |
| 2085 | bool hpd; | ||
| 2086 | uint8_t type; | ||
| 2087 | |||
| 2088 | if (!intel_dp_get_dpcd(intel_dp)) | ||
| 2089 | return connector_status_disconnected; | ||
| 2090 | |||
| 2091 | /* if there's no downstream port, we're done */ | ||
| 2092 | if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) | ||
| 2093 | return connector_status_connected; | ||
| 2094 | |||
| 2095 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ | ||
| 2096 | hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); | ||
| 2097 | if (hpd) { | ||
| 2098 | uint8_t reg; | ||
| 2099 | if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, | ||
| 2100 | ®, 1)) | ||
| 2101 | return connector_status_unknown; | ||
| 2102 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected | ||
| 2103 | : connector_status_disconnected; | ||
| 2104 | } | ||
| 2105 | |||
| 2106 | /* If no HPD, poke DDC gently */ | ||
| 2107 | if (drm_probe_ddc(&intel_dp->adapter)) | ||
| 2075 | return connector_status_connected; | 2108 | return connector_status_connected; |
| 2109 | |||
| 2110 | /* Well we tried, say unknown for unreliable port types */ | ||
| 2111 | type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; | ||
| 2112 | if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) | ||
| 2113 | return connector_status_unknown; | ||
| 2114 | |||
| 2115 | /* Anything else is out of spec, warn and ignore */ | ||
| 2116 | DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); | ||
| 2076 | return connector_status_disconnected; | 2117 | return connector_status_disconnected; |
| 2077 | } | 2118 | } |
| 2078 | 2119 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 05cc7c372fc5..fe7142502f43 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -332,6 +332,7 @@ struct intel_hdmi { | |||
| 332 | }; | 332 | }; |
| 333 | 333 | ||
| 334 | #define DP_RECEIVER_CAP_SIZE 0xf | 334 | #define DP_RECEIVER_CAP_SIZE 0xf |
| 335 | #define DP_MAX_DOWNSTREAM_PORTS 0x10 | ||
| 335 | #define DP_LINK_CONFIGURATION_SIZE 9 | 336 | #define DP_LINK_CONFIGURATION_SIZE 9 |
| 336 | 337 | ||
| 337 | struct intel_dp { | 338 | struct intel_dp { |
| @@ -346,6 +347,7 @@ struct intel_dp { | |||
| 346 | uint8_t link_bw; | 347 | uint8_t link_bw; |
| 347 | uint8_t lane_count; | 348 | uint8_t lane_count; |
| 348 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | 349 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
| 350 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; | ||
| 349 | struct i2c_adapter adapter; | 351 | struct i2c_adapter adapter; |
| 350 | struct i2c_algo_dp_aux_data algo; | 352 | struct i2c_algo_dp_aux_data algo; |
| 351 | bool is_pch_edp; | 353 | bool is_pch_edp; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d69f8f49beb5..b3b4b6cea8b0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -3474,6 +3474,11 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
| 3474 | DISPPLANE_TRICKLE_FEED_DISABLE); | 3474 | DISPPLANE_TRICKLE_FEED_DISABLE); |
| 3475 | intel_flush_display_plane(dev_priv, pipe); | 3475 | intel_flush_display_plane(dev_priv, pipe); |
| 3476 | } | 3476 | } |
| 3477 | |||
| 3478 | /* The default value should be 0x200 according to docs, but the two | ||
| 3479 | * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ | ||
| 3480 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); | ||
| 3481 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); | ||
| 3477 | } | 3482 | } |
| 3478 | 3483 | ||
| 3479 | static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) | 3484 | static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) |
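The two GEN6_GT_MODE writes use i915's masked-register idiom: the upper 16 bits of the written value select which of the lower 16 bits take effect, so the register can be updated without a read-modify-write. The macro bodies below match how i915_reg.h conventionally defines _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE, but they are not part of this patch, so treat them as an assumption; the program just prints the two values the driver ends up writing.

/*
 * Masked-register write values for the GEN6_GT_MODE sequence above.
 */
#include <stdio.h>

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* assumed definition */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* assumed definition */

#define GEN6_GT_MODE_HI		(1 << 9)

int main(void)
{
	/* first write: unmask all 16 low bits and clear them ... */
	unsigned int clear_all = _MASKED_BIT_DISABLE(0xffff);
	/* ... second write: touch only bit 9 and set it */
	unsigned int set_hi = _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI);

	printf("I915_WRITE(GEN6_GT_MODE, 0x%08x)  /* clear all low bits */\n",
	       clear_all);
	printf("I915_WRITE(GEN6_GT_MODE, 0x%08x)  /* enable GT_MODE_HI only */\n",
	       set_hi);
	return 0;
}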
diff --git a/include/drm/Kbuild b/include/drm/Kbuild index 1e38a19d68f6..e69de29bb2d1 100644 --- a/include/drm/Kbuild +++ b/include/drm/Kbuild | |||
| @@ -1,15 +0,0 @@ | |||
| 1 | header-y += drm.h | ||
| 2 | header-y += drm_fourcc.h | ||
| 3 | header-y += drm_mode.h | ||
| 4 | header-y += drm_sarea.h | ||
| 5 | header-y += exynos_drm.h | ||
| 6 | header-y += i810_drm.h | ||
| 7 | header-y += i915_drm.h | ||
| 8 | header-y += mga_drm.h | ||
| 9 | header-y += nouveau_drm.h | ||
| 10 | header-y += r128_drm.h | ||
| 11 | header-y += radeon_drm.h | ||
| 12 | header-y += savage_drm.h | ||
| 13 | header-y += sis_drm.h | ||
| 14 | header-y += via_drm.h | ||
| 15 | header-y += vmwgfx_drm.h | ||
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 1816bb31273a..3fa18b7e9497 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
| @@ -878,6 +878,7 @@ extern char *drm_get_tv_subconnector_name(int val); | |||
| 878 | extern char *drm_get_tv_select_name(int val); | 878 | extern char *drm_get_tv_select_name(int val); |
| 879 | extern void drm_fb_release(struct drm_file *file_priv); | 879 | extern void drm_fb_release(struct drm_file *file_priv); |
| 880 | extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); | 880 | extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); |
| 881 | extern bool drm_probe_ddc(struct i2c_adapter *adapter); | ||
| 881 | extern struct edid *drm_get_edid(struct drm_connector *connector, | 882 | extern struct edid *drm_get_edid(struct drm_connector *connector, |
| 882 | struct i2c_adapter *adapter); | 883 | struct i2c_adapter *adapter); |
| 883 | extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); | 884 | extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 1744b18c06b3..fe061489f91f 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
| @@ -26,7 +26,19 @@ | |||
| 26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
| 27 | #include <linux/i2c.h> | 27 | #include <linux/i2c.h> |
| 28 | 28 | ||
| 29 | /* From the VESA DisplayPort spec */ | 29 | /* |
| 30 | * Unless otherwise noted, all values are from the DP 1.1a spec. Note that | ||
| 31 | * DP and DPCD versions are independent. Differences from 1.0 are not noted, | ||
| 32 | * 1.0 devices basically don't exist in the wild. | ||
| 33 | * | ||
| 34 | * Abbreviations, in chronological order: | ||
| 35 | * | ||
| 36 | * eDP: Embedded DisplayPort version 1 | ||
| 37 | * DPI: DisplayPort Interoperability Guideline v1.1a | ||
| 38 | * 1.2: DisplayPort 1.2 | ||
| 39 | * | ||
| 40 | * 1.2 formally includes both eDP and DPI definitions. | ||
| 41 | */ | ||
| 30 | 42 | ||
| 31 | #define AUX_NATIVE_WRITE 0x8 | 43 | #define AUX_NATIVE_WRITE 0x8 |
| 32 | #define AUX_NATIVE_READ 0x9 | 44 | #define AUX_NATIVE_READ 0x9 |
| @@ -53,7 +65,7 @@ | |||
| 53 | 65 | ||
| 54 | #define DP_MAX_LANE_COUNT 0x002 | 66 | #define DP_MAX_LANE_COUNT 0x002 |
| 55 | # define DP_MAX_LANE_COUNT_MASK 0x1f | 67 | # define DP_MAX_LANE_COUNT_MASK 0x1f |
| 56 | # define DP_TPS3_SUPPORTED (1 << 6) | 68 | # define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ |
| 57 | # define DP_ENHANCED_FRAME_CAP (1 << 7) | 69 | # define DP_ENHANCED_FRAME_CAP (1 << 7) |
| 58 | 70 | ||
| 59 | #define DP_MAX_DOWNSPREAD 0x003 | 71 | #define DP_MAX_DOWNSPREAD 0x003 |
| @@ -69,19 +81,33 @@ | |||
| 69 | /* 10b = TMDS or HDMI */ | 81 | /* 10b = TMDS or HDMI */ |
| 70 | /* 11b = Other */ | 82 | /* 11b = Other */ |
| 71 | # define DP_FORMAT_CONVERSION (1 << 3) | 83 | # define DP_FORMAT_CONVERSION (1 << 3) |
| 84 | # define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ | ||
| 72 | 85 | ||
| 73 | #define DP_MAIN_LINK_CHANNEL_CODING 0x006 | 86 | #define DP_MAIN_LINK_CHANNEL_CODING 0x006 |
| 74 | 87 | ||
| 75 | #define DP_DOWN_STREAM_PORT_COUNT 0x007 | 88 | #define DP_DOWN_STREAM_PORT_COUNT 0x007 |
| 76 | #define DP_PORT_COUNT_MASK 0x0f | 89 | # define DP_PORT_COUNT_MASK 0x0f |
| 77 | #define DP_OUI_SUPPORT (1 << 7) | 90 | # define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ |
| 78 | 91 | # define DP_OUI_SUPPORT (1 << 7) | |
| 79 | #define DP_EDP_CONFIGURATION_CAP 0x00d | 92 | |
| 80 | #define DP_TRAINING_AUX_RD_INTERVAL 0x00e | 93 | #define DP_I2C_SPEED_CAP 0x00c /* DPI */ |
| 81 | 94 | # define DP_I2C_SPEED_1K 0x01 | |
| 82 | #define DP_PSR_SUPPORT 0x070 | 95 | # define DP_I2C_SPEED_5K 0x02 |
| 96 | # define DP_I2C_SPEED_10K 0x04 | ||
| 97 | # define DP_I2C_SPEED_100K 0x08 | ||
| 98 | # define DP_I2C_SPEED_400K 0x10 | ||
| 99 | # define DP_I2C_SPEED_1M 0x20 | ||
| 100 | |||
| 101 | #define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ | ||
| 102 | #define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ | ||
| 103 | |||
| 104 | /* Multiple stream transport */ | ||
| 105 | #define DP_MSTM_CAP 0x021 /* 1.2 */ | ||
| 106 | # define DP_MST_CAP (1 << 0) | ||
| 107 | |||
| 108 | #define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ | ||
| 83 | # define DP_PSR_IS_SUPPORTED 1 | 109 | # define DP_PSR_IS_SUPPORTED 1 |
| 84 | #define DP_PSR_CAPS 0x071 | 110 | #define DP_PSR_CAPS 0x071 /* XXX 1.2? */ |
| 85 | # define DP_PSR_NO_TRAIN_ON_EXIT 1 | 111 | # define DP_PSR_NO_TRAIN_ON_EXIT 1 |
| 86 | # define DP_PSR_SETUP_TIME_330 (0 << 1) | 112 | # define DP_PSR_SETUP_TIME_330 (0 << 1) |
| 87 | # define DP_PSR_SETUP_TIME_275 (1 << 1) | 113 | # define DP_PSR_SETUP_TIME_275 (1 << 1) |
| @@ -93,11 +119,36 @@ | |||
| 93 | # define DP_PSR_SETUP_TIME_MASK (7 << 1) | 119 | # define DP_PSR_SETUP_TIME_MASK (7 << 1) |
| 94 | # define DP_PSR_SETUP_TIME_SHIFT 1 | 120 | # define DP_PSR_SETUP_TIME_SHIFT 1 |
| 95 | 121 | ||
| 122 | /* | ||
| 123 | * 0x80-0x8f describe downstream port capabilities, but there are two layouts | ||
| 124 | * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not, | ||
| 125 | * each port's descriptor is one byte wide. If it was set, each port's is | ||
| 126 | * four bytes wide, starting with the one byte from the base info. As of | ||
| 127 | * DP interop v1.1a only VGA defines additional detail. | ||
| 128 | */ | ||
| 129 | |||
| 130 | /* offset 0 */ | ||
| 131 | #define DP_DOWNSTREAM_PORT_0 0x80 | ||
| 132 | # define DP_DS_PORT_TYPE_MASK (7 << 0) | ||
| 133 | # define DP_DS_PORT_TYPE_DP 0 | ||
| 134 | # define DP_DS_PORT_TYPE_VGA 1 | ||
| 135 | # define DP_DS_PORT_TYPE_DVI 2 | ||
| 136 | # define DP_DS_PORT_TYPE_HDMI 3 | ||
| 137 | # define DP_DS_PORT_TYPE_NON_EDID 4 | ||
| 138 | # define DP_DS_PORT_HPD (1 << 3) | ||
| 139 | /* offset 1 for VGA is maximum megapixels per second / 8 */ | ||
| 140 | /* offset 2 */ | ||
| 141 | # define DP_DS_VGA_MAX_BPC_MASK (3 << 0) | ||
| 142 | # define DP_DS_VGA_8BPC 0 | ||
| 143 | # define DP_DS_VGA_10BPC 1 | ||
| 144 | # define DP_DS_VGA_12BPC 2 | ||
| 145 | # define DP_DS_VGA_16BPC 3 | ||
| 146 | |||
| 96 | /* link configuration */ | 147 | /* link configuration */ |
| 97 | #define DP_LINK_BW_SET 0x100 | 148 | #define DP_LINK_BW_SET 0x100 |
| 98 | # define DP_LINK_BW_1_62 0x06 | 149 | # define DP_LINK_BW_1_62 0x06 |
| 99 | # define DP_LINK_BW_2_7 0x0a | 150 | # define DP_LINK_BW_2_7 0x0a |
| 100 | # define DP_LINK_BW_5_4 0x14 | 151 | # define DP_LINK_BW_5_4 0x14 /* 1.2 */ |
| 101 | 152 | ||
| 102 | #define DP_LANE_COUNT_SET 0x101 | 153 | #define DP_LANE_COUNT_SET 0x101 |
| 103 | # define DP_LANE_COUNT_MASK 0x0f | 154 | # define DP_LANE_COUNT_MASK 0x0f |
| @@ -107,7 +158,7 @@ | |||
| 107 | # define DP_TRAINING_PATTERN_DISABLE 0 | 158 | # define DP_TRAINING_PATTERN_DISABLE 0 |
| 108 | # define DP_TRAINING_PATTERN_1 1 | 159 | # define DP_TRAINING_PATTERN_1 1 |
| 109 | # define DP_TRAINING_PATTERN_2 2 | 160 | # define DP_TRAINING_PATTERN_2 2 |
| 110 | # define DP_TRAINING_PATTERN_3 3 | 161 | # define DP_TRAINING_PATTERN_3 3 /* 1.2 */ |
| 111 | # define DP_TRAINING_PATTERN_MASK 0x3 | 162 | # define DP_TRAINING_PATTERN_MASK 0x3 |
| 112 | 163 | ||
| 113 | # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) | 164 | # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) |
| @@ -148,24 +199,38 @@ | |||
| 148 | 199 | ||
| 149 | #define DP_DOWNSPREAD_CTRL 0x107 | 200 | #define DP_DOWNSPREAD_CTRL 0x107 |
| 150 | # define DP_SPREAD_AMP_0_5 (1 << 4) | 201 | # define DP_SPREAD_AMP_0_5 (1 << 4) |
| 202 | # define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ | ||
| 151 | 203 | ||
| 152 | #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 | 204 | #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 |
| 153 | # define DP_SET_ANSI_8B10B (1 << 0) | 205 | # define DP_SET_ANSI_8B10B (1 << 0) |
| 154 | 206 | ||
| 155 | #define DP_PSR_EN_CFG 0x170 | 207 | #define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ |
| 208 | /* bitmask as for DP_I2C_SPEED_CAP */ | ||
| 209 | |||
| 210 | #define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */ | ||
| 211 | |||
| 212 | #define DP_MSTM_CTRL 0x111 /* 1.2 */ | ||
| 213 | # define DP_MST_EN (1 << 0) | ||
| 214 | # define DP_UP_REQ_EN (1 << 1) | ||
| 215 | # define DP_UPSTREAM_IS_SRC (1 << 2) | ||
| 216 | |||
| 217 | #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ | ||
| 156 | # define DP_PSR_ENABLE (1 << 0) | 218 | # define DP_PSR_ENABLE (1 << 0) |
| 157 | # define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) | 219 | # define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) |
| 158 | # define DP_PSR_CRC_VERIFICATION (1 << 2) | 220 | # define DP_PSR_CRC_VERIFICATION (1 << 2) |
| 159 | # define DP_PSR_FRAME_CAPTURE (1 << 3) | 221 | # define DP_PSR_FRAME_CAPTURE (1 << 3) |
| 160 | 222 | ||
| 223 | #define DP_SINK_COUNT 0x200 | ||
| 224 | /* prior to 1.2 bit 7 was reserved mbz */ | ||
| 225 | # define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) | ||
| 226 | # define DP_SINK_CP_READY (1 << 6) | ||
| 227 | |||
| 161 | #define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 | 228 | #define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 |
| 162 | # define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) | 229 | # define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) |
| 163 | # define DP_AUTOMATED_TEST_REQUEST (1 << 1) | 230 | # define DP_AUTOMATED_TEST_REQUEST (1 << 1) |
| 164 | # define DP_CP_IRQ (1 << 2) | 231 | # define DP_CP_IRQ (1 << 2) |
| 165 | # define DP_SINK_SPECIFIC_IRQ (1 << 6) | 232 | # define DP_SINK_SPECIFIC_IRQ (1 << 6) |
| 166 | 233 | ||
| 167 | #define DP_EDP_CONFIGURATION_SET 0x10a | ||
| 168 | |||
| 169 | #define DP_LANE0_1_STATUS 0x202 | 234 | #define DP_LANE0_1_STATUS 0x202 |
| 170 | #define DP_LANE2_3_STATUS 0x203 | 235 | #define DP_LANE2_3_STATUS 0x203 |
| 171 | # define DP_LANE_CR_DONE (1 << 0) | 236 | # define DP_LANE_CR_DONE (1 << 0) |
| @@ -225,14 +290,14 @@ | |||
| 225 | # define DP_SET_POWER_D0 0x1 | 290 | # define DP_SET_POWER_D0 0x1 |
| 226 | # define DP_SET_POWER_D3 0x2 | 291 | # define DP_SET_POWER_D3 0x2 |
| 227 | 292 | ||
| 228 | #define DP_PSR_ERROR_STATUS 0x2006 | 293 | #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ |
| 229 | # define DP_PSR_LINK_CRC_ERROR (1 << 0) | 294 | # define DP_PSR_LINK_CRC_ERROR (1 << 0) |
| 230 | # define DP_PSR_RFB_STORAGE_ERROR (1 << 1) | 295 | # define DP_PSR_RFB_STORAGE_ERROR (1 << 1) |
| 231 | 296 | ||
| 232 | #define DP_PSR_ESI 0x2007 | 297 | #define DP_PSR_ESI 0x2007 /* XXX 1.2? */ |
| 233 | # define DP_PSR_CAPS_CHANGE (1 << 0) | 298 | # define DP_PSR_CAPS_CHANGE (1 << 0) |
| 234 | 299 | ||
| 235 | #define DP_PSR_STATUS 0x2008 | 300 | #define DP_PSR_STATUS 0x2008 /* XXX 1.2? */ |
| 236 | # define DP_PSR_SINK_INACTIVE 0 | 301 | # define DP_PSR_SINK_INACTIVE 0 |
| 237 | # define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 | 302 | # define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 |
| 238 | # define DP_PSR_SINK_ACTIVE_RFB 2 | 303 | # define DP_PSR_SINK_ACTIVE_RFB 2 |
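The new comment block explains that DPCD 0x80-0x8f carries one descriptor byte per downstream port unless DP_DETAILED_CAP_INFO_AVAILABLE is set, in which case each descriptor grows to four bytes and, for VGA, byte 1 encodes the maximum pixel rate in units of 8 Mpixel/s while byte 2 carries the maximum bpc code. The small parser below exercises exactly that layout for port 0; the sample DPCD bytes are invented.

/*
 * Decode the detailed downstream-port descriptor for port 0, per the
 * 0x80-0x8f comment added above.
 */
#include <stdio.h>
#include <stdint.h>

#define DP_DETAILED_CAP_INFO_AVAILABLE	(1 << 4)
#define DP_DS_PORT_TYPE_MASK		(7 << 0)
#define DP_DS_PORT_TYPE_VGA		1
#define DP_DS_PORT_HPD			(1 << 3)
#define DP_DS_VGA_MAX_BPC_MASK		(3 << 0)

static const char *const bpc_names[] = { "8", "10", "12", "16" };

static void dump_port0(uint8_t downstreamport_present, const uint8_t *desc)
{
	int detailed = downstreamport_present & DP_DETAILED_CAP_INFO_AVAILABLE;
	int type = desc[0] & DP_DS_PORT_TYPE_MASK;

	printf("port 0: type %d, HPD %s, descriptor %d byte(s)\n",
	       type, (desc[0] & DP_DS_PORT_HPD) ? "yes" : "no",
	       detailed ? 4 : 1);

	if (detailed && type == DP_DS_PORT_TYPE_VGA)
		printf("        VGA: max %u Mpixel/s, max %s bpc\n",
		       desc[1] * 8u,
		       bpc_names[desc[2] & DP_DS_VGA_MAX_BPC_MASK]);
}

int main(void)
{
	/* VGA converter, HPD capable, 200 Mpixel/s, 10 bpc */
	const uint8_t desc[4] = { DP_DS_PORT_TYPE_VGA | DP_DS_PORT_HPD,
				  200 / 8, 0x01, 0x00 };

	dump_port0(DP_DETAILED_CAP_INFO_AVAILABLE, desc);
	return 0;
}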
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index 1f2acdfbfd6d..3c13a3a4b158 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h | |||
| @@ -25,182 +25,10 @@ | |||
| 25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 26 | * OTHER DEALINGS IN THE SOFTWARE. | 26 | * OTHER DEALINGS IN THE SOFTWARE. |
| 27 | */ | 27 | */ |
| 28 | |||
| 29 | #ifndef _EXYNOS_DRM_H_ | 28 | #ifndef _EXYNOS_DRM_H_ |
| 30 | #define _EXYNOS_DRM_H_ | 29 | #define _EXYNOS_DRM_H_ |
| 31 | 30 | ||
| 32 | #include <drm/drm.h> | 31 | #include <uapi/drm/exynos_drm.h> |
| 33 | |||
| 34 | /** | ||
| 35 | * User-desired buffer creation information structure. | ||
| 36 | * | ||
| 37 | * @size: user-desired memory allocation size. | ||
| 38 | * - this size value would be page-aligned internally. | ||
| 39 | * @flags: user request for setting memory type or cache attributes. | ||
| 40 | * @handle: returned a handle to created gem object. | ||
| 41 | * - this handle will be set by gem module of kernel side. | ||
| 42 | */ | ||
| 43 | struct drm_exynos_gem_create { | ||
| 44 | uint64_t size; | ||
| 45 | unsigned int flags; | ||
| 46 | unsigned int handle; | ||
| 47 | }; | ||
| 48 | |||
| 49 | /** | ||
| 50 | * A structure for getting buffer offset. | ||
| 51 | * | ||
| 52 | * @handle: a pointer to gem object created. | ||
| 53 | * @pad: just padding to be 64-bit aligned. | ||
| 54 | * @offset: relatived offset value of the memory region allocated. | ||
| 55 | * - this value should be set by user. | ||
| 56 | */ | ||
| 57 | struct drm_exynos_gem_map_off { | ||
| 58 | unsigned int handle; | ||
| 59 | unsigned int pad; | ||
| 60 | uint64_t offset; | ||
| 61 | }; | ||
| 62 | |||
| 63 | /** | ||
| 64 | * A structure for mapping buffer. | ||
| 65 | * | ||
| 66 | * @handle: a handle to gem object created. | ||
| 67 | * @pad: just padding to be 64-bit aligned. | ||
| 68 | * @size: memory size to be mapped. | ||
| 69 | * @mapped: having user virtual address mmaped. | ||
| 70 | * - this variable would be filled by exynos gem module | ||
| 71 | * of kernel side with user virtual address which is allocated | ||
| 72 | * by do_mmap(). | ||
| 73 | */ | ||
| 74 | struct drm_exynos_gem_mmap { | ||
| 75 | unsigned int handle; | ||
| 76 | unsigned int pad; | ||
| 77 | uint64_t size; | ||
| 78 | uint64_t mapped; | ||
| 79 | }; | ||
| 80 | |||
| 81 | /** | ||
| 82 | * A structure to gem information. | ||
| 83 | * | ||
| 84 | * @handle: a handle to gem object created. | ||
| 85 | * @flags: flag value including memory type and cache attribute and | ||
| 86 | * this value would be set by driver. | ||
| 87 | * @size: size to memory region allocated by gem and this size would | ||
| 88 | * be set by driver. | ||
| 89 | */ | ||
| 90 | struct drm_exynos_gem_info { | ||
| 91 | unsigned int handle; | ||
| 92 | unsigned int flags; | ||
| 93 | uint64_t size; | ||
| 94 | }; | ||
| 95 | |||
| 96 | /** | ||
| 97 | * A structure for user connection request of virtual display. | ||
| 98 | * | ||
| 99 | * @connection: indicate whether doing connetion or not by user. | ||
| 100 | * @extensions: if this value is 1 then the vidi driver would need additional | ||
| 101 | * 128bytes edid data. | ||
| 102 | * @edid: the edid data pointer from user side. | ||
| 103 | */ | ||
| 104 | struct drm_exynos_vidi_connection { | ||
| 105 | unsigned int connection; | ||
| 106 | unsigned int extensions; | ||
| 107 | uint64_t edid; | ||
| 108 | }; | ||
| 109 | |||
| 110 | /* memory type definitions. */ | ||
| 111 | enum e_drm_exynos_gem_mem_type { | ||
| 112 | /* Physically Continuous memory and used as default. */ | ||
| 113 | EXYNOS_BO_CONTIG = 0 << 0, | ||
| 114 | /* Physically Non-Continuous memory. */ | ||
| 115 | EXYNOS_BO_NONCONTIG = 1 << 0, | ||
| 116 | /* non-cachable mapping and used as default. */ | ||
| 117 | EXYNOS_BO_NONCACHABLE = 0 << 1, | ||
| 118 | /* cachable mapping. */ | ||
| 119 | EXYNOS_BO_CACHABLE = 1 << 1, | ||
| 120 | /* write-combine mapping. */ | ||
| 121 | EXYNOS_BO_WC = 1 << 2, | ||
| 122 | EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE | | ||
| 123 | EXYNOS_BO_WC | ||
| 124 | }; | ||
| 125 | |||
| 126 | struct drm_exynos_g2d_get_ver { | ||
| 127 | __u32 major; | ||
| 128 | __u32 minor; | ||
| 129 | }; | ||
| 130 | |||
| 131 | struct drm_exynos_g2d_cmd { | ||
| 132 | __u32 offset; | ||
| 133 | __u32 data; | ||
| 134 | }; | ||
| 135 | |||
| 136 | enum drm_exynos_g2d_event_type { | ||
| 137 | G2D_EVENT_NOT, | ||
| 138 | G2D_EVENT_NONSTOP, | ||
| 139 | G2D_EVENT_STOP, /* not yet */ | ||
| 140 | }; | ||
| 141 | |||
| 142 | struct drm_exynos_g2d_set_cmdlist { | ||
| 143 | __u64 cmd; | ||
| 144 | __u64 cmd_gem; | ||
| 145 | __u32 cmd_nr; | ||
| 146 | __u32 cmd_gem_nr; | ||
| 147 | |||
| 148 | /* for g2d event */ | ||
| 149 | __u64 event_type; | ||
| 150 | __u64 user_data; | ||
| 151 | }; | ||
| 152 | |||
| 153 | struct drm_exynos_g2d_exec { | ||
| 154 | __u64 async; | ||
| 155 | }; | ||
| 156 | |||
| 157 | #define DRM_EXYNOS_GEM_CREATE 0x00 | ||
| 158 | #define DRM_EXYNOS_GEM_MAP_OFFSET 0x01 | ||
| 159 | #define DRM_EXYNOS_GEM_MMAP 0x02 | ||
| 160 | /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ | ||
| 161 | #define DRM_EXYNOS_GEM_GET 0x04 | ||
| 162 | #define DRM_EXYNOS_VIDI_CONNECTION 0x07 | ||
| 163 | |||
| 164 | /* G2D */ | ||
| 165 | #define DRM_EXYNOS_G2D_GET_VER 0x20 | ||
| 166 | #define DRM_EXYNOS_G2D_SET_CMDLIST 0x21 | ||
| 167 | #define DRM_EXYNOS_G2D_EXEC 0x22 | ||
| 168 | |||
| 169 | #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 170 | DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) | ||
| 171 | |||
| 172 | #define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 173 | DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off) | ||
| 174 | |||
| 175 | #define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 176 | DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap) | ||
| 177 | |||
| 178 | #define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 179 | DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info) | ||
| 180 | |||
| 181 | #define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 182 | DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection) | ||
| 183 | |||
| 184 | #define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 185 | DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver) | ||
| 186 | #define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 187 | DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist) | ||
| 188 | #define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 189 | DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec) | ||
| 190 | |||
| 191 | /* EXYNOS specific events */ | ||
| 192 | #define DRM_EXYNOS_G2D_EVENT 0x80000000 | ||
| 193 | |||
| 194 | struct drm_exynos_g2d_event { | ||
| 195 | struct drm_event base; | ||
| 196 | __u64 user_data; | ||
| 197 | __u32 tv_sec; | ||
| 198 | __u32 tv_usec; | ||
| 199 | __u32 cmdlist_no; | ||
| 200 | __u32 reserved; | ||
| 201 | }; | ||
| 202 | |||
| 203 | #ifdef __KERNEL__ | ||
| 204 | 32 | ||
| 205 | /** | 33 | /** |
| 206 | * A structure for lcd panel information. | 34 | * A structure for lcd panel information. |
| @@ -257,5 +85,4 @@ struct exynos_drm_hdmi_pdata { | |||
| 257 | int (*get_hpd)(void); | 85 | int (*get_hpd)(void); |
| 258 | }; | 86 | }; |
| 259 | 87 | ||
| 260 | #endif /* __KERNEL__ */ | ||
| 261 | #endif /* _EXYNOS_DRM_H_ */ | 88 | #endif /* _EXYNOS_DRM_H_ */ |
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index a940d4e18917..63d609d8a3f6 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
| @@ -23,933 +23,15 @@ | |||
| 23 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 23 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 24 | * | 24 | * |
| 25 | */ | 25 | */ |
| 26 | |||
| 27 | #ifndef _I915_DRM_H_ | 26 | #ifndef _I915_DRM_H_ |
| 28 | #define _I915_DRM_H_ | 27 | #define _I915_DRM_H_ |
| 29 | 28 | ||
| 30 | #include <drm/drm.h> | 29 | #include <uapi/drm/i915_drm.h> |
| 31 | |||
| 32 | /* Please note that modifications to all structs defined here are | ||
| 33 | * subject to backwards-compatibility constraints. | ||
| 34 | */ | ||
| 35 | 30 | ||
| 36 | #ifdef __KERNEL__ | ||
| 37 | /* For use by IPS driver */ | 31 | /* For use by IPS driver */ |
| 38 | extern unsigned long i915_read_mch_val(void); | 32 | extern unsigned long i915_read_mch_val(void); |
| 39 | extern bool i915_gpu_raise(void); | 33 | extern bool i915_gpu_raise(void); |
| 40 | extern bool i915_gpu_lower(void); | 34 | extern bool i915_gpu_lower(void); |
| 41 | extern bool i915_gpu_busy(void); | 35 | extern bool i915_gpu_busy(void); |
| 42 | extern bool i915_gpu_turbo_disable(void); | 36 | extern bool i915_gpu_turbo_disable(void); |
| 43 | #endif | ||
| 44 | |||
| 45 | /* Each region is a minimum of 16k, and there are at most 255 of them. | ||
| 46 | */ | ||
| 47 | #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use | ||
| 48 | * of chars for next/prev indices */ | ||
| 49 | #define I915_LOG_MIN_TEX_REGION_SIZE 14 | ||
| 50 | |||
| 51 | typedef struct _drm_i915_init { | ||
| 52 | enum { | ||
| 53 | I915_INIT_DMA = 0x01, | ||
| 54 | I915_CLEANUP_DMA = 0x02, | ||
| 55 | I915_RESUME_DMA = 0x03 | ||
| 56 | } func; | ||
| 57 | unsigned int mmio_offset; | ||
| 58 | int sarea_priv_offset; | ||
| 59 | unsigned int ring_start; | ||
| 60 | unsigned int ring_end; | ||
| 61 | unsigned int ring_size; | ||
| 62 | unsigned int front_offset; | ||
| 63 | unsigned int back_offset; | ||
| 64 | unsigned int depth_offset; | ||
| 65 | unsigned int w; | ||
| 66 | unsigned int h; | ||
| 67 | unsigned int pitch; | ||
| 68 | unsigned int pitch_bits; | ||
| 69 | unsigned int back_pitch; | ||
| 70 | unsigned int depth_pitch; | ||
| 71 | unsigned int cpp; | ||
| 72 | unsigned int chipset; | ||
| 73 | } drm_i915_init_t; | ||
| 74 | |||
| 75 | typedef struct _drm_i915_sarea { | ||
| 76 | struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; | ||
| 77 | int last_upload; /* last time texture was uploaded */ | ||
| 78 | int last_enqueue; /* last time a buffer was enqueued */ | ||
| 79 | int last_dispatch; /* age of the most recently dispatched buffer */ | ||
| 80 | int ctxOwner; /* last context to upload state */ | ||
| 81 | int texAge; | ||
| 82 | int pf_enabled; /* is pageflipping allowed? */ | ||
| 83 | int pf_active; | ||
| 84 | int pf_current_page; /* which buffer is being displayed? */ | ||
| 85 | int perf_boxes; /* performance boxes to be displayed */ | ||
| 86 | int width, height; /* screen size in pixels */ | ||
| 87 | |||
| 88 | drm_handle_t front_handle; | ||
| 89 | int front_offset; | ||
| 90 | int front_size; | ||
| 91 | |||
| 92 | drm_handle_t back_handle; | ||
| 93 | int back_offset; | ||
| 94 | int back_size; | ||
| 95 | |||
| 96 | drm_handle_t depth_handle; | ||
| 97 | int depth_offset; | ||
| 98 | int depth_size; | ||
| 99 | |||
| 100 | drm_handle_t tex_handle; | ||
| 101 | int tex_offset; | ||
| 102 | int tex_size; | ||
| 103 | int log_tex_granularity; | ||
| 104 | int pitch; | ||
| 105 | int rotation; /* 0, 90, 180 or 270 */ | ||
| 106 | int rotated_offset; | ||
| 107 | int rotated_size; | ||
| 108 | int rotated_pitch; | ||
| 109 | int virtualX, virtualY; | ||
| 110 | |||
| 111 | unsigned int front_tiled; | ||
| 112 | unsigned int back_tiled; | ||
| 113 | unsigned int depth_tiled; | ||
| 114 | unsigned int rotated_tiled; | ||
| 115 | unsigned int rotated2_tiled; | ||
| 116 | |||
| 117 | int pipeA_x; | ||
| 118 | int pipeA_y; | ||
| 119 | int pipeA_w; | ||
| 120 | int pipeA_h; | ||
| 121 | int pipeB_x; | ||
| 122 | int pipeB_y; | ||
| 123 | int pipeB_w; | ||
| 124 | int pipeB_h; | ||
| 125 | |||
| 126 | /* fill out some space for old userspace triple buffer */ | ||
| 127 | drm_handle_t unused_handle; | ||
| 128 | __u32 unused1, unused2, unused3; | ||
| 129 | |||
| 130 | /* buffer object handles for static buffers. May change | ||
| 131 | * over the lifetime of the client. | ||
| 132 | */ | ||
| 133 | __u32 front_bo_handle; | ||
| 134 | __u32 back_bo_handle; | ||
| 135 | __u32 unused_bo_handle; | ||
| 136 | __u32 depth_bo_handle; | ||
| 137 | |||
| 138 | } drm_i915_sarea_t; | ||
| 139 | |||
| 140 | /* due to userspace building against these headers we need some compat here */ | ||
| 141 | #define planeA_x pipeA_x | ||
| 142 | #define planeA_y pipeA_y | ||
| 143 | #define planeA_w pipeA_w | ||
| 144 | #define planeA_h pipeA_h | ||
| 145 | #define planeB_x pipeB_x | ||
| 146 | #define planeB_y pipeB_y | ||
| 147 | #define planeB_w pipeB_w | ||
| 148 | #define planeB_h pipeB_h | ||
| 149 | |||
| 150 | /* Flags for perf_boxes | ||
| 151 | */ | ||
| 152 | #define I915_BOX_RING_EMPTY 0x1 | ||
| 153 | #define I915_BOX_FLIP 0x2 | ||
| 154 | #define I915_BOX_WAIT 0x4 | ||
| 155 | #define I915_BOX_TEXTURE_LOAD 0x8 | ||
| 156 | #define I915_BOX_LOST_CONTEXT 0x10 | ||
| 157 | |||
| 158 | /* I915 specific ioctls | ||
| 159 | * The device specific ioctl range is 0x40 to 0x79. | ||
| 160 | */ | ||
| 161 | #define DRM_I915_INIT 0x00 | ||
| 162 | #define DRM_I915_FLUSH 0x01 | ||
| 163 | #define DRM_I915_FLIP 0x02 | ||
| 164 | #define DRM_I915_BATCHBUFFER 0x03 | ||
| 165 | #define DRM_I915_IRQ_EMIT 0x04 | ||
| 166 | #define DRM_I915_IRQ_WAIT 0x05 | ||
| 167 | #define DRM_I915_GETPARAM 0x06 | ||
| 168 | #define DRM_I915_SETPARAM 0x07 | ||
| 169 | #define DRM_I915_ALLOC 0x08 | ||
| 170 | #define DRM_I915_FREE 0x09 | ||
| 171 | #define DRM_I915_INIT_HEAP 0x0a | ||
| 172 | #define DRM_I915_CMDBUFFER 0x0b | ||
| 173 | #define DRM_I915_DESTROY_HEAP 0x0c | ||
| 174 | #define DRM_I915_SET_VBLANK_PIPE 0x0d | ||
| 175 | #define DRM_I915_GET_VBLANK_PIPE 0x0e | ||
| 176 | #define DRM_I915_VBLANK_SWAP 0x0f | ||
| 177 | #define DRM_I915_HWS_ADDR 0x11 | ||
| 178 | #define DRM_I915_GEM_INIT 0x13 | ||
| 179 | #define DRM_I915_GEM_EXECBUFFER 0x14 | ||
| 180 | #define DRM_I915_GEM_PIN 0x15 | ||
| 181 | #define DRM_I915_GEM_UNPIN 0x16 | ||
| 182 | #define DRM_I915_GEM_BUSY 0x17 | ||
| 183 | #define DRM_I915_GEM_THROTTLE 0x18 | ||
| 184 | #define DRM_I915_GEM_ENTERVT 0x19 | ||
| 185 | #define DRM_I915_GEM_LEAVEVT 0x1a | ||
| 186 | #define DRM_I915_GEM_CREATE 0x1b | ||
| 187 | #define DRM_I915_GEM_PREAD 0x1c | ||
| 188 | #define DRM_I915_GEM_PWRITE 0x1d | ||
| 189 | #define DRM_I915_GEM_MMAP 0x1e | ||
| 190 | #define DRM_I915_GEM_SET_DOMAIN 0x1f | ||
| 191 | #define DRM_I915_GEM_SW_FINISH 0x20 | ||
| 192 | #define DRM_I915_GEM_SET_TILING 0x21 | ||
| 193 | #define DRM_I915_GEM_GET_TILING 0x22 | ||
| 194 | #define DRM_I915_GEM_GET_APERTURE 0x23 | ||
| 195 | #define DRM_I915_GEM_MMAP_GTT 0x24 | ||
| 196 | #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 | ||
| 197 | #define DRM_I915_GEM_MADVISE 0x26 | ||
| 198 | #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 | ||
| 199 | #define DRM_I915_OVERLAY_ATTRS 0x28 | ||
| 200 | #define DRM_I915_GEM_EXECBUFFER2 0x29 | ||
| 201 | #define DRM_I915_GET_SPRITE_COLORKEY 0x2a | ||
| 202 | #define DRM_I915_SET_SPRITE_COLORKEY 0x2b | ||
| 203 | #define DRM_I915_GEM_WAIT 0x2c | ||
| 204 | #define DRM_I915_GEM_CONTEXT_CREATE 0x2d | ||
| 205 | #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e | ||
| 206 | #define DRM_I915_GEM_SET_CACHING 0x2f | ||
| 207 | #define DRM_I915_GEM_GET_CACHING 0x30 | ||
| 208 | #define DRM_I915_REG_READ 0x31 | ||
| 209 | |||
| 210 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | ||
| 211 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | ||
| 212 | #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) | ||
| 213 | #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) | ||
| 214 | #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) | ||
| 215 | #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) | ||
| 216 | #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) | ||
| 217 | #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) | ||
| 218 | #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) | ||
| 219 | #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) | ||
| 220 | #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) | ||
| 221 | #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) | ||
| 222 | #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) | ||
| 223 | #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | ||
| 224 | #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | ||
| 225 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) | ||
| 226 | #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) | ||
| 227 | #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) | ||
| 228 | #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) | ||
| 229 | #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) | ||
| 230 | #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) | ||
| 231 | #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) | ||
| 232 | #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) | ||
| 233 | #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) | ||
| 234 | #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) | ||
| 235 | #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) | ||
| 236 | #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) | ||
| 237 | #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) | ||
| 238 | #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) | ||
| 239 | #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) | ||
| 240 | #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) | ||
| 241 | #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) | ||
| 242 | #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) | ||
| 243 | #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) | ||
| 244 | #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) | ||
| 245 | #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) | ||
| 246 | #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) | ||
| 247 | #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) | ||
| 248 | #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) | ||
| 249 | #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) | ||
| 250 | #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) | ||
| 251 | #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) | ||
| 252 | #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) | ||
| 253 | #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) | ||
| 254 | #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) | ||
| 255 | #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) | ||
| 256 | #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) | ||
| 257 | #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) | ||
| 258 | |||
| 259 | /* Allow drivers to submit batchbuffers directly to hardware, relying | ||
| 260 | * on the security mechanisms provided by hardware. | ||
| 261 | */ | ||
| 262 | typedef struct drm_i915_batchbuffer { | ||
| 263 | int start; /* agp offset */ | ||
| 264 | int used; /* nr bytes in use */ | ||
| 265 | int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ | ||
| 266 | int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ | ||
| 267 | int num_cliprects; /* mulitpass with multiple cliprects? */ | ||
| 268 | struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ | ||
| 269 | } drm_i915_batchbuffer_t; | ||
| 270 | |||
| 271 | /* As above, but pass a pointer to userspace buffer which can be | ||
| 272 | * validated by the kernel prior to sending to hardware. | ||
| 273 | */ | ||
| 274 | typedef struct _drm_i915_cmdbuffer { | ||
| 275 | char __user *buf; /* pointer to userspace command buffer */ | ||
| 276 | int sz; /* nr bytes in buf */ | ||
| 277 | int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ | ||
| 278 | int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ | ||
| 279 | int num_cliprects; /* multipass with multiple cliprects? */ | ||
| 280 | struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ | ||
| 281 | } drm_i915_cmdbuffer_t; | ||
| 282 | |||
| 283 | /* Userspace can request & wait on irq's: | ||
| 284 | */ | ||
| 285 | typedef struct drm_i915_irq_emit { | ||
| 286 | int __user *irq_seq; | ||
| 287 | } drm_i915_irq_emit_t; | ||
| 288 | |||
| 289 | typedef struct drm_i915_irq_wait { | ||
| 290 | int irq_seq; | ||
| 291 | } drm_i915_irq_wait_t; | ||
| 292 | |||
| 293 | /* Ioctl to query kernel params: | ||
| 294 | */ | ||
| 295 | #define I915_PARAM_IRQ_ACTIVE 1 | ||
| 296 | #define I915_PARAM_ALLOW_BATCHBUFFER 2 | ||
| 297 | #define I915_PARAM_LAST_DISPATCH 3 | ||
| 298 | #define I915_PARAM_CHIPSET_ID 4 | ||
| 299 | #define I915_PARAM_HAS_GEM 5 | ||
| 300 | #define I915_PARAM_NUM_FENCES_AVAIL 6 | ||
| 301 | #define I915_PARAM_HAS_OVERLAY 7 | ||
| 302 | #define I915_PARAM_HAS_PAGEFLIPPING 8 | ||
| 303 | #define I915_PARAM_HAS_EXECBUF2 9 | ||
| 304 | #define I915_PARAM_HAS_BSD 10 | ||
| 305 | #define I915_PARAM_HAS_BLT 11 | ||
| 306 | #define I915_PARAM_HAS_RELAXED_FENCING 12 | ||
| 307 | #define I915_PARAM_HAS_COHERENT_RINGS 13 | ||
| 308 | #define I915_PARAM_HAS_EXEC_CONSTANTS 14 | ||
| 309 | #define I915_PARAM_HAS_RELAXED_DELTA 15 | ||
| 310 | #define I915_PARAM_HAS_GEN7_SOL_RESET 16 | ||
| 311 | #define I915_PARAM_HAS_LLC 17 | ||
| 312 | #define I915_PARAM_HAS_ALIASING_PPGTT 18 | ||
| 313 | #define I915_PARAM_HAS_WAIT_TIMEOUT 19 | ||
| 314 | #define I915_PARAM_HAS_SEMAPHORES 20 | ||
| 315 | #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 | ||
| 316 | #define I915_PARAM_RSVD_FOR_FUTURE_USE 22 | ||
| 317 | |||
| 318 | typedef struct drm_i915_getparam { | ||
| 319 | int param; | ||
| 320 | int __user *value; | ||
| 321 | } drm_i915_getparam_t; | ||
| 322 | |||
| 323 | /* Ioctl to set kernel params: | ||
| 324 | */ | ||
| 325 | #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 | ||
| 326 | #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 | ||
| 327 | #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 | ||
| 328 | #define I915_SETPARAM_NUM_USED_FENCES 4 | ||
| 329 | |||
| 330 | typedef struct drm_i915_setparam { | ||
| 331 | int param; | ||
| 332 | int value; | ||
| 333 | } drm_i915_setparam_t; | ||
| 334 | |||
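To make the GETPARAM interface above concrete, here is a minimal userspace sketch (not part of this patch) that queries I915_PARAM_CHIPSET_ID through plain ioctl(); the helper name and the already-open DRM fd are assumptions for illustration only:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Hypothetical helper: returns the chipset id, or -1 on failure. */
    static int i915_get_chipset_id(int fd)
    {
            int value = 0;
            drm_i915_getparam_t gp = {
                    .param = I915_PARAM_CHIPSET_ID,
                    .value = &value,        /* kernel writes the result here */
            };

            if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                    return -1;              /* parameter unknown or call failed */
            return value;
    }

The same pattern applies to any of the I915_PARAM_* feature probes; a failed ioctl is how userspace detects that a parameter is not supported by the running kernel.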
| 335 | /* A memory manager for regions of shared memory: | ||
| 336 | */ | ||
| 337 | #define I915_MEM_REGION_AGP 1 | ||
| 338 | |||
| 339 | typedef struct drm_i915_mem_alloc { | ||
| 340 | int region; | ||
| 341 | int alignment; | ||
| 342 | int size; | ||
| 343 | int __user *region_offset; /* offset from start of fb or agp */ | ||
| 344 | } drm_i915_mem_alloc_t; | ||
| 345 | |||
| 346 | typedef struct drm_i915_mem_free { | ||
| 347 | int region; | ||
| 348 | int region_offset; | ||
| 349 | } drm_i915_mem_free_t; | ||
| 350 | |||
| 351 | typedef struct drm_i915_mem_init_heap { | ||
| 352 | int region; | ||
| 353 | int size; | ||
| 354 | int start; | ||
| 355 | } drm_i915_mem_init_heap_t; | ||
| 356 | |||
| 357 | /* Allow memory manager to be torn down and re-initialized (eg on | ||
| 358 | * rotate): | ||
| 359 | */ | ||
| 360 | typedef struct drm_i915_mem_destroy_heap { | ||
| 361 | int region; | ||
| 362 | } drm_i915_mem_destroy_heap_t; | ||
| 363 | |||
| 364 | /* Allow X server to configure which pipes to monitor for vblank signals | ||
| 365 | */ | ||
| 366 | #define DRM_I915_VBLANK_PIPE_A 1 | ||
| 367 | #define DRM_I915_VBLANK_PIPE_B 2 | ||
| 368 | |||
| 369 | typedef struct drm_i915_vblank_pipe { | ||
| 370 | int pipe; | ||
| 371 | } drm_i915_vblank_pipe_t; | ||
| 372 | |||
| 373 | /* Schedule buffer swap at given vertical blank: | ||
| 374 | */ | ||
| 375 | typedef struct drm_i915_vblank_swap { | ||
| 376 | drm_drawable_t drawable; | ||
| 377 | enum drm_vblank_seq_type seqtype; | ||
| 378 | unsigned int sequence; | ||
| 379 | } drm_i915_vblank_swap_t; | ||
| 380 | |||
| 381 | typedef struct drm_i915_hws_addr { | ||
| 382 | __u64 addr; | ||
| 383 | } drm_i915_hws_addr_t; | ||
| 384 | |||
| 385 | struct drm_i915_gem_init { | ||
| 386 | /** | ||
| 387 | * Beginning offset in the GTT to be managed by the DRM memory | ||
| 388 | * manager. | ||
| 389 | */ | ||
| 390 | __u64 gtt_start; | ||
| 391 | /** | ||
| 392 | * Ending offset in the GTT to be managed by the DRM memory | ||
| 393 | * manager. | ||
| 394 | */ | ||
| 395 | __u64 gtt_end; | ||
| 396 | }; | ||
| 397 | |||
| 398 | struct drm_i915_gem_create { | ||
| 399 | /** | ||
| 400 | * Requested size for the object. | ||
| 401 | * | ||
| 402 | * The (page-aligned) allocated size for the object will be returned. | ||
| 403 | */ | ||
| 404 | __u64 size; | ||
| 405 | /** | ||
| 406 | * Returned handle for the object. | ||
| 407 | * | ||
| 408 | * Object handles are nonzero. | ||
| 409 | */ | ||
| 410 | __u32 handle; | ||
| 411 | __u32 pad; | ||
| 412 | }; | ||
| 413 | |||
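As a hedged illustration of the create ioctl (the helper name and fd handling are assumptions, not from the patch): only the requested size needs to be filled in, and the kernel returns a nonzero handle.

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch: create a GEM object; returns the handle, or 0 on failure. */
    static __u32 gem_create(int fd, __u64 size)
    {
            struct drm_i915_gem_create create = {
                    .size = size,   /* rounded up to a page multiple by the kernel */
            };

            if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
                    return 0;       /* 0 is never a valid object handle */
            return create.handle;
    }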
| 414 | struct drm_i915_gem_pread { | ||
| 415 | /** Handle for the object being read. */ | ||
| 416 | __u32 handle; | ||
| 417 | __u32 pad; | ||
| 418 | /** Offset into the object to read from */ | ||
| 419 | __u64 offset; | ||
| 420 | /** Length of data to read */ | ||
| 421 | __u64 size; | ||
| 422 | /** | ||
| 423 | * Pointer to write the data into. | ||
| 424 | * | ||
| 425 | * This is a fixed-size type for 32/64 compatibility. | ||
| 426 | */ | ||
| 427 | __u64 data_ptr; | ||
| 428 | }; | ||
| 429 | |||
| 430 | struct drm_i915_gem_pwrite { | ||
| 431 | /** Handle for the object being written to. */ | ||
| 432 | __u32 handle; | ||
| 433 | __u32 pad; | ||
| 434 | /** Offset into the object to write to */ | ||
| 435 | __u64 offset; | ||
| 436 | /** Length of data to write */ | ||
| 437 | __u64 size; | ||
| 438 | /** | ||
| 439 | * Pointer to read the data from. | ||
| 440 | * | ||
| 441 | * This is a fixed-size type for 32/64 compatibility. | ||
| 442 | */ | ||
| 443 | __u64 data_ptr; | ||
| 444 | }; | ||
| 445 | |||
| 446 | struct drm_i915_gem_mmap { | ||
| 447 | /** Handle for the object being mapped. */ | ||
| 448 | __u32 handle; | ||
| 449 | __u32 pad; | ||
| 450 | /** Offset in the object to map. */ | ||
| 451 | __u64 offset; | ||
| 452 | /** | ||
| 453 | * Length of data to map. | ||
| 454 | * | ||
| 455 | * The value will be page-aligned. | ||
| 456 | */ | ||
| 457 | __u64 size; | ||
| 458 | /** | ||
| 459 | * Returned pointer the data was mapped at. | ||
| 460 | * | ||
| 461 | * This is a fixed-size type for 32/64 compatibility. | ||
| 462 | */ | ||
| 463 | __u64 addr_ptr; | ||
| 464 | }; | ||
| 465 | |||
| 466 | struct drm_i915_gem_mmap_gtt { | ||
| 467 | /** Handle for the object being mapped. */ | ||
| 468 | __u32 handle; | ||
| 469 | __u32 pad; | ||
| 470 | /** | ||
| 471 | * Fake offset to use for subsequent mmap call | ||
| 472 | * | ||
| 473 | * This is a fixed-size type for 32/64 compatibility. | ||
| 474 | */ | ||
| 475 | __u64 offset; | ||
| 476 | }; | ||
| 477 | |||
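The GTT mapping path is a two-step flow: the ioctl hands back a fake offset, which is then passed to an ordinary mmap() on the same fd. A sketch under the assumption of a 64-bit off_t; the helper name is illustrative:

    #include <stddef.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    /* Sketch: map 'size' bytes of a GEM object through the GTT aperture. */
    static void *gem_mmap_gtt(int fd, __u32 handle, size_t size)
    {
            struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
            void *ptr;

            if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) != 0)
                    return NULL;
            /* arg.offset is only meaningful as an mmap offset on this fd. */
            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, arg.offset);
            return ptr == MAP_FAILED ? NULL : ptr;
    }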
| 478 | struct drm_i915_gem_set_domain { | ||
| 479 | /** Handle for the object */ | ||
| 480 | __u32 handle; | ||
| 481 | |||
| 482 | /** New read domains */ | ||
| 483 | __u32 read_domains; | ||
| 484 | |||
| 485 | /** New write domain */ | ||
| 486 | __u32 write_domain; | ||
| 487 | }; | ||
| 488 | |||
| 489 | struct drm_i915_gem_sw_finish { | ||
| 490 | /** Handle for the object */ | ||
| 491 | __u32 handle; | ||
| 492 | }; | ||
| 493 | |||
| 494 | struct drm_i915_gem_relocation_entry { | ||
| 495 | /** | ||
| 496 | * Handle of the buffer being pointed to by this relocation entry. | ||
| 497 | * | ||
| 498 | * It's appealing to make this be an index into the mm_validate_entry | ||
| 499 | * list to refer to the buffer, but this allows the driver to create | ||
| 500 | * a relocation list for state buffers and not re-write it per | ||
| 501 | * exec using the buffer. | ||
| 502 | */ | ||
| 503 | __u32 target_handle; | ||
| 504 | |||
| 505 | /** | ||
| 506 | * Value to be added to the offset of the target buffer to make up | ||
| 507 | * the relocation entry. | ||
| 508 | */ | ||
| 509 | __u32 delta; | ||
| 510 | |||
| 511 | /** Offset in the buffer the relocation entry will be written into */ | ||
| 512 | __u64 offset; | ||
| 513 | |||
| 514 | /** | ||
| 515 | * Offset value of the target buffer that the relocation entry was last | ||
| 516 | * written as. | ||
| 517 | * | ||
| 518 | * If the buffer has the same offset as last time, we can skip syncing | ||
| 519 | * and writing the relocation. This value is written back out by | ||
| 520 | * the execbuffer ioctl when the relocation is written. | ||
| 521 | */ | ||
| 522 | __u64 presumed_offset; | ||
| 523 | |||
| 524 | /** | ||
| 525 | * Target memory domains read by this operation. | ||
| 526 | */ | ||
| 527 | __u32 read_domains; | ||
| 528 | |||
| 529 | /** | ||
| 530 | * Target memory domains written by this operation. | ||
| 531 | * | ||
| 532 | * Note that only one domain may be written by the whole | ||
| 533 | * execbuffer operation, so that where there are conflicts, | ||
| 534 | * the application will get -EINVAL back. | ||
| 535 | */ | ||
| 536 | __u32 write_domain; | ||
| 537 | }; | ||
| 538 | |||
| 539 | /** @{ | ||
| 540 | * Intel memory domains | ||
| 541 | * | ||
| 542 | * Most of these just align with the various caches in | ||
| 543 | * the system and are used to flush and invalidate as | ||
| 544 | * objects end up cached in different domains. | ||
| 545 | */ | ||
| 546 | /** CPU cache */ | ||
| 547 | #define I915_GEM_DOMAIN_CPU 0x00000001 | ||
| 548 | /** Render cache, used by 2D and 3D drawing */ | ||
| 549 | #define I915_GEM_DOMAIN_RENDER 0x00000002 | ||
| 550 | /** Sampler cache, used by texture engine */ | ||
| 551 | #define I915_GEM_DOMAIN_SAMPLER 0x00000004 | ||
| 552 | /** Command queue, used to load batch buffers */ | ||
| 553 | #define I915_GEM_DOMAIN_COMMAND 0x00000008 | ||
| 554 | /** Instruction cache, used by shader programs */ | ||
| 555 | #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 | ||
| 556 | /** Vertex address cache */ | ||
| 557 | #define I915_GEM_DOMAIN_VERTEX 0x00000020 | ||
| 558 | /** GTT domain - aperture and scanout */ | ||
| 559 | #define I915_GEM_DOMAIN_GTT 0x00000040 | ||
| 560 | /** @} */ | ||
| 561 | |||
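For orientation, a relocation entry for a batch that embeds a pointer to another buffer could be filled as below; the helper and the choice of a read-only render-domain target are illustrative assumptions, not part of the patch.

    #include <drm/i915_drm.h>

    /* Sketch: patch the batch at 'batch_offset' with target + delta; the kernel
     * skips the rewrite if the target still sits at 'presumed'. */
    static struct drm_i915_gem_relocation_entry
    make_reloc(__u32 target, __u32 delta, __u64 batch_offset, __u64 presumed)
    {
            struct drm_i915_gem_relocation_entry reloc = {
                    .target_handle   = target,
                    .delta           = delta,
                    .offset          = batch_offset,
                    .presumed_offset = presumed,
                    .read_domains    = I915_GEM_DOMAIN_RENDER,
                    .write_domain    = 0,   /* target is only read here */
            };
            return reloc;
    }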
| 562 | struct drm_i915_gem_exec_object { | ||
| 563 | /** | ||
| 564 | * User's handle for a buffer to be bound into the GTT for this | ||
| 565 | * operation. | ||
| 566 | */ | ||
| 567 | __u32 handle; | ||
| 568 | |||
| 569 | /** Number of relocations to be performed on this buffer */ | ||
| 570 | __u32 relocation_count; | ||
| 571 | /** | ||
| 572 | * Pointer to array of struct drm_i915_gem_relocation_entry containing | ||
| 573 | * the relocations to be performed in this buffer. | ||
| 574 | */ | ||
| 575 | __u64 relocs_ptr; | ||
| 576 | |||
| 577 | /** Required alignment in graphics aperture */ | ||
| 578 | __u64 alignment; | ||
| 579 | |||
| 580 | /** | ||
| 581 | * Returned value of the updated offset of the object, for future | ||
| 582 | * presumed_offset writes. | ||
| 583 | */ | ||
| 584 | __u64 offset; | ||
| 585 | }; | ||
| 586 | |||
| 587 | struct drm_i915_gem_execbuffer { | ||
| 588 | /** | ||
| 589 | * List of buffers to be validated with their relocations to be | ||
| 590 | * performed on them. | ||
| 591 | * | ||
| 592 | * This is a pointer to an array of struct drm_i915_gem_validate_entry. | ||
| 593 | * | ||
| 594 | * These buffers must be listed in an order such that all relocations | ||
| 595 | * a buffer is performing refer to buffers that have already appeared | ||
| 596 | * in the validate list. | ||
| 597 | */ | ||
| 598 | __u64 buffers_ptr; | ||
| 599 | __u32 buffer_count; | ||
| 600 | |||
| 601 | /** Offset in the batchbuffer to start execution from. */ | ||
| 602 | __u32 batch_start_offset; | ||
| 603 | /** Bytes used in batchbuffer from batch_start_offset */ | ||
| 604 | __u32 batch_len; | ||
| 605 | __u32 DR1; | ||
| 606 | __u32 DR4; | ||
| 607 | __u32 num_cliprects; | ||
| 608 | /** This is a struct drm_clip_rect *cliprects */ | ||
| 609 | __u64 cliprects_ptr; | ||
| 610 | }; | ||
| 611 | |||
| 612 | struct drm_i915_gem_exec_object2 { | ||
| 613 | /** | ||
| 614 | * User's handle for a buffer to be bound into the GTT for this | ||
| 615 | * operation. | ||
| 616 | */ | ||
| 617 | __u32 handle; | ||
| 618 | |||
| 619 | /** Number of relocations to be performed on this buffer */ | ||
| 620 | __u32 relocation_count; | ||
| 621 | /** | ||
| 622 | * Pointer to array of struct drm_i915_gem_relocation_entry containing | ||
| 623 | * the relocations to be performed in this buffer. | ||
| 624 | */ | ||
| 625 | __u64 relocs_ptr; | ||
| 626 | |||
| 627 | /** Required alignment in graphics aperture */ | ||
| 628 | __u64 alignment; | ||
| 629 | |||
| 630 | /** | ||
| 631 | * Returned value of the updated offset of the object, for future | ||
| 632 | * presumed_offset writes. | ||
| 633 | */ | ||
| 634 | __u64 offset; | ||
| 635 | |||
| 636 | #define EXEC_OBJECT_NEEDS_FENCE (1<<0) | ||
| 637 | __u64 flags; | ||
| 638 | __u64 rsvd1; | ||
| 639 | __u64 rsvd2; | ||
| 640 | }; | ||
| 641 | |||
| 642 | struct drm_i915_gem_execbuffer2 { | ||
| 643 | /** | ||
| 644 | * List of gem_exec_object2 structs | ||
| 645 | */ | ||
| 646 | __u64 buffers_ptr; | ||
| 647 | __u32 buffer_count; | ||
| 648 | |||
| 649 | /** Offset in the batchbuffer to start execution from. */ | ||
| 650 | __u32 batch_start_offset; | ||
| 651 | /** Bytes used in batchbuffer from batch_start_offset */ | ||
| 652 | __u32 batch_len; | ||
| 653 | __u32 DR1; | ||
| 654 | __u32 DR4; | ||
| 655 | __u32 num_cliprects; | ||
| 656 | /** This is a struct drm_clip_rect *cliprects */ | ||
| 657 | __u64 cliprects_ptr; | ||
| 658 | #define I915_EXEC_RING_MASK (7<<0) | ||
| 659 | #define I915_EXEC_DEFAULT (0<<0) | ||
| 660 | #define I915_EXEC_RENDER (1<<0) | ||
| 661 | #define I915_EXEC_BSD (2<<0) | ||
| 662 | #define I915_EXEC_BLT (3<<0) | ||
| 663 | |||
| 664 | /* Used for switching the constants addressing mode on gen4+ RENDER ring. | ||
| 665 | * Gen6+ only supports relative addressing to dynamic state (default) and | ||
| 666 | * absolute addressing. | ||
| 667 | * | ||
| 668 | * These flags are ignored for the BSD and BLT rings. | ||
| 669 | */ | ||
| 670 | #define I915_EXEC_CONSTANTS_MASK (3<<6) | ||
| 671 | #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ | ||
| 672 | #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) | ||
| 673 | #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ | ||
| 674 | __u64 flags; | ||
| 675 | __u64 rsvd1; /* now used for context info */ | ||
| 676 | __u64 rsvd2; | ||
| 677 | }; | ||
| 678 | |||
| 679 | /** Resets the SO write offset registers for transform feedback on gen7. */ | ||
| 680 | #define I915_EXEC_GEN7_SOL_RESET (1<<8) | ||
| 681 | |||
| 682 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) | ||
| 683 | #define i915_execbuffer2_set_context_id(eb2, context) \ | ||
| 684 | (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK | ||
| 685 | #define i915_execbuffer2_get_context_id(eb2) \ | ||
| 686 | ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) | ||
| 687 | |||
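Since execbuffer2 reuses rsvd1 for the context, the id obtained from DRM_IOCTL_I915_GEM_CONTEXT_CREATE is attached with the helper macro rather than a named field. A small sketch (the wrapper name is an assumption):

    #include <drm/i915_drm.h>

    /* Sketch: bind an execbuffer2 request to a previously created context. */
    static void execbuf_use_context(struct drm_i915_gem_execbuffer2 *eb2,
                                    __u32 ctx_id)
    {
            /* Stores ctx_id into eb2->rsvd1, masked to the low 32 bits. */
            i915_execbuffer2_set_context_id(*eb2, ctx_id);
    }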
| 688 | struct drm_i915_gem_pin { | ||
| 689 | /** Handle of the buffer to be pinned. */ | ||
| 690 | __u32 handle; | ||
| 691 | __u32 pad; | ||
| 692 | |||
| 693 | /** alignment required within the aperture */ | ||
| 694 | __u64 alignment; | ||
| 695 | |||
| 696 | /** Returned GTT offset of the buffer. */ | ||
| 697 | __u64 offset; | ||
| 698 | }; | ||
| 699 | |||
| 700 | struct drm_i915_gem_unpin { | ||
| 701 | /** Handle of the buffer to be unpinned. */ | ||
| 702 | __u32 handle; | ||
| 703 | __u32 pad; | ||
| 704 | }; | ||
| 705 | |||
| 706 | struct drm_i915_gem_busy { | ||
| 707 | /** Handle of the buffer to check for busy */ | ||
| 708 | __u32 handle; | ||
| 709 | |||
| 710 | /** Return busy status (1 if busy, 0 if idle). | ||
| 711 | * The high word is used to indicate on which rings the object | ||
| 712 | * currently resides: | ||
| 713 | * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) | ||
| 714 | */ | ||
| 715 | __u32 busy; | ||
| 716 | }; | ||
| 717 | |||
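Given the encoding described in the comment (bit 0 = busy, bits 16-31 = per-ring usage), userspace can decode the result roughly as follows; this is a sketch, not an interface defined by the patch:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch: returns 0 if idle, a ring bitmask if busy, -1 on error. */
    static int gem_busy_rings(int fd, __u32 handle)
    {
            struct drm_i915_gem_busy busy = { .handle = handle };

            if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
                    return -1;
            if (!(busy.busy & 1))
                    return 0;               /* object is idle */
            return busy.busy >> 16;         /* bit 0: render, 1: bsd, 2: blt, ... */
    }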
| 718 | #define I915_CACHING_NONE 0 | ||
| 719 | #define I915_CACHING_CACHED 1 | ||
| 720 | |||
| 721 | struct drm_i915_gem_caching { | ||
| 722 | /** | ||
| 723 | * Handle of the buffer to set/get the caching level of. */ | ||
| 724 | __u32 handle; | ||
| 725 | |||
| 726 | /** | ||
| 727 | * Caching level to apply or return value | ||
| 728 | * | ||
| 729 | * bits0-15 are for generic caching control (i.e. the above defined | ||
| 730 | * values). bits16-31 are reserved for platform-specific variations | ||
| 731 | * (e.g. l3$ caching on gen7). */ | ||
| 732 | __u32 caching; | ||
| 733 | }; | ||
| 734 | |||
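Setting the caching level with the new SET_CACHING ioctl then reduces to the following sketch (only the generic low bits are used here; the helper name is illustrative):

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch: level is I915_CACHING_NONE or I915_CACHING_CACHED. */
    static int gem_set_caching(int fd, __u32 handle, __u32 level)
    {
            struct drm_i915_gem_caching arg = {
                    .handle  = handle,
                    .caching = level,       /* bits 16-31 left at zero */
            };

            return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
    }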
| 735 | #define I915_TILING_NONE 0 | ||
| 736 | #define I915_TILING_X 1 | ||
| 737 | #define I915_TILING_Y 2 | ||
| 738 | |||
| 739 | #define I915_BIT_6_SWIZZLE_NONE 0 | ||
| 740 | #define I915_BIT_6_SWIZZLE_9 1 | ||
| 741 | #define I915_BIT_6_SWIZZLE_9_10 2 | ||
| 742 | #define I915_BIT_6_SWIZZLE_9_11 3 | ||
| 743 | #define I915_BIT_6_SWIZZLE_9_10_11 4 | ||
| 744 | /* Not seen by userland */ | ||
| 745 | #define I915_BIT_6_SWIZZLE_UNKNOWN 5 | ||
| 746 | /* Seen by userland. */ | ||
| 747 | #define I915_BIT_6_SWIZZLE_9_17 6 | ||
| 748 | #define I915_BIT_6_SWIZZLE_9_10_17 7 | ||
| 749 | |||
| 750 | struct drm_i915_gem_set_tiling { | ||
| 751 | /** Handle of the buffer to have its tiling state updated */ | ||
| 752 | __u32 handle; | ||
| 753 | |||
| 754 | /** | ||
| 755 | * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, | ||
| 756 | * I915_TILING_Y). | ||
| 757 | * | ||
| 758 | * This value is to be set on request, and will be updated by the | ||
| 759 | * kernel on successful return with the actual chosen tiling layout. | ||
| 760 | * | ||
| 761 | * The tiling mode may be demoted to I915_TILING_NONE when the system | ||
| 762 | * has bit 6 swizzling that can't be managed correctly by GEM. | ||
| 763 | * | ||
| 764 | * Buffer contents become undefined when changing tiling_mode. | ||
| 765 | */ | ||
| 766 | __u32 tiling_mode; | ||
| 767 | |||
| 768 | /** | ||
| 769 | * Stride in bytes for the object when in I915_TILING_X or | ||
| 770 | * I915_TILING_Y. | ||
| 771 | */ | ||
| 772 | __u32 stride; | ||
| 773 | |||
| 774 | /** | ||
| 775 | * Returned address bit 6 swizzling required for CPU access through | ||
| 776 | * mmap mapping. | ||
| 777 | */ | ||
| 778 | __u32 swizzle_mode; | ||
| 779 | }; | ||
| 780 | |||
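A typical caller requests a mode and stride and then reads back what the kernel actually chose, since tiling may be demoted and the swizzle mode is returned in the same struct. A hedged sketch (helper name assumed):

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch: request X tiling; returns the tiling mode actually applied. */
    static int gem_set_tiling_x(int fd, __u32 handle, __u32 stride)
    {
            struct drm_i915_gem_set_tiling arg = {
                    .handle      = handle,
                    .tiling_mode = I915_TILING_X,
                    .stride      = stride,  /* bytes per tiled row */
            };

            if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) != 0)
                    return -1;
            /* arg.tiling_mode and arg.swizzle_mode now hold the kernel's choice. */
            return arg.tiling_mode;
    }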
| 781 | struct drm_i915_gem_get_tiling { | ||
| 782 | /** Handle of the buffer to get tiling state for. */ | ||
| 783 | __u32 handle; | ||
| 784 | |||
| 785 | /** | ||
| 786 | * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, | ||
| 787 | * I915_TILING_Y). | ||
| 788 | */ | ||
| 789 | __u32 tiling_mode; | ||
| 790 | |||
| 791 | /** | ||
| 792 | * Returned address bit 6 swizzling required for CPU access through | ||
| 793 | * mmap mapping. | ||
| 794 | */ | ||
| 795 | __u32 swizzle_mode; | ||
| 796 | }; | ||
| 797 | |||
| 798 | struct drm_i915_gem_get_aperture { | ||
| 799 | /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ | ||
| 800 | __u64 aper_size; | ||
| 801 | |||
| 802 | /** | ||
| 803 | * Available space in the aperture used by i915_gem_execbuffer, in | ||
| 804 | * bytes | ||
| 805 | */ | ||
| 806 | __u64 aper_available_size; | ||
| 807 | }; | ||
| 808 | |||
| 809 | struct drm_i915_get_pipe_from_crtc_id { | ||
| 810 | /** ID of CRTC being requested **/ | ||
| 811 | __u32 crtc_id; | ||
| 812 | |||
| 813 | /** pipe of requested CRTC **/ | ||
| 814 | __u32 pipe; | ||
| 815 | }; | ||
| 816 | |||
| 817 | #define I915_MADV_WILLNEED 0 | ||
| 818 | #define I915_MADV_DONTNEED 1 | ||
| 819 | #define __I915_MADV_PURGED 2 /* internal state */ | ||
| 820 | |||
| 821 | struct drm_i915_gem_madvise { | ||
| 822 | /** Handle of the buffer to change the backing store advice */ | ||
| 823 | __u32 handle; | ||
| 824 | |||
| 825 | /* Advice: either the buffer will be needed again in the near future, | ||
| 826 | * or won't be and could be discarded under memory pressure. | ||
| 827 | */ | ||
| 828 | __u32 madv; | ||
| 829 | |||
| 830 | /** Whether the backing store still exists. */ | ||
| 831 | __u32 retained; | ||
| 832 | }; | ||
| 833 | |||
| 834 | /* flags */ | ||
| 835 | #define I915_OVERLAY_TYPE_MASK 0xff | ||
| 836 | #define I915_OVERLAY_YUV_PLANAR 0x01 | ||
| 837 | #define I915_OVERLAY_YUV_PACKED 0x02 | ||
| 838 | #define I915_OVERLAY_RGB 0x03 | ||
| 839 | |||
| 840 | #define I915_OVERLAY_DEPTH_MASK 0xff00 | ||
| 841 | #define I915_OVERLAY_RGB24 0x1000 | ||
| 842 | #define I915_OVERLAY_RGB16 0x2000 | ||
| 843 | #define I915_OVERLAY_RGB15 0x3000 | ||
| 844 | #define I915_OVERLAY_YUV422 0x0100 | ||
| 845 | #define I915_OVERLAY_YUV411 0x0200 | ||
| 846 | #define I915_OVERLAY_YUV420 0x0300 | ||
| 847 | #define I915_OVERLAY_YUV410 0x0400 | ||
| 848 | |||
| 849 | #define I915_OVERLAY_SWAP_MASK 0xff0000 | ||
| 850 | #define I915_OVERLAY_NO_SWAP 0x000000 | ||
| 851 | #define I915_OVERLAY_UV_SWAP 0x010000 | ||
| 852 | #define I915_OVERLAY_Y_SWAP 0x020000 | ||
| 853 | #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 | ||
| 854 | |||
| 855 | #define I915_OVERLAY_FLAGS_MASK 0xff000000 | ||
| 856 | #define I915_OVERLAY_ENABLE 0x01000000 | ||
| 857 | |||
| 858 | struct drm_intel_overlay_put_image { | ||
| 859 | /* various flags and src format description */ | ||
| 860 | __u32 flags; | ||
| 861 | /* source picture description */ | ||
| 862 | __u32 bo_handle; | ||
| 863 | /* stride values and offsets are in bytes, buffer relative */ | ||
| 864 | __u16 stride_Y; /* stride for packed formats */ | ||
| 865 | __u16 stride_UV; | ||
| 866 | __u32 offset_Y; /* offset for packed formats */ | ||
| 867 | __u32 offset_U; | ||
| 868 | __u32 offset_V; | ||
| 869 | /* in pixels */ | ||
| 870 | __u16 src_width; | ||
| 871 | __u16 src_height; | ||
| 872 | /* to compensate the scaling factors for partially covered surfaces */ | ||
| 873 | __u16 src_scan_width; | ||
| 874 | __u16 src_scan_height; | ||
| 875 | /* output crtc description */ | ||
| 876 | __u32 crtc_id; | ||
| 877 | __u16 dst_x; | ||
| 878 | __u16 dst_y; | ||
| 879 | __u16 dst_width; | ||
| 880 | __u16 dst_height; | ||
| 881 | }; | ||
| 882 | |||
| 883 | /* flags */ | ||
| 884 | #define I915_OVERLAY_UPDATE_ATTRS (1<<0) | ||
| 885 | #define I915_OVERLAY_UPDATE_GAMMA (1<<1) | ||
| 886 | struct drm_intel_overlay_attrs { | ||
| 887 | __u32 flags; | ||
| 888 | __u32 color_key; | ||
| 889 | __s32 brightness; | ||
| 890 | __u32 contrast; | ||
| 891 | __u32 saturation; | ||
| 892 | __u32 gamma0; | ||
| 893 | __u32 gamma1; | ||
| 894 | __u32 gamma2; | ||
| 895 | __u32 gamma3; | ||
| 896 | __u32 gamma4; | ||
| 897 | __u32 gamma5; | ||
| 898 | }; | ||
| 899 | |||
| 900 | /* | ||
| 901 | * Intel sprite handling | ||
| 902 | * | ||
| 903 | * Color keying works with a min/mask/max tuple. Both source and destination | ||
| 904 | * color keying is allowed. | ||
| 905 | * | ||
| 906 | * Source keying: | ||
| 907 | * Sprite pixels within the min & max values, masked against the color channels | ||
| 908 | * specified in the mask field, will be transparent. All other pixels will | ||
| 909 | * be displayed on top of the primary plane. For RGB surfaces, only the min | ||
| 910 | * and mask fields will be used; ranged compares are not allowed. | ||
| 911 | * | ||
| 912 | * Destination keying: | ||
| 913 | * Primary plane pixels that match the min value, masked against the color | ||
| 914 | * channels specified in the mask field, will be replaced by corresponding | ||
| 915 | * pixels from the sprite plane. | ||
| 916 | * | ||
| 917 | * Note that source & destination keying are exclusive; only one can be | ||
| 918 | * active on a given plane. | ||
| 919 | */ | ||
| 920 | |||
| 921 | #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ | ||
| 922 | #define I915_SET_COLORKEY_DESTINATION (1<<1) | ||
| 923 | #define I915_SET_COLORKEY_SOURCE (1<<2) | ||
| 924 | struct drm_intel_sprite_colorkey { | ||
| 925 | __u32 plane_id; | ||
| 926 | __u32 min_value; | ||
| 927 | __u32 channel_mask; | ||
| 928 | __u32 max_value; | ||
| 929 | __u32 flags; | ||
| 930 | }; | ||
| 931 | |||
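As an example of the destination-keying case described above, the request could be filled like this (plane_id is assumed to come from KMS plane enumeration; the helper is illustrative, not from the patch):

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch: primary pixels equal to 'key' under 'mask' show the sprite. */
    static int sprite_set_dst_key(int fd, __u32 plane_id, __u32 key, __u32 mask)
    {
            struct drm_intel_sprite_colorkey ckey = {
                    .plane_id     = plane_id,
                    .min_value    = key,
                    .channel_mask = mask,
                    .max_value    = 0,      /* unused for destination keying */
                    .flags        = I915_SET_COLORKEY_DESTINATION,
            };

            return ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ckey);
    }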
| 932 | struct drm_i915_gem_wait { | ||
| 933 | /** Handle of BO we shall wait on */ | ||
| 934 | __u32 bo_handle; | ||
| 935 | __u32 flags; | ||
| 936 | /** Number of nanoseconds to wait, Returns time remaining. */ | ||
| 937 | __s64 timeout_ns; | ||
| 938 | }; | ||
| 939 | |||
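The timeout is both input and output: on return it holds the time remaining. A minimal sketch of a bounded wait (helper name assumed):

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch: wait up to timeout_ns for the BO to go idle; 0 on success. */
    static int gem_wait_bo(int fd, __u32 bo_handle, __s64 timeout_ns)
    {
            struct drm_i915_gem_wait wait = {
                    .bo_handle  = bo_handle,
                    .timeout_ns = timeout_ns,
            };

            return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
    }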
| 940 | struct drm_i915_gem_context_create { | ||
| 941 | /* output: id of new context*/ | ||
| 942 | __u32 ctx_id; | ||
| 943 | __u32 pad; | ||
| 944 | }; | ||
| 945 | |||
| 946 | struct drm_i915_gem_context_destroy { | ||
| 947 | __u32 ctx_id; | ||
| 948 | __u32 pad; | ||
| 949 | }; | ||
| 950 | |||
| 951 | struct drm_i915_reg_read { | ||
| 952 | __u64 offset; | ||
| 953 | __u64 val; /* Return value */ | ||
| 954 | }; | ||
| 955 | #endif /* _I915_DRM_H_ */ | 37 | #endif /* _I915_DRM_H_ */ |
diff --git a/include/media/s5p_hdmi.h b/include/media/s5p_hdmi.h index 361a751f73af..181642b8d0a5 100644 --- a/include/media/s5p_hdmi.h +++ b/include/media/s5p_hdmi.h | |||
| @@ -20,6 +20,7 @@ struct i2c_board_info; | |||
| 20 | * @hdmiphy_info: template for HDMIPHY I2C device | 20 | * @hdmiphy_info: template for HDMIPHY I2C device |
| 21 | * @mhl_bus: controller id for MHL control bus | 21 | * @mhl_bus: controller id for MHL control bus |
| 22 | * @mhl_info: template for MHL I2C device | 22 | * @mhl_info: template for MHL I2C device |
| 23 | * @hpd_gpio: GPIO for Hot-Plug-Detect pin | ||
| 23 | * | 24 | * |
| 24 | * NULL pointer for *_info fields indicates that | 25 | * NULL pointer for *_info fields indicates that |
| 25 | * the corresponding chip is not present | 26 | * the corresponding chip is not present |
| @@ -29,6 +30,7 @@ struct s5p_hdmi_platform_data { | |||
| 29 | struct i2c_board_info *hdmiphy_info; | 30 | struct i2c_board_info *hdmiphy_info; |
| 30 | int mhl_bus; | 31 | int mhl_bus; |
| 31 | struct i2c_board_info *mhl_info; | 32 | struct i2c_board_info *mhl_info; |
| 33 | int hpd_gpio; | ||
| 32 | }; | 34 | }; |
| 33 | 35 | ||
| 34 | #endif /* S5P_HDMI_H */ | 36 | #endif /* S5P_HDMI_H */ |
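For boards that still pass this structure as platform data, the new hpd_gpio field would be wired up roughly as below; the variable name, GPIO number and NULL chip templates are placeholders, and any fields not shown in the hunk above are left at their defaults.

    #include <media/s5p_hdmi.h>

    /* Hypothetical board-file snippet. */
    static struct s5p_hdmi_platform_data example_hdmi_pdata = {
            .hdmiphy_info = NULL,   /* no separate HDMIPHY template here */
            .mhl_bus      = 0,
            .mhl_info     = NULL,   /* no MHL chip on this board */
            .hpd_gpio     = 0,      /* placeholder: a real SoC GPIO number */
    };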
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild index aafaa5aa54d4..ba99ce3f7372 100644 --- a/include/uapi/drm/Kbuild +++ b/include/uapi/drm/Kbuild | |||
| @@ -1 +1,16 @@ | |||
| 1 | # UAPI Header export list | 1 | # UAPI Header export list |
| 2 | header-y += drm.h | ||
| 3 | header-y += drm_fourcc.h | ||
| 4 | header-y += drm_mode.h | ||
| 5 | header-y += drm_sarea.h | ||
| 6 | header-y += exynos_drm.h | ||
| 7 | header-y += i810_drm.h | ||
| 8 | header-y += i915_drm.h | ||
| 9 | header-y += mga_drm.h | ||
| 10 | header-y += nouveau_drm.h | ||
| 11 | header-y += r128_drm.h | ||
| 12 | header-y += radeon_drm.h | ||
| 13 | header-y += savage_drm.h | ||
| 14 | header-y += sis_drm.h | ||
| 15 | header-y += via_drm.h | ||
| 16 | header-y += vmwgfx_drm.h | ||
diff --git a/include/drm/drm.h b/include/uapi/drm/drm.h index 1e3481edf062..1e3481edf062 100644 --- a/include/drm/drm.h +++ b/include/uapi/drm/drm.h | |||
diff --git a/include/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 646ae5f39f42..646ae5f39f42 100644 --- a/include/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h | |||
diff --git a/include/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 3d6301b6ec16..3d6301b6ec16 100644 --- a/include/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h | |||
diff --git a/include/drm/drm_sarea.h b/include/uapi/drm/drm_sarea.h index 413a5642d49f..413a5642d49f 100644 --- a/include/drm/drm_sarea.h +++ b/include/uapi/drm/drm_sarea.h | |||
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h new file mode 100644 index 000000000000..c0494d586e23 --- /dev/null +++ b/include/uapi/drm/exynos_drm.h | |||
| @@ -0,0 +1,203 @@ | |||
| 1 | /* exynos_drm.h | ||
| 2 | * | ||
| 3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
| 4 | * Authors: | ||
| 5 | * Inki Dae <inki.dae@samsung.com> | ||
| 6 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
| 7 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
| 8 | * | ||
| 9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 10 | * copy of this software and associated documentation files (the "Software"), | ||
| 11 | * to deal in the Software without restriction, including without limitation | ||
| 12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 14 | * Software is furnished to do so, subject to the following conditions: | ||
| 15 | * | ||
| 16 | * The above copyright notice and this permission notice (including the next | ||
| 17 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 18 | * Software. | ||
| 19 | * | ||
| 20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 23 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 26 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 27 | */ | ||
| 28 | |||
| 29 | #ifndef _UAPI_EXYNOS_DRM_H_ | ||
| 30 | #define _UAPI_EXYNOS_DRM_H_ | ||
| 31 | |||
| 32 | #include <drm/drm.h> | ||
| 33 | |||
| 34 | /** | ||
| 35 | * User-desired buffer creation information structure. | ||
| 36 | * | ||
| 37 | * @size: user-desired memory allocation size. | ||
| 38 | * - this size value would be page-aligned internally. | ||
| 39 | * @flags: user request for setting memory type or cache attributes. | ||
| 40 | * @handle: returned handle to the created gem object. | ||
| 41 | * - this handle will be set by gem module of kernel side. | ||
| 42 | */ | ||
| 43 | struct drm_exynos_gem_create { | ||
| 44 | uint64_t size; | ||
| 45 | unsigned int flags; | ||
| 46 | unsigned int handle; | ||
| 47 | }; | ||
| 48 | |||
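Creation through the Exynos ioctl defined later in this header mirrors the i915 flow; a sketch assuming the fd refers to the exynos DRM node (helper name illustrative):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/exynos_drm.h>

    /* Sketch: returns the new GEM handle, or 0 on failure. */
    static unsigned int exynos_gem_create(int fd, uint64_t size, unsigned int flags)
    {
            struct drm_exynos_gem_create create = {
                    .size  = size,          /* page-aligned internally */
                    .flags = flags,         /* e.g. EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC */
            };

            if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create) != 0)
                    return 0;
            return create.handle;
    }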
| 49 | /** | ||
| 50 | * A structure for getting buffer offset. | ||
| 51 | * | ||
| 52 | * @handle: a handle to the gem object created. | ||
| 53 | * @pad: just padding to be 64-bit aligned. | ||
| 54 | * @offset: relative offset value of the memory region allocated. | ||
| 55 | * - this value should be set by user. | ||
| 56 | */ | ||
| 57 | struct drm_exynos_gem_map_off { | ||
| 58 | unsigned int handle; | ||
| 59 | unsigned int pad; | ||
| 60 | uint64_t offset; | ||
| 61 | }; | ||
| 62 | |||
| 63 | /** | ||
| 64 | * A structure for mapping buffer. | ||
| 65 | * | ||
| 66 | * @handle: a handle to gem object created. | ||
| 67 | * @pad: just padding to be 64-bit aligned. | ||
| 68 | * @size: memory size to be mapped. | ||
| 69 | * @mapped: user virtual address the buffer was mmapped at. | ||
| 70 | * - this field is filled by the exynos gem module on the | ||
| 71 | * kernel side with the user virtual address allocated | ||
| 72 | * by do_mmap(). | ||
| 73 | */ | ||
| 74 | struct drm_exynos_gem_mmap { | ||
| 75 | unsigned int handle; | ||
| 76 | unsigned int pad; | ||
| 77 | uint64_t size; | ||
| 78 | uint64_t mapped; | ||
| 79 | }; | ||
| 80 | |||
| 81 | /** | ||
| 82 | * A structure for gem information. | ||
| 83 | * | ||
| 84 | * @handle: a handle to gem object created. | ||
| 85 | * @flags: flag value including memory type and cache attributes; | ||
| 86 | * this value is set by the driver. | ||
| 87 | * @size: size of the memory region allocated by gem; this size | ||
| 88 | * is set by the driver. | ||
| 89 | */ | ||
| 90 | struct drm_exynos_gem_info { | ||
| 91 | unsigned int handle; | ||
| 92 | unsigned int flags; | ||
| 93 | uint64_t size; | ||
| 94 | }; | ||
| 95 | |||
| 96 | /** | ||
| 97 | * A structure for user connection request of virtual display. | ||
| 98 | * | ||
| 99 | * @connection: indicates whether the user requests a connection or not. | ||
| 100 | * @extensions: if this value is 1 then the vidi driver would need additional | ||
| 101 | * 128 bytes of EDID data. | ||
| 102 | * @edid: the edid data pointer from user side. | ||
| 103 | */ | ||
| 104 | struct drm_exynos_vidi_connection { | ||
| 105 | unsigned int connection; | ||
| 106 | unsigned int extensions; | ||
| 107 | uint64_t edid; | ||
| 108 | }; | ||
| 109 | |||
| 110 | /* memory type definitions. */ | ||
| 111 | enum e_drm_exynos_gem_mem_type { | ||
| 112 | /* Physically contiguous memory; used as the default. */ | ||
| 113 | EXYNOS_BO_CONTIG = 0 << 0, | ||
| 114 | /* Physically non-contiguous memory. */ | ||
| 115 | EXYNOS_BO_NONCONTIG = 1 << 0, | ||
| 116 | /* Non-cacheable mapping; used as the default. */ | ||
| 117 | EXYNOS_BO_NONCACHABLE = 0 << 1, | ||
| 118 | /* Cacheable mapping. */ | ||
| 119 | EXYNOS_BO_CACHABLE = 1 << 1, | ||
| 120 | /* write-combine mapping. */ | ||
| 121 | EXYNOS_BO_WC = 1 << 2, | ||
| 122 | EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE | | ||
| 123 | EXYNOS_BO_WC | ||
| 124 | }; | ||
| 125 | |||
| 126 | struct drm_exynos_g2d_get_ver { | ||
| 127 | __u32 major; | ||
| 128 | __u32 minor; | ||
| 129 | }; | ||
| 130 | |||
| 131 | struct drm_exynos_g2d_cmd { | ||
| 132 | __u32 offset; | ||
| 133 | __u32 data; | ||
| 134 | }; | ||
| 135 | |||
| 136 | enum drm_exynos_g2d_event_type { | ||
| 137 | G2D_EVENT_NOT, | ||
| 138 | G2D_EVENT_NONSTOP, | ||
| 139 | G2D_EVENT_STOP, /* not yet */ | ||
| 140 | }; | ||
| 141 | |||
| 142 | struct drm_exynos_g2d_set_cmdlist { | ||
| 143 | __u64 cmd; | ||
| 144 | __u64 cmd_gem; | ||
| 145 | __u32 cmd_nr; | ||
| 146 | __u32 cmd_gem_nr; | ||
| 147 | |||
| 148 | /* for g2d event */ | ||
| 149 | __u64 event_type; | ||
| 150 | __u64 user_data; | ||
| 151 | }; | ||
| 152 | |||
| 153 | struct drm_exynos_g2d_exec { | ||
| 154 | __u64 async; | ||
| 155 | }; | ||
| 156 | |||
| 157 | #define DRM_EXYNOS_GEM_CREATE 0x00 | ||
| 158 | #define DRM_EXYNOS_GEM_MAP_OFFSET 0x01 | ||
| 159 | #define DRM_EXYNOS_GEM_MMAP 0x02 | ||
| 160 | /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ | ||
| 161 | #define DRM_EXYNOS_GEM_GET 0x04 | ||
| 162 | #define DRM_EXYNOS_VIDI_CONNECTION 0x07 | ||
| 163 | |||
| 164 | /* G2D */ | ||
| 165 | #define DRM_EXYNOS_G2D_GET_VER 0x20 | ||
| 166 | #define DRM_EXYNOS_G2D_SET_CMDLIST 0x21 | ||
| 167 | #define DRM_EXYNOS_G2D_EXEC 0x22 | ||
| 168 | |||
| 169 | #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 170 | DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) | ||
| 171 | |||
| 172 | #define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 173 | DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off) | ||
| 174 | |||
| 175 | #define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 176 | DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap) | ||
| 177 | |||
| 178 | #define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 179 | DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info) | ||
| 180 | |||
| 181 | #define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 182 | DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection) | ||
| 183 | |||
| 184 | #define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 185 | DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver) | ||
| 186 | #define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 187 | DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist) | ||
| 188 | #define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \ | ||
| 189 | DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec) | ||
| 190 | |||
| 191 | /* EXYNOS specific events */ | ||
| 192 | #define DRM_EXYNOS_G2D_EVENT 0x80000000 | ||
| 193 | |||
| 194 | struct drm_exynos_g2d_event { | ||
| 195 | struct drm_event base; | ||
| 196 | __u64 user_data; | ||
| 197 | __u32 tv_sec; | ||
| 198 | __u32 tv_usec; | ||
| 199 | __u32 cmdlist_no; | ||
| 200 | __u32 reserved; | ||
| 201 | }; | ||
| 202 | |||
| 203 | #endif /* _UAPI_EXYNOS_DRM_H_ */ | ||
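G2D completion events of the type defined above arrive on the DRM fd like any other DRM event. A hedged sketch of pulling them out of a read() buffer (buffer sizing and error handling simplified; the function name is an assumption):

    #include <string.h>
    #include <unistd.h>
    #include <drm/exynos_drm.h>

    /* Sketch: drain one read() worth of events and inspect G2D completions. */
    static void handle_g2d_events(int fd)
    {
            char buf[1024];
            ssize_t len = read(fd, buf, sizeof(buf));
            ssize_t i = 0;

            while (len > 0 && i + (ssize_t)sizeof(struct drm_event) <= len) {
                    struct drm_event ev;

                    memcpy(&ev, buf + i, sizeof(ev));
                    if (ev.length < sizeof(ev) || i + (ssize_t)ev.length > len)
                            break;          /* truncated or malformed event */
                    if (ev.type == DRM_EXYNOS_G2D_EVENT) {
                            struct drm_exynos_g2d_event g2d;

                            memcpy(&g2d, buf + i, sizeof(g2d));
                            /* g2d.cmdlist_no identifies the completed command list. */
                    }
                    i += ev.length;
            }
    }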
diff --git a/include/drm/i810_drm.h b/include/uapi/drm/i810_drm.h index 7a10bb6f2c0f..7a10bb6f2c0f 100644 --- a/include/drm/i810_drm.h +++ b/include/uapi/drm/i810_drm.h | |||
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h new file mode 100644 index 000000000000..4322b1e7d2ed --- /dev/null +++ b/include/uapi/drm/i915_drm.h | |||
| @@ -0,0 +1,947 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | ||
| 3 | * All Rights Reserved. | ||
| 4 | * | ||
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 6 | * copy of this software and associated documentation files (the | ||
| 7 | * "Software"), to deal in the Software without restriction, including | ||
| 8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 10 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 11 | * the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice (including the | ||
| 14 | * next paragraph) shall be included in all copies or substantial portions | ||
| 15 | * of the Software. | ||
| 16 | * | ||
| 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
| 18 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | ||
| 20 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | ||
| 21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | ||
| 22 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | ||
| 23 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | |||
| 27 | #ifndef _UAPI_I915_DRM_H_ | ||
| 28 | #define _UAPI_I915_DRM_H_ | ||
| 29 | |||
| 30 | #include <drm/drm.h> | ||
| 31 | |||
| 32 | /* Please note that modifications to all structs defined here are | ||
| 33 | * subject to backwards-compatibility constraints. | ||
| 34 | */ | ||
| 35 | |||
| 36 | |||
| 37 | /* Each region is a minimum of 16k, and there are at most 255 of them. | ||
| 38 | */ | ||
| 39 | #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use | ||
| 40 | * of chars for next/prev indices */ | ||
| 41 | #define I915_LOG_MIN_TEX_REGION_SIZE 14 | ||
| 42 | |||
| 43 | typedef struct _drm_i915_init { | ||
| 44 | enum { | ||
| 45 | I915_INIT_DMA = 0x01, | ||
| 46 | I915_CLEANUP_DMA = 0x02, | ||
| 47 | I915_RESUME_DMA = 0x03 | ||
| 48 | } func; | ||
| 49 | unsigned int mmio_offset; | ||
| 50 | int sarea_priv_offset; | ||
| 51 | unsigned int ring_start; | ||
| 52 | unsigned int ring_end; | ||
| 53 | unsigned int ring_size; | ||
| 54 | unsigned int front_offset; | ||
| 55 | unsigned int back_offset; | ||
| 56 | unsigned int depth_offset; | ||
| 57 | unsigned int w; | ||
| 58 | unsigned int h; | ||
| 59 | unsigned int pitch; | ||
| 60 | unsigned int pitch_bits; | ||
| 61 | unsigned int back_pitch; | ||
| 62 | unsigned int depth_pitch; | ||
| 63 | unsigned int cpp; | ||
| 64 | unsigned int chipset; | ||
| 65 | } drm_i915_init_t; | ||
| 66 | |||
| 67 | typedef struct _drm_i915_sarea { | ||
| 68 | struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; | ||
| 69 | int last_upload; /* last time texture was uploaded */ | ||
| 70 | int last_enqueue; /* last time a buffer was enqueued */ | ||
| 71 | int last_dispatch; /* age of the most recently dispatched buffer */ | ||
| 72 | int ctxOwner; /* last context to upload state */ | ||
| 73 | int texAge; | ||
| 74 | int pf_enabled; /* is pageflipping allowed? */ | ||
| 75 | int pf_active; | ||
| 76 | int pf_current_page; /* which buffer is being displayed? */ | ||
| 77 | int perf_boxes; /* performance boxes to be displayed */ | ||
| 78 | int width, height; /* screen size in pixels */ | ||
| 79 | |||
| 80 | drm_handle_t front_handle; | ||
| 81 | int front_offset; | ||
| 82 | int front_size; | ||
| 83 | |||
| 84 | drm_handle_t back_handle; | ||
| 85 | int back_offset; | ||
| 86 | int back_size; | ||
| 87 | |||
| 88 | drm_handle_t depth_handle; | ||
| 89 | int depth_offset; | ||
| 90 | int depth_size; | ||
| 91 | |||
| 92 | drm_handle_t tex_handle; | ||
| 93 | int tex_offset; | ||
| 94 | int tex_size; | ||
| 95 | int log_tex_granularity; | ||
| 96 | int pitch; | ||
| 97 | int rotation; /* 0, 90, 180 or 270 */ | ||
| 98 | int rotated_offset; | ||
| 99 | int rotated_size; | ||
| 100 | int rotated_pitch; | ||
| 101 | int virtualX, virtualY; | ||
| 102 | |||
| 103 | unsigned int front_tiled; | ||
| 104 | unsigned int back_tiled; | ||
| 105 | unsigned int depth_tiled; | ||
| 106 | unsigned int rotated_tiled; | ||
| 107 | unsigned int rotated2_tiled; | ||
| 108 | |||
| 109 | int pipeA_x; | ||
| 110 | int pipeA_y; | ||
| 111 | int pipeA_w; | ||
| 112 | int pipeA_h; | ||
| 113 | int pipeB_x; | ||
| 114 | int pipeB_y; | ||
| 115 | int pipeB_w; | ||
| 116 | int pipeB_h; | ||
| 117 | |||
| 118 | /* fill out some space for old userspace triple buffer */ | ||
| 119 | drm_handle_t unused_handle; | ||
| 120 | __u32 unused1, unused2, unused3; | ||
| 121 | |||
| 122 | /* buffer object handles for static buffers. May change | ||
| 123 | * over the lifetime of the client. | ||
| 124 | */ | ||
| 125 | __u32 front_bo_handle; | ||
| 126 | __u32 back_bo_handle; | ||
| 127 | __u32 unused_bo_handle; | ||
| 128 | __u32 depth_bo_handle; | ||
| 129 | |||
| 130 | } drm_i915_sarea_t; | ||
| 131 | |||
| 132 | /* due to userspace building against these headers we need some compat here */ | ||
| 133 | #define planeA_x pipeA_x | ||
| 134 | #define planeA_y pipeA_y | ||
| 135 | #define planeA_w pipeA_w | ||
| 136 | #define planeA_h pipeA_h | ||
| 137 | #define planeB_x pipeB_x | ||
| 138 | #define planeB_y pipeB_y | ||
| 139 | #define planeB_w pipeB_w | ||
| 140 | #define planeB_h pipeB_h | ||
| 141 | |||
| 142 | /* Flags for perf_boxes | ||
| 143 | */ | ||
| 144 | #define I915_BOX_RING_EMPTY 0x1 | ||
| 145 | #define I915_BOX_FLIP 0x2 | ||
| 146 | #define I915_BOX_WAIT 0x4 | ||
| 147 | #define I915_BOX_TEXTURE_LOAD 0x8 | ||
| 148 | #define I915_BOX_LOST_CONTEXT 0x10 | ||
| 149 | |||
| 150 | /* I915 specific ioctls | ||
| 151 | * The device specific ioctl range is 0x40 to 0x79. | ||
| 152 | */ | ||
| 153 | #define DRM_I915_INIT 0x00 | ||
| 154 | #define DRM_I915_FLUSH 0x01 | ||
| 155 | #define DRM_I915_FLIP 0x02 | ||
| 156 | #define DRM_I915_BATCHBUFFER 0x03 | ||
| 157 | #define DRM_I915_IRQ_EMIT 0x04 | ||
| 158 | #define DRM_I915_IRQ_WAIT 0x05 | ||
| 159 | #define DRM_I915_GETPARAM 0x06 | ||
| 160 | #define DRM_I915_SETPARAM 0x07 | ||
| 161 | #define DRM_I915_ALLOC 0x08 | ||
| 162 | #define DRM_I915_FREE 0x09 | ||
| 163 | #define DRM_I915_INIT_HEAP 0x0a | ||
| 164 | #define DRM_I915_CMDBUFFER 0x0b | ||
| 165 | #define DRM_I915_DESTROY_HEAP 0x0c | ||
| 166 | #define DRM_I915_SET_VBLANK_PIPE 0x0d | ||
| 167 | #define DRM_I915_GET_VBLANK_PIPE 0x0e | ||
| 168 | #define DRM_I915_VBLANK_SWAP 0x0f | ||
| 169 | #define DRM_I915_HWS_ADDR 0x11 | ||
| 170 | #define DRM_I915_GEM_INIT 0x13 | ||
| 171 | #define DRM_I915_GEM_EXECBUFFER 0x14 | ||
| 172 | #define DRM_I915_GEM_PIN 0x15 | ||
| 173 | #define DRM_I915_GEM_UNPIN 0x16 | ||
| 174 | #define DRM_I915_GEM_BUSY 0x17 | ||
| 175 | #define DRM_I915_GEM_THROTTLE 0x18 | ||
| 176 | #define DRM_I915_GEM_ENTERVT 0x19 | ||
| 177 | #define DRM_I915_GEM_LEAVEVT 0x1a | ||
| 178 | #define DRM_I915_GEM_CREATE 0x1b | ||
| 179 | #define DRM_I915_GEM_PREAD 0x1c | ||
| 180 | #define DRM_I915_GEM_PWRITE 0x1d | ||
| 181 | #define DRM_I915_GEM_MMAP 0x1e | ||
| 182 | #define DRM_I915_GEM_SET_DOMAIN 0x1f | ||
| 183 | #define DRM_I915_GEM_SW_FINISH 0x20 | ||
| 184 | #define DRM_I915_GEM_SET_TILING 0x21 | ||
| 185 | #define DRM_I915_GEM_GET_TILING 0x22 | ||
| 186 | #define DRM_I915_GEM_GET_APERTURE 0x23 | ||
| 187 | #define DRM_I915_GEM_MMAP_GTT 0x24 | ||
| 188 | #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 | ||
| 189 | #define DRM_I915_GEM_MADVISE 0x26 | ||
| 190 | #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 | ||
| 191 | #define DRM_I915_OVERLAY_ATTRS 0x28 | ||
| 192 | #define DRM_I915_GEM_EXECBUFFER2 0x29 | ||
| 193 | #define DRM_I915_GET_SPRITE_COLORKEY 0x2a | ||
| 194 | #define DRM_I915_SET_SPRITE_COLORKEY 0x2b | ||
| 195 | #define DRM_I915_GEM_WAIT 0x2c | ||
| 196 | #define DRM_I915_GEM_CONTEXT_CREATE 0x2d | ||
| 197 | #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e | ||
| 198 | #define DRM_I915_GEM_SET_CACHING 0x2f | ||
| 199 | #define DRM_I915_GEM_GET_CACHING 0x30 | ||
| 200 | #define DRM_I915_REG_READ 0x31 | ||
| 201 | |||
| 202 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | ||
| 203 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | ||
| 204 | #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) | ||
| 205 | #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) | ||
| 206 | #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) | ||
| 207 | #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) | ||
| 208 | #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) | ||
| 209 | #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) | ||
| 210 | #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) | ||
| 211 | #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) | ||
| 212 | #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) | ||
| 213 | #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) | ||
| 214 | #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) | ||
| 215 | #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | ||
| 216 | #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | ||
| 217 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) | ||
| 218 | #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) | ||
| 219 | #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) | ||
| 220 | #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) | ||
| 221 | #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) | ||
| 222 | #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) | ||
| 223 | #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) | ||
| 224 | #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) | ||
| 225 | #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) | ||
| 226 | #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) | ||
| 227 | #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) | ||
| 228 | #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) | ||
| 229 | #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) | ||
| 230 | #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) | ||
| 231 | #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) | ||
| 232 | #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) | ||
| 233 | #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) | ||
| 234 | #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) | ||
| 235 | #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) | ||
| 236 | #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) | ||
| 237 | #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) | ||
| 238 | #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) | ||
| 239 | #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) | ||
| 240 | #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) | ||
| 241 | #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) | ||
| 242 | #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) | ||
| 243 | #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) | ||
| 244 | #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) | ||
| 245 | #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) | ||
| 246 | #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) | ||
| 247 | #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) | ||
| 248 | #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) | ||
| 249 | #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) | ||
| 250 | |||
| 251 | /* Allow drivers to submit batchbuffers directly to hardware, relying | ||
| 252 | * on the security mechanisms provided by hardware. | ||
| 253 | */ | ||
| 254 | typedef struct drm_i915_batchbuffer { | ||
| 255 | int start; /* agp offset */ | ||
| 256 | int used; /* nr bytes in use */ | ||
| 257 | int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ | ||
| 258 | int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ | ||
| 259 | int num_cliprects; /* multipass with multiple cliprects? */ | ||
| 260 | struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ | ||
| 261 | } drm_i915_batchbuffer_t; | ||
| 262 | |||
| 263 | /* As above, but pass a pointer to userspace buffer which can be | ||
| 264 | * validated by the kernel prior to sending to hardware. | ||
| 265 | */ | ||
| 266 | typedef struct _drm_i915_cmdbuffer { | ||
| 267 | char __user *buf; /* pointer to userspace command buffer */ | ||
| 268 | int sz; /* nr bytes in buf */ | ||
| 269 | int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ | ||
| 270 | int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ | ||
| 271 | int num_cliprects; /* multipass with multiple cliprects? */ | ||
| 272 | struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ | ||
| 273 | } drm_i915_cmdbuffer_t; | ||
| 274 | |||
| 275 | /* Userspace can request & wait on irq's: | ||
| 276 | */ | ||
| 277 | typedef struct drm_i915_irq_emit { | ||
| 278 | int __user *irq_seq; | ||
| 279 | } drm_i915_irq_emit_t; | ||
| 280 | |||
| 281 | typedef struct drm_i915_irq_wait { | ||
| 282 | int irq_seq; | ||
| 283 | } drm_i915_irq_wait_t; | ||
| 284 | |||
| 285 | /* Ioctl to query kernel params: | ||
| 286 | */ | ||
| 287 | #define I915_PARAM_IRQ_ACTIVE 1 | ||
| 288 | #define I915_PARAM_ALLOW_BATCHBUFFER 2 | ||
| 289 | #define I915_PARAM_LAST_DISPATCH 3 | ||
| 290 | #define I915_PARAM_CHIPSET_ID 4 | ||
| 291 | #define I915_PARAM_HAS_GEM 5 | ||
| 292 | #define I915_PARAM_NUM_FENCES_AVAIL 6 | ||
| 293 | #define I915_PARAM_HAS_OVERLAY 7 | ||
| 294 | #define I915_PARAM_HAS_PAGEFLIPPING 8 | ||
| 295 | #define I915_PARAM_HAS_EXECBUF2 9 | ||
| 296 | #define I915_PARAM_HAS_BSD 10 | ||
| 297 | #define I915_PARAM_HAS_BLT 11 | ||
| 298 | #define I915_PARAM_HAS_RELAXED_FENCING 12 | ||
| 299 | #define I915_PARAM_HAS_COHERENT_RINGS 13 | ||
| 300 | #define I915_PARAM_HAS_EXEC_CONSTANTS 14 | ||
| 301 | #define I915_PARAM_HAS_RELAXED_DELTA 15 | ||
| 302 | #define I915_PARAM_HAS_GEN7_SOL_RESET 16 | ||
| 303 | #define I915_PARAM_HAS_LLC 17 | ||
| 304 | #define I915_PARAM_HAS_ALIASING_PPGTT 18 | ||
| 305 | #define I915_PARAM_HAS_WAIT_TIMEOUT 19 | ||
| 306 | #define I915_PARAM_HAS_SEMAPHORES 20 | ||
| 307 | #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 | ||
| 308 | #define I915_PARAM_RSVD_FOR_FUTURE_USE 22 | ||
| 309 | |||
| 310 | typedef struct drm_i915_getparam { | ||
| 311 | int param; | ||
| 312 | int __user *value; | ||
| 313 | } drm_i915_getparam_t; | ||
| 314 | |||
| 315 | /* Ioctl to set kernel params: | ||
| 316 | */ | ||
| 317 | #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 | ||
| 318 | #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 | ||
| 319 | #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 | ||
| 320 | #define I915_SETPARAM_NUM_USED_FENCES 4 | ||
| 321 | |||
| 322 | typedef struct drm_i915_setparam { | ||
| 323 | int param; | ||
| 324 | int value; | ||
| 325 | } drm_i915_setparam_t; | ||
| 326 | |||
| 327 | /* A memory manager for regions of shared memory: | ||
| 328 | */ | ||
| 329 | #define I915_MEM_REGION_AGP 1 | ||
| 330 | |||
| 331 | typedef struct drm_i915_mem_alloc { | ||
| 332 | int region; | ||
| 333 | int alignment; | ||
| 334 | int size; | ||
| 335 | int __user *region_offset; /* offset from start of fb or agp */ | ||
| 336 | } drm_i915_mem_alloc_t; | ||
| 337 | |||
| 338 | typedef struct drm_i915_mem_free { | ||
| 339 | int region; | ||
| 340 | int region_offset; | ||
| 341 | } drm_i915_mem_free_t; | ||
| 342 | |||
| 343 | typedef struct drm_i915_mem_init_heap { | ||
| 344 | int region; | ||
| 345 | int size; | ||
| 346 | int start; | ||
| 347 | } drm_i915_mem_init_heap_t; | ||
| 348 | |||
| 349 | /* Allow memory manager to be torn down and re-initialized (eg on | ||
| 350 | * rotate): | ||
| 351 | */ | ||
| 352 | typedef struct drm_i915_mem_destroy_heap { | ||
| 353 | int region; | ||
| 354 | } drm_i915_mem_destroy_heap_t; | ||
| 355 | |||
| 356 | /* Allow X server to configure which pipes to monitor for vblank signals | ||
| 357 | */ | ||
| 358 | #define DRM_I915_VBLANK_PIPE_A 1 | ||
| 359 | #define DRM_I915_VBLANK_PIPE_B 2 | ||
| 360 | |||
| 361 | typedef struct drm_i915_vblank_pipe { | ||
| 362 | int pipe; | ||
| 363 | } drm_i915_vblank_pipe_t; | ||
| 364 | |||
| 365 | /* Schedule buffer swap at given vertical blank: | ||
| 366 | */ | ||
| 367 | typedef struct drm_i915_vblank_swap { | ||
| 368 | drm_drawable_t drawable; | ||
| 369 | enum drm_vblank_seq_type seqtype; | ||
| 370 | unsigned int sequence; | ||
| 371 | } drm_i915_vblank_swap_t; | ||
| 372 | |||
| 373 | typedef struct drm_i915_hws_addr { | ||
| 374 | __u64 addr; | ||
| 375 | } drm_i915_hws_addr_t; | ||
| 376 | |||
| 377 | struct drm_i915_gem_init { | ||
| 378 | /** | ||
| 379 | * Beginning offset in the GTT to be managed by the DRM memory | ||
| 380 | * manager. | ||
| 381 | */ | ||
| 382 | __u64 gtt_start; | ||
| 383 | /** | ||
| 384 | * Ending offset in the GTT to be managed by the DRM memory | ||
| 385 | * manager. | ||
| 386 | */ | ||
| 387 | __u64 gtt_end; | ||
| 388 | }; | ||
| 389 | |||
| 390 | struct drm_i915_gem_create { | ||
| 391 | /** | ||
| 392 | * Requested size for the object. | ||
| 393 | * | ||
| 394 | * The (page-aligned) allocated size for the object will be returned. | ||
| 395 | */ | ||
| 396 | __u64 size; | ||
| 397 | /** | ||
| 398 | * Returned handle for the object. | ||
| 399 | * | ||
| 400 | * Object handles are nonzero. | ||
| 401 | */ | ||
| 402 | __u32 handle; | ||
| 403 | __u32 pad; | ||
| 404 | }; | ||
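A minimal allocation helper built on this structure might look like the following sketch; `fd` is an assumed open device node and DRM_IOCTL_I915_GEM_CREATE is the request macro defined earlier in this header. The kernel rounds the size up to whole pages and returns a nonzero handle.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Create a GEM object of at least `size` bytes; returns 0 and a handle
 * on success, -1 on error (sketch, minimal error handling). */
static int gem_create(int fd, __u64 size, __u32 *handle)
{
        struct drm_i915_gem_create create;

        memset(&create, 0, sizeof(create));
        create.size = size;          /* rounded up to page granularity */

        if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
                return -1;

        *handle = create.handle;     /* nonzero object handle */
        return 0;
}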
| 405 | |||
| 406 | struct drm_i915_gem_pread { | ||
| 407 | /** Handle for the object being read. */ | ||
| 408 | __u32 handle; | ||
| 409 | __u32 pad; | ||
| 410 | /** Offset into the object to read from */ | ||
| 411 | __u64 offset; | ||
| 412 | /** Length of data to read */ | ||
| 413 | __u64 size; | ||
| 414 | /** | ||
| 415 | * Pointer to write the data into. | ||
| 416 | * | ||
| 417 | * This is a fixed-size type for 32/64 compatibility. | ||
| 418 | */ | ||
| 419 | __u64 data_ptr; | ||
| 420 | }; | ||
| 421 | |||
| 422 | struct drm_i915_gem_pwrite { | ||
| 423 | /** Handle for the object being written to. */ | ||
| 424 | __u32 handle; | ||
| 425 | __u32 pad; | ||
| 426 | /** Offset into the object to write to */ | ||
| 427 | __u64 offset; | ||
| 428 | /** Length of data to write */ | ||
| 429 | __u64 size; | ||
| 430 | /** | ||
| 431 | * Pointer to read the data from. | ||
| 432 | * | ||
| 433 | * This is a fixed-size type for 32/64 compatibility. | ||
| 434 | */ | ||
| 435 | __u64 data_ptr; | ||
| 436 | }; | ||
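Uploading through pwrite avoids mapping the object at all; the kernel copies from the user pointer into the object. A sketch, with `fd` and the object handle assumed to already exist and DRM_IOCTL_I915_GEM_PWRITE taken from earlier in this header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Copy `len` bytes from `data` into the object at `offset` (sketch). */
static int gem_write(int fd, __u32 handle, __u64 offset,
                     const void *data, __u64 len)
{
        struct drm_i915_gem_pwrite pwrite;

        memset(&pwrite, 0, sizeof(pwrite));
        pwrite.handle = handle;
        pwrite.offset = offset;
        pwrite.size = len;
        pwrite.data_ptr = (__u64)(uintptr_t)data;  /* 64-bit pointer field */

        return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}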
| 437 | |||
| 438 | struct drm_i915_gem_mmap { | ||
| 439 | /** Handle for the object being mapped. */ | ||
| 440 | __u32 handle; | ||
| 441 | __u32 pad; | ||
| 442 | /** Offset in the object to map. */ | ||
| 443 | __u64 offset; | ||
| 444 | /** | ||
| 445 | * Length of data to map. | ||
| 446 | * | ||
| 447 | * The value will be page-aligned. | ||
| 448 | */ | ||
| 449 | __u64 size; | ||
| 450 | /** | ||
| 451 | * Returned pointer the data was mapped at. | ||
| 452 | * | ||
| 453 | * This is a fixed-size type for 32/64 compatibility. | ||
| 454 | */ | ||
| 455 | __u64 addr_ptr; | ||
| 456 | }; | ||
| 457 | |||
| 458 | struct drm_i915_gem_mmap_gtt { | ||
| 459 | /** Handle for the object being mapped. */ | ||
| 460 | __u32 handle; | ||
| 461 | __u32 pad; | ||
| 462 | /** | ||
| 463 | * Fake offset to use for subsequent mmap call | ||
| 464 | * | ||
| 465 | * This is a fixed-size type for 32/64 compatibility. | ||
| 466 | */ | ||
| 467 | __u64 offset; | ||
| 468 | }; | ||
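The mmap_gtt ioctl does not map anything itself; it only hands back a fake offset that is then passed to an ordinary mmap() on the DRM fd. A sketch of the two-step mapping follows, under the usual assumptions (`fd`, handle, request macro from earlier in this header); on 32-bit builds a 64-bit off_t (mmap64 or _FILE_OFFSET_BITS=64) may be needed for the fake offset.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Map `size` bytes of the object through the GTT aperture (sketch). */
static void *gem_mmap_gtt(int fd, __u32 handle, __u64 size)
{
        struct drm_i915_gem_mmap_gtt arg;
        void *ptr;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) != 0)
                return NULL;

        /* arg.offset is a token for mmap(), not a real file offset. */
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, arg.offset);
        return ptr == MAP_FAILED ? NULL : ptr;
}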
| 469 | |||
| 470 | struct drm_i915_gem_set_domain { | ||
| 471 | /** Handle for the object */ | ||
| 472 | __u32 handle; | ||
| 473 | |||
| 474 | /** New read domains */ | ||
| 475 | __u32 read_domains; | ||
| 476 | |||
| 477 | /** New write domain */ | ||
| 478 | __u32 write_domain; | ||
| 479 | }; | ||
| 480 | |||
| 481 | struct drm_i915_gem_sw_finish { | ||
| 482 | /** Handle for the object */ | ||
| 483 | __u32 handle; | ||
| 484 | }; | ||
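Taken together, set_domain and sw_finish bracket direct CPU access: move the object into the CPU domain before touching a CPU mapping, and report the end of software writes afterwards so scanout buffers get flushed. A sketch under the same assumptions as the earlier examples (the helper names are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Prepare an object for CPU writes through a CPU mapping (sketch). */
static int cpu_write_begin(int fd, __u32 handle)
{
        struct drm_i915_gem_set_domain sd;

        memset(&sd, 0, sizeof(sd));
        sd.handle = handle;
        sd.read_domains = I915_GEM_DOMAIN_CPU;   /* defined below */
        sd.write_domain = I915_GEM_DOMAIN_CPU;

        return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}

/* Tell the kernel that software writes to the object are done (sketch). */
static int cpu_write_end(int fd, __u32 handle)
{
        struct drm_i915_gem_sw_finish fin;

        memset(&fin, 0, sizeof(fin));
        fin.handle = handle;

        return ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &fin);
}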
| 485 | |||
| 486 | struct drm_i915_gem_relocation_entry { | ||
| 487 | /** | ||
| 488 | * Handle of the buffer being pointed to by this relocation entry. | ||
| 489 | * | ||
| 490 | * It's appealing to make this an index into the mm_validate_entry | ||
| 491 | * list to refer to the buffer, but this allows the driver to create | ||
| 492 | * a relocation list for state buffers and not re-write it per | ||
| 493 | * exec using the buffer. | ||
| 494 | */ | ||
| 495 | __u32 target_handle; | ||
| 496 | |||
| 497 | /** | ||
| 498 | * Value to be added to the offset of the target buffer to make up | ||
| 499 | * the relocation entry. | ||
| 500 | */ | ||
| 501 | __u32 delta; | ||
| 502 | |||
| 503 | /** Offset in the buffer the relocation entry will be written into */ | ||
| 504 | __u64 offset; | ||
| 505 | |||
| 506 | /** | ||
| 507 | * Offset value of the target buffer that the relocation entry was last | ||
| 508 | * written as. | ||
| 509 | * | ||
| 510 | * If the buffer has the same offset as last time, we can skip syncing | ||
| 511 | * and writing the relocation. This value is written back out by | ||
| 512 | * the execbuffer ioctl when the relocation is written. | ||
| 513 | */ | ||
| 514 | __u64 presumed_offset; | ||
| 515 | |||
| 516 | /** | ||
| 517 | * Target memory domains read by this operation. | ||
| 518 | */ | ||
| 519 | __u32 read_domains; | ||
| 520 | |||
| 521 | /** | ||
| 522 | * Target memory domains written by this operation. | ||
| 523 | * | ||
| 524 | * Note that only one domain may be written by the whole | ||
| 525 | * execbuffer operation, so that where there are conflicts, | ||
| 526 | * the application will get -EINVAL back. | ||
| 527 | */ | ||
| 528 | __u32 write_domain; | ||
| 529 | }; | ||
| 530 | |||
| 531 | /** @{ | ||
| 532 | * Intel memory domains | ||
| 533 | * | ||
| 534 | * Most of these just align with the various caches in | ||
| 535 | * the system and are used to flush and invalidate as | ||
| 536 | * objects end up cached in different domains. | ||
| 537 | */ | ||
| 538 | /** CPU cache */ | ||
| 539 | #define I915_GEM_DOMAIN_CPU 0x00000001 | ||
| 540 | /** Render cache, used by 2D and 3D drawing */ | ||
| 541 | #define I915_GEM_DOMAIN_RENDER 0x00000002 | ||
| 542 | /** Sampler cache, used by texture engine */ | ||
| 543 | #define I915_GEM_DOMAIN_SAMPLER 0x00000004 | ||
| 544 | /** Command queue, used to load batch buffers */ | ||
| 545 | #define I915_GEM_DOMAIN_COMMAND 0x00000008 | ||
| 546 | /** Instruction cache, used by shader programs */ | ||
| 547 | #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 | ||
| 548 | /** Vertex address cache */ | ||
| 549 | #define I915_GEM_DOMAIN_VERTEX 0x00000020 | ||
| 550 | /** GTT domain - aperture and scanout */ | ||
| 551 | #define I915_GEM_DOMAIN_GTT 0x00000040 | ||
| 552 | /** @} */ | ||
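A relocation entry pairs a location inside one buffer with a target buffer and the domains the access uses. The sketch below fills a read-only relocation against the render domain; the helper name and the choice of domain are illustrative, not prescribed by this header.

#include <string.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Describe one pointer at `offset` inside a batch/state buffer that should
 * resolve to `delta` bytes into `target_handle` (sketch). */
static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
                       __u32 target_handle, __u64 offset, __u32 delta)
{
        memset(reloc, 0, sizeof(*reloc));
        reloc->target_handle = target_handle;
        reloc->offset = offset;           /* where the pointer lives */
        reloc->delta = delta;             /* added to the target's offset */
        reloc->presumed_offset = 0;       /* unknown; kernel writes it back */
        reloc->read_domains = I915_GEM_DOMAIN_RENDER;
        reloc->write_domain = 0;          /* target is only read here */
}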
| 553 | |||
| 554 | struct drm_i915_gem_exec_object { | ||
| 555 | /** | ||
| 556 | * User's handle for a buffer to be bound into the GTT for this | ||
| 557 | * operation. | ||
| 558 | */ | ||
| 559 | __u32 handle; | ||
| 560 | |||
| 561 | /** Number of relocations to be performed on this buffer */ | ||
| 562 | __u32 relocation_count; | ||
| 563 | /** | ||
| 564 | * Pointer to array of struct drm_i915_gem_relocation_entry containing | ||
| 565 | * the relocations to be performed in this buffer. | ||
| 566 | */ | ||
| 567 | __u64 relocs_ptr; | ||
| 568 | |||
| 569 | /** Required alignment in graphics aperture */ | ||
| 570 | __u64 alignment; | ||
| 571 | |||
| 572 | /** | ||
| 573 | * Returned value of the updated offset of the object, for future | ||
| 574 | * presumed_offset writes. | ||
| 575 | */ | ||
| 576 | __u64 offset; | ||
| 577 | }; | ||
| 578 | |||
| 579 | struct drm_i915_gem_execbuffer { | ||
| 580 | /** | ||
| 581 | * List of buffers to be validated with their relocations to be | ||
| 582 | * performed on them. | ||
| 583 | * | ||
| 584 | * This is a pointer to an array of struct drm_i915_gem_exec_object. | ||
| 585 | * | ||
| 586 | * These buffers must be listed in an order such that all relocations | ||
| 587 | * a buffer is performing refer to buffers that have already appeared | ||
| 588 | * in the validate list. | ||
| 589 | */ | ||
| 590 | __u64 buffers_ptr; | ||
| 591 | __u32 buffer_count; | ||
| 592 | |||
| 593 | /** Offset in the batchbuffer to start execution from. */ | ||
| 594 | __u32 batch_start_offset; | ||
| 595 | /** Bytes used in batchbuffer from batch_start_offset */ | ||
| 596 | __u32 batch_len; | ||
| 597 | __u32 DR1; | ||
| 598 | __u32 DR4; | ||
| 599 | __u32 num_cliprects; | ||
| 600 | /** This is a struct drm_clip_rect *cliprects */ | ||
| 601 | __u64 cliprects_ptr; | ||
| 602 | }; | ||
| 603 | |||
| 604 | struct drm_i915_gem_exec_object2 { | ||
| 605 | /** | ||
| 606 | * User's handle for a buffer to be bound into the GTT for this | ||
| 607 | * operation. | ||
| 608 | */ | ||
| 609 | __u32 handle; | ||
| 610 | |||
| 611 | /** Number of relocations to be performed on this buffer */ | ||
| 612 | __u32 relocation_count; | ||
| 613 | /** | ||
| 614 | * Pointer to array of struct drm_i915_gem_relocation_entry containing | ||
| 615 | * the relocations to be performed in this buffer. | ||
| 616 | */ | ||
| 617 | __u64 relocs_ptr; | ||
| 618 | |||
| 619 | /** Required alignment in graphics aperture */ | ||
| 620 | __u64 alignment; | ||
| 621 | |||
| 622 | /** | ||
| 623 | * Returned value of the updated offset of the object, for future | ||
| 624 | * presumed_offset writes. | ||
| 625 | */ | ||
| 626 | __u64 offset; | ||
| 627 | |||
| 628 | #define EXEC_OBJECT_NEEDS_FENCE (1<<0) | ||
| 629 | __u64 flags; | ||
| 630 | __u64 rsvd1; | ||
| 631 | __u64 rsvd2; | ||
| 632 | }; | ||
| 633 | |||
| 634 | struct drm_i915_gem_execbuffer2 { | ||
| 635 | /** | ||
| 636 | * List of gem_exec_object2 structs | ||
| 637 | */ | ||
| 638 | __u64 buffers_ptr; | ||
| 639 | __u32 buffer_count; | ||
| 640 | |||
| 641 | /** Offset in the batchbuffer to start execution from. */ | ||
| 642 | __u32 batch_start_offset; | ||
| 643 | /** Bytes used in batchbuffer from batch_start_offset */ | ||
| 644 | __u32 batch_len; | ||
| 645 | __u32 DR1; | ||
| 646 | __u32 DR4; | ||
| 647 | __u32 num_cliprects; | ||
| 648 | /** This is a struct drm_clip_rect *cliprects */ | ||
| 649 | __u64 cliprects_ptr; | ||
| 650 | #define I915_EXEC_RING_MASK (7<<0) | ||
| 651 | #define I915_EXEC_DEFAULT (0<<0) | ||
| 652 | #define I915_EXEC_RENDER (1<<0) | ||
| 653 | #define I915_EXEC_BSD (2<<0) | ||
| 654 | #define I915_EXEC_BLT (3<<0) | ||
| 655 | |||
| 656 | /* Used for switching the constants addressing mode on the gen4+ RENDER ring. | ||
| 657 | * Gen6+ only supports relative addressing to dynamic state (default) and | ||
| 658 | * absolute addressing. | ||
| 659 | * | ||
| 660 | * These flags are ignored for the BSD and BLT rings. | ||
| 661 | */ | ||
| 662 | #define I915_EXEC_CONSTANTS_MASK (3<<6) | ||
| 663 | #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ | ||
| 664 | #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) | ||
| 665 | #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ | ||
| 666 | __u64 flags; | ||
| 667 | __u64 rsvd1; /* now used for context info */ | ||
| 668 | __u64 rsvd2; | ||
| 669 | }; | ||
| 670 | |||
| 671 | /** Resets the SO write offset registers for transform feedback on gen7. */ | ||
| 672 | #define I915_EXEC_GEN7_SOL_RESET (1<<8) | ||
| 673 | |||
| 674 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) | ||
| 675 | #define i915_execbuffer2_set_context_id(eb2, context) \ | ||
| 676 | (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK | ||
| 677 | #define i915_execbuffer2_get_context_id(eb2) \ | ||
| 678 | ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) | ||
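Putting the pieces together, a minimal execbuffer2 submission passes an array of exec_object2 entries whose last element is the batch, selects a ring in the flags, and stores the context id through the macro above. The sketch assumes `fd`, a batch object handle and a context id already exist, and that DRM_IOCTL_I915_GEM_EXECBUFFER2 is the request macro defined earlier in this header.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Submit a single batch buffer with no relocations (sketch). */
static int submit_batch(int fd, __u32 batch_handle, __u32 batch_len,
                        __u32 ctx_id)
{
        struct drm_i915_gem_exec_object2 obj;
        struct drm_i915_gem_execbuffer2 execbuf;

        memset(&obj, 0, sizeof(obj));
        obj.handle = batch_handle;       /* last (and only) entry = batch */

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = (__u64)(uintptr_t)&obj;
        execbuf.buffer_count = 1;
        execbuf.batch_start_offset = 0;
        execbuf.batch_len = batch_len;
        execbuf.flags = I915_EXEC_RENDER;
        i915_execbuffer2_set_context_id(execbuf, ctx_id);

        return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}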
| 679 | |||
| 680 | struct drm_i915_gem_pin { | ||
| 681 | /** Handle of the buffer to be pinned. */ | ||
| 682 | __u32 handle; | ||
| 683 | __u32 pad; | ||
| 684 | |||
| 685 | /** alignment required within the aperture */ | ||
| 686 | __u64 alignment; | ||
| 687 | |||
| 688 | /** Returned GTT offset of the buffer. */ | ||
| 689 | __u64 offset; | ||
| 690 | }; | ||
| 691 | |||
| 692 | struct drm_i915_gem_unpin { | ||
| 693 | /** Handle of the buffer to be unpinned. */ | ||
| 694 | __u32 handle; | ||
| 695 | __u32 pad; | ||
| 696 | }; | ||
| 697 | |||
| 698 | struct drm_i915_gem_busy { | ||
| 699 | /** Handle of the buffer to check for busy */ | ||
| 700 | __u32 handle; | ||
| 701 | |||
| 702 | /** Return busy status (1 if busy, 0 if idle). | ||
| 703 | * The high word indicates on which rings the object is | ||
| 704 | * currently busy: | ||
| 705 | * bits 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc.) | ||
| 706 | */ | ||
| 707 | __u32 busy; | ||
| 708 | }; | ||
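The busy ioctl is a non-blocking poll: it reports whether the object is still referenced by the GPU and, in the high word, on which rings. A sketch, with the usual assumptions about `fd` and the request macro:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Returns 1 if busy, 0 if idle, -1 on error; optionally reports the
 * ring bits from the high word (sketch). */
static int gem_busy(int fd, __u32 handle, __u32 *rings)
{
        struct drm_i915_gem_busy busy;

        memset(&busy, 0, sizeof(busy));
        busy.handle = handle;

        if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
                return -1;

        if (rings)
                *rings = busy.busy >> 16;   /* bit 0 render, 1 bsd, 2 blt */

        return busy.busy ? 1 : 0;
}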
| 709 | |||
| 710 | #define I915_CACHING_NONE 0 | ||
| 711 | #define I915_CACHING_CACHED 1 | ||
| 712 | |||
| 713 | struct drm_i915_gem_caching { | ||
| 714 | /** | ||
| 715 | * Handle of the buffer to set/get the caching level of. */ | ||
| 716 | __u32 handle; | ||
| 717 | |||
| 718 | /** | ||
| 719 | * Caching level to apply or return value | ||
| 720 | * | ||
| 721 | * bits 0-15 are for generic caching control (i.e. the above defined | ||
| 722 | * values). bits 16-31 are reserved for platform-specific variations | ||
| 723 | * (e.g. l3$ caching on gen7). */ | ||
| 724 | __u32 caching; | ||
| 725 | }; | ||
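Switching an object between uncached and cached backing is a single ioctl on the structure above. A sketch, assuming the set-caching request macro (DRM_IOCTL_I915_GEM_SET_CACHING) defined earlier in this header:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Request cached (or uncached) backing for the object (sketch). */
static int gem_set_caching(int fd, __u32 handle, __u32 level)
{
        struct drm_i915_gem_caching arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.caching = level;             /* e.g. I915_CACHING_CACHED */

        return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}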
| 726 | |||
| 727 | #define I915_TILING_NONE 0 | ||
| 728 | #define I915_TILING_X 1 | ||
| 729 | #define I915_TILING_Y 2 | ||
| 730 | |||
| 731 | #define I915_BIT_6_SWIZZLE_NONE 0 | ||
| 732 | #define I915_BIT_6_SWIZZLE_9 1 | ||
| 733 | #define I915_BIT_6_SWIZZLE_9_10 2 | ||
| 734 | #define I915_BIT_6_SWIZZLE_9_11 3 | ||
| 735 | #define I915_BIT_6_SWIZZLE_9_10_11 4 | ||
| 736 | /* Not seen by userland */ | ||
| 737 | #define I915_BIT_6_SWIZZLE_UNKNOWN 5 | ||
| 738 | /* Seen by userland. */ | ||
| 739 | #define I915_BIT_6_SWIZZLE_9_17 6 | ||
| 740 | #define I915_BIT_6_SWIZZLE_9_10_17 7 | ||
| 741 | |||
| 742 | struct drm_i915_gem_set_tiling { | ||
| 743 | /** Handle of the buffer to have its tiling state updated */ | ||
| 744 | __u32 handle; | ||
| 745 | |||
| 746 | /** | ||
| 747 | * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, | ||
| 748 | * I915_TILING_Y). | ||
| 749 | * | ||
| 750 | * This value is to be set on request, and will be updated by the | ||
| 751 | * kernel on successful return with the actual chosen tiling layout. | ||
| 752 | * | ||
| 753 | * The tiling mode may be demoted to I915_TILING_NONE when the system | ||
| 754 | * has bit 6 swizzling that can't be managed correctly by GEM. | ||
| 755 | * | ||
| 756 | * Buffer contents become undefined when changing tiling_mode. | ||
| 757 | */ | ||
| 758 | __u32 tiling_mode; | ||
| 759 | |||
| 760 | /** | ||
| 761 | * Stride in bytes for the object when in I915_TILING_X or | ||
| 762 | * I915_TILING_Y. | ||
| 763 | */ | ||
| 764 | __u32 stride; | ||
| 765 | |||
| 766 | /** | ||
| 767 | * Returned address bit 6 swizzling required for CPU access through | ||
| 768 | * mmap mapping. | ||
| 769 | */ | ||
| 770 | __u32 swizzle_mode; | ||
| 771 | }; | ||
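Because the kernel may demote the requested tiling (and always reports the swizzle mode it chose), callers have to read the structure back after the ioctl. A sketch that requests X tiling, with the usual assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Ask for X tiling with the given stride; returns the tiling mode the
 * kernel actually chose, or -1 on error (sketch). */
static int gem_set_tiling_x(int fd, __u32 handle, __u32 stride)
{
        struct drm_i915_gem_set_tiling arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.tiling_mode = I915_TILING_X;
        arg.stride = stride;             /* bytes per tiled row */

        if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) != 0)
                return -1;

        /* arg.tiling_mode may have been demoted; arg.swizzle_mode tells the
         * caller how CPU mmap addresses are swizzled. */
        return arg.tiling_mode;
}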
| 772 | |||
| 773 | struct drm_i915_gem_get_tiling { | ||
| 774 | /** Handle of the buffer to get tiling state for. */ | ||
| 775 | __u32 handle; | ||
| 776 | |||
| 777 | /** | ||
| 778 | * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, | ||
| 779 | * I915_TILING_Y). | ||
| 780 | */ | ||
| 781 | __u32 tiling_mode; | ||
| 782 | |||
| 783 | /** | ||
| 784 | * Returned address bit 6 swizzling required for CPU access through | ||
| 785 | * mmap mapping. | ||
| 786 | */ | ||
| 787 | __u32 swizzle_mode; | ||
| 788 | }; | ||
| 789 | |||
| 790 | struct drm_i915_gem_get_aperture { | ||
| 791 | /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ | ||
| 792 | __u64 aper_size; | ||
| 793 | |||
| 794 | /** | ||
| 795 | * Available space in the aperture used by i915_gem_execbuffer, in | ||
| 796 | * bytes | ||
| 797 | */ | ||
| 798 | __u64 aper_available_size; | ||
| 799 | }; | ||
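Querying the aperture is how userspace sizes its working set before it starts thrashing the GTT; a short sketch under the same assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Report total aperture size in bytes, 0 on error (sketch). */
static __u64 gem_aperture_size(int fd)
{
        struct drm_i915_gem_get_aperture ap;

        memset(&ap, 0, sizeof(ap));
        if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &ap) != 0)
                return 0;

        /* ap.aper_available_size is the portion not currently pinned. */
        return ap.aper_size;
}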
| 800 | |||
| 801 | struct drm_i915_get_pipe_from_crtc_id { | ||
| 802 | /** ID of the CRTC being requested */ | ||
| 803 | __u32 crtc_id; | ||
| 804 | |||
| 805 | /** pipe of the requested CRTC */ | ||
| 806 | __u32 pipe; | ||
| 807 | }; | ||
| 808 | |||
| 809 | #define I915_MADV_WILLNEED 0 | ||
| 810 | #define I915_MADV_DONTNEED 1 | ||
| 811 | #define __I915_MADV_PURGED 2 /* internal state */ | ||
| 812 | |||
| 813 | struct drm_i915_gem_madvise { | ||
| 814 | /** Handle of the buffer to change the backing store advice */ | ||
| 815 | __u32 handle; | ||
| 816 | |||
| 817 | /* Advice: either the buffer will be needed again in the near future, | ||
| 818 | * or won't be and could be discarded under memory pressure. | ||
| 819 | */ | ||
| 820 | __u32 madv; | ||
| 821 | |||
| 822 | /** Whether the backing store still exists. */ | ||
| 823 | __u32 retained; | ||
| 824 | }; | ||
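The madvise ioctl lets a cache of idle buffers be reclaimed under memory pressure; when marking a buffer needed again, `retained` tells the caller whether the old contents survived. A sketch with the usual assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Mark an object purgeable (DONTNEED) or needed again (WILLNEED).
 * Returns madv.retained, or -1 on error (sketch). */
static int gem_madvise(int fd, __u32 handle, __u32 advice)
{
        struct drm_i915_gem_madvise madv;

        memset(&madv, 0, sizeof(madv));
        madv.handle = handle;
        madv.madv = advice;              /* I915_MADV_DONTNEED / WILLNEED */

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) != 0)
                return -1;

        /* retained == 0 after WILLNEED means the pages were purged and the
         * buffer contents must be regenerated. */
        return madv.retained;
}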
| 825 | |||
| 826 | /* flags */ | ||
| 827 | #define I915_OVERLAY_TYPE_MASK 0xff | ||
| 828 | #define I915_OVERLAY_YUV_PLANAR 0x01 | ||
| 829 | #define I915_OVERLAY_YUV_PACKED 0x02 | ||
| 830 | #define I915_OVERLAY_RGB 0x03 | ||
| 831 | |||
| 832 | #define I915_OVERLAY_DEPTH_MASK 0xff00 | ||
| 833 | #define I915_OVERLAY_RGB24 0x1000 | ||
| 834 | #define I915_OVERLAY_RGB16 0x2000 | ||
| 835 | #define I915_OVERLAY_RGB15 0x3000 | ||
| 836 | #define I915_OVERLAY_YUV422 0x0100 | ||
| 837 | #define I915_OVERLAY_YUV411 0x0200 | ||
| 838 | #define I915_OVERLAY_YUV420 0x0300 | ||
| 839 | #define I915_OVERLAY_YUV410 0x0400 | ||
| 840 | |||
| 841 | #define I915_OVERLAY_SWAP_MASK 0xff0000 | ||
| 842 | #define I915_OVERLAY_NO_SWAP 0x000000 | ||
| 843 | #define I915_OVERLAY_UV_SWAP 0x010000 | ||
| 844 | #define I915_OVERLAY_Y_SWAP 0x020000 | ||
| 845 | #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 | ||
| 846 | |||
| 847 | #define I915_OVERLAY_FLAGS_MASK 0xff000000 | ||
| 848 | #define I915_OVERLAY_ENABLE 0x01000000 | ||
| 849 | |||
| 850 | struct drm_intel_overlay_put_image { | ||
| 851 | /* various flags and src format description */ | ||
| 852 | __u32 flags; | ||
| 853 | /* source picture description */ | ||
| 854 | __u32 bo_handle; | ||
| 855 | /* stride values and offsets are in bytes, buffer relative */ | ||
| 856 | __u16 stride_Y; /* stride for packed formats */ | ||
| 857 | __u16 stride_UV; | ||
| 858 | __u32 offset_Y; /* offset for packed formats */ | ||
| 859 | __u32 offset_U; | ||
| 860 | __u32 offset_V; | ||
| 861 | /* in pixels */ | ||
| 862 | __u16 src_width; | ||
| 863 | __u16 src_height; | ||
| 864 | /* to compensate for the scaling factors of partially covered surfaces */ | ||
| 865 | __u16 src_scan_width; | ||
| 866 | __u16 src_scan_height; | ||
| 867 | /* output crtc description */ | ||
| 868 | __u32 crtc_id; | ||
| 869 | __u16 dst_x; | ||
| 870 | __u16 dst_y; | ||
| 871 | __u16 dst_width; | ||
| 872 | __u16 dst_height; | ||
| 873 | }; | ||
| 874 | |||
| 875 | /* flags */ | ||
| 876 | #define I915_OVERLAY_UPDATE_ATTRS (1<<0) | ||
| 877 | #define I915_OVERLAY_UPDATE_GAMMA (1<<1) | ||
| 878 | struct drm_intel_overlay_attrs { | ||
| 879 | __u32 flags; | ||
| 880 | __u32 color_key; | ||
| 881 | __s32 brightness; | ||
| 882 | __u32 contrast; | ||
| 883 | __u32 saturation; | ||
| 884 | __u32 gamma0; | ||
| 885 | __u32 gamma1; | ||
| 886 | __u32 gamma2; | ||
| 887 | __u32 gamma3; | ||
| 888 | __u32 gamma4; | ||
| 889 | __u32 gamma5; | ||
| 890 | }; | ||
| 891 | |||
| 892 | /* | ||
| 893 | * Intel sprite handling | ||
| 894 | * | ||
| 895 | * Color keying works with a min/mask/max tuple. Both source and destination | ||
| 896 | * color keying is allowed. | ||
| 897 | * | ||
| 898 | * Source keying: | ||
| 899 | * Sprite pixels within the min & max values, masked against the color channels | ||
| 900 | * specified in the mask field, will be transparent. All other pixels will | ||
| 901 | * be displayed on top of the primary plane. For RGB surfaces, only the min | ||
| 902 | * and mask fields will be used; ranged compares are not allowed. | ||
| 903 | * | ||
| 904 | * Destination keying: | ||
| 905 | * Primary plane pixels that match the min value, masked against the color | ||
| 906 | * channels specified in the mask field, will be replaced by corresponding | ||
| 907 | * pixels from the sprite plane. | ||
| 908 | * | ||
| 909 | * Note that source & destination keying are exclusive; only one can be | ||
| 910 | * active on a given plane. | ||
| 911 | */ | ||
| 912 | |||
| 913 | #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ | ||
| 914 | #define I915_SET_COLORKEY_DESTINATION (1<<1) | ||
| 915 | #define I915_SET_COLORKEY_SOURCE (1<<2) | ||
| 916 | struct drm_intel_sprite_colorkey { | ||
| 917 | __u32 plane_id; | ||
| 918 | __u32 min_value; | ||
| 919 | __u32 channel_mask; | ||
| 920 | __u32 max_value; | ||
| 921 | __u32 flags; | ||
| 922 | }; | ||
| 923 | |||
| 924 | struct drm_i915_gem_wait { | ||
| 925 | /** Handle of BO we shall wait on */ | ||
| 926 | __u32 bo_handle; | ||
| 927 | __u32 flags; | ||
| 928 | /** Number of nanoseconds to wait; returns the time remaining. */ | ||
| 929 | __s64 timeout_ns; | ||
| 930 | }; | ||
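The wait ioctl blocks until the object is idle or the timeout expires, and writes the remaining time back so callers can retry with a budget. A sketch with the usual assumptions about `fd` and the request macro:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Wait up to *timeout_ns for the object to go idle; on return the
 * remaining budget is written back (sketch). */
static int gem_wait(int fd, __u32 handle, __s64 *timeout_ns)
{
        struct drm_i915_gem_wait wait;

        memset(&wait, 0, sizeof(wait));
        wait.bo_handle = handle;
        wait.timeout_ns = *timeout_ns;   /* e.g. 1000000000 for one second */

        if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0)
                return -1;               /* error or timeout */

        *timeout_ns = wait.timeout_ns;
        return 0;
}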
| 931 | |||
| 932 | struct drm_i915_gem_context_create { | ||
| 933 | /* Output: id of the new context */ | ||
| 934 | __u32 ctx_id; | ||
| 935 | __u32 pad; | ||
| 936 | }; | ||
| 937 | |||
| 938 | struct drm_i915_gem_context_destroy { | ||
| 939 | __u32 ctx_id; | ||
| 940 | __u32 pad; | ||
| 941 | }; | ||
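Hardware contexts are created and destroyed with the pair of structures above; the returned ctx_id is what gets packed into execbuffer2's rsvd1 via the macros earlier in this header. A sketch with the usual assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>        /* install path assumed */

/* Create a hardware context; returns its id, or 0 on failure (the
 * default context uses id 0, so 0 doubles as an error value here). */
static __u32 context_create(int fd)
{
        struct drm_i915_gem_context_create create;

        memset(&create, 0, sizeof(create));
        if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) != 0)
                return 0;

        return create.ctx_id;
}

static void context_destroy(int fd, __u32 ctx_id)
{
        struct drm_i915_gem_context_destroy destroy;

        memset(&destroy, 0, sizeof(destroy));
        destroy.ctx_id = ctx_id;
        ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}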
| 942 | |||
| 943 | struct drm_i915_reg_read { | ||
| 944 | __u64 offset; | ||
| 945 | __u64 val; /* Return value */ | ||
| 946 | }; | ||
| 947 | #endif /* _UAPI_I915_DRM_H_ */ | ||
diff --git a/include/drm/mga_drm.h b/include/uapi/drm/mga_drm.h index 2375bfd6e5e9..2375bfd6e5e9 100644 --- a/include/drm/mga_drm.h +++ b/include/uapi/drm/mga_drm.h | |||
diff --git a/include/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h index 2a5769fdf8ba..2a5769fdf8ba 100644 --- a/include/drm/nouveau_drm.h +++ b/include/uapi/drm/nouveau_drm.h | |||
diff --git a/include/drm/r128_drm.h b/include/uapi/drm/r128_drm.h index 8d8878b55f55..8d8878b55f55 100644 --- a/include/drm/r128_drm.h +++ b/include/uapi/drm/r128_drm.h | |||
diff --git a/include/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 4766c0f6a838..4766c0f6a838 100644 --- a/include/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h | |||
diff --git a/include/drm/savage_drm.h b/include/uapi/drm/savage_drm.h index 818d49be2e6e..818d49be2e6e 100644 --- a/include/drm/savage_drm.h +++ b/include/uapi/drm/savage_drm.h | |||
diff --git a/include/drm/sis_drm.h b/include/uapi/drm/sis_drm.h index df3763222d73..df3763222d73 100644 --- a/include/drm/sis_drm.h +++ b/include/uapi/drm/sis_drm.h | |||
diff --git a/include/drm/via_drm.h b/include/uapi/drm/via_drm.h index 8b0533ccbd5a..8b0533ccbd5a 100644 --- a/include/drm/via_drm.h +++ b/include/uapi/drm/via_drm.h | |||
diff --git a/include/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index bcb0912afe7a..bcb0912afe7a 100644 --- a/include/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
