diff options
77 files changed, 3677 insertions, 446 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 7583dc7cf64d..910c923a9b86 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl | |||
| @@ -389,7 +389,7 @@ | |||
| 389 | </para> | 389 | </para> |
| 390 | <para> | 390 | <para> |
| 391 | If your driver supports memory management (it should!), you'll | 391 | If your driver supports memory management (it should!), you'll |
| 392 | need to set that up at load time as well. How you intialize | 392 | need to set that up at load time as well. How you initialize |
| 393 | it depends on which memory manager you're using, TTM or GEM. | 393 | it depends on which memory manager you're using, TTM or GEM. |
| 394 | </para> | 394 | </para> |
| 395 | <sect3> | 395 | <sect3> |
| @@ -399,7 +399,7 @@ | |||
| 399 | aperture space for graphics devices. TTM supports both UMA devices | 399 | aperture space for graphics devices. TTM supports both UMA devices |
| 400 | and devices with dedicated video RAM (VRAM), i.e. most discrete | 400 | and devices with dedicated video RAM (VRAM), i.e. most discrete |
| 401 | graphics devices. If your device has dedicated RAM, supporting | 401 | graphics devices. If your device has dedicated RAM, supporting |
| 402 | TTM is desireable. TTM also integrates tightly with your | 402 | TTM is desirable. TTM also integrates tightly with your |
| 403 | driver specific buffer execution function. See the radeon | 403 | driver specific buffer execution function. See the radeon |
| 404 | driver for examples. | 404 | driver for examples. |
| 405 | </para> | 405 | </para> |
| @@ -443,7 +443,7 @@ | |||
| 443 | likely eventually calling ttm_bo_global_init and | 443 | likely eventually calling ttm_bo_global_init and |
| 444 | ttm_bo_global_release, respectively. Also like the previous | 444 | ttm_bo_global_release, respectively. Also like the previous |
| 445 | object, ttm_global_item_ref is used to create an initial reference | 445 | object, ttm_global_item_ref is used to create an initial reference |
| 446 | count for the TTM, which will call your initalization function. | 446 | count for the TTM, which will call your initialization function. |
| 447 | </para> | 447 | </para> |
| 448 | </sect3> | 448 | </sect3> |
| 449 | <sect3> | 449 | <sect3> |
| @@ -557,7 +557,7 @@ void intel_crt_init(struct drm_device *dev) | |||
| 557 | CRT connector and encoder combination is created. A device | 557 | CRT connector and encoder combination is created. A device |
| 558 | specific i2c bus is also created, for fetching EDID data and | 558 | specific i2c bus is also created, for fetching EDID data and |
| 559 | performing monitor detection. Once the process is complete, | 559 | performing monitor detection. Once the process is complete, |
| 560 | the new connector is regsitered with sysfs, to make its | 560 | the new connector is registered with sysfs, to make its |
| 561 | properties available to applications. | 561 | properties available to applications. |
| 562 | </para> | 562 | </para> |
| 563 | <sect4> | 563 | <sect4> |
| @@ -581,12 +581,12 @@ void intel_crt_init(struct drm_device *dev) | |||
| 581 | <para> | 581 | <para> |
| 582 | For each encoder, CRTC and connector, several functions must | 582 | For each encoder, CRTC and connector, several functions must |
| 583 | be provided, depending on the object type. Encoder objects | 583 | be provided, depending on the object type. Encoder objects |
| 584 | need should provide a DPMS (basically on/off) function, mode fixup | 584 | need to provide a DPMS (basically on/off) function, mode fixup |
| 585 | (for converting requested modes into native hardware timings), | 585 | (for converting requested modes into native hardware timings), |
| 586 | and prepare, set and commit functions for use by the core DRM | 586 | and prepare, set and commit functions for use by the core DRM |
| 587 | helper functions. Connector helpers need to provide mode fetch and | 587 | helper functions. Connector helpers need to provide mode fetch and |
| 588 | validity functions as well as an encoder matching function for | 588 | validity functions as well as an encoder matching function for |
| 589 | returing an ideal encoder for a given connector. The core | 589 | returning an ideal encoder for a given connector. The core |
| 590 | connector functions include a DPMS callback, (deprecated) | 590 | connector functions include a DPMS callback, (deprecated) |
| 591 | save/restore routines, detection, mode probing, property handling, | 591 | save/restore routines, detection, mode probing, property handling, |
| 592 | and cleanup functions. | 592 | and cleanup functions. |
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 0f5bee90ee4e..962c2d8dd8d9 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
| @@ -347,7 +347,7 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev, | |||
| 347 | goto error_out; | 347 | goto error_out; |
| 348 | } | 348 | } |
| 349 | offset = 0; | 349 | offset = 0; |
| 350 | p = of_get_property(dev->node, "msi-available-ranges", &len); | 350 | p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); |
| 351 | if (p) | 351 | if (p) |
| 352 | offset = *p / IRQS_PER_MSI_REG; | 352 | offset = *p / IRQS_PER_MSI_REG; |
| 353 | 353 | ||
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index cd37e49e7034..30e1626b2e85 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
| @@ -1426,7 +1426,7 @@ int fsl_rio_setup(struct of_device *dev) | |||
| 1426 | port->iores.flags = IORESOURCE_MEM; | 1426 | port->iores.flags = IORESOURCE_MEM; |
| 1427 | port->iores.name = "rio_io_win"; | 1427 | port->iores.name = "rio_io_win"; |
| 1428 | 1428 | ||
| 1429 | priv->pwirq = irq_of_parse_and_map(dev->node, 0); | 1429 | priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); |
| 1430 | priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); | 1430 | priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); |
| 1431 | priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); | 1431 | priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); |
| 1432 | priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); | 1432 | priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); |
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 9d65b371de64..983530ba04a7 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
| @@ -1158,7 +1158,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev, | |||
| 1158 | struct device *dev = &ofdev->dev; | 1158 | struct device *dev = &ofdev->dev; |
| 1159 | struct crypto4xx_core_device *core_dev; | 1159 | struct crypto4xx_core_device *core_dev; |
| 1160 | 1160 | ||
| 1161 | rc = of_address_to_resource(ofdev->node, 0, &res); | 1161 | rc = of_address_to_resource(ofdev->dev.of_node, 0, &res); |
| 1162 | if (rc) | 1162 | if (rc) |
| 1163 | return -ENODEV; | 1163 | return -ENODEV; |
| 1164 | 1164 | ||
| @@ -1215,13 +1215,13 @@ static int __init crypto4xx_probe(struct of_device *ofdev, | |||
| 1215 | (unsigned long) dev); | 1215 | (unsigned long) dev); |
| 1216 | 1216 | ||
| 1217 | /* Register for Crypto isr, Crypto Engine IRQ */ | 1217 | /* Register for Crypto isr, Crypto Engine IRQ */ |
| 1218 | core_dev->irq = irq_of_parse_and_map(ofdev->node, 0); | 1218 | core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); |
| 1219 | rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0, | 1219 | rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0, |
| 1220 | core_dev->dev->name, dev); | 1220 | core_dev->dev->name, dev); |
| 1221 | if (rc) | 1221 | if (rc) |
| 1222 | goto err_request_irq; | 1222 | goto err_request_irq; |
| 1223 | 1223 | ||
| 1224 | core_dev->dev->ce_base = of_iomap(ofdev->node, 0); | 1224 | core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); |
| 1225 | if (!core_dev->dev->ce_base) { | 1225 | if (!core_dev->dev->ce_base) { |
| 1226 | dev_err(dev, "failed to of_iomap\n"); | 1226 | dev_err(dev, "failed to of_iomap\n"); |
| 1227 | goto err_iomap; | 1227 | goto err_iomap; |
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 8566be832f51..23163fda5035 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
| @@ -251,16 +251,10 @@ static void n2_base_ctx_init(struct n2_base_ctx *ctx) | |||
| 251 | struct n2_hash_ctx { | 251 | struct n2_hash_ctx { |
| 252 | struct n2_base_ctx base; | 252 | struct n2_base_ctx base; |
| 253 | 253 | ||
| 254 | struct crypto_ahash *fallback; | 254 | struct crypto_ahash *fallback_tfm; |
| 255 | }; | ||
| 255 | 256 | ||
| 256 | /* These next three members must match the layout created by | 257 | struct n2_hash_req_ctx { |
| 257 | * crypto_init_shash_ops_async. This allows us to properly | ||
| 258 | * plumb requests we can't do in hardware down to the fallback | ||
| 259 | * operation, providing all of the data structures and layouts | ||
| 260 | * expected by those paths. | ||
| 261 | */ | ||
| 262 | struct ahash_request fallback_req; | ||
| 263 | struct shash_desc fallback_desc; | ||
| 264 | union { | 258 | union { |
| 265 | struct md5_state md5; | 259 | struct md5_state md5; |
| 266 | struct sha1_state sha1; | 260 | struct sha1_state sha1; |
| @@ -269,56 +263,62 @@ struct n2_hash_ctx { | |||
| 269 | 263 | ||
| 270 | unsigned char hash_key[64]; | 264 | unsigned char hash_key[64]; |
| 271 | unsigned char keyed_zero_hash[32]; | 265 | unsigned char keyed_zero_hash[32]; |
| 266 | |||
| 267 | struct ahash_request fallback_req; | ||
| 272 | }; | 268 | }; |
| 273 | 269 | ||
| 274 | static int n2_hash_async_init(struct ahash_request *req) | 270 | static int n2_hash_async_init(struct ahash_request *req) |
| 275 | { | 271 | { |
| 272 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
| 276 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 273 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 277 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | 274 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); |
| 278 | 275 | ||
| 279 | ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); | 276 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); |
| 280 | ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 277 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 281 | 278 | ||
| 282 | return crypto_ahash_init(&ctx->fallback_req); | 279 | return crypto_ahash_init(&rctx->fallback_req); |
| 283 | } | 280 | } |
| 284 | 281 | ||
| 285 | static int n2_hash_async_update(struct ahash_request *req) | 282 | static int n2_hash_async_update(struct ahash_request *req) |
| 286 | { | 283 | { |
| 284 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
| 287 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 285 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 288 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | 286 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); |
| 289 | 287 | ||
| 290 | ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); | 288 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); |
| 291 | ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 289 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 292 | ctx->fallback_req.nbytes = req->nbytes; | 290 | rctx->fallback_req.nbytes = req->nbytes; |
| 293 | ctx->fallback_req.src = req->src; | 291 | rctx->fallback_req.src = req->src; |
| 294 | 292 | ||
| 295 | return crypto_ahash_update(&ctx->fallback_req); | 293 | return crypto_ahash_update(&rctx->fallback_req); |
| 296 | } | 294 | } |
| 297 | 295 | ||
| 298 | static int n2_hash_async_final(struct ahash_request *req) | 296 | static int n2_hash_async_final(struct ahash_request *req) |
| 299 | { | 297 | { |
| 298 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
| 300 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 299 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 301 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | 300 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); |
| 302 | 301 | ||
| 303 | ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); | 302 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); |
| 304 | ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 303 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 305 | ctx->fallback_req.result = req->result; | 304 | rctx->fallback_req.result = req->result; |
| 306 | 305 | ||
| 307 | return crypto_ahash_final(&ctx->fallback_req); | 306 | return crypto_ahash_final(&rctx->fallback_req); |
| 308 | } | 307 | } |
| 309 | 308 | ||
| 310 | static int n2_hash_async_finup(struct ahash_request *req) | 309 | static int n2_hash_async_finup(struct ahash_request *req) |
| 311 | { | 310 | { |
| 311 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
| 312 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 312 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 313 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | 313 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); |
| 314 | 314 | ||
| 315 | ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); | 315 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); |
| 316 | ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 316 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 317 | ctx->fallback_req.nbytes = req->nbytes; | 317 | rctx->fallback_req.nbytes = req->nbytes; |
| 318 | ctx->fallback_req.src = req->src; | 318 | rctx->fallback_req.src = req->src; |
| 319 | ctx->fallback_req.result = req->result; | 319 | rctx->fallback_req.result = req->result; |
| 320 | 320 | ||
| 321 | return crypto_ahash_finup(&ctx->fallback_req); | 321 | return crypto_ahash_finup(&rctx->fallback_req); |
| 322 | } | 322 | } |
| 323 | 323 | ||
| 324 | static int n2_hash_cra_init(struct crypto_tfm *tfm) | 324 | static int n2_hash_cra_init(struct crypto_tfm *tfm) |
| @@ -338,7 +338,10 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm) | |||
| 338 | goto out; | 338 | goto out; |
| 339 | } | 339 | } |
| 340 | 340 | ||
| 341 | ctx->fallback = fallback_tfm; | 341 | crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + |
| 342 | crypto_ahash_reqsize(fallback_tfm))); | ||
| 343 | |||
| 344 | ctx->fallback_tfm = fallback_tfm; | ||
| 342 | return 0; | 345 | return 0; |
| 343 | 346 | ||
| 344 | out: | 347 | out: |
| @@ -350,7 +353,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm) | |||
| 350 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | 353 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); |
| 351 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 354 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
| 352 | 355 | ||
| 353 | crypto_free_ahash(ctx->fallback); | 356 | crypto_free_ahash(ctx->fallback_tfm); |
| 354 | } | 357 | } |
| 355 | 358 | ||
| 356 | static unsigned long wait_for_tail(struct spu_queue *qp) | 359 | static unsigned long wait_for_tail(struct spu_queue *qp) |
| @@ -399,14 +402,16 @@ static int n2_hash_async_digest(struct ahash_request *req, | |||
| 399 | * exceed 2^16. | 402 | * exceed 2^16. |
| 400 | */ | 403 | */ |
| 401 | if (unlikely(req->nbytes > (1 << 16))) { | 404 | if (unlikely(req->nbytes > (1 << 16))) { |
| 402 | ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); | 405 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
| 403 | ctx->fallback_req.base.flags = | 406 | |
| 407 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
| 408 | rctx->fallback_req.base.flags = | ||
| 404 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 409 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
| 405 | ctx->fallback_req.nbytes = req->nbytes; | 410 | rctx->fallback_req.nbytes = req->nbytes; |
| 406 | ctx->fallback_req.src = req->src; | 411 | rctx->fallback_req.src = req->src; |
| 407 | ctx->fallback_req.result = req->result; | 412 | rctx->fallback_req.result = req->result; |
| 408 | 413 | ||
| 409 | return crypto_ahash_digest(&ctx->fallback_req); | 414 | return crypto_ahash_digest(&rctx->fallback_req); |
| 410 | } | 415 | } |
| 411 | 416 | ||
| 412 | n2_base_ctx_init(&ctx->base); | 417 | n2_base_ctx_init(&ctx->base); |
| @@ -472,9 +477,8 @@ out: | |||
| 472 | 477 | ||
| 473 | static int n2_md5_async_digest(struct ahash_request *req) | 478 | static int n2_md5_async_digest(struct ahash_request *req) |
| 474 | { | 479 | { |
| 475 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 480 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
| 476 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | 481 | struct md5_state *m = &rctx->u.md5; |
| 477 | struct md5_state *m = &ctx->u.md5; | ||
| 478 | 482 | ||
| 479 | if (unlikely(req->nbytes == 0)) { | 483 | if (unlikely(req->nbytes == 0)) { |
| 480 | static const char md5_zero[MD5_DIGEST_SIZE] = { | 484 | static const char md5_zero[MD5_DIGEST_SIZE] = { |
| @@ -497,9 +501,8 @@ static int n2_md5_async_digest(struct ahash_request *req) | |||
| 497 | 501 | ||
| 498 | static int n2_sha1_async_digest(struct ahash_request *req) | 502 | static int n2_sha1_async_digest(struct ahash_request *req) |
| 499 | { | 503 | { |
| 500 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 504 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
| 501 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | 505 | struct sha1_state *s = &rctx->u.sha1; |
| 502 | struct sha1_state *s = &ctx->u.sha1; | ||
| 503 | 506 | ||
| 504 | if (unlikely(req->nbytes == 0)) { | 507 | if (unlikely(req->nbytes == 0)) { |
| 505 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { | 508 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { |
| @@ -524,9 +527,8 @@ static int n2_sha1_async_digest(struct ahash_request *req) | |||
| 524 | 527 | ||
| 525 | static int n2_sha256_async_digest(struct ahash_request *req) | 528 | static int n2_sha256_async_digest(struct ahash_request *req) |
| 526 | { | 529 | { |
| 527 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 530 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
| 528 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | 531 | struct sha256_state *s = &rctx->u.sha256; |
| 529 | struct sha256_state *s = &ctx->u.sha256; | ||
| 530 | 532 | ||
| 531 | if (req->nbytes == 0) { | 533 | if (req->nbytes == 0) { |
| 532 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { | 534 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { |
| @@ -555,9 +557,8 @@ static int n2_sha256_async_digest(struct ahash_request *req) | |||
| 555 | 557 | ||
| 556 | static int n2_sha224_async_digest(struct ahash_request *req) | 558 | static int n2_sha224_async_digest(struct ahash_request *req) |
| 557 | { | 559 | { |
| 558 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 560 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
| 559 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | 561 | struct sha256_state *s = &rctx->u.sha256; |
| 560 | struct sha256_state *s = &ctx->u.sha256; | ||
| 561 | 562 | ||
| 562 | if (req->nbytes == 0) { | 563 | if (req->nbytes == 0) { |
| 563 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { | 564 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { |
| @@ -1398,7 +1399,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip, | |||
| 1398 | 1399 | ||
| 1399 | intr = ip->ino_table[i].intr; | 1400 | intr = ip->ino_table[i].intr; |
| 1400 | 1401 | ||
| 1401 | dev_intrs = of_get_property(dev->node, "interrupts", NULL); | 1402 | dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); |
| 1402 | if (!dev_intrs) | 1403 | if (!dev_intrs) |
| 1403 | return -ENODEV; | 1404 | return -ENODEV; |
| 1404 | 1405 | ||
| @@ -1449,7 +1450,7 @@ static int queue_cache_init(void) | |||
| 1449 | { | 1450 | { |
| 1450 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) | 1451 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) |
| 1451 | queue_cache[HV_NCS_QTYPE_MAU - 1] = | 1452 | queue_cache[HV_NCS_QTYPE_MAU - 1] = |
| 1452 | kmem_cache_create("cwq_queue", | 1453 | kmem_cache_create("mau_queue", |
| 1453 | (MAU_NUM_ENTRIES * | 1454 | (MAU_NUM_ENTRIES * |
| 1454 | MAU_ENTRY_SIZE), | 1455 | MAU_ENTRY_SIZE), |
| 1455 | MAU_ENTRY_SIZE, 0, NULL); | 1456 | MAU_ENTRY_SIZE, 0, NULL); |
| @@ -1574,7 +1575,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, | |||
| 1574 | id = mdesc_get_property(mdesc, tgt, "id", NULL); | 1575 | id = mdesc_get_property(mdesc, tgt, "id", NULL); |
| 1575 | if (table[*id] != NULL) { | 1576 | if (table[*id] != NULL) { |
| 1576 | dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", | 1577 | dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", |
| 1577 | dev->node->full_name); | 1578 | dev->dev.of_node->full_name); |
| 1578 | return -EINVAL; | 1579 | return -EINVAL; |
| 1579 | } | 1580 | } |
| 1580 | cpu_set(*id, p->sharing); | 1581 | cpu_set(*id, p->sharing); |
| @@ -1595,7 +1596,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, | |||
| 1595 | p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); | 1596 | p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); |
| 1596 | if (!p) { | 1597 | if (!p) { |
| 1597 | dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", | 1598 | dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", |
| 1598 | dev->node->full_name); | 1599 | dev->dev.of_node->full_name); |
| 1599 | return -ENOMEM; | 1600 | return -ENOMEM; |
| 1600 | } | 1601 | } |
| 1601 | 1602 | ||
| @@ -1684,7 +1685,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc, | |||
| 1684 | const unsigned int *reg; | 1685 | const unsigned int *reg; |
| 1685 | u64 node; | 1686 | u64 node; |
| 1686 | 1687 | ||
| 1687 | reg = of_get_property(dev->node, "reg", NULL); | 1688 | reg = of_get_property(dev->dev.of_node, "reg", NULL); |
| 1688 | if (!reg) | 1689 | if (!reg) |
| 1689 | return -ENODEV; | 1690 | return -ENODEV; |
| 1690 | 1691 | ||
| @@ -1836,7 +1837,7 @@ static int __devinit n2_crypto_probe(struct of_device *dev, | |||
| 1836 | 1837 | ||
| 1837 | n2_spu_driver_version(); | 1838 | n2_spu_driver_version(); |
| 1838 | 1839 | ||
| 1839 | full_name = dev->node->full_name; | 1840 | full_name = dev->dev.of_node->full_name; |
| 1840 | pr_info("Found N2CP at %s\n", full_name); | 1841 | pr_info("Found N2CP at %s\n", full_name); |
| 1841 | 1842 | ||
| 1842 | np = alloc_n2cp(); | 1843 | np = alloc_n2cp(); |
| @@ -1948,7 +1949,7 @@ static int __devinit n2_mau_probe(struct of_device *dev, | |||
| 1948 | 1949 | ||
| 1949 | n2_spu_driver_version(); | 1950 | n2_spu_driver_version(); |
| 1950 | 1951 | ||
| 1951 | full_name = dev->node->full_name; | 1952 | full_name = dev->dev.of_node->full_name; |
| 1952 | pr_info("Found NCP at %s\n", full_name); | 1953 | pr_info("Found NCP at %s\n", full_name); |
| 1953 | 1954 | ||
| 1954 | mp = alloc_ncp(); | 1955 | mp = alloc_ncp(); |
| @@ -2034,8 +2035,11 @@ static struct of_device_id n2_crypto_match[] = { | |||
| 2034 | MODULE_DEVICE_TABLE(of, n2_crypto_match); | 2035 | MODULE_DEVICE_TABLE(of, n2_crypto_match); |
| 2035 | 2036 | ||
| 2036 | static struct of_platform_driver n2_crypto_driver = { | 2037 | static struct of_platform_driver n2_crypto_driver = { |
| 2037 | .name = "n2cp", | 2038 | .driver = { |
| 2038 | .match_table = n2_crypto_match, | 2039 | .name = "n2cp", |
| 2040 | .owner = THIS_MODULE, | ||
| 2041 | .of_match_table = n2_crypto_match, | ||
| 2042 | }, | ||
| 2039 | .probe = n2_crypto_probe, | 2043 | .probe = n2_crypto_probe, |
| 2040 | .remove = __devexit_p(n2_crypto_remove), | 2044 | .remove = __devexit_p(n2_crypto_remove), |
| 2041 | }; | 2045 | }; |
| @@ -2055,8 +2059,11 @@ static struct of_device_id n2_mau_match[] = { | |||
| 2055 | MODULE_DEVICE_TABLE(of, n2_mau_match); | 2059 | MODULE_DEVICE_TABLE(of, n2_mau_match); |
| 2056 | 2060 | ||
| 2057 | static struct of_platform_driver n2_mau_driver = { | 2061 | static struct of_platform_driver n2_mau_driver = { |
| 2058 | .name = "ncp", | 2062 | .driver = { |
| 2059 | .match_table = n2_mau_match, | 2063 | .name = "ncp", |
| 2064 | .owner = THIS_MODULE, | ||
| 2065 | .of_match_table = n2_mau_match, | ||
| 2066 | }, | ||
| 2060 | .probe = n2_mau_probe, | 2067 | .probe = n2_mau_probe, |
| 2061 | .remove = __devexit_p(n2_mau_remove), | 2068 | .remove = __devexit_p(n2_mau_remove), |
| 2062 | }; | 2069 | }; |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 201e6e19c344..14a8c0f1698e 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
| @@ -630,7 +630,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
| 630 | static int __devinit mpc_dma_probe(struct of_device *op, | 630 | static int __devinit mpc_dma_probe(struct of_device *op, |
| 631 | const struct of_device_id *match) | 631 | const struct of_device_id *match) |
| 632 | { | 632 | { |
| 633 | struct device_node *dn = op->node; | 633 | struct device_node *dn = op->dev.of_node; |
| 634 | struct device *dev = &op->dev; | 634 | struct device *dev = &op->dev; |
| 635 | struct dma_device *dma; | 635 | struct dma_device *dma; |
| 636 | struct mpc_dma *mdma; | 636 | struct mpc_dma *mdma; |
| @@ -771,12 +771,12 @@ static struct of_device_id mpc_dma_match[] = { | |||
| 771 | }; | 771 | }; |
| 772 | 772 | ||
| 773 | static struct of_platform_driver mpc_dma_driver = { | 773 | static struct of_platform_driver mpc_dma_driver = { |
| 774 | .match_table = mpc_dma_match, | ||
| 775 | .probe = mpc_dma_probe, | 774 | .probe = mpc_dma_probe, |
| 776 | .remove = __devexit_p(mpc_dma_remove), | 775 | .remove = __devexit_p(mpc_dma_remove), |
| 777 | .driver = { | 776 | .driver = { |
| 778 | .name = DRV_NAME, | 777 | .name = DRV_NAME, |
| 779 | .owner = THIS_MODULE, | 778 | .owner = THIS_MODULE, |
| 779 | .of_match_table = mpc_dma_match, | ||
| 780 | }, | 780 | }, |
| 781 | }; | 781 | }; |
| 782 | 782 | ||
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index fa98abe4686f..5a22ca6927e5 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
| @@ -4394,7 +4394,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev, | |||
| 4394 | static int __devinit ppc440spe_adma_probe(struct of_device *ofdev, | 4394 | static int __devinit ppc440spe_adma_probe(struct of_device *ofdev, |
| 4395 | const struct of_device_id *match) | 4395 | const struct of_device_id *match) |
| 4396 | { | 4396 | { |
| 4397 | struct device_node *np = ofdev->node; | 4397 | struct device_node *np = ofdev->dev.of_node; |
| 4398 | struct resource res; | 4398 | struct resource res; |
| 4399 | struct ppc440spe_adma_device *adev; | 4399 | struct ppc440spe_adma_device *adev; |
| 4400 | struct ppc440spe_adma_chan *chan; | 4400 | struct ppc440spe_adma_chan *chan; |
| @@ -4626,7 +4626,7 @@ out: | |||
| 4626 | static int __devexit ppc440spe_adma_remove(struct of_device *ofdev) | 4626 | static int __devexit ppc440spe_adma_remove(struct of_device *ofdev) |
| 4627 | { | 4627 | { |
| 4628 | struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); | 4628 | struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); |
| 4629 | struct device_node *np = ofdev->node; | 4629 | struct device_node *np = ofdev->dev.of_node; |
| 4630 | struct resource res; | 4630 | struct resource res; |
| 4631 | struct dma_chan *chan, *_chan; | 4631 | struct dma_chan *chan, *_chan; |
| 4632 | struct ppc_dma_chan_ref *ref, *_ref; | 4632 | struct ppc_dma_chan_ref *ref, *_ref; |
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 6c1886b497ff..52ca09bf4726 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
| @@ -229,7 +229,7 @@ static int __devinit mpc85xx_pci_err_probe(struct of_device *op, | |||
| 229 | 229 | ||
| 230 | pdata->edac_idx = edac_pci_idx++; | 230 | pdata->edac_idx = edac_pci_idx++; |
| 231 | 231 | ||
| 232 | res = of_address_to_resource(op->node, 0, &r); | 232 | res = of_address_to_resource(op->dev.of_node, 0, &r); |
| 233 | if (res) { | 233 | if (res) { |
| 234 | printk(KERN_ERR "%s: Unable to get resource for " | 234 | printk(KERN_ERR "%s: Unable to get resource for " |
| 235 | "PCI err regs\n", __func__); | 235 | "PCI err regs\n", __func__); |
| @@ -274,7 +274,7 @@ static int __devinit mpc85xx_pci_err_probe(struct of_device *op, | |||
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | if (edac_op_state == EDAC_OPSTATE_INT) { | 276 | if (edac_op_state == EDAC_OPSTATE_INT) { |
| 277 | pdata->irq = irq_of_parse_and_map(op->node, 0); | 277 | pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); |
| 278 | res = devm_request_irq(&op->dev, pdata->irq, | 278 | res = devm_request_irq(&op->dev, pdata->irq, |
| 279 | mpc85xx_pci_isr, IRQF_DISABLED, | 279 | mpc85xx_pci_isr, IRQF_DISABLED, |
| 280 | "[EDAC] PCI err", pci); | 280 | "[EDAC] PCI err", pci); |
| @@ -529,7 +529,7 @@ static int __devinit mpc85xx_l2_err_probe(struct of_device *op, | |||
| 529 | edac_dev->ctl_name = pdata->name; | 529 | edac_dev->ctl_name = pdata->name; |
| 530 | edac_dev->dev_name = pdata->name; | 530 | edac_dev->dev_name = pdata->name; |
| 531 | 531 | ||
| 532 | res = of_address_to_resource(op->node, 0, &r); | 532 | res = of_address_to_resource(op->dev.of_node, 0, &r); |
| 533 | if (res) { | 533 | if (res) { |
| 534 | printk(KERN_ERR "%s: Unable to get resource for " | 534 | printk(KERN_ERR "%s: Unable to get resource for " |
| 535 | "L2 err regs\n", __func__); | 535 | "L2 err regs\n", __func__); |
| @@ -576,7 +576,7 @@ static int __devinit mpc85xx_l2_err_probe(struct of_device *op, | |||
| 576 | } | 576 | } |
| 577 | 577 | ||
| 578 | if (edac_op_state == EDAC_OPSTATE_INT) { | 578 | if (edac_op_state == EDAC_OPSTATE_INT) { |
| 579 | pdata->irq = irq_of_parse_and_map(op->node, 0); | 579 | pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); |
| 580 | res = devm_request_irq(&op->dev, pdata->irq, | 580 | res = devm_request_irq(&op->dev, pdata->irq, |
| 581 | mpc85xx_l2_isr, IRQF_DISABLED, | 581 | mpc85xx_l2_isr, IRQF_DISABLED, |
| 582 | "[EDAC] L2 err", edac_dev); | 582 | "[EDAC] L2 err", edac_dev); |
| @@ -978,7 +978,7 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op, | |||
| 978 | mci->ctl_name = pdata->name; | 978 | mci->ctl_name = pdata->name; |
| 979 | mci->dev_name = pdata->name; | 979 | mci->dev_name = pdata->name; |
| 980 | 980 | ||
| 981 | res = of_address_to_resource(op->node, 0, &r); | 981 | res = of_address_to_resource(op->dev.of_node, 0, &r); |
| 982 | if (res) { | 982 | if (res) { |
| 983 | printk(KERN_ERR "%s: Unable to get resource for MC err regs\n", | 983 | printk(KERN_ERR "%s: Unable to get resource for MC err regs\n", |
| 984 | __func__); | 984 | __func__); |
| @@ -1052,7 +1052,7 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op, | |||
| 1052 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000); | 1052 | out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000); |
| 1053 | 1053 | ||
| 1054 | /* register interrupts */ | 1054 | /* register interrupts */ |
| 1055 | pdata->irq = irq_of_parse_and_map(op->node, 0); | 1055 | pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); |
| 1056 | res = devm_request_irq(&op->dev, pdata->irq, | 1056 | res = devm_request_irq(&op->dev, pdata->irq, |
| 1057 | mpc85xx_mc_isr, | 1057 | mpc85xx_mc_isr, |
| 1058 | IRQF_DISABLED | IRQF_SHARED, | 1058 | IRQF_DISABLED | IRQF_SHARED, |
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 9d6f6783328c..e78839e89a06 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c | |||
| @@ -1022,7 +1022,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, | |||
| 1022 | int status = 0; | 1022 | int status = 0; |
| 1023 | const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK); | 1023 | const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK); |
| 1024 | struct ppc4xx_edac_pdata *pdata = NULL; | 1024 | struct ppc4xx_edac_pdata *pdata = NULL; |
| 1025 | const struct device_node *np = op->node; | 1025 | const struct device_node *np = op->dev.of_node; |
| 1026 | 1026 | ||
| 1027 | if (match == NULL) | 1027 | if (match == NULL) |
| 1028 | return -EINVAL; | 1028 | return -EINVAL; |
| @@ -1113,7 +1113,7 @@ ppc4xx_edac_register_irq(struct of_device *op, struct mem_ctl_info *mci) | |||
| 1113 | int status = 0; | 1113 | int status = 0; |
| 1114 | int ded_irq, sec_irq; | 1114 | int ded_irq, sec_irq; |
| 1115 | struct ppc4xx_edac_pdata *pdata = mci->pvt_info; | 1115 | struct ppc4xx_edac_pdata *pdata = mci->pvt_info; |
| 1116 | struct device_node *np = op->node; | 1116 | struct device_node *np = op->dev.of_node; |
| 1117 | 1117 | ||
| 1118 | ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX); | 1118 | ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX); |
| 1119 | sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX); | 1119 | sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX); |
| @@ -1243,7 +1243,7 @@ ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match) | |||
| 1243 | int status = 0; | 1243 | int status = 0; |
| 1244 | u32 mcopt1, memcheck; | 1244 | u32 mcopt1, memcheck; |
| 1245 | dcr_host_t dcr_host; | 1245 | dcr_host_t dcr_host; |
| 1246 | const struct device_node *np = op->node; | 1246 | const struct device_node *np = op->dev.of_node; |
| 1247 | struct mem_ctl_info *mci = NULL; | 1247 | struct mem_ctl_info *mci = NULL; |
| 1248 | static int ppc4xx_edac_instance; | 1248 | static int ppc4xx_edac_instance; |
| 1249 | 1249 | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 764401951041..9b2a54117c91 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work) | |||
| 860 | } | 860 | } |
| 861 | } | 861 | } |
| 862 | 862 | ||
| 863 | void drm_kms_helper_poll_init(struct drm_device *dev) | 863 | void drm_kms_helper_poll_disable(struct drm_device *dev) |
| 864 | { | ||
| 865 | if (!dev->mode_config.poll_enabled) | ||
| 866 | return; | ||
| 867 | delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); | ||
| 868 | } | ||
| 869 | EXPORT_SYMBOL(drm_kms_helper_poll_disable); | ||
| 870 | |||
| 871 | void drm_kms_helper_poll_enable(struct drm_device *dev) | ||
| 864 | { | 872 | { |
| 865 | struct drm_connector *connector; | ||
| 866 | bool poll = false; | 873 | bool poll = false; |
| 874 | struct drm_connector *connector; | ||
| 867 | int ret; | 875 | int ret; |
| 868 | 876 | ||
| 869 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 877 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 870 | if (connector->polled) | 878 | if (connector->polled) |
| 871 | poll = true; | 879 | poll = true; |
| 872 | } | 880 | } |
| 873 | slow_work_register_user(THIS_MODULE); | ||
| 874 | delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, | ||
| 875 | &output_poll_ops); | ||
| 876 | 881 | ||
| 877 | if (poll) { | 882 | if (poll) { |
| 878 | ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); | 883 | ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); |
| @@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev) | |||
| 880 | DRM_ERROR("delayed enqueue failed %d\n", ret); | 885 | DRM_ERROR("delayed enqueue failed %d\n", ret); |
| 881 | } | 886 | } |
| 882 | } | 887 | } |
| 888 | EXPORT_SYMBOL(drm_kms_helper_poll_enable); | ||
| 889 | |||
| 890 | void drm_kms_helper_poll_init(struct drm_device *dev) | ||
| 891 | { | ||
| 892 | slow_work_register_user(THIS_MODULE); | ||
| 893 | delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, | ||
| 894 | &output_poll_ops); | ||
| 895 | dev->mode_config.poll_enabled = true; | ||
| 896 | |||
| 897 | drm_kms_helper_poll_enable(dev); | ||
| 898 | } | ||
| 883 | EXPORT_SYMBOL(drm_kms_helper_poll_init); | 899 | EXPORT_SYMBOL(drm_kms_helper_poll_init); |
| 884 | 900 | ||
| 885 | void drm_kms_helper_poll_fini(struct drm_device *dev) | 901 | void drm_kms_helper_poll_fini(struct drm_device *dev) |
| 886 | { | 902 | { |
| 887 | delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); | 903 | drm_kms_helper_poll_disable(dev); |
| 888 | slow_work_unregister_user(THIS_MODULE); | 904 | slow_work_unregister_user(THIS_MODULE); |
| 889 | } | 905 | } |
| 890 | EXPORT_SYMBOL(drm_kms_helper_poll_fini); | 906 | EXPORT_SYMBOL(drm_kms_helper_poll_fini); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 84ce95602f00..b2ebf02e4f8a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -1320,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ | |||
| 1320 | struct drm_device *dev = pci_get_drvdata(pdev); | 1320 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 1321 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 1321 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
| 1322 | if (state == VGA_SWITCHEROO_ON) { | 1322 | if (state == VGA_SWITCHEROO_ON) { |
| 1323 | printk(KERN_INFO "i915: switched off\n"); | 1323 | printk(KERN_INFO "i915: switched on\n"); |
| 1324 | /* i915 resume handler doesn't set to D0 */ | 1324 | /* i915 resume handler doesn't set to D0 */ |
| 1325 | pci_set_power_state(dev->pdev, PCI_D0); | 1325 | pci_set_power_state(dev->pdev, PCI_D0); |
| 1326 | i915_resume(dev); | 1326 | i915_resume(dev); |
| 1327 | drm_kms_helper_poll_enable(dev); | ||
| 1327 | } else { | 1328 | } else { |
| 1328 | printk(KERN_ERR "i915: switched off\n"); | 1329 | printk(KERN_ERR "i915: switched off\n"); |
| 1330 | drm_kms_helper_poll_disable(dev); | ||
| 1329 | i915_suspend(dev, pmm); | 1331 | i915_suspend(dev, pmm); |
| 1330 | } | 1332 | } |
| 1331 | } | 1333 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index e13f6af0037a..d4bcca8a5133 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | static struct nouveau_dsm_priv { | 34 | static struct nouveau_dsm_priv { |
| 35 | bool dsm_detected; | 35 | bool dsm_detected; |
| 36 | acpi_handle dhandle; | 36 | acpi_handle dhandle; |
| 37 | acpi_handle dsm_handle; | 37 | acpi_handle rom_handle; |
| 38 | } nouveau_dsm_priv; | 38 | } nouveau_dsm_priv; |
| 39 | 39 | ||
| 40 | static const char nouveau_dsm_muid[] = { | 40 | static const char nouveau_dsm_muid[] = { |
| @@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero | |||
| 107 | static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) | 107 | static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) |
| 108 | { | 108 | { |
| 109 | if (id == VGA_SWITCHEROO_IGD) | 109 | if (id == VGA_SWITCHEROO_IGD) |
| 110 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA); | 110 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); |
| 111 | else | 111 | else |
| 112 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED); | 112 | return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, | 115 | static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, |
| @@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, | |||
| 118 | if (id == VGA_SWITCHEROO_IGD) | 118 | if (id == VGA_SWITCHEROO_IGD) |
| 119 | return 0; | 119 | return 0; |
| 120 | 120 | ||
| 121 | return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state); | 121 | return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | static int nouveau_dsm_init(void) | 124 | static int nouveau_dsm_init(void) |
| @@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev) | |||
| 151 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | 151 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); |
| 152 | if (!dhandle) | 152 | if (!dhandle) |
| 153 | return false; | 153 | return false; |
| 154 | |||
| 154 | status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); | 155 | status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); |
| 155 | if (ACPI_FAILURE(status)) { | 156 | if (ACPI_FAILURE(status)) { |
| 156 | return false; | 157 | return false; |
| 157 | } | 158 | } |
| 158 | 159 | ||
| 159 | ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED, | 160 | ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED, |
| 160 | NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); | 161 | NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); |
| 161 | if (ret < 0) | 162 | if (ret < 0) |
| 162 | return false; | 163 | return false; |
| 163 | 164 | ||
| 164 | nouveau_dsm_priv.dhandle = dhandle; | 165 | nouveau_dsm_priv.dhandle = dhandle; |
| 165 | nouveau_dsm_priv.dsm_handle = nvidia_handle; | ||
| 166 | return true; | 166 | return true; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| @@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void) | |||
| 173 | struct pci_dev *pdev = NULL; | 173 | struct pci_dev *pdev = NULL; |
| 174 | int has_dsm = 0; | 174 | int has_dsm = 0; |
| 175 | int vga_count = 0; | 175 | int vga_count = 0; |
| 176 | |||
| 176 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | 177 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { |
| 177 | vga_count++; | 178 | vga_count++; |
| 178 | 179 | ||
| @@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void) | |||
| 180 | } | 181 | } |
| 181 | 182 | ||
| 182 | if (vga_count == 2 && has_dsm) { | 183 | if (vga_count == 2 && has_dsm) { |
| 183 | acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer); | 184 | acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); |
| 184 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", | 185 | printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", |
| 185 | acpi_method_name); | 186 | acpi_method_name); |
| 186 | nouveau_dsm_priv.dsm_detected = true; | 187 | nouveau_dsm_priv.dsm_detected = true; |
| @@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void) | |||
| 204 | { | 205 | { |
| 205 | vga_switcheroo_unregister_handler(); | 206 | vga_switcheroo_unregister_handler(); |
| 206 | } | 207 | } |
| 208 | |||
| 209 | /* retrieve the ROM in 4k blocks */ | ||
| 210 | static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, | ||
| 211 | int offset, int len) | ||
| 212 | { | ||
| 213 | acpi_status status; | ||
| 214 | union acpi_object rom_arg_elements[2], *obj; | ||
| 215 | struct acpi_object_list rom_arg; | ||
| 216 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; | ||
| 217 | |||
| 218 | rom_arg.count = 2; | ||
| 219 | rom_arg.pointer = &rom_arg_elements[0]; | ||
| 220 | |||
| 221 | rom_arg_elements[0].type = ACPI_TYPE_INTEGER; | ||
| 222 | rom_arg_elements[0].integer.value = offset; | ||
| 223 | |||
| 224 | rom_arg_elements[1].type = ACPI_TYPE_INTEGER; | ||
| 225 | rom_arg_elements[1].integer.value = len; | ||
| 226 | |||
| 227 | status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); | ||
| 228 | if (ACPI_FAILURE(status)) { | ||
| 229 | printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); | ||
| 230 | return -ENODEV; | ||
| 231 | } | ||
| 232 | obj = (union acpi_object *)buffer.pointer; | ||
| 233 | memcpy(bios+offset, obj->buffer.pointer, len); | ||
| 234 | kfree(buffer.pointer); | ||
| 235 | return len; | ||
| 236 | } | ||
| 237 | |||
| 238 | bool nouveau_acpi_rom_supported(struct pci_dev *pdev) | ||
| 239 | { | ||
| 240 | acpi_status status; | ||
| 241 | acpi_handle dhandle, rom_handle; | ||
| 242 | |||
| 243 | if (!nouveau_dsm_priv.dsm_detected) | ||
| 244 | return false; | ||
| 245 | |||
| 246 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | ||
| 247 | if (!dhandle) | ||
| 248 | return false; | ||
| 249 | |||
| 250 | status = acpi_get_handle(dhandle, "_ROM", &rom_handle); | ||
| 251 | if (ACPI_FAILURE(status)) | ||
| 252 | return false; | ||
| 253 | |||
| 254 | nouveau_dsm_priv.rom_handle = rom_handle; | ||
| 255 | return true; | ||
| 256 | } | ||
| 257 | |||
| 258 | int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) | ||
| 259 | { | ||
| 260 | return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); | ||
| 261 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index e7e69ccce5c9..9ba2deaadcc7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -178,6 +178,25 @@ out: | |||
| 178 | pci_disable_rom(dev->pdev); | 178 | pci_disable_rom(dev->pdev); |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | static void load_vbios_acpi(struct drm_device *dev, uint8_t *data) | ||
| 182 | { | ||
| 183 | int i; | ||
| 184 | int ret; | ||
| 185 | int size = 64 * 1024; | ||
| 186 | |||
| 187 | if (!nouveau_acpi_rom_supported(dev->pdev)) | ||
| 188 | return; | ||
| 189 | |||
| 190 | for (i = 0; i < (size / ROM_BIOS_PAGE); i++) { | ||
| 191 | ret = nouveau_acpi_get_bios_chunk(data, | ||
| 192 | (i * ROM_BIOS_PAGE), | ||
| 193 | ROM_BIOS_PAGE); | ||
| 194 | if (ret <= 0) | ||
| 195 | break; | ||
| 196 | } | ||
| 197 | return; | ||
| 198 | } | ||
| 199 | |||
| 181 | struct methods { | 200 | struct methods { |
| 182 | const char desc[8]; | 201 | const char desc[8]; |
| 183 | void (*loadbios)(struct drm_device *, uint8_t *); | 202 | void (*loadbios)(struct drm_device *, uint8_t *); |
| @@ -191,6 +210,7 @@ static struct methods nv04_methods[] = { | |||
| 191 | }; | 210 | }; |
| 192 | 211 | ||
| 193 | static struct methods nv50_methods[] = { | 212 | static struct methods nv50_methods[] = { |
| 213 | { "ACPI", load_vbios_acpi, true }, | ||
| 194 | { "PRAMIN", load_vbios_pramin, true }, | 214 | { "PRAMIN", load_vbios_pramin, true }, |
| 195 | { "PROM", load_vbios_prom, false }, | 215 | { "PROM", load_vbios_prom, false }, |
| 196 | { "PCIROM", load_vbios_pci, true }, | 216 | { "PCIROM", load_vbios_pci, true }, |
| @@ -2807,7 +2827,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2807 | 2827 | ||
| 2808 | BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); | 2828 | BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); |
| 2809 | 2829 | ||
| 2810 | nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); | 2830 | BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", |
| 2831 | offset, gpio->tag, gpio->state_default); | ||
| 2832 | if (bios->execute) | ||
| 2833 | nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); | ||
| 2811 | 2834 | ||
| 2812 | /* The NVIDIA binary driver doesn't appear to actually do | 2835 | /* The NVIDIA binary driver doesn't appear to actually do |
| 2813 | * any of this, my VBIOS does however. | 2836 | * any of this, my VBIOS does however. |
| @@ -5533,12 +5556,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
| 5533 | entry->bus = (conn >> 16) & 0xf; | 5556 | entry->bus = (conn >> 16) & 0xf; |
| 5534 | entry->location = (conn >> 20) & 0x3; | 5557 | entry->location = (conn >> 20) & 0x3; |
| 5535 | entry->or = (conn >> 24) & 0xf; | 5558 | entry->or = (conn >> 24) & 0xf; |
| 5536 | /* | ||
| 5537 | * Normal entries consist of a single bit, but dual link has the | ||
| 5538 | * next most significant bit set too | ||
| 5539 | */ | ||
| 5540 | entry->duallink_possible = | ||
| 5541 | ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); | ||
| 5542 | 5559 | ||
| 5543 | switch (entry->type) { | 5560 | switch (entry->type) { |
| 5544 | case OUTPUT_ANALOG: | 5561 | case OUTPUT_ANALOG: |
| @@ -5622,6 +5639,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
| 5622 | break; | 5639 | break; |
| 5623 | } | 5640 | } |
| 5624 | 5641 | ||
| 5642 | if (dcb->version < 0x40) { | ||
| 5643 | /* Normal entries consist of a single bit, but dual link has | ||
| 5644 | * the next most significant bit set too | ||
| 5645 | */ | ||
| 5646 | entry->duallink_possible = | ||
| 5647 | ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); | ||
| 5648 | } else { | ||
| 5649 | entry->duallink_possible = (entry->sorconf.link == 3); | ||
| 5650 | } | ||
| 5651 | |||
| 5625 | /* unsure what DCB version introduces this, 3.0? */ | 5652 | /* unsure what DCB version introduces this, 3.0? */ |
| 5626 | if (conf & 0x100000) | 5653 | if (conf & 0x100000) |
| 5627 | entry->i2c_upper_default = true; | 5654 | entry->i2c_upper_default = true; |
| @@ -6205,6 +6232,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev) | |||
| 6205 | nouveau_i2c_fini(dev, entry); | 6232 | nouveau_i2c_fini(dev, entry); |
| 6206 | } | 6233 | } |
| 6207 | 6234 | ||
| 6235 | static bool | ||
| 6236 | nouveau_bios_posted(struct drm_device *dev) | ||
| 6237 | { | ||
| 6238 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 6239 | bool was_locked; | ||
| 6240 | unsigned htotal; | ||
| 6241 | |||
| 6242 | if (dev_priv->chipset >= NV_50) { | ||
| 6243 | if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && | ||
| 6244 | NVReadVgaCrtc(dev, 0, 0x1a) == 0) | ||
| 6245 | return false; | ||
| 6246 | return true; | ||
| 6247 | } | ||
| 6248 | |||
| 6249 | was_locked = NVLockVgaCrtcs(dev, false); | ||
| 6250 | htotal = NVReadVgaCrtc(dev, 0, 0x06); | ||
| 6251 | htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; | ||
| 6252 | htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; | ||
| 6253 | htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; | ||
| 6254 | htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; | ||
| 6255 | NVLockVgaCrtcs(dev, was_locked); | ||
| 6256 | return (htotal != 0); | ||
| 6257 | } | ||
| 6258 | |||
| 6208 | int | 6259 | int |
| 6209 | nouveau_bios_init(struct drm_device *dev) | 6260 | nouveau_bios_init(struct drm_device *dev) |
| 6210 | { | 6261 | { |
| @@ -6239,11 +6290,9 @@ nouveau_bios_init(struct drm_device *dev) | |||
| 6239 | bios->execute = false; | 6290 | bios->execute = false; |
| 6240 | 6291 | ||
| 6241 | /* ... unless card isn't POSTed already */ | 6292 | /* ... unless card isn't POSTed already */ |
| 6242 | if (dev_priv->card_type >= NV_10 && | 6293 | if (!nouveau_bios_posted(dev)) { |
| 6243 | NVReadVgaCrtc(dev, 0, 0x00) == 0 && | ||
| 6244 | NVReadVgaCrtc(dev, 0, 0x1a) == 0) { | ||
| 6245 | NV_INFO(dev, "Adaptor not initialised\n"); | 6294 | NV_INFO(dev, "Adaptor not initialised\n"); |
| 6246 | if (dev_priv->card_type < NV_50) { | 6295 | if (dev_priv->card_type < NV_40) { |
| 6247 | NV_ERROR(dev, "Unable to POST this chipset\n"); | 6296 | NV_ERROR(dev, "Unable to POST this chipset\n"); |
| 6248 | return -ENODEV; | 6297 | return -ENODEV; |
| 6249 | } | 6298 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 266b0ff441af..149ed224c3cb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -432,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector, | |||
| 432 | } | 432 | } |
| 433 | 433 | ||
| 434 | static struct drm_display_mode * | 434 | static struct drm_display_mode * |
| 435 | nouveau_connector_native_mode(struct nouveau_connector *connector) | 435 | nouveau_connector_native_mode(struct drm_connector *connector) |
| 436 | { | 436 | { |
| 437 | struct drm_device *dev = connector->base.dev; | 437 | struct drm_connector_helper_funcs *helper = connector->helper_private; |
| 438 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | ||
| 439 | struct drm_device *dev = connector->dev; | ||
| 438 | struct drm_display_mode *mode, *largest = NULL; | 440 | struct drm_display_mode *mode, *largest = NULL; |
| 439 | int high_w = 0, high_h = 0, high_v = 0; | 441 | int high_w = 0, high_h = 0, high_v = 0; |
| 440 | 442 | ||
| 441 | /* Use preferred mode if there is one.. */ | 443 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { |
| 442 | list_for_each_entry(mode, &connector->base.probed_modes, head) { | 444 | if (helper->mode_valid(connector, mode) != MODE_OK) |
| 445 | continue; | ||
| 446 | |||
| 447 | /* Use preferred mode if there is one.. */ | ||
| 443 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { | 448 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { |
| 444 | NV_DEBUG_KMS(dev, "native mode from preferred\n"); | 449 | NV_DEBUG_KMS(dev, "native mode from preferred\n"); |
| 445 | return drm_mode_duplicate(dev, mode); | 450 | return drm_mode_duplicate(dev, mode); |
| 446 | } | 451 | } |
| 447 | } | ||
| 448 | 452 | ||
| 449 | /* Otherwise, take the resolution with the largest width, then height, | 453 | /* Otherwise, take the resolution with the largest width, then |
| 450 | * then vertical refresh | 454 | * height, then vertical refresh |
| 451 | */ | 455 | */ |
| 452 | list_for_each_entry(mode, &connector->base.probed_modes, head) { | ||
| 453 | if (mode->hdisplay < high_w) | 456 | if (mode->hdisplay < high_w) |
| 454 | continue; | 457 | continue; |
| 455 | 458 | ||
| @@ -553,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) | |||
| 553 | */ | 556 | */ |
| 554 | if (!nv_connector->native_mode) | 557 | if (!nv_connector->native_mode) |
| 555 | nv_connector->native_mode = | 558 | nv_connector->native_mode = |
| 556 | nouveau_connector_native_mode(nv_connector); | 559 | nouveau_connector_native_mode(connector); |
| 557 | if (ret == 0 && nv_connector->native_mode) { | 560 | if (ret == 0 && nv_connector->native_mode) { |
| 558 | struct drm_display_mode *mode; | 561 | struct drm_display_mode *mode; |
| 559 | 562 | ||
| @@ -584,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
| 584 | 587 | ||
| 585 | switch (nv_encoder->dcb->type) { | 588 | switch (nv_encoder->dcb->type) { |
| 586 | case OUTPUT_LVDS: | 589 | case OUTPUT_LVDS: |
| 587 | BUG_ON(!nv_connector->native_mode); | 590 | if (nv_connector->native_mode && |
| 588 | if (mode->hdisplay > nv_connector->native_mode->hdisplay || | 591 | (mode->hdisplay > nv_connector->native_mode->hdisplay || |
| 589 | mode->vdisplay > nv_connector->native_mode->vdisplay) | 592 | mode->vdisplay > nv_connector->native_mode->vdisplay)) |
| 590 | return MODE_PANEL; | 593 | return MODE_PANEL; |
| 591 | 594 | ||
| 592 | min_clock = 0; | 595 | min_clock = 0; |
| @@ -594,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
| 594 | break; | 597 | break; |
| 595 | case OUTPUT_TMDS: | 598 | case OUTPUT_TMDS: |
| 596 | if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || | 599 | if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || |
| 597 | (dev_priv->card_type < NV_50 && | 600 | !nv_encoder->dcb->duallink_possible) |
| 598 | !nv_encoder->dcb->duallink_possible)) | ||
| 599 | max_clock = 165000; | 601 | max_clock = 165000; |
| 600 | else | 602 | else |
| 601 | max_clock = 330000; | 603 | max_clock = 330000; |
| @@ -729,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev, | |||
| 729 | if (ret == 0) | 731 | if (ret == 0) |
| 730 | goto out; | 732 | goto out; |
| 731 | nv_connector->detected_encoder = nv_encoder; | 733 | nv_connector->detected_encoder = nv_encoder; |
| 732 | nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); | 734 | nv_connector->native_mode = nouveau_connector_native_mode(connector); |
| 733 | list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) | 735 | list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) |
| 734 | drm_mode_remove(connector, mode); | 736 | drm_mode_remove(connector, mode); |
| 735 | 737 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h index 49fa7b2d257e..cb1ce2a09162 100644 --- a/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h | |||
| @@ -40,6 +40,8 @@ struct nouveau_crtc { | |||
| 40 | int sharpness; | 40 | int sharpness; |
| 41 | int last_dpms; | 41 | int last_dpms; |
| 42 | 42 | ||
| 43 | int cursor_saved_x, cursor_saved_y; | ||
| 44 | |||
| 43 | struct { | 45 | struct { |
| 44 | int cpp; | 46 | int cpp; |
| 45 | bool blanked; | 47 | bool blanked; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index c6079e36669d..273770432298 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
| @@ -175,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) | |||
| 175 | nouveau_bo_unpin(nouveau_fb->nvbo); | 175 | nouveau_bo_unpin(nouveau_fb->nvbo); |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 179 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
| 180 | |||
| 181 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | ||
| 182 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | ||
| 183 | } | ||
| 184 | |||
| 178 | NV_INFO(dev, "Evicting buffers...\n"); | 185 | NV_INFO(dev, "Evicting buffers...\n"); |
| 179 | ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | 186 | ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); |
| 180 | 187 | ||
| @@ -314,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
| 314 | nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); | 321 | nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); |
| 315 | } | 322 | } |
| 316 | 323 | ||
| 324 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 325 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
| 326 | int ret; | ||
| 327 | |||
| 328 | ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); | ||
| 329 | if (!ret) | ||
| 330 | ret = nouveau_bo_map(nv_crtc->cursor.nvbo); | ||
| 331 | if (ret) | ||
| 332 | NV_ERROR(dev, "Could not pin/map cursor.\n"); | ||
| 333 | } | ||
| 334 | |||
| 317 | if (dev_priv->card_type < NV_50) { | 335 | if (dev_priv->card_type < NV_50) { |
| 318 | nv04_display_restore(dev); | 336 | nv04_display_restore(dev); |
| 319 | NVLockVgaCrtcs(dev, false); | 337 | NVLockVgaCrtcs(dev, false); |
| 320 | } else | 338 | } else |
| 321 | nv50_display_init(dev); | 339 | nv50_display_init(dev); |
| 322 | 340 | ||
| 341 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 342 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
| 343 | |||
| 344 | nv_crtc->cursor.set_offset(nv_crtc, | ||
| 345 | nv_crtc->cursor.nvbo->bo.offset - | ||
| 346 | dev_priv->vm_vram_base); | ||
| 347 | |||
| 348 | nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, | ||
| 349 | nv_crtc->cursor_saved_y); | ||
| 350 | } | ||
| 351 | |||
| 323 | /* Force CLUT to get re-loaded during modeset */ | 352 | /* Force CLUT to get re-loaded during modeset */ |
| 324 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 353 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 325 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 354 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 5b134438effe..c69719106489 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -851,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *); | |||
| 851 | extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); | 851 | extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); |
| 852 | 852 | ||
| 853 | /* nouveau_acpi.c */ | 853 | /* nouveau_acpi.c */ |
| 854 | #define ROM_BIOS_PAGE 4096 | ||
| 854 | #if defined(CONFIG_ACPI) | 855 | #if defined(CONFIG_ACPI) |
| 855 | void nouveau_register_dsm_handler(void); | 856 | void nouveau_register_dsm_handler(void); |
| 856 | void nouveau_unregister_dsm_handler(void); | 857 | void nouveau_unregister_dsm_handler(void); |
| 858 | int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); | ||
| 859 | bool nouveau_acpi_rom_supported(struct pci_dev *pdev); | ||
| 857 | #else | 860 | #else |
| 858 | static inline void nouveau_register_dsm_handler(void) {} | 861 | static inline void nouveau_register_dsm_handler(void) {} |
| 859 | static inline void nouveau_unregister_dsm_handler(void) {} | 862 | static inline void nouveau_unregister_dsm_handler(void) {} |
| 863 | static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } | ||
| 864 | static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } | ||
| 860 | #endif | 865 | #endif |
| 861 | 866 | ||
| 862 | /* nouveau_backlight.c */ | 867 | /* nouveau_backlight.c */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 775a7017af64..c1fd42b0dad1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev) | |||
| 540 | dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); | 540 | dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); |
| 541 | dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; | 541 | dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; |
| 542 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) | 542 | if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) |
| 543 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; | 543 | dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); |
| 544 | dev_priv->vram_sys_base <<= 12; | ||
| 544 | } | 545 | } |
| 545 | 546 | ||
| 546 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); | 547 | NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index e632339c323e..147e59c40151 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -376,12 +376,15 @@ out_err: | |||
| 376 | static void nouveau_switcheroo_set_state(struct pci_dev *pdev, | 376 | static void nouveau_switcheroo_set_state(struct pci_dev *pdev, |
| 377 | enum vga_switcheroo_state state) | 377 | enum vga_switcheroo_state state) |
| 378 | { | 378 | { |
| 379 | struct drm_device *dev = pci_get_drvdata(pdev); | ||
| 379 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 380 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
| 380 | if (state == VGA_SWITCHEROO_ON) { | 381 | if (state == VGA_SWITCHEROO_ON) { |
| 381 | printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); | 382 | printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); |
| 382 | nouveau_pci_resume(pdev); | 383 | nouveau_pci_resume(pdev); |
| 384 | drm_kms_helper_poll_enable(dev); | ||
| 383 | } else { | 385 | } else { |
| 384 | printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); | 386 | printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); |
| 387 | drm_kms_helper_poll_disable(dev); | ||
| 385 | nouveau_pci_suspend(pdev, pmm); | 388 | nouveau_pci_suspend(pdev, pmm); |
| 386 | } | 389 | } |
| 387 | } | 390 | } |
| @@ -913,6 +916,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, | |||
| 913 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: | 916 | case NOUVEAU_GETPARAM_VM_VRAM_BASE: |
| 914 | getparam->value = dev_priv->vm_vram_base; | 917 | getparam->value = dev_priv->vm_vram_base; |
| 915 | break; | 918 | break; |
| 919 | case NOUVEAU_GETPARAM_PTIMER_TIME: | ||
| 920 | getparam->value = dev_priv->engine.timer.read(dev); | ||
| 921 | break; | ||
| 916 | case NOUVEAU_GETPARAM_GRAPH_UNITS: | 922 | case NOUVEAU_GETPARAM_GRAPH_UNITS: |
| 917 | /* NV40 and NV50 versions are quite different, but register | 923 | /* NV40 and NV50 versions are quite different, but register |
| 918 | * address is the same. User is supposed to know the card | 924 | * address is the same. User is supposed to know the card |
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c index 89a91b9d8b25..aaf3de3bc816 100644 --- a/drivers/gpu/drm/nouveau/nv04_cursor.c +++ b/drivers/gpu/drm/nouveau/nv04_cursor.c | |||
| @@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) | |||
| 20 | static void | 20 | static void |
| 21 | nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) | 21 | nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) |
| 22 | { | 22 | { |
| 23 | nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; | ||
| 23 | NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, | 24 | NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, |
| 24 | NV_PRAMDAC_CU_START_POS, | 25 | NV_PRAMDAC_CU_START_POS, |
| 25 | XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | | 26 | XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | |
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c index 753e723adb3a..03ad7ab14f09 100644 --- a/drivers/gpu/drm/nouveau/nv50_cursor.c +++ b/drivers/gpu/drm/nouveau/nv50_cursor.c | |||
| @@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) | |||
| 107 | { | 107 | { |
| 108 | struct drm_device *dev = nv_crtc->base.dev; | 108 | struct drm_device *dev = nv_crtc->base.dev; |
| 109 | 109 | ||
| 110 | nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; | ||
| 110 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), | 111 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), |
| 111 | ((y & 0xFFFF) << 16) | (x & 0xFFFF)); | 112 | ((y & 0xFFFF) << 16) | (x & 0xFFFF)); |
| 112 | /* Needed to make the cursor move. */ | 113 | /* Needed to make the cursor move. */ |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index b11eaf9c5c7c..812778db76ac 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
| @@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { | |||
| 274 | int | 274 | int |
| 275 | nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) | 275 | nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) |
| 276 | { | 276 | { |
| 277 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 278 | struct nouveau_encoder *nv_encoder = NULL; | 277 | struct nouveau_encoder *nv_encoder = NULL; |
| 279 | struct drm_encoder *encoder; | 278 | struct drm_encoder *encoder; |
| 280 | bool dum; | 279 | bool dum; |
| @@ -324,11 +323,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) | |||
| 324 | int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); | 323 | int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); |
| 325 | uint32_t tmp; | 324 | uint32_t tmp; |
| 326 | 325 | ||
| 327 | if (dev_priv->chipset < 0x90 || | 326 | tmp = nv_rd32(dev, 0x61c700 + (or * 0x800)); |
| 328 | dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) | ||
| 329 | tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or)); | ||
| 330 | else | ||
| 331 | tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or)); | ||
| 332 | 327 | ||
| 333 | switch ((tmp & 0x00000f00) >> 8) { | 328 | switch ((tmp & 0x00000f00) >> 8) { |
| 334 | case 8: | 329 | case 8: |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 3c91312dea9a..84b1f2729d43 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
| @@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable | |||
| 33 | $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable | 33 | $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable |
| 34 | $(call if_changed,mkregtable) | 34 | $(call if_changed,mkregtable) |
| 35 | 35 | ||
| 36 | $(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable | ||
| 37 | $(call if_changed,mkregtable) | ||
| 38 | |||
| 36 | $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h | 39 | $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h |
| 37 | 40 | ||
| 38 | $(obj)/r200.o: $(obj)/r200_reg_safe.h | 41 | $(obj)/r200.o: $(obj)/r200_reg_safe.h |
| @@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h | |||
| 47 | 50 | ||
| 48 | $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h | 51 | $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h |
| 49 | 52 | ||
| 53 | $(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h | ||
| 54 | |||
| 50 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ | 55 | radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ |
| 51 | radeon_irq.o r300_cmdbuf.o r600_cp.o | 56 | radeon_irq.o r300_cmdbuf.o r600_cp.o |
| 52 | # add KMS driver | 57 | # add KMS driver |
| @@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
| 60 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ | 65 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ |
| 61 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 66 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
| 62 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 67 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
| 63 | evergreen.o | 68 | evergreen.o evergreen_cs.o |
| 64 | 69 | ||
| 65 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 70 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
| 66 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 71 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 8c8e4d3cbaa3..0440c0939bdd 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -41,7 +41,12 @@ void evergreen_fini(struct radeon_device *rdev); | |||
| 41 | 41 | ||
| 42 | void evergreen_pm_misc(struct radeon_device *rdev) | 42 | void evergreen_pm_misc(struct radeon_device *rdev) |
| 43 | { | 43 | { |
| 44 | int requested_index = rdev->pm.requested_power_state_index; | ||
| 45 | struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; | ||
| 46 | struct radeon_voltage *voltage = &ps->clock_info[0].voltage; | ||
| 44 | 47 | ||
| 48 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) | ||
| 49 | radeon_atom_set_voltage(rdev, voltage->voltage); | ||
| 45 | } | 50 | } |
| 46 | 51 | ||
| 47 | void evergreen_pm_prepare(struct radeon_device *rdev) | 52 | void evergreen_pm_prepare(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c new file mode 100644 index 000000000000..64516b950891 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
| @@ -0,0 +1,1356 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2010 Advanced Micro Devices, Inc. | ||
| 3 | * Copyright 2008 Red Hat Inc. | ||
| 4 | * Copyright 2009 Jerome Glisse. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 23 | * | ||
| 24 | * Authors: Dave Airlie | ||
| 25 | * Alex Deucher | ||
| 26 | * Jerome Glisse | ||
| 27 | */ | ||
| 28 | #include "drmP.h" | ||
| 29 | #include "radeon.h" | ||
| 30 | #include "evergreend.h" | ||
| 31 | #include "evergreen_reg_safe.h" | ||
| 32 | |||
| 33 | static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, | ||
| 34 | struct radeon_cs_reloc **cs_reloc); | ||
| 35 | |||
| 36 | struct evergreen_cs_track { | ||
| 37 | u32 group_size; | ||
| 38 | u32 nbanks; | ||
| 39 | u32 npipes; | ||
| 40 | /* value we track */ | ||
| 41 | u32 nsamples; | ||
| 42 | u32 cb_color_base_last[12]; | ||
| 43 | struct radeon_bo *cb_color_bo[12]; | ||
| 44 | u32 cb_color_bo_offset[12]; | ||
| 45 | struct radeon_bo *cb_color_fmask_bo[8]; | ||
| 46 | struct radeon_bo *cb_color_cmask_bo[8]; | ||
| 47 | u32 cb_color_info[12]; | ||
| 48 | u32 cb_color_view[12]; | ||
| 49 | u32 cb_color_pitch_idx[12]; | ||
| 50 | u32 cb_color_slice_idx[12]; | ||
| 51 | u32 cb_color_dim_idx[12]; | ||
| 52 | u32 cb_color_dim[12]; | ||
| 53 | u32 cb_color_pitch[12]; | ||
| 54 | u32 cb_color_slice[12]; | ||
| 55 | u32 cb_color_cmask_slice[8]; | ||
| 56 | u32 cb_color_fmask_slice[8]; | ||
| 57 | u32 cb_target_mask; | ||
| 58 | u32 cb_shader_mask; | ||
| 59 | u32 vgt_strmout_config; | ||
| 60 | u32 vgt_strmout_buffer_config; | ||
| 61 | u32 db_depth_control; | ||
| 62 | u32 db_depth_view; | ||
| 63 | u32 db_depth_size; | ||
| 64 | u32 db_depth_size_idx; | ||
| 65 | u32 db_z_info; | ||
| 66 | u32 db_z_idx; | ||
| 67 | u32 db_z_read_offset; | ||
| 68 | u32 db_z_write_offset; | ||
| 69 | struct radeon_bo *db_z_read_bo; | ||
| 70 | struct radeon_bo *db_z_write_bo; | ||
| 71 | u32 db_s_info; | ||
| 72 | u32 db_s_idx; | ||
| 73 | u32 db_s_read_offset; | ||
| 74 | u32 db_s_write_offset; | ||
| 75 | struct radeon_bo *db_s_read_bo; | ||
| 76 | struct radeon_bo *db_s_write_bo; | ||
| 77 | }; | ||
| 78 | |||
| 79 | static void evergreen_cs_track_init(struct evergreen_cs_track *track) | ||
| 80 | { | ||
| 81 | int i; | ||
| 82 | |||
| 83 | for (i = 0; i < 8; i++) { | ||
| 84 | track->cb_color_fmask_bo[i] = NULL; | ||
| 85 | track->cb_color_cmask_bo[i] = NULL; | ||
| 86 | track->cb_color_cmask_slice[i] = 0; | ||
| 87 | track->cb_color_fmask_slice[i] = 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | for (i = 0; i < 12; i++) { | ||
| 91 | track->cb_color_base_last[i] = 0; | ||
| 92 | track->cb_color_bo[i] = NULL; | ||
| 93 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; | ||
| 94 | track->cb_color_info[i] = 0; | ||
| 95 | track->cb_color_view[i] = 0; | ||
| 96 | track->cb_color_pitch_idx[i] = 0; | ||
| 97 | track->cb_color_slice_idx[i] = 0; | ||
| 98 | track->cb_color_dim[i] = 0; | ||
| 99 | track->cb_color_pitch[i] = 0; | ||
| 100 | track->cb_color_slice[i] = 0; | ||
| 101 | track->cb_color_dim[i] = 0; | ||
| 102 | } | ||
| 103 | track->cb_target_mask = 0xFFFFFFFF; | ||
| 104 | track->cb_shader_mask = 0xFFFFFFFF; | ||
| 105 | |||
| 106 | track->db_depth_view = 0xFFFFC000; | ||
| 107 | track->db_depth_size = 0xFFFFFFFF; | ||
| 108 | track->db_depth_size_idx = 0; | ||
| 109 | track->db_depth_control = 0xFFFFFFFF; | ||
| 110 | track->db_z_info = 0xFFFFFFFF; | ||
| 111 | track->db_z_idx = 0xFFFFFFFF; | ||
| 112 | track->db_z_read_offset = 0xFFFFFFFF; | ||
| 113 | track->db_z_write_offset = 0xFFFFFFFF; | ||
| 114 | track->db_z_read_bo = NULL; | ||
| 115 | track->db_z_write_bo = NULL; | ||
| 116 | track->db_s_info = 0xFFFFFFFF; | ||
| 117 | track->db_s_idx = 0xFFFFFFFF; | ||
| 118 | track->db_s_read_offset = 0xFFFFFFFF; | ||
| 119 | track->db_s_write_offset = 0xFFFFFFFF; | ||
| 120 | track->db_s_read_bo = NULL; | ||
| 121 | track->db_s_write_bo = NULL; | ||
| 122 | } | ||
| 123 | |||
| 124 | static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | ||
| 125 | { | ||
| 126 | /* XXX fill in */ | ||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | |||
| 130 | static int evergreen_cs_track_check(struct radeon_cs_parser *p) | ||
| 131 | { | ||
| 132 | struct evergreen_cs_track *track = p->track; | ||
| 133 | |||
| 134 | /* we don't support stream out buffer yet */ | ||
| 135 | if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) { | ||
| 136 | dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); | ||
| 137 | return -EINVAL; | ||
| 138 | } | ||
| 139 | |||
| 140 | /* XXX fill in */ | ||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
| 144 | /** | ||
| 145 | * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet | ||
| 146 | * @parser: parser structure holding parsing context. | ||
| 147 | * @pkt: where to store packet informations | ||
| 148 | * | ||
| 149 | * Assume that chunk_ib_index is properly set. Will return -EINVAL | ||
| 150 | * if packet is bigger than remaining ib size. or if packets is unknown. | ||
| 151 | **/ | ||
| 152 | int evergreen_cs_packet_parse(struct radeon_cs_parser *p, | ||
| 153 | struct radeon_cs_packet *pkt, | ||
| 154 | unsigned idx) | ||
| 155 | { | ||
| 156 | struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
| 157 | uint32_t header; | ||
| 158 | |||
| 159 | if (idx >= ib_chunk->length_dw) { | ||
| 160 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", | ||
| 161 | idx, ib_chunk->length_dw); | ||
| 162 | return -EINVAL; | ||
| 163 | } | ||
| 164 | header = radeon_get_ib_value(p, idx); | ||
| 165 | pkt->idx = idx; | ||
| 166 | pkt->type = CP_PACKET_GET_TYPE(header); | ||
| 167 | pkt->count = CP_PACKET_GET_COUNT(header); | ||
| 168 | pkt->one_reg_wr = 0; | ||
| 169 | switch (pkt->type) { | ||
| 170 | case PACKET_TYPE0: | ||
| 171 | pkt->reg = CP_PACKET0_GET_REG(header); | ||
| 172 | break; | ||
| 173 | case PACKET_TYPE3: | ||
| 174 | pkt->opcode = CP_PACKET3_GET_OPCODE(header); | ||
| 175 | break; | ||
| 176 | case PACKET_TYPE2: | ||
| 177 | pkt->count = -1; | ||
| 178 | break; | ||
| 179 | default: | ||
| 180 | DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); | ||
| 181 | return -EINVAL; | ||
| 182 | } | ||
| 183 | if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { | ||
| 184 | DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", | ||
| 185 | pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); | ||
| 186 | return -EINVAL; | ||
| 187 | } | ||
| 188 | return 0; | ||
| 189 | } | ||
| 190 | |||
| 191 | /** | ||
| 192 | * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3 | ||
| 193 | * @parser: parser structure holding parsing context. | ||
| 194 | * @data: pointer to relocation data | ||
| 195 | * @offset_start: starting offset | ||
| 196 | * @offset_mask: offset mask (to align start offset on) | ||
| 197 | * @reloc: reloc informations | ||
| 198 | * | ||
| 199 | * Check next packet is relocation packet3, do bo validation and compute | ||
| 200 | * GPU offset using the provided start. | ||
| 201 | **/ | ||
| 202 | static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, | ||
| 203 | struct radeon_cs_reloc **cs_reloc) | ||
| 204 | { | ||
| 205 | struct radeon_cs_chunk *relocs_chunk; | ||
| 206 | struct radeon_cs_packet p3reloc; | ||
| 207 | unsigned idx; | ||
| 208 | int r; | ||
| 209 | |||
| 210 | if (p->chunk_relocs_idx == -1) { | ||
| 211 | DRM_ERROR("No relocation chunk !\n"); | ||
| 212 | return -EINVAL; | ||
| 213 | } | ||
| 214 | *cs_reloc = NULL; | ||
| 215 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | ||
| 216 | r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); | ||
| 217 | if (r) { | ||
| 218 | return r; | ||
| 219 | } | ||
| 220 | p->idx += p3reloc.count + 2; | ||
| 221 | if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { | ||
| 222 | DRM_ERROR("No packet3 for relocation for packet at %d.\n", | ||
| 223 | p3reloc.idx); | ||
| 224 | return -EINVAL; | ||
| 225 | } | ||
| 226 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); | ||
| 227 | if (idx >= relocs_chunk->length_dw) { | ||
| 228 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", | ||
| 229 | idx, relocs_chunk->length_dw); | ||
| 230 | return -EINVAL; | ||
| 231 | } | ||
| 232 | /* FIXME: we assume reloc size is 4 dwords */ | ||
| 233 | *cs_reloc = p->relocs_ptr[(idx / 4)]; | ||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | |||
| 237 | /** | ||
| 238 | * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc | ||
| 239 | * @parser: parser structure holding parsing context. | ||
| 240 | * | ||
| 241 | * Check next packet is relocation packet3, do bo validation and compute | ||
| 242 | * GPU offset using the provided start. | ||
| 243 | **/ | ||
| 244 | static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) | ||
| 245 | { | ||
| 246 | struct radeon_cs_packet p3reloc; | ||
| 247 | int r; | ||
| 248 | |||
| 249 | r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); | ||
| 250 | if (r) { | ||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { | ||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | return 1; | ||
| 257 | } | ||
| 258 | |||
| 259 | /** | ||
| 260 | * evergreen_cs_packet_next_vline() - parse userspace VLINE packet | ||
| 261 | * @parser: parser structure holding parsing context. | ||
| 262 | * | ||
| 263 | * Userspace sends a special sequence for VLINE waits. | ||
| 264 | * PACKET0 - VLINE_START_END + value | ||
| 265 | * PACKET3 - WAIT_REG_MEM poll vline status reg | ||
| 266 | * RELOC (P3) - crtc_id in reloc. | ||
| 267 | * | ||
| 268 | * This function parses this and relocates the VLINE START END | ||
| 269 | * and WAIT_REG_MEM packets to the correct crtc. | ||
| 270 | * It also detects a switched off crtc and nulls out the | ||
| 271 | * wait in that case. | ||
| 272 | */ | ||
| 273 | static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p) | ||
| 274 | { | ||
| 275 | struct drm_mode_object *obj; | ||
| 276 | struct drm_crtc *crtc; | ||
| 277 | struct radeon_crtc *radeon_crtc; | ||
| 278 | struct radeon_cs_packet p3reloc, wait_reg_mem; | ||
| 279 | int crtc_id; | ||
| 280 | int r; | ||
| 281 | uint32_t header, h_idx, reg, wait_reg_mem_info; | ||
| 282 | volatile uint32_t *ib; | ||
| 283 | |||
| 284 | ib = p->ib->ptr; | ||
| 285 | |||
| 286 | /* parse the WAIT_REG_MEM */ | ||
| 287 | r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx); | ||
| 288 | if (r) | ||
| 289 | return r; | ||
| 290 | |||
| 291 | /* check its a WAIT_REG_MEM */ | ||
| 292 | if (wait_reg_mem.type != PACKET_TYPE3 || | ||
| 293 | wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { | ||
| 294 | DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); | ||
| 295 | r = -EINVAL; | ||
| 296 | return r; | ||
| 297 | } | ||
| 298 | |||
| 299 | wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); | ||
| 300 | /* bit 4 is reg (0) or mem (1) */ | ||
| 301 | if (wait_reg_mem_info & 0x10) { | ||
| 302 | DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); | ||
| 303 | r = -EINVAL; | ||
| 304 | return r; | ||
| 305 | } | ||
| 306 | /* waiting for value to be equal */ | ||
| 307 | if ((wait_reg_mem_info & 0x7) != 0x3) { | ||
| 308 | DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); | ||
| 309 | r = -EINVAL; | ||
| 310 | return r; | ||
| 311 | } | ||
| 312 | if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) { | ||
| 313 | DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); | ||
| 314 | r = -EINVAL; | ||
| 315 | return r; | ||
| 316 | } | ||
| 317 | |||
| 318 | if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) { | ||
| 319 | DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); | ||
| 320 | r = -EINVAL; | ||
| 321 | return r; | ||
| 322 | } | ||
| 323 | |||
| 324 | /* jump over the NOP */ | ||
| 325 | r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); | ||
| 326 | if (r) | ||
| 327 | return r; | ||
| 328 | |||
| 329 | h_idx = p->idx - 2; | ||
| 330 | p->idx += wait_reg_mem.count + 2; | ||
| 331 | p->idx += p3reloc.count + 2; | ||
| 332 | |||
| 333 | header = radeon_get_ib_value(p, h_idx); | ||
| 334 | crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); | ||
| 335 | reg = CP_PACKET0_GET_REG(header); | ||
| 336 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | ||
| 337 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | ||
| 338 | if (!obj) { | ||
| 339 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | ||
| 340 | r = -EINVAL; | ||
| 341 | goto out; | ||
| 342 | } | ||
| 343 | crtc = obj_to_crtc(obj); | ||
| 344 | radeon_crtc = to_radeon_crtc(crtc); | ||
| 345 | crtc_id = radeon_crtc->crtc_id; | ||
| 346 | |||
| 347 | if (!crtc->enabled) { | ||
| 348 | /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ | ||
| 349 | ib[h_idx + 2] = PACKET2(0); | ||
| 350 | ib[h_idx + 3] = PACKET2(0); | ||
| 351 | ib[h_idx + 4] = PACKET2(0); | ||
| 352 | ib[h_idx + 5] = PACKET2(0); | ||
| 353 | ib[h_idx + 6] = PACKET2(0); | ||
| 354 | ib[h_idx + 7] = PACKET2(0); | ||
| 355 | ib[h_idx + 8] = PACKET2(0); | ||
| 356 | } else { | ||
| 357 | switch (reg) { | ||
| 358 | case EVERGREEN_VLINE_START_END: | ||
| 359 | header &= ~R600_CP_PACKET0_REG_MASK; | ||
| 360 | header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2; | ||
| 361 | ib[h_idx] = header; | ||
| 362 | ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2; | ||
| 363 | break; | ||
| 364 | default: | ||
| 365 | DRM_ERROR("unknown crtc reloc\n"); | ||
| 366 | r = -EINVAL; | ||
| 367 | goto out; | ||
| 368 | } | ||
| 369 | } | ||
| 370 | out: | ||
| 371 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
| 372 | return r; | ||
| 373 | } | ||
| 374 | |||
| 375 | static int evergreen_packet0_check(struct radeon_cs_parser *p, | ||
| 376 | struct radeon_cs_packet *pkt, | ||
| 377 | unsigned idx, unsigned reg) | ||
| 378 | { | ||
| 379 | int r; | ||
| 380 | |||
| 381 | switch (reg) { | ||
| 382 | case EVERGREEN_VLINE_START_END: | ||
| 383 | r = evergreen_cs_packet_parse_vline(p); | ||
| 384 | if (r) { | ||
| 385 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
| 386 | idx, reg); | ||
| 387 | return r; | ||
| 388 | } | ||
| 389 | break; | ||
| 390 | default: | ||
| 391 | printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", | ||
| 392 | reg, idx); | ||
| 393 | return -EINVAL; | ||
| 394 | } | ||
| 395 | return 0; | ||
| 396 | } | ||
| 397 | |||
| 398 | static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p, | ||
| 399 | struct radeon_cs_packet *pkt) | ||
| 400 | { | ||
| 401 | unsigned reg, i; | ||
| 402 | unsigned idx; | ||
| 403 | int r; | ||
| 404 | |||
| 405 | idx = pkt->idx + 1; | ||
| 406 | reg = pkt->reg; | ||
| 407 | for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { | ||
| 408 | r = evergreen_packet0_check(p, pkt, idx, reg); | ||
| 409 | if (r) { | ||
| 410 | return r; | ||
| 411 | } | ||
| 412 | } | ||
| 413 | return 0; | ||
| 414 | } | ||
| 415 | |||
| 416 | /** | ||
| 417 | * evergreen_cs_check_reg() - check if register is authorized or not | ||
| 418 | * @parser: parser structure holding parsing context | ||
| 419 | * @reg: register we are testing | ||
| 420 | * @idx: index into the cs buffer | ||
| 421 | * | ||
| 422 | * This function will test against evergreen_reg_safe_bm and return 0 | ||
| 423 | * if register is safe. If register is not flag as safe this function | ||
| 424 | * will test it against a list of register needind special handling. | ||
| 425 | */ | ||
| 426 | static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | ||
| 427 | { | ||
| 428 | struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; | ||
| 429 | struct radeon_cs_reloc *reloc; | ||
| 430 | u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); | ||
| 431 | u32 m, i, tmp, *ib; | ||
| 432 | int r; | ||
| 433 | |||
| 434 | i = (reg >> 7); | ||
| 435 | if (i > last_reg) { | ||
| 436 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | ||
| 437 | return -EINVAL; | ||
| 438 | } | ||
| 439 | m = 1 << ((reg >> 2) & 31); | ||
| 440 | if (!(evergreen_reg_safe_bm[i] & m)) | ||
| 441 | return 0; | ||
| 442 | ib = p->ib->ptr; | ||
| 443 | switch (reg) { | ||
| 444 | /* force following reg to 0 in an attemp to disable out buffer | ||
| 445 | * which will need us to better understand how it works to perform | ||
| 446 | * security check on it (Jerome) | ||
| 447 | */ | ||
| 448 | case SQ_ESGS_RING_SIZE: | ||
| 449 | case SQ_GSVS_RING_SIZE: | ||
| 450 | case SQ_ESTMP_RING_SIZE: | ||
| 451 | case SQ_GSTMP_RING_SIZE: | ||
| 452 | case SQ_HSTMP_RING_SIZE: | ||
| 453 | case SQ_LSTMP_RING_SIZE: | ||
| 454 | case SQ_PSTMP_RING_SIZE: | ||
| 455 | case SQ_VSTMP_RING_SIZE: | ||
| 456 | case SQ_ESGS_RING_ITEMSIZE: | ||
| 457 | case SQ_ESTMP_RING_ITEMSIZE: | ||
| 458 | case SQ_GSTMP_RING_ITEMSIZE: | ||
| 459 | case SQ_GSVS_RING_ITEMSIZE: | ||
| 460 | case SQ_GS_VERT_ITEMSIZE: | ||
| 461 | case SQ_GS_VERT_ITEMSIZE_1: | ||
| 462 | case SQ_GS_VERT_ITEMSIZE_2: | ||
| 463 | case SQ_GS_VERT_ITEMSIZE_3: | ||
| 464 | case SQ_GSVS_RING_OFFSET_1: | ||
| 465 | case SQ_GSVS_RING_OFFSET_2: | ||
| 466 | case SQ_GSVS_RING_OFFSET_3: | ||
| 467 | case SQ_HSTMP_RING_ITEMSIZE: | ||
| 468 | case SQ_LSTMP_RING_ITEMSIZE: | ||
| 469 | case SQ_PSTMP_RING_ITEMSIZE: | ||
| 470 | case SQ_VSTMP_RING_ITEMSIZE: | ||
| 471 | case VGT_TF_RING_SIZE: | ||
| 472 | /* get value to populate the IB don't remove */ | ||
| 473 | tmp =radeon_get_ib_value(p, idx); | ||
| 474 | ib[idx] = 0; | ||
| 475 | break; | ||
| 476 | case DB_DEPTH_CONTROL: | ||
| 477 | track->db_depth_control = radeon_get_ib_value(p, idx); | ||
| 478 | break; | ||
| 479 | case DB_Z_INFO: | ||
| 480 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 481 | if (r) { | ||
| 482 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 483 | "0x%04X\n", reg); | ||
| 484 | return -EINVAL; | ||
| 485 | } | ||
| 486 | track->db_z_info = radeon_get_ib_value(p, idx); | ||
| 487 | ib[idx] &= ~Z_ARRAY_MODE(0xf); | ||
| 488 | track->db_z_info &= ~Z_ARRAY_MODE(0xf); | ||
| 489 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
| 490 | ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 491 | track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 492 | } else { | ||
| 493 | ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 494 | track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 495 | } | ||
| 496 | break; | ||
| 497 | case DB_STENCIL_INFO: | ||
| 498 | track->db_s_info = radeon_get_ib_value(p, idx); | ||
| 499 | break; | ||
| 500 | case DB_DEPTH_VIEW: | ||
| 501 | track->db_depth_view = radeon_get_ib_value(p, idx); | ||
| 502 | break; | ||
| 503 | case DB_DEPTH_SIZE: | ||
| 504 | track->db_depth_size = radeon_get_ib_value(p, idx); | ||
| 505 | track->db_depth_size_idx = idx; | ||
| 506 | break; | ||
| 507 | case DB_Z_READ_BASE: | ||
| 508 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 509 | if (r) { | ||
| 510 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 511 | "0x%04X\n", reg); | ||
| 512 | return -EINVAL; | ||
| 513 | } | ||
| 514 | track->db_z_read_offset = radeon_get_ib_value(p, idx); | ||
| 515 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 516 | track->db_z_read_bo = reloc->robj; | ||
| 517 | break; | ||
| 518 | case DB_Z_WRITE_BASE: | ||
| 519 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 520 | if (r) { | ||
| 521 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 522 | "0x%04X\n", reg); | ||
| 523 | return -EINVAL; | ||
| 524 | } | ||
| 525 | track->db_z_write_offset = radeon_get_ib_value(p, idx); | ||
| 526 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 527 | track->db_z_write_bo = reloc->robj; | ||
| 528 | break; | ||
| 529 | case DB_STENCIL_READ_BASE: | ||
| 530 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 531 | if (r) { | ||
| 532 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 533 | "0x%04X\n", reg); | ||
| 534 | return -EINVAL; | ||
| 535 | } | ||
| 536 | track->db_s_read_offset = radeon_get_ib_value(p, idx); | ||
| 537 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 538 | track->db_s_read_bo = reloc->robj; | ||
| 539 | break; | ||
| 540 | case DB_STENCIL_WRITE_BASE: | ||
| 541 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 542 | if (r) { | ||
| 543 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 544 | "0x%04X\n", reg); | ||
| 545 | return -EINVAL; | ||
| 546 | } | ||
| 547 | track->db_s_write_offset = radeon_get_ib_value(p, idx); | ||
| 548 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 549 | track->db_s_write_bo = reloc->robj; | ||
| 550 | break; | ||
| 551 | case VGT_STRMOUT_CONFIG: | ||
| 552 | track->vgt_strmout_config = radeon_get_ib_value(p, idx); | ||
| 553 | break; | ||
| 554 | case VGT_STRMOUT_BUFFER_CONFIG: | ||
| 555 | track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx); | ||
| 556 | break; | ||
| 557 | case CB_TARGET_MASK: | ||
| 558 | track->cb_target_mask = radeon_get_ib_value(p, idx); | ||
| 559 | break; | ||
| 560 | case CB_SHADER_MASK: | ||
| 561 | track->cb_shader_mask = radeon_get_ib_value(p, idx); | ||
| 562 | break; | ||
| 563 | case PA_SC_AA_CONFIG: | ||
| 564 | tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; | ||
| 565 | track->nsamples = 1 << tmp; | ||
| 566 | break; | ||
| 567 | case CB_COLOR0_VIEW: | ||
| 568 | case CB_COLOR1_VIEW: | ||
| 569 | case CB_COLOR2_VIEW: | ||
| 570 | case CB_COLOR3_VIEW: | ||
| 571 | case CB_COLOR4_VIEW: | ||
| 572 | case CB_COLOR5_VIEW: | ||
| 573 | case CB_COLOR6_VIEW: | ||
| 574 | case CB_COLOR7_VIEW: | ||
| 575 | tmp = (reg - CB_COLOR0_VIEW) / 0x3c; | ||
| 576 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); | ||
| 577 | break; | ||
| 578 | case CB_COLOR8_VIEW: | ||
| 579 | case CB_COLOR9_VIEW: | ||
| 580 | case CB_COLOR10_VIEW: | ||
| 581 | case CB_COLOR11_VIEW: | ||
| 582 | tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8; | ||
| 583 | track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); | ||
| 584 | break; | ||
| 585 | case CB_COLOR0_INFO: | ||
| 586 | case CB_COLOR1_INFO: | ||
| 587 | case CB_COLOR2_INFO: | ||
| 588 | case CB_COLOR3_INFO: | ||
| 589 | case CB_COLOR4_INFO: | ||
| 590 | case CB_COLOR5_INFO: | ||
| 591 | case CB_COLOR6_INFO: | ||
| 592 | case CB_COLOR7_INFO: | ||
| 593 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 594 | if (r) { | ||
| 595 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 596 | "0x%04X\n", reg); | ||
| 597 | return -EINVAL; | ||
| 598 | } | ||
| 599 | tmp = (reg - CB_COLOR0_INFO) / 0x3c; | ||
| 600 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); | ||
| 601 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
| 602 | ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 603 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 604 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
| 605 | ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 606 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 607 | } | ||
| 608 | break; | ||
| 609 | case CB_COLOR8_INFO: | ||
| 610 | case CB_COLOR9_INFO: | ||
| 611 | case CB_COLOR10_INFO: | ||
| 612 | case CB_COLOR11_INFO: | ||
| 613 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 614 | if (r) { | ||
| 615 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 616 | "0x%04X\n", reg); | ||
| 617 | return -EINVAL; | ||
| 618 | } | ||
| 619 | tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; | ||
| 620 | track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); | ||
| 621 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { | ||
| 622 | ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 623 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | ||
| 624 | } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
| 625 | ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 626 | track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); | ||
| 627 | } | ||
| 628 | break; | ||
| 629 | case CB_COLOR0_PITCH: | ||
| 630 | case CB_COLOR1_PITCH: | ||
| 631 | case CB_COLOR2_PITCH: | ||
| 632 | case CB_COLOR3_PITCH: | ||
| 633 | case CB_COLOR4_PITCH: | ||
| 634 | case CB_COLOR5_PITCH: | ||
| 635 | case CB_COLOR6_PITCH: | ||
| 636 | case CB_COLOR7_PITCH: | ||
| 637 | tmp = (reg - CB_COLOR0_PITCH) / 0x3c; | ||
| 638 | track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); | ||
| 639 | track->cb_color_pitch_idx[tmp] = idx; | ||
| 640 | break; | ||
| 641 | case CB_COLOR8_PITCH: | ||
| 642 | case CB_COLOR9_PITCH: | ||
| 643 | case CB_COLOR10_PITCH: | ||
| 644 | case CB_COLOR11_PITCH: | ||
| 645 | tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8; | ||
| 646 | track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx); | ||
| 647 | track->cb_color_pitch_idx[tmp] = idx; | ||
| 648 | break; | ||
| 649 | case CB_COLOR0_SLICE: | ||
| 650 | case CB_COLOR1_SLICE: | ||
| 651 | case CB_COLOR2_SLICE: | ||
| 652 | case CB_COLOR3_SLICE: | ||
| 653 | case CB_COLOR4_SLICE: | ||
| 654 | case CB_COLOR5_SLICE: | ||
| 655 | case CB_COLOR6_SLICE: | ||
| 656 | case CB_COLOR7_SLICE: | ||
| 657 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; | ||
| 658 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | ||
| 659 | track->cb_color_slice_idx[tmp] = idx; | ||
| 660 | break; | ||
| 661 | case CB_COLOR8_SLICE: | ||
| 662 | case CB_COLOR9_SLICE: | ||
| 663 | case CB_COLOR10_SLICE: | ||
| 664 | case CB_COLOR11_SLICE: | ||
| 665 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; | ||
| 666 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | ||
| 667 | track->cb_color_slice_idx[tmp] = idx; | ||
| 668 | break; | ||
| 669 | case CB_COLOR0_ATTRIB: | ||
| 670 | case CB_COLOR1_ATTRIB: | ||
| 671 | case CB_COLOR2_ATTRIB: | ||
| 672 | case CB_COLOR3_ATTRIB: | ||
| 673 | case CB_COLOR4_ATTRIB: | ||
| 674 | case CB_COLOR5_ATTRIB: | ||
| 675 | case CB_COLOR6_ATTRIB: | ||
| 676 | case CB_COLOR7_ATTRIB: | ||
| 677 | case CB_COLOR8_ATTRIB: | ||
| 678 | case CB_COLOR9_ATTRIB: | ||
| 679 | case CB_COLOR10_ATTRIB: | ||
| 680 | case CB_COLOR11_ATTRIB: | ||
| 681 | break; | ||
| 682 | case CB_COLOR0_DIM: | ||
| 683 | case CB_COLOR1_DIM: | ||
| 684 | case CB_COLOR2_DIM: | ||
| 685 | case CB_COLOR3_DIM: | ||
| 686 | case CB_COLOR4_DIM: | ||
| 687 | case CB_COLOR5_DIM: | ||
| 688 | case CB_COLOR6_DIM: | ||
| 689 | case CB_COLOR7_DIM: | ||
| 690 | tmp = (reg - CB_COLOR0_DIM) / 0x3c; | ||
| 691 | track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); | ||
| 692 | track->cb_color_dim_idx[tmp] = idx; | ||
| 693 | break; | ||
| 694 | case CB_COLOR8_DIM: | ||
| 695 | case CB_COLOR9_DIM: | ||
| 696 | case CB_COLOR10_DIM: | ||
| 697 | case CB_COLOR11_DIM: | ||
| 698 | tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8; | ||
| 699 | track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx); | ||
| 700 | track->cb_color_dim_idx[tmp] = idx; | ||
| 701 | break; | ||
| 702 | case CB_COLOR0_FMASK: | ||
| 703 | case CB_COLOR1_FMASK: | ||
| 704 | case CB_COLOR2_FMASK: | ||
| 705 | case CB_COLOR3_FMASK: | ||
| 706 | case CB_COLOR4_FMASK: | ||
| 707 | case CB_COLOR5_FMASK: | ||
| 708 | case CB_COLOR6_FMASK: | ||
| 709 | case CB_COLOR7_FMASK: | ||
| 710 | tmp = (reg - CB_COLOR0_FMASK) / 0x3c; | ||
| 711 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 712 | if (r) { | ||
| 713 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | ||
| 714 | return -EINVAL; | ||
| 715 | } | ||
| 716 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 717 | track->cb_color_fmask_bo[tmp] = reloc->robj; | ||
| 718 | break; | ||
| 719 | case CB_COLOR0_CMASK: | ||
| 720 | case CB_COLOR1_CMASK: | ||
| 721 | case CB_COLOR2_CMASK: | ||
| 722 | case CB_COLOR3_CMASK: | ||
| 723 | case CB_COLOR4_CMASK: | ||
| 724 | case CB_COLOR5_CMASK: | ||
| 725 | case CB_COLOR6_CMASK: | ||
| 726 | case CB_COLOR7_CMASK: | ||
| 727 | tmp = (reg - CB_COLOR0_CMASK) / 0x3c; | ||
| 728 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 729 | if (r) { | ||
| 730 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | ||
| 731 | return -EINVAL; | ||
| 732 | } | ||
| 733 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 734 | track->cb_color_cmask_bo[tmp] = reloc->robj; | ||
| 735 | break; | ||
| 736 | case CB_COLOR0_FMASK_SLICE: | ||
| 737 | case CB_COLOR1_FMASK_SLICE: | ||
| 738 | case CB_COLOR2_FMASK_SLICE: | ||
| 739 | case CB_COLOR3_FMASK_SLICE: | ||
| 740 | case CB_COLOR4_FMASK_SLICE: | ||
| 741 | case CB_COLOR5_FMASK_SLICE: | ||
| 742 | case CB_COLOR6_FMASK_SLICE: | ||
| 743 | case CB_COLOR7_FMASK_SLICE: | ||
| 744 | tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c; | ||
| 745 | track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx); | ||
| 746 | break; | ||
| 747 | case CB_COLOR0_CMASK_SLICE: | ||
| 748 | case CB_COLOR1_CMASK_SLICE: | ||
| 749 | case CB_COLOR2_CMASK_SLICE: | ||
| 750 | case CB_COLOR3_CMASK_SLICE: | ||
| 751 | case CB_COLOR4_CMASK_SLICE: | ||
| 752 | case CB_COLOR5_CMASK_SLICE: | ||
| 753 | case CB_COLOR6_CMASK_SLICE: | ||
| 754 | case CB_COLOR7_CMASK_SLICE: | ||
| 755 | tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c; | ||
| 756 | track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx); | ||
| 757 | break; | ||
| 758 | case CB_COLOR0_BASE: | ||
| 759 | case CB_COLOR1_BASE: | ||
| 760 | case CB_COLOR2_BASE: | ||
| 761 | case CB_COLOR3_BASE: | ||
| 762 | case CB_COLOR4_BASE: | ||
| 763 | case CB_COLOR5_BASE: | ||
| 764 | case CB_COLOR6_BASE: | ||
| 765 | case CB_COLOR7_BASE: | ||
| 766 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 767 | if (r) { | ||
| 768 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 769 | "0x%04X\n", reg); | ||
| 770 | return -EINVAL; | ||
| 771 | } | ||
| 772 | tmp = (reg - CB_COLOR0_BASE) / 0x3c; | ||
| 773 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); | ||
| 774 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 775 | track->cb_color_base_last[tmp] = ib[idx]; | ||
| 776 | track->cb_color_bo[tmp] = reloc->robj; | ||
| 777 | break; | ||
| 778 | case CB_COLOR8_BASE: | ||
| 779 | case CB_COLOR9_BASE: | ||
| 780 | case CB_COLOR10_BASE: | ||
| 781 | case CB_COLOR11_BASE: | ||
| 782 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 783 | if (r) { | ||
| 784 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 785 | "0x%04X\n", reg); | ||
| 786 | return -EINVAL; | ||
| 787 | } | ||
| 788 | tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; | ||
| 789 | track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); | ||
| 790 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 791 | track->cb_color_base_last[tmp] = ib[idx]; | ||
| 792 | track->cb_color_bo[tmp] = reloc->robj; | ||
| 793 | break; | ||
| 794 | case CB_IMMED0_BASE: | ||
| 795 | case CB_IMMED1_BASE: | ||
| 796 | case CB_IMMED2_BASE: | ||
| 797 | case CB_IMMED3_BASE: | ||
| 798 | case CB_IMMED4_BASE: | ||
| 799 | case CB_IMMED5_BASE: | ||
| 800 | case CB_IMMED6_BASE: | ||
| 801 | case CB_IMMED7_BASE: | ||
| 802 | case CB_IMMED8_BASE: | ||
| 803 | case CB_IMMED9_BASE: | ||
| 804 | case CB_IMMED10_BASE: | ||
| 805 | case CB_IMMED11_BASE: | ||
| 806 | case DB_HTILE_DATA_BASE: | ||
| 807 | case SQ_PGM_START_FS: | ||
| 808 | case SQ_PGM_START_ES: | ||
| 809 | case SQ_PGM_START_VS: | ||
| 810 | case SQ_PGM_START_GS: | ||
| 811 | case SQ_PGM_START_PS: | ||
| 812 | case SQ_PGM_START_HS: | ||
| 813 | case SQ_PGM_START_LS: | ||
| 814 | case GDS_ADDR_BASE: | ||
| 815 | case SQ_CONST_MEM_BASE: | ||
| 816 | case SQ_ALU_CONST_CACHE_GS_0: | ||
| 817 | case SQ_ALU_CONST_CACHE_GS_1: | ||
| 818 | case SQ_ALU_CONST_CACHE_GS_2: | ||
| 819 | case SQ_ALU_CONST_CACHE_GS_3: | ||
| 820 | case SQ_ALU_CONST_CACHE_GS_4: | ||
| 821 | case SQ_ALU_CONST_CACHE_GS_5: | ||
| 822 | case SQ_ALU_CONST_CACHE_GS_6: | ||
| 823 | case SQ_ALU_CONST_CACHE_GS_7: | ||
| 824 | case SQ_ALU_CONST_CACHE_GS_8: | ||
| 825 | case SQ_ALU_CONST_CACHE_GS_9: | ||
| 826 | case SQ_ALU_CONST_CACHE_GS_10: | ||
| 827 | case SQ_ALU_CONST_CACHE_GS_11: | ||
| 828 | case SQ_ALU_CONST_CACHE_GS_12: | ||
| 829 | case SQ_ALU_CONST_CACHE_GS_13: | ||
| 830 | case SQ_ALU_CONST_CACHE_GS_14: | ||
| 831 | case SQ_ALU_CONST_CACHE_GS_15: | ||
| 832 | case SQ_ALU_CONST_CACHE_PS_0: | ||
| 833 | case SQ_ALU_CONST_CACHE_PS_1: | ||
| 834 | case SQ_ALU_CONST_CACHE_PS_2: | ||
| 835 | case SQ_ALU_CONST_CACHE_PS_3: | ||
| 836 | case SQ_ALU_CONST_CACHE_PS_4: | ||
| 837 | case SQ_ALU_CONST_CACHE_PS_5: | ||
| 838 | case SQ_ALU_CONST_CACHE_PS_6: | ||
| 839 | case SQ_ALU_CONST_CACHE_PS_7: | ||
| 840 | case SQ_ALU_CONST_CACHE_PS_8: | ||
| 841 | case SQ_ALU_CONST_CACHE_PS_9: | ||
| 842 | case SQ_ALU_CONST_CACHE_PS_10: | ||
| 843 | case SQ_ALU_CONST_CACHE_PS_11: | ||
| 844 | case SQ_ALU_CONST_CACHE_PS_12: | ||
| 845 | case SQ_ALU_CONST_CACHE_PS_13: | ||
| 846 | case SQ_ALU_CONST_CACHE_PS_14: | ||
| 847 | case SQ_ALU_CONST_CACHE_PS_15: | ||
| 848 | case SQ_ALU_CONST_CACHE_VS_0: | ||
| 849 | case SQ_ALU_CONST_CACHE_VS_1: | ||
| 850 | case SQ_ALU_CONST_CACHE_VS_2: | ||
| 851 | case SQ_ALU_CONST_CACHE_VS_3: | ||
| 852 | case SQ_ALU_CONST_CACHE_VS_4: | ||
| 853 | case SQ_ALU_CONST_CACHE_VS_5: | ||
| 854 | case SQ_ALU_CONST_CACHE_VS_6: | ||
| 855 | case SQ_ALU_CONST_CACHE_VS_7: | ||
| 856 | case SQ_ALU_CONST_CACHE_VS_8: | ||
| 857 | case SQ_ALU_CONST_CACHE_VS_9: | ||
| 858 | case SQ_ALU_CONST_CACHE_VS_10: | ||
| 859 | case SQ_ALU_CONST_CACHE_VS_11: | ||
| 860 | case SQ_ALU_CONST_CACHE_VS_12: | ||
| 861 | case SQ_ALU_CONST_CACHE_VS_13: | ||
| 862 | case SQ_ALU_CONST_CACHE_VS_14: | ||
| 863 | case SQ_ALU_CONST_CACHE_VS_15: | ||
| 864 | case SQ_ALU_CONST_CACHE_HS_0: | ||
| 865 | case SQ_ALU_CONST_CACHE_HS_1: | ||
| 866 | case SQ_ALU_CONST_CACHE_HS_2: | ||
| 867 | case SQ_ALU_CONST_CACHE_HS_3: | ||
| 868 | case SQ_ALU_CONST_CACHE_HS_4: | ||
| 869 | case SQ_ALU_CONST_CACHE_HS_5: | ||
| 870 | case SQ_ALU_CONST_CACHE_HS_6: | ||
| 871 | case SQ_ALU_CONST_CACHE_HS_7: | ||
| 872 | case SQ_ALU_CONST_CACHE_HS_8: | ||
| 873 | case SQ_ALU_CONST_CACHE_HS_9: | ||
| 874 | case SQ_ALU_CONST_CACHE_HS_10: | ||
| 875 | case SQ_ALU_CONST_CACHE_HS_11: | ||
| 876 | case SQ_ALU_CONST_CACHE_HS_12: | ||
| 877 | case SQ_ALU_CONST_CACHE_HS_13: | ||
| 878 | case SQ_ALU_CONST_CACHE_HS_14: | ||
| 879 | case SQ_ALU_CONST_CACHE_HS_15: | ||
| 880 | case SQ_ALU_CONST_CACHE_LS_0: | ||
| 881 | case SQ_ALU_CONST_CACHE_LS_1: | ||
| 882 | case SQ_ALU_CONST_CACHE_LS_2: | ||
| 883 | case SQ_ALU_CONST_CACHE_LS_3: | ||
| 884 | case SQ_ALU_CONST_CACHE_LS_4: | ||
| 885 | case SQ_ALU_CONST_CACHE_LS_5: | ||
| 886 | case SQ_ALU_CONST_CACHE_LS_6: | ||
| 887 | case SQ_ALU_CONST_CACHE_LS_7: | ||
| 888 | case SQ_ALU_CONST_CACHE_LS_8: | ||
| 889 | case SQ_ALU_CONST_CACHE_LS_9: | ||
| 890 | case SQ_ALU_CONST_CACHE_LS_10: | ||
| 891 | case SQ_ALU_CONST_CACHE_LS_11: | ||
| 892 | case SQ_ALU_CONST_CACHE_LS_12: | ||
| 893 | case SQ_ALU_CONST_CACHE_LS_13: | ||
| 894 | case SQ_ALU_CONST_CACHE_LS_14: | ||
| 895 | case SQ_ALU_CONST_CACHE_LS_15: | ||
| 896 | r = evergreen_cs_packet_next_reloc(p, &reloc); | ||
| 897 | if (r) { | ||
| 898 | dev_warn(p->dev, "bad SET_CONTEXT_REG " | ||
| 899 | "0x%04X\n", reg); | ||
| 900 | return -EINVAL; | ||
| 901 | } | ||
| 902 | ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
| 903 | break; | ||
| 904 | default: | ||
| 905 | dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); | ||
| 906 | return -EINVAL; | ||
| 907 | } | ||
| 908 | return 0; | ||
| 909 | } | ||
| 910 | |||
/**
 * evergreen_check_texture_resource() - validate a texture resource descriptor
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid field and that
 * the texture and mipmap bo object are big enough to cover this resource.
 *
 * NOTE(review): currently a stub — no validation is performed and the
 * function unconditionally reports success (see the XXX below).
 */
static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
						   struct radeon_bo *texture,
						   struct radeon_bo *mipmap)
{
	/* XXX fill in: bounds/format checks are not implemented yet */
	return 0;
}
| 928 | |||
/**
 * evergreen_packet3_check() - validate and relocate one type-3 PM4 packet
 * @p: parser structure holding parsing context
 * @pkt: packet structure describing the type-3 packet being checked
 *
 * Verifies the dword count of each supported PACKET3 opcode, resolves the
 * relocations the packet references (patching GPU addresses directly into
 * the indirect buffer), range-checks register offsets against the
 * per-opcode start/end windows, and runs evergreen_cs_track_check() before
 * draw commands. Unknown opcodes are rejected.
 *
 * Return: 0 on success, -EINVAL for a malformed or forbidden packet, or an
 * error code propagated from the reloc/track/register helpers.
 */
static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	/* idx points at the first dword after the packet header */
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		/* single-dword packets: header count must be zero */
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		/* 64-bit address split across two dwords: low 32 bits, then
		 * the upper 8 bits */
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_2:
		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		/* address dwords sit one slot later than in DRAW_INDEX */
		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		/* immediate-mode draw carries inline indices, so only a
		 * lower bound on the count is enforced */
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1); only the memory form needs a
		 * relocation */
		if (idx_value & 0x10) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			/* base address dword stores the offset in 256-byte
			 * units (>> 8) */
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		/* the 3-dword form writes to memory and carries a reloc */
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		/* idx_value is a dword offset from the window start; every
		 * register in [start_reg, end_reg] must stay in the window */
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		/* resources are 8-dword descriptors */
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			/* dword 7 of the descriptor carries its type */
			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				/* propagate the bo's tiling into dword 1 */
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_check_texture_resource(p,  idx+1+(i*8),
						texture, mipmap);
				if (r)
					return r;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		/* samplers are 3-dword groups */
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
| 1299 | |||
| 1300 | int evergreen_cs_parse(struct radeon_cs_parser *p) | ||
| 1301 | { | ||
| 1302 | struct radeon_cs_packet pkt; | ||
| 1303 | struct evergreen_cs_track *track; | ||
| 1304 | int r; | ||
| 1305 | |||
| 1306 | if (p->track == NULL) { | ||
| 1307 | /* initialize tracker, we are in kms */ | ||
| 1308 | track = kzalloc(sizeof(*track), GFP_KERNEL); | ||
| 1309 | if (track == NULL) | ||
| 1310 | return -ENOMEM; | ||
| 1311 | evergreen_cs_track_init(track); | ||
| 1312 | track->npipes = p->rdev->config.evergreen.tiling_npipes; | ||
| 1313 | track->nbanks = p->rdev->config.evergreen.tiling_nbanks; | ||
| 1314 | track->group_size = p->rdev->config.evergreen.tiling_group_size; | ||
| 1315 | p->track = track; | ||
| 1316 | } | ||
| 1317 | do { | ||
| 1318 | r = evergreen_cs_packet_parse(p, &pkt, p->idx); | ||
| 1319 | if (r) { | ||
| 1320 | kfree(p->track); | ||
| 1321 | p->track = NULL; | ||
| 1322 | return r; | ||
| 1323 | } | ||
| 1324 | p->idx += pkt.count + 2; | ||
| 1325 | switch (pkt.type) { | ||
| 1326 | case PACKET_TYPE0: | ||
| 1327 | r = evergreen_cs_parse_packet0(p, &pkt); | ||
| 1328 | break; | ||
| 1329 | case PACKET_TYPE2: | ||
| 1330 | break; | ||
| 1331 | case PACKET_TYPE3: | ||
| 1332 | r = evergreen_packet3_check(p, &pkt); | ||
| 1333 | break; | ||
| 1334 | default: | ||
| 1335 | DRM_ERROR("Unknown packet type %d !\n", pkt.type); | ||
| 1336 | kfree(p->track); | ||
| 1337 | p->track = NULL; | ||
| 1338 | return -EINVAL; | ||
| 1339 | } | ||
| 1340 | if (r) { | ||
| 1341 | kfree(p->track); | ||
| 1342 | p->track = NULL; | ||
| 1343 | return r; | ||
| 1344 | } | ||
| 1345 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); | ||
| 1346 | #if 0 | ||
| 1347 | for (r = 0; r < p->ib->length_dw; r++) { | ||
| 1348 | printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]); | ||
| 1349 | mdelay(1); | ||
| 1350 | } | ||
| 1351 | #endif | ||
| 1352 | kfree(p->track); | ||
| 1353 | p->track = NULL; | ||
| 1354 | return 0; | ||
| 1355 | } | ||
| 1356 | |||
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index af86af836f13..e028c1cd9d9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
| @@ -151,6 +151,9 @@ | |||
| 151 | #define EVERGREEN_DATA_FORMAT 0x6b00 | 151 | #define EVERGREEN_DATA_FORMAT 0x6b00 |
| 152 | # define EVERGREEN_INTERLEAVE_EN (1 << 0) | 152 | # define EVERGREEN_INTERLEAVE_EN (1 << 0) |
| 153 | #define EVERGREEN_DESKTOP_HEIGHT 0x6b04 | 153 | #define EVERGREEN_DESKTOP_HEIGHT 0x6b04 |
| 154 | #define EVERGREEN_VLINE_START_END 0x6b08 | ||
| 155 | #define EVERGREEN_VLINE_STATUS 0x6bb8 | ||
| 156 | # define EVERGREEN_VLINE_STAT (1 << 12) | ||
| 154 | 157 | ||
| 155 | #define EVERGREEN_VIEWPORT_START 0x6d70 | 158 | #define EVERGREEN_VIEWPORT_START 0x6d70 |
| 156 | #define EVERGREEN_VIEWPORT_SIZE 0x6d74 | 159 | #define EVERGREEN_VIEWPORT_SIZE 0x6d74 |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 93e9e17ad54a..79683f6b4452 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
| @@ -218,6 +218,8 @@ | |||
| 218 | #define CLIP_VTX_REORDER_ENA (1 << 0) | 218 | #define CLIP_VTX_REORDER_ENA (1 << 0) |
| 219 | #define NUM_CLIP_SEQ(x) ((x) << 1) | 219 | #define NUM_CLIP_SEQ(x) ((x) << 1) |
| 220 | #define PA_SC_AA_CONFIG 0x28C04 | 220 | #define PA_SC_AA_CONFIG 0x28C04 |
| 221 | #define MSAA_NUM_SAMPLES_SHIFT 0 | ||
| 222 | #define MSAA_NUM_SAMPLES_MASK 0x3 | ||
| 221 | #define PA_SC_CLIPRECT_RULE 0x2820C | 223 | #define PA_SC_CLIPRECT_RULE 0x2820C |
| 222 | #define PA_SC_EDGERULE 0x28230 | 224 | #define PA_SC_EDGERULE 0x28230 |
| 223 | #define PA_SC_FIFO_SIZE 0x8BCC | 225 | #define PA_SC_FIFO_SIZE 0x8BCC |
| @@ -553,4 +555,466 @@ | |||
| 553 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) | 555 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) |
| 554 | # define DC_HPDx_EN (1 << 28) | 556 | # define DC_HPDx_EN (1 << 28) |
| 555 | 557 | ||
| 558 | /* | ||
| 559 | * PM4 | ||
| 560 | */ | ||
| 561 | #define PACKET_TYPE0 0 | ||
| 562 | #define PACKET_TYPE1 1 | ||
| 563 | #define PACKET_TYPE2 2 | ||
| 564 | #define PACKET_TYPE3 3 | ||
| 565 | |||
| 566 | #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) | ||
| 567 | #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) | ||
| 568 | #define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2) | ||
| 569 | #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) | ||
| 570 | #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ | ||
| 571 | (((reg) >> 2) & 0xFFFF) | \ | ||
| 572 | ((n) & 0x3FFF) << 16) | ||
| 573 | #define CP_PACKET2 0x80000000 | ||
| 574 | #define PACKET2_PAD_SHIFT 0 | ||
| 575 | #define PACKET2_PAD_MASK (0x3fffffff << 0) | ||
| 576 | |||
| 577 | #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) | ||
| 578 | |||
| 579 | #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ | ||
| 580 | (((op) & 0xFF) << 8) | \ | ||
| 581 | ((n) & 0x3FFF) << 16) | ||
| 582 | |||
| 583 | /* Packet 3 types */ | ||
| 584 | #define PACKET3_NOP 0x10 | ||
| 585 | #define PACKET3_SET_BASE 0x11 | ||
| 586 | #define PACKET3_CLEAR_STATE 0x12 | ||
| 587 | #define PACKET3_INDIRECT_BUFFER_SIZE 0x13 | ||
| 588 | #define PACKET3_DISPATCH_DIRECT 0x15 | ||
| 589 | #define PACKET3_DISPATCH_INDIRECT 0x16 | ||
| 590 | #define PACKET3_INDIRECT_BUFFER_END 0x17 | ||
| 591 | #define PACKET3_SET_PREDICATION 0x20 | ||
| 592 | #define PACKET3_REG_RMW 0x21 | ||
| 593 | #define PACKET3_COND_EXEC 0x22 | ||
| 594 | #define PACKET3_PRED_EXEC 0x23 | ||
| 595 | #define PACKET3_DRAW_INDIRECT 0x24 | ||
| 596 | #define PACKET3_DRAW_INDEX_INDIRECT 0x25 | ||
| 597 | #define PACKET3_INDEX_BASE 0x26 | ||
| 598 | #define PACKET3_DRAW_INDEX_2 0x27 | ||
| 599 | #define PACKET3_CONTEXT_CONTROL 0x28 | ||
| 600 | #define PACKET3_DRAW_INDEX_OFFSET 0x29 | ||
| 601 | #define PACKET3_INDEX_TYPE 0x2A | ||
| 602 | #define PACKET3_DRAW_INDEX 0x2B | ||
| 603 | #define PACKET3_DRAW_INDEX_AUTO 0x2D | ||
| 604 | #define PACKET3_DRAW_INDEX_IMMD 0x2E | ||
| 605 | #define PACKET3_NUM_INSTANCES 0x2F | ||
| 606 | #define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 | ||
| 607 | #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 | ||
| 608 | #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 | ||
| 609 | #define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 | ||
| 610 | #define PACKET3_MEM_SEMAPHORE 0x39 | ||
| 611 | #define PACKET3_MPEG_INDEX 0x3A | ||
| 612 | #define PACKET3_WAIT_REG_MEM 0x3C | ||
| 613 | #define PACKET3_MEM_WRITE 0x3D | ||
| 614 | #define PACKET3_INDIRECT_BUFFER 0x32 | ||
| 615 | #define PACKET3_SURFACE_SYNC 0x43 | ||
| 616 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | ||
| 617 | # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) | ||
| 618 | # define PACKET3_CB2_DEST_BASE_ENA (1 << 8) | ||
| 619 | # define PACKET3_CB3_DEST_BASE_ENA (1 << 9) | ||
| 620 | # define PACKET3_CB4_DEST_BASE_ENA (1 << 10) | ||
| 621 | # define PACKET3_CB5_DEST_BASE_ENA (1 << 11) | ||
| 622 | # define PACKET3_CB6_DEST_BASE_ENA (1 << 12) | ||
| 623 | # define PACKET3_CB7_DEST_BASE_ENA (1 << 13) | ||
| 624 | # define PACKET3_DB_DEST_BASE_ENA (1 << 14) | ||
| 625 | # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) | ||
| 626 | # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) | ||
| 627 | # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) | ||
| 628 | # define PACKET3_CB11_DEST_BASE_ENA (1 << 17) | ||
| 629 | # define PACKET3_FULL_CACHE_ENA (1 << 20) | ||
| 630 | # define PACKET3_TC_ACTION_ENA (1 << 23) | ||
| 631 | # define PACKET3_VC_ACTION_ENA (1 << 24) | ||
| 632 | # define PACKET3_CB_ACTION_ENA (1 << 25) | ||
| 633 | # define PACKET3_DB_ACTION_ENA (1 << 26) | ||
| 634 | # define PACKET3_SH_ACTION_ENA (1 << 27) | ||
| 635 | # define PACKET3_SMX_ACTION_ENA (1 << 28) | ||
| 636 | #define PACKET3_ME_INITIALIZE 0x44 | ||
| 637 | #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) | ||
| 638 | #define PACKET3_COND_WRITE 0x45 | ||
| 639 | #define PACKET3_EVENT_WRITE 0x46 | ||
| 640 | #define PACKET3_EVENT_WRITE_EOP 0x47 | ||
| 641 | #define PACKET3_EVENT_WRITE_EOS 0x48 | ||
| 642 | #define PACKET3_PREAMBLE_CNTL 0x4A | ||
| 643 | #define PACKET3_RB_OFFSET 0x4B | ||
| 644 | #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C | ||
| 645 | #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D | ||
| 646 | #define PACKET3_ALU_PS_CONST_UPDATE 0x4E | ||
| 647 | #define PACKET3_ALU_VS_CONST_UPDATE 0x4F | ||
| 648 | #define PACKET3_ONE_REG_WRITE 0x57 | ||
| 649 | #define PACKET3_SET_CONFIG_REG 0x68 | ||
| 650 | #define PACKET3_SET_CONFIG_REG_START 0x00008000 | ||
| 651 | #define PACKET3_SET_CONFIG_REG_END 0x0000ac00 | ||
| 652 | #define PACKET3_SET_CONTEXT_REG 0x69 | ||
| 653 | #define PACKET3_SET_CONTEXT_REG_START 0x00028000 | ||
| 654 | #define PACKET3_SET_CONTEXT_REG_END 0x00029000 | ||
| 655 | #define PACKET3_SET_ALU_CONST 0x6A | ||
| 656 | /* alu const buffers only; no reg file */ | ||
| 657 | #define PACKET3_SET_BOOL_CONST 0x6B | ||
| 658 | #define PACKET3_SET_BOOL_CONST_START 0x0003a500 | ||
| 659 | #define PACKET3_SET_BOOL_CONST_END 0x0003a518 | ||
| 660 | #define PACKET3_SET_LOOP_CONST 0x6C | ||
| 661 | #define PACKET3_SET_LOOP_CONST_START 0x0003a200 | ||
| 662 | #define PACKET3_SET_LOOP_CONST_END 0x0003a500 | ||
| 663 | #define PACKET3_SET_RESOURCE 0x6D | ||
| 664 | #define PACKET3_SET_RESOURCE_START 0x00030000 | ||
| 665 | #define PACKET3_SET_RESOURCE_END 0x00038000 | ||
| 666 | #define PACKET3_SET_SAMPLER 0x6E | ||
| 667 | #define PACKET3_SET_SAMPLER_START 0x0003c000 | ||
| 668 | #define PACKET3_SET_SAMPLER_END 0x0003c600 | ||
| 669 | #define PACKET3_SET_CTL_CONST 0x6F | ||
| 670 | #define PACKET3_SET_CTL_CONST_START 0x0003cff0 | ||
| 671 | #define PACKET3_SET_CTL_CONST_END 0x0003ff0c | ||
| 672 | #define PACKET3_SET_RESOURCE_OFFSET 0x70 | ||
| 673 | #define PACKET3_SET_ALU_CONST_VS 0x71 | ||
| 674 | #define PACKET3_SET_ALU_CONST_DI 0x72 | ||
| 675 | #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 | ||
| 676 | #define PACKET3_SET_RESOURCE_INDIRECT 0x74 | ||
| 677 | #define PACKET3_SET_APPEND_CNT 0x75 | ||
| 678 | |||
| 679 | #define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c | ||
| 680 | #define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30) | ||
| 681 | #define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3) | ||
| 682 | #define SQ_TEX_VTX_INVALID_TEXTURE 0x0 | ||
| 683 | #define SQ_TEX_VTX_INVALID_BUFFER 0x1 | ||
| 684 | #define SQ_TEX_VTX_VALID_TEXTURE 0x2 | ||
| 685 | #define SQ_TEX_VTX_VALID_BUFFER 0x3 | ||
| 686 | |||
| 687 | #define SQ_CONST_MEM_BASE 0x8df8 | ||
| 688 | |||
| 689 | #define SQ_ESGS_RING_SIZE 0x8c44 | ||
| 690 | #define SQ_GSVS_RING_SIZE 0x8c4c | ||
| 691 | #define SQ_ESTMP_RING_SIZE 0x8c54 | ||
| 692 | #define SQ_GSTMP_RING_SIZE 0x8c5c | ||
| 693 | #define SQ_VSTMP_RING_SIZE 0x8c64 | ||
| 694 | #define SQ_PSTMP_RING_SIZE 0x8c6c | ||
| 695 | #define SQ_LSTMP_RING_SIZE 0x8e14 | ||
| 696 | #define SQ_HSTMP_RING_SIZE 0x8e1c | ||
| 697 | #define VGT_TF_RING_SIZE 0x8988 | ||
| 698 | |||
| 699 | #define SQ_ESGS_RING_ITEMSIZE 0x28900 | ||
| 700 | #define SQ_GSVS_RING_ITEMSIZE 0x28904 | ||
| 701 | #define SQ_ESTMP_RING_ITEMSIZE 0x28908 | ||
| 702 | #define SQ_GSTMP_RING_ITEMSIZE 0x2890c | ||
| 703 | #define SQ_VSTMP_RING_ITEMSIZE 0x28910 | ||
| 704 | #define SQ_PSTMP_RING_ITEMSIZE 0x28914 | ||
| 705 | #define SQ_LSTMP_RING_ITEMSIZE 0x28830 | ||
| 706 | #define SQ_HSTMP_RING_ITEMSIZE 0x28834 | ||
| 707 | |||
| 708 | #define SQ_GS_VERT_ITEMSIZE 0x2891c | ||
| 709 | #define SQ_GS_VERT_ITEMSIZE_1 0x28920 | ||
| 710 | #define SQ_GS_VERT_ITEMSIZE_2 0x28924 | ||
| 711 | #define SQ_GS_VERT_ITEMSIZE_3 0x28928 | ||
| 712 | #define SQ_GSVS_RING_OFFSET_1 0x2892c | ||
| 713 | #define SQ_GSVS_RING_OFFSET_2 0x28930 | ||
| 714 | #define SQ_GSVS_RING_OFFSET_3 0x28934 | ||
| 715 | |||
| 716 | #define SQ_ALU_CONST_CACHE_PS_0 0x28940 | ||
| 717 | #define SQ_ALU_CONST_CACHE_PS_1 0x28944 | ||
| 718 | #define SQ_ALU_CONST_CACHE_PS_2 0x28948 | ||
| 719 | #define SQ_ALU_CONST_CACHE_PS_3 0x2894c | ||
| 720 | #define SQ_ALU_CONST_CACHE_PS_4 0x28950 | ||
| 721 | #define SQ_ALU_CONST_CACHE_PS_5 0x28954 | ||
| 722 | #define SQ_ALU_CONST_CACHE_PS_6 0x28958 | ||
| 723 | #define SQ_ALU_CONST_CACHE_PS_7 0x2895c | ||
| 724 | #define SQ_ALU_CONST_CACHE_PS_8 0x28960 | ||
| 725 | #define SQ_ALU_CONST_CACHE_PS_9 0x28964 | ||
| 726 | #define SQ_ALU_CONST_CACHE_PS_10 0x28968 | ||
| 727 | #define SQ_ALU_CONST_CACHE_PS_11 0x2896c | ||
| 728 | #define SQ_ALU_CONST_CACHE_PS_12 0x28970 | ||
| 729 | #define SQ_ALU_CONST_CACHE_PS_13 0x28974 | ||
| 730 | #define SQ_ALU_CONST_CACHE_PS_14 0x28978 | ||
| 731 | #define SQ_ALU_CONST_CACHE_PS_15 0x2897c | ||
| 732 | #define SQ_ALU_CONST_CACHE_VS_0 0x28980 | ||
| 733 | #define SQ_ALU_CONST_CACHE_VS_1 0x28984 | ||
| 734 | #define SQ_ALU_CONST_CACHE_VS_2 0x28988 | ||
| 735 | #define SQ_ALU_CONST_CACHE_VS_3 0x2898c | ||
| 736 | #define SQ_ALU_CONST_CACHE_VS_4 0x28990 | ||
| 737 | #define SQ_ALU_CONST_CACHE_VS_5 0x28994 | ||
| 738 | #define SQ_ALU_CONST_CACHE_VS_6 0x28998 | ||
| 739 | #define SQ_ALU_CONST_CACHE_VS_7 0x2899c | ||
| 740 | #define SQ_ALU_CONST_CACHE_VS_8 0x289a0 | ||
| 741 | #define SQ_ALU_CONST_CACHE_VS_9 0x289a4 | ||
| 742 | #define SQ_ALU_CONST_CACHE_VS_10 0x289a8 | ||
| 743 | #define SQ_ALU_CONST_CACHE_VS_11 0x289ac | ||
| 744 | #define SQ_ALU_CONST_CACHE_VS_12 0x289b0 | ||
| 745 | #define SQ_ALU_CONST_CACHE_VS_13 0x289b4 | ||
| 746 | #define SQ_ALU_CONST_CACHE_VS_14 0x289b8 | ||
| 747 | #define SQ_ALU_CONST_CACHE_VS_15 0x289bc | ||
| 748 | #define SQ_ALU_CONST_CACHE_GS_0 0x289c0 | ||
| 749 | #define SQ_ALU_CONST_CACHE_GS_1 0x289c4 | ||
| 750 | #define SQ_ALU_CONST_CACHE_GS_2 0x289c8 | ||
| 751 | #define SQ_ALU_CONST_CACHE_GS_3 0x289cc | ||
| 752 | #define SQ_ALU_CONST_CACHE_GS_4 0x289d0 | ||
| 753 | #define SQ_ALU_CONST_CACHE_GS_5 0x289d4 | ||
| 754 | #define SQ_ALU_CONST_CACHE_GS_6 0x289d8 | ||
| 755 | #define SQ_ALU_CONST_CACHE_GS_7 0x289dc | ||
| 756 | #define SQ_ALU_CONST_CACHE_GS_8 0x289e0 | ||
| 757 | #define SQ_ALU_CONST_CACHE_GS_9 0x289e4 | ||
| 758 | #define SQ_ALU_CONST_CACHE_GS_10 0x289e8 | ||
| 759 | #define SQ_ALU_CONST_CACHE_GS_11 0x289ec | ||
| 760 | #define SQ_ALU_CONST_CACHE_GS_12 0x289f0 | ||
| 761 | #define SQ_ALU_CONST_CACHE_GS_13 0x289f4 | ||
| 762 | #define SQ_ALU_CONST_CACHE_GS_14 0x289f8 | ||
| 763 | #define SQ_ALU_CONST_CACHE_GS_15 0x289fc | ||
| 764 | #define SQ_ALU_CONST_CACHE_HS_0 0x28f00 | ||
| 765 | #define SQ_ALU_CONST_CACHE_HS_1 0x28f04 | ||
| 766 | #define SQ_ALU_CONST_CACHE_HS_2 0x28f08 | ||
| 767 | #define SQ_ALU_CONST_CACHE_HS_3 0x28f0c | ||
| 768 | #define SQ_ALU_CONST_CACHE_HS_4 0x28f10 | ||
| 769 | #define SQ_ALU_CONST_CACHE_HS_5 0x28f14 | ||
| 770 | #define SQ_ALU_CONST_CACHE_HS_6 0x28f18 | ||
| 771 | #define SQ_ALU_CONST_CACHE_HS_7 0x28f1c | ||
| 772 | #define SQ_ALU_CONST_CACHE_HS_8 0x28f20 | ||
| 773 | #define SQ_ALU_CONST_CACHE_HS_9 0x28f24 | ||
| 774 | #define SQ_ALU_CONST_CACHE_HS_10 0x28f28 | ||
| 775 | #define SQ_ALU_CONST_CACHE_HS_11 0x28f2c | ||
| 776 | #define SQ_ALU_CONST_CACHE_HS_12 0x28f30 | ||
| 777 | #define SQ_ALU_CONST_CACHE_HS_13 0x28f34 | ||
| 778 | #define SQ_ALU_CONST_CACHE_HS_14 0x28f38 | ||
| 779 | #define SQ_ALU_CONST_CACHE_HS_15 0x28f3c | ||
| 780 | #define SQ_ALU_CONST_CACHE_LS_0 0x28f40 | ||
| 781 | #define SQ_ALU_CONST_CACHE_LS_1 0x28f44 | ||
| 782 | #define SQ_ALU_CONST_CACHE_LS_2 0x28f48 | ||
| 783 | #define SQ_ALU_CONST_CACHE_LS_3 0x28f4c | ||
| 784 | #define SQ_ALU_CONST_CACHE_LS_4 0x28f50 | ||
| 785 | #define SQ_ALU_CONST_CACHE_LS_5 0x28f54 | ||
| 786 | #define SQ_ALU_CONST_CACHE_LS_6 0x28f58 | ||
| 787 | #define SQ_ALU_CONST_CACHE_LS_7 0x28f5c | ||
| 788 | #define SQ_ALU_CONST_CACHE_LS_8 0x28f60 | ||
| 789 | #define SQ_ALU_CONST_CACHE_LS_9 0x28f64 | ||
| 790 | #define SQ_ALU_CONST_CACHE_LS_10 0x28f68 | ||
| 791 | #define SQ_ALU_CONST_CACHE_LS_11 0x28f6c | ||
| 792 | #define SQ_ALU_CONST_CACHE_LS_12 0x28f70 | ||
| 793 | #define SQ_ALU_CONST_CACHE_LS_13 0x28f74 | ||
| 794 | #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 | ||
| 795 | #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c | ||
| 796 | |||
| 797 | #define DB_DEPTH_CONTROL 0x28800 | ||
| 798 | #define DB_DEPTH_VIEW 0x28008 | ||
| 799 | #define DB_HTILE_DATA_BASE 0x28014 | ||
| 800 | #define DB_Z_INFO 0x28040 | ||
| 801 | # define Z_ARRAY_MODE(x) ((x) << 4) | ||
| 802 | #define DB_STENCIL_INFO 0x28044 | ||
| 803 | #define DB_Z_READ_BASE 0x28048 | ||
| 804 | #define DB_STENCIL_READ_BASE 0x2804c | ||
| 805 | #define DB_Z_WRITE_BASE 0x28050 | ||
| 806 | #define DB_STENCIL_WRITE_BASE 0x28054 | ||
| 807 | #define DB_DEPTH_SIZE 0x28058 | ||
| 808 | |||
| 809 | #define SQ_PGM_START_PS 0x28840 | ||
| 810 | #define SQ_PGM_START_VS 0x2885c | ||
| 811 | #define SQ_PGM_START_GS 0x28874 | ||
| 812 | #define SQ_PGM_START_ES 0x2888c | ||
| 813 | #define SQ_PGM_START_FS 0x288a4 | ||
| 814 | #define SQ_PGM_START_HS 0x288b8 | ||
| 815 | #define SQ_PGM_START_LS 0x288d0 | ||
| 816 | |||
| 817 | #define VGT_STRMOUT_CONFIG 0x28b94 | ||
| 818 | #define VGT_STRMOUT_BUFFER_CONFIG 0x28b98 | ||
| 819 | |||
| 820 | #define CB_TARGET_MASK 0x28238 | ||
| 821 | #define CB_SHADER_MASK 0x2823c | ||
| 822 | |||
| 823 | #define GDS_ADDR_BASE 0x28720 | ||
| 824 | |||
| 825 | #define CB_IMMED0_BASE 0x28b9c | ||
| 826 | #define CB_IMMED1_BASE 0x28ba0 | ||
| 827 | #define CB_IMMED2_BASE 0x28ba4 | ||
| 828 | #define CB_IMMED3_BASE 0x28ba8 | ||
| 829 | #define CB_IMMED4_BASE 0x28bac | ||
| 830 | #define CB_IMMED5_BASE 0x28bb0 | ||
| 831 | #define CB_IMMED6_BASE 0x28bb4 | ||
| 832 | #define CB_IMMED7_BASE 0x28bb8 | ||
| 833 | #define CB_IMMED8_BASE 0x28bbc | ||
| 834 | #define CB_IMMED9_BASE 0x28bc0 | ||
| 835 | #define CB_IMMED10_BASE 0x28bc4 | ||
| 836 | #define CB_IMMED11_BASE 0x28bc8 | ||
| 837 | |||
| 838 | /* all 12 CB blocks have these regs */ | ||
| 839 | #define CB_COLOR0_BASE 0x28c60 | ||
| 840 | #define CB_COLOR0_PITCH 0x28c64 | ||
| 841 | #define CB_COLOR0_SLICE 0x28c68 | ||
| 842 | #define CB_COLOR0_VIEW 0x28c6c | ||
| 843 | #define CB_COLOR0_INFO 0x28c70 | ||
| 844 | # define CB_ARRAY_MODE(x) ((x) << 8) | ||
| 845 | # define ARRAY_LINEAR_GENERAL 0 | ||
| 846 | # define ARRAY_LINEAR_ALIGNED 1 | ||
| 847 | # define ARRAY_1D_TILED_THIN1 2 | ||
| 848 | # define ARRAY_2D_TILED_THIN1 4 | ||
| 849 | #define CB_COLOR0_ATTRIB 0x28c74 | ||
| 850 | #define CB_COLOR0_DIM 0x28c78 | ||
| 851 | /* only CB0-7 blocks have these regs */ | ||
| 852 | #define CB_COLOR0_CMASK 0x28c7c | ||
| 853 | #define CB_COLOR0_CMASK_SLICE 0x28c80 | ||
| 854 | #define CB_COLOR0_FMASK 0x28c84 | ||
| 855 | #define CB_COLOR0_FMASK_SLICE 0x28c88 | ||
| 856 | #define CB_COLOR0_CLEAR_WORD0 0x28c8c | ||
| 857 | #define CB_COLOR0_CLEAR_WORD1 0x28c90 | ||
| 858 | #define CB_COLOR0_CLEAR_WORD2 0x28c94 | ||
| 859 | #define CB_COLOR0_CLEAR_WORD3 0x28c98 | ||
| 860 | |||
| 861 | #define CB_COLOR1_BASE 0x28c9c | ||
| 862 | #define CB_COLOR2_BASE 0x28cd8 | ||
| 863 | #define CB_COLOR3_BASE 0x28d14 | ||
| 864 | #define CB_COLOR4_BASE 0x28d50 | ||
| 865 | #define CB_COLOR5_BASE 0x28d8c | ||
| 866 | #define CB_COLOR6_BASE 0x28dc8 | ||
| 867 | #define CB_COLOR7_BASE 0x28e04 | ||
| 868 | #define CB_COLOR8_BASE 0x28e40 | ||
| 869 | #define CB_COLOR9_BASE 0x28e5c | ||
| 870 | #define CB_COLOR10_BASE 0x28e78 | ||
| 871 | #define CB_COLOR11_BASE 0x28e94 | ||
| 872 | |||
| 873 | #define CB_COLOR1_PITCH 0x28ca0 | ||
| 874 | #define CB_COLOR2_PITCH 0x28cdc | ||
| 875 | #define CB_COLOR3_PITCH 0x28d18 | ||
| 876 | #define CB_COLOR4_PITCH 0x28d54 | ||
| 877 | #define CB_COLOR5_PITCH 0x28d90 | ||
| 878 | #define CB_COLOR6_PITCH 0x28dcc | ||
| 879 | #define CB_COLOR7_PITCH 0x28e08 | ||
| 880 | #define CB_COLOR8_PITCH 0x28e44 | ||
| 881 | #define CB_COLOR9_PITCH 0x28e60 | ||
| 882 | #define CB_COLOR10_PITCH 0x28e7c | ||
| 883 | #define CB_COLOR11_PITCH 0x28e98 | ||
| 884 | |||
| 885 | #define CB_COLOR1_SLICE 0x28ca4 | ||
| 886 | #define CB_COLOR2_SLICE 0x28ce0 | ||
| 887 | #define CB_COLOR3_SLICE 0x28d1c | ||
| 888 | #define CB_COLOR4_SLICE 0x28d58 | ||
| 889 | #define CB_COLOR5_SLICE 0x28d94 | ||
| 890 | #define CB_COLOR6_SLICE 0x28dd0 | ||
| 891 | #define CB_COLOR7_SLICE 0x28e0c | ||
| 892 | #define CB_COLOR8_SLICE 0x28e48 | ||
| 893 | #define CB_COLOR9_SLICE 0x28e64 | ||
| 894 | #define CB_COLOR10_SLICE 0x28e80 | ||
| 895 | #define CB_COLOR11_SLICE 0x28e9c | ||
| 896 | |||
| 897 | #define CB_COLOR1_VIEW 0x28ca8 | ||
| 898 | #define CB_COLOR2_VIEW 0x28ce4 | ||
| 899 | #define CB_COLOR3_VIEW 0x28d20 | ||
| 900 | #define CB_COLOR4_VIEW 0x28d5c | ||
| 901 | #define CB_COLOR5_VIEW 0x28d98 | ||
| 902 | #define CB_COLOR6_VIEW 0x28dd4 | ||
| 903 | #define CB_COLOR7_VIEW 0x28e10 | ||
| 904 | #define CB_COLOR8_VIEW 0x28e4c | ||
| 905 | #define CB_COLOR9_VIEW 0x28e68 | ||
| 906 | #define CB_COLOR10_VIEW 0x28e84 | ||
| 907 | #define CB_COLOR11_VIEW 0x28ea0 | ||
| 908 | |||
| 909 | #define CB_COLOR1_INFO 0x28cac | ||
| 910 | #define CB_COLOR2_INFO 0x28ce8 | ||
| 911 | #define CB_COLOR3_INFO 0x28d24 | ||
| 912 | #define CB_COLOR4_INFO 0x28d60 | ||
| 913 | #define CB_COLOR5_INFO 0x28d9c | ||
| 914 | #define CB_COLOR6_INFO 0x28dd8 | ||
| 915 | #define CB_COLOR7_INFO 0x28e14 | ||
| 916 | #define CB_COLOR8_INFO 0x28e50 | ||
| 917 | #define CB_COLOR9_INFO 0x28e6c | ||
| 918 | #define CB_COLOR10_INFO 0x28e88 | ||
| 919 | #define CB_COLOR11_INFO 0x28ea4 | ||
| 920 | |||
| 921 | #define CB_COLOR1_ATTRIB 0x28cb0 | ||
| 922 | #define CB_COLOR2_ATTRIB 0x28cec | ||
| 923 | #define CB_COLOR3_ATTRIB 0x28d28 | ||
| 924 | #define CB_COLOR4_ATTRIB 0x28d64 | ||
| 925 | #define CB_COLOR5_ATTRIB 0x28da0 | ||
| 926 | #define CB_COLOR6_ATTRIB 0x28ddc | ||
| 927 | #define CB_COLOR7_ATTRIB 0x28e18 | ||
| 928 | #define CB_COLOR8_ATTRIB 0x28e54 | ||
| 929 | #define CB_COLOR9_ATTRIB 0x28e70 | ||
| 930 | #define CB_COLOR10_ATTRIB 0x28e8c | ||
| 931 | #define CB_COLOR11_ATTRIB 0x28ea8 | ||
| 932 | |||
| 933 | #define CB_COLOR1_DIM 0x28cb4 | ||
| 934 | #define CB_COLOR2_DIM 0x28cf0 | ||
| 935 | #define CB_COLOR3_DIM 0x28d2c | ||
| 936 | #define CB_COLOR4_DIM 0x28d68 | ||
| 937 | #define CB_COLOR5_DIM 0x28da4 | ||
| 938 | #define CB_COLOR6_DIM 0x28de0 | ||
| 939 | #define CB_COLOR7_DIM 0x28e1c | ||
| 940 | #define CB_COLOR8_DIM 0x28e58 | ||
| 941 | #define CB_COLOR9_DIM 0x28e74 | ||
| 942 | #define CB_COLOR10_DIM 0x28e90 | ||
| 943 | #define CB_COLOR11_DIM 0x28eac | ||
| 944 | |||
| 945 | #define CB_COLOR1_CMASK 0x28cb8 | ||
| 946 | #define CB_COLOR2_CMASK 0x28cf4 | ||
| 947 | #define CB_COLOR3_CMASK 0x28d30 | ||
| 948 | #define CB_COLOR4_CMASK 0x28d6c | ||
| 949 | #define CB_COLOR5_CMASK 0x28da8 | ||
| 950 | #define CB_COLOR6_CMASK 0x28de4 | ||
| 951 | #define CB_COLOR7_CMASK 0x28e20 | ||
| 952 | |||
| 953 | #define CB_COLOR1_CMASK_SLICE 0x28cbc | ||
| 954 | #define CB_COLOR2_CMASK_SLICE 0x28cf8 | ||
| 955 | #define CB_COLOR3_CMASK_SLICE 0x28d34 | ||
| 956 | #define CB_COLOR4_CMASK_SLICE 0x28d70 | ||
| 957 | #define CB_COLOR5_CMASK_SLICE 0x28dac | ||
| 958 | #define CB_COLOR6_CMASK_SLICE 0x28de8 | ||
| 959 | #define CB_COLOR7_CMASK_SLICE 0x28e24 | ||
| 960 | |||
| 961 | #define CB_COLOR1_FMASK 0x28cc0 | ||
| 962 | #define CB_COLOR2_FMASK 0x28cfc | ||
| 963 | #define CB_COLOR3_FMASK 0x28d38 | ||
| 964 | #define CB_COLOR4_FMASK 0x28d74 | ||
| 965 | #define CB_COLOR5_FMASK 0x28db0 | ||
| 966 | #define CB_COLOR6_FMASK 0x28dec | ||
| 967 | #define CB_COLOR7_FMASK 0x28e28 | ||
| 968 | |||
| 969 | #define CB_COLOR1_FMASK_SLICE 0x28cc4 | ||
| 970 | #define CB_COLOR2_FMASK_SLICE 0x28d00 | ||
| 971 | #define CB_COLOR3_FMASK_SLICE 0x28d3c | ||
| 972 | #define CB_COLOR4_FMASK_SLICE 0x28d78 | ||
| 973 | #define CB_COLOR5_FMASK_SLICE 0x28db4 | ||
| 974 | #define CB_COLOR6_FMASK_SLICE 0x28df0 | ||
| 975 | #define CB_COLOR7_FMASK_SLICE 0x28e2c | ||
| 976 | |||
| 977 | #define CB_COLOR1_CLEAR_WORD0 0x28cc8 | ||
| 978 | #define CB_COLOR2_CLEAR_WORD0 0x28d04 | ||
| 979 | #define CB_COLOR3_CLEAR_WORD0 0x28d40 | ||
| 980 | #define CB_COLOR4_CLEAR_WORD0 0x28d7c | ||
| 981 | #define CB_COLOR5_CLEAR_WORD0 0x28db8 | ||
| 982 | #define CB_COLOR6_CLEAR_WORD0 0x28df4 | ||
| 983 | #define CB_COLOR7_CLEAR_WORD0 0x28e30 | ||
| 984 | |||
| 985 | #define CB_COLOR1_CLEAR_WORD1 0x28ccc | ||
| 986 | #define CB_COLOR2_CLEAR_WORD1 0x28d08 | ||
| 987 | #define CB_COLOR3_CLEAR_WORD1 0x28d44 | ||
| 988 | #define CB_COLOR4_CLEAR_WORD1 0x28d80 | ||
| 989 | #define CB_COLOR5_CLEAR_WORD1 0x28dbc | ||
| 990 | #define CB_COLOR6_CLEAR_WORD1 0x28df8 | ||
| 991 | #define CB_COLOR7_CLEAR_WORD1 0x28e34 | ||
| 992 | |||
| 993 | #define CB_COLOR1_CLEAR_WORD2 0x28cd0 | ||
| 994 | #define CB_COLOR2_CLEAR_WORD2 0x28d0c | ||
| 995 | #define CB_COLOR3_CLEAR_WORD2 0x28d48 | ||
| 996 | #define CB_COLOR4_CLEAR_WORD2 0x28d84 | ||
| 997 | #define CB_COLOR5_CLEAR_WORD2 0x28dc0 | ||
| 998 | #define CB_COLOR6_CLEAR_WORD2 0x28dfc | ||
| 999 | #define CB_COLOR7_CLEAR_WORD2 0x28e38 | ||
| 1000 | |||
| 1001 | #define CB_COLOR1_CLEAR_WORD3 0x28cd4 | ||
| 1002 | #define CB_COLOR2_CLEAR_WORD3 0x28d10 | ||
| 1003 | #define CB_COLOR3_CLEAR_WORD3 0x28d4c | ||
| 1004 | #define CB_COLOR4_CLEAR_WORD3 0x28d88 | ||
| 1005 | #define CB_COLOR5_CLEAR_WORD3 0x28dc4 | ||
| 1006 | #define CB_COLOR6_CLEAR_WORD3 0x28e00 | ||
| 1007 | #define CB_COLOR7_CLEAR_WORD3 0x28e3c | ||
| 1008 | |||
| 1009 | #define SQ_TEX_RESOURCE_WORD0_0 0x30000 | ||
| 1010 | #define SQ_TEX_RESOURCE_WORD1_0 0x30004 | ||
| 1011 | # define TEX_ARRAY_MODE(x) ((x) << 28) | ||
| 1012 | #define SQ_TEX_RESOURCE_WORD2_0 0x30008 | ||
| 1013 | #define SQ_TEX_RESOURCE_WORD3_0 0x3000C | ||
| 1014 | #define SQ_TEX_RESOURCE_WORD4_0 0x30010 | ||
| 1015 | #define SQ_TEX_RESOURCE_WORD5_0 0x30014 | ||
| 1016 | #define SQ_TEX_RESOURCE_WORD6_0 0x30018 | ||
| 1017 | #define SQ_TEX_RESOURCE_WORD7_0 0x3001c | ||
| 1018 | |||
| 1019 | |||
| 556 | #endif | 1020 | #endif |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 44e96a2ae25a..e14f59748e65 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -475,6 +475,12 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
| 475 | 475 | ||
| 476 | void r600_pm_misc(struct radeon_device *rdev) | 476 | void r600_pm_misc(struct radeon_device *rdev) |
| 477 | { | 477 | { |
| 478 | int requested_index = rdev->pm.requested_power_state_index; | ||
| 479 | struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; | ||
| 480 | struct radeon_voltage *voltage = &ps->clock_info[0].voltage; | ||
| 481 | |||
| 482 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) | ||
| 483 | radeon_atom_set_voltage(rdev, voltage->voltage); | ||
| 478 | 484 | ||
| 479 | } | 485 | } |
| 480 | 486 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 669feb689bfc..5f96fe871b3f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -176,6 +176,7 @@ void radeon_pm_suspend(struct radeon_device *rdev); | |||
| 176 | void radeon_pm_resume(struct radeon_device *rdev); | 176 | void radeon_pm_resume(struct radeon_device *rdev); |
| 177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
| 178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
| 179 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | ||
| 179 | 180 | ||
| 180 | /* | 181 | /* |
| 181 | * Fences. | 182 | * Fences. |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e57df08d4aeb..87f7e2cc52d4 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -724,8 +724,8 @@ static struct radeon_asic evergreen_asic = { | |||
| 724 | .irq_set = &evergreen_irq_set, | 724 | .irq_set = &evergreen_irq_set, |
| 725 | .irq_process = &evergreen_irq_process, | 725 | .irq_process = &evergreen_irq_process, |
| 726 | .get_vblank_counter = &evergreen_get_vblank_counter, | 726 | .get_vblank_counter = &evergreen_get_vblank_counter, |
| 727 | .fence_ring_emit = NULL, | 727 | .fence_ring_emit = &r600_fence_ring_emit, |
| 728 | .cs_parse = NULL, | 728 | .cs_parse = &evergreen_cs_parse, |
| 729 | .copy_blit = NULL, | 729 | .copy_blit = NULL, |
| 730 | .copy_dma = NULL, | 730 | .copy_dma = NULL, |
| 731 | .copy = NULL, | 731 | .copy = NULL, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 5c40a3dfaca2..c0bbaa64157a 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -314,6 +314,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev, | |||
| 314 | u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); | 314 | u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 315 | int evergreen_irq_set(struct radeon_device *rdev); | 315 | int evergreen_irq_set(struct radeon_device *rdev); |
| 316 | int evergreen_irq_process(struct radeon_device *rdev); | 316 | int evergreen_irq_process(struct radeon_device *rdev); |
| 317 | extern int evergreen_cs_parse(struct radeon_cs_parser *p); | ||
| 317 | extern void evergreen_pm_misc(struct radeon_device *rdev); | 318 | extern void evergreen_pm_misc(struct radeon_device *rdev); |
| 318 | extern void evergreen_pm_prepare(struct radeon_device *rdev); | 319 | extern void evergreen_pm_prepare(struct radeon_device *rdev); |
| 319 | extern void evergreen_pm_finish(struct radeon_device *rdev); | 320 | extern void evergreen_pm_finish(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 24ea683f7cf5..4305cd55d0ac 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -1538,7 +1538,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1538 | rdev->pm.power_state[state_index].pcie_lanes = | 1538 | rdev->pm.power_state[state_index].pcie_lanes = |
| 1539 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; | 1539 | power_info->info.asPowerPlayInfo[i].ucNumPciELanes; |
| 1540 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); | 1540 | misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); |
| 1541 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | 1541 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || |
| 1542 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
| 1542 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 1543 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
| 1543 | VOLTAGE_GPIO; | 1544 | VOLTAGE_GPIO; |
| 1544 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | 1545 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
| @@ -1605,7 +1606,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1605 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; | 1606 | power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; |
| 1606 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); | 1607 | misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); |
| 1607 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); | 1608 | misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); |
| 1608 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | 1609 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || |
| 1610 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
| 1609 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 1611 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
| 1610 | VOLTAGE_GPIO; | 1612 | VOLTAGE_GPIO; |
| 1611 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | 1613 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
| @@ -1679,7 +1681,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1679 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; | 1681 | power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; |
| 1680 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); | 1682 | misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); |
| 1681 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); | 1683 | misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); |
| 1682 | if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { | 1684 | if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || |
| 1685 | (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { | ||
| 1683 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = | 1686 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
| 1684 | VOLTAGE_GPIO; | 1687 | VOLTAGE_GPIO; |
| 1685 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = | 1688 | rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
| @@ -1755,9 +1758,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1755 | rdev->pm.power_state[state_index].misc2 = 0; | 1758 | rdev->pm.power_state[state_index].misc2 = 0; |
| 1756 | } | 1759 | } |
| 1757 | } else { | 1760 | } else { |
| 1761 | int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | ||
| 1762 | uint8_t fw_frev, fw_crev; | ||
| 1763 | uint16_t fw_data_offset, vddc = 0; | ||
| 1764 | union firmware_info *firmware_info; | ||
| 1765 | ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; | ||
| 1766 | |||
| 1767 | if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, | ||
| 1768 | &fw_frev, &fw_crev, &fw_data_offset)) { | ||
| 1769 | firmware_info = | ||
| 1770 | (union firmware_info *)(mode_info->atom_context->bios + | ||
| 1771 | fw_data_offset); | ||
| 1772 | vddc = firmware_info->info_14.usBootUpVDDCVoltage; | ||
| 1773 | } | ||
| 1774 | |||
| 1758 | /* add the i2c bus for thermal/fan chip */ | 1775 | /* add the i2c bus for thermal/fan chip */ |
| 1759 | /* no support for internal controller yet */ | 1776 | /* no support for internal controller yet */ |
| 1760 | ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; | ||
| 1761 | if (controller->ucType > 0) { | 1777 | if (controller->ucType > 0) { |
| 1762 | if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || | 1778 | if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || |
| 1763 | (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || | 1779 | (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || |
| @@ -1904,6 +1920,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
| 1904 | rdev->pm.default_power_state_index = state_index; | 1920 | rdev->pm.default_power_state_index = state_index; |
| 1905 | rdev->pm.power_state[state_index].default_clock_mode = | 1921 | rdev->pm.power_state[state_index].default_clock_mode = |
| 1906 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; | 1922 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; |
| 1923 | /* patch the table values with the default slck/mclk from firmware info */ | ||
| 1924 | for (j = 0; j < mode_index; j++) { | ||
| 1925 | rdev->pm.power_state[state_index].clock_info[j].mclk = | ||
| 1926 | rdev->clock.default_mclk; | ||
| 1927 | rdev->pm.power_state[state_index].clock_info[j].sclk = | ||
| 1928 | rdev->clock.default_sclk; | ||
| 1929 | if (vddc) | ||
| 1930 | rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = | ||
| 1931 | vddc; | ||
| 1932 | } | ||
| 1907 | } | 1933 | } |
| 1908 | state_index++; | 1934 | state_index++; |
| 1909 | } | 1935 | } |
| @@ -1998,6 +2024,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, | |||
| 1998 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 2024 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
| 1999 | } | 2025 | } |
| 2000 | 2026 | ||
| 2027 | union set_voltage { | ||
| 2028 | struct _SET_VOLTAGE_PS_ALLOCATION alloc; | ||
| 2029 | struct _SET_VOLTAGE_PARAMETERS v1; | ||
| 2030 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; | ||
| 2031 | }; | ||
| 2032 | |||
| 2033 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) | ||
| 2034 | { | ||
| 2035 | union set_voltage args; | ||
| 2036 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | ||
| 2037 | u8 frev, crev, volt_index = level; | ||
| 2038 | |||
| 2039 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
| 2040 | return; | ||
| 2041 | |||
| 2042 | switch (crev) { | ||
| 2043 | case 1: | ||
| 2044 | args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | ||
| 2045 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; | ||
| 2046 | args.v1.ucVoltageIndex = volt_index; | ||
| 2047 | break; | ||
| 2048 | case 2: | ||
| 2049 | args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | ||
| 2050 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; | ||
| 2051 | args.v2.usVoltageLevel = cpu_to_le16(level); | ||
| 2052 | break; | ||
| 2053 | default: | ||
| 2054 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
| 2055 | return; | ||
| 2056 | } | ||
| 2057 | |||
| 2058 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
| 2059 | } | ||
| 2060 | |||
| 2061 | |||
| 2062 | |||
| 2001 | void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) | 2063 | void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) |
| 2002 | { | 2064 | { |
| 2003 | struct radeon_device *rdev = dev->dev_private; | 2065 | struct radeon_device *rdev = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 7b5e10d3e9c9..102c744eaf5a 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -2454,7 +2454,12 @@ default_mode: | |||
| 2454 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; | 2454 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; |
| 2455 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | 2455 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; |
| 2456 | rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; | 2456 | rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; |
| 2457 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2457 | if ((state_index > 0) && |
| 2458 | (rdev->pm.power_state[0].clock_info[0].voltage.type = VOLTAGE_GPIO)) | ||
| 2459 | rdev->pm.power_state[state_index].clock_info[0].voltage = | ||
| 2460 | rdev->pm.power_state[0].clock_info[0].voltage; | ||
| 2461 | else | ||
| 2462 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | ||
| 2458 | rdev->pm.power_state[state_index].pcie_lanes = 16; | 2463 | rdev->pm.power_state[state_index].pcie_lanes = 16; |
| 2459 | rdev->pm.power_state[state_index].flags = 0; | 2464 | rdev->pm.power_state[state_index].flags = 0; |
| 2460 | rdev->pm.default_power_state_index = state_index; | 2465 | rdev->pm.default_power_state_index = state_index; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index fdc3fdf78acb..f10faed21567 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero | |||
| 546 | /* don't suspend or resume card normally */ | 546 | /* don't suspend or resume card normally */ |
| 547 | rdev->powered_down = false; | 547 | rdev->powered_down = false; |
| 548 | radeon_resume_kms(dev); | 548 | radeon_resume_kms(dev); |
| 549 | drm_kms_helper_poll_enable(dev); | ||
| 549 | } else { | 550 | } else { |
| 550 | printk(KERN_INFO "radeon: switched off\n"); | 551 | printk(KERN_INFO "radeon: switched off\n"); |
| 552 | drm_kms_helper_poll_disable(dev); | ||
| 551 | radeon_suspend_kms(dev, pmm); | 553 | radeon_suspend_kms(dev, pmm); |
| 552 | /* don't suspend or resume card normally */ | 554 | /* don't suspend or resume card normally */ |
| 553 | rdev->powered_down = true; | 555 | rdev->powered_down = true; |
| @@ -711,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 711 | { | 713 | { |
| 712 | struct radeon_device *rdev; | 714 | struct radeon_device *rdev; |
| 713 | struct drm_crtc *crtc; | 715 | struct drm_crtc *crtc; |
| 716 | struct drm_connector *connector; | ||
| 714 | int r; | 717 | int r; |
| 715 | 718 | ||
| 716 | if (dev == NULL || dev->dev_private == NULL) { | 719 | if (dev == NULL || dev->dev_private == NULL) { |
| @@ -723,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 723 | 726 | ||
| 724 | if (rdev->powered_down) | 727 | if (rdev->powered_down) |
| 725 | return 0; | 728 | return 0; |
| 729 | |||
| 730 | /* turn off display hw */ | ||
| 731 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
| 732 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
| 733 | } | ||
| 734 | |||
| 726 | /* unpin the front buffers */ | 735 | /* unpin the front buffers */ |
| 727 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 736 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 728 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); | 737 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index a8d162c6f829..02281269a881 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -151,6 +151,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev) | |||
| 151 | static void radeon_set_power_state(struct radeon_device *rdev) | 151 | static void radeon_set_power_state(struct radeon_device *rdev) |
| 152 | { | 152 | { |
| 153 | u32 sclk, mclk; | 153 | u32 sclk, mclk; |
| 154 | bool misc_after = false; | ||
| 154 | 155 | ||
| 155 | if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && | 156 | if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && |
| 156 | (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) | 157 | (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) |
| @@ -167,55 +168,47 @@ static void radeon_set_power_state(struct radeon_device *rdev) | |||
| 167 | if (mclk > rdev->clock.default_mclk) | 168 | if (mclk > rdev->clock.default_mclk) |
| 168 | mclk = rdev->clock.default_mclk; | 169 | mclk = rdev->clock.default_mclk; |
| 169 | 170 | ||
| 170 | /* voltage, pcie lanes, etc.*/ | 171 | /* upvolt before raising clocks, downvolt after lowering clocks */ |
| 171 | radeon_pm_misc(rdev); | 172 | if (sclk < rdev->pm.current_sclk) |
| 173 | misc_after = true; | ||
| 172 | 174 | ||
| 173 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 175 | radeon_sync_with_vblank(rdev); |
| 174 | radeon_sync_with_vblank(rdev); | ||
| 175 | 176 | ||
| 177 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | ||
| 176 | if (!radeon_pm_in_vbl(rdev)) | 178 | if (!radeon_pm_in_vbl(rdev)) |
| 177 | return; | 179 | return; |
| 180 | } | ||
| 178 | 181 | ||
| 179 | radeon_pm_prepare(rdev); | 182 | radeon_pm_prepare(rdev); |
| 180 | /* set engine clock */ | ||
| 181 | if (sclk != rdev->pm.current_sclk) { | ||
| 182 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
| 183 | radeon_set_engine_clock(rdev, sclk); | ||
| 184 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
| 185 | rdev->pm.current_sclk = sclk; | ||
| 186 | DRM_DEBUG("Setting: e: %d\n", sclk); | ||
| 187 | } | ||
| 188 | 183 | ||
| 189 | /* set memory clock */ | 184 | if (!misc_after) |
| 190 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | 185 | /* voltage, pcie lanes, etc.*/ |
| 191 | radeon_pm_debug_check_in_vbl(rdev, false); | 186 | radeon_pm_misc(rdev); |
| 192 | radeon_set_memory_clock(rdev, mclk); | 187 | |
| 193 | radeon_pm_debug_check_in_vbl(rdev, true); | 188 | /* set engine clock */ |
| 194 | rdev->pm.current_mclk = mclk; | 189 | if (sclk != rdev->pm.current_sclk) { |
| 195 | DRM_DEBUG("Setting: m: %d\n", mclk); | 190 | radeon_pm_debug_check_in_vbl(rdev, false); |
| 196 | } | 191 | radeon_set_engine_clock(rdev, sclk); |
| 197 | radeon_pm_finish(rdev); | 192 | radeon_pm_debug_check_in_vbl(rdev, true); |
| 198 | } else { | 193 | rdev->pm.current_sclk = sclk; |
| 199 | /* set engine clock */ | 194 | DRM_DEBUG("Setting: e: %d\n", sclk); |
| 200 | if (sclk != rdev->pm.current_sclk) { | 195 | } |
| 201 | radeon_sync_with_vblank(rdev); | 196 | |
| 202 | radeon_pm_prepare(rdev); | 197 | /* set memory clock */ |
| 203 | radeon_set_engine_clock(rdev, sclk); | 198 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { |
| 204 | radeon_pm_finish(rdev); | 199 | radeon_pm_debug_check_in_vbl(rdev, false); |
| 205 | rdev->pm.current_sclk = sclk; | 200 | radeon_set_memory_clock(rdev, mclk); |
| 206 | DRM_DEBUG("Setting: e: %d\n", sclk); | 201 | radeon_pm_debug_check_in_vbl(rdev, true); |
| 207 | } | 202 | rdev->pm.current_mclk = mclk; |
| 208 | /* set memory clock */ | 203 | DRM_DEBUG("Setting: m: %d\n", mclk); |
| 209 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | ||
| 210 | radeon_sync_with_vblank(rdev); | ||
| 211 | radeon_pm_prepare(rdev); | ||
| 212 | radeon_set_memory_clock(rdev, mclk); | ||
| 213 | radeon_pm_finish(rdev); | ||
| 214 | rdev->pm.current_mclk = mclk; | ||
| 215 | DRM_DEBUG("Setting: m: %d\n", mclk); | ||
| 216 | } | ||
| 217 | } | 204 | } |
| 218 | 205 | ||
| 206 | if (misc_after) | ||
| 207 | /* voltage, pcie lanes, etc.*/ | ||
| 208 | radeon_pm_misc(rdev); | ||
| 209 | |||
| 210 | radeon_pm_finish(rdev); | ||
| 211 | |||
| 219 | rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; | 212 | rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; |
| 220 | rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; | 213 | rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; |
| 221 | } else | 214 | } else |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen new file mode 100644 index 000000000000..b5c757f68d3c --- /dev/null +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
| @@ -0,0 +1,611 @@ | |||
| 1 | evergreen 0x9400 | ||
| 2 | 0x00008040 WAIT_UNTIL | ||
| 3 | 0x00008044 WAIT_UNTIL_POLL_CNTL | ||
| 4 | 0x00008048 WAIT_UNTIL_POLL_MASK | ||
| 5 | 0x0000804c WAIT_UNTIL_POLL_REFDATA | ||
| 6 | 0x000088B0 VGT_VTX_VECT_EJECT_REG | ||
| 7 | 0x000088C4 VGT_CACHE_INVALIDATION | ||
| 8 | 0x000088D4 VGT_GS_VERTEX_REUSE | ||
| 9 | 0x00008958 VGT_PRIMITIVE_TYPE | ||
| 10 | 0x0000895C VGT_INDEX_TYPE | ||
| 11 | 0x00008970 VGT_NUM_INDICES | ||
| 12 | 0x00008974 VGT_NUM_INSTANCES | ||
| 13 | 0x00008990 VGT_COMPUTE_DIM_X | ||
| 14 | 0x00008994 VGT_COMPUTE_DIM_Y | ||
| 15 | 0x00008998 VGT_COMPUTE_DIM_Z | ||
| 16 | 0x0000899C VGT_COMPUTE_START_X | ||
| 17 | 0x000089A0 VGT_COMPUTE_START_Y | ||
| 18 | 0x000089A4 VGT_COMPUTE_START_Z | ||
| 19 | 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE | ||
| 20 | 0x00008A14 PA_CL_ENHANCE | ||
| 21 | 0x00008A60 PA_SC_LINE_STIPPLE_VALUE | ||
| 22 | 0x00008B10 PA_SC_LINE_STIPPLE_STATE | ||
| 23 | 0x00008BF0 PA_SC_ENHANCE | ||
| 24 | 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ | ||
| 25 | 0x00008C00 SQ_CONFIG | ||
| 26 | 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 | ||
| 27 | 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 | ||
| 28 | 0x00008C0C SQ_GPR_RESOURCE_MGMT_3 | ||
| 29 | 0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1 | ||
| 30 | 0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2 | ||
| 31 | 0x00008C18 SQ_THREAD_RESOURCE_MGMT | ||
| 32 | 0x00008C1C SQ_THREAD_RESOURCE_MGMT_2 | ||
| 33 | 0x00008C20 SQ_STACK_RESOURCE_MGMT_1 | ||
| 34 | 0x00008C24 SQ_STACK_RESOURCE_MGMT_2 | ||
| 35 | 0x00008C28 SQ_STACK_RESOURCE_MGMT_3 | ||
| 36 | 0x00008DF8 SQ_CONST_MEM_BASE | ||
| 37 | 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS | ||
| 38 | 0x00009100 SPI_CONFIG_CNTL | ||
| 39 | 0x0000913C SPI_CONFIG_CNTL_1 | ||
| 40 | 0x00009700 VC_CNTL | ||
| 41 | 0x00009714 VC_ENHANCE | ||
| 42 | 0x00009830 DB_DEBUG | ||
| 43 | 0x00009834 DB_DEBUG2 | ||
| 44 | 0x00009838 DB_DEBUG3 | ||
| 45 | 0x0000983C DB_DEBUG4 | ||
| 46 | 0x00009854 DB_WATERMARKS | ||
| 47 | 0x0000A400 TD_PS_BORDER_COLOR_INDEX | ||
| 48 | 0x0000A404 TD_PS_BORDER_COLOR_RED | ||
| 49 | 0x0000A408 TD_PS_BORDER_COLOR_GREEN | ||
| 50 | 0x0000A40C TD_PS_BORDER_COLOR_BLUE | ||
| 51 | 0x0000A410 TD_PS_BORDER_COLOR_ALPHA | ||
| 52 | 0x0000A414 TD_VS_BORDER_COLOR_INDEX | ||
| 53 | 0x0000A418 TD_VS_BORDER_COLOR_RED | ||
| 54 | 0x0000A41C TD_VS_BORDER_COLOR_GREEN | ||
| 55 | 0x0000A420 TD_VS_BORDER_COLOR_BLUE | ||
| 56 | 0x0000A424 TD_VS_BORDER_COLOR_ALPHA | ||
| 57 | 0x0000A428 TD_GS_BORDER_COLOR_INDEX | ||
| 58 | 0x0000A42C TD_GS_BORDER_COLOR_RED | ||
| 59 | 0x0000A430 TD_GS_BORDER_COLOR_GREEN | ||
| 60 | 0x0000A434 TD_GS_BORDER_COLOR_BLUE | ||
| 61 | 0x0000A438 TD_GS_BORDER_COLOR_ALPHA | ||
| 62 | 0x0000A43C TD_HS_BORDER_COLOR_INDEX | ||
| 63 | 0x0000A440 TD_HS_BORDER_COLOR_RED | ||
| 64 | 0x0000A444 TD_HS_BORDER_COLOR_GREEN | ||
| 65 | 0x0000A448 TD_HS_BORDER_COLOR_BLUE | ||
| 66 | 0x0000A44C TD_HS_BORDER_COLOR_ALPHA | ||
| 67 | 0x0000A450 TD_LS_BORDER_COLOR_INDEX | ||
| 68 | 0x0000A454 TD_LS_BORDER_COLOR_RED | ||
| 69 | 0x0000A458 TD_LS_BORDER_COLOR_GREEN | ||
| 70 | 0x0000A45C TD_LS_BORDER_COLOR_BLUE | ||
| 71 | 0x0000A460 TD_LS_BORDER_COLOR_ALPHA | ||
| 72 | 0x0000A464 TD_CS_BORDER_COLOR_INDEX | ||
| 73 | 0x0000A468 TD_CS_BORDER_COLOR_RED | ||
| 74 | 0x0000A46C TD_CS_BORDER_COLOR_GREEN | ||
| 75 | 0x0000A470 TD_CS_BORDER_COLOR_BLUE | ||
| 76 | 0x0000A474 TD_CS_BORDER_COLOR_ALPHA | ||
| 77 | 0x00028000 DB_RENDER_CONTROL | ||
| 78 | 0x00028004 DB_COUNT_CONTROL | ||
| 79 | 0x0002800C DB_RENDER_OVERRIDE | ||
| 80 | 0x00028010 DB_RENDER_OVERRIDE2 | ||
| 81 | 0x00028028 DB_STENCIL_CLEAR | ||
| 82 | 0x0002802C DB_DEPTH_CLEAR | ||
| 83 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | ||
| 84 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL | ||
| 85 | 0x0002805C DB_DEPTH_SLICE | ||
| 86 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 | ||
| 87 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 | ||
| 88 | 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 | ||
| 89 | 0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3 | ||
| 90 | 0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4 | ||
| 91 | 0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5 | ||
| 92 | 0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6 | ||
| 93 | 0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7 | ||
| 94 | 0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8 | ||
| 95 | 0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9 | ||
| 96 | 0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10 | ||
| 97 | 0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11 | ||
| 98 | 0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12 | ||
| 99 | 0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13 | ||
| 100 | 0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14 | ||
| 101 | 0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15 | ||
| 102 | 0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0 | ||
| 103 | 0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1 | ||
| 104 | 0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2 | ||
| 105 | 0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3 | ||
| 106 | 0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4 | ||
| 107 | 0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5 | ||
| 108 | 0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6 | ||
| 109 | 0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7 | ||
| 110 | 0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8 | ||
| 111 | 0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9 | ||
| 112 | 0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10 | ||
| 113 | 0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11 | ||
| 114 | 0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12 | ||
| 115 | 0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 | ||
| 116 | 0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 | ||
| 117 | 0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 | ||
| 118 | 0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 | ||
| 119 | 0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 | ||
| 120 | 0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 | ||
| 121 | 0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3 | ||
| 122 | 0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4 | ||
| 123 | 0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5 | ||
| 124 | 0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6 | ||
| 125 | 0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7 | ||
| 126 | 0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8 | ||
| 127 | 0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9 | ||
| 128 | 0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10 | ||
| 129 | 0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11 | ||
| 130 | 0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12 | ||
| 131 | 0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13 | ||
| 132 | 0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14 | ||
| 133 | 0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15 | ||
| 134 | 0x00028200 PA_SC_WINDOW_OFFSET | ||
| 135 | 0x00028204 PA_SC_WINDOW_SCISSOR_TL | ||
| 136 | 0x00028208 PA_SC_WINDOW_SCISSOR_BR | ||
| 137 | 0x0002820C PA_SC_CLIPRECT_RULE | ||
| 138 | 0x00028210 PA_SC_CLIPRECT_0_TL | ||
| 139 | 0x00028214 PA_SC_CLIPRECT_0_BR | ||
| 140 | 0x00028218 PA_SC_CLIPRECT_1_TL | ||
| 141 | 0x0002821C PA_SC_CLIPRECT_1_BR | ||
| 142 | 0x00028220 PA_SC_CLIPRECT_2_TL | ||
| 143 | 0x00028224 PA_SC_CLIPRECT_2_BR | ||
| 144 | 0x00028228 PA_SC_CLIPRECT_3_TL | ||
| 145 | 0x0002822C PA_SC_CLIPRECT_3_BR | ||
| 146 | 0x00028230 PA_SC_EDGERULE | ||
| 147 | 0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET | ||
| 148 | 0x00028240 PA_SC_GENERIC_SCISSOR_TL | ||
| 149 | 0x00028244 PA_SC_GENERIC_SCISSOR_BR | ||
| 150 | 0x00028250 PA_SC_VPORT_SCISSOR_0_TL | ||
| 151 | 0x00028254 PA_SC_VPORT_SCISSOR_0_BR | ||
| 152 | 0x00028258 PA_SC_VPORT_SCISSOR_1_TL | ||
| 153 | 0x0002825C PA_SC_VPORT_SCISSOR_1_BR | ||
| 154 | 0x00028260 PA_SC_VPORT_SCISSOR_2_TL | ||
| 155 | 0x00028264 PA_SC_VPORT_SCISSOR_2_BR | ||
| 156 | 0x00028268 PA_SC_VPORT_SCISSOR_3_TL | ||
| 157 | 0x0002826C PA_SC_VPORT_SCISSOR_3_BR | ||
| 158 | 0x00028270 PA_SC_VPORT_SCISSOR_4_TL | ||
| 159 | 0x00028274 PA_SC_VPORT_SCISSOR_4_BR | ||
| 160 | 0x00028278 PA_SC_VPORT_SCISSOR_5_TL | ||
| 161 | 0x0002827C PA_SC_VPORT_SCISSOR_5_BR | ||
| 162 | 0x00028280 PA_SC_VPORT_SCISSOR_6_TL | ||
| 163 | 0x00028284 PA_SC_VPORT_SCISSOR_6_BR | ||
| 164 | 0x00028288 PA_SC_VPORT_SCISSOR_7_TL | ||
| 165 | 0x0002828C PA_SC_VPORT_SCISSOR_7_BR | ||
| 166 | 0x00028290 PA_SC_VPORT_SCISSOR_8_TL | ||
| 167 | 0x00028294 PA_SC_VPORT_SCISSOR_8_BR | ||
| 168 | 0x00028298 PA_SC_VPORT_SCISSOR_9_TL | ||
| 169 | 0x0002829C PA_SC_VPORT_SCISSOR_9_BR | ||
| 170 | 0x000282A0 PA_SC_VPORT_SCISSOR_10_TL | ||
| 171 | 0x000282A4 PA_SC_VPORT_SCISSOR_10_BR | ||
| 172 | 0x000282A8 PA_SC_VPORT_SCISSOR_11_TL | ||
| 173 | 0x000282AC PA_SC_VPORT_SCISSOR_11_BR | ||
| 174 | 0x000282B0 PA_SC_VPORT_SCISSOR_12_TL | ||
| 175 | 0x000282B4 PA_SC_VPORT_SCISSOR_12_BR | ||
| 176 | 0x000282B8 PA_SC_VPORT_SCISSOR_13_TL | ||
| 177 | 0x000282BC PA_SC_VPORT_SCISSOR_13_BR | ||
| 178 | 0x000282C0 PA_SC_VPORT_SCISSOR_14_TL | ||
| 179 | 0x000282C4 PA_SC_VPORT_SCISSOR_14_BR | ||
| 180 | 0x000282C8 PA_SC_VPORT_SCISSOR_15_TL | ||
| 181 | 0x000282CC PA_SC_VPORT_SCISSOR_15_BR | ||
| 182 | 0x000282D0 PA_SC_VPORT_ZMIN_0 | ||
| 183 | 0x000282D4 PA_SC_VPORT_ZMAX_0 | ||
| 184 | 0x000282D8 PA_SC_VPORT_ZMIN_1 | ||
| 185 | 0x000282DC PA_SC_VPORT_ZMAX_1 | ||
| 186 | 0x000282E0 PA_SC_VPORT_ZMIN_2 | ||
| 187 | 0x000282E4 PA_SC_VPORT_ZMAX_2 | ||
| 188 | 0x000282E8 PA_SC_VPORT_ZMIN_3 | ||
| 189 | 0x000282EC PA_SC_VPORT_ZMAX_3 | ||
| 190 | 0x000282F0 PA_SC_VPORT_ZMIN_4 | ||
| 191 | 0x000282F4 PA_SC_VPORT_ZMAX_4 | ||
| 192 | 0x000282F8 PA_SC_VPORT_ZMIN_5 | ||
| 193 | 0x000282FC PA_SC_VPORT_ZMAX_5 | ||
| 194 | 0x00028300 PA_SC_VPORT_ZMIN_6 | ||
| 195 | 0x00028304 PA_SC_VPORT_ZMAX_6 | ||
| 196 | 0x00028308 PA_SC_VPORT_ZMIN_7 | ||
| 197 | 0x0002830C PA_SC_VPORT_ZMAX_7 | ||
| 198 | 0x00028310 PA_SC_VPORT_ZMIN_8 | ||
| 199 | 0x00028314 PA_SC_VPORT_ZMAX_8 | ||
| 200 | 0x00028318 PA_SC_VPORT_ZMIN_9 | ||
| 201 | 0x0002831C PA_SC_VPORT_ZMAX_9 | ||
| 202 | 0x00028320 PA_SC_VPORT_ZMIN_10 | ||
| 203 | 0x00028324 PA_SC_VPORT_ZMAX_10 | ||
| 204 | 0x00028328 PA_SC_VPORT_ZMIN_11 | ||
| 205 | 0x0002832C PA_SC_VPORT_ZMAX_11 | ||
| 206 | 0x00028330 PA_SC_VPORT_ZMIN_12 | ||
| 207 | 0x00028334 PA_SC_VPORT_ZMAX_12 | ||
| 208 | 0x00028338 PA_SC_VPORT_ZMIN_13 | ||
| 209 | 0x0002833C PA_SC_VPORT_ZMAX_13 | ||
| 210 | 0x00028340 PA_SC_VPORT_ZMIN_14 | ||
| 211 | 0x00028344 PA_SC_VPORT_ZMAX_14 | ||
| 212 | 0x00028348 PA_SC_VPORT_ZMIN_15 | ||
| 213 | 0x0002834C PA_SC_VPORT_ZMAX_15 | ||
| 214 | 0x00028350 SX_MISC | ||
| 215 | 0x00028380 SQ_VTX_SEMANTIC_0 | ||
| 216 | 0x00028384 SQ_VTX_SEMANTIC_1 | ||
| 217 | 0x00028388 SQ_VTX_SEMANTIC_2 | ||
| 218 | 0x0002838C SQ_VTX_SEMANTIC_3 | ||
| 219 | 0x00028390 SQ_VTX_SEMANTIC_4 | ||
| 220 | 0x00028394 SQ_VTX_SEMANTIC_5 | ||
| 221 | 0x00028398 SQ_VTX_SEMANTIC_6 | ||
| 222 | 0x0002839C SQ_VTX_SEMANTIC_7 | ||
| 223 | 0x000283A0 SQ_VTX_SEMANTIC_8 | ||
| 224 | 0x000283A4 SQ_VTX_SEMANTIC_9 | ||
| 225 | 0x000283A8 SQ_VTX_SEMANTIC_10 | ||
| 226 | 0x000283AC SQ_VTX_SEMANTIC_11 | ||
| 227 | 0x000283B0 SQ_VTX_SEMANTIC_12 | ||
| 228 | 0x000283B4 SQ_VTX_SEMANTIC_13 | ||
| 229 | 0x000283B8 SQ_VTX_SEMANTIC_14 | ||
| 230 | 0x000283BC SQ_VTX_SEMANTIC_15 | ||
| 231 | 0x000283C0 SQ_VTX_SEMANTIC_16 | ||
| 232 | 0x000283C4 SQ_VTX_SEMANTIC_17 | ||
| 233 | 0x000283C8 SQ_VTX_SEMANTIC_18 | ||
| 234 | 0x000283CC SQ_VTX_SEMANTIC_19 | ||
| 235 | 0x000283D0 SQ_VTX_SEMANTIC_20 | ||
| 236 | 0x000283D4 SQ_VTX_SEMANTIC_21 | ||
| 237 | 0x000283D8 SQ_VTX_SEMANTIC_22 | ||
| 238 | 0x000283DC SQ_VTX_SEMANTIC_23 | ||
| 239 | 0x000283E0 SQ_VTX_SEMANTIC_24 | ||
| 240 | 0x000283E4 SQ_VTX_SEMANTIC_25 | ||
| 241 | 0x000283E8 SQ_VTX_SEMANTIC_26 | ||
| 242 | 0x000283EC SQ_VTX_SEMANTIC_27 | ||
| 243 | 0x000283F0 SQ_VTX_SEMANTIC_28 | ||
| 244 | 0x000283F4 SQ_VTX_SEMANTIC_29 | ||
| 245 | 0x000283F8 SQ_VTX_SEMANTIC_30 | ||
| 246 | 0x000283FC SQ_VTX_SEMANTIC_31 | ||
| 247 | 0x00028400 VGT_MAX_VTX_INDX | ||
| 248 | 0x00028404 VGT_MIN_VTX_INDX | ||
| 249 | 0x00028408 VGT_INDX_OFFSET | ||
| 250 | 0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX | ||
| 251 | 0x00028410 SX_ALPHA_TEST_CONTROL | ||
| 252 | 0x00028414 CB_BLEND_RED | ||
| 253 | 0x00028418 CB_BLEND_GREEN | ||
| 254 | 0x0002841C CB_BLEND_BLUE | ||
| 255 | 0x00028420 CB_BLEND_ALPHA | ||
| 256 | 0x00028430 DB_STENCILREFMASK | ||
| 257 | 0x00028434 DB_STENCILREFMASK_BF | ||
| 258 | 0x00028438 SX_ALPHA_REF | ||
| 259 | 0x0002843C PA_CL_VPORT_XSCALE_0 | ||
| 260 | 0x00028440 PA_CL_VPORT_XOFFSET_0 | ||
| 261 | 0x00028444 PA_CL_VPORT_YSCALE_0 | ||
| 262 | 0x00028448 PA_CL_VPORT_YOFFSET_0 | ||
| 263 | 0x0002844C PA_CL_VPORT_ZSCALE_0 | ||
| 264 | 0x00028450 PA_CL_VPORT_ZOFFSET_0 | ||
| 265 | 0x00028454 PA_CL_VPORT_XSCALE_1 | ||
| 266 | 0x00028458 PA_CL_VPORT_XOFFSET_1 | ||
| 267 | 0x0002845C PA_CL_VPORT_YSCALE_1 | ||
| 268 | 0x00028460 PA_CL_VPORT_YOFFSET_1 | ||
| 269 | 0x00028464 PA_CL_VPORT_ZSCALE_1 | ||
| 270 | 0x00028468 PA_CL_VPORT_ZOFFSET_1 | ||
| 271 | 0x0002846C PA_CL_VPORT_XSCALE_2 | ||
| 272 | 0x00028470 PA_CL_VPORT_XOFFSET_2 | ||
| 273 | 0x00028474 PA_CL_VPORT_YSCALE_2 | ||
| 274 | 0x00028478 PA_CL_VPORT_YOFFSET_2 | ||
| 275 | 0x0002847C PA_CL_VPORT_ZSCALE_2 | ||
| 276 | 0x00028480 PA_CL_VPORT_ZOFFSET_2 | ||
| 277 | 0x00028484 PA_CL_VPORT_XSCALE_3 | ||
| 278 | 0x00028488 PA_CL_VPORT_XOFFSET_3 | ||
| 279 | 0x0002848C PA_CL_VPORT_YSCALE_3 | ||
| 280 | 0x00028490 PA_CL_VPORT_YOFFSET_3 | ||
| 281 | 0x00028494 PA_CL_VPORT_ZSCALE_3 | ||
| 282 | 0x00028498 PA_CL_VPORT_ZOFFSET_3 | ||
| 283 | 0x0002849C PA_CL_VPORT_XSCALE_4 | ||
| 284 | 0x000284A0 PA_CL_VPORT_XOFFSET_4 | ||
| 285 | 0x000284A4 PA_CL_VPORT_YSCALE_4 | ||
| 286 | 0x000284A8 PA_CL_VPORT_YOFFSET_4 | ||
| 287 | 0x000284AC PA_CL_VPORT_ZSCALE_4 | ||
| 288 | 0x000284B0 PA_CL_VPORT_ZOFFSET_4 | ||
| 289 | 0x000284B4 PA_CL_VPORT_XSCALE_5 | ||
| 290 | 0x000284B8 PA_CL_VPORT_XOFFSET_5 | ||
| 291 | 0x000284BC PA_CL_VPORT_YSCALE_5 | ||
| 292 | 0x000284C0 PA_CL_VPORT_YOFFSET_5 | ||
| 293 | 0x000284C4 PA_CL_VPORT_ZSCALE_5 | ||
| 294 | 0x000284C8 PA_CL_VPORT_ZOFFSET_5 | ||
| 295 | 0x000284CC PA_CL_VPORT_XSCALE_6 | ||
| 296 | 0x000284D0 PA_CL_VPORT_XOFFSET_6 | ||
| 297 | 0x000284D4 PA_CL_VPORT_YSCALE_6 | ||
| 298 | 0x000284D8 PA_CL_VPORT_YOFFSET_6 | ||
| 299 | 0x000284DC PA_CL_VPORT_ZSCALE_6 | ||
| 300 | 0x000284E0 PA_CL_VPORT_ZOFFSET_6 | ||
| 301 | 0x000284E4 PA_CL_VPORT_XSCALE_7 | ||
| 302 | 0x000284E8 PA_CL_VPORT_XOFFSET_7 | ||
| 303 | 0x000284EC PA_CL_VPORT_YSCALE_7 | ||
| 304 | 0x000284F0 PA_CL_VPORT_YOFFSET_7 | ||
| 305 | 0x000284F4 PA_CL_VPORT_ZSCALE_7 | ||
| 306 | 0x000284F8 PA_CL_VPORT_ZOFFSET_7 | ||
| 307 | 0x000284FC PA_CL_VPORT_XSCALE_8 | ||
| 308 | 0x00028500 PA_CL_VPORT_XOFFSET_8 | ||
| 309 | 0x00028504 PA_CL_VPORT_YSCALE_8 | ||
| 310 | 0x00028508 PA_CL_VPORT_YOFFSET_8 | ||
| 311 | 0x0002850C PA_CL_VPORT_ZSCALE_8 | ||
| 312 | 0x00028510 PA_CL_VPORT_ZOFFSET_8 | ||
| 313 | 0x00028514 PA_CL_VPORT_XSCALE_9 | ||
| 314 | 0x00028518 PA_CL_VPORT_XOFFSET_9 | ||
| 315 | 0x0002851C PA_CL_VPORT_YSCALE_9 | ||
| 316 | 0x00028520 PA_CL_VPORT_YOFFSET_9 | ||
| 317 | 0x00028524 PA_CL_VPORT_ZSCALE_9 | ||
| 318 | 0x00028528 PA_CL_VPORT_ZOFFSET_9 | ||
| 319 | 0x0002852C PA_CL_VPORT_XSCALE_10 | ||
| 320 | 0x00028530 PA_CL_VPORT_XOFFSET_10 | ||
| 321 | 0x00028534 PA_CL_VPORT_YSCALE_10 | ||
| 322 | 0x00028538 PA_CL_VPORT_YOFFSET_10 | ||
| 323 | 0x0002853C PA_CL_VPORT_ZSCALE_10 | ||
| 324 | 0x00028540 PA_CL_VPORT_ZOFFSET_10 | ||
| 325 | 0x00028544 PA_CL_VPORT_XSCALE_11 | ||
| 326 | 0x00028548 PA_CL_VPORT_XOFFSET_11 | ||
| 327 | 0x0002854C PA_CL_VPORT_YSCALE_11 | ||
| 328 | 0x00028550 PA_CL_VPORT_YOFFSET_11 | ||
| 329 | 0x00028554 PA_CL_VPORT_ZSCALE_11 | ||
| 330 | 0x00028558 PA_CL_VPORT_ZOFFSET_11 | ||
| 331 | 0x0002855C PA_CL_VPORT_XSCALE_12 | ||
| 332 | 0x00028560 PA_CL_VPORT_XOFFSET_12 | ||
| 333 | 0x00028564 PA_CL_VPORT_YSCALE_12 | ||
| 334 | 0x00028568 PA_CL_VPORT_YOFFSET_12 | ||
| 335 | 0x0002856C PA_CL_VPORT_ZSCALE_12 | ||
| 336 | 0x00028570 PA_CL_VPORT_ZOFFSET_12 | ||
| 337 | 0x00028574 PA_CL_VPORT_XSCALE_13 | ||
| 338 | 0x00028578 PA_CL_VPORT_XOFFSET_13 | ||
| 339 | 0x0002857C PA_CL_VPORT_YSCALE_13 | ||
| 340 | 0x00028580 PA_CL_VPORT_YOFFSET_13 | ||
| 341 | 0x00028584 PA_CL_VPORT_ZSCALE_13 | ||
| 342 | 0x00028588 PA_CL_VPORT_ZOFFSET_13 | ||
| 343 | 0x0002858C PA_CL_VPORT_XSCALE_14 | ||
| 344 | 0x00028590 PA_CL_VPORT_XOFFSET_14 | ||
| 345 | 0x00028594 PA_CL_VPORT_YSCALE_14 | ||
| 346 | 0x00028598 PA_CL_VPORT_YOFFSET_14 | ||
| 347 | 0x0002859C PA_CL_VPORT_ZSCALE_14 | ||
| 348 | 0x000285A0 PA_CL_VPORT_ZOFFSET_14 | ||
| 349 | 0x000285A4 PA_CL_VPORT_XSCALE_15 | ||
| 350 | 0x000285A8 PA_CL_VPORT_XOFFSET_15 | ||
| 351 | 0x000285AC PA_CL_VPORT_YSCALE_15 | ||
| 352 | 0x000285B0 PA_CL_VPORT_YOFFSET_15 | ||
| 353 | 0x000285B4 PA_CL_VPORT_ZSCALE_15 | ||
| 354 | 0x000285B8 PA_CL_VPORT_ZOFFSET_15 | ||
| 355 | 0x000285BC PA_CL_UCP_0_X | ||
| 356 | 0x000285C0 PA_CL_UCP_0_Y | ||
| 357 | 0x000285C4 PA_CL_UCP_0_Z | ||
| 358 | 0x000285C8 PA_CL_UCP_0_W | ||
| 359 | 0x000285CC PA_CL_UCP_1_X | ||
| 360 | 0x000285D0 PA_CL_UCP_1_Y | ||
| 361 | 0x000285D4 PA_CL_UCP_1_Z | ||
| 362 | 0x000285D8 PA_CL_UCP_1_W | ||
| 363 | 0x000285DC PA_CL_UCP_2_X | ||
| 364 | 0x000285E0 PA_CL_UCP_2_Y | ||
| 365 | 0x000285E4 PA_CL_UCP_2_Z | ||
| 366 | 0x000285E8 PA_CL_UCP_2_W | ||
| 367 | 0x000285EC PA_CL_UCP_3_X | ||
| 368 | 0x000285F0 PA_CL_UCP_3_Y | ||
| 369 | 0x000285F4 PA_CL_UCP_3_Z | ||
| 370 | 0x000285F8 PA_CL_UCP_3_W | ||
| 371 | 0x000285FC PA_CL_UCP_4_X | ||
| 372 | 0x00028600 PA_CL_UCP_4_Y | ||
| 373 | 0x00028604 PA_CL_UCP_4_Z | ||
| 374 | 0x00028608 PA_CL_UCP_4_W | ||
| 375 | 0x0002860C PA_CL_UCP_5_X | ||
| 376 | 0x00028610 PA_CL_UCP_5_Y | ||
| 377 | 0x00028614 PA_CL_UCP_5_Z | ||
| 378 | 0x00028618 PA_CL_UCP_5_W | ||
| 379 | 0x0002861C SPI_VS_OUT_ID_0 | ||
| 380 | 0x00028620 SPI_VS_OUT_ID_1 | ||
| 381 | 0x00028624 SPI_VS_OUT_ID_2 | ||
| 382 | 0x00028628 SPI_VS_OUT_ID_3 | ||
| 383 | 0x0002862C SPI_VS_OUT_ID_4 | ||
| 384 | 0x00028630 SPI_VS_OUT_ID_5 | ||
| 385 | 0x00028634 SPI_VS_OUT_ID_6 | ||
| 386 | 0x00028638 SPI_VS_OUT_ID_7 | ||
| 387 | 0x0002863C SPI_VS_OUT_ID_8 | ||
| 388 | 0x00028640 SPI_VS_OUT_ID_9 | ||
| 389 | 0x00028644 SPI_PS_INPUT_CNTL_0 | ||
| 390 | 0x00028648 SPI_PS_INPUT_CNTL_1 | ||
| 391 | 0x0002864C SPI_PS_INPUT_CNTL_2 | ||
| 392 | 0x00028650 SPI_PS_INPUT_CNTL_3 | ||
| 393 | 0x00028654 SPI_PS_INPUT_CNTL_4 | ||
| 394 | 0x00028658 SPI_PS_INPUT_CNTL_5 | ||
| 395 | 0x0002865C SPI_PS_INPUT_CNTL_6 | ||
| 396 | 0x00028660 SPI_PS_INPUT_CNTL_7 | ||
| 397 | 0x00028664 SPI_PS_INPUT_CNTL_8 | ||
| 398 | 0x00028668 SPI_PS_INPUT_CNTL_9 | ||
| 399 | 0x0002866C SPI_PS_INPUT_CNTL_10 | ||
| 400 | 0x00028670 SPI_PS_INPUT_CNTL_11 | ||
| 401 | 0x00028674 SPI_PS_INPUT_CNTL_12 | ||
| 402 | 0x00028678 SPI_PS_INPUT_CNTL_13 | ||
| 403 | 0x0002867C SPI_PS_INPUT_CNTL_14 | ||
| 404 | 0x00028680 SPI_PS_INPUT_CNTL_15 | ||
| 405 | 0x00028684 SPI_PS_INPUT_CNTL_16 | ||
| 406 | 0x00028688 SPI_PS_INPUT_CNTL_17 | ||
| 407 | 0x0002868C SPI_PS_INPUT_CNTL_18 | ||
| 408 | 0x00028690 SPI_PS_INPUT_CNTL_19 | ||
| 409 | 0x00028694 SPI_PS_INPUT_CNTL_20 | ||
| 410 | 0x00028698 SPI_PS_INPUT_CNTL_21 | ||
| 411 | 0x0002869C SPI_PS_INPUT_CNTL_22 | ||
| 412 | 0x000286A0 SPI_PS_INPUT_CNTL_23 | ||
| 413 | 0x000286A4 SPI_PS_INPUT_CNTL_24 | ||
| 414 | 0x000286A8 SPI_PS_INPUT_CNTL_25 | ||
| 415 | 0x000286AC SPI_PS_INPUT_CNTL_26 | ||
| 416 | 0x000286B0 SPI_PS_INPUT_CNTL_27 | ||
| 417 | 0x000286B4 SPI_PS_INPUT_CNTL_28 | ||
| 418 | 0x000286B8 SPI_PS_INPUT_CNTL_29 | ||
| 419 | 0x000286BC SPI_PS_INPUT_CNTL_30 | ||
| 420 | 0x000286C0 SPI_PS_INPUT_CNTL_31 | ||
| 421 | 0x000286C4 SPI_VS_OUT_CONFIG | ||
| 422 | 0x000286C8 SPI_THREAD_GROUPING | ||
| 423 | 0x000286CC SPI_PS_IN_CONTROL_0 | ||
| 424 | 0x000286D0 SPI_PS_IN_CONTROL_1 | ||
| 425 | 0x000286D4 SPI_INTERP_CONTROL_0 | ||
| 426 | 0x000286D8 SPI_INPUT_Z | ||
| 427 | 0x000286DC SPI_FOG_CNTL | ||
| 428 | 0x000286E0 SPI_BARYC_CNTL | ||
| 429 | 0x000286E4 SPI_PS_IN_CONTROL_2 | ||
| 430 | 0x000286E8 SPI_COMPUTE_INPUT_CNTL | ||
| 431 | 0x000286EC SPI_COMPUTE_NUM_THREAD_X | ||
| 432 | 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y | ||
| 433 | 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z | ||
| 434 | 0x000286F8 GDS_ADDR_SIZE | ||
| 435 | 0x00028780 CB_BLEND0_CONTROL | ||
| 436 | 0x00028784 CB_BLEND1_CONTROL | ||
| 437 | 0x00028788 CB_BLEND2_CONTROL | ||
| 438 | 0x0002878C CB_BLEND3_CONTROL | ||
| 439 | 0x00028790 CB_BLEND4_CONTROL | ||
| 440 | 0x00028794 CB_BLEND5_CONTROL | ||
| 441 | 0x00028798 CB_BLEND6_CONTROL | ||
| 442 | 0x0002879C CB_BLEND7_CONTROL | ||
| 443 | 0x000287CC CS_COPY_STATE | ||
| 444 | 0x000287D0 GFX_COPY_STATE | ||
| 445 | 0x000287D4 PA_CL_POINT_X_RAD | ||
| 446 | 0x000287D8 PA_CL_POINT_Y_RAD | ||
| 447 | 0x000287DC PA_CL_POINT_SIZE | ||
| 448 | 0x000287E0 PA_CL_POINT_CULL_RAD | ||
| 449 | 0x00028808 CB_COLOR_CONTROL | ||
| 450 | 0x0002880C DB_SHADER_CONTROL | ||
| 451 | 0x00028810 PA_CL_CLIP_CNTL | ||
| 452 | 0x00028814 PA_SU_SC_MODE_CNTL | ||
| 453 | 0x00028818 PA_CL_VTE_CNTL | ||
| 454 | 0x0002881C PA_CL_VS_OUT_CNTL | ||
| 455 | 0x00028820 PA_CL_NANINF_CNTL | ||
| 456 | 0x00028824 PA_SU_LINE_STIPPLE_CNTL | ||
| 457 | 0x00028828 PA_SU_LINE_STIPPLE_SCALE | ||
| 458 | 0x0002882C PA_SU_PRIM_FILTER_CNTL | ||
| 459 | 0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1 | ||
| 460 | 0x00028844 SQ_PGM_RESOURCES_PS | ||
| 461 | 0x00028848 SQ_PGM_RESOURCES_2_PS | ||
| 462 | 0x0002884C SQ_PGM_EXPORTS_PS | ||
| 463 | 0x0002885C SQ_PGM_RESOURCES_VS | ||
| 464 | 0x00028860 SQ_PGM_RESOURCES_2_VS | ||
| 465 | 0x00028878 SQ_PGM_RESOURCES_GS | ||
| 466 | 0x0002887C SQ_PGM_RESOURCES_2_GS | ||
| 467 | 0x00028890 SQ_PGM_RESOURCES_ES | ||
| 468 | 0x00028894 SQ_PGM_RESOURCES_2_ES | ||
| 469 | 0x000288A8 SQ_PGM_RESOURCES_FS | ||
| 470 | 0x000288BC SQ_PGM_RESOURCES_HS | ||
| 471 | 0x000288C0 SQ_PGM_RESOURCES_2_HS | ||
| 472 | 0x000288D0 SQ_PGM_RESOURCES_LS | ||
| 473 | 0x000288D4 SQ_PGM_RESOURCES_2_LS | ||
| 474 | 0x000288E8 SQ_LDS_ALLOC | ||
| 475 | 0x000288EC SQ_LDS_ALLOC_PS | ||
| 476 | 0x000288F0 SQ_VTX_SEMANTIC_CLEAR | ||
| 477 | 0x00028A00 PA_SU_POINT_SIZE | ||
| 478 | 0x00028A04 PA_SU_POINT_MINMAX | ||
| 479 | 0x00028A08 PA_SU_LINE_CNTL | ||
| 480 | 0x00028A0C PA_SC_LINE_STIPPLE | ||
| 481 | 0x00028A10 VGT_OUTPUT_PATH_CNTL | ||
| 482 | 0x00028A14 VGT_HOS_CNTL | ||
| 483 | 0x00028A18 VGT_HOS_MAX_TESS_LEVEL | ||
| 484 | 0x00028A1C VGT_HOS_MIN_TESS_LEVEL | ||
| 485 | 0x00028A20 VGT_HOS_REUSE_DEPTH | ||
| 486 | 0x00028A24 VGT_GROUP_PRIM_TYPE | ||
| 487 | 0x00028A28 VGT_GROUP_FIRST_DECR | ||
| 488 | 0x00028A2C VGT_GROUP_DECR | ||
| 489 | 0x00028A30 VGT_GROUP_VECT_0_CNTL | ||
| 490 | 0x00028A34 VGT_GROUP_VECT_1_CNTL | ||
| 491 | 0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL | ||
| 492 | 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL | ||
| 493 | 0x00028A40 VGT_GS_MODE | ||
| 494 | 0x00028A48 PA_SC_MODE_CNTL_0 | ||
| 495 | 0x00028A4C PA_SC_MODE_CNTL_1 | ||
| 496 | 0x00028A50 VGT_ENHANCE | ||
| 497 | 0x00028A54 VGT_GS_PER_ES | ||
| 498 | 0x00028A58 VGT_ES_PER_GS | ||
| 499 | 0x00028A5C VGT_GS_PER_VS | ||
| 500 | 0x00028A6C VGT_GS_OUT_PRIM_TYPE | ||
| 501 | 0x00028A84 VGT_PRIMITIVEID_EN | ||
| 502 | 0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN | ||
| 503 | 0x00028AA0 VGT_INSTANCE_STEP_RATE_0 | ||
| 504 | 0x00028AA4 VGT_INSTANCE_STEP_RATE_1 | ||
| 505 | 0x00028AB4 VGT_REUSE_OFF | ||
| 506 | 0x00028AB8 VGT_VTX_CNT_EN | ||
| 507 | 0x00028ABC DB_HTILE_SURFACE | ||
| 508 | 0x00028AC0 DB_SRESULTS_COMPARE_STATE0 | ||
| 509 | 0x00028AC4 DB_SRESULTS_COMPARE_STATE1 | ||
| 510 | 0x00028AC8 DB_PRELOAD_CONTROL | ||
| 511 | 0x00028B38 VGT_GS_MAX_VERT_OUT | ||
| 512 | 0x00028B54 VGT_SHADER_STAGES_EN | ||
| 513 | 0x00028B58 VGT_LS_HS_CONFIG | ||
| 514 | 0x00028B5C VGT_LS_SIZE | ||
| 515 | 0x00028B60 VGT_HS_SIZE | ||
| 516 | 0x00028B64 VGT_LS_HS_ALLOC | ||
| 517 | 0x00028B68 VGT_HS_PATCH_CONST | ||
| 518 | 0x00028B6C VGT_TF_PARAM | ||
| 519 | 0x00028B70 DB_ALPHA_TO_MASK | ||
| 520 | 0x00028B74 VGT_DISPATCH_INITIATOR | ||
| 521 | 0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL | ||
| 522 | 0x00028B7C PA_SU_POLY_OFFSET_CLAMP | ||
| 523 | 0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE | ||
| 524 | 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET | ||
| 525 | 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE | ||
| 526 | 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET | ||
| 527 | 0x00028B74 VGT_GS_INSTANCE_CNT | ||
| 528 | 0x00028C00 PA_SC_LINE_CNTL | ||
| 529 | 0x00028C08 PA_SU_VTX_CNTL | ||
| 530 | 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ | ||
| 531 | 0x00028C10 PA_CL_GB_VERT_DISC_ADJ | ||
| 532 | 0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ | ||
| 533 | 0x00028C18 PA_CL_GB_HORZ_DISC_ADJ | ||
| 534 | 0x00028C1C PA_SC_AA_SAMPLE_LOCS_0 | ||
| 535 | 0x00028C20 PA_SC_AA_SAMPLE_LOCS_1 | ||
| 536 | 0x00028C24 PA_SC_AA_SAMPLE_LOCS_2 | ||
| 537 | 0x00028C28 PA_SC_AA_SAMPLE_LOCS_3 | ||
| 538 | 0x00028C2C PA_SC_AA_SAMPLE_LOCS_4 | ||
| 539 | 0x00028C30 PA_SC_AA_SAMPLE_LOCS_5 | ||
| 540 | 0x00028C34 PA_SC_AA_SAMPLE_LOCS_6 | ||
| 541 | 0x00028C38 PA_SC_AA_SAMPLE_LOCS_7 | ||
| 542 | 0x00028C3C PA_SC_AA_MASK | ||
| 543 | 0x00028C8C CB_COLOR0_CLEAR_WORD0 | ||
| 544 | 0x00028C90 CB_COLOR0_CLEAR_WORD1 | ||
| 545 | 0x00028C94 CB_COLOR0_CLEAR_WORD2 | ||
| 546 | 0x00028C98 CB_COLOR0_CLEAR_WORD3 | ||
| 547 | 0x00028CC8 CB_COLOR1_CLEAR_WORD0 | ||
| 548 | 0x00028CCC CB_COLOR1_CLEAR_WORD1 | ||
| 549 | 0x00028CD0 CB_COLOR1_CLEAR_WORD2 | ||
| 550 | 0x00028CD4 CB_COLOR1_CLEAR_WORD3 | ||
| 551 | 0x00028D04 CB_COLOR2_CLEAR_WORD0 | ||
| 552 | 0x00028D08 CB_COLOR2_CLEAR_WORD1 | ||
| 553 | 0x00028D0C CB_COLOR2_CLEAR_WORD2 | ||
| 554 | 0x00028D10 CB_COLOR2_CLEAR_WORD3 | ||
| 555 | 0x00028D40 CB_COLOR3_CLEAR_WORD0 | ||
| 556 | 0x00028D44 CB_COLOR3_CLEAR_WORD1 | ||
| 557 | 0x00028D48 CB_COLOR3_CLEAR_WORD2 | ||
| 558 | 0x00028D4C CB_COLOR3_CLEAR_WORD3 | ||
| 559 | 0x00028D7C CB_COLOR4_CLEAR_WORD0 | ||
| 560 | 0x00028D80 CB_COLOR4_CLEAR_WORD1 | ||
| 561 | 0x00028D84 CB_COLOR4_CLEAR_WORD2 | ||
| 562 | 0x00028D88 CB_COLOR4_CLEAR_WORD3 | ||
| 563 | 0x00028DB8 CB_COLOR5_CLEAR_WORD0 | ||
| 564 | 0x00028DBC CB_COLOR5_CLEAR_WORD1 | ||
| 565 | 0x00028DC0 CB_COLOR5_CLEAR_WORD2 | ||
| 566 | 0x00028DC4 CB_COLOR5_CLEAR_WORD3 | ||
| 567 | 0x00028DF4 CB_COLOR6_CLEAR_WORD0 | ||
| 568 | 0x00028DF8 CB_COLOR6_CLEAR_WORD1 | ||
| 569 | 0x00028DFC CB_COLOR6_CLEAR_WORD2 | ||
| 570 | 0x00028E00 CB_COLOR6_CLEAR_WORD3 | ||
| 571 | 0x00028E30 CB_COLOR7_CLEAR_WORD0 | ||
| 572 | 0x00028E34 CB_COLOR7_CLEAR_WORD1 | ||
| 573 | 0x00028E38 CB_COLOR7_CLEAR_WORD2 | ||
| 574 | 0x00028E3C CB_COLOR7_CLEAR_WORD3 | ||
| 575 | 0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0 | ||
| 576 | 0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1 | ||
| 577 | 0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2 | ||
| 578 | 0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3 | ||
| 579 | 0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4 | ||
| 580 | 0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5 | ||
| 581 | 0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6 | ||
| 582 | 0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7 | ||
| 583 | 0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8 | ||
| 584 | 0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9 | ||
| 585 | 0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10 | ||
| 586 | 0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11 | ||
| 587 | 0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12 | ||
| 588 | 0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13 | ||
| 589 | 0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14 | ||
| 590 | 0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15 | ||
| 591 | 0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0 | ||
| 592 | 0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1 | ||
| 593 | 0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2 | ||
| 594 | 0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3 | ||
| 595 | 0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4 | ||
| 596 | 0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5 | ||
| 597 | 0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6 | ||
| 598 | 0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7 | ||
| 599 | 0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8 | ||
| 600 | 0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9 | ||
| 601 | 0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10 | ||
| 602 | 0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11 | ||
| 603 | 0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12 | ||
| 604 | 0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13 | ||
| 605 | 0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14 | ||
| 606 | 0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15 | ||
| 607 | 0x0003CFF0 SQ_VTX_BASE_VTX_LOC | ||
| 608 | 0x0003CFF4 SQ_VTX_START_INST_LOC | ||
| 609 | 0x0003FF00 SQ_TEX_SAMPLER_CLEAR | ||
| 610 | 0x0003FF04 SQ_TEX_RESOURCE_CLEAR | ||
| 611 | 0x0003FF08 SQ_LOOP_BOOL_CLEAR | ||
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 79887cac5b54..7bb4c3e52f3b 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -74,7 +74,8 @@ void rs600_pm_misc(struct radeon_device *rdev) | |||
| 74 | if (voltage->delay) | 74 | if (voltage->delay) |
| 75 | udelay(voltage->delay); | 75 | udelay(voltage->delay); |
| 76 | } | 76 | } |
| 77 | } | 77 | } else if (voltage->type == VOLTAGE_VDDC) |
| 78 | radeon_atom_set_voltage(rdev, voltage->vddc_id); | ||
| 78 | 79 | ||
| 79 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); | 80 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); |
| 80 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); | 81 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 253f24aec031..33952da65340 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -44,7 +44,12 @@ void rv770_fini(struct radeon_device *rdev); | |||
| 44 | 44 | ||
| 45 | void rv770_pm_misc(struct radeon_device *rdev) | 45 | void rv770_pm_misc(struct radeon_device *rdev) |
| 46 | { | 46 | { |
| 47 | int requested_index = rdev->pm.requested_power_state_index; | ||
| 48 | struct radeon_power_state *ps = &rdev->pm.power_state[requested_index]; | ||
| 49 | struct radeon_voltage *voltage = &ps->clock_info[0].voltage; | ||
| 47 | 50 | ||
| 51 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) | ||
| 52 | radeon_atom_set_voltage(rdev, voltage->voltage); | ||
| 48 | } | 53 | } |
| 49 | 54 | ||
| 50 | /* | 55 | /* |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 0d9a42c2394f..ef910694bd63 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
| @@ -77,7 +77,7 @@ struct ttm_page_pool { | |||
| 77 | /** | 77 | /** |
| 78 | * Limits for the pool. They are handled without locks because only place where | 78 | * Limits for the pool. They are handled without locks because only place where |
| 79 | * they may change is in sysfs store. They won't have immediate effect anyway | 79 | * they may change is in sysfs store. They won't have immediate effect anyway |
| 80 | * so forcing serialiazation to access them is pointless. | 80 | * so forcing serialization to access them is pointless. |
| 81 | */ | 81 | */ |
| 82 | 82 | ||
| 83 | struct ttm_pool_opts { | 83 | struct ttm_pool_opts { |
| @@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj, | |||
| 165 | m->options.small = val; | 165 | m->options.small = val; |
| 166 | else if (attr == &ttm_page_pool_alloc_size) { | 166 | else if (attr == &ttm_page_pool_alloc_size) { |
| 167 | if (val > NUM_PAGES_TO_ALLOC*8) { | 167 | if (val > NUM_PAGES_TO_ALLOC*8) { |
| 168 | printk(KERN_ERR "[ttm] Setting allocation size to %lu " | 168 | printk(KERN_ERR TTM_PFX |
| 169 | "is not allowed. Recomended size is " | 169 | "Setting allocation size to %lu " |
| 170 | "%lu\n", | 170 | "is not allowed. Recommended size is " |
| 171 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), | 171 | "%lu\n", |
| 172 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | 172 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), |
| 173 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | ||
| 173 | return size; | 174 | return size; |
| 174 | } else if (val > NUM_PAGES_TO_ALLOC) { | 175 | } else if (val > NUM_PAGES_TO_ALLOC) { |
| 175 | printk(KERN_WARNING "[ttm] Setting allocation size to " | 176 | printk(KERN_WARNING TTM_PFX |
| 176 | "larger than %lu is not recomended.\n", | 177 | "Setting allocation size to " |
| 177 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | 178 | "larger than %lu is not recommended.\n", |
| 179 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | ||
| 178 | } | 180 | } |
| 179 | m->options.alloc_size = val; | 181 | m->options.alloc_size = val; |
| 180 | } | 182 | } |
| @@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages) | |||
| 277 | { | 279 | { |
| 278 | unsigned i; | 280 | unsigned i; |
| 279 | if (set_pages_array_wb(pages, npages)) | 281 | if (set_pages_array_wb(pages, npages)) |
| 280 | printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n", | 282 | printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", |
| 281 | npages); | 283 | npages); |
| 282 | for (i = 0; i < npages; ++i) | 284 | for (i = 0; i < npages; ++i) |
| 283 | __free_page(pages[i]); | 285 | __free_page(pages[i]); |
| @@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) | |||
| 313 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), | 315 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), |
| 314 | GFP_KERNEL); | 316 | GFP_KERNEL); |
| 315 | if (!pages_to_free) { | 317 | if (!pages_to_free) { |
| 316 | printk(KERN_ERR "Failed to allocate memory for pool free operation.\n"); | 318 | printk(KERN_ERR TTM_PFX |
| 319 | "Failed to allocate memory for pool free operation.\n"); | ||
| 317 | return 0; | 320 | return 0; |
| 318 | } | 321 | } |
| 319 | 322 | ||
| @@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void) | |||
| 390 | } | 393 | } |
| 391 | 394 | ||
| 392 | /** | 395 | /** |
| 393 | * Calback for mm to request pool to reduce number of page held. | 396 | * Callback for mm to request pool to reduce number of page held. |
| 394 | */ | 397 | */ |
| 395 | static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) | 398 | static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) |
| 396 | { | 399 | { |
| @@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages, | |||
| 433 | case tt_uncached: | 436 | case tt_uncached: |
| 434 | r = set_pages_array_uc(pages, cpages); | 437 | r = set_pages_array_uc(pages, cpages); |
| 435 | if (r) | 438 | if (r) |
| 436 | printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n", | 439 | printk(KERN_ERR TTM_PFX |
| 437 | cpages); | 440 | "Failed to set %d pages to uc!\n", |
| 441 | cpages); | ||
| 438 | break; | 442 | break; |
| 439 | case tt_wc: | 443 | case tt_wc: |
| 440 | r = set_pages_array_wc(pages, cpages); | 444 | r = set_pages_array_wc(pages, cpages); |
| 441 | if (r) | 445 | if (r) |
| 442 | printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n", | 446 | printk(KERN_ERR TTM_PFX |
| 443 | cpages); | 447 | "Failed to set %d pages to wc!\n", |
| 448 | cpages); | ||
| 444 | break; | 449 | break; |
| 445 | default: | 450 | default: |
| 446 | break; | 451 | break; |
| @@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, | |||
| 458 | struct page **failed_pages, unsigned cpages) | 463 | struct page **failed_pages, unsigned cpages) |
| 459 | { | 464 | { |
| 460 | unsigned i; | 465 | unsigned i; |
| 461 | /* Failed pages has to be reed */ | 466 | /* Failed pages have to be freed */ |
| 462 | for (i = 0; i < cpages; ++i) { | 467 | for (i = 0; i < cpages; ++i) { |
| 463 | list_del(&failed_pages[i]->lru); | 468 | list_del(&failed_pages[i]->lru); |
| 464 | __free_page(failed_pages[i]); | 469 | __free_page(failed_pages[i]); |
| @@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | |||
| 485 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); | 490 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); |
| 486 | 491 | ||
| 487 | if (!caching_array) { | 492 | if (!caching_array) { |
| 488 | printk(KERN_ERR "[ttm] unable to allocate table for new pages."); | 493 | printk(KERN_ERR TTM_PFX |
| 494 | "Unable to allocate table for new pages."); | ||
| 489 | return -ENOMEM; | 495 | return -ENOMEM; |
| 490 | } | 496 | } |
| 491 | 497 | ||
| @@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, | |||
| 493 | p = alloc_page(gfp_flags); | 499 | p = alloc_page(gfp_flags); |
| 494 | 500 | ||
| 495 | if (!p) { | 501 | if (!p) { |
| 496 | printk(KERN_ERR "[ttm] unable to get page %u\n", i); | 502 | printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i); |
| 497 | 503 | ||
| 498 | /* store already allocated pages in the pool after | 504 | /* store already allocated pages in the pool after |
| 499 | * setting the caching state */ | 505 | * setting the caching state */ |
| 500 | if (cpages) { | 506 | if (cpages) { |
| 501 | r = ttm_set_pages_caching(caching_array, cstate, cpages); | 507 | r = ttm_set_pages_caching(caching_array, |
| 508 | cstate, cpages); | ||
| 502 | if (r) | 509 | if (r) |
| 503 | ttm_handle_caching_state_failure(pages, | 510 | ttm_handle_caching_state_failure(pages, |
| 504 | ttm_flags, cstate, | 511 | ttm_flags, cstate, |
| @@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, | |||
| 590 | ++pool->nrefills; | 597 | ++pool->nrefills; |
| 591 | pool->npages += alloc_size; | 598 | pool->npages += alloc_size; |
| 592 | } else { | 599 | } else { |
| 593 | printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool); | 600 | printk(KERN_ERR TTM_PFX |
| 601 | "Failed to fill pool (%p).", pool); | ||
| 594 | /* If we have any pages left put them to the pool. */ | 602 | /* If we have any pages left put them to the pool. */ |
| 595 | list_for_each_entry(p, &pool->list, lru) { | 603 | list_for_each_entry(p, &pool->list, lru) { |
| 596 | ++cpages; | 604 | ++cpages; |
| @@ -671,13 +679,14 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
| 671 | if (flags & TTM_PAGE_FLAG_DMA32) | 679 | if (flags & TTM_PAGE_FLAG_DMA32) |
| 672 | gfp_flags |= GFP_DMA32; | 680 | gfp_flags |= GFP_DMA32; |
| 673 | else | 681 | else |
| 674 | gfp_flags |= __GFP_HIGHMEM; | 682 | gfp_flags |= GFP_HIGHUSER; |
| 675 | 683 | ||
| 676 | for (r = 0; r < count; ++r) { | 684 | for (r = 0; r < count; ++r) { |
| 677 | p = alloc_page(gfp_flags); | 685 | p = alloc_page(gfp_flags); |
| 678 | if (!p) { | 686 | if (!p) { |
| 679 | 687 | ||
| 680 | printk(KERN_ERR "[ttm] unable to allocate page."); | 688 | printk(KERN_ERR TTM_PFX |
| 689 | "Unable to allocate page."); | ||
| 681 | return -ENOMEM; | 690 | return -ENOMEM; |
| 682 | } | 691 | } |
| 683 | 692 | ||
| @@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
| 709 | if (r) { | 718 | if (r) { |
| 710 | /* If there is any pages in the list put them back to | 719 | /* If there is any pages in the list put them back to |
| 711 | * the pool. */ | 720 | * the pool. */ |
| 712 | printk(KERN_ERR "[ttm] Failed to allocate extra pages " | 721 | printk(KERN_ERR TTM_PFX |
| 713 | "for large request."); | 722 | "Failed to allocate extra pages " |
| 723 | "for large request."); | ||
| 714 | ttm_put_pages(pages, 0, flags, cstate); | 724 | ttm_put_pages(pages, 0, flags, cstate); |
| 715 | return r; | 725 | return r; |
| 716 | } | 726 | } |
| @@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | |||
| 778 | if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) | 788 | if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) |
| 779 | return 0; | 789 | return 0; |
| 780 | 790 | ||
| 781 | printk(KERN_INFO "[ttm] Initializing pool allocator.\n"); | 791 | printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); |
| 782 | 792 | ||
| 783 | ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); | 793 | ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); |
| 784 | 794 | ||
| @@ -813,7 +823,7 @@ void ttm_page_alloc_fini() | |||
| 813 | if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) | 823 | if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) |
| 814 | return; | 824 | return; |
| 815 | 825 | ||
| 816 | printk(KERN_INFO "[ttm] Finilizing pool allocator.\n"); | 826 | printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); |
| 817 | ttm_pool_mm_shrink_fini(&_manager); | 827 | ttm_pool_mm_shrink_fini(&_manager); |
| 818 | 828 | ||
| 819 | for (i = 0; i < NUM_POOLS; ++i) | 829 | for (i = 0; i < NUM_POOLS; ++i) |
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 1a3cb6816d1c..4505e17df3f5 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile | |||
| @@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm | |||
| 4 | vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ | 4 | vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ |
| 5 | vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ | 5 | vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ |
| 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ | 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ |
| 7 | vmwgfx_overlay.o | 7 | vmwgfx_overlay.o vmwgfx_fence.o |
| 8 | 8 | ||
| 9 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o | 9 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 0c9c0811f42d..b793c8c9acb3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -88,6 +88,9 @@ | |||
| 88 | #define DRM_IOCTL_VMW_FENCE_WAIT \ | 88 | #define DRM_IOCTL_VMW_FENCE_WAIT \ |
| 89 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ | 89 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ |
| 90 | struct drm_vmw_fence_wait_arg) | 90 | struct drm_vmw_fence_wait_arg) |
| 91 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ | ||
| 92 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | ||
| 93 | struct drm_vmw_update_layout_arg) | ||
| 91 | 94 | ||
| 92 | 95 | ||
| 93 | /** | 96 | /** |
| @@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { | |||
| 135 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, | 138 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, |
| 136 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), | 139 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), |
| 137 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, | 140 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, |
| 138 | DRM_AUTH | DRM_UNLOCKED) | 141 | DRM_AUTH | DRM_UNLOCKED), |
| 142 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, | ||
| 143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) | ||
| 139 | }; | 144 | }; |
| 140 | 145 | ||
| 141 | static struct pci_device_id vmw_pci_id_list[] = { | 146 | static struct pci_device_id vmw_pci_id_list[] = { |
| @@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 318 | goto out_err3; | 323 | goto out_err3; |
| 319 | } | 324 | } |
| 320 | 325 | ||
| 326 | /* Need mmio memory to check for fifo pitchlock cap. */ | ||
| 327 | if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && | ||
| 328 | !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && | ||
| 329 | !vmw_fifo_have_pitchlock(dev_priv)) { | ||
| 330 | ret = -ENOSYS; | ||
| 331 | DRM_ERROR("Hardware has no pitchlock\n"); | ||
| 332 | goto out_err4; | ||
| 333 | } | ||
| 334 | |||
| 321 | dev_priv->tdev = ttm_object_device_init | 335 | dev_priv->tdev = ttm_object_device_init |
| 322 | (dev_priv->mem_global_ref.object, 12); | 336 | (dev_priv->mem_global_ref.object, 12); |
| 323 | 337 | ||
| @@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
| 399 | { | 413 | { |
| 400 | struct vmw_private *dev_priv = vmw_priv(dev); | 414 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 401 | 415 | ||
| 402 | DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); | ||
| 403 | |||
| 404 | unregister_pm_notifier(&dev_priv->pm_nb); | 416 | unregister_pm_notifier(&dev_priv->pm_nb); |
| 405 | 417 | ||
| 406 | vmw_fb_close(dev_priv); | 418 | vmw_fb_close(dev_priv); |
| @@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev, | |||
| 546 | { | 558 | { |
| 547 | struct vmw_master *vmaster; | 559 | struct vmw_master *vmaster; |
| 548 | 560 | ||
| 549 | DRM_INFO("Master create.\n"); | ||
| 550 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); | 561 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); |
| 551 | if (unlikely(vmaster == NULL)) | 562 | if (unlikely(vmaster == NULL)) |
| 552 | return -ENOMEM; | 563 | return -ENOMEM; |
| @@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev, | |||
| 563 | { | 574 | { |
| 564 | struct vmw_master *vmaster = vmw_master(master); | 575 | struct vmw_master *vmaster = vmw_master(master); |
| 565 | 576 | ||
| 566 | DRM_INFO("Master destroy.\n"); | ||
| 567 | master->driver_priv = NULL; | 577 | master->driver_priv = NULL; |
| 568 | kfree(vmaster); | 578 | kfree(vmaster); |
| 569 | } | 579 | } |
| @@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev, | |||
| 579 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 589 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
| 580 | int ret = 0; | 590 | int ret = 0; |
| 581 | 591 | ||
| 582 | DRM_INFO("Master set.\n"); | ||
| 583 | |||
| 584 | if (active) { | 592 | if (active) { |
| 585 | BUG_ON(active != &dev_priv->fbdev_master); | 593 | BUG_ON(active != &dev_priv->fbdev_master); |
| 586 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | 594 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); |
| @@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev, | |||
| 622 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 630 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
| 623 | int ret; | 631 | int ret; |
| 624 | 632 | ||
| 625 | DRM_INFO("Master drop.\n"); | ||
| 626 | |||
| 627 | /** | 633 | /** |
| 628 | * Make sure the master doesn't disappear while we have | 634 | * Make sure the master doesn't disappear while we have |
| 629 | * it locked. | 635 | * it locked. |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 356dc935ec13..eaad52095339 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -41,12 +41,13 @@ | |||
| 41 | 41 | ||
| 42 | #define VMWGFX_DRIVER_DATE "20100209" | 42 | #define VMWGFX_DRIVER_DATE "20100209" |
| 43 | #define VMWGFX_DRIVER_MAJOR 1 | 43 | #define VMWGFX_DRIVER_MAJOR 1 |
| 44 | #define VMWGFX_DRIVER_MINOR 0 | 44 | #define VMWGFX_DRIVER_MINOR 2 |
| 45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 45 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
| 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 46 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
| 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 47 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
| 48 | #define VMWGFX_MAX_RELOCATIONS 2048 | 48 | #define VMWGFX_MAX_RELOCATIONS 2048 |
| 49 | #define VMWGFX_MAX_GMRS 2048 | 49 | #define VMWGFX_MAX_GMRS 2048 |
| 50 | #define VMWGFX_MAX_DISPLAYS 16 | ||
| 50 | 51 | ||
| 51 | struct vmw_fpriv { | 52 | struct vmw_fpriv { |
| 52 | struct drm_master *locked_master; | 53 | struct drm_master *locked_master; |
| @@ -102,6 +103,13 @@ struct vmw_surface { | |||
| 102 | struct vmw_cursor_snooper snooper; | 103 | struct vmw_cursor_snooper snooper; |
| 103 | }; | 104 | }; |
| 104 | 105 | ||
| 106 | struct vmw_fence_queue { | ||
| 107 | struct list_head head; | ||
| 108 | struct timespec lag; | ||
| 109 | struct timespec lag_time; | ||
| 110 | spinlock_t lock; | ||
| 111 | }; | ||
| 112 | |||
| 105 | struct vmw_fifo_state { | 113 | struct vmw_fifo_state { |
| 106 | unsigned long reserved_size; | 114 | unsigned long reserved_size; |
| 107 | __le32 *dynamic_buffer; | 115 | __le32 *dynamic_buffer; |
| @@ -115,6 +123,7 @@ struct vmw_fifo_state { | |||
| 115 | uint32_t capabilities; | 123 | uint32_t capabilities; |
| 116 | struct mutex fifo_mutex; | 124 | struct mutex fifo_mutex; |
| 117 | struct rw_semaphore rwsem; | 125 | struct rw_semaphore rwsem; |
| 126 | struct vmw_fence_queue fence_queue; | ||
| 118 | }; | 127 | }; |
| 119 | 128 | ||
| 120 | struct vmw_relocation { | 129 | struct vmw_relocation { |
| @@ -144,6 +153,14 @@ struct vmw_master { | |||
| 144 | struct ttm_lock lock; | 153 | struct ttm_lock lock; |
| 145 | }; | 154 | }; |
| 146 | 155 | ||
| 156 | struct vmw_vga_topology_state { | ||
| 157 | uint32_t width; | ||
| 158 | uint32_t height; | ||
| 159 | uint32_t primary; | ||
| 160 | uint32_t pos_x; | ||
| 161 | uint32_t pos_y; | ||
| 162 | }; | ||
| 163 | |||
| 147 | struct vmw_private { | 164 | struct vmw_private { |
| 148 | struct ttm_bo_device bdev; | 165 | struct ttm_bo_device bdev; |
| 149 | struct ttm_bo_global_ref bo_global_ref; | 166 | struct ttm_bo_global_ref bo_global_ref; |
| @@ -171,14 +188,19 @@ struct vmw_private { | |||
| 171 | * VGA registers. | 188 | * VGA registers. |
| 172 | */ | 189 | */ |
| 173 | 190 | ||
| 191 | struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS]; | ||
| 174 | uint32_t vga_width; | 192 | uint32_t vga_width; |
| 175 | uint32_t vga_height; | 193 | uint32_t vga_height; |
| 176 | uint32_t vga_depth; | 194 | uint32_t vga_depth; |
| 177 | uint32_t vga_bpp; | 195 | uint32_t vga_bpp; |
| 178 | uint32_t vga_pseudo; | 196 | uint32_t vga_pseudo; |
| 179 | uint32_t vga_red_mask; | 197 | uint32_t vga_red_mask; |
| 180 | uint32_t vga_blue_mask; | ||
| 181 | uint32_t vga_green_mask; | 198 | uint32_t vga_green_mask; |
| 199 | uint32_t vga_blue_mask; | ||
| 200 | uint32_t vga_bpl; | ||
| 201 | uint32_t vga_pitchlock; | ||
| 202 | |||
| 203 | uint32_t num_displays; | ||
| 182 | 204 | ||
| 183 | /* | 205 | /* |
| 184 | * Framebuffer info. | 206 | * Framebuffer info. |
| @@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, | |||
| 393 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); | 415 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); |
| 394 | extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); | 416 | extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma); |
| 395 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); | 417 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); |
| 418 | extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); | ||
| 396 | 419 | ||
| 397 | /** | 420 | /** |
| 398 | * TTM glue - vmwgfx_ttm_glue.c | 421 | * TTM glue - vmwgfx_ttm_glue.c |
| @@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
| 441 | uint32_t sequence, | 464 | uint32_t sequence, |
| 442 | bool interruptible, | 465 | bool interruptible, |
| 443 | unsigned long timeout); | 466 | unsigned long timeout); |
| 467 | extern void vmw_update_sequence(struct vmw_private *dev_priv, | ||
| 468 | struct vmw_fifo_state *fifo_state); | ||
| 469 | |||
| 470 | |||
| 471 | /** | ||
| 472 | * Rudimentary fence objects currently used only for throttling - | ||
| 473 | * vmwgfx_fence.c | ||
| 474 | */ | ||
| 475 | |||
| 476 | extern void vmw_fence_queue_init(struct vmw_fence_queue *queue); | ||
| 477 | extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue); | ||
| 478 | extern int vmw_fence_push(struct vmw_fence_queue *queue, | ||
| 479 | uint32_t sequence); | ||
| 480 | extern int vmw_fence_pull(struct vmw_fence_queue *queue, | ||
| 481 | uint32_t signaled_sequence); | ||
| 482 | extern int vmw_wait_lag(struct vmw_private *dev_priv, | ||
| 483 | struct vmw_fence_queue *queue, uint32_t us); | ||
| 444 | 484 | ||
| 445 | /** | 485 | /** |
| 446 | * Kernel framebuffer - vmwgfx_fb.c | 486 | * Kernel framebuffer - vmwgfx_fb.c |
| @@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, | |||
| 466 | struct ttm_object_file *tfile, | 506 | struct ttm_object_file *tfile, |
| 467 | struct ttm_buffer_object *bo, | 507 | struct ttm_buffer_object *bo, |
| 468 | SVGA3dCmdHeader *header); | 508 | SVGA3dCmdHeader *header); |
| 509 | void vmw_kms_write_svga(struct vmw_private *vmw_priv, | ||
| 510 | unsigned width, unsigned height, unsigned pitch, | ||
| 511 | unsigned bbp, unsigned depth); | ||
| 512 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | ||
| 513 | struct drm_file *file_priv); | ||
| 469 | 514 | ||
| 470 | /** | 515 | /** |
| 471 | * Overlay control - vmwgfx_overlay.c | 516 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index dbd36b8910cf..bdd67cf83315 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -669,6 +669,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
| 669 | goto out_err; | 669 | goto out_err; |
| 670 | 670 | ||
| 671 | vmw_apply_relocations(sw_context); | 671 | vmw_apply_relocations(sw_context); |
| 672 | |||
| 673 | if (arg->throttle_us) { | ||
| 674 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue, | ||
| 675 | arg->throttle_us); | ||
| 676 | |||
| 677 | if (unlikely(ret != 0)) | ||
| 678 | goto out_err; | ||
| 679 | } | ||
| 680 | |||
| 672 | vmw_fifo_commit(dev_priv, arg->command_size); | 681 | vmw_fifo_commit(dev_priv, arg->command_size); |
| 673 | 682 | ||
| 674 | ret = vmw_fifo_send_fence(dev_priv, &sequence); | 683 | ret = vmw_fifo_send_fence(dev_priv, &sequence); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 7421aaad8d09..b0866f04ec76 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
| @@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, | |||
| 132 | return -EINVAL; | 132 | return -EINVAL; |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /* without multimon its hard to resize */ | 135 | if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && |
| 136 | if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) && | 136 | (var->xoffset != 0 || var->yoffset != 0)) { |
| 137 | (var->xres != par->max_width || | 137 | DRM_ERROR("Can not handle panning without display topology\n"); |
| 138 | var->yres != par->max_height)) { | ||
| 139 | DRM_ERROR("Tried to resize, but we don't have multimon\n"); | ||
| 140 | return -EINVAL; | 138 | return -EINVAL; |
| 141 | } | 139 | } |
| 142 | 140 | ||
| 143 | if (var->xres > par->max_width || | 141 | if ((var->xoffset + var->xres) > par->max_width || |
| 144 | var->yres > par->max_height) { | 142 | (var->yoffset + var->yres) > par->max_height) { |
| 145 | DRM_ERROR("Requested geom can not fit in framebuffer\n"); | 143 | DRM_ERROR("Requested geom can not fit in framebuffer\n"); |
| 146 | return -EINVAL; | 144 | return -EINVAL; |
| 147 | } | 145 | } |
| @@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info) | |||
| 154 | struct vmw_fb_par *par = info->par; | 152 | struct vmw_fb_par *par = info->par; |
| 155 | struct vmw_private *vmw_priv = par->vmw_priv; | 153 | struct vmw_private *vmw_priv = par->vmw_priv; |
| 156 | 154 | ||
| 157 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | 155 | vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, |
| 158 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | 156 | info->fix.line_length, |
| 159 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | 157 | par->bpp, par->depth); |
| 160 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | 158 | if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { |
| 161 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 162 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 163 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 164 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 165 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 166 | |||
| 167 | vmw_write(vmw_priv, SVGA_REG_ENABLE, 1); | ||
| 168 | vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width); | ||
| 169 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height); | ||
| 170 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp); | ||
| 171 | vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth); | ||
| 172 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 173 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 174 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 175 | |||
| 176 | /* TODO check if pitch and offset changes */ | 159 | /* TODO check if pitch and offset changes */ |
| 177 | |||
| 178 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | 160 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); |
| 179 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | 161 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); |
| 180 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | 162 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); |
| @@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info) | |||
| 183 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); | 165 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); |
| 184 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); | 166 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); |
| 185 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 167 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); |
| 186 | } else { | ||
| 187 | vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres); | ||
| 188 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres); | ||
| 189 | |||
| 190 | /* TODO check if pitch and offset changes */ | ||
| 191 | } | 168 | } |
| 192 | 169 | ||
| 170 | /* This is really helpful since if this fails the user | ||
| 171 | * can probably not see anything on the screen. | ||
| 172 | */ | ||
| 173 | WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0); | ||
| 174 | |||
| 193 | return 0; | 175 | return 0; |
| 194 | } | 176 | } |
| 195 | 177 | ||
| @@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv) | |||
| 416 | unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; | 398 | unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; |
| 417 | int ret; | 399 | int ret; |
| 418 | 400 | ||
| 401 | /* XXX These shouldn't be hardcoded. */ | ||
| 419 | initial_width = 800; | 402 | initial_width = 800; |
| 420 | initial_height = 600; | 403 | initial_height = 600; |
| 421 | 404 | ||
| 422 | fb_bbp = 32; | 405 | fb_bbp = 32; |
| 423 | fb_depth = 24; | 406 | fb_depth = 24; |
| 424 | 407 | ||
| 425 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | 408 | /* XXX As shouldn't these be as well. */ |
| 426 | fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); | 409 | fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); |
| 427 | fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); | 410 | fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); |
| 428 | } else { | ||
| 429 | fb_width = min(vmw_priv->fb_max_width, initial_width); | ||
| 430 | fb_height = min(vmw_priv->fb_max_height, initial_height); | ||
| 431 | } | ||
| 432 | 411 | ||
| 433 | initial_width = min(fb_width, initial_width); | 412 | initial_width = min(fb_width, initial_width); |
| 434 | initial_height = min(fb_height, initial_height); | 413 | initial_height = min(fb_height, initial_height); |
| 435 | 414 | ||
| 436 | vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width); | 415 | fb_pitch = fb_width * fb_bbp / 8; |
| 437 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height); | 416 | fb_size = fb_pitch * fb_height; |
| 438 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp); | ||
| 439 | vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth); | ||
| 440 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 441 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 442 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 443 | |||
| 444 | fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE); | ||
| 445 | fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); | 417 | fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); |
| 446 | fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE); | ||
| 447 | |||
| 448 | DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH)); | ||
| 449 | DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT)); | ||
| 450 | DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH)); | ||
| 451 | DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT)); | ||
| 452 | DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL)); | ||
| 453 | DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH)); | ||
| 454 | DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE)); | ||
| 455 | DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK)); | ||
| 456 | DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK)); | ||
| 457 | DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK)); | ||
| 458 | DRM_DEBUG("fb_offset 0x%08x\n", fb_offset); | ||
| 459 | DRM_DEBUG("fb_pitch %u\n", fb_pitch); | ||
| 460 | DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024); | ||
| 461 | 418 | ||
| 462 | info = framebuffer_alloc(sizeof(*par), device); | 419 | info = framebuffer_alloc(sizeof(*par), device); |
| 463 | if (!info) | 420 | if (!info) |
| @@ -659,6 +616,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
| 659 | goto err_unlock; | 616 | goto err_unlock; |
| 660 | 617 | ||
| 661 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | 618 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); |
| 619 | |||
| 620 | /* Could probably bug on */ | ||
| 621 | WARN_ON(bo->offset != 0); | ||
| 622 | |||
| 662 | ttm_bo_unreserve(bo); | 623 | ttm_bo_unreserve(bo); |
| 663 | err_unlock: | 624 | err_unlock: |
| 664 | ttm_write_unlock(&vmw_priv->active_master->lock); | 625 | ttm_write_unlock(&vmw_priv->active_master->lock); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c new file mode 100644 index 000000000000..61eacc1b5ca3 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
| @@ -0,0 +1,173 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | |||
| 29 | #include "vmwgfx_drv.h" | ||
| 30 | |||
| 31 | struct vmw_fence { | ||
| 32 | struct list_head head; | ||
| 33 | uint32_t sequence; | ||
| 34 | struct timespec submitted; | ||
| 35 | }; | ||
| 36 | |||
| 37 | void vmw_fence_queue_init(struct vmw_fence_queue *queue) | ||
| 38 | { | ||
| 39 | INIT_LIST_HEAD(&queue->head); | ||
| 40 | queue->lag = ns_to_timespec(0); | ||
| 41 | getrawmonotonic(&queue->lag_time); | ||
| 42 | spin_lock_init(&queue->lock); | ||
| 43 | } | ||
| 44 | |||
| 45 | void vmw_fence_queue_takedown(struct vmw_fence_queue *queue) | ||
| 46 | { | ||
| 47 | struct vmw_fence *fence, *next; | ||
| 48 | |||
| 49 | spin_lock(&queue->lock); | ||
| 50 | list_for_each_entry_safe(fence, next, &queue->head, head) { | ||
| 51 | kfree(fence); | ||
| 52 | } | ||
| 53 | spin_unlock(&queue->lock); | ||
| 54 | } | ||
| 55 | |||
| 56 | int vmw_fence_push(struct vmw_fence_queue *queue, | ||
| 57 | uint32_t sequence) | ||
| 58 | { | ||
| 59 | struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); | ||
| 60 | |||
| 61 | if (unlikely(!fence)) | ||
| 62 | return -ENOMEM; | ||
| 63 | |||
| 64 | fence->sequence = sequence; | ||
| 65 | getrawmonotonic(&fence->submitted); | ||
| 66 | spin_lock(&queue->lock); | ||
| 67 | list_add_tail(&fence->head, &queue->head); | ||
| 68 | spin_unlock(&queue->lock); | ||
| 69 | |||
| 70 | return 0; | ||
| 71 | } | ||
| 72 | |||
| 73 | int vmw_fence_pull(struct vmw_fence_queue *queue, | ||
| 74 | uint32_t signaled_sequence) | ||
| 75 | { | ||
| 76 | struct vmw_fence *fence, *next; | ||
| 77 | struct timespec now; | ||
| 78 | bool updated = false; | ||
| 79 | |||
| 80 | spin_lock(&queue->lock); | ||
| 81 | getrawmonotonic(&now); | ||
| 82 | |||
| 83 | if (list_empty(&queue->head)) { | ||
| 84 | queue->lag = ns_to_timespec(0); | ||
| 85 | queue->lag_time = now; | ||
| 86 | updated = true; | ||
| 87 | goto out_unlock; | ||
| 88 | } | ||
| 89 | |||
| 90 | list_for_each_entry_safe(fence, next, &queue->head, head) { | ||
| 91 | if (signaled_sequence - fence->sequence > (1 << 30)) | ||
| 92 | continue; | ||
| 93 | |||
| 94 | queue->lag = timespec_sub(now, fence->submitted); | ||
| 95 | queue->lag_time = now; | ||
| 96 | updated = true; | ||
| 97 | list_del(&fence->head); | ||
| 98 | kfree(fence); | ||
| 99 | } | ||
| 100 | |||
| 101 | out_unlock: | ||
| 102 | spin_unlock(&queue->lock); | ||
| 103 | |||
| 104 | return (updated) ? 0 : -EBUSY; | ||
| 105 | } | ||
| 106 | |||
| 107 | static struct timespec vmw_timespec_add(struct timespec t1, | ||
| 108 | struct timespec t2) | ||
| 109 | { | ||
| 110 | t1.tv_sec += t2.tv_sec; | ||
| 111 | t1.tv_nsec += t2.tv_nsec; | ||
| 112 | if (t1.tv_nsec >= 1000000000L) { | ||
| 113 | t1.tv_sec += 1; | ||
| 114 | t1.tv_nsec -= 1000000000L; | ||
| 115 | } | ||
| 116 | |||
| 117 | return t1; | ||
| 118 | } | ||
| 119 | |||
| 120 | static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue) | ||
| 121 | { | ||
| 122 | struct timespec now; | ||
| 123 | |||
| 124 | spin_lock(&queue->lock); | ||
| 125 | getrawmonotonic(&now); | ||
| 126 | queue->lag = vmw_timespec_add(queue->lag, | ||
| 127 | timespec_sub(now, queue->lag_time)); | ||
| 128 | queue->lag_time = now; | ||
| 129 | spin_unlock(&queue->lock); | ||
| 130 | return queue->lag; | ||
| 131 | } | ||
| 132 | |||
| 133 | |||
| 134 | static bool vmw_lag_lt(struct vmw_fence_queue *queue, | ||
| 135 | uint32_t us) | ||
| 136 | { | ||
| 137 | struct timespec lag, cond; | ||
| 138 | |||
| 139 | cond = ns_to_timespec((s64) us * 1000); | ||
| 140 | lag = vmw_fifo_lag(queue); | ||
| 141 | return (timespec_compare(&lag, &cond) < 1); | ||
| 142 | } | ||
| 143 | |||
| 144 | int vmw_wait_lag(struct vmw_private *dev_priv, | ||
| 145 | struct vmw_fence_queue *queue, uint32_t us) | ||
| 146 | { | ||
| 147 | struct vmw_fence *fence; | ||
| 148 | uint32_t sequence; | ||
| 149 | int ret; | ||
| 150 | |||
| 151 | while (!vmw_lag_lt(queue, us)) { | ||
| 152 | spin_lock(&queue->lock); | ||
| 153 | if (list_empty(&queue->head)) | ||
| 154 | sequence = atomic_read(&dev_priv->fence_seq); | ||
| 155 | else { | ||
| 156 | fence = list_first_entry(&queue->head, | ||
| 157 | struct vmw_fence, head); | ||
| 158 | sequence = fence->sequence; | ||
| 159 | } | ||
| 160 | spin_unlock(&queue->lock); | ||
| 161 | |||
| 162 | ret = vmw_wait_fence(dev_priv, false, sequence, true, | ||
| 163 | 3*HZ); | ||
| 164 | |||
| 165 | if (unlikely(ret != 0)) | ||
| 166 | return ret; | ||
| 167 | |||
| 168 | (void) vmw_fence_pull(queue, sequence); | ||
| 169 | } | ||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | |||
| 173 | |||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 39d43a01d846..e6a1eb7ea954 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
| 34 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 34 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| 35 | uint32_t fifo_min, hwversion; | 35 | uint32_t fifo_min, hwversion; |
| 36 | 36 | ||
| 37 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) | ||
| 38 | return false; | ||
| 39 | |||
| 37 | fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 40 | fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
| 38 | if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) | 41 | if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) |
| 39 | return false; | 42 | return false; |
| @@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
| 48 | return true; | 51 | return true; |
| 49 | } | 52 | } |
| 50 | 53 | ||
| 54 | bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) | ||
| 55 | { | ||
| 56 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 57 | uint32_t caps; | ||
| 58 | |||
| 59 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) | ||
| 60 | return false; | ||
| 61 | |||
| 62 | caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); | ||
| 63 | if (caps & SVGA_FIFO_CAP_PITCHLOCK) | ||
| 64 | return true; | ||
| 65 | |||
| 66 | return false; | ||
| 67 | } | ||
| 68 | |||
| 51 | int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | 69 | int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) |
| 52 | { | 70 | { |
| 53 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 71 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| @@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 120 | 138 | ||
| 121 | atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); | 139 | atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); |
| 122 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); | 140 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); |
| 123 | 141 | vmw_fence_queue_init(&fifo->fence_queue); | |
| 124 | return vmw_fifo_send_fence(dev_priv, &dummy); | 142 | return vmw_fifo_send_fence(dev_priv, &dummy); |
| 125 | out_err: | 143 | out_err: |
| 126 | vfree(fifo->static_buffer); | 144 | vfree(fifo->static_buffer); |
| @@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 159 | dev_priv->enable_state); | 177 | dev_priv->enable_state); |
| 160 | 178 | ||
| 161 | mutex_unlock(&dev_priv->hw_mutex); | 179 | mutex_unlock(&dev_priv->hw_mutex); |
| 180 | vmw_fence_queue_takedown(&fifo->fence_queue); | ||
| 162 | 181 | ||
| 163 | if (likely(fifo->last_buffer != NULL)) { | 182 | if (likely(fifo->last_buffer != NULL)) { |
| 164 | vfree(fifo->last_buffer); | 183 | vfree(fifo->last_buffer); |
| @@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | |||
| 484 | fifo_state->last_buffer_add = true; | 503 | fifo_state->last_buffer_add = true; |
| 485 | vmw_fifo_commit(dev_priv, bytes); | 504 | vmw_fifo_commit(dev_priv, bytes); |
| 486 | fifo_state->last_buffer_add = false; | 505 | fifo_state->last_buffer_add = false; |
| 506 | (void) vmw_fence_push(&fifo_state->fence_queue, *sequence); | ||
| 507 | vmw_update_sequence(dev_priv, fifo_state); | ||
| 487 | 508 | ||
| 488 | out_err: | 509 | out_err: |
| 489 | return ret; | 510 | return ret; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 4d7cb5393860..e92298a6a383 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
| @@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) | |||
| 64 | return (busy == 0); | 64 | return (busy == 0); |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | void vmw_update_sequence(struct vmw_private *dev_priv, | ||
| 68 | struct vmw_fifo_state *fifo_state) | ||
| 69 | { | ||
| 70 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 71 | |||
| 72 | uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); | ||
| 73 | |||
| 74 | if (dev_priv->last_read_sequence != sequence) { | ||
| 75 | dev_priv->last_read_sequence = sequence; | ||
| 76 | vmw_fence_pull(&fifo_state->fence_queue, sequence); | ||
| 77 | } | ||
| 78 | } | ||
| 67 | 79 | ||
| 68 | bool vmw_fence_signaled(struct vmw_private *dev_priv, | 80 | bool vmw_fence_signaled(struct vmw_private *dev_priv, |
| 69 | uint32_t sequence) | 81 | uint32_t sequence) |
| 70 | { | 82 | { |
| 71 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 72 | struct vmw_fifo_state *fifo_state; | 83 | struct vmw_fifo_state *fifo_state; |
| 73 | bool ret; | 84 | bool ret; |
| 74 | 85 | ||
| 75 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | 86 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) |
| 76 | return true; | 87 | return true; |
| 77 | 88 | ||
| 78 | dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); | 89 | fifo_state = &dev_priv->fifo; |
| 90 | vmw_update_sequence(dev_priv, fifo_state); | ||
| 79 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | 91 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) |
| 80 | return true; | 92 | return true; |
| 81 | 93 | ||
| 82 | fifo_state = &dev_priv->fifo; | ||
| 83 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && | 94 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && |
| 84 | vmw_fifo_idle(dev_priv, sequence)) | 95 | vmw_fifo_idle(dev_priv, sequence)) |
| 85 | return true; | 96 | return true; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index bbc7c4c30bc7..f1d626112415 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -30,6 +30,8 @@ | |||
| 30 | /* Might need a hrtimer here? */ | 30 | /* Might need a hrtimer here? */ |
| 31 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) | 31 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) |
| 32 | 32 | ||
| 33 | static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb); | ||
| 34 | static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb); | ||
| 33 | 35 | ||
| 34 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) | 36 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) |
| 35 | { | 37 | { |
| @@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, | |||
| 326 | struct vmw_framebuffer_surface { | 328 | struct vmw_framebuffer_surface { |
| 327 | struct vmw_framebuffer base; | 329 | struct vmw_framebuffer base; |
| 328 | struct vmw_surface *surface; | 330 | struct vmw_surface *surface; |
| 331 | struct vmw_dma_buffer *buffer; | ||
| 329 | struct delayed_work d_work; | 332 | struct delayed_work d_work; |
| 330 | struct mutex work_lock; | 333 | struct mutex work_lock; |
| 331 | bool present_fs; | 334 | bool present_fs; |
| @@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
| 500 | vfbs->base.base.depth = 24; | 503 | vfbs->base.base.depth = 24; |
| 501 | vfbs->base.base.width = width; | 504 | vfbs->base.base.width = width; |
| 502 | vfbs->base.base.height = height; | 505 | vfbs->base.base.height = height; |
| 503 | vfbs->base.pin = NULL; | 506 | vfbs->base.pin = &vmw_surface_dmabuf_pin; |
| 504 | vfbs->base.unpin = NULL; | 507 | vfbs->base.unpin = &vmw_surface_dmabuf_unpin; |
| 505 | vfbs->surface = surface; | 508 | vfbs->surface = surface; |
| 506 | mutex_init(&vfbs->work_lock); | 509 | mutex_init(&vfbs->work_lock); |
| 507 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); | 510 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); |
| @@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { | |||
| 589 | .create_handle = vmw_framebuffer_create_handle, | 592 | .create_handle = vmw_framebuffer_create_handle, |
| 590 | }; | 593 | }; |
| 591 | 594 | ||
| 595 | static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) | ||
| 596 | { | ||
| 597 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | ||
| 598 | struct vmw_framebuffer_surface *vfbs = | ||
| 599 | vmw_framebuffer_to_vfbs(&vfb->base); | ||
| 600 | unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height; | ||
| 601 | int ret; | ||
| 602 | |||
| 603 | vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL); | ||
| 604 | if (unlikely(vfbs->buffer == NULL)) | ||
| 605 | return -ENOMEM; | ||
| 606 | |||
| 607 | vmw_overlay_pause_all(dev_priv); | ||
| 608 | ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size, | ||
| 609 | &vmw_vram_ne_placement, | ||
| 610 | false, &vmw_dmabuf_bo_free); | ||
| 611 | vmw_overlay_resume_all(dev_priv); | ||
| 612 | |||
| 613 | return ret; | ||
| 614 | } | ||
| 615 | |||
| 616 | static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) | ||
| 617 | { | ||
| 618 | struct ttm_buffer_object *bo; | ||
| 619 | struct vmw_framebuffer_surface *vfbs = | ||
| 620 | vmw_framebuffer_to_vfbs(&vfb->base); | ||
| 621 | |||
| 622 | bo = &vfbs->buffer->base; | ||
| 623 | ttm_bo_unref(&bo); | ||
| 624 | vfbs->buffer = NULL; | ||
| 625 | |||
| 626 | return 0; | ||
| 627 | } | ||
| 628 | |||
| 592 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | 629 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) |
| 593 | { | 630 | { |
| 594 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | 631 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); |
| @@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | |||
| 596 | vmw_framebuffer_to_vfbd(&vfb->base); | 633 | vmw_framebuffer_to_vfbd(&vfb->base); |
| 597 | int ret; | 634 | int ret; |
| 598 | 635 | ||
| 636 | |||
| 599 | vmw_overlay_pause_all(dev_priv); | 637 | vmw_overlay_pause_all(dev_priv); |
| 600 | 638 | ||
| 601 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); | 639 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); |
| 602 | 640 | ||
| 603 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
| 604 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
| 605 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0); | ||
| 606 | vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
| 607 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 608 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 609 | vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 610 | vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 611 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 612 | |||
| 613 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | ||
| 614 | vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width); | ||
| 615 | vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height); | ||
| 616 | vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel); | ||
| 617 | vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth); | ||
| 618 | vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 619 | vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 620 | vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 621 | } else | ||
| 622 | WARN_ON(true); | ||
| 623 | |||
| 624 | vmw_overlay_resume_all(dev_priv); | 641 | vmw_overlay_resume_all(dev_priv); |
| 625 | 642 | ||
| 643 | WARN_ON(ret != 0); | ||
| 644 | |||
| 626 | return 0; | 645 | return 0; |
| 627 | } | 646 | } |
| 628 | 647 | ||
| @@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | |||
| 668 | 687 | ||
| 669 | /* XXX get the first 3 from the surface info */ | 688 | /* XXX get the first 3 from the surface info */ |
| 670 | vfbd->base.base.bits_per_pixel = 32; | 689 | vfbd->base.base.bits_per_pixel = 32; |
| 671 | vfbd->base.base.pitch = width * 32 / 4; | 690 | vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; |
| 672 | vfbd->base.base.depth = 24; | 691 | vfbd->base.base.depth = 24; |
| 673 | vfbd->base.base.width = width; | 692 | vfbd->base.base.width = width; |
| 674 | vfbd->base.base.height = height; | 693 | vfbd->base.base.height = height; |
| @@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) | |||
| 765 | dev->mode_config.funcs = &vmw_kms_funcs; | 784 | dev->mode_config.funcs = &vmw_kms_funcs; |
| 766 | dev->mode_config.min_width = 1; | 785 | dev->mode_config.min_width = 1; |
| 767 | dev->mode_config.min_height = 1; | 786 | dev->mode_config.min_height = 1; |
| 768 | dev->mode_config.max_width = dev_priv->fb_max_width; | 787 | /* assumed largest fb size */ |
| 769 | dev->mode_config.max_height = dev_priv->fb_max_height; | 788 | dev->mode_config.max_width = 8192; |
| 789 | dev->mode_config.max_height = 8192; | ||
| 770 | 790 | ||
| 771 | ret = vmw_kms_init_legacy_display_system(dev_priv); | 791 | ret = vmw_kms_init_legacy_display_system(dev_priv); |
| 772 | 792 | ||
| @@ -826,49 +846,140 @@ out: | |||
| 826 | return ret; | 846 | return ret; |
| 827 | } | 847 | } |
| 828 | 848 | ||
| 849 | void vmw_kms_write_svga(struct vmw_private *vmw_priv, | ||
| 850 | unsigned width, unsigned height, unsigned pitch, | ||
| 851 | unsigned bbp, unsigned depth) | ||
| 852 | { | ||
| 853 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | ||
| 854 | vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); | ||
| 855 | else if (vmw_fifo_have_pitchlock(vmw_priv)) | ||
| 856 | iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); | ||
| 857 | vmw_write(vmw_priv, SVGA_REG_WIDTH, width); | ||
| 858 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); | ||
| 859 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp); | ||
| 860 | vmw_write(vmw_priv, SVGA_REG_DEPTH, depth); | ||
| 861 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 862 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 863 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 864 | } | ||
| 865 | |||
| 829 | int vmw_kms_save_vga(struct vmw_private *vmw_priv) | 866 | int vmw_kms_save_vga(struct vmw_private *vmw_priv) |
| 830 | { | 867 | { |
| 831 | /* | 868 | struct vmw_vga_topology_state *save; |
| 832 | * setup a single multimon monitor with the size | 869 | uint32_t i; |
| 833 | * of 0x0, this stops the UI from resizing when we | ||
| 834 | * change the framebuffer size | ||
| 835 | */ | ||
| 836 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
| 837 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
| 838 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | ||
| 839 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
| 840 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 841 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 842 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 843 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 844 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 845 | } | ||
| 846 | 870 | ||
| 847 | vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); | 871 | vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); |
| 848 | vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); | 872 | vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); |
| 849 | vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); | ||
| 850 | vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); | 873 | vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); |
| 874 | vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); | ||
| 851 | vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); | 875 | vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); |
| 852 | vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); | 876 | vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); |
| 853 | vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); | ||
| 854 | vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); | 877 | vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); |
| 878 | vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); | ||
| 879 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | ||
| 880 | vmw_priv->vga_pitchlock = | ||
| 881 | vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); | ||
| 882 | else if (vmw_fifo_have_pitchlock(vmw_priv)) | ||
| 883 | vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + | ||
| 884 | SVGA_FIFO_PITCHLOCK); | ||
| 885 | |||
| 886 | if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) | ||
| 887 | return 0; | ||
| 855 | 888 | ||
| 889 | vmw_priv->num_displays = vmw_read(vmw_priv, | ||
| 890 | SVGA_REG_NUM_GUEST_DISPLAYS); | ||
| 891 | |||
| 892 | for (i = 0; i < vmw_priv->num_displays; ++i) { | ||
| 893 | save = &vmw_priv->vga_save[i]; | ||
| 894 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); | ||
| 895 | save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY); | ||
| 896 | save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X); | ||
| 897 | save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y); | ||
| 898 | save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); | ||
| 899 | save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); | ||
| 900 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 901 | } | ||
| 856 | return 0; | 902 | return 0; |
| 857 | } | 903 | } |
| 858 | 904 | ||
| 859 | int vmw_kms_restore_vga(struct vmw_private *vmw_priv) | 905 | int vmw_kms_restore_vga(struct vmw_private *vmw_priv) |
| 860 | { | 906 | { |
| 907 | struct vmw_vga_topology_state *save; | ||
| 908 | uint32_t i; | ||
| 909 | |||
| 861 | vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); | 910 | vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); |
| 862 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); | 911 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); |
| 863 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); | ||
| 864 | vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); | 912 | vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); |
| 913 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); | ||
| 865 | vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); | 914 | vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); |
| 866 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); | 915 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); |
| 867 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); | 916 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); |
| 868 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); | 917 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); |
| 918 | if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) | ||
| 919 | vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, | ||
| 920 | vmw_priv->vga_pitchlock); | ||
| 921 | else if (vmw_fifo_have_pitchlock(vmw_priv)) | ||
| 922 | iowrite32(vmw_priv->vga_pitchlock, | ||
| 923 | vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); | ||
| 924 | |||
| 925 | if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) | ||
| 926 | return 0; | ||
| 869 | 927 | ||
| 870 | /* TODO check for multimon */ | 928 | for (i = 0; i < vmw_priv->num_displays; ++i) { |
| 871 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); | 929 | save = &vmw_priv->vga_save[i]; |
| 930 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); | ||
| 931 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary); | ||
| 932 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x); | ||
| 933 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y); | ||
| 934 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width); | ||
| 935 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height); | ||
| 936 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 937 | } | ||
| 872 | 938 | ||
| 873 | return 0; | 939 | return 0; |
| 874 | } | 940 | } |
| 941 | |||
| 942 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | ||
| 943 | struct drm_file *file_priv) | ||
| 944 | { | ||
| 945 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 946 | struct drm_vmw_update_layout_arg *arg = | ||
| 947 | (struct drm_vmw_update_layout_arg *)data; | ||
| 948 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
| 949 | void __user *user_rects; | ||
| 950 | struct drm_vmw_rect *rects; | ||
| 951 | unsigned rects_size; | ||
| 952 | int ret; | ||
| 953 | |||
| 954 | ret = ttm_read_lock(&vmaster->lock, true); | ||
| 955 | if (unlikely(ret != 0)) | ||
| 956 | return ret; | ||
| 957 | |||
| 958 | if (!arg->num_outputs) { | ||
| 959 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; | ||
| 960 | vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect); | ||
| 961 | goto out_unlock; | ||
| 962 | } | ||
| 963 | |||
| 964 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); | ||
| 965 | rects = kzalloc(rects_size, GFP_KERNEL); | ||
| 966 | if (unlikely(!rects)) { | ||
| 967 | ret = -ENOMEM; | ||
| 968 | goto out_unlock; | ||
| 969 | } | ||
| 970 | |||
| 971 | user_rects = (void __user *)(unsigned long)arg->rects; | ||
| 972 | ret = copy_from_user(rects, user_rects, rects_size); | ||
| 973 | if (unlikely(ret != 0)) { | ||
| 974 | DRM_ERROR("Failed to get rects.\n"); | ||
| 975 | goto out_free; | ||
| 976 | } | ||
| 977 | |||
| 978 | vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects); | ||
| 979 | |||
| 980 | out_free: | ||
| 981 | kfree(rects); | ||
| 982 | out_unlock: | ||
| 983 | ttm_read_unlock(&vmaster->lock); | ||
| 984 | return ret; | ||
| 985 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 8b95249f0531..8a398a0339b6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
| @@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
| 94 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); | 94 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); |
| 95 | 95 | ||
| 96 | /* | 96 | /* |
| 97 | * Legacy display unit functions - vmwgfx_ldu.h | 97 | * Legacy display unit functions - vmwgfx_ldu.c |
| 98 | */ | 98 | */ |
| 99 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); | 99 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); |
| 100 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); | 100 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); |
| 101 | int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
| 102 | struct drm_vmw_rect *rects); | ||
| 101 | 103 | ||
| 102 | #endif | 104 | #endif |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 90891593bf6c..cfaf690a5b2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
| @@ -38,6 +38,7 @@ struct vmw_legacy_display { | |||
| 38 | struct list_head active; | 38 | struct list_head active; |
| 39 | 39 | ||
| 40 | unsigned num_active; | 40 | unsigned num_active; |
| 41 | unsigned last_num_active; | ||
| 41 | 42 | ||
| 42 | struct vmw_framebuffer *fb; | 43 | struct vmw_framebuffer *fb; |
| 43 | }; | 44 | }; |
| @@ -48,9 +49,12 @@ struct vmw_legacy_display { | |||
| 48 | struct vmw_legacy_display_unit { | 49 | struct vmw_legacy_display_unit { |
| 49 | struct vmw_display_unit base; | 50 | struct vmw_display_unit base; |
| 50 | 51 | ||
| 51 | struct list_head active; | 52 | unsigned pref_width; |
| 53 | unsigned pref_height; | ||
| 54 | bool pref_active; | ||
| 55 | struct drm_display_mode *pref_mode; | ||
| 52 | 56 | ||
| 53 | unsigned unit; | 57 | struct list_head active; |
| 54 | }; | 58 | }; |
| 55 | 59 | ||
| 56 | static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) | 60 | static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) |
| @@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) | |||
| 88 | { | 92 | { |
| 89 | struct vmw_legacy_display *lds = dev_priv->ldu_priv; | 93 | struct vmw_legacy_display *lds = dev_priv->ldu_priv; |
| 90 | struct vmw_legacy_display_unit *entry; | 94 | struct vmw_legacy_display_unit *entry; |
| 91 | struct drm_crtc *crtc; | 95 | struct drm_framebuffer *fb = NULL; |
| 96 | struct drm_crtc *crtc = NULL; | ||
| 92 | int i = 0; | 97 | int i = 0; |
| 93 | 98 | ||
| 94 | /* to stop the screen from changing size on resize */ | 99 | /* If there is no display topology the host just assumes |
| 95 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); | 100 | * that the guest will set the same layout as the host. |
| 96 | for (i = 0; i < lds->num_active; i++) { | 101 | */ |
| 97 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); | 102 | if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) { |
| 98 | vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); | 103 | int w = 0, h = 0; |
| 99 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | 104 | list_for_each_entry(entry, &lds->active, active) { |
| 100 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | 105 | crtc = &entry->base.crtc; |
| 101 | vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); | 106 | w = max(w, crtc->x + crtc->mode.hdisplay); |
| 102 | vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | 107 | h = max(h, crtc->y + crtc->mode.vdisplay); |
| 103 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | 108 | i++; |
| 109 | } | ||
| 110 | |||
| 111 | if (crtc == NULL) | ||
| 112 | return 0; | ||
| 113 | fb = entry->base.crtc.fb; | ||
| 114 | |||
| 115 | vmw_kms_write_svga(dev_priv, w, h, fb->pitch, | ||
| 116 | fb->bits_per_pixel, fb->depth); | ||
| 117 | |||
| 118 | return 0; | ||
| 104 | } | 119 | } |
| 105 | 120 | ||
| 106 | /* Now set the mode */ | 121 | if (!list_empty(&lds->active)) { |
| 107 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); | 122 | entry = list_entry(lds->active.next, typeof(*entry), active); |
| 123 | fb = entry->base.crtc.fb; | ||
| 124 | |||
| 125 | vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch, | ||
| 126 | fb->bits_per_pixel, fb->depth); | ||
| 127 | } | ||
| 128 | |||
| 129 | /* Make sure we always show something. */ | ||
| 130 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, | ||
| 131 | lds->num_active ? lds->num_active : 1); | ||
| 132 | |||
| 108 | i = 0; | 133 | i = 0; |
| 109 | list_for_each_entry(entry, &lds->active, active) { | 134 | list_for_each_entry(entry, &lds->active, active) { |
| 110 | crtc = &entry->base.crtc; | 135 | crtc = &entry->base.crtc; |
| @@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) | |||
| 120 | i++; | 145 | i++; |
| 121 | } | 146 | } |
| 122 | 147 | ||
| 148 | BUG_ON(i != lds->num_active); | ||
| 149 | |||
| 150 | lds->last_num_active = lds->num_active; | ||
| 151 | |||
| 123 | return 0; | 152 | return 0; |
| 124 | } | 153 | } |
| 125 | 154 | ||
| @@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv, | |||
| 130 | if (list_empty(&ldu->active)) | 159 | if (list_empty(&ldu->active)) |
| 131 | return 0; | 160 | return 0; |
| 132 | 161 | ||
| 162 | /* Must init otherwise list_empty(&ldu->active) will not work. */ | ||
| 133 | list_del_init(&ldu->active); | 163 | list_del_init(&ldu->active); |
| 134 | if (--(ld->num_active) == 0) { | 164 | if (--(ld->num_active) == 0) { |
| 135 | BUG_ON(!ld->fb); | 165 | BUG_ON(!ld->fb); |
| @@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv, | |||
| 149 | struct vmw_legacy_display_unit *entry; | 179 | struct vmw_legacy_display_unit *entry; |
| 150 | struct list_head *at; | 180 | struct list_head *at; |
| 151 | 181 | ||
| 182 | BUG_ON(!ld->num_active && ld->fb); | ||
| 183 | if (vfb != ld->fb) { | ||
| 184 | if (ld->fb && ld->fb->unpin) | ||
| 185 | ld->fb->unpin(ld->fb); | ||
| 186 | if (vfb->pin) | ||
| 187 | vfb->pin(vfb); | ||
| 188 | ld->fb = vfb; | ||
| 189 | } | ||
| 190 | |||
| 152 | if (!list_empty(&ldu->active)) | 191 | if (!list_empty(&ldu->active)) |
| 153 | return 0; | 192 | return 0; |
| 154 | 193 | ||
| 155 | at = &ld->active; | 194 | at = &ld->active; |
| 156 | list_for_each_entry(entry, &ld->active, active) { | 195 | list_for_each_entry(entry, &ld->active, active) { |
| 157 | if (entry->unit > ldu->unit) | 196 | if (entry->base.unit > ldu->base.unit) |
| 158 | break; | 197 | break; |
| 159 | 198 | ||
| 160 | at = &entry->active; | 199 | at = &entry->active; |
| 161 | } | 200 | } |
| 162 | 201 | ||
| 163 | list_add(&ldu->active, at); | 202 | list_add(&ldu->active, at); |
| 164 | if (ld->num_active++ == 0) { | 203 | |
| 165 | BUG_ON(ld->fb); | 204 | ld->num_active++; |
| 166 | if (vfb->pin) | ||
| 167 | vfb->pin(vfb); | ||
| 168 | ld->fb = vfb; | ||
| 169 | } | ||
| 170 | 205 | ||
| 171 | return 0; | 206 | return 0; |
| 172 | } | 207 | } |
| @@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
| 208 | 243 | ||
| 209 | /* ldu only supports one fb active at the time */ | 244 | /* ldu only supports one fb active at the time */ |
| 210 | if (dev_priv->ldu_priv->fb && vfb && | 245 | if (dev_priv->ldu_priv->fb && vfb && |
| 246 | !(dev_priv->ldu_priv->num_active == 1 && | ||
| 247 | !list_empty(&ldu->active)) && | ||
| 211 | dev_priv->ldu_priv->fb != vfb) { | 248 | dev_priv->ldu_priv->fb != vfb) { |
| 212 | DRM_ERROR("Multiple framebuffers not supported\n"); | 249 | DRM_ERROR("Multiple framebuffers not supported\n"); |
| 213 | return -EINVAL; | 250 | return -EINVAL; |
| @@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) | |||
| 300 | static enum drm_connector_status | 337 | static enum drm_connector_status |
| 301 | vmw_ldu_connector_detect(struct drm_connector *connector) | 338 | vmw_ldu_connector_detect(struct drm_connector *connector) |
| 302 | { | 339 | { |
| 303 | /* XXX vmwctrl should control connection status */ | 340 | if (vmw_connector_to_ldu(connector)->pref_active) |
| 304 | if (vmw_connector_to_ldu(connector)->base.unit == 0) | ||
| 305 | return connector_status_connected; | 341 | return connector_status_connected; |
| 306 | return connector_status_disconnected; | 342 | return connector_status_disconnected; |
| 307 | } | 343 | } |
| @@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { | |||
| 312 | 752, 800, 0, 480, 489, 492, 525, 0, | 348 | 752, 800, 0, 480, 489, 492, 525, 0, |
| 313 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | 349 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
| 314 | /* 800x600@60Hz */ | 350 | /* 800x600@60Hz */ |
| 315 | { DRM_MODE("800x600", | 351 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, |
| 316 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | 352 | 968, 1056, 0, 600, 601, 605, 628, 0, |
| 317 | 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, | 353 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
| 318 | 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 319 | /* 1024x768@60Hz */ | 354 | /* 1024x768@60Hz */ |
| 320 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, | 355 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, |
| 321 | 1184, 1344, 0, 768, 771, 777, 806, 0, | 356 | 1184, 1344, 0, 768, 771, 777, 806, 0, |
| @@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { | |||
| 387 | static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | 422 | static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, |
| 388 | uint32_t max_width, uint32_t max_height) | 423 | uint32_t max_width, uint32_t max_height) |
| 389 | { | 424 | { |
| 425 | struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); | ||
| 390 | struct drm_device *dev = connector->dev; | 426 | struct drm_device *dev = connector->dev; |
| 391 | struct drm_display_mode *mode = NULL; | 427 | struct drm_display_mode *mode = NULL; |
| 428 | struct drm_display_mode prefmode = { DRM_MODE("preferred", | ||
| 429 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
| 430 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
| 431 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) | ||
| 432 | }; | ||
| 392 | int i; | 433 | int i; |
| 393 | 434 | ||
| 435 | /* Add preferred mode */ | ||
| 436 | { | ||
| 437 | mode = drm_mode_duplicate(dev, &prefmode); | ||
| 438 | if (!mode) | ||
| 439 | return 0; | ||
| 440 | mode->hdisplay = ldu->pref_width; | ||
| 441 | mode->vdisplay = ldu->pref_height; | ||
| 442 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
| 443 | drm_mode_probed_add(connector, mode); | ||
| 444 | |||
| 445 | if (ldu->pref_mode) { | ||
| 446 | list_del_init(&ldu->pref_mode->head); | ||
| 447 | drm_mode_destroy(dev, ldu->pref_mode); | ||
| 448 | } | ||
| 449 | |||
| 450 | ldu->pref_mode = mode; | ||
| 451 | } | ||
| 452 | |||
| 394 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { | 453 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { |
| 395 | if (vmw_ldu_connector_builtin[i].hdisplay > max_width || | 454 | if (vmw_ldu_connector_builtin[i].hdisplay > max_width || |
| 396 | vmw_ldu_connector_builtin[i].vdisplay > max_height) | 455 | vmw_ldu_connector_builtin[i].vdisplay > max_height) |
| @@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
| 443 | if (!ldu) | 502 | if (!ldu) |
| 444 | return -ENOMEM; | 503 | return -ENOMEM; |
| 445 | 504 | ||
| 446 | ldu->unit = unit; | 505 | ldu->base.unit = unit; |
| 447 | crtc = &ldu->base.crtc; | 506 | crtc = &ldu->base.crtc; |
| 448 | encoder = &ldu->base.encoder; | 507 | encoder = &ldu->base.encoder; |
| 449 | connector = &ldu->base.connector; | 508 | connector = &ldu->base.connector; |
| 450 | 509 | ||
| 510 | INIT_LIST_HEAD(&ldu->active); | ||
| 511 | |||
| 512 | ldu->pref_active = (unit == 0); | ||
| 513 | ldu->pref_width = 800; | ||
| 514 | ldu->pref_height = 600; | ||
| 515 | ldu->pref_mode = NULL; | ||
| 516 | |||
| 451 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | 517 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, |
| 452 | DRM_MODE_CONNECTOR_LVDS); | 518 | DRM_MODE_CONNECTOR_LVDS); |
| 453 | /* Initial status */ | 519 | connector->status = vmw_ldu_connector_detect(connector); |
| 454 | if (unit == 0) | ||
| 455 | connector->status = connector_status_connected; | ||
| 456 | else | ||
| 457 | connector->status = connector_status_disconnected; | ||
| 458 | 520 | ||
| 459 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, | 521 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, |
| 460 | DRM_MODE_ENCODER_LVDS); | 522 | DRM_MODE_ENCODER_LVDS); |
| @@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
| 462 | encoder->possible_crtcs = (1 << unit); | 524 | encoder->possible_crtcs = (1 << unit); |
| 463 | encoder->possible_clones = 0; | 525 | encoder->possible_clones = 0; |
| 464 | 526 | ||
| 465 | INIT_LIST_HEAD(&ldu->active); | ||
| 466 | |||
| 467 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); | 527 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); |
| 468 | 528 | ||
| 469 | drm_connector_attach_property(connector, | 529 | drm_connector_attach_property(connector, |
| @@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
| 487 | 547 | ||
| 488 | INIT_LIST_HEAD(&dev_priv->ldu_priv->active); | 548 | INIT_LIST_HEAD(&dev_priv->ldu_priv->active); |
| 489 | dev_priv->ldu_priv->num_active = 0; | 549 | dev_priv->ldu_priv->num_active = 0; |
| 550 | dev_priv->ldu_priv->last_num_active = 0; | ||
| 490 | dev_priv->ldu_priv->fb = NULL; | 551 | dev_priv->ldu_priv->fb = NULL; |
| 491 | 552 | ||
| 492 | drm_mode_create_dirty_info_property(dev_priv->dev); | 553 | drm_mode_create_dirty_info_property(dev_priv->dev); |
| 493 | 554 | ||
| 494 | vmw_ldu_init(dev_priv, 0); | 555 | vmw_ldu_init(dev_priv, 0); |
| 495 | vmw_ldu_init(dev_priv, 1); | 556 | /* for old hardware without multimon only enable one display */ |
| 496 | vmw_ldu_init(dev_priv, 2); | 557 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
| 497 | vmw_ldu_init(dev_priv, 3); | 558 | vmw_ldu_init(dev_priv, 1); |
| 498 | vmw_ldu_init(dev_priv, 4); | 559 | vmw_ldu_init(dev_priv, 2); |
| 499 | vmw_ldu_init(dev_priv, 5); | 560 | vmw_ldu_init(dev_priv, 3); |
| 500 | vmw_ldu_init(dev_priv, 6); | 561 | vmw_ldu_init(dev_priv, 4); |
| 501 | vmw_ldu_init(dev_priv, 7); | 562 | vmw_ldu_init(dev_priv, 5); |
| 563 | vmw_ldu_init(dev_priv, 6); | ||
| 564 | vmw_ldu_init(dev_priv, 7); | ||
| 565 | } | ||
| 502 | 566 | ||
| 503 | return 0; | 567 | return 0; |
| 504 | } | 568 | } |
| @@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | |||
| 514 | 578 | ||
| 515 | return 0; | 579 | return 0; |
| 516 | } | 580 | } |
| 581 | |||
| 582 | int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
| 583 | struct drm_vmw_rect *rects) | ||
| 584 | { | ||
| 585 | struct drm_device *dev = dev_priv->dev; | ||
| 586 | struct vmw_legacy_display_unit *ldu; | ||
| 587 | struct drm_connector *con; | ||
| 588 | int i; | ||
| 589 | |||
| 590 | mutex_lock(&dev->mode_config.mutex); | ||
| 591 | |||
| 592 | #if 0 | ||
| 593 | DRM_INFO("%s: new layout ", __func__); | ||
| 594 | for (i = 0; i < (int)num; i++) | ||
| 595 | DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, | ||
| 596 | rects[i].w, rects[i].h); | ||
| 597 | DRM_INFO("\n"); | ||
| 598 | #else | ||
| 599 | (void)i; | ||
| 600 | #endif | ||
| 601 | |||
| 602 | list_for_each_entry(con, &dev->mode_config.connector_list, head) { | ||
| 603 | ldu = vmw_connector_to_ldu(con); | ||
| 604 | if (num > ldu->base.unit) { | ||
| 605 | ldu->pref_width = rects[ldu->base.unit].w; | ||
| 606 | ldu->pref_height = rects[ldu->base.unit].h; | ||
| 607 | ldu->pref_active = true; | ||
| 608 | } else { | ||
| 609 | ldu->pref_width = 800; | ||
| 610 | ldu->pref_height = 600; | ||
| 611 | ldu->pref_active = false; | ||
| 612 | } | ||
| 613 | con->status = vmw_ldu_connector_detect(con); | ||
| 614 | } | ||
| 615 | |||
| 616 | mutex_unlock(&dev->mode_config.mutex); | ||
| 617 | |||
| 618 | return 0; | ||
| 619 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index ad566c85b075..df2036ed18d5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
| @@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, | |||
| 358 | if (stream->buf != buf) | 358 | if (stream->buf != buf) |
| 359 | stream->buf = vmw_dmabuf_reference(buf); | 359 | stream->buf = vmw_dmabuf_reference(buf); |
| 360 | stream->saved = *arg; | 360 | stream->saved = *arg; |
| 361 | /* stream is no longer stopped/paused */ | ||
| 362 | stream->paused = false; | ||
| 361 | 363 | ||
| 362 | return 0; | 364 | return 0; |
| 363 | } | 365 | } |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 441e38c95a85..b87569e96b16 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
| @@ -1,12 +1,32 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * vgaarb.c | 2 | * vgaarb.c: Implements the VGA arbitration. For details refer to |
| 3 | * Documentation/vgaarbiter.txt | ||
| 4 | * | ||
| 3 | * | 5 | * |
| 4 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> | 6 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> |
| 5 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> | 7 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> |
| 6 | * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> | 8 | * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> |
| 7 | * | 9 | * |
| 8 | * Implements the VGA arbitration. For details refer to | 10 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 9 | * Documentation/vgaarbiter.txt | 11 | * copy of this software and associated documentation files (the "Software"), |
| 12 | * to deal in the Software without restriction, including without limitation | ||
| 13 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 14 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 15 | * Software is furnished to do so, subject to the following conditions: | ||
| 16 | * | ||
| 17 | * The above copyright notice and this permission notice (including the next | ||
| 18 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 19 | * Software. | ||
| 20 | * | ||
| 21 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 22 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 23 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 24 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 25 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 26 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 27 | * DEALINGS | ||
| 28 | * IN THE SOFTWARE. | ||
| 29 | * | ||
| 10 | */ | 30 | */ |
| 11 | 31 | ||
| 12 | #include <linux/module.h> | 32 | #include <linux/module.h> |
| @@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, | |||
| 155 | (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) | 175 | (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) |
| 156 | rsrc |= VGA_RSRC_LEGACY_MEM; | 176 | rsrc |= VGA_RSRC_LEGACY_MEM; |
| 157 | 177 | ||
| 158 | pr_devel("%s: %d\n", __func__, rsrc); | 178 | pr_debug("%s: %d\n", __func__, rsrc); |
| 159 | pr_devel("%s: owns: %d\n", __func__, vgadev->owns); | 179 | pr_debug("%s: owns: %d\n", __func__, vgadev->owns); |
| 160 | 180 | ||
| 161 | /* Check what resources we need to acquire */ | 181 | /* Check what resources we need to acquire */ |
| 162 | wants = rsrc & ~vgadev->owns; | 182 | wants = rsrc & ~vgadev->owns; |
| @@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) | |||
| 268 | { | 288 | { |
| 269 | unsigned int old_locks = vgadev->locks; | 289 | unsigned int old_locks = vgadev->locks; |
| 270 | 290 | ||
| 271 | pr_devel("%s\n", __func__); | 291 | pr_debug("%s\n", __func__); |
| 272 | 292 | ||
| 273 | /* Update our counters, and account for equivalent legacy resources | 293 | /* Update our counters, and account for equivalent legacy resources |
| 274 | * if we decode them | 294 | * if we decode them |
| @@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, | |||
| 575 | else | 595 | else |
| 576 | vga_decode_count--; | 596 | vga_decode_count--; |
| 577 | } | 597 | } |
| 598 | pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); | ||
| 578 | } | 599 | } |
| 579 | 600 | ||
| 580 | void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) | 601 | void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) |
| @@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 831 | curr_pos += 5; | 852 | curr_pos += 5; |
| 832 | remaining -= 5; | 853 | remaining -= 5; |
| 833 | 854 | ||
| 834 | pr_devel("client 0x%p called 'lock'\n", priv); | 855 | pr_debug("client 0x%p called 'lock'\n", priv); |
| 835 | 856 | ||
| 836 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | 857 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { |
| 837 | ret_val = -EPROTO; | 858 | ret_val = -EPROTO; |
| @@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 867 | curr_pos += 7; | 888 | curr_pos += 7; |
| 868 | remaining -= 7; | 889 | remaining -= 7; |
| 869 | 890 | ||
| 870 | pr_devel("client 0x%p called 'unlock'\n", priv); | 891 | pr_debug("client 0x%p called 'unlock'\n", priv); |
| 871 | 892 | ||
| 872 | if (strncmp(curr_pos, "all", 3) == 0) | 893 | if (strncmp(curr_pos, "all", 3) == 0) |
| 873 | io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; | 894 | io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; |
| @@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 917 | curr_pos += 8; | 938 | curr_pos += 8; |
| 918 | remaining -= 8; | 939 | remaining -= 8; |
| 919 | 940 | ||
| 920 | pr_devel("client 0x%p called 'trylock'\n", priv); | 941 | pr_debug("client 0x%p called 'trylock'\n", priv); |
| 921 | 942 | ||
| 922 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | 943 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { |
| 923 | ret_val = -EPROTO; | 944 | ret_val = -EPROTO; |
| @@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 961 | 982 | ||
| 962 | curr_pos += 7; | 983 | curr_pos += 7; |
| 963 | remaining -= 7; | 984 | remaining -= 7; |
| 964 | pr_devel("client 0x%p called 'target'\n", priv); | 985 | pr_debug("client 0x%p called 'target'\n", priv); |
| 965 | /* if target is default */ | 986 | /* if target is default */ |
| 966 | if (!strncmp(curr_pos, "default", 7)) | 987 | if (!strncmp(curr_pos, "default", 7)) |
| 967 | pdev = pci_dev_get(vga_default_device()); | 988 | pdev = pci_dev_get(vga_default_device()); |
| @@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 971 | ret_val = -EPROTO; | 992 | ret_val = -EPROTO; |
| 972 | goto done; | 993 | goto done; |
| 973 | } | 994 | } |
| 974 | pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, | 995 | pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, |
| 975 | domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); | 996 | domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); |
| 976 | 997 | ||
| 977 | pbus = pci_find_bus(domain, bus); | 998 | pbus = pci_find_bus(domain, bus); |
| 978 | pr_devel("vgaarb: pbus %p\n", pbus); | 999 | pr_debug("vgaarb: pbus %p\n", pbus); |
| 979 | if (pbus == NULL) { | 1000 | if (pbus == NULL) { |
| 980 | pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", | 1001 | pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", |
| 981 | domain, bus); | 1002 | domain, bus); |
| @@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 983 | goto done; | 1004 | goto done; |
| 984 | } | 1005 | } |
| 985 | pdev = pci_get_slot(pbus, devfn); | 1006 | pdev = pci_get_slot(pbus, devfn); |
| 986 | pr_devel("vgaarb: pdev %p\n", pdev); | 1007 | pr_debug("vgaarb: pdev %p\n", pdev); |
| 987 | if (!pdev) { | 1008 | if (!pdev) { |
| 988 | pr_err("vgaarb: invalid PCI address %x:%x\n", | 1009 | pr_err("vgaarb: invalid PCI address %x:%x\n", |
| 989 | bus, devfn); | 1010 | bus, devfn); |
| @@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 993 | } | 1014 | } |
| 994 | 1015 | ||
| 995 | vgadev = vgadev_find(pdev); | 1016 | vgadev = vgadev_find(pdev); |
| 996 | pr_devel("vgaarb: vgadev %p\n", vgadev); | 1017 | pr_debug("vgaarb: vgadev %p\n", vgadev); |
| 997 | if (vgadev == NULL) { | 1018 | if (vgadev == NULL) { |
| 998 | pr_err("vgaarb: this pci device is not a vga device\n"); | 1019 | pr_err("vgaarb: this pci device is not a vga device\n"); |
| 999 | pci_dev_put(pdev); | 1020 | pci_dev_put(pdev); |
| @@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, | |||
| 1029 | } else if (strncmp(curr_pos, "decodes ", 8) == 0) { | 1050 | } else if (strncmp(curr_pos, "decodes ", 8) == 0) { |
| 1030 | curr_pos += 8; | 1051 | curr_pos += 8; |
| 1031 | remaining -= 8; | 1052 | remaining -= 8; |
| 1032 | pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); | 1053 | pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv); |
| 1033 | 1054 | ||
| 1034 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { | 1055 | if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { |
| 1035 | ret_val = -EPROTO; | 1056 | ret_val = -EPROTO; |
| @@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) | |||
| 1058 | { | 1079 | { |
| 1059 | struct vga_arb_private *priv = file->private_data; | 1080 | struct vga_arb_private *priv = file->private_data; |
| 1060 | 1081 | ||
| 1061 | pr_devel("%s\n", __func__); | 1082 | pr_debug("%s\n", __func__); |
| 1062 | 1083 | ||
| 1063 | if (priv == NULL) | 1084 | if (priv == NULL) |
| 1064 | return -ENODEV; | 1085 | return -ENODEV; |
| @@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file) | |||
| 1071 | struct vga_arb_private *priv; | 1092 | struct vga_arb_private *priv; |
| 1072 | unsigned long flags; | 1093 | unsigned long flags; |
| 1073 | 1094 | ||
| 1074 | pr_devel("%s\n", __func__); | 1095 | pr_debug("%s\n", __func__); |
| 1075 | 1096 | ||
| 1076 | priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); | 1097 | priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); |
| 1077 | if (priv == NULL) | 1098 | if (priv == NULL) |
| @@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) | |||
| 1101 | unsigned long flags; | 1122 | unsigned long flags; |
| 1102 | int i; | 1123 | int i; |
| 1103 | 1124 | ||
| 1104 | pr_devel("%s\n", __func__); | 1125 | pr_debug("%s\n", __func__); |
| 1105 | 1126 | ||
| 1106 | if (priv == NULL) | 1127 | if (priv == NULL) |
| 1107 | return -ENODEV; | 1128 | return -ENODEV; |
| @@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file) | |||
| 1112 | uc = &priv->cards[i]; | 1133 | uc = &priv->cards[i]; |
| 1113 | if (uc->pdev == NULL) | 1134 | if (uc->pdev == NULL) |
| 1114 | continue; | 1135 | continue; |
| 1115 | pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n", | 1136 | pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n", |
| 1116 | uc->io_cnt, uc->mem_cnt); | 1137 | uc->io_cnt, uc->mem_cnt); |
| 1117 | while (uc->io_cnt--) | 1138 | while (uc->io_cnt--) |
| 1118 | vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); | 1139 | vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); |
| @@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action, | |||
| 1165 | struct pci_dev *pdev = to_pci_dev(dev); | 1186 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1166 | bool notify = false; | 1187 | bool notify = false; |
| 1167 | 1188 | ||
| 1168 | pr_devel("%s\n", __func__); | 1189 | pr_debug("%s\n", __func__); |
| 1169 | 1190 | ||
| 1170 | /* For now we're only intereted in devices added and removed. I didn't | 1191 | /* For now we're only intereted in devices added and removed. I didn't |
| 1171 | * test this thing here, so someone needs to double check for the | 1192 | * test this thing here, so someone needs to double check for the |
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index 00aea6f7d1f1..1312eda57ba6 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c | |||
| @@ -232,7 +232,7 @@ static int __devinit fun_probe(struct of_device *ofdev, | |||
| 232 | if (!fun) | 232 | if (!fun) |
| 233 | return -ENOMEM; | 233 | return -ENOMEM; |
| 234 | 234 | ||
| 235 | ret = of_address_to_resource(ofdev->node, 0, &io_res); | 235 | ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res); |
| 236 | if (ret) { | 236 | if (ret) { |
| 237 | dev_err(&ofdev->dev, "can't get IO base\n"); | 237 | dev_err(&ofdev->dev, "can't get IO base\n"); |
| 238 | goto err1; | 238 | goto err1; |
| @@ -244,7 +244,8 @@ static int __devinit fun_probe(struct of_device *ofdev, | |||
| 244 | goto err1; | 244 | goto err1; |
| 245 | } | 245 | } |
| 246 | 246 | ||
| 247 | prop = of_get_property(ofdev->node, "fsl,upm-addr-offset", &size); | 247 | prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset", |
| 248 | &size); | ||
| 248 | if (!prop || size != sizeof(uint32_t)) { | 249 | if (!prop || size != sizeof(uint32_t)) { |
| 249 | dev_err(&ofdev->dev, "can't get UPM address offset\n"); | 250 | dev_err(&ofdev->dev, "can't get UPM address offset\n"); |
| 250 | ret = -EINVAL; | 251 | ret = -EINVAL; |
| @@ -252,7 +253,7 @@ static int __devinit fun_probe(struct of_device *ofdev, | |||
| 252 | } | 253 | } |
| 253 | fun->upm_addr_offset = *prop; | 254 | fun->upm_addr_offset = *prop; |
| 254 | 255 | ||
| 255 | prop = of_get_property(ofdev->node, "fsl,upm-cmd-offset", &size); | 256 | prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size); |
| 256 | if (!prop || size != sizeof(uint32_t)) { | 257 | if (!prop || size != sizeof(uint32_t)) { |
| 257 | dev_err(&ofdev->dev, "can't get UPM command offset\n"); | 258 | dev_err(&ofdev->dev, "can't get UPM command offset\n"); |
| 258 | ret = -EINVAL; | 259 | ret = -EINVAL; |
| @@ -260,7 +261,7 @@ static int __devinit fun_probe(struct of_device *ofdev, | |||
| 260 | } | 261 | } |
| 261 | fun->upm_cmd_offset = *prop; | 262 | fun->upm_cmd_offset = *prop; |
| 262 | 263 | ||
| 263 | prop = of_get_property(ofdev->node, | 264 | prop = of_get_property(ofdev->dev.of_node, |
| 264 | "fsl,upm-addr-line-cs-offsets", &size); | 265 | "fsl,upm-addr-line-cs-offsets", &size); |
| 265 | if (prop && (size / sizeof(uint32_t)) > 0) { | 266 | if (prop && (size / sizeof(uint32_t)) > 0) { |
| 266 | fun->mchip_count = size / sizeof(uint32_t); | 267 | fun->mchip_count = size / sizeof(uint32_t); |
| @@ -276,7 +277,7 @@ static int __devinit fun_probe(struct of_device *ofdev, | |||
| 276 | 277 | ||
| 277 | for (i = 0; i < fun->mchip_count; i++) { | 278 | for (i = 0; i < fun->mchip_count; i++) { |
| 278 | fun->rnb_gpio[i] = -1; | 279 | fun->rnb_gpio[i] = -1; |
| 279 | rnb_gpio = of_get_gpio(ofdev->node, i); | 280 | rnb_gpio = of_get_gpio(ofdev->dev.of_node, i); |
| 280 | if (rnb_gpio >= 0) { | 281 | if (rnb_gpio >= 0) { |
| 281 | ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev)); | 282 | ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev)); |
| 282 | if (ret) { | 283 | if (ret) { |
| @@ -292,13 +293,13 @@ static int __devinit fun_probe(struct of_device *ofdev, | |||
| 292 | } | 293 | } |
| 293 | } | 294 | } |
| 294 | 295 | ||
| 295 | prop = of_get_property(ofdev->node, "chip-delay", NULL); | 296 | prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL); |
| 296 | if (prop) | 297 | if (prop) |
| 297 | fun->chip_delay = *prop; | 298 | fun->chip_delay = *prop; |
| 298 | else | 299 | else |
| 299 | fun->chip_delay = 50; | 300 | fun->chip_delay = 50; |
| 300 | 301 | ||
| 301 | prop = of_get_property(ofdev->node, "fsl,upm-wait-flags", &size); | 302 | prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size); |
| 302 | if (prop && size == sizeof(uint32_t)) | 303 | if (prop && size == sizeof(uint32_t)) |
| 303 | fun->wait_flags = *prop; | 304 | fun->wait_flags = *prop; |
| 304 | else | 305 | else |
| @@ -315,7 +316,7 @@ static int __devinit fun_probe(struct of_device *ofdev, | |||
| 315 | fun->dev = &ofdev->dev; | 316 | fun->dev = &ofdev->dev; |
| 316 | fun->last_ctrl = NAND_CLE; | 317 | fun->last_ctrl = NAND_CLE; |
| 317 | 318 | ||
| 318 | ret = fun_chip_init(fun, ofdev->node, &io_res); | 319 | ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res); |
| 319 | if (ret) | 320 | if (ret) |
| 320 | goto err2; | 321 | goto err2; |
| 321 | 322 | ||
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index 3d0867d829cb..0a130dcaa129 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c | |||
| @@ -650,7 +650,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) | |||
| 650 | static int __devinit mpc5121_nfc_probe(struct of_device *op, | 650 | static int __devinit mpc5121_nfc_probe(struct of_device *op, |
| 651 | const struct of_device_id *match) | 651 | const struct of_device_id *match) |
| 652 | { | 652 | { |
| 653 | struct device_node *rootnode, *dn = op->node; | 653 | struct device_node *rootnode, *dn = op->dev.of_node; |
| 654 | struct device *dev = &op->dev; | 654 | struct device *dev = &op->dev; |
| 655 | struct mpc5121_nfc_prv *prv; | 655 | struct mpc5121_nfc_prv *prv; |
| 656 | struct resource res; | 656 | struct resource res; |
| @@ -889,12 +889,12 @@ static struct of_device_id mpc5121_nfc_match[] __devinitdata = { | |||
| 889 | }; | 889 | }; |
| 890 | 890 | ||
| 891 | static struct of_platform_driver mpc5121_nfc_driver = { | 891 | static struct of_platform_driver mpc5121_nfc_driver = { |
| 892 | .match_table = mpc5121_nfc_match, | ||
| 893 | .probe = mpc5121_nfc_probe, | 892 | .probe = mpc5121_nfc_probe, |
| 894 | .remove = __devexit_p(mpc5121_nfc_remove), | 893 | .remove = __devexit_p(mpc5121_nfc_remove), |
| 895 | .driver = { | 894 | .driver = { |
| 896 | .name = DRV_NAME, | 895 | .name = DRV_NAME, |
| 897 | .owner = THIS_MODULE, | 896 | .owner = THIS_MODULE, |
| 897 | .of_match_table = mpc5121_nfc_match, | ||
| 898 | }, | 898 | }, |
| 899 | }; | 899 | }; |
| 900 | 900 | ||
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index 884852dc7eb4..cc728b12de82 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c | |||
| @@ -183,7 +183,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev, | |||
| 183 | return -ENOMEM; | 183 | return -ENOMEM; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | host->io_base = of_iomap(ofdev->node, 0); | 186 | host->io_base = of_iomap(ofdev->dev.of_node, 0); |
| 187 | if (host->io_base == NULL) { | 187 | if (host->io_base == NULL) { |
| 188 | printk(KERN_ERR "socrates_nand: ioremap failed\n"); | 188 | printk(KERN_ERR "socrates_nand: ioremap failed\n"); |
| 189 | kfree(host); | 189 | kfree(host); |
| @@ -244,7 +244,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev, | |||
| 244 | #ifdef CONFIG_MTD_OF_PARTS | 244 | #ifdef CONFIG_MTD_OF_PARTS |
| 245 | if (num_partitions == 0) { | 245 | if (num_partitions == 0) { |
| 246 | num_partitions = of_mtd_parse_partitions(&ofdev->dev, | 246 | num_partitions = of_mtd_parse_partitions(&ofdev->dev, |
| 247 | ofdev->node, | 247 | ofdev->dev.of_node, |
| 248 | &partitions); | 248 | &partitions); |
| 249 | if (num_partitions < 0) { | 249 | if (num_partitions < 0) { |
| 250 | res = num_partitions; | 250 | res = num_partitions; |
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index 0f90685d3d19..3607340f3da7 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c | |||
| @@ -169,7 +169,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, | |||
| 169 | 169 | ||
| 170 | new_bus->name = "CPM2 Bitbanged MII", | 170 | new_bus->name = "CPM2 Bitbanged MII", |
| 171 | 171 | ||
| 172 | ret = fs_mii_bitbang_init(new_bus, ofdev->node); | 172 | ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node); |
| 173 | if (ret) | 173 | if (ret) |
| 174 | goto out_free_bus; | 174 | goto out_free_bus; |
| 175 | 175 | ||
| @@ -181,7 +181,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, | |||
| 181 | new_bus->parent = &ofdev->dev; | 181 | new_bus->parent = &ofdev->dev; |
| 182 | dev_set_drvdata(&ofdev->dev, new_bus); | 182 | dev_set_drvdata(&ofdev->dev, new_bus); |
| 183 | 183 | ||
| 184 | ret = of_mdiobus_register(new_bus, ofdev->node); | 184 | ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); |
| 185 | if (ret) | 185 | if (ret) |
| 186 | goto out_free_irqs; | 186 | goto out_free_irqs; |
| 187 | 187 | ||
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index 1a648b90b634..25e5e30a18af 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c | |||
| @@ -1157,7 +1157,7 @@ static int __init m8xx_probe(struct of_device *ofdev, | |||
| 1157 | unsigned int i, m, hwirq; | 1157 | unsigned int i, m, hwirq; |
| 1158 | pcmconf8xx_t *pcmcia; | 1158 | pcmconf8xx_t *pcmcia; |
| 1159 | int status; | 1159 | int status; |
| 1160 | struct device_node *np = ofdev->node; | 1160 | struct device_node *np = ofdev->dev.of_node; |
| 1161 | 1161 | ||
| 1162 | pcmcia_info("%s\n", version); | 1162 | pcmcia_info("%s\n", version); |
| 1163 | 1163 | ||
| @@ -1301,7 +1301,7 @@ static struct of_platform_driver m8xx_pcmcia_driver = { | |||
| 1301 | .driver = { | 1301 | .driver = { |
| 1302 | .name = driver_name, | 1302 | .name = driver_name, |
| 1303 | .owner = THIS_MODULE, | 1303 | .owner = THIS_MODULE, |
| 1304 | .match_table = m8xx_pcmcia_match, | 1304 | .of_match_table = m8xx_pcmcia_match, |
| 1305 | }, | 1305 | }, |
| 1306 | .probe = m8xx_probe, | 1306 | .probe = m8xx_probe, |
| 1307 | .remove = m8xx_remove, | 1307 | .remove = m8xx_remove, |
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c index f0dbf9cb8f9c..db5d8c416d26 100644 --- a/drivers/rtc/rtc-mpc5121.c +++ b/drivers/rtc/rtc-mpc5121.c | |||
| @@ -279,7 +279,7 @@ static int __devinit mpc5121_rtc_probe(struct of_device *op, | |||
| 279 | if (!rtc) | 279 | if (!rtc) |
| 280 | return -ENOMEM; | 280 | return -ENOMEM; |
| 281 | 281 | ||
| 282 | rtc->regs = of_iomap(op->node, 0); | 282 | rtc->regs = of_iomap(op->dev.of_node, 0); |
| 283 | if (!rtc->regs) { | 283 | if (!rtc->regs) { |
| 284 | dev_err(&op->dev, "%s: couldn't map io space\n", __func__); | 284 | dev_err(&op->dev, "%s: couldn't map io space\n", __func__); |
| 285 | err = -ENOSYS; | 285 | err = -ENOSYS; |
| @@ -290,7 +290,7 @@ static int __devinit mpc5121_rtc_probe(struct of_device *op, | |||
| 290 | 290 | ||
| 291 | dev_set_drvdata(&op->dev, rtc); | 291 | dev_set_drvdata(&op->dev, rtc); |
| 292 | 292 | ||
| 293 | rtc->irq = irq_of_parse_and_map(op->node, 1); | 293 | rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1); |
| 294 | err = request_irq(rtc->irq, mpc5121_rtc_handler, IRQF_DISABLED, | 294 | err = request_irq(rtc->irq, mpc5121_rtc_handler, IRQF_DISABLED, |
| 295 | "mpc5121-rtc", &op->dev); | 295 | "mpc5121-rtc", &op->dev); |
| 296 | if (err) { | 296 | if (err) { |
| @@ -299,7 +299,7 @@ static int __devinit mpc5121_rtc_probe(struct of_device *op, | |||
| 299 | goto out_dispose; | 299 | goto out_dispose; |
| 300 | } | 300 | } |
| 301 | 301 | ||
| 302 | rtc->irq_periodic = irq_of_parse_and_map(op->node, 0); | 302 | rtc->irq_periodic = irq_of_parse_and_map(op->dev.of_node, 0); |
| 303 | err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd, | 303 | err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd, |
| 304 | IRQF_DISABLED, "mpc5121-rtc_upd", &op->dev); | 304 | IRQF_DISABLED, "mpc5121-rtc_upd", &op->dev); |
| 305 | if (err) { | 305 | if (err) { |
| @@ -365,9 +365,11 @@ static struct of_device_id mpc5121_rtc_match[] __devinitdata = { | |||
| 365 | }; | 365 | }; |
| 366 | 366 | ||
| 367 | static struct of_platform_driver mpc5121_rtc_driver = { | 367 | static struct of_platform_driver mpc5121_rtc_driver = { |
| 368 | .owner = THIS_MODULE, | 368 | .driver = { |
| 369 | .name = "mpc5121-rtc", | 369 | .name = "mpc5121-rtc", |
| 370 | .match_table = mpc5121_rtc_match, | 370 | .owner = THIS_MODULE, |
| 371 | .of_match_table = mpc5121_rtc_match, | ||
| 372 | }, | ||
| 371 | .probe = mpc5121_rtc_probe, | 373 | .probe = mpc5121_rtc_probe, |
| 372 | .remove = __devexit_p(mpc5121_rtc_remove), | 374 | .remove = __devexit_p(mpc5121_rtc_remove), |
| 373 | }; | 375 | }; |
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c index 28a126d2742b..2534b1ec3edd 100644 --- a/drivers/spi/mpc512x_psc_spi.c +++ b/drivers/spi/mpc512x_psc_spi.c | |||
| @@ -512,29 +512,29 @@ static int __init mpc512x_psc_spi_of_probe(struct of_device *op, | |||
| 512 | u64 regaddr64, size64; | 512 | u64 regaddr64, size64; |
| 513 | s16 id = -1; | 513 | s16 id = -1; |
| 514 | 514 | ||
| 515 | regaddr_p = of_get_address(op->node, 0, &size64, NULL); | 515 | regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); |
| 516 | if (!regaddr_p) { | 516 | if (!regaddr_p) { |
| 517 | dev_err(&op->dev, "Invalid PSC address\n"); | 517 | dev_err(&op->dev, "Invalid PSC address\n"); |
| 518 | return -EINVAL; | 518 | return -EINVAL; |
| 519 | } | 519 | } |
| 520 | regaddr64 = of_translate_address(op->node, regaddr_p); | 520 | regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); |
| 521 | 521 | ||
| 522 | /* get PSC id (0..11, used by port_config) */ | 522 | /* get PSC id (0..11, used by port_config) */ |
| 523 | if (op->dev.platform_data == NULL) { | 523 | if (op->dev.platform_data == NULL) { |
| 524 | const u32 *psc_nump; | 524 | const u32 *psc_nump; |
| 525 | 525 | ||
| 526 | psc_nump = of_get_property(op->node, "cell-index", NULL); | 526 | psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); |
| 527 | if (!psc_nump || *psc_nump > 11) { | 527 | if (!psc_nump || *psc_nump > 11) { |
| 528 | dev_err(&op->dev, "mpc512x_psc_spi: Device node %s " | 528 | dev_err(&op->dev, "mpc512x_psc_spi: Device node %s " |
| 529 | "has invalid cell-index property\n", | 529 | "has invalid cell-index property\n", |
| 530 | op->node->full_name); | 530 | op->dev.of_node->full_name); |
| 531 | return -EINVAL; | 531 | return -EINVAL; |
| 532 | } | 532 | } |
| 533 | id = *psc_nump; | 533 | id = *psc_nump; |
| 534 | } | 534 | } |
| 535 | 535 | ||
| 536 | return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, | 536 | return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, |
| 537 | irq_of_parse_and_map(op->node, 0), id); | 537 | irq_of_parse_and_map(op->dev.of_node, 0), id); |
| 538 | } | 538 | } |
| 539 | 539 | ||
| 540 | static int __exit mpc512x_psc_spi_of_remove(struct of_device *op) | 540 | static int __exit mpc512x_psc_spi_of_remove(struct of_device *op) |
| @@ -550,12 +550,12 @@ static struct of_device_id mpc512x_psc_spi_of_match[] = { | |||
| 550 | MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); | 550 | MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); |
| 551 | 551 | ||
| 552 | static struct of_platform_driver mpc512x_psc_spi_of_driver = { | 552 | static struct of_platform_driver mpc512x_psc_spi_of_driver = { |
| 553 | .match_table = mpc512x_psc_spi_of_match, | ||
| 554 | .probe = mpc512x_psc_spi_of_probe, | 553 | .probe = mpc512x_psc_spi_of_probe, |
| 555 | .remove = __exit_p(mpc512x_psc_spi_of_remove), | 554 | .remove = __exit_p(mpc512x_psc_spi_of_remove), |
| 556 | .driver = { | 555 | .driver = { |
| 557 | .name = "mpc512x-psc-spi", | 556 | .name = "mpc512x-psc-spi", |
| 558 | .owner = THIS_MODULE, | 557 | .owner = THIS_MODULE, |
| 558 | .of_match_table = mpc512x_psc_spi_of_match, | ||
| 559 | }, | 559 | }, |
| 560 | }; | 560 | }; |
| 561 | 561 | ||
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c index 19c0b3b34fce..d53466a249d9 100644 --- a/drivers/spi/spi_ppc4xx.c +++ b/drivers/spi/spi_ppc4xx.c | |||
| @@ -397,7 +397,7 @@ static int __init spi_ppc4xx_of_probe(struct of_device *op, | |||
| 397 | struct spi_master *master; | 397 | struct spi_master *master; |
| 398 | struct spi_bitbang *bbp; | 398 | struct spi_bitbang *bbp; |
| 399 | struct resource resource; | 399 | struct resource resource; |
| 400 | struct device_node *np = op->node; | 400 | struct device_node *np = op->dev.of_node; |
| 401 | struct device *dev = &op->dev; | 401 | struct device *dev = &op->dev; |
| 402 | struct device_node *opbnp; | 402 | struct device_node *opbnp; |
| 403 | int ret; | 403 | int ret; |
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 2928523268b5..82506ca297d5 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c | |||
| @@ -2400,7 +2400,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver); | |||
| 2400 | static struct qe_udc __devinit *qe_udc_config(struct of_device *ofdev) | 2400 | static struct qe_udc __devinit *qe_udc_config(struct of_device *ofdev) |
| 2401 | { | 2401 | { |
| 2402 | struct qe_udc *udc; | 2402 | struct qe_udc *udc; |
| 2403 | struct device_node *np = ofdev->node; | 2403 | struct device_node *np = ofdev->dev.of_node; |
| 2404 | unsigned int tmp_addr = 0; | 2404 | unsigned int tmp_addr = 0; |
| 2405 | struct usb_device_para __iomem *usbpram; | 2405 | struct usb_device_para __iomem *usbpram; |
| 2406 | unsigned int i; | 2406 | unsigned int i; |
| @@ -2525,7 +2525,7 @@ static void qe_udc_release(struct device *dev) | |||
| 2525 | static int __devinit qe_udc_probe(struct of_device *ofdev, | 2525 | static int __devinit qe_udc_probe(struct of_device *ofdev, |
| 2526 | const struct of_device_id *match) | 2526 | const struct of_device_id *match) |
| 2527 | { | 2527 | { |
| 2528 | struct device_node *np = ofdev->node; | 2528 | struct device_node *np = ofdev->dev.of_node; |
| 2529 | struct qe_ep *ep; | 2529 | struct qe_ep *ep; |
| 2530 | unsigned int ret = 0; | 2530 | unsigned int ret = 0; |
| 2531 | unsigned int i; | 2531 | unsigned int i; |
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index 013972bbde57..4899f451add9 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c | |||
| @@ -151,7 +151,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = { | |||
| 151 | static int __devinit | 151 | static int __devinit |
| 152 | ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match) | 152 | ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match) |
| 153 | { | 153 | { |
| 154 | struct device_node *dn = op->node; | 154 | struct device_node *dn = op->dev.of_node; |
| 155 | struct usb_hcd *hcd; | 155 | struct usb_hcd *hcd; |
| 156 | struct ehci_hcd *ehci; | 156 | struct ehci_hcd *ehci; |
| 157 | struct resource res; | 157 | struct resource res; |
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c index 51fcc0a2c94a..e45833ce975b 100644 --- a/drivers/video/aty/mach64_accel.c +++ b/drivers/video/aty/mach64_accel.c | |||
| @@ -242,7 +242,7 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) | |||
| 242 | void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 242 | void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
| 243 | { | 243 | { |
| 244 | struct atyfb_par *par = (struct atyfb_par *) info->par; | 244 | struct atyfb_par *par = (struct atyfb_par *) info->par; |
| 245 | u32 color = rect->color, dx = rect->dx, width = rect->width, rotation = 0; | 245 | u32 color, dx = rect->dx, width = rect->width, rotation = 0; |
| 246 | 246 | ||
| 247 | if (par->asleep) | 247 | if (par->asleep) |
| 248 | return; | 248 | return; |
| @@ -253,8 +253,11 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
| 253 | return; | 253 | return; |
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | color |= (rect->color << 8); | 256 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 257 | color |= (rect->color << 16); | 257 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) |
| 258 | color = ((u32 *)(info->pseudo_palette))[rect->color]; | ||
| 259 | else | ||
| 260 | color = rect->color; | ||
| 258 | 261 | ||
| 259 | if (info->var.bits_per_pixel == 24) { | 262 | if (info->var.bits_per_pixel == 24) { |
| 260 | /* In 24 bpp, the engine is in 8 bpp - this requires that all */ | 263 | /* In 24 bpp, the engine is in 8 bpp - this requires that all */ |
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c index 2c371c07f0da..09f1b9b462f4 100644 --- a/drivers/video/bw2.c +++ b/drivers/video/bw2.c | |||
| @@ -275,7 +275,7 @@ static int __devinit bw2_do_default_mode(struct bw2_par *par, | |||
| 275 | 275 | ||
| 276 | static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *match) | 276 | static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *match) |
| 277 | { | 277 | { |
| 278 | struct device_node *dp = op->node; | 278 | struct device_node *dp = op->dev.of_node; |
| 279 | struct fb_info *info; | 279 | struct fb_info *info; |
| 280 | struct bw2_par *par; | 280 | struct bw2_par *par; |
| 281 | int linebytes, err; | 281 | int linebytes, err; |
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c index d12e05b6e63f..e5dc2241194f 100644 --- a/drivers/video/cg14.c +++ b/drivers/video/cg14.c | |||
| @@ -465,7 +465,7 @@ static void cg14_unmap_regs(struct of_device *op, struct fb_info *info, | |||
| 465 | 465 | ||
| 466 | static int __devinit cg14_probe(struct of_device *op, const struct of_device_id *match) | 466 | static int __devinit cg14_probe(struct of_device *op, const struct of_device_id *match) |
| 467 | { | 467 | { |
| 468 | struct device_node *dp = op->node; | 468 | struct device_node *dp = op->dev.of_node; |
| 469 | struct fb_info *info; | 469 | struct fb_info *info; |
| 470 | struct cg14_par *par; | 470 | struct cg14_par *par; |
| 471 | int is_8mb, linebytes, i, err; | 471 | int is_8mb, linebytes, i, err; |
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c index b98f93f7f663..558d73a948a0 100644 --- a/drivers/video/cg3.c +++ b/drivers/video/cg3.c | |||
| @@ -349,7 +349,7 @@ static int __devinit cg3_do_default_mode(struct cg3_par *par) | |||
| 349 | static int __devinit cg3_probe(struct of_device *op, | 349 | static int __devinit cg3_probe(struct of_device *op, |
| 350 | const struct of_device_id *match) | 350 | const struct of_device_id *match) |
| 351 | { | 351 | { |
| 352 | struct device_node *dp = op->node; | 352 | struct device_node *dp = op->dev.of_node; |
| 353 | struct fb_info *info; | 353 | struct fb_info *info; |
| 354 | struct cg3_par *par; | 354 | struct cg3_par *par; |
| 355 | int linebytes, err; | 355 | int linebytes, err; |
diff --git a/drivers/video/leo.c b/drivers/video/leo.c index 3d7895316eaf..9e8bf7d5e249 100644 --- a/drivers/video/leo.c +++ b/drivers/video/leo.c | |||
| @@ -550,7 +550,7 @@ static void leo_unmap_regs(struct of_device *op, struct fb_info *info, | |||
| 550 | static int __devinit leo_probe(struct of_device *op, | 550 | static int __devinit leo_probe(struct of_device *op, |
| 551 | const struct of_device_id *match) | 551 | const struct of_device_id *match) |
| 552 | { | 552 | { |
| 553 | struct device_node *dp = op->node; | 553 | struct device_node *dp = op->dev.of_node; |
| 554 | struct fb_info *info; | 554 | struct fb_info *info; |
| 555 | struct leo_par *par; | 555 | struct leo_par *par; |
| 556 | int linebytes, err; | 556 | int linebytes, err; |
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c index 0540de4f5cb4..4e2b8cc3d460 100644 --- a/drivers/video/mb862xx/mb862xxfb.c +++ b/drivers/video/mb862xx/mb862xxfb.c | |||
| @@ -553,7 +553,7 @@ static int mb862xx_gdc_init(struct mb862xxfb_par *par) | |||
| 553 | static int __devinit of_platform_mb862xx_probe(struct of_device *ofdev, | 553 | static int __devinit of_platform_mb862xx_probe(struct of_device *ofdev, |
| 554 | const struct of_device_id *id) | 554 | const struct of_device_id *id) |
| 555 | { | 555 | { |
| 556 | struct device_node *np = ofdev->node; | 556 | struct device_node *np = ofdev->dev.of_node; |
| 557 | struct device *dev = &ofdev->dev; | 557 | struct device *dev = &ofdev->dev; |
| 558 | struct mb862xxfb_par *par; | 558 | struct mb862xxfb_par *par; |
| 559 | struct fb_info *info; | 559 | struct fb_info *info; |
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c index c85dd408a9b8..6552751e81aa 100644 --- a/drivers/video/p9100.c +++ b/drivers/video/p9100.c | |||
| @@ -251,7 +251,7 @@ static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_no | |||
| 251 | 251 | ||
| 252 | static int __devinit p9100_probe(struct of_device *op, const struct of_device_id *match) | 252 | static int __devinit p9100_probe(struct of_device *op, const struct of_device_id *match) |
| 253 | { | 253 | { |
| 254 | struct device_node *dp = op->node; | 254 | struct device_node *dp = op->dev.of_node; |
| 255 | struct fb_info *info; | 255 | struct fb_info *info; |
| 256 | struct p9100_par *par; | 256 | struct p9100_par *par; |
| 257 | int linebytes, err; | 257 | int linebytes, err; |
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c index ef7a7bd8b503..cc039b33d2d8 100644 --- a/drivers/video/tcx.c +++ b/drivers/video/tcx.c | |||
| @@ -365,7 +365,7 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info, | |||
| 365 | static int __devinit tcx_probe(struct of_device *op, | 365 | static int __devinit tcx_probe(struct of_device *op, |
| 366 | const struct of_device_id *match) | 366 | const struct of_device_id *match) |
| 367 | { | 367 | { |
| 368 | struct device_node *dp = op->node; | 368 | struct device_node *dp = op->dev.of_node; |
| 369 | struct fb_info *info; | 369 | struct fb_info *info; |
| 370 | struct tcx_par *par; | 370 | struct tcx_par *par; |
| 371 | int linebytes, i, err; | 371 | int linebytes, i, err; |
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index ca0f4c6cf5ab..1df284f9c2a1 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c | |||
| @@ -273,7 +273,7 @@ static int __devinit gef_wdt_probe(struct of_device *dev, | |||
| 273 | bus_clk = freq; | 273 | bus_clk = freq; |
| 274 | 274 | ||
| 275 | /* Map devices registers into memory */ | 275 | /* Map devices registers into memory */ |
| 276 | gef_wdt_regs = of_iomap(dev->node, 0); | 276 | gef_wdt_regs = of_iomap(dev->dev.of_node, 0); |
| 277 | if (gef_wdt_regs == NULL) | 277 | if (gef_wdt_regs == NULL) |
| 278 | return -ENOMEM; | 278 | return -ENOMEM; |
| 279 | 279 | ||
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index 6622335773bb..4cda64dd309c 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c | |||
| @@ -189,7 +189,7 @@ static int __devinit mpc8xxx_wdt_probe(struct of_device *ofdev, | |||
| 189 | const struct of_device_id *match) | 189 | const struct of_device_id *match) |
| 190 | { | 190 | { |
| 191 | int ret; | 191 | int ret; |
| 192 | struct device_node *np = ofdev->node; | 192 | struct device_node *np = ofdev->dev.of_node; |
| 193 | struct mpc8xxx_wdt_type *wdt_type = match->data; | 193 | struct mpc8xxx_wdt_type *wdt_type = match->data; |
| 194 | u32 freq = fsl_get_sys_freq(); | 194 | u32 freq = fsl_get_sys_freq(); |
| 195 | bool enabled; | 195 | bool enabled; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index f1ff785b2292..75541af4b3db 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -1952,6 +1952,7 @@ static void cifs_copy_cache_pages(struct address_space *mapping, | |||
| 1952 | bytes_read -= PAGE_CACHE_SIZE; | 1952 | bytes_read -= PAGE_CACHE_SIZE; |
| 1953 | continue; | 1953 | continue; |
| 1954 | } | 1954 | } |
| 1955 | page_cache_release(page); | ||
| 1955 | 1956 | ||
| 1956 | target = kmap_atomic(page, KM_USER0); | 1957 | target = kmap_atomic(page, KM_USER0); |
| 1957 | 1958 | ||
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index dc5873c21e45..1121f7799c6f 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
| @@ -130,4 +130,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev); | |||
| 130 | extern void drm_kms_helper_poll_init(struct drm_device *dev); | 130 | extern void drm_kms_helper_poll_init(struct drm_device *dev); |
| 131 | extern void drm_kms_helper_poll_fini(struct drm_device *dev); | 131 | extern void drm_kms_helper_poll_fini(struct drm_device *dev); |
| 132 | extern void drm_helper_hpd_irq_event(struct drm_device *dev); | 132 | extern void drm_helper_hpd_irq_event(struct drm_device *dev); |
| 133 | |||
| 134 | extern void drm_kms_helper_poll_disable(struct drm_device *dev); | ||
| 135 | extern void drm_kms_helper_poll_enable(struct drm_device *dev); | ||
| 133 | #endif | 136 | #endif |
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index a6a9f4af5ebd..fe917dee723a 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h | |||
| @@ -79,6 +79,7 @@ struct drm_nouveau_gpuobj_free { | |||
| 79 | #define NOUVEAU_GETPARAM_CHIPSET_ID 11 | 79 | #define NOUVEAU_GETPARAM_CHIPSET_ID 11 |
| 80 | #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 | 80 | #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 |
| 81 | #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 | 81 | #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 |
| 82 | #define NOUVEAU_GETPARAM_PTIMER_TIME 14 | ||
| 82 | struct drm_nouveau_getparam { | 83 | struct drm_nouveau_getparam { |
| 83 | uint64_t param; | 84 | uint64_t param; |
| 84 | uint64_t value; | 85 | uint64_t value; |
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h index c7645f480d12..4d0842391edc 100644 --- a/include/drm/vmwgfx_drm.h +++ b/include/drm/vmwgfx_drm.h | |||
| @@ -50,6 +50,8 @@ | |||
| 50 | #define DRM_VMW_EXECBUF 12 | 50 | #define DRM_VMW_EXECBUF 12 |
| 51 | #define DRM_VMW_FIFO_DEBUG 13 | 51 | #define DRM_VMW_FIFO_DEBUG 13 |
| 52 | #define DRM_VMW_FENCE_WAIT 14 | 52 | #define DRM_VMW_FENCE_WAIT 14 |
| 53 | /* guarded by minor version >= 2 */ | ||
| 54 | #define DRM_VMW_UPDATE_LAYOUT 15 | ||
| 53 | 55 | ||
| 54 | 56 | ||
| 55 | /*************************************************************************/ | 57 | /*************************************************************************/ |
| @@ -585,4 +587,28 @@ struct drm_vmw_stream_arg { | |||
| 585 | * sure that the stream has been stopped. | 587 | * sure that the stream has been stopped. |
| 586 | */ | 588 | */ |
| 587 | 589 | ||
| 590 | /*************************************************************************/ | ||
| 591 | /** | ||
| 592 | * DRM_VMW_UPDATE_LAYOUT - Update layout | ||
| 593 | * | ||
| 594 | * Updates the preferred modes and connection status for connectors. The | ||
| 595 | * command consists of one drm_vmw_update_layout_arg pointing out an array | ||
| 596 | * of num_outputs drm_vmw_rect's. | ||
| 597 | */ | ||
| 598 | |||
| 599 | /** | ||
| 600 | * struct drm_vmw_update_layout_arg | ||
| 601 | * | ||
| 602 | * @num_outputs: number of active outputs | ||
| 603 | * @rects: pointer to array of drm_vmw_rect | ||
| 604 | * | ||
| 605 | * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. | ||
| 606 | */ | ||
| 607 | |||
| 608 | struct drm_vmw_update_layout_arg { | ||
| 609 | uint32_t num_outputs; | ||
| 610 | uint32_t pad64; | ||
| 611 | uint64_t rects; | ||
| 612 | }; | ||
| 613 | |||
| 588 | #endif | 614 | #endif |
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 2dfaa293ae8c..c9a975976995 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h | |||
| @@ -5,6 +5,27 @@ | |||
| 5 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> | 5 | * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> |
| 6 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> | 6 | * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> |
| 7 | * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> | 7 | * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> |
| 8 | * | ||
| 9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 10 | * copy of this software and associated documentation files (the "Software"), | ||
| 11 | * to deal in the Software without restriction, including without limitation | ||
| 12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 14 | * Software is furnished to do so, subject to the following conditions: | ||
| 15 | * | ||
| 16 | * The above copyright notice and this permission notice (including the next | ||
| 17 | * paragraph) shall be included in all copies or substantial portions of the | ||
| 18 | * Software. | ||
| 19 | * | ||
| 20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 23 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 24 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 25 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 26 | * DEALINGS | ||
| 27 | * IN THE SOFTWARE. | ||
| 28 | * | ||
| 8 | */ | 29 | */ |
| 9 | 30 | ||
| 10 | #ifndef LINUX_VGA_H | 31 | #ifndef LINUX_VGA_H |
