diff options
Diffstat (limited to 'drivers/video/tegra/host')
59 files changed, 11427 insertions, 0 deletions
diff --git a/drivers/video/tegra/host/Makefile b/drivers/video/tegra/host/Makefile new file mode 100644 index 00000000000..0180885af4d --- /dev/null +++ b/drivers/video/tegra/host/Makefile | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | GCOV_PROFILE := y | ||
| 2 | nvhost-objs = \ | ||
| 3 | nvhost_acm.o \ | ||
| 4 | nvhost_syncpt.o \ | ||
| 5 | nvhost_cdma.o \ | ||
| 6 | nvhost_intr.o \ | ||
| 7 | nvhost_channel.o \ | ||
| 8 | nvhost_job.o \ | ||
| 9 | bus.o \ | ||
| 10 | dev.o \ | ||
| 11 | debug.o \ | ||
| 12 | bus_client.o | ||
| 13 | |||
| 14 | obj-$(CONFIG_TEGRA_GRHOST) += mpe/ | ||
| 15 | obj-$(CONFIG_TEGRA_GRHOST) += gr3d/ | ||
| 16 | obj-$(CONFIG_TEGRA_GRHOST) += host1x/ | ||
| 17 | obj-$(CONFIG_TEGRA_GRHOST) += t20/ | ||
| 18 | obj-$(CONFIG_TEGRA_GRHOST) += t30/ | ||
| 19 | obj-$(CONFIG_TEGRA_GRHOST) += dsi/ | ||
| 20 | obj-$(CONFIG_TEGRA_GRHOST) += gr2d/ | ||
| 21 | obj-$(CONFIG_TEGRA_GRHOST) += isp/ | ||
| 22 | obj-$(CONFIG_TEGRA_GRHOST) += vi/ | ||
| 23 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o | ||
diff --git a/drivers/video/tegra/host/bus.c b/drivers/video/tegra/host/bus.c new file mode 100644 index 00000000000..774aac7bd43 --- /dev/null +++ b/drivers/video/tegra/host/bus.c | |||
| @@ -0,0 +1,569 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/bus.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010 Google, Inc. | ||
| 5 | * Author: Erik Gilling <konkers@google.com> | ||
| 6 | * | ||
| 7 | * Copyright (C) 2010-2012 NVIDIA Corporation | ||
| 8 | * | ||
| 9 | * This software is licensed under the terms of the GNU General Public | ||
| 10 | * License version 2, as published by the Free Software Foundation, and | ||
| 11 | * may be copied, distributed, and modified under those terms. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/pm_runtime.h> | ||
| 21 | #include <linux/nvhost.h> | ||
| 22 | |||
| 23 | #include "dev.h" | ||
| 24 | |||
| 25 | struct nvhost_master *nvhost; | ||
| 26 | |||
| 27 | struct resource *nvhost_get_resource(struct nvhost_device *dev, | ||
| 28 | unsigned int type, unsigned int num) | ||
| 29 | { | ||
| 30 | int i; | ||
| 31 | |||
| 32 | for (i = 0; i < dev->num_resources; i++) { | ||
| 33 | struct resource *r = &dev->resource[i]; | ||
| 34 | |||
| 35 | if (type == resource_type(r) && num-- == 0) | ||
| 36 | return r; | ||
| 37 | } | ||
| 38 | return NULL; | ||
| 39 | } | ||
| 40 | EXPORT_SYMBOL_GPL(nvhost_get_resource); | ||
| 41 | |||
| 42 | int nvhost_get_irq(struct nvhost_device *dev, unsigned int num) | ||
| 43 | { | ||
| 44 | struct resource *r = nvhost_get_resource(dev, IORESOURCE_IRQ, num); | ||
| 45 | |||
| 46 | return r ? r->start : -ENXIO; | ||
| 47 | } | ||
| 48 | EXPORT_SYMBOL_GPL(nvhost_get_irq); | ||
| 49 | |||
| 50 | struct resource *nvhost_get_resource_byname(struct nvhost_device *dev, | ||
| 51 | unsigned int type, | ||
| 52 | const char *name) | ||
| 53 | { | ||
| 54 | int i; | ||
| 55 | |||
| 56 | for (i = 0; i < dev->num_resources; i++) { | ||
| 57 | struct resource *r = &dev->resource[i]; | ||
| 58 | |||
| 59 | if (type == resource_type(r) && !strcmp(r->name, name)) | ||
| 60 | return r; | ||
| 61 | } | ||
| 62 | return NULL; | ||
| 63 | } | ||
| 64 | EXPORT_SYMBOL_GPL(nvhost_get_resource_byname); | ||
| 65 | |||
| 66 | int nvhost_get_irq_byname(struct nvhost_device *dev, const char *name) | ||
| 67 | { | ||
| 68 | struct resource *r = nvhost_get_resource_byname(dev, IORESOURCE_IRQ, | ||
| 69 | name); | ||
| 70 | |||
| 71 | return r ? r->start : -ENXIO; | ||
| 72 | } | ||
| 73 | EXPORT_SYMBOL_GPL(nvhost_get_irq_byname); | ||
| 74 | |||
| 75 | static int nvhost_drv_probe(struct device *_dev) | ||
| 76 | { | ||
| 77 | struct nvhost_driver *drv = to_nvhost_driver(_dev->driver); | ||
| 78 | struct nvhost_device *dev = to_nvhost_device(_dev); | ||
| 79 | |||
| 80 | return drv->probe(dev); | ||
| 81 | } | ||
| 82 | |||
| 83 | static int nvhost_drv_remove(struct device *_dev) | ||
| 84 | { | ||
| 85 | struct nvhost_driver *drv = to_nvhost_driver(_dev->driver); | ||
| 86 | struct nvhost_device *dev = to_nvhost_device(_dev); | ||
| 87 | |||
| 88 | return drv->remove(dev); | ||
| 89 | } | ||
| 90 | |||
| 91 | static void nvhost_drv_shutdown(struct device *_dev) | ||
| 92 | { | ||
| 93 | struct nvhost_driver *drv = to_nvhost_driver(_dev->driver); | ||
| 94 | struct nvhost_device *dev = to_nvhost_device(_dev); | ||
| 95 | |||
| 96 | drv->shutdown(dev); | ||
| 97 | } | ||
| 98 | |||
/*
 * Register an nvhost driver with the driver core.
 *
 * Binds the driver to the nvhost bus and installs the generic
 * probe/remove/shutdown adapters only for the callbacks the driver
 * actually provides, so the driver core skips the others entirely.
 */
int nvhost_driver_register(struct nvhost_driver *drv)
{
	drv->driver.bus = &nvhost_bus_type;
	if (drv->probe)
		drv->driver.probe = nvhost_drv_probe;
	if (drv->remove)
		drv->driver.remove = nvhost_drv_remove;
	if (drv->shutdown)
		drv->driver.shutdown = nvhost_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(nvhost_driver_register);
| 112 | |||
/* Unregister an nvhost driver previously added by nvhost_driver_register. */
void nvhost_driver_unregister(struct nvhost_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(nvhost_driver_unregister);
| 118 | |||
/*
 * Register an nvhost device: initialize the embedded struct device,
 * claim its MEM/IO resources, and add it to the nvhost bus.
 *
 * On any resource-claim failure, every resource inserted so far is
 * released and -EBUSY is returned. If device_add() fails, all claimed
 * resources are released (i equals num_resources at that point, so the
 * rollback loop below walks every entry).
 */
int nvhost_device_register(struct nvhost_device *dev)
{
	int i, ret = 0;

	if (!dev)
		return -EINVAL;

	device_initialize(&dev->dev);

	/* If the dev does not have a parent, assign host1x as parent */
	if (!dev->dev.parent && nvhost && nvhost->dev != dev)
		dev->dev.parent = &nvhost->dev->dev;

	dev->dev.bus = &nvhost_bus_type;

	/* id == -1 means a singleton device; omit the instance suffix. */
	if (dev->id != -1)
		dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
	else
		dev_set_name(&dev->dev, "%s", dev->name);

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *p, *r = &dev->resource[i];

		/* Unnamed resources inherit the device name. */
		if (r->name == NULL)
			r->name = dev_name(&dev->dev);

		/* Default parent: the global iomem/ioport trees. */
		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p && insert_resource(p, r)) {
			pr_err("%s: failed to claim resource %d\n",
				dev_name(&dev->dev), i);
			ret = -EBUSY;
			goto failed;
		}
	}

	ret = device_add(&dev->dev);
	if (ret == 0)
		return ret;

failed:
	/* Roll back: release resources [0, i) that were inserted above. */
	while (--i >= 0) {
		struct resource *r = &dev->resource[i];
		unsigned long type = resource_type(r);

		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
			release_resource(r);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvhost_device_register);
| 177 | |||
| 178 | void nvhost_device_unregister(struct nvhost_device *dev) | ||
| 179 | { | ||
| 180 | int i; | ||
| 181 | if (dev) { | ||
| 182 | device_del(&dev->dev); | ||
| 183 | |||
| 184 | for (i = 0; i < dev->num_resources; i++) { | ||
| 185 | struct resource *r = &dev->resource[i]; | ||
| 186 | unsigned long type = resource_type(r); | ||
| 187 | |||
| 188 | if (type == IORESOURCE_MEM || type == IORESOURCE_IO) | ||
| 189 | release_resource(r); | ||
| 190 | } | ||
| 191 | |||
| 192 | put_device(&dev->dev); | ||
| 193 | } | ||
| 194 | } | ||
| 195 | EXPORT_SYMBOL_GPL(nvhost_device_unregister); | ||
| 196 | |||
/*
 * Bus match callback: a driver matches a device when the driver name is
 * a prefix of the device name. NOTE(review): this is a deliberate
 * strncmp prefix match rather than an exact strcmp — presumably so one
 * driver can bind multiple related device-name variants; confirm against
 * the device/driver naming used by the t20/t30 sub-drivers.
 */
static int nvhost_bus_match(struct device *_dev, struct device_driver *drv)
{
	struct nvhost_device *dev = to_nvhost_device(_dev);

	return !strncmp(dev->name, drv->name, strlen(drv->name));
}
| 203 | |||
| 204 | #ifdef CONFIG_PM_SLEEP | ||
| 205 | |||
| 206 | static int nvhost_legacy_suspend(struct device *dev, pm_message_t mesg) | ||
| 207 | { | ||
| 208 | struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver); | ||
| 209 | struct nvhost_device *pdev = to_nvhost_device(dev); | ||
| 210 | int ret = 0; | ||
| 211 | |||
| 212 | if (dev->driver && pdrv->suspend) | ||
| 213 | ret = pdrv->suspend(pdev, mesg); | ||
| 214 | |||
| 215 | return ret; | ||
| 216 | } | ||
| 217 | |||
| 218 | static int nvhost_legacy_resume(struct device *dev) | ||
| 219 | { | ||
| 220 | struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver); | ||
| 221 | struct nvhost_device *pdev = to_nvhost_device(dev); | ||
| 222 | int ret = 0; | ||
| 223 | |||
| 224 | if (dev->driver && pdrv->resume) | ||
| 225 | ret = pdrv->resume(pdev); | ||
| 226 | |||
| 227 | return ret; | ||
| 228 | } | ||
| 229 | |||
| 230 | static int nvhost_pm_prepare(struct device *dev) | ||
| 231 | { | ||
| 232 | struct device_driver *drv = dev->driver; | ||
| 233 | int ret = 0; | ||
| 234 | |||
| 235 | if (drv && drv->pm && drv->pm->prepare) | ||
| 236 | ret = drv->pm->prepare(dev); | ||
| 237 | |||
| 238 | return ret; | ||
| 239 | } | ||
| 240 | |||
| 241 | static void nvhost_pm_complete(struct device *dev) | ||
| 242 | { | ||
| 243 | struct device_driver *drv = dev->driver; | ||
| 244 | |||
| 245 | if (drv && drv->pm && drv->pm->complete) | ||
| 246 | drv->pm->complete(dev); | ||
| 247 | } | ||
| 248 | |||
| 249 | #else /* !CONFIG_PM_SLEEP */ | ||
| 250 | |||
| 251 | #define nvhost_pm_prepare NULL | ||
| 252 | #define nvhost_pm_complete NULL | ||
| 253 | |||
| 254 | #endif /* !CONFIG_PM_SLEEP */ | ||
| 255 | |||
| 256 | #ifdef CONFIG_SUSPEND | ||
| 257 | |||
| 258 | int __weak nvhost_pm_suspend(struct device *dev) | ||
| 259 | { | ||
| 260 | struct device_driver *drv = dev->driver; | ||
| 261 | int ret = 0; | ||
| 262 | |||
| 263 | if (!drv) | ||
| 264 | return 0; | ||
| 265 | |||
| 266 | if (drv->pm) { | ||
| 267 | if (drv->pm->suspend) | ||
| 268 | ret = drv->pm->suspend(dev); | ||
| 269 | } else { | ||
| 270 | ret = nvhost_legacy_suspend(dev, PMSG_SUSPEND); | ||
| 271 | } | ||
| 272 | |||
| 273 | return ret; | ||
| 274 | } | ||
| 275 | |||
| 276 | int __weak nvhost_pm_suspend_noirq(struct device *dev) | ||
| 277 | { | ||
| 278 | struct device_driver *drv = dev->driver; | ||
| 279 | int ret = 0; | ||
| 280 | |||
| 281 | if (!drv) | ||
| 282 | return 0; | ||
| 283 | |||
| 284 | if (drv->pm) { | ||
| 285 | if (drv->pm->suspend_noirq) | ||
| 286 | ret = drv->pm->suspend_noirq(dev); | ||
| 287 | } | ||
| 288 | |||
| 289 | return ret; | ||
| 290 | } | ||
| 291 | |||
| 292 | int __weak nvhost_pm_resume(struct device *dev) | ||
| 293 | { | ||
| 294 | struct device_driver *drv = dev->driver; | ||
| 295 | int ret = 0; | ||
| 296 | |||
| 297 | if (!drv) | ||
| 298 | return 0; | ||
| 299 | |||
| 300 | if (drv->pm) { | ||
| 301 | if (drv->pm->resume) | ||
| 302 | ret = drv->pm->resume(dev); | ||
| 303 | } else { | ||
| 304 | ret = nvhost_legacy_resume(dev); | ||
| 305 | } | ||
| 306 | |||
| 307 | return ret; | ||
| 308 | } | ||
| 309 | |||
| 310 | int __weak nvhost_pm_resume_noirq(struct device *dev) | ||
| 311 | { | ||
| 312 | struct device_driver *drv = dev->driver; | ||
| 313 | int ret = 0; | ||
| 314 | |||
| 315 | if (!drv) | ||
| 316 | return 0; | ||
| 317 | |||
| 318 | if (drv->pm) { | ||
| 319 | if (drv->pm->resume_noirq) | ||
| 320 | ret = drv->pm->resume_noirq(dev); | ||
| 321 | } | ||
| 322 | |||
| 323 | return ret; | ||
| 324 | } | ||
| 325 | |||
| 326 | #else /* !CONFIG_SUSPEND */ | ||
| 327 | |||
| 328 | #define nvhost_pm_suspend NULL | ||
| 329 | #define nvhost_pm_resume NULL | ||
| 330 | #define nvhost_pm_suspend_noirq NULL | ||
| 331 | #define nvhost_pm_resume_noirq NULL | ||
| 332 | |||
| 333 | #endif /* !CONFIG_SUSPEND */ | ||
| 334 | |||
| 335 | #ifdef CONFIG_HIBERNATION | ||
| 336 | |||
| 337 | static int nvhost_pm_freeze(struct device *dev) | ||
| 338 | { | ||
| 339 | struct device_driver *drv = dev->driver; | ||
| 340 | int ret = 0; | ||
| 341 | |||
| 342 | if (!drv) | ||
| 343 | return 0; | ||
| 344 | |||
| 345 | if (drv->pm) { | ||
| 346 | if (drv->pm->freeze) | ||
| 347 | ret = drv->pm->freeze(dev); | ||
| 348 | } else { | ||
| 349 | ret = nvhost_legacy_suspend(dev, PMSG_FREEZE); | ||
| 350 | } | ||
| 351 | |||
| 352 | return ret; | ||
| 353 | } | ||
| 354 | |||
| 355 | static int nvhost_pm_freeze_noirq(struct device *dev) | ||
| 356 | { | ||
| 357 | struct device_driver *drv = dev->driver; | ||
| 358 | int ret = 0; | ||
| 359 | |||
| 360 | if (!drv) | ||
| 361 | return 0; | ||
| 362 | |||
| 363 | if (drv->pm) { | ||
| 364 | if (drv->pm->freeze_noirq) | ||
| 365 | ret = drv->pm->freeze_noirq(dev); | ||
| 366 | } | ||
| 367 | |||
| 368 | return ret; | ||
| 369 | } | ||
| 370 | |||
| 371 | static int nvhost_pm_thaw(struct device *dev) | ||
| 372 | { | ||
| 373 | struct device_driver *drv = dev->driver; | ||
| 374 | int ret = 0; | ||
| 375 | |||
| 376 | if (!drv) | ||
| 377 | return 0; | ||
| 378 | |||
| 379 | if (drv->pm) { | ||
| 380 | if (drv->pm->thaw) | ||
| 381 | ret = drv->pm->thaw(dev); | ||
| 382 | } else { | ||
| 383 | ret = nvhost_legacy_resume(dev); | ||
| 384 | } | ||
| 385 | |||
| 386 | return ret; | ||
| 387 | } | ||
| 388 | |||
| 389 | static int nvhost_pm_thaw_noirq(struct device *dev) | ||
| 390 | { | ||
| 391 | struct device_driver *drv = dev->driver; | ||
| 392 | int ret = 0; | ||
| 393 | |||
| 394 | if (!drv) | ||
| 395 | return 0; | ||
| 396 | |||
| 397 | if (drv->pm) { | ||
| 398 | if (drv->pm->thaw_noirq) | ||
| 399 | ret = drv->pm->thaw_noirq(dev); | ||
| 400 | } | ||
| 401 | |||
| 402 | return ret; | ||
| 403 | } | ||
| 404 | |||
| 405 | static int nvhost_pm_poweroff(struct device *dev) | ||
| 406 | { | ||
| 407 | struct device_driver *drv = dev->driver; | ||
| 408 | int ret = 0; | ||
| 409 | |||
| 410 | if (!drv) | ||
| 411 | return 0; | ||
| 412 | |||
| 413 | if (drv->pm) { | ||
| 414 | if (drv->pm->poweroff) | ||
| 415 | ret = drv->pm->poweroff(dev); | ||
| 416 | } else { | ||
| 417 | ret = nvhost_legacy_suspend(dev, PMSG_HIBERNATE); | ||
| 418 | } | ||
| 419 | |||
| 420 | return ret; | ||
| 421 | } | ||
| 422 | |||
| 423 | static int nvhost_pm_poweroff_noirq(struct device *dev) | ||
| 424 | { | ||
| 425 | struct device_driver *drv = dev->driver; | ||
| 426 | int ret = 0; | ||
| 427 | |||
| 428 | if (!drv) | ||
| 429 | return 0; | ||
| 430 | |||
| 431 | if (drv->pm) { | ||
| 432 | if (drv->pm->poweroff_noirq) | ||
| 433 | ret = drv->pm->poweroff_noirq(dev); | ||
| 434 | } | ||
| 435 | |||
| 436 | return ret; | ||
| 437 | } | ||
| 438 | |||
| 439 | static int nvhost_pm_restore(struct device *dev) | ||
| 440 | { | ||
| 441 | struct device_driver *drv = dev->driver; | ||
| 442 | int ret = 0; | ||
| 443 | |||
| 444 | if (!drv) | ||
| 445 | return 0; | ||
| 446 | |||
| 447 | if (drv->pm) { | ||
| 448 | if (drv->pm->restore) | ||
| 449 | ret = drv->pm->restore(dev); | ||
| 450 | } else { | ||
| 451 | ret = nvhost_legacy_resume(dev); | ||
| 452 | } | ||
| 453 | |||
| 454 | return ret; | ||
| 455 | } | ||
| 456 | |||
| 457 | static int nvhost_pm_restore_noirq(struct device *dev) | ||
| 458 | { | ||
| 459 | struct device_driver *drv = dev->driver; | ||
| 460 | int ret = 0; | ||
| 461 | |||
| 462 | if (!drv) | ||
| 463 | return 0; | ||
| 464 | |||
| 465 | if (drv->pm) { | ||
| 466 | if (drv->pm->restore_noirq) | ||
| 467 | ret = drv->pm->restore_noirq(dev); | ||
| 468 | } | ||
| 469 | |||
| 470 | return ret; | ||
| 471 | } | ||
| 472 | |||
| 473 | #else /* !CONFIG_HIBERNATION */ | ||
| 474 | |||
| 475 | #define nvhost_pm_freeze NULL | ||
| 476 | #define nvhost_pm_thaw NULL | ||
| 477 | #define nvhost_pm_poweroff NULL | ||
| 478 | #define nvhost_pm_restore NULL | ||
| 479 | #define nvhost_pm_freeze_noirq NULL | ||
| 480 | #define nvhost_pm_thaw_noirq NULL | ||
| 481 | #define nvhost_pm_poweroff_noirq NULL | ||
| 482 | #define nvhost_pm_restore_noirq NULL | ||
| 483 | |||
| 484 | #endif /* !CONFIG_HIBERNATION */ | ||
| 485 | |||
| 486 | #ifdef CONFIG_PM_RUNTIME | ||
| 487 | |||
| 488 | int __weak nvhost_pm_runtime_suspend(struct device *dev) | ||
| 489 | { | ||
| 490 | return pm_generic_runtime_suspend(dev); | ||
| 491 | }; | ||
| 492 | |||
| 493 | int __weak nvhost_pm_runtime_resume(struct device *dev) | ||
| 494 | { | ||
| 495 | return pm_generic_runtime_resume(dev); | ||
| 496 | }; | ||
| 497 | |||
| 498 | int __weak nvhost_pm_runtime_idle(struct device *dev) | ||
| 499 | { | ||
| 500 | return pm_generic_runtime_idle(dev); | ||
| 501 | }; | ||
| 502 | |||
| 503 | #else /* !CONFIG_PM_RUNTIME */ | ||
| 504 | |||
| 505 | #define nvhost_pm_runtime_suspend NULL | ||
| 506 | #define nvhost_pm_runtime_resume NULL | ||
| 507 | #define nvhost_pm_runtime_idle NULL | ||
| 508 | |||
| 509 | #endif /* !CONFIG_PM_RUNTIME */ | ||
| 510 | |||
/*
 * Bus-level power-management operations. Entries are the adapter
 * functions above; each compiles to NULL when the corresponding
 * CONFIG_PM_SLEEP / CONFIG_SUSPEND / CONFIG_HIBERNATION /
 * CONFIG_PM_RUNTIME option is disabled.
 */
static const struct dev_pm_ops nvhost_dev_pm_ops = {
	.prepare = nvhost_pm_prepare,
	.complete = nvhost_pm_complete,
	.suspend = nvhost_pm_suspend,
	.resume = nvhost_pm_resume,
	.freeze = nvhost_pm_freeze,
	.thaw = nvhost_pm_thaw,
	.poweroff = nvhost_pm_poweroff,
	.restore = nvhost_pm_restore,
	.suspend_noirq = nvhost_pm_suspend_noirq,
	.resume_noirq = nvhost_pm_resume_noirq,
	.freeze_noirq = nvhost_pm_freeze_noirq,
	.thaw_noirq = nvhost_pm_thaw_noirq,
	.poweroff_noirq = nvhost_pm_poweroff_noirq,
	.restore_noirq = nvhost_pm_restore_noirq,
	.runtime_suspend = nvhost_pm_runtime_suspend,
	.runtime_resume = nvhost_pm_runtime_resume,
	.runtime_idle = nvhost_pm_runtime_idle,
};
| 530 | |||
/* The nvhost virtual bus: prefix-based name matching, PM ops above. */
struct bus_type nvhost_bus_type = {
	.name = "nvhost",
	.match = nvhost_bus_match,
	.pm = &nvhost_dev_pm_ops,
};
EXPORT_SYMBOL(nvhost_bus_type);
| 537 | |||
| 538 | static int set_parent(struct device *dev, void *data) | ||
| 539 | { | ||
| 540 | struct nvhost_device *ndev = to_nvhost_device(dev); | ||
| 541 | struct nvhost_master *host = data; | ||
| 542 | if (!dev->parent && ndev != host->dev) | ||
| 543 | dev->parent = &host->dev->dev; | ||
| 544 | return 0; | ||
| 545 | } | ||
| 546 | |||
| 547 | int nvhost_bus_add_host(struct nvhost_master *host) | ||
| 548 | { | ||
| 549 | nvhost = host; | ||
| 550 | |||
| 551 | /* Assign host1x as parent to all devices in nvhost bus */ | ||
| 552 | bus_for_each_dev(&nvhost_bus_type, NULL, host, set_parent); | ||
| 553 | |||
| 554 | return 0; | ||
| 555 | } | ||
| 556 | |||
| 557 | |||
| 558 | int nvhost_bus_init(void) | ||
| 559 | { | ||
| 560 | int err; | ||
| 561 | |||
| 562 | pr_info("host1x bus init\n"); | ||
| 563 | |||
| 564 | err = bus_register(&nvhost_bus_type); | ||
| 565 | |||
| 566 | return err; | ||
| 567 | } | ||
| 568 | postcore_initcall(nvhost_bus_init); | ||
| 569 | |||
diff --git a/drivers/video/tegra/host/bus_client.c b/drivers/video/tegra/host/bus_client.c new file mode 100644 index 00000000000..940f04a40e8 --- /dev/null +++ b/drivers/video/tegra/host/bus_client.c | |||
| @@ -0,0 +1,606 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/bus_client.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Client Module | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/slab.h> | ||
| 22 | #include <linux/string.h> | ||
| 23 | #include <linux/spinlock.h> | ||
| 24 | #include <linux/fs.h> | ||
| 25 | #include <linux/cdev.h> | ||
| 26 | #include <linux/uaccess.h> | ||
| 27 | #include <linux/file.h> | ||
| 28 | #include <linux/clk.h> | ||
| 29 | #include <linux/hrtimer.h> | ||
| 30 | |||
| 31 | #include <trace/events/nvhost.h> | ||
| 32 | |||
| 33 | #include <linux/io.h> | ||
| 34 | #include <linux/string.h> | ||
| 35 | |||
| 36 | #include <linux/nvhost.h> | ||
| 37 | #include <linux/nvhost_ioctl.h> | ||
| 38 | |||
| 39 | #include <mach/nvmap.h> | ||
| 40 | #include <mach/gpufuse.h> | ||
| 41 | #include <mach/hardware.h> | ||
| 42 | #include <mach/iomap.h> | ||
| 43 | |||
| 44 | #include "debug.h" | ||
| 45 | #include "bus_client.h" | ||
| 46 | #include "dev.h" | ||
| 47 | |||
| 48 | void nvhost_read_module_regs(struct nvhost_device *ndev, | ||
| 49 | u32 offset, int count, u32 *values) | ||
| 50 | { | ||
| 51 | void __iomem *p = ndev->aperture + offset; | ||
| 52 | |||
| 53 | nvhost_module_busy(ndev); | ||
| 54 | while (count--) { | ||
| 55 | *(values++) = readl(p); | ||
| 56 | p += 4; | ||
| 57 | } | ||
| 58 | rmb(); | ||
| 59 | nvhost_module_idle(ndev); | ||
| 60 | } | ||
| 61 | |||
| 62 | void nvhost_write_module_regs(struct nvhost_device *ndev, | ||
| 63 | u32 offset, int count, const u32 *values) | ||
| 64 | { | ||
| 65 | void __iomem *p = ndev->aperture + offset; | ||
| 66 | |||
| 67 | nvhost_module_busy(ndev); | ||
| 68 | while (count--) { | ||
| 69 | writel(*(values++), p); | ||
| 70 | p += 4; | ||
| 71 | } | ||
| 72 | wmb(); | ||
| 73 | nvhost_module_idle(ndev); | ||
| 74 | } | ||
| 75 | |||
/* Per-open-file state for a channel device node. */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;	/* per-client hw context, may be NULL */
	struct nvhost_submit_hdr_ext hdr; /* header of submit being parsed */
	int num_relocshifts;		/* reloc shifts still expected (V2+) */
	struct nvhost_job *job;		/* job being assembled from writes */
	struct nvmap_client *nvmap;	/* memory client for buffer pinning */
	u32 timeout;			/* job timeout passed to submits */
	u32 priority;			/* submit priority for this client */
	int clientid;			/* unique id from host's counter */
};
| 87 | |||
| 88 | /* | ||
| 89 | * Write cmdbuf to ftrace output. Checks if cmdbuf contents should be output | ||
| 90 | * and mmaps the cmdbuf contents if required. | ||
| 91 | */ | ||
| 92 | static void trace_write_cmdbufs(struct nvhost_job *job) | ||
| 93 | { | ||
| 94 | struct nvmap_handle_ref handle; | ||
| 95 | void *mem = NULL; | ||
| 96 | int i = 0; | ||
| 97 | |||
| 98 | for (i = 0; i < job->num_gathers; i++) { | ||
| 99 | struct nvhost_channel_gather *gather = &job->gathers[i]; | ||
| 100 | if (nvhost_debug_trace_cmdbuf) { | ||
| 101 | handle.handle = nvmap_id_to_handle(gather->mem_id); | ||
| 102 | mem = nvmap_mmap(&handle); | ||
| 103 | if (IS_ERR_OR_NULL(mem)) | ||
| 104 | mem = NULL; | ||
| 105 | }; | ||
| 106 | |||
| 107 | if (mem) { | ||
| 108 | u32 i; | ||
| 109 | /* | ||
| 110 | * Write in batches of 128 as there seems to be a limit | ||
| 111 | * of how much you can output to ftrace at once. | ||
| 112 | */ | ||
| 113 | for (i = 0; i < gather->words; i += TRACE_MAX_LENGTH) { | ||
| 114 | trace_nvhost_channel_write_cmdbuf_data( | ||
| 115 | job->ch->dev->name, | ||
| 116 | gather->mem_id, | ||
| 117 | min(gather->words - i, | ||
| 118 | TRACE_MAX_LENGTH), | ||
| 119 | gather->offset + i * sizeof(u32), | ||
| 120 | mem); | ||
| 121 | } | ||
| 122 | nvmap_munmap(&handle, mem); | ||
| 123 | } | ||
| 124 | } | ||
| 125 | } | ||
| 126 | |||
/*
 * Release a channel fd: undo everything nvhost_channelopen() set up.
 *
 * Teardown order mirrors open in reverse; note that priv->hwctx,
 * priv->job and priv->nvmap may be NULL when open failed partway
 * through (this function doubles as its error-cleanup path).
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(priv->ch->dev->name);

	filp->private_data = NULL;

	/* Drop the module client and channel refs taken at open. */
	nvhost_module_remove_client(priv->ch->dev, priv);
	nvhost_putchannel(priv->ch, priv->hwctx);

	if (priv->hwctx)
		priv->ch->ctxhandler->put(priv->hwctx);

	if (priv->job)
		nvhost_job_put(priv->job);

	/* nvmap_client_put() tolerates a NULL client. */
	nvmap_client_put(priv->nvmap);
	kfree(priv);
	return 0;
}
| 148 | |||
/*
 * Open a channel fd: take a channel reference, allocate per-fd state,
 * an optional hardware context, and an initial (empty) job.
 *
 * On partial failure this jumps to nvhost_channelrelease(), which is
 * written to cope with the not-yet-initialized members being NULL.
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;

	/* The cdev is embedded in the channel; recover and ref it. */
	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(ch->dev->name);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch, NULL);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	nvhost_module_add_client(ch->dev, priv);

	/* Some engines (e.g. 3D) need a per-client hardware context. */
	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	/* Unique client id from the host-wide counter. */
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);

	priv->job = nvhost_job_alloc(ch, priv->hwctx, &priv->hdr,
			NULL, priv->priority, priv->clientid);
	if (!priv->job)
		goto fail;

	return 0;
fail:
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
| 188 | |||
| 189 | static int set_submit(struct nvhost_channel_userctx *ctx) | ||
| 190 | { | ||
| 191 | struct device *device = &ctx->ch->dev->dev; | ||
| 192 | |||
| 193 | /* submit should have at least 1 cmdbuf */ | ||
| 194 | if (!ctx->hdr.num_cmdbufs) | ||
| 195 | return -EIO; | ||
| 196 | |||
| 197 | if (!ctx->nvmap) { | ||
| 198 | dev_err(device, "no nvmap context set\n"); | ||
| 199 | return -EFAULT; | ||
| 200 | } | ||
| 201 | |||
| 202 | ctx->job = nvhost_job_realloc(ctx->job, | ||
| 203 | ctx->hwctx, | ||
| 204 | &ctx->hdr, | ||
| 205 | ctx->nvmap, | ||
| 206 | ctx->priority, | ||
| 207 | ctx->clientid); | ||
| 208 | if (!ctx->job) | ||
| 209 | return -ENOMEM; | ||
| 210 | ctx->job->timeout = ctx->timeout; | ||
| 211 | |||
| 212 | if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2) | ||
| 213 | ctx->num_relocshifts = ctx->hdr.num_relocs; | ||
| 214 | |||
| 215 | return 0; | ||
| 216 | } | ||
| 217 | |||
| 218 | static void reset_submit(struct nvhost_channel_userctx *ctx) | ||
| 219 | { | ||
| 220 | ctx->hdr.num_cmdbufs = 0; | ||
| 221 | ctx->hdr.num_relocs = 0; | ||
| 222 | ctx->num_relocshifts = 0; | ||
| 223 | ctx->hdr.num_waitchks = 0; | ||
| 224 | } | ||
| 225 | |||
/*
 * Stream parser for userspace submits. The write stream is:
 *   header, then (in this fixed order) cmdbufs, relocs, waitchks,
 *   reloc shifts (V2+) — with the remaining counts tracked in priv->hdr.
 * When all counts are zero the next chunk is interpreted as a new
 * header. A short buf simply stops parsing (partial consumption is
 * returned); a malformed chunk resets the whole submit.
 */
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;
	struct nvhost_job *job = priv->job;
	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
	const char *chname = priv->ch->dev->name;

	if (!job)
		return -EIO;

	while (remaining) {
		size_t consumed;
		/* All counts drained: expect a fresh submit header. */
		if (!hdr->num_relocs &&
			!priv->num_relocshifts &&
			!hdr->num_cmdbufs &&
			!hdr->num_waitchks) {
			consumed = sizeof(struct nvhost_submit_hdr);
			if (remaining < consumed)
				break;
			if (copy_from_user(hdr, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			/* Only the V0 header arrives via write(); the
			 * extended fields keep their previous values. */
			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
			err = set_submit(priv);
			if (err)
				break;
			trace_nvhost_channel_write_submit(chname,
				count, hdr->num_cmdbufs, hdr->num_relocs,
				hdr->syncpt_id, hdr->syncpt_incrs);
		} else if (hdr->num_cmdbufs) {
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_cmdbuf(chname,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			hdr->num_cmdbufs--;
		} else if (hdr->num_relocs) {
			/* Relocs are copied straight into the pin array. */
			consumed = sizeof(struct nvhost_reloc);
			if (remaining < consumed)
				break;
			if (copy_from_user(&job->pinarray[job->num_pins],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_reloc(chname);
			job->num_pins++;
			hdr->num_relocs--;
		} else if (hdr->num_waitchks) {
			/* Waitchks are batched: take as many as fit. */
			int numwaitchks =
				(remaining / sizeof(struct nvhost_waitchk));
			if (!numwaitchks)
				break;
			numwaitchks = min_t(int,
				numwaitchks, hdr->num_waitchks);
			consumed = numwaitchks * sizeof(struct nvhost_waitchk);
			if (copy_from_user(&job->waitchk[job->num_waitchk],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_waitchks(
				chname, numwaitchks,
				hdr->waitchk_mask);
			job->num_waitchk += numwaitchks;
			hdr->num_waitchks -= numwaitchks;
		} else if (priv->num_relocshifts) {
			/* Shifts back-fill the relocs copied above. */
			int next_shift =
				job->num_pins - priv->num_relocshifts;
			consumed = sizeof(struct nvhost_reloc_shift);
			if (remaining < consumed)
				break;
			if (copy_from_user(
					&job->pinarray[next_shift].reloc_shift,
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->num_relocshifts--;
		} else {
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		dev_err(&priv->ch->dev->dev, "channel write error\n");
		reset_submit(priv);
		return err;
	}

	return count - remaining;
}
| 332 | |||
/*
 * Flush the assembled submit to the channel: verify the write stream
 * was fully consumed, pin the job's buffers, and hand it to the
 * channel for kickoff. On submit failure the pinned buffers are
 * released again.
 */
static int nvhost_ioctl_channel_flush(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_get_param_args *args,
	int null_kickoff)
{
	struct device *device = &ctx->ch->dev->dev;
	int err;

	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

	/* Any nonzero count means write() stopped mid-submit. */
	if (!ctx->job ||
	    ctx->hdr.num_relocs ||
	    ctx->hdr.num_cmdbufs ||
	    ctx->hdr.num_waitchks) {
		reset_submit(ctx);
		dev_err(device, "channel submit out of sync\n");
		return -EFAULT;
	}

	err = nvhost_job_pin(ctx->job);
	if (err) {
		dev_warn(device, "nvhost_job_pin failed: %d\n", err);
		return err;
	}

	/* Debug knobs: force a null kickoff / a timeout for this pid. */
	if (nvhost_debug_null_kickoff_pid == current->tgid)
		null_kickoff = 1;
	ctx->job->null_kickoff = null_kickoff;

	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
		/* NOTE(review): this updates ctx->timeout only; job->timeout
		 * was already copied in set_submit(), so the forced value
		 * appears to take effect on the NEXT submit — confirm. */
		ctx->timeout = nvhost_debug_force_timeout_val;
	}

	trace_write_cmdbufs(ctx->job);

	/* context switch if needed, and submit user's gathers to the channel */
	err = nvhost_channel_submit(ctx->job);
	args->value = ctx->job->syncpt_end;
	if (err)
		nvhost_job_unpin(ctx->job);

	return err;
}
| 377 | |||
| 378 | static int nvhost_ioctl_channel_read_3d_reg( | ||
| 379 | struct nvhost_channel_userctx *ctx, | ||
| 380 | struct nvhost_read_3d_reg_args *args) | ||
| 381 | { | ||
| 382 | BUG_ON(!channel_op(ctx->ch).read3dreg); | ||
| 383 | return channel_op(ctx->ch).read3dreg(ctx->ch, ctx->hwctx, | ||
| 384 | args->offset, &args->value); | ||
| 385 | } | ||
| 386 | |||
| 387 | static long nvhost_channelctl(struct file *filp, | ||
| 388 | unsigned int cmd, unsigned long arg) | ||
| 389 | { | ||
| 390 | struct nvhost_channel_userctx *priv = filp->private_data; | ||
| 391 | u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE]; | ||
| 392 | int err = 0; | ||
| 393 | |||
| 394 | if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) || | ||
| 395 | (_IOC_NR(cmd) == 0) || | ||
| 396 | (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST)) | ||
| 397 | return -EFAULT; | ||
| 398 | |||
| 399 | BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE); | ||
| 400 | |||
| 401 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | ||
| 402 | if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd))) | ||
| 403 | return -EFAULT; | ||
| 404 | } | ||
| 405 | |||
| 406 | switch (cmd) { | ||
| 407 | case NVHOST_IOCTL_CHANNEL_FLUSH: | ||
| 408 | err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0); | ||
| 409 | break; | ||
| 410 | case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF: | ||
| 411 | err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1); | ||
| 412 | break; | ||
| 413 | case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT: | ||
| 414 | { | ||
| 415 | struct nvhost_submit_hdr_ext *hdr; | ||
| 416 | |||
| 417 | if (priv->hdr.num_relocs || | ||
| 418 | priv->num_relocshifts || | ||
| 419 | priv->hdr.num_cmdbufs || | ||
| 420 | priv->hdr.num_waitchks) { | ||
| 421 | reset_submit(priv); | ||
| 422 | dev_err(&priv->ch->dev->dev, | ||
| 423 | "channel submit out of sync\n"); | ||
| 424 | err = -EIO; | ||
| 425 | break; | ||
| 426 | } | ||
| 427 | |||
| 428 | hdr = (struct nvhost_submit_hdr_ext *)buf; | ||
| 429 | if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) { | ||
| 430 | dev_err(&priv->ch->dev->dev, | ||
| 431 | "submit version %d > max supported %d\n", | ||
| 432 | hdr->submit_version, | ||
| 433 | NVHOST_SUBMIT_VERSION_MAX_SUPPORTED); | ||
| 434 | err = -EINVAL; | ||
| 435 | break; | ||
| 436 | } | ||
| 437 | memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext)); | ||
| 438 | err = set_submit(priv); | ||
| 439 | trace_nvhost_ioctl_channel_submit(priv->ch->dev->name, | ||
| 440 | priv->hdr.submit_version, | ||
| 441 | priv->hdr.num_cmdbufs, priv->hdr.num_relocs, | ||
| 442 | priv->hdr.num_waitchks, | ||
| 443 | priv->hdr.syncpt_id, priv->hdr.syncpt_incrs); | ||
| 444 | break; | ||
| 445 | } | ||
| 446 | case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS: | ||
| 447 | /* host syncpt ID is used by the RM (and never be given out) */ | ||
| 448 | BUG_ON(priv->ch->dev->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST)); | ||
| 449 | ((struct nvhost_get_param_args *)buf)->value = | ||
| 450 | priv->ch->dev->syncpts; | ||
| 451 | break; | ||
| 452 | case NVHOST_IOCTL_CHANNEL_GET_WAITBASES: | ||
| 453 | ((struct nvhost_get_param_args *)buf)->value = | ||
| 454 | priv->ch->dev->waitbases; | ||
| 455 | break; | ||
| 456 | case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES: | ||
| 457 | ((struct nvhost_get_param_args *)buf)->value = | ||
| 458 | priv->ch->dev->modulemutexes; | ||
| 459 | break; | ||
| 460 | case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD: | ||
| 461 | { | ||
| 462 | int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd; | ||
| 463 | struct nvmap_client *new_client = nvmap_client_get_file(fd); | ||
| 464 | |||
| 465 | if (IS_ERR(new_client)) { | ||
| 466 | err = PTR_ERR(new_client); | ||
| 467 | break; | ||
| 468 | } | ||
| 469 | |||
| 470 | if (priv->nvmap) | ||
| 471 | nvmap_client_put(priv->nvmap); | ||
| 472 | |||
| 473 | priv->nvmap = new_client; | ||
| 474 | break; | ||
| 475 | } | ||
| 476 | case NVHOST_IOCTL_CHANNEL_READ_3D_REG: | ||
| 477 | err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf); | ||
| 478 | break; | ||
| 479 | case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE: | ||
| 480 | { | ||
| 481 | unsigned long rate; | ||
| 482 | struct nvhost_clk_rate_args *arg = | ||
| 483 | (struct nvhost_clk_rate_args *)buf; | ||
| 484 | |||
| 485 | err = nvhost_module_get_rate(priv->ch->dev, &rate, 0); | ||
| 486 | if (err == 0) | ||
| 487 | arg->rate = rate; | ||
| 488 | break; | ||
| 489 | } | ||
| 490 | case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE: | ||
| 491 | { | ||
| 492 | struct nvhost_clk_rate_args *arg = | ||
| 493 | (struct nvhost_clk_rate_args *)buf; | ||
| 494 | unsigned long rate = (unsigned long)arg->rate; | ||
| 495 | |||
| 496 | err = nvhost_module_set_rate(priv->ch->dev, priv, rate, 0); | ||
| 497 | break; | ||
| 498 | } | ||
| 499 | case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT: | ||
| 500 | priv->timeout = | ||
| 501 | (u32)((struct nvhost_set_timeout_args *)buf)->timeout; | ||
| 502 | dev_dbg(&priv->ch->dev->dev, | ||
| 503 | "%s: setting buffer timeout (%d ms) for userctx 0x%p\n", | ||
| 504 | __func__, priv->timeout, priv); | ||
| 505 | break; | ||
| 506 | case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT: | ||
| 507 | ((struct nvhost_get_param_args *)buf)->value = | ||
| 508 | priv->hwctx->has_timedout; | ||
| 509 | break; | ||
| 510 | case NVHOST_IOCTL_CHANNEL_SET_PRIORITY: | ||
| 511 | priv->priority = | ||
| 512 | (u32)((struct nvhost_set_priority_args *)buf)->priority; | ||
| 513 | break; | ||
| 514 | default: | ||
| 515 | err = -ENOTTY; | ||
| 516 | break; | ||
| 517 | } | ||
| 518 | |||
| 519 | if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) | ||
| 520 | err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)); | ||
| 521 | |||
| 522 | return err; | ||
| 523 | } | ||
| 524 | |||
/* File operations for the per-channel character device nodes. */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.write = nvhost_channelwrite,
	.unlocked_ioctl = nvhost_channelctl
};
| 532 | |||
| 533 | int nvhost_client_user_init(struct nvhost_device *dev) | ||
| 534 | { | ||
| 535 | int err, devno; | ||
| 536 | |||
| 537 | struct nvhost_channel *ch = dev->channel; | ||
| 538 | |||
| 539 | cdev_init(&ch->cdev, &nvhost_channelops); | ||
| 540 | ch->cdev.owner = THIS_MODULE; | ||
| 541 | |||
| 542 | devno = MKDEV(nvhost_major, nvhost_minor + dev->index); | ||
| 543 | err = cdev_add(&ch->cdev, devno, 1); | ||
| 544 | if (err < 0) { | ||
| 545 | dev_err(&dev->dev, | ||
| 546 | "failed to add chan %i cdev\n", dev->index); | ||
| 547 | goto fail; | ||
| 548 | } | ||
| 549 | ch->node = device_create(nvhost_get_host(dev)->nvhost_class, NULL, devno, NULL, | ||
| 550 | IFACE_NAME "-%s", dev->name); | ||
| 551 | if (IS_ERR(ch->node)) { | ||
| 552 | err = PTR_ERR(ch->node); | ||
| 553 | dev_err(&dev->dev, | ||
| 554 | "failed to create %s channel device\n", dev->name); | ||
| 555 | goto fail; | ||
| 556 | } | ||
| 557 | |||
| 558 | return 0; | ||
| 559 | fail: | ||
| 560 | return err; | ||
| 561 | } | ||
| 562 | |||
/*
 * Initialize a client device: bind it to its channel, set up the channel,
 * create the user interface node, and initialize power management.
 *
 * NOTE(review): the fail path performs no teardown of the steps that
 * already succeeded (original "Add clean-up" TODO still stands).
 *
 * Returns 0 on success or the first error encountered.
 */
int nvhost_client_device_init(struct nvhost_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch = &nvhost_master->channels[dev->index];

	/* store the pointer to this device for channel */
	ch->dev = dev;

	err = nvhost_channel_init(ch, nvhost_master, dev->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	err = nvhost_module_init(dev);
	if (err)
		goto fail;

	dev_info(&dev->dev, "initialized\n");

	return 0;

fail:
	/* Add clean-up */
	return err;
}
| 592 | |||
| 593 | int nvhost_client_device_suspend(struct nvhost_device *dev) | ||
| 594 | { | ||
| 595 | int ret = 0; | ||
| 596 | |||
| 597 | dev_info(&dev->dev, "suspending\n"); | ||
| 598 | |||
| 599 | ret = nvhost_channel_suspend(dev->channel); | ||
| 600 | if (ret) | ||
| 601 | return ret; | ||
| 602 | |||
| 603 | dev_info(&dev->dev, "suspend status: %d\n", ret); | ||
| 604 | |||
| 605 | return ret; | ||
| 606 | } | ||
diff --git a/drivers/video/tegra/host/bus_client.h b/drivers/video/tegra/host/bus_client.h new file mode 100644 index 00000000000..4e47071fd14 --- /dev/null +++ b/drivers/video/tegra/host/bus_client.h | |||
| @@ -0,0 +1,39 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/bus_client.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host client | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_BUS_CLIENT_H | ||
| 22 | #define __NVHOST_BUS_CLIENT_H | ||
| 23 | |||
| 24 | #include <linux/types.h> | ||
| 25 | struct nvhost_device; | ||
| 26 | |||
| 27 | void nvhost_read_module_regs(struct nvhost_device *ndev, | ||
| 28 | u32 offset, int count, u32 *values); | ||
| 29 | |||
| 30 | void nvhost_write_module_regs(struct nvhost_device *ndev, | ||
| 31 | u32 offset, int count, const u32 *values); | ||
| 32 | |||
| 33 | int nvhost_client_user_init(struct nvhost_device *dev); | ||
| 34 | |||
| 35 | int nvhost_client_device_init(struct nvhost_device *dev); | ||
| 36 | |||
| 37 | int nvhost_client_device_suspend(struct nvhost_device *dev); | ||
| 38 | |||
| 39 | #endif | ||
diff --git a/drivers/video/tegra/host/chip_support.h b/drivers/video/tegra/host/chip_support.h new file mode 100644 index 00000000000..6727e7a69fb --- /dev/null +++ b/drivers/video/tegra/host/chip_support.h | |||
| @@ -0,0 +1,141 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/chip_support.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Chip Support | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | #ifndef _NVHOST_CHIP_SUPPORT_H_ | ||
| 21 | #define _NVHOST_CHIP_SUPPORT_H_ | ||
| 22 | |||
| 23 | #include <linux/types.h> | ||
| 24 | struct output; | ||
| 25 | struct nvhost_waitchk; | ||
| 26 | struct nvhost_userctx_timeout; | ||
| 27 | struct nvhost_master; | ||
| 28 | struct nvhost_channel; | ||
| 29 | struct nvmap_handle; | ||
| 30 | struct nvmap_client; | ||
| 31 | struct nvhost_hwctx; | ||
| 32 | struct nvhost_cdma; | ||
| 33 | struct nvhost_intr; | ||
| 34 | struct push_buffer; | ||
| 35 | struct nvhost_syncpt; | ||
| 36 | struct nvhost_master; | ||
| 37 | struct dentry; | ||
| 38 | struct nvhost_job; | ||
| 39 | |||
| 40 | struct nvhost_chip_support { | ||
| 41 | struct { | ||
| 42 | int (*init)(struct nvhost_channel *, | ||
| 43 | struct nvhost_master *, | ||
| 44 | int chid); | ||
| 45 | int (*submit)(struct nvhost_job *job); | ||
| 46 | int (*read3dreg)(struct nvhost_channel *channel, | ||
| 47 | struct nvhost_hwctx *hwctx, | ||
| 48 | u32 offset, | ||
| 49 | u32 *value); | ||
| 50 | } channel; | ||
| 51 | |||
| 52 | struct { | ||
| 53 | void (*start)(struct nvhost_cdma *); | ||
| 54 | void (*stop)(struct nvhost_cdma *); | ||
| 55 | void (*kick)(struct nvhost_cdma *); | ||
| 56 | int (*timeout_init)(struct nvhost_cdma *, | ||
| 57 | u32 syncpt_id); | ||
| 58 | void (*timeout_destroy)(struct nvhost_cdma *); | ||
| 59 | void (*timeout_teardown_begin)(struct nvhost_cdma *); | ||
| 60 | void (*timeout_teardown_end)(struct nvhost_cdma *, | ||
| 61 | u32 getptr); | ||
| 62 | void (*timeout_cpu_incr)(struct nvhost_cdma *, | ||
| 63 | u32 getptr, | ||
| 64 | u32 syncpt_incrs, | ||
| 65 | u32 syncval, | ||
| 66 | u32 nr_slots); | ||
| 67 | void (*timeout_pb_incr)(struct nvhost_cdma *, | ||
| 68 | u32 getptr, | ||
| 69 | u32 syncpt_incrs, | ||
| 70 | u32 nr_slots, | ||
| 71 | bool exec_ctxsave); | ||
| 72 | } cdma; | ||
| 73 | |||
| 74 | struct { | ||
| 75 | void (*reset)(struct push_buffer *); | ||
| 76 | int (*init)(struct push_buffer *); | ||
| 77 | void (*destroy)(struct push_buffer *); | ||
| 78 | void (*push_to)(struct push_buffer *, | ||
| 79 | struct nvmap_client *, | ||
| 80 | struct nvmap_handle *, | ||
| 81 | u32 op1, u32 op2); | ||
| 82 | void (*pop_from)(struct push_buffer *, | ||
| 83 | unsigned int slots); | ||
| 84 | u32 (*space)(struct push_buffer *); | ||
| 85 | u32 (*putptr)(struct push_buffer *); | ||
| 86 | } push_buffer; | ||
| 87 | |||
| 88 | struct { | ||
| 89 | void (*debug_init)(struct dentry *de); | ||
| 90 | void (*show_channel_cdma)(struct nvhost_master *, | ||
| 91 | struct nvhost_channel *, | ||
| 92 | struct output *, | ||
| 93 | int chid); | ||
| 94 | void (*show_channel_fifo)(struct nvhost_master *, | ||
| 95 | struct nvhost_channel *, | ||
| 96 | struct output *, | ||
| 97 | int chid); | ||
| 98 | void (*show_mlocks)(struct nvhost_master *m, | ||
| 99 | struct output *o); | ||
| 100 | |||
| 101 | } debug; | ||
| 102 | |||
| 103 | struct { | ||
| 104 | void (*reset)(struct nvhost_syncpt *, u32 id); | ||
| 105 | void (*reset_wait_base)(struct nvhost_syncpt *, u32 id); | ||
| 106 | void (*read_wait_base)(struct nvhost_syncpt *, u32 id); | ||
| 107 | u32 (*update_min)(struct nvhost_syncpt *, u32 id); | ||
| 108 | void (*cpu_incr)(struct nvhost_syncpt *, u32 id); | ||
| 109 | int (*wait_check)(struct nvhost_syncpt *sp, | ||
| 110 | struct nvmap_client *nvmap, | ||
| 111 | u32 waitchk_mask, | ||
| 112 | struct nvhost_waitchk *wait, | ||
| 113 | int num_waitchk); | ||
| 114 | void (*debug)(struct nvhost_syncpt *); | ||
| 115 | const char * (*name)(struct nvhost_syncpt *, u32 id); | ||
| 116 | int (*mutex_try_lock)(struct nvhost_syncpt *, | ||
| 117 | unsigned int idx); | ||
| 118 | void (*mutex_unlock)(struct nvhost_syncpt *, | ||
| 119 | unsigned int idx); | ||
| 120 | } syncpt; | ||
| 121 | |||
| 122 | struct { | ||
| 123 | void (*init_host_sync)(struct nvhost_intr *); | ||
| 124 | void (*set_host_clocks_per_usec)( | ||
| 125 | struct nvhost_intr *, u32 clocks); | ||
| 126 | void (*set_syncpt_threshold)( | ||
| 127 | struct nvhost_intr *, u32 id, u32 thresh); | ||
| 128 | void (*enable_syncpt_intr)(struct nvhost_intr *, u32 id); | ||
| 129 | void (*disable_all_syncpt_intrs)(struct nvhost_intr *); | ||
| 130 | int (*request_host_general_irq)(struct nvhost_intr *); | ||
| 131 | void (*free_host_general_irq)(struct nvhost_intr *); | ||
| 132 | int (*request_syncpt_irq)(struct nvhost_intr_syncpt *syncpt); | ||
| 133 | } intr; | ||
| 134 | |||
| 135 | struct { | ||
| 136 | struct nvhost_device *(*get_nvhost_device)(struct nvhost_master *host, | ||
| 137 | char *name); | ||
| 138 | } nvhost_dev; | ||
| 139 | }; | ||
| 140 | |||
| 141 | #endif /* _NVHOST_CHIP_SUPPORT_H_ */ | ||
diff --git a/drivers/video/tegra/host/debug.c b/drivers/video/tegra/host/debug.c new file mode 100644 index 00000000000..91436c903fc --- /dev/null +++ b/drivers/video/tegra/host/debug.c | |||
| @@ -0,0 +1,167 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/debug.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010 Google, Inc. | ||
| 5 | * Author: Erik Gilling <konkers@android.com> | ||
| 6 | * | ||
| 7 | * Copyright (C) 2011 NVIDIA Corporation | ||
| 8 | * | ||
| 9 | * This software is licensed under the terms of the GNU General Public | ||
| 10 | * License version 2, as published by the Free Software Foundation, and | ||
| 11 | * may be copied, distributed, and modified under those terms. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/debugfs.h> | ||
| 21 | #include <linux/seq_file.h> | ||
| 22 | |||
| 23 | #include <linux/io.h> | ||
| 24 | |||
| 25 | #include "dev.h" | ||
| 26 | #include "debug.h" | ||
| 27 | |||
| 28 | pid_t nvhost_debug_null_kickoff_pid; | ||
| 29 | unsigned int nvhost_debug_trace_cmdbuf; | ||
| 30 | |||
| 31 | pid_t nvhost_debug_force_timeout_pid; | ||
| 32 | u32 nvhost_debug_force_timeout_val; | ||
| 33 | u32 nvhost_debug_force_timeout_channel; | ||
| 34 | |||
| 35 | void nvhost_debug_output(struct output *o, const char* fmt, ...) | ||
| 36 | { | ||
| 37 | va_list args; | ||
| 38 | int len; | ||
| 39 | |||
| 40 | va_start(args, fmt); | ||
| 41 | len = vsnprintf(o->buf, sizeof(o->buf), fmt, args); | ||
| 42 | va_end(args); | ||
| 43 | o->fn(o->ctx, o->buf, len); | ||
| 44 | } | ||
| 45 | |||
/*
 * bus_for_each_dev() callback: dump FIFO and CDMA state for one channel
 * device into the output 'data'. Only open channels (refcount held) are
 * dumped, with reflock and cdma.lock taken so the snapshot is consistent.
 * Always returns 0 so iteration continues over all devices.
 */
static int show_channels(struct device *dev, void *data)
{
	struct nvhost_channel *ch;
	struct nvhost_device *nvdev = to_nvhost_device(dev);
	struct output *o = data;
	struct nvhost_master *m;

	if (nvdev == NULL)
		return 0;

	m = nvhost_get_host(nvdev);
	ch = nvdev->channel;
	if (ch) {
		mutex_lock(&ch->reflock);
		if (ch->refcount) {
			mutex_lock(&ch->cdma.lock);
			m->op.debug.show_channel_fifo(m, ch, o, nvdev->index);
			m->op.debug.show_channel_cdma(m, ch, o, nvdev->index);
			mutex_unlock(&ch->cdma.lock);
		}
		mutex_unlock(&ch->reflock);
	}

	return 0;
}
| 71 | |||
/*
 * Dump all syncpoint min/max values and non-zero wait bases to 'o'.
 * Syncpoints whose min and max are both zero (never used) are omitted.
 */
static void show_syncpts(struct nvhost_master *m, struct output *o)
{
	int i;
	BUG_ON(!m->op.syncpt.name);
	nvhost_debug_output(o, "---- syncpts ----\n");
	for (i = 0; i < m->syncpt.nb_pts; i++) {
		u32 max = nvhost_syncpt_read_max(&m->syncpt, i);
		/* update_min reads the live HW value as a side effect */
		u32 min = nvhost_syncpt_update_min(&m->syncpt, i);
		if (!min && !max)
			continue;
		nvhost_debug_output(o, "id %d (%s) min %d max %d\n",
				i, m->op.syncpt.name(&m->syncpt, i),
				min, max);
	}

	for (i = 0; i < m->syncpt.nb_bases; i++) {
		u32 base_val;
		base_val = nvhost_syncpt_read_wait_base(&m->syncpt, i);
		if (base_val)
			nvhost_debug_output(o, "waitbase id %d val %d\n",
					i, base_val);
	}

	nvhost_debug_output(o, "\n");
}
| 97 | |||
/*
 * Dump everything: module mutexes, syncpoints, and per-channel FIFO/CDMA
 * state. Holds a module-busy reference so host1x stays clocked while the
 * registers are read.
 */
static void show_all(struct nvhost_master *m, struct output *o)
{
	nvhost_module_busy(m->dev);

	m->op.debug.show_mlocks(m, o);
	show_syncpts(m, o);
	nvhost_debug_output(o, "---- channels ----\n");
	bus_for_each_dev(&nvhost_bus_type, NULL, o, show_channels);

	nvhost_module_idle(m->dev);
}
| 109 | |||
#ifdef CONFIG_DEBUG_FS
/* seq_file show: route the full debug dump into the seq_file. */
static int nvhost_debug_show(struct seq_file *s, void *unused)
{
	struct output o = {
		.fn = write_to_seqfile,
		.ctx = s
	};
	show_all(s->private, &o);
	return 0;
}

static int nvhost_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvhost_debug_show, inode->i_private);
}

static const struct file_operations nvhost_debug_fops = {
	.open		= nvhost_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Create the tegra_host debugfs directory: a "status" dump file plus
 * writable knobs for the null-kickoff / forced-timeout debug overrides
 * consumed by the channel submit path.
 */
void nvhost_debug_init(struct nvhost_master *master)
{
	/* NOTE(review): return value of debugfs_create_dir is not
	 * checked; later calls tolerate a NULL parent but not an
	 * ERR_PTR — confirm against this kernel's debugfs behavior. */
	struct dentry *de = debugfs_create_dir("tegra_host", NULL);

	debugfs_create_file("status", S_IRUGO, de,
			master, &nvhost_debug_fops);

	debugfs_create_u32("null_kickoff_pid", S_IRUGO|S_IWUSR, de,
			&nvhost_debug_null_kickoff_pid);
	debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
			&nvhost_debug_trace_cmdbuf);

	/* chip-specific debug files, if the chip provides them */
	if (master->op.debug.debug_init)
		master->op.debug.debug_init(de);

	debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
			&nvhost_debug_force_timeout_pid);
	debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
			&nvhost_debug_force_timeout_val);
	debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
			&nvhost_debug_force_timeout_channel);
}
#else
/* No-op stub when debugfs is compiled out. */
void nvhost_debug_init(struct nvhost_master *master)
{
}
#endif
| 160 | |||
/* Dump the full debug state to the kernel log via printk. */
void nvhost_debug_dump(struct nvhost_master *master)
{
	struct output o = {
		.fn = write_to_printk
	};
	show_all(master, &o);
}
diff --git a/drivers/video/tegra/host/debug.h b/drivers/video/tegra/host/debug.h new file mode 100644 index 00000000000..3dc156ab474 --- /dev/null +++ b/drivers/video/tegra/host/debug.h | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/debug.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Debug | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012 NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | #ifndef __NVHOST_DEBUG_H | ||
| 21 | #define __NVHOST_DEBUG_H | ||
| 22 | |||
| 23 | #include <linux/debugfs.h> | ||
| 24 | #include <linux/seq_file.h> | ||
| 25 | |||
| 26 | struct output { | ||
| 27 | void (*fn)(void *ctx, const char* str, size_t len); | ||
| 28 | void *ctx; | ||
| 29 | char buf[256]; | ||
| 30 | }; | ||
| 31 | |||
/* Output sink: append 'len' bytes to the seq_file passed as ctx. */
static inline void write_to_seqfile(void *ctx, const char* str, size_t len)
{
	seq_write((struct seq_file *)ctx, str, len);
}
| 36 | |||
/* Output sink: emit text via printk. 'ctx' and 'len' are unused;
 * relies on 'str' being NUL-terminated (vsnprintf guarantees this). */
static inline void write_to_printk(void *ctx, const char* str, size_t len)
{
	printk(KERN_INFO "%s", str);
}
| 41 | |||
| 42 | void nvhost_debug_output(struct output *o, const char* fmt, ...); | ||
| 43 | |||
| 44 | extern pid_t nvhost_debug_null_kickoff_pid; | ||
| 45 | extern pid_t nvhost_debug_force_timeout_pid; | ||
| 46 | extern u32 nvhost_debug_force_timeout_val; | ||
| 47 | extern u32 nvhost_debug_force_timeout_channel; | ||
| 48 | extern unsigned int nvhost_debug_trace_cmdbuf; | ||
| 49 | |||
| 50 | #endif /*__NVHOST_DEBUG_H */ | ||
diff --git a/drivers/video/tegra/host/dev.c b/drivers/video/tegra/host/dev.c new file mode 100644 index 00000000000..8f0c0393401 --- /dev/null +++ b/drivers/video/tegra/host/dev.c | |||
| @@ -0,0 +1,635 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/dev.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Driver Entrypoint | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/slab.h> | ||
| 22 | #include <linux/string.h> | ||
| 23 | #include <linux/spinlock.h> | ||
| 24 | #include <linux/fs.h> | ||
| 25 | #include <linux/cdev.h> | ||
| 26 | #include <linux/uaccess.h> | ||
| 27 | #include <linux/file.h> | ||
| 28 | #include <linux/clk.h> | ||
| 29 | #include <linux/hrtimer.h> | ||
| 30 | |||
| 31 | #include "dev.h" | ||
| 32 | #define CREATE_TRACE_POINTS | ||
| 33 | #include <trace/events/nvhost.h> | ||
| 34 | |||
| 35 | #include <linux/io.h> | ||
| 36 | |||
| 37 | #include <linux/nvhost.h> | ||
| 38 | #include <linux/nvhost_ioctl.h> | ||
| 39 | #include <mach/nvmap.h> | ||
| 40 | #include <mach/gpufuse.h> | ||
| 41 | #include <mach/hardware.h> | ||
| 42 | #include <mach/iomap.h> | ||
| 43 | |||
| 44 | #include "debug.h" | ||
| 45 | #include "nvhost_job.h" | ||
| 46 | #include "t20/t20.h" | ||
| 47 | #include "t30/t30.h" | ||
| 48 | #include "bus_client.h" | ||
| 49 | |||
| 50 | #define DRIVER_NAME "host1x" | ||
| 51 | |||
| 52 | int nvhost_major; | ||
| 53 | int nvhost_minor; | ||
| 54 | |||
| 55 | static unsigned int register_sets; | ||
| 56 | |||
| 57 | struct nvhost_ctrl_userctx { | ||
| 58 | struct nvhost_master *dev; | ||
| 59 | u32 *mod_locks; | ||
| 60 | }; | ||
| 61 | |||
/*
 * Release the ctrl device file: drop any module locks the context still
 * holds, then free it. Slot 0 of mod_locks is special — it represents a
 * host1x power (module-busy) reference rather than a hardware mutex.
 */
static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_ctrl_userctx *priv = filp->private_data;
	int i;

	trace_nvhost_ctrlrelease(priv->dev->dev->name);

	filp->private_data = NULL;
	if (priv->mod_locks[0])
		nvhost_module_idle(priv->dev->dev);
	for (i = 1; i < priv->dev->syncpt.nb_mlocks; i++)
		if (priv->mod_locks[i])
			nvhost_mutex_unlock(&priv->dev->syncpt, i);
	kfree(priv->mod_locks);
	kfree(priv);
	return 0;
}
| 79 | |||
| 80 | static int nvhost_ctrlopen(struct inode *inode, struct file *filp) | ||
| 81 | { | ||
| 82 | struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev); | ||
| 83 | struct nvhost_ctrl_userctx *priv; | ||
| 84 | u32 *mod_locks; | ||
| 85 | |||
| 86 | trace_nvhost_ctrlopen(host->dev->name); | ||
| 87 | |||
| 88 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
| 89 | mod_locks = kzalloc(sizeof(u32) * host->syncpt.nb_mlocks, GFP_KERNEL); | ||
| 90 | |||
| 91 | if (!(priv && mod_locks)) { | ||
| 92 | kfree(priv); | ||
| 93 | kfree(mod_locks); | ||
| 94 | return -ENOMEM; | ||
| 95 | } | ||
| 96 | |||
| 97 | priv->dev = host; | ||
| 98 | priv->mod_locks = mod_locks; | ||
| 99 | filp->private_data = priv; | ||
| 100 | return 0; | ||
| 101 | } | ||
| 102 | |||
| 103 | static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx, | ||
| 104 | struct nvhost_ctrl_syncpt_read_args *args) | ||
| 105 | { | ||
| 106 | if (args->id >= ctx->dev->syncpt.nb_pts) | ||
| 107 | return -EINVAL; | ||
| 108 | args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id); | ||
| 109 | trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value); | ||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx, | ||
| 114 | struct nvhost_ctrl_syncpt_incr_args *args) | ||
| 115 | { | ||
| 116 | if (args->id >= ctx->dev->syncpt.nb_pts) | ||
| 117 | return -EINVAL; | ||
| 118 | trace_nvhost_ioctl_ctrl_syncpt_incr(args->id); | ||
| 119 | nvhost_syncpt_incr(&ctx->dev->syncpt, args->id); | ||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | |||
/*
 * Block until syncpoint args->id reaches args->thresh, or the timeout
 * (milliseconds; NVHOST_NO_TIMEOUT waits forever) expires. The syncpoint
 * value observed at completion is returned in args->value.
 */
static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_syncpt_waitex_args *args)
{
	u32 timeout;
	int err;
	if (args->id >= ctx->dev->syncpt.nb_pts)
		return -EINVAL;
	if (args->timeout == NVHOST_NO_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = (u32)msecs_to_jiffies(args->timeout);

	err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
					args->thresh, timeout, &args->value);
	trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
			args->timeout, args->value, err);

	return err;
}
| 142 | |||
/*
 * Lock or unlock a module mutex for userspace. Lock state is tracked in
 * ctx->mod_locks so the release path can clean up; repeated lock/unlock
 * requests in the same state are silently ignored. Slot 0 maps to a
 * host1x power reference instead of a hardware mutex.
 */
static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_mutex_args *args)
{
	int err = 0;
	if (args->id >= ctx->dev->syncpt.nb_mlocks ||
	    args->lock > 1)
		return -EINVAL;

	trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
	if (args->lock && !ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_busy(ctx->dev->dev);
		else
			err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
					args->id);
		if (!err)
			ctx->mod_locks[args->id] = 1;
	} else if (!args->lock && ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_idle(ctx->dev->dev);
		else
			nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
		ctx->mod_locks[args->id] = 0;
	}
	return err;
}
| 169 | |||
| 170 | static struct nvhost_device *get_ndev_by_moduleid(struct nvhost_master *host, | ||
| 171 | u32 id) | ||
| 172 | { | ||
| 173 | int i; | ||
| 174 | |||
| 175 | for (i = 0; i < host->nb_channels; i++) { | ||
| 176 | struct nvhost_device *ndev = host->channels[i].dev; | ||
| 177 | |||
| 178 | /* display and dsi do not use channel for register programming. | ||
| 179 | * so their channels do not have device instance. | ||
| 180 | * hence skip such channels from here. */ | ||
| 181 | if (ndev == NULL) | ||
| 182 | continue; | ||
| 183 | |||
| 184 | if (id == ndev->moduleid) | ||
| 185 | return ndev; | ||
| 186 | } | ||
| 187 | return NULL; | ||
| 188 | } | ||
| 189 | |||
| 190 | static int nvhost_ioctl_ctrl_module_regrdwr(struct nvhost_ctrl_userctx *ctx, | ||
| 191 | struct nvhost_ctrl_module_regrdwr_args *args) | ||
| 192 | { | ||
| 193 | u32 num_offsets = args->num_offsets; | ||
| 194 | u32 *offsets = args->offsets; | ||
| 195 | u32 *values = args->values; | ||
| 196 | u32 vals[64]; | ||
| 197 | struct nvhost_device *ndev; | ||
| 198 | |||
| 199 | trace_nvhost_ioctl_ctrl_module_regrdwr(args->id, | ||
| 200 | args->num_offsets, args->write); | ||
| 201 | /* Check that there is something to read and that block size is | ||
| 202 | * u32 aligned */ | ||
| 203 | if (num_offsets == 0 || args->block_size & 3) | ||
| 204 | return -EINVAL; | ||
| 205 | |||
| 206 | ndev = get_ndev_by_moduleid(ctx->dev, args->id); | ||
| 207 | if (!ndev) | ||
| 208 | return -EINVAL; | ||
| 209 | |||
| 210 | while (num_offsets--) { | ||
| 211 | int remaining = args->block_size >> 2; | ||
| 212 | u32 offs; | ||
| 213 | if (get_user(offs, offsets)) | ||
| 214 | return -EFAULT; | ||
| 215 | offsets++; | ||
| 216 | while (remaining) { | ||
| 217 | int batch = min(remaining, 64); | ||
| 218 | if (args->write) { | ||
| 219 | if (copy_from_user(vals, values, | ||
| 220 | batch*sizeof(u32))) | ||
| 221 | return -EFAULT; | ||
| 222 | nvhost_write_module_regs(ndev, | ||
| 223 | offs, batch, vals); | ||
| 224 | } else { | ||
| 225 | nvhost_read_module_regs(ndev, | ||
| 226 | offs, batch, vals); | ||
| 227 | if (copy_to_user(values, vals, | ||
| 228 | batch*sizeof(u32))) | ||
| 229 | return -EFAULT; | ||
| 230 | } | ||
| 231 | remaining -= batch; | ||
| 232 | offs += batch; | ||
| 233 | values += batch; | ||
| 234 | } | ||
| 235 | } | ||
| 236 | |||
| 237 | return 0; | ||
| 238 | } | ||
| 239 | |||
/* NVHOST_IOCTL_CTRL_GET_VERSION: report the newest submit interface
 * revision this driver supports. */
static int nvhost_ioctl_ctrl_get_version(struct nvhost_ctrl_userctx *ctx,
	struct nvhost_get_param_args *args)
{
	args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
	return 0;
}
| 246 | |||
| 247 | static long nvhost_ctrlctl(struct file *filp, | ||
| 248 | unsigned int cmd, unsigned long arg) | ||
| 249 | { | ||
| 250 | struct nvhost_ctrl_userctx *priv = filp->private_data; | ||
| 251 | u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE]; | ||
| 252 | int err = 0; | ||
| 253 | |||
| 254 | if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) || | ||
| 255 | (_IOC_NR(cmd) == 0) || | ||
| 256 | (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST)) | ||
| 257 | return -EFAULT; | ||
| 258 | |||
| 259 | BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE); | ||
| 260 | |||
| 261 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | ||
| 262 | if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd))) | ||
| 263 | return -EFAULT; | ||
| 264 | } | ||
| 265 | |||
| 266 | switch (cmd) { | ||
| 267 | case NVHOST_IOCTL_CTRL_SYNCPT_READ: | ||
| 268 | err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf); | ||
| 269 | break; | ||
| 270 | case NVHOST_IOCTL_CTRL_SYNCPT_INCR: | ||
| 271 | err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf); | ||
| 272 | break; | ||
| 273 | case NVHOST_IOCTL_CTRL_SYNCPT_WAIT: | ||
| 274 | err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf); | ||
| 275 | break; | ||
| 276 | case NVHOST_IOCTL_CTRL_MODULE_MUTEX: | ||
| 277 | err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf); | ||
| 278 | break; | ||
| 279 | case NVHOST_IOCTL_CTRL_MODULE_REGRDWR: | ||
| 280 | err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf); | ||
| 281 | break; | ||
| 282 | case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX: | ||
| 283 | err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf); | ||
| 284 | break; | ||
| 285 | case NVHOST_IOCTL_CTRL_GET_VERSION: | ||
| 286 | err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf); | ||
| 287 | break; | ||
| 288 | default: | ||
| 289 | err = -ENOTTY; | ||
| 290 | break; | ||
| 291 | } | ||
| 292 | |||
| 293 | if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) | ||
| 294 | err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)); | ||
| 295 | |||
| 296 | return err; | ||
| 297 | } | ||
| 298 | |||
/* File operations for the nvhost control character device node. */
static const struct file_operations nvhost_ctrlops = {
	.owner = THIS_MODULE,
	.release = nvhost_ctrlrelease,
	.open = nvhost_ctrlopen,
	.unlocked_ioctl = nvhost_ctrlctl
};
| 305 | |||
| 306 | static void power_on_host(struct nvhost_device *dev) | ||
| 307 | { | ||
| 308 | struct nvhost_master *host = nvhost_get_drvdata(dev); | ||
| 309 | nvhost_syncpt_reset(&host->syncpt); | ||
| 310 | nvhost_intr_start(&host->intr, clk_get_rate(dev->clk[0])); | ||
| 311 | } | ||
| 312 | |||
| 313 | static int power_off_host(struct nvhost_device *dev) | ||
| 314 | { | ||
| 315 | struct nvhost_master *host = nvhost_get_drvdata(dev); | ||
| 316 | nvhost_syncpt_save(&host->syncpt); | ||
| 317 | nvhost_intr_stop(&host->intr); | ||
| 318 | return 0; | ||
| 319 | } | ||
| 320 | |||
| 321 | static int __devinit nvhost_user_init(struct nvhost_master *host) | ||
| 322 | { | ||
| 323 | int err, devno; | ||
| 324 | |||
| 325 | host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME); | ||
| 326 | if (IS_ERR(host->nvhost_class)) { | ||
| 327 | err = PTR_ERR(host->nvhost_class); | ||
| 328 | dev_err(&host->dev->dev, "failed to create class\n"); | ||
| 329 | goto fail; | ||
| 330 | } | ||
| 331 | |||
| 332 | err = alloc_chrdev_region(&devno, nvhost_minor, | ||
| 333 | host->nb_channels + 1, IFACE_NAME); | ||
| 334 | nvhost_major = MAJOR(devno); | ||
| 335 | if (err < 0) { | ||
| 336 | dev_err(&host->dev->dev, "failed to reserve chrdev region\n"); | ||
| 337 | goto fail; | ||
| 338 | } | ||
| 339 | |||
| 340 | cdev_init(&host->cdev, &nvhost_ctrlops); | ||
| 341 | host->cdev.owner = THIS_MODULE; | ||
| 342 | devno = MKDEV(nvhost_major, nvhost_minor + host->nb_channels); | ||
| 343 | err = cdev_add(&host->cdev, devno, 1); | ||
| 344 | if (err < 0) | ||
| 345 | goto fail; | ||
| 346 | host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL, | ||
| 347 | IFACE_NAME "-ctrl"); | ||
| 348 | if (IS_ERR(host->ctrl)) { | ||
| 349 | err = PTR_ERR(host->ctrl); | ||
| 350 | dev_err(&host->dev->dev, "failed to create ctrl device\n"); | ||
| 351 | goto fail; | ||
| 352 | } | ||
| 353 | |||
| 354 | return 0; | ||
| 355 | fail: | ||
| 356 | return err; | ||
| 357 | } | ||
| 358 | |||
/* Look up a client nvhost_device by name (e.g. "gr2d") through the
 * chip-specific op table.  BUG_ON guards against calls before chip
 * support has been initialized. */
struct nvhost_device *nvhost_get_device(char *name)
{
	BUG_ON(!host_device_op(nvhost).get_nvhost_device);
	return host_device_op(nvhost).get_nvhost_device(nvhost, name);
}
| 364 | |||
| 365 | static void nvhost_remove_chip_support(struct nvhost_master *host) | ||
| 366 | { | ||
| 367 | kfree(host->channels); | ||
| 368 | host->channels = 0; | ||
| 369 | |||
| 370 | kfree(host->syncpt.min_val); | ||
| 371 | host->syncpt.min_val = 0; | ||
| 372 | |||
| 373 | kfree(host->syncpt.max_val); | ||
| 374 | host->syncpt.max_val = 0; | ||
| 375 | |||
| 376 | kfree(host->syncpt.base_val); | ||
| 377 | host->syncpt.base_val = 0; | ||
| 378 | |||
| 379 | kfree(host->intr.syncpt); | ||
| 380 | host->intr.syncpt = 0; | ||
| 381 | |||
| 382 | kfree(host->syncpt.lock_counts); | ||
| 383 | host->syncpt.lock_counts = 0; | ||
| 384 | } | ||
| 385 | |||
| 386 | static int __devinit nvhost_init_chip_support(struct nvhost_master *host) | ||
| 387 | { | ||
| 388 | int err; | ||
| 389 | switch (tegra_get_chipid()) { | ||
| 390 | case TEGRA_CHIPID_TEGRA2: | ||
| 391 | err = nvhost_init_t20_support(host); | ||
| 392 | break; | ||
| 393 | |||
| 394 | case TEGRA_CHIPID_TEGRA3: | ||
| 395 | err = nvhost_init_t30_support(host); | ||
| 396 | break; | ||
| 397 | default: | ||
| 398 | return -ENODEV; | ||
| 399 | } | ||
| 400 | |||
| 401 | if (err) | ||
| 402 | return err; | ||
| 403 | |||
| 404 | /* allocate items sized in chip specific support init */ | ||
| 405 | host->channels = kzalloc(sizeof(struct nvhost_channel) * | ||
| 406 | host->nb_channels, GFP_KERNEL); | ||
| 407 | |||
| 408 | host->syncpt.min_val = kzalloc(sizeof(atomic_t) * | ||
| 409 | host->syncpt.nb_pts, GFP_KERNEL); | ||
| 410 | |||
| 411 | host->syncpt.max_val = kzalloc(sizeof(atomic_t) * | ||
| 412 | host->syncpt.nb_pts, GFP_KERNEL); | ||
| 413 | |||
| 414 | host->syncpt.base_val = kzalloc(sizeof(u32) * | ||
| 415 | host->syncpt.nb_bases, GFP_KERNEL); | ||
| 416 | |||
| 417 | host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) * | ||
| 418 | host->syncpt.nb_pts, GFP_KERNEL); | ||
| 419 | |||
| 420 | host->syncpt.lock_counts = kzalloc(sizeof(atomic_t) * | ||
| 421 | host->syncpt.nb_mlocks, GFP_KERNEL); | ||
| 422 | |||
| 423 | if (!(host->channels && host->syncpt.min_val && | ||
| 424 | host->syncpt.max_val && host->syncpt.base_val && | ||
| 425 | host->intr.syncpt && host->syncpt.lock_counts)) { | ||
| 426 | /* frees happen in the support removal phase */ | ||
| 427 | return -ENOMEM; | ||
| 428 | } | ||
| 429 | |||
| 430 | return 0; | ||
| 431 | } | ||
| 432 | |||
/* Static MMIO and IRQ resources of the host1x complex.  MEM index 0
 * (the host1x register window) and both IRQ entries are consumed by
 * nvhost_probe(); the remaining MMIO ranges belong to client modules. */
static struct resource nvhost_resources[] = {
	{
		.start = TEGRA_HOST1X_BASE,
		.end = TEGRA_HOST1X_BASE + TEGRA_HOST1X_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_DISPLAY_BASE,
		.end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_DISPLAY2_BASE,
		.end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_VI_BASE,
		.end = TEGRA_VI_BASE + TEGRA_VI_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_ISP_BASE,
		.end = TEGRA_ISP_BASE + TEGRA_ISP_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = TEGRA_MPE_BASE,
		.end = TEGRA_MPE_BASE + TEGRA_MPE_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		/* syncpoint threshold interrupt range */
		.start = INT_SYNCPT_THRESH_BASE,
		.end = INT_SYNCPT_THRESH_BASE + INT_SYNCPT_THRESH_NR - 1,
		.flags = IORESOURCE_IRQ,
	},
	{
		/* general host1x interrupt */
		.start = INT_HOST1X_MPCORE_GENERAL,
		.end = INT_HOST1X_MPCORE_GENERAL,
		.flags = IORESOURCE_IRQ,
	},
};
| 475 | |||
/* The host1x master device itself.  The poweron/poweroff hooks save
 * and restore syncpoint/interrupt state across clock gating. */
struct nvhost_device tegra_grhost_device = {
	.name = DRIVER_NAME,
	.id = -1,
	.resource = nvhost_resources,
	.num_resources = ARRAY_SIZE(nvhost_resources),
	.finalize_poweron = power_on_host,
	.prepare_poweroff = power_off_host,
	.clocks = {{"host1x", UINT_MAX}, {} },
	NVHOST_MODULE_NO_POWERGATE_IDS,
};
| 486 | |||
| 487 | static int __devinit nvhost_probe(struct nvhost_device *dev) | ||
| 488 | { | ||
| 489 | struct nvhost_master *host; | ||
| 490 | struct resource *regs, *intr0, *intr1; | ||
| 491 | int i, err; | ||
| 492 | |||
| 493 | regs = nvhost_get_resource(dev, IORESOURCE_MEM, 0); | ||
| 494 | intr0 = nvhost_get_resource(dev, IORESOURCE_IRQ, 0); | ||
| 495 | intr1 = nvhost_get_resource(dev, IORESOURCE_IRQ, 1); | ||
| 496 | |||
| 497 | if (!regs || !intr0 || !intr1) { | ||
| 498 | dev_err(&dev->dev, "missing required platform resources\n"); | ||
| 499 | return -ENXIO; | ||
| 500 | } | ||
| 501 | |||
| 502 | host = kzalloc(sizeof(*host), GFP_KERNEL); | ||
| 503 | if (!host) | ||
| 504 | return -ENOMEM; | ||
| 505 | |||
| 506 | host->nvmap = nvmap_create_client(nvmap_dev, "nvhost"); | ||
| 507 | if (!host->nvmap) { | ||
| 508 | dev_err(&dev->dev, "unable to create nvmap client\n"); | ||
| 509 | err = -EIO; | ||
| 510 | goto fail; | ||
| 511 | } | ||
| 512 | |||
| 513 | host->reg_mem = request_mem_region(regs->start, | ||
| 514 | resource_size(regs), dev->name); | ||
| 515 | if (!host->reg_mem) { | ||
| 516 | dev_err(&dev->dev, "failed to get host register memory\n"); | ||
| 517 | err = -ENXIO; | ||
| 518 | goto fail; | ||
| 519 | } | ||
| 520 | |||
| 521 | host->aperture = ioremap(regs->start, resource_size(regs)); | ||
| 522 | if (!host->aperture) { | ||
| 523 | dev_err(&dev->dev, "failed to remap host registers\n"); | ||
| 524 | err = -ENXIO; | ||
| 525 | goto fail; | ||
| 526 | } | ||
| 527 | |||
| 528 | err = nvhost_init_chip_support(host); | ||
| 529 | if (err) { | ||
| 530 | dev_err(&dev->dev, "failed to init chip support\n"); | ||
| 531 | goto fail; | ||
| 532 | } | ||
| 533 | |||
| 534 | /* Register host1x device as bus master */ | ||
| 535 | host->dev = dev; | ||
| 536 | |||
| 537 | /* Give pointer to host1x via driver */ | ||
| 538 | nvhost_set_drvdata(dev, host); | ||
| 539 | |||
| 540 | nvhost_bus_add_host(host); | ||
| 541 | |||
| 542 | err = nvhost_intr_init(&host->intr, intr1->start, intr0->start); | ||
| 543 | if (err) | ||
| 544 | goto fail; | ||
| 545 | |||
| 546 | err = nvhost_user_init(host); | ||
| 547 | if (err) | ||
| 548 | goto fail; | ||
| 549 | |||
| 550 | err = nvhost_module_init(&tegra_grhost_device); | ||
| 551 | if (err) | ||
| 552 | goto fail; | ||
| 553 | |||
| 554 | for (i = 0; i < host->dev->num_clks; i++) | ||
| 555 | clk_enable(host->dev->clk[i]); | ||
| 556 | nvhost_syncpt_reset(&host->syncpt); | ||
| 557 | for (i = 0; i < host->dev->num_clks; i++) | ||
| 558 | clk_disable(host->dev->clk[0]); | ||
| 559 | |||
| 560 | nvhost_debug_init(host); | ||
| 561 | |||
| 562 | dev_info(&dev->dev, "initialized\n"); | ||
| 563 | return 0; | ||
| 564 | |||
| 565 | fail: | ||
| 566 | nvhost_remove_chip_support(host); | ||
| 567 | if (host->nvmap) | ||
| 568 | nvmap_client_put(host->nvmap); | ||
| 569 | kfree(host); | ||
| 570 | return err; | ||
| 571 | } | ||
| 572 | |||
| 573 | static int __exit nvhost_remove(struct nvhost_device *dev) | ||
| 574 | { | ||
| 575 | struct nvhost_master *host = nvhost_get_drvdata(dev); | ||
| 576 | nvhost_intr_deinit(&host->intr); | ||
| 577 | nvhost_remove_chip_support(host); | ||
| 578 | return 0; | ||
| 579 | } | ||
| 580 | |||
| 581 | static int nvhost_suspend(struct nvhost_device *dev, pm_message_t state) | ||
| 582 | { | ||
| 583 | struct nvhost_master *host = nvhost_get_drvdata(dev); | ||
| 584 | int ret = 0; | ||
| 585 | |||
| 586 | dev_info(&dev->dev, "suspending\n"); | ||
| 587 | ret = nvhost_module_suspend(host->dev, true); | ||
| 588 | dev_info(&dev->dev, "suspend status: %d\n", ret); | ||
| 589 | |||
| 590 | return ret; | ||
| 591 | } | ||
| 592 | |||
/* System resume: no-op apart from logging. */
static int nvhost_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
| 598 | |||
/* Driver bound to the host1x master device registered above. */
static struct nvhost_driver nvhost_driver = {
	.probe = nvhost_probe,
	.remove = __exit_p(nvhost_remove),
	.suspend = nvhost_suspend,
	.resume = nvhost_resume,
	.driver = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME
	}
};
| 609 | |||
/* Module init: cache the SKU's GPU register-set count (exposed through
 * the read-only module parameter below) and register the driver. */
static int __init nvhost_mod_init(void)
{
	register_sets = tegra_gpu_register_sets();
	return nvhost_driver_register(&nvhost_driver);
}
| 615 | |||
/* Module exit: unregister the host1x master driver. */
static void __exit nvhost_mod_exit(void)
{
	nvhost_driver_unregister(&nvhost_driver);
}
| 620 | |||
/* host1x master device needs nvmap to be instantiated first.
 * nvmap is instantiated via fs_initcall.
 * Hence instantiate host1x master device using rootfs_initcall
 * which is one level after fs_initcall. */
rootfs_initcall(nvhost_mod_init);
module_exit(nvhost_mod_exit);

/* read-only view of the register-set count probed at init */
module_param_call(register_sets, NULL, param_get_uint, &register_sets, 0444);
MODULE_PARM_DESC(register_sets, "Number of register sets");

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Graphics host driver for Tegra products");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform-nvhost");
diff --git a/drivers/video/tegra/host/dev.h b/drivers/video/tegra/host/dev.h new file mode 100644 index 00000000000..74d7e16fc27 --- /dev/null +++ b/drivers/video/tegra/host/dev.h | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/dev.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Driver Entrypoint | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
#ifndef __NVHOST_DEV_H
#define __NVHOST_DEV_H

#include "nvhost_acm.h"
#include "nvhost_syncpt.h"
#include "nvhost_intr.h"
#include "nvhost_channel.h"
#include "chip_support.h"

/* longest string captured into a trace event */
#define TRACE_MAX_LENGTH 128U
/* base name for the device class and character device nodes */
#define IFACE_NAME "nvhost"

/* chrdev numbers reserved by nvhost_user_init() */
extern int nvhost_major;
extern int nvhost_minor;

struct nvhost_hwctx;

/* Per-host1x-instance state shared by all client modules. */
struct nvhost_master {
	void __iomem *aperture;		/* mapped host1x register window */
	void __iomem *sync_aperture;	/* sync register sub-range */
	struct resource *reg_mem;	/* claimed MMIO region */
	struct class *nvhost_class;
	struct cdev cdev;		/* the "-ctrl" character device */
	struct device *ctrl;		/* the "-ctrl" device node */
	struct nvhost_syncpt syncpt;	/* syncpoint shadow state */
	struct nvmap_client *nvmap;	/* memory manager client */
	struct nvhost_intr intr;	/* interrupt bookkeeping */
	struct nvhost_device *dev;	/* the host1x bus-master device */
	struct nvhost_channel *channels; /* array of nb_channels entries */
	u32 nb_channels;

	struct nvhost_chip_support op;	/* chip-specific function tables */

	atomic_t clientid;		/* id source for channel clients */
};

extern struct nvhost_master *nvhost;

void nvhost_debug_init(struct nvhost_master *master);
void nvhost_debug_dump(struct nvhost_master *master);

/* shortcut to the chip-specific device op table */
#define host_device_op(host) (host->op.nvhost_dev)

/* look up a client nvhost_device by name (e.g. "gr2d") */
struct nvhost_device *nvhost_get_device(char *name);

extern pid_t nvhost_debug_null_kickoff_pid;

#endif
diff --git a/drivers/video/tegra/host/dsi/Makefile b/drivers/video/tegra/host/dsi/Makefile new file mode 100644 index 00000000000..eb94d3ec492 --- /dev/null +++ b/drivers/video/tegra/host/dsi/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | GCOV_PROFILE := y | ||
| 2 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
| 3 | |||
| 4 | nvhost-dsi-objs = \ | ||
| 5 | dsi.o | ||
| 6 | |||
| 7 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-dsi.o | ||
diff --git a/drivers/video/tegra/host/dsi/dsi.c b/drivers/video/tegra/host/dsi/dsi.c new file mode 100644 index 00000000000..0e49f591574 --- /dev/null +++ b/drivers/video/tegra/host/dsi/dsi.c | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/dsi/dsi.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics DSI | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "dev.h" | ||
| 22 | #include "bus_client.h" | ||
| 23 | |||
/* Probe: hand the device to the common nvhost client init. */
static int dsi_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}

/* Remove: nothing to undo yet. */
static int __exit dsi_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}

/* Suspend: defer to the common client suspend path. */
static int dsi_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}

/* Resume: no-op apart from logging. */
static int dsi_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
| 45 | |||
/* Filled in by dsi_init() from the chip's device table. */
struct nvhost_device *dsi_device;

/* Driver for the "dsi" client device. */
static struct nvhost_driver dsi_driver = {
	.probe = dsi_probe,
	.remove = __exit_p(dsi_remove),
#ifdef CONFIG_PM
	.suspend = dsi_suspend,
	.resume = dsi_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "dsi",
	}
};
| 60 | |||
/* Look up the chip's "dsi" device, register it on the nvhost bus and
 * bind the driver.
 * NOTE(review): if nvhost_driver_register() fails, the device stays
 * registered -- confirm whether it should be unregistered here. */
static int __init dsi_init(void)
{
	int err;

	dsi_device = nvhost_get_device("dsi");
	if (!dsi_device)
		return -ENXIO;

	err = nvhost_device_register(dsi_device);
	if (err)
		return err;

	return nvhost_driver_register(&dsi_driver);
}
| 75 | |||
/* Module exit: unregister the driver.
 * NOTE(review): the device registered in dsi_init() is not
 * unregistered here -- verify this is intended. */
static void __exit dsi_exit(void)
{
	nvhost_driver_unregister(&dsi_driver);
}

module_init(dsi_init);
module_exit(dsi_exit);
diff --git a/drivers/video/tegra/host/gr2d/Makefile b/drivers/video/tegra/host/gr2d/Makefile new file mode 100644 index 00000000000..a79a2101677 --- /dev/null +++ b/drivers/video/tegra/host/gr2d/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | GCOV_PROFILE := y | ||
| 2 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
| 3 | |||
| 4 | nvhost-gr2d-objs = \ | ||
| 5 | gr2d.o | ||
| 6 | |||
| 7 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr2d.o | ||
diff --git a/drivers/video/tegra/host/gr2d/gr2d.c b/drivers/video/tegra/host/gr2d/gr2d.c new file mode 100644 index 00000000000..f88eb72e0a4 --- /dev/null +++ b/drivers/video/tegra/host/gr2d/gr2d.c | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/gr2d/gr2d.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics 2D | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "dev.h" | ||
| 22 | #include "bus_client.h" | ||
| 23 | |||
/* Probe: hand the device to the common nvhost client init. */
static int __devinit gr2d_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}

/* Remove: nothing to undo yet. */
static int __exit gr2d_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}

/* Suspend: defer to the common client suspend path. */
static int gr2d_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}

/* Resume: no-op apart from logging. */
static int gr2d_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
| 45 | |||
/* Filled in by gr2d_init() from the chip's device table. */
struct nvhost_device *gr2d_device;

/* Driver for the "gr2d" client device. */
static struct nvhost_driver gr2d_driver = {
	.probe = gr2d_probe,
	.remove = __exit_p(gr2d_remove),
#ifdef CONFIG_PM
	.suspend = gr2d_suspend,
	.resume = gr2d_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "gr2d",
	}
};
| 60 | |||
/* Look up the chip's "gr2d" device, register it on the nvhost bus and
 * bind the driver.
 * NOTE(review): if nvhost_driver_register() fails, the device stays
 * registered -- confirm whether it should be unregistered here. */
static int __init gr2d_init(void)
{
	int err;

	gr2d_device = nvhost_get_device("gr2d");
	if (!gr2d_device)
		return -ENXIO;

	err = nvhost_device_register(gr2d_device);
	if (err)
		return err;

	return nvhost_driver_register(&gr2d_driver);
}
| 75 | |||
/* Module exit: unregister the driver.
 * NOTE(review): the device registered in gr2d_init() is not
 * unregistered here -- verify this is intended. */
static void __exit gr2d_exit(void)
{
	nvhost_driver_unregister(&gr2d_driver);
}

module_init(gr2d_init);
module_exit(gr2d_exit);
diff --git a/drivers/video/tegra/host/gr3d/Makefile b/drivers/video/tegra/host/gr3d/Makefile new file mode 100644 index 00000000000..dfbd078ab42 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/Makefile | |||
| @@ -0,0 +1,10 @@ | |||
| 1 | GCOV_PROFILE := y | ||
| 2 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
| 3 | |||
| 4 | nvhost-gr3d-objs = \ | ||
| 5 | gr3d.o \ | ||
| 6 | gr3d_t20.o \ | ||
| 7 | gr3d_t30.o \ | ||
| 8 | scale3d.o | ||
| 9 | |||
| 10 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-gr3d.o | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d.c b/drivers/video/tegra/host/gr3d/gr3d.c new file mode 100644 index 00000000000..f387d54e585 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d.c | |||
| @@ -0,0 +1,211 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/gr3d/gr3d.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host 3D | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012 NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <mach/nvmap.h> | ||
| 22 | #include <linux/slab.h> | ||
| 23 | |||
| 24 | #include "t20/t20.h" | ||
| 25 | #include "host1x/host1x_channel.h" | ||
| 26 | #include "host1x/host1x_hardware.h" | ||
| 27 | #include "host1x/host1x_syncpt.h" | ||
| 28 | #include "nvhost_hwctx.h" | ||
| 29 | #include "dev.h" | ||
| 30 | #include "gr3d.h" | ||
| 31 | #include "bus_client.h" | ||
| 32 | |||
| 33 | #ifndef TEGRA_POWERGATE_3D1 | ||
| 34 | #define TEGRA_POWERGATE_3D1 -1 | ||
| 35 | #endif | ||
| 36 | |||
/* Emit the opening opcodes of a context-restore gather into ptr
 * (4 words): switch to the host class, bump the wait base by
 * restore_incrs, switch to the 3D class and reset PSEQ_QUAD_ID. */
void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *p, u32 *ptr)
{
	/* set class to host */
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
	/* increment sync point base */
	ptr[1] = nvhost_class_host_incr_syncpt_base(p->waitbase,
			p->restore_incrs);
	/* set class to 3D */
	ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	/* program PSEQ_QUAD_ID */
	ptr[3] = nvhost_opcode_imm(AR3D_PSEQ_QUAD_ID, 0);
}

/* Emit a 1-word INCR header restoring `count` consecutive registers
 * starting at start_reg; register data follows in the gather. */
void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_incr(start_reg, count);
}

/* Emit a 2-word header restoring `count` words through an offset/data
 * register pair (indirect register access). */
void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg, u32 offset,
		u32 data_reg, u32 count)
{
	ptr[0] = nvhost_opcode_imm(offset_reg, offset);
	ptr[1] = nvhost_opcode_nonincr(data_reg, count);
}

/* Emit the closing opcode of the restore gather. */
void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *p, u32 *ptr)
{
	/* syncpt increment to track restore gather. */
	ptr[0] = nvhost_opcode_imm_incr_syncpt(
			NV_SYNCPT_OP_DONE, p->syncpt);
}
| 69 | |||
| 70 | /*** ctx3d ***/ | ||
| 71 | |||
| 72 | struct host1x_hwctx *nvhost_3dctx_alloc_common(struct host1x_hwctx_handler *p, | ||
| 73 | struct nvhost_channel *ch, bool map_restore) | ||
| 74 | { | ||
| 75 | struct nvmap_client *nvmap = nvhost_get_host(ch->dev)->nvmap; | ||
| 76 | struct host1x_hwctx *ctx; | ||
| 77 | |||
| 78 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
| 79 | if (!ctx) | ||
| 80 | return NULL; | ||
| 81 | ctx->restore = nvmap_alloc(nvmap, p->restore_size * 4, 32, | ||
| 82 | map_restore ? NVMAP_HANDLE_WRITE_COMBINE | ||
| 83 | : NVMAP_HANDLE_UNCACHEABLE, 0); | ||
| 84 | if (IS_ERR_OR_NULL(ctx->restore)) | ||
| 85 | goto fail; | ||
| 86 | |||
| 87 | if (map_restore) { | ||
| 88 | ctx->restore_virt = nvmap_mmap(ctx->restore); | ||
| 89 | if (!ctx->restore_virt) | ||
| 90 | goto fail; | ||
| 91 | } else | ||
| 92 | ctx->restore_virt = NULL; | ||
| 93 | |||
| 94 | kref_init(&ctx->hwctx.ref); | ||
| 95 | ctx->hwctx.h = &p->h; | ||
| 96 | ctx->hwctx.channel = ch; | ||
| 97 | ctx->hwctx.valid = false; | ||
| 98 | ctx->save_incrs = p->save_incrs; | ||
| 99 | ctx->save_thresh = p->save_thresh; | ||
| 100 | ctx->save_slots = p->save_slots; | ||
| 101 | ctx->restore_phys = nvmap_pin(nvmap, ctx->restore); | ||
| 102 | if (IS_ERR_VALUE(ctx->restore_phys)) | ||
| 103 | goto fail; | ||
| 104 | |||
| 105 | ctx->restore_size = p->restore_size; | ||
| 106 | ctx->restore_incrs = p->restore_incrs; | ||
| 107 | return ctx; | ||
| 108 | |||
| 109 | fail: | ||
| 110 | if (map_restore && ctx->restore_virt) { | ||
| 111 | nvmap_munmap(ctx->restore, ctx->restore_virt); | ||
| 112 | ctx->restore_virt = NULL; | ||
| 113 | } | ||
| 114 | nvmap_free(nvmap, ctx->restore); | ||
| 115 | ctx->restore = NULL; | ||
| 116 | kfree(ctx); | ||
| 117 | return NULL; | ||
| 118 | } | ||
| 119 | |||
/* Take a reference on a 3D context. */
void nvhost_3dctx_get(struct nvhost_hwctx *ctx)
{
	kref_get(&ctx->ref);
}

/* kref release callback: unmap, unpin and free the restore buffer,
 * then free the context itself. */
void nvhost_3dctx_free(struct kref *ref)
{
	struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct nvmap_client *nvmap =
		nvhost_get_host(nctx->channel->dev)->nvmap;

	if (ctx->restore_virt) {
		nvmap_munmap(ctx->restore, ctx->restore_virt);
		ctx->restore_virt = NULL;
	}
	nvmap_unpin(nvmap, ctx->restore);
	ctx->restore_phys = 0;
	nvmap_free(nvmap, ctx->restore);
	ctx->restore = NULL;
	kfree(ctx);
}

/* Drop a reference; the context is freed when the last one goes away. */
void nvhost_3dctx_put(struct nvhost_hwctx *ctx)
{
	kref_put(&ctx->ref, nvhost_3dctx_free);
}

/* Power-gate hook: save the 3D context state, tracked via the 3D
 * syncpoint, before the module is powered down. */
int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev)
{
	return host1x_save_context(dev, NVSYNCPT_3D);
}
| 152 | |||
| 153 | static int __devinit gr3d_probe(struct nvhost_device *dev) | ||
| 154 | { | ||
| 155 | return nvhost_client_device_init(dev); | ||
| 156 | } | ||
| 157 | |||
| 158 | static int __exit gr3d_remove(struct nvhost_device *dev) | ||
| 159 | { | ||
| 160 | /* Add clean-up */ | ||
| 161 | return 0; | ||
| 162 | } | ||
| 163 | |||
| 164 | static int gr3d_suspend(struct nvhost_device *dev, pm_message_t state) | ||
| 165 | { | ||
| 166 | return nvhost_client_device_suspend(dev); | ||
| 167 | } | ||
| 168 | |||
| 169 | static int gr3d_resume(struct nvhost_device *dev) | ||
| 170 | { | ||
| 171 | dev_info(&dev->dev, "resuming\n"); | ||
| 172 | return 0; | ||
| 173 | } | ||
| 174 | |||
/* Device handle looked up from the nvhost bus at module init time. */
struct nvhost_device *gr3d_device;

/* Driver ops for the "gr3d" nvhost device; PM hooks only with CONFIG_PM. */
static struct nvhost_driver gr3d_driver = {
	.probe = gr3d_probe,
	.remove = __exit_p(gr3d_remove),
#ifdef CONFIG_PM
	.suspend = gr3d_suspend,
	.resume = gr3d_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "gr3d",
	}
};
| 189 | |||
| 190 | static int __init gr3d_init(void) | ||
| 191 | { | ||
| 192 | int err; | ||
| 193 | |||
| 194 | gr3d_device = nvhost_get_device("gr3d"); | ||
| 195 | if (!gr3d_device) | ||
| 196 | return -ENXIO; | ||
| 197 | |||
| 198 | err = nvhost_device_register(gr3d_device); | ||
| 199 | if (err) | ||
| 200 | return err; | ||
| 201 | |||
| 202 | return nvhost_driver_register(&gr3d_driver); | ||
| 203 | } | ||
| 204 | |||
| 205 | static void __exit gr3d_exit(void) | ||
| 206 | { | ||
| 207 | nvhost_driver_unregister(&gr3d_driver); | ||
| 208 | } | ||
| 209 | |||
| 210 | module_init(gr3d_init); | ||
| 211 | module_exit(gr3d_exit); | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d.h b/drivers/video/tegra/host/gr3d/gr3d.h new file mode 100644 index 00000000000..3855b237b70 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d.h | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/gr3d/gr3d.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host 3D | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
#ifndef __NVHOST_GR3D_GR3D_H
#define __NVHOST_GR3D_GR3D_H

#include "host1x/host1x_hwctx.h"
#include <linux/types.h>

/* Registers of 3D unit */

#define AR3D_PSEQ_QUAD_ID 0x545
#define AR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
#define AR3D_DW_MEMORY_OUTPUT_DATA 0x905
#define AR3D_GSHIM_WRITE_MASK 0xb00
#define AR3D_GSHIM_READ_SELECT 0xb01
#define AR3D_GLOBAL_MEMORY_OUTPUT_READS 0xe40

struct nvhost_hwctx;
struct nvhost_channel;
struct kref;

/* Functions used commonly by all 3D context switch modules */

/* Emit the restore-buffer prologue/epilogue words at ptr. */
void nvhost_3dctx_restore_begin(struct host1x_hwctx_handler *h, u32 *ptr);
/* Emit restore opcodes for 'count' consecutive registers from start_reg. */
void nvhost_3dctx_restore_direct(u32 *ptr, u32 start_reg, u32 count);
/* Emit restore opcodes for an indirect (offset-reg/data-reg pair) range. */
void nvhost_3dctx_restore_indirect(u32 *ptr, u32 offset_reg,
	u32 offset, u32 data_reg, u32 count);
void nvhost_3dctx_restore_end(struct host1x_hwctx_handler *h, u32 *ptr);
/* Allocate a hwctx plus its restore buffer; CPU-map it when map_restore. */
struct host1x_hwctx *nvhost_3dctx_alloc_common(
	struct host1x_hwctx_handler *p,
	struct nvhost_channel *ch, bool map_restore);
/* Reference counting: get/put pair, free runs on the last put. */
void nvhost_3dctx_get(struct nvhost_hwctx *ctx);
void nvhost_3dctx_free(struct kref *ref);
void nvhost_3dctx_put(struct nvhost_hwctx *ctx);
/* Save the 3D context ahead of power gating. */
int nvhost_gr3d_prepare_power_off(struct nvhost_device *dev);

#endif
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.c b/drivers/video/tegra/host/gr3d/gr3d_t20.c new file mode 100644 index 00000000000..3604142aaf2 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d_t20.c | |||
| @@ -0,0 +1,395 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/gr3d/gr3d_t20.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host 3D for Tegra2 | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "nvhost_hwctx.h" | ||
| 22 | #include "dev.h" | ||
| 23 | #include "host1x/host1x_channel.h" | ||
| 24 | #include "host1x/host1x_hardware.h" | ||
| 25 | #include "host1x/host1x_syncpt.h" | ||
| 26 | #include "gr3d.h" | ||
| 27 | |||
| 28 | #include <linux/slab.h> | ||
| 29 | |||
/*
 * Register ranges captured/restored by the Tegra2 3D context switch.
 * Each entry is (first register offset, word count, access type):
 * DIRECT ranges are read register-by-register, INDIRECT ranges go
 * through an offset/data register pair at offset+1 (INDIRECT_4X uses
 * offset+2 — see the indoff computation in setup_save_regs()).
 */
static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
	HWCTX_REGINFO(0xe00, 4, DIRECT),
	HWCTX_REGINFO(0xe05, 30, DIRECT),
	HWCTX_REGINFO(0xe25, 2, DIRECT),
	HWCTX_REGINFO(0xe28, 2, DIRECT),
	HWCTX_REGINFO(0x001, 2, DIRECT),
	HWCTX_REGINFO(0x00c, 10, DIRECT),
	HWCTX_REGINFO(0x100, 34, DIRECT),
	HWCTX_REGINFO(0x124, 2, DIRECT),
	HWCTX_REGINFO(0x200, 5, DIRECT),
	HWCTX_REGINFO(0x205, 1024, INDIRECT),
	HWCTX_REGINFO(0x207, 1024, INDIRECT),
	HWCTX_REGINFO(0x209, 1, DIRECT),
	HWCTX_REGINFO(0x300, 64, DIRECT),
	HWCTX_REGINFO(0x343, 25, DIRECT),
	HWCTX_REGINFO(0x363, 2, DIRECT),
	HWCTX_REGINFO(0x400, 16, DIRECT),
	HWCTX_REGINFO(0x411, 1, DIRECT),
	HWCTX_REGINFO(0x500, 4, DIRECT),
	HWCTX_REGINFO(0x520, 32, DIRECT),
	HWCTX_REGINFO(0x540, 64, INDIRECT),
	HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x603, 128, INDIRECT),
	HWCTX_REGINFO(0x608, 4, DIRECT),
	HWCTX_REGINFO(0x60e, 1, DIRECT),
	HWCTX_REGINFO(0x700, 64, INDIRECT),
	HWCTX_REGINFO(0x710, 50, DIRECT),
	HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x803, 512, INDIRECT),
	HWCTX_REGINFO(0x805, 64, INDIRECT),
	HWCTX_REGINFO(0x820, 32, DIRECT),
	HWCTX_REGINFO(0x900, 64, INDIRECT),
	HWCTX_REGINFO(0x902, 2, DIRECT),
	HWCTX_REGINFO(0xa02, 10, DIRECT),
	HWCTX_REGINFO(0xe04, 1, DIRECT),
	HWCTX_REGINFO(0xe2a, 1, DIRECT),
};
| 67 | |||
/* the same context save command sequence is used for all contexts. */
/* Word counts of the fixed opcode sequences emitted by the v0 builders
 * below; used both to size the buffers and to advance the cursors. */
#define SAVE_BEGIN_V0_SIZE 5
#define SAVE_DIRECT_V0_SIZE 3
#define SAVE_INDIRECT_V0_SIZE 5
#define SAVE_END_V0_SIZE 5
/* Number of syncpt increments in a full save sequence. */
#define SAVE_INCRS 3
#define SAVE_THRESH_OFFSET 1
#define RESTORE_BEGIN_SIZE 4
#define RESTORE_DIRECT_SIZE 1
#define RESTORE_INDIRECT_SIZE 2
#define RESTORE_END_SIZE 1

/* Accumulator passed through setup_save_regs(): the write cursor (NULL
 * during the sizing pass) plus running word/increment counts. */
struct save_info {
	u32 *ptr;
	unsigned int save_count;
	unsigned int restore_count;
	unsigned int save_incrs;
	unsigned int restore_incrs;
};
| 87 | |||
| 88 | static u32 *setup_restore_regs_v0(u32 *ptr, | ||
| 89 | const struct hwctx_reginfo *regs, | ||
| 90 | unsigned int nr_regs) | ||
| 91 | { | ||
| 92 | const struct hwctx_reginfo *rend = regs + nr_regs; | ||
| 93 | |||
| 94 | for ( ; regs != rend; ++regs) { | ||
| 95 | u32 offset = regs->offset; | ||
| 96 | u32 count = regs->count; | ||
| 97 | u32 indoff = offset + 1; | ||
| 98 | switch (regs->type) { | ||
| 99 | case HWCTX_REGINFO_DIRECT: | ||
| 100 | nvhost_3dctx_restore_direct(ptr, offset, count); | ||
| 101 | ptr += RESTORE_DIRECT_SIZE; | ||
| 102 | break; | ||
| 103 | case HWCTX_REGINFO_INDIRECT_4X: | ||
| 104 | ++indoff; | ||
| 105 | /* fall through */ | ||
| 106 | case HWCTX_REGINFO_INDIRECT: | ||
| 107 | nvhost_3dctx_restore_indirect(ptr, | ||
| 108 | offset, 0, indoff, count); | ||
| 109 | ptr += RESTORE_INDIRECT_SIZE; | ||
| 110 | break; | ||
| 111 | } | ||
| 112 | ptr += count; | ||
| 113 | } | ||
| 114 | return ptr; | ||
| 115 | } | ||
| 116 | |||
| 117 | static void setup_restore_v0(struct host1x_hwctx_handler *h, u32 *ptr) | ||
| 118 | { | ||
| 119 | nvhost_3dctx_restore_begin(h, ptr); | ||
| 120 | ptr += RESTORE_BEGIN_SIZE; | ||
| 121 | |||
| 122 | ptr = setup_restore_regs_v0(ptr, | ||
| 123 | ctxsave_regs_3d_global, | ||
| 124 | ARRAY_SIZE(ctxsave_regs_3d_global)); | ||
| 125 | |||
| 126 | nvhost_3dctx_restore_end(h, ptr); | ||
| 127 | |||
| 128 | wmb(); | ||
| 129 | } | ||
| 130 | |||
| 131 | /*** v0 saver ***/ | ||
| 132 | |||
| 133 | static void save_push_v0(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma) | ||
| 134 | { | ||
| 135 | struct host1x_hwctx *ctx = to_host1x_hwctx(nctx); | ||
| 136 | struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx); | ||
| 137 | |||
| 138 | nvhost_cdma_push_gather(cdma, | ||
| 139 | (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE, | ||
| 140 | (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE, | ||
| 141 | nvhost_opcode_gather(p->save_size), | ||
| 142 | p->save_phys); | ||
| 143 | } | ||
| 144 | |||
/*
 * Emit the save-sequence prologue (SAVE_BEGIN_V0_SIZE words): let 3D
 * signal op-done, have the host wait for it, then do a second syncpt
 * increment to wake the CPU-side context read thread.
 */
static void __init save_begin_v0(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* 3d: when done, increment syncpt to base+1 */
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	ptr[1] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
			h->syncpt); /* incr 1 */
	/* host: wait for syncpt base+1 */
	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
	ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt,
			h->waitbase, 1);
	/* host: signal context read thread to start reading */
	ptr[4] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
			h->syncpt); /* incr 2 */
}
| 160 | |||
/*
 * Emit SAVE_DIRECT_V0_SIZE words that read back 'count' registers
 * starting at start_reg through the host indirect-access window.
 * The 'true' flag presumably enables address auto-increment for the
 * consecutive reads — TODO confirm against host1x_hardware.h.
 */
static void __init save_direct_v0(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			start_reg, true);
	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
| 168 | |||
/*
 * Emit SAVE_INDIRECT_V0_SIZE words: program the 3D unit's offset
 * register, then read 'count' words back via the data register
 * through the host indirect window (no auto-increment; the data
 * register itself steps the internal pointer).
 */
static void __init save_indirect_v0(u32 *ptr, u32 offset_reg, u32 offset,
			u32 data_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			offset_reg, 1);
	ptr[1] = offset;
	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_INDOFF, 1);
	ptr[3] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			data_reg, false);
	ptr[4] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
| 181 | |||
| 182 | static void __init save_end_v0(struct host1x_hwctx_handler *h, u32 *ptr) | ||
| 183 | { | ||
| 184 | /* Wait for context read service to finish (cpu incr 3) */ | ||
| 185 | ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1); | ||
| 186 | ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt, | ||
| 187 | h->waitbase, h->save_incrs); | ||
| 188 | /* Advance syncpoint base */ | ||
| 189 | ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1); | ||
| 190 | ptr[3] = nvhost_class_host_incr_syncpt_base(NVWAITBASE_3D, | ||
| 191 | h->save_incrs); | ||
| 192 | /* set class back to the unit */ | ||
| 193 | ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0); | ||
| 194 | } | ||
| 195 | |||
| 196 | static u32 *save_regs_v0(u32 *ptr, unsigned int *pending, | ||
| 197 | void __iomem *chan_regs, | ||
| 198 | const struct hwctx_reginfo *regs, | ||
| 199 | unsigned int nr_regs) | ||
| 200 | { | ||
| 201 | const struct hwctx_reginfo *rend = regs + nr_regs; | ||
| 202 | int drain_result = 0; | ||
| 203 | |||
| 204 | for ( ; regs != rend; ++regs) { | ||
| 205 | u32 count = regs->count; | ||
| 206 | switch (regs->type) { | ||
| 207 | case HWCTX_REGINFO_DIRECT: | ||
| 208 | ptr += RESTORE_DIRECT_SIZE; | ||
| 209 | break; | ||
| 210 | case HWCTX_REGINFO_INDIRECT: | ||
| 211 | case HWCTX_REGINFO_INDIRECT_4X: | ||
| 212 | ptr += RESTORE_INDIRECT_SIZE; | ||
| 213 | break; | ||
| 214 | } | ||
| 215 | drain_result = host1x_drain_read_fifo(chan_regs, | ||
| 216 | ptr, count, pending); | ||
| 217 | BUG_ON(drain_result < 0); | ||
| 218 | ptr += count; | ||
| 219 | } | ||
| 220 | return ptr; | ||
| 221 | } | ||
| 222 | |||
| 223 | /*** save ***/ | ||
| 224 | |||
| 225 | static void __init setup_save_regs(struct save_info *info, | ||
| 226 | const struct hwctx_reginfo *regs, | ||
| 227 | unsigned int nr_regs) | ||
| 228 | { | ||
| 229 | const struct hwctx_reginfo *rend = regs + nr_regs; | ||
| 230 | u32 *ptr = info->ptr; | ||
| 231 | unsigned int save_count = info->save_count; | ||
| 232 | unsigned int restore_count = info->restore_count; | ||
| 233 | |||
| 234 | for ( ; regs != rend; ++regs) { | ||
| 235 | u32 offset = regs->offset; | ||
| 236 | u32 count = regs->count; | ||
| 237 | u32 indoff = offset + 1; | ||
| 238 | switch (regs->type) { | ||
| 239 | case HWCTX_REGINFO_DIRECT: | ||
| 240 | if (ptr) { | ||
| 241 | save_direct_v0(ptr, offset, count); | ||
| 242 | ptr += SAVE_DIRECT_V0_SIZE; | ||
| 243 | } | ||
| 244 | save_count += SAVE_DIRECT_V0_SIZE; | ||
| 245 | restore_count += RESTORE_DIRECT_SIZE; | ||
| 246 | break; | ||
| 247 | case HWCTX_REGINFO_INDIRECT_4X: | ||
| 248 | ++indoff; | ||
| 249 | /* fall through */ | ||
| 250 | case HWCTX_REGINFO_INDIRECT: | ||
| 251 | if (ptr) { | ||
| 252 | save_indirect_v0(ptr, offset, 0, | ||
| 253 | indoff, count); | ||
| 254 | ptr += SAVE_INDIRECT_V0_SIZE; | ||
| 255 | } | ||
| 256 | save_count += SAVE_INDIRECT_V0_SIZE; | ||
| 257 | restore_count += RESTORE_INDIRECT_SIZE; | ||
| 258 | break; | ||
| 259 | } | ||
| 260 | if (ptr) { | ||
| 261 | /* SAVE cases only: reserve room for incoming data */ | ||
| 262 | u32 k = 0; | ||
| 263 | /* | ||
| 264 | * Create a signature pattern for indirect data (which | ||
| 265 | * will be overwritten by true incoming data) for | ||
| 266 | * better deducing where we are in a long command | ||
| 267 | * sequence, when given only a FIFO snapshot for debug | ||
| 268 | * purposes. | ||
| 269 | */ | ||
| 270 | for (k = 0; k < count; k++) | ||
| 271 | *(ptr + k) = 0xd000d000 | (offset << 16) | k; | ||
| 272 | ptr += count; | ||
| 273 | } | ||
| 274 | save_count += count; | ||
| 275 | restore_count += count; | ||
| 276 | } | ||
| 277 | |||
| 278 | info->ptr = ptr; | ||
| 279 | info->save_count = save_count; | ||
| 280 | info->restore_count = restore_count; | ||
| 281 | } | ||
| 282 | |||
| 283 | static void __init setup_save(struct host1x_hwctx_handler *h, u32 *ptr) | ||
| 284 | { | ||
| 285 | struct save_info info = { | ||
| 286 | ptr, | ||
| 287 | SAVE_BEGIN_V0_SIZE, | ||
| 288 | RESTORE_BEGIN_SIZE, | ||
| 289 | SAVE_INCRS, | ||
| 290 | 1 | ||
| 291 | }; | ||
| 292 | |||
| 293 | if (info.ptr) { | ||
| 294 | save_begin_v0(h, info.ptr); | ||
| 295 | info.ptr += SAVE_BEGIN_V0_SIZE; | ||
| 296 | } | ||
| 297 | |||
| 298 | /* save regs */ | ||
| 299 | setup_save_regs(&info, | ||
| 300 | ctxsave_regs_3d_global, | ||
| 301 | ARRAY_SIZE(ctxsave_regs_3d_global)); | ||
| 302 | |||
| 303 | if (info.ptr) { | ||
| 304 | save_end_v0(h, info.ptr); | ||
| 305 | info.ptr += SAVE_END_V0_SIZE; | ||
| 306 | } | ||
| 307 | |||
| 308 | wmb(); | ||
| 309 | |||
| 310 | h->save_size = info.save_count + SAVE_END_V0_SIZE; | ||
| 311 | h->restore_size = info.restore_count + RESTORE_END_SIZE; | ||
| 312 | h->save_incrs = info.save_incrs; | ||
| 313 | h->save_thresh = h->save_incrs - SAVE_THRESH_OFFSET; | ||
| 314 | h->restore_incrs = info.restore_incrs; | ||
| 315 | } | ||
| 316 | |||
| 317 | |||
| 318 | |||
| 319 | /*** ctx3d ***/ | ||
| 320 | |||
| 321 | static struct nvhost_hwctx *ctx3d_alloc_v0(struct nvhost_hwctx_handler *h, | ||
| 322 | struct nvhost_channel *ch) | ||
| 323 | { | ||
| 324 | struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h); | ||
| 325 | struct host1x_hwctx *ctx = | ||
| 326 | nvhost_3dctx_alloc_common(p, ch, true); | ||
| 327 | if (ctx) { | ||
| 328 | setup_restore_v0(p, ctx->restore_virt); | ||
| 329 | return &ctx->hwctx; | ||
| 330 | } else | ||
| 331 | return NULL; | ||
| 332 | } | ||
| 333 | |||
| 334 | static void ctx3d_save_service(struct nvhost_hwctx *nctx) | ||
| 335 | { | ||
| 336 | struct host1x_hwctx *ctx = to_host1x_hwctx(nctx); | ||
| 337 | |||
| 338 | u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE; | ||
| 339 | unsigned int pending = 0; | ||
| 340 | |||
| 341 | ptr = save_regs_v0(ptr, &pending, nctx->channel->aperture, | ||
| 342 | ctxsave_regs_3d_global, | ||
| 343 | ARRAY_SIZE(ctxsave_regs_3d_global)); | ||
| 344 | |||
| 345 | wmb(); | ||
| 346 | nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt, | ||
| 347 | host1x_hwctx_handler(ctx)->syncpt); | ||
| 348 | } | ||
| 349 | |||
| 350 | struct nvhost_hwctx_handler * __init nvhost_gr3d_t20_ctxhandler_init( | ||
| 351 | u32 syncpt, u32 waitbase, | ||
| 352 | struct nvhost_channel *ch) | ||
| 353 | { | ||
| 354 | struct nvmap_client *nvmap; | ||
| 355 | u32 *save_ptr; | ||
| 356 | struct host1x_hwctx_handler *p; | ||
| 357 | |||
| 358 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
| 359 | if (!p) | ||
| 360 | return NULL; | ||
| 361 | nvmap = nvhost_get_host(ch->dev)->nvmap; | ||
| 362 | |||
| 363 | p->syncpt = syncpt; | ||
| 364 | p->waitbase = waitbase; | ||
| 365 | |||
| 366 | setup_save(p, NULL); | ||
| 367 | |||
| 368 | p->save_buf = nvmap_alloc(nvmap, p->save_size * sizeof(u32), 32, | ||
| 369 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
| 370 | if (IS_ERR(p->save_buf)) { | ||
| 371 | p->save_buf = NULL; | ||
| 372 | return NULL; | ||
| 373 | } | ||
| 374 | |||
| 375 | p->save_slots = 1; | ||
| 376 | |||
| 377 | save_ptr = nvmap_mmap(p->save_buf); | ||
| 378 | if (!save_ptr) { | ||
| 379 | nvmap_free(nvmap, p->save_buf); | ||
| 380 | p->save_buf = NULL; | ||
| 381 | return NULL; | ||
| 382 | } | ||
| 383 | |||
| 384 | p->save_phys = nvmap_pin(nvmap, p->save_buf); | ||
| 385 | |||
| 386 | setup_save(p, save_ptr); | ||
| 387 | |||
| 388 | p->h.alloc = ctx3d_alloc_v0; | ||
| 389 | p->h.save_push = save_push_v0; | ||
| 390 | p->h.save_service = ctx3d_save_service; | ||
| 391 | p->h.get = nvhost_3dctx_get; | ||
| 392 | p->h.put = nvhost_3dctx_put; | ||
| 393 | |||
| 394 | return &p->h; | ||
| 395 | } | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.h b/drivers/video/tegra/host/gr3d/gr3d_t20.h new file mode 100644 index 00000000000..5fe6d50d0c3 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d_t20.h | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/gr3d/gr3d_t20.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host 3D for Tegra2 | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
#ifndef __NVHOST_GR3D_GR3D_T20_H
#define __NVHOST_GR3D_GR3D_T20_H

struct nvhost_hwctx_handler;

/* Create the Tegra2 (v0) 3D context-switch handler for channel ch,
 * using the given syncpoint and wait base; returns NULL on failure. */
struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
	u32 syncpt, u32 waitbase,
	struct nvhost_channel *ch);

#endif
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t30.c b/drivers/video/tegra/host/gr3d/gr3d_t30.c new file mode 100644 index 00000000000..e7329e50e3d --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d_t30.c | |||
| @@ -0,0 +1,435 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/gr3d/gr3d_t30.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host 3D for Tegra3 | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012 NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "nvhost_hwctx.h" | ||
| 22 | #include "dev.h" | ||
| 23 | #include "host1x/host1x_hardware.h" | ||
| 24 | #include "host1x/host1x_syncpt.h" | ||
| 25 | #include "gr3d.h" | ||
| 26 | |||
| 27 | #include <mach/gpufuse.h> | ||
| 28 | #include <mach/hardware.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | |||
/* 99 > 2, which makes kernel panic if register set is incorrect */
/* Number of GSHIM register sets (1 or 2); presumably assigned during
 * init from chip information — TODO confirm where it is set. */
static int register_sets = 99;

/*
 * Register ranges shared by both register sets on Tegra3.
 * Entry format: (first register offset, word count, access type);
 * see the t20 table for the DIRECT/INDIRECT/INDIRECT_4X semantics.
 */
static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
	HWCTX_REGINFO(0xe00, 4, DIRECT),
	HWCTX_REGINFO(0xe05, 30, DIRECT),
	HWCTX_REGINFO(0xe25, 2, DIRECT),
	HWCTX_REGINFO(0xe28, 2, DIRECT),
	HWCTX_REGINFO(0xe30, 16, DIRECT),
	HWCTX_REGINFO(0x001, 2, DIRECT),
	HWCTX_REGINFO(0x00c, 10, DIRECT),
	HWCTX_REGINFO(0x100, 34, DIRECT),
	HWCTX_REGINFO(0x124, 2, DIRECT),
	HWCTX_REGINFO(0x200, 5, DIRECT),
	HWCTX_REGINFO(0x205, 1024, INDIRECT),
	HWCTX_REGINFO(0x207, 1024, INDIRECT),
	HWCTX_REGINFO(0x209, 1, DIRECT),
	HWCTX_REGINFO(0x300, 64, DIRECT),
	HWCTX_REGINFO(0x343, 25, DIRECT),
	HWCTX_REGINFO(0x363, 2, DIRECT),
	HWCTX_REGINFO(0x400, 16, DIRECT),
	HWCTX_REGINFO(0x411, 1, DIRECT),
	HWCTX_REGINFO(0x412, 1, DIRECT),
	HWCTX_REGINFO(0x500, 4, DIRECT),
	HWCTX_REGINFO(0x520, 32, DIRECT),
	HWCTX_REGINFO(0x540, 64, INDIRECT),
	HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x603, 128, INDIRECT),
	HWCTX_REGINFO(0x608, 4, DIRECT),
	HWCTX_REGINFO(0x60e, 1, DIRECT),
	HWCTX_REGINFO(0x700, 64, INDIRECT),
	HWCTX_REGINFO(0x710, 50, DIRECT),
	HWCTX_REGINFO(0x750, 16, DIRECT),
	HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x803, 512, INDIRECT),
	HWCTX_REGINFO(0x805, 64, INDIRECT),
	HWCTX_REGINFO(0x820, 32, DIRECT),
	HWCTX_REGINFO(0x900, 64, INDIRECT),
	HWCTX_REGINFO(0x902, 2, DIRECT),
	HWCTX_REGINFO(0x90a, 1, DIRECT),
	HWCTX_REGINFO(0xa02, 10, DIRECT),
	HWCTX_REGINFO(0xb04, 1, DIRECT),
	HWCTX_REGINFO(0xb06, 13, DIRECT),
	HWCTX_REGINFO(0xe42, 2, DIRECT), /* HW bug workaround */
};

/* Registers captured once per GSHIM register set. */
static const struct hwctx_reginfo ctxsave_regs_3d_perset[] = {
	HWCTX_REGINFO(0xe04, 1, DIRECT),
	HWCTX_REGINFO(0xe2a, 1, DIRECT),
	HWCTX_REGINFO(0x413, 1, DIRECT),
	HWCTX_REGINFO(0x90b, 1, DIRECT),
	HWCTX_REGINFO(0xe41, 1, DIRECT),
};

/* Word offset of the second register set's data within the restore
 * buffer; used by save_push_v1() when register_sets == 2. */
static unsigned int restore_set1_offset;

/* Word counts of the fixed opcode sequences emitted by the v1
 * builders; each v1 save block also embeds its restore counterpart. */
#define SAVE_BEGIN_V1_SIZE (1 + RESTORE_BEGIN_SIZE)
#define SAVE_DIRECT_V1_SIZE (4 + RESTORE_DIRECT_SIZE)
#define SAVE_INDIRECT_V1_SIZE (6 + RESTORE_INDIRECT_SIZE)
#define SAVE_END_V1_SIZE (9 + RESTORE_END_SIZE)
#define SAVE_INCRS 3
#define SAVE_THRESH_OFFSET 0
#define RESTORE_BEGIN_SIZE 4
#define RESTORE_DIRECT_SIZE 1
#define RESTORE_INDIRECT_SIZE 2
#define RESTORE_END_SIZE 1

/* Accumulator passed through setup_save_regs(): the write cursor (NULL
 * during the sizing pass) plus running word/increment counts. */
struct save_info {
	u32 *ptr;
	unsigned int save_count;
	unsigned int restore_count;
	unsigned int save_incrs;
	unsigned int restore_incrs;
};
| 105 | |||
| 106 | /*** v1 saver ***/ | ||
| 107 | |||
| 108 | static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma) | ||
| 109 | { | ||
| 110 | struct host1x_hwctx *ctx = to_host1x_hwctx(nctx); | ||
| 111 | struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx); | ||
| 112 | |||
| 113 | /* wait for 3d idle */ | ||
| 114 | nvhost_cdma_push(cdma, | ||
| 115 | nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0), | ||
| 116 | nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, | ||
| 117 | p->syncpt)); | ||
| 118 | nvhost_cdma_push(cdma, | ||
| 119 | nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, | ||
| 120 | NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1), | ||
| 121 | nvhost_class_host_wait_syncpt_base(p->syncpt, | ||
| 122 | p->waitbase, 1)); | ||
| 123 | /* back to 3d */ | ||
| 124 | nvhost_cdma_push(cdma, | ||
| 125 | nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0), | ||
| 126 | NVHOST_OPCODE_NOOP); | ||
| 127 | /* set register set 0 and 1 register read memory output addresses, | ||
| 128 | and send their reads to memory */ | ||
| 129 | if (register_sets == 2) { | ||
| 130 | nvhost_cdma_push(cdma, | ||
| 131 | nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2), | ||
| 132 | nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, | ||
| 133 | 1)); | ||
| 134 | nvhost_cdma_push(cdma, | ||
| 135 | nvhost_opcode_nonincr(0x904, 1), | ||
| 136 | ctx->restore_phys + restore_set1_offset * 4); | ||
| 137 | } | ||
| 138 | nvhost_cdma_push(cdma, | ||
| 139 | nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1), | ||
| 140 | nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1)); | ||
| 141 | nvhost_cdma_push(cdma, | ||
| 142 | nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1), | ||
| 143 | ctx->restore_phys); | ||
| 144 | /* gather the save buffer */ | ||
| 145 | nvhost_cdma_push_gather(cdma, | ||
| 146 | (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE, | ||
| 147 | (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE, | ||
| 148 | nvhost_opcode_gather(p->save_size), | ||
| 149 | p->save_phys); | ||
| 150 | } | ||
| 151 | |||
/*
 * Emit the v1 save prologue: one word routing the next
 * RESTORE_BEGIN_SIZE words to the 3D memory-output path, followed by
 * the restore prologue itself (written to memory at save time).
 */
static void __init save_begin_v1(struct host1x_hwctx_handler *p, u32 *ptr)
{
	ptr[0] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
			RESTORE_BEGIN_SIZE);
	nvhost_3dctx_restore_begin(p, ptr + 1);
	/* no effect — ptr is local and this is the last statement */
	ptr += RESTORE_BEGIN_SIZE;
}
| 159 | |||
/*
 * Emit a v1 direct save block: first the restore opcode is streamed
 * into the restore buffer via the memory-output data register, then
 * (after advancing past that payload) the host indirect-read opcodes
 * that capture the register values.  Note the deliberate offset
 * scheme: ptr is advanced by RESTORE_DIRECT_SIZE, so ptr[1..3] here
 * are words 2..4 of the SAVE_DIRECT_V1_SIZE block.
 */
static void __init save_direct_v1(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			AR3D_DW_MEMORY_OUTPUT_DATA, 1);
	nvhost_3dctx_restore_direct(ptr + 1, start_reg, count);
	ptr += RESTORE_DIRECT_SIZE;
	ptr[1] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_INDOFF, 1);
	ptr[2] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			start_reg, true);
	/* TODO could do this in the setclass if count < 6 */
	ptr[3] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
| 173 | |||
/*
 * Emit a v1 indirect save block: stream the restore-indirect opcode
 * pair into the restore buffer, then program the unit's offset
 * register and read 'count' words back through the host indirect
 * window.  Same interleaved layout as save_direct_v1(): ptr is
 * advanced by RESTORE_INDIRECT_SIZE before the save-time words.
 */
static void __init save_indirect_v1(u32 *ptr, u32 offset_reg, u32 offset,
			u32 data_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	ptr[1] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
			RESTORE_INDIRECT_SIZE);
	nvhost_3dctx_restore_indirect(ptr + 2, offset_reg, offset, data_reg,
			count);
	ptr += RESTORE_INDIRECT_SIZE;
	ptr[2] = nvhost_opcode_imm(offset_reg, offset);
	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_INDOFF, 1);
	ptr[4] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			data_reg, false);
	ptr[5] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
| 190 | |||
/*
 * Emit the v1 save epilogue: write the restore-buffer terminator
 * through the memory-output path, restore the GSHIM write mask, flush
 * via an op-done syncpt increment, advance the wait base, and stop
 * routing register reads to memory.
 */
static void __init save_end_v1(struct host1x_hwctx_handler *p, u32 *ptr)
{
	/* write end of restore buffer */
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			AR3D_DW_MEMORY_OUTPUT_DATA, 1);
	nvhost_3dctx_restore_end(p, ptr + 1);
	/* same interleaving as the other v1 builders: skip the payload */
	ptr += RESTORE_END_SIZE;
	/* reset to dual reg if necessary */
	ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
			(1 << register_sets) - 1);
	/* op_done syncpt incr to flush FDC */
	ptr[2] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, p->syncpt);
	/* host wait for that syncpt incr, and advance the wait base */
	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			NV_CLASS_HOST_WAIT_SYNCPT_BASE,
			nvhost_mask2(
				NV_CLASS_HOST_WAIT_SYNCPT_BASE,
				NV_CLASS_HOST_INCR_SYNCPT_BASE));
	ptr[4] = nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, p->save_incrs - 1);
	ptr[5] = nvhost_class_host_incr_syncpt_base(p->waitbase,
			p->save_incrs);
	/* set class back to 3d */
	ptr[6] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	/* send reg reads back to host */
	ptr[7] = nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0);
	/* final syncpt increment to release waiters — NOTE(review): this
	 * is a raw imm to method 0; presumably equivalent to
	 * nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE, p->syncpt)
	 * if the IMMEDIATE condition encodes as 0 — confirm against
	 * host1x_hardware.h */
	ptr[8] = nvhost_opcode_imm(0, p->syncpt);
}
| 220 | |||
| 221 | /*** save ***/ | ||
| 222 | |||
| 223 | |||
| 224 | |||
| 225 | static void __init setup_save_regs(struct save_info *info, | ||
| 226 | const struct hwctx_reginfo *regs, | ||
| 227 | unsigned int nr_regs) | ||
| 228 | { | ||
| 229 | const struct hwctx_reginfo *rend = regs + nr_regs; | ||
| 230 | u32 *ptr = info->ptr; | ||
| 231 | unsigned int save_count = info->save_count; | ||
| 232 | unsigned int restore_count = info->restore_count; | ||
| 233 | |||
| 234 | for ( ; regs != rend; ++regs) { | ||
| 235 | u32 offset = regs->offset; | ||
| 236 | u32 count = regs->count; | ||
| 237 | u32 indoff = offset + 1; | ||
| 238 | switch (regs->type) { | ||
| 239 | case HWCTX_REGINFO_DIRECT: | ||
| 240 | if (ptr) { | ||
| 241 | save_direct_v1(ptr, offset, count); | ||
| 242 | ptr += SAVE_DIRECT_V1_SIZE; | ||
| 243 | } | ||
| 244 | save_count += SAVE_DIRECT_V1_SIZE; | ||
| 245 | restore_count += RESTORE_DIRECT_SIZE; | ||
| 246 | break; | ||
| 247 | case HWCTX_REGINFO_INDIRECT_4X: | ||
| 248 | ++indoff; | ||
| 249 | /* fall through */ | ||
| 250 | case HWCTX_REGINFO_INDIRECT: | ||
| 251 | if (ptr) { | ||
| 252 | save_indirect_v1(ptr, offset, 0, | ||
| 253 | indoff, count); | ||
| 254 | ptr += SAVE_INDIRECT_V1_SIZE; | ||
| 255 | } | ||
| 256 | save_count += SAVE_INDIRECT_V1_SIZE; | ||
| 257 | restore_count += RESTORE_INDIRECT_SIZE; | ||
| 258 | break; | ||
| 259 | } | ||
| 260 | if (ptr) { | ||
| 261 | /* SAVE cases only: reserve room for incoming data */ | ||
| 262 | u32 k = 0; | ||
| 263 | /* | ||
| 264 | * Create a signature pattern for indirect data (which | ||
| 265 | * will be overwritten by true incoming data) for | ||
| 266 | * better deducing where we are in a long command | ||
| 267 | * sequence, when given only a FIFO snapshot for debug | ||
| 268 | * purposes. | ||
| 269 | */ | ||
| 270 | for (k = 0; k < count; k++) | ||
| 271 | *(ptr + k) = 0xd000d000 | (offset << 16) | k; | ||
| 272 | ptr += count; | ||
| 273 | } | ||
| 274 | save_count += count; | ||
| 275 | restore_count += count; | ||
| 276 | } | ||
| 277 | |||
| 278 | info->ptr = ptr; | ||
| 279 | info->save_count = save_count; | ||
| 280 | info->restore_count = restore_count; | ||
| 281 | } | ||
| 282 | |||
| 283 | static void __init switch_gpu(struct save_info *info, | ||
| 284 | unsigned int save_src_set, | ||
| 285 | u32 save_dest_sets, | ||
| 286 | u32 restore_dest_sets) | ||
| 287 | { | ||
| 288 | if (info->ptr) { | ||
| 289 | info->ptr[0] = nvhost_opcode_setclass( | ||
| 290 | NV_GRAPHICS_3D_CLASS_ID, | ||
| 291 | AR3D_DW_MEMORY_OUTPUT_DATA, 1); | ||
| 292 | info->ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, | ||
| 293 | restore_dest_sets); | ||
| 294 | info->ptr[2] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, | ||
| 295 | save_dest_sets); | ||
| 296 | info->ptr[3] = nvhost_opcode_imm(AR3D_GSHIM_READ_SELECT, | ||
| 297 | save_src_set); | ||
| 298 | info->ptr += 4; | ||
| 299 | } | ||
| 300 | info->save_count += 4; | ||
| 301 | info->restore_count += 1; | ||
| 302 | } | ||
| 303 | |||
| 304 | static void __init setup_save(struct host1x_hwctx_handler *p, u32 *ptr) | ||
| 305 | { | ||
| 306 | struct save_info info = { | ||
| 307 | ptr, | ||
| 308 | SAVE_BEGIN_V1_SIZE, | ||
| 309 | RESTORE_BEGIN_SIZE, | ||
| 310 | SAVE_INCRS, | ||
| 311 | 1 | ||
| 312 | }; | ||
| 313 | int save_end_size = SAVE_END_V1_SIZE; | ||
| 314 | |||
| 315 | BUG_ON(register_sets > 2); | ||
| 316 | |||
| 317 | if (info.ptr) { | ||
| 318 | save_begin_v1(p, info.ptr); | ||
| 319 | info.ptr += SAVE_BEGIN_V1_SIZE; | ||
| 320 | } | ||
| 321 | |||
| 322 | /* read from set0, write cmds through set0, restore to set0 and 1 */ | ||
| 323 | if (register_sets == 2) | ||
| 324 | switch_gpu(&info, 0, 1, 3); | ||
| 325 | |||
| 326 | /* save regs that are common to both sets */ | ||
| 327 | setup_save_regs(&info, | ||
| 328 | ctxsave_regs_3d_global, | ||
| 329 | ARRAY_SIZE(ctxsave_regs_3d_global)); | ||
| 330 | |||
| 331 | /* read from set 0, write cmds through set0, restore to set0 */ | ||
| 332 | if (register_sets == 2) | ||
| 333 | switch_gpu(&info, 0, 1, 1); | ||
| 334 | |||
| 335 | /* save set 0 specific regs */ | ||
| 336 | setup_save_regs(&info, | ||
| 337 | ctxsave_regs_3d_perset, | ||
| 338 | ARRAY_SIZE(ctxsave_regs_3d_perset)); | ||
| 339 | |||
| 340 | if (register_sets == 2) { | ||
| 341 | /* read from set1, write cmds through set1, restore to set1 */ | ||
| 342 | switch_gpu(&info, 1, 2, 2); | ||
| 343 | /* note offset at which set 1 restore starts */ | ||
| 344 | restore_set1_offset = info.restore_count; | ||
| 345 | /* save set 1 specific regs */ | ||
| 346 | setup_save_regs(&info, | ||
| 347 | ctxsave_regs_3d_perset, | ||
| 348 | ARRAY_SIZE(ctxsave_regs_3d_perset)); | ||
| 349 | } | ||
| 350 | |||
| 351 | /* read from set0, write cmds through set1, restore to set0 and 1 */ | ||
| 352 | if (register_sets == 2) | ||
| 353 | switch_gpu(&info, 0, 2, 3); | ||
| 354 | |||
| 355 | if (info.ptr) { | ||
| 356 | save_end_v1(p, info.ptr); | ||
| 357 | info.ptr += SAVE_END_V1_SIZE; | ||
| 358 | } | ||
| 359 | |||
| 360 | wmb(); | ||
| 361 | |||
| 362 | p->save_size = info.save_count + save_end_size; | ||
| 363 | p->restore_size = info.restore_count + RESTORE_END_SIZE; | ||
| 364 | p->save_incrs = info.save_incrs; | ||
| 365 | p->save_thresh = p->save_incrs - SAVE_THRESH_OFFSET; | ||
| 366 | p->restore_incrs = info.restore_incrs; | ||
| 367 | } | ||
| 368 | |||
| 369 | |||
| 370 | /*** ctx3d ***/ | ||
| 371 | |||
| 372 | static struct nvhost_hwctx *ctx3d_alloc_v1(struct nvhost_hwctx_handler *h, | ||
| 373 | struct nvhost_channel *ch) | ||
| 374 | { | ||
| 375 | struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h); | ||
| 376 | struct host1x_hwctx *ctx = nvhost_3dctx_alloc_common(p, ch, false); | ||
| 377 | |||
| 378 | if (ctx) | ||
| 379 | return &ctx->hwctx; | ||
| 380 | else | ||
| 381 | return NULL; | ||
| 382 | } | ||
| 383 | |||
| 384 | struct nvhost_hwctx_handler *__init nvhost_gr3d_t30_ctxhandler_init( | ||
| 385 | u32 syncpt, u32 waitbase, | ||
| 386 | struct nvhost_channel *ch) | ||
| 387 | { | ||
| 388 | struct nvmap_client *nvmap; | ||
| 389 | u32 *save_ptr; | ||
| 390 | struct host1x_hwctx_handler *p; | ||
| 391 | |||
| 392 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
| 393 | if (!p) | ||
| 394 | return NULL; | ||
| 395 | |||
| 396 | nvmap = nvhost_get_host(ch->dev)->nvmap; | ||
| 397 | |||
| 398 | register_sets = tegra_gpu_register_sets(); | ||
| 399 | BUG_ON(register_sets == 0 || register_sets > 2); | ||
| 400 | |||
| 401 | p->syncpt = syncpt; | ||
| 402 | p->waitbase = waitbase; | ||
| 403 | |||
| 404 | setup_save(p, NULL); | ||
| 405 | |||
| 406 | p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32, | ||
| 407 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
| 408 | if (IS_ERR(p->save_buf)) { | ||
| 409 | p->save_buf = NULL; | ||
| 410 | return NULL; | ||
| 411 | } | ||
| 412 | |||
| 413 | p->save_slots = 6; | ||
| 414 | if (register_sets == 2) | ||
| 415 | p->save_slots += 2; | ||
| 416 | |||
| 417 | save_ptr = nvmap_mmap(p->save_buf); | ||
| 418 | if (!save_ptr) { | ||
| 419 | nvmap_free(nvmap, p->save_buf); | ||
| 420 | p->save_buf = NULL; | ||
| 421 | return NULL; | ||
| 422 | } | ||
| 423 | |||
| 424 | p->save_phys = nvmap_pin(nvmap, p->save_buf); | ||
| 425 | |||
| 426 | setup_save(p, save_ptr); | ||
| 427 | |||
| 428 | p->h.alloc = ctx3d_alloc_v1; | ||
| 429 | p->h.save_push = save_push_v1; | ||
| 430 | p->h.save_service = NULL; | ||
| 431 | p->h.get = nvhost_3dctx_get; | ||
| 432 | p->h.put = nvhost_3dctx_put; | ||
| 433 | |||
| 434 | return &p->h; | ||
| 435 | } | ||
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t30.h b/drivers/video/tegra/host/gr3d/gr3d_t30.h new file mode 100644 index 00000000000..d1b787e14b4 --- /dev/null +++ b/drivers/video/tegra/host/gr3d/gr3d_t30.h | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/gr3d/gr3d_t30.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host 3D for Tegra3 | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_GR3D_GR3D_T30_H | ||
| 22 | #define __NVHOST_GR3D_GR3D_T30_H | ||
| 23 | |||
| 24 | struct nvhost_hwctx_handler; | ||
| 25 | |||
| 26 | struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init( | ||
| 27 | u32 syncpt, u32 waitbase, | ||
| 28 | struct nvhost_channel *ch); | ||
| 29 | |||
| 30 | #endif | ||
diff --git a/drivers/video/tegra/host/gr3d/scale3d.c b/drivers/video/tegra/host/gr3d/scale3d.c new file mode 100644 index 00000000000..8a267a127ea --- /dev/null +++ b/drivers/video/tegra/host/gr3d/scale3d.c | |||
| @@ -0,0 +1,661 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/t20/scale3d.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host 3D clock scaling | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
| 22 | * 3d clock scaling | ||
| 23 | * | ||
| 24 | * module3d_notify_busy() is called upon submit, module3d_notify_idle() is | ||
| 25 | * called when all outstanding submits are completed. Idle times are measured | ||
| 26 | * over a fixed time period (scale3d.p_period). If the 3d module idle time | ||
| 27 | * percentage goes over the limit (set in scale3d.p_idle_max), 3d clocks are | ||
| 28 | * scaled down. If the percentage goes under the minimum limit (set in | ||
| 29 | * scale3d.p_idle_min), 3d clocks are scaled up. An additional test is made | ||
| 30 | * over the time frame given in scale3d.p_fast_response for clocking up | ||
| 31 | * quickly in response to load peaks. | ||
| 32 | * | ||
| 33 | * 3d.emc clock is scaled proportionately to 3d clock, with a quadratic- | ||
| 34 | * bezier-like factor added to pull 3d.emc rate a bit lower. | ||
| 35 | */ | ||
| 36 | |||
| 37 | #include <linux/debugfs.h> | ||
| 38 | #include <linux/types.h> | ||
| 39 | #include <linux/clk.h> | ||
| 40 | #include <mach/clk.h> | ||
| 41 | #include <mach/hardware.h> | ||
| 42 | #include "scale3d.h" | ||
| 43 | #include "dev.h" | ||
| 44 | |||
| 45 | static int scale3d_is_enabled(void); | ||
| 46 | static void scale3d_enable(int enable); | ||
| 47 | |||
| 48 | #define POW2(x) ((x) * (x)) | ||
| 49 | |||
| 50 | /* | ||
| 51 | * debugfs parameters to control 3d clock scaling test | ||
| 52 | * | ||
| 53 | * period - time period for clock rate evaluation | ||
| 54 | * fast_response - time period for evaluation of 'busy' spikes | ||
| 55 | * idle_min - if less than [idle_min] percent idle over [fast_response] | ||
| 56 | * microseconds, clock up. | ||
| 57 | * idle_max - if over [idle_max] percent idle over [period] microseconds, | ||
| 58 | * clock down. | ||
| 59 | * max_scale - limits rate changes to no less than (100 - max_scale)% or | ||
| 60 | * (100 + 2 * max_scale)% of current clock rate | ||
| 61 | * verbosity - set above 5 for debug printouts | ||
| 62 | */ | ||
| 63 | |||
/*
 * State for the 3d clock-scaling heuristic.  A single static instance
 * (scale3d, below) is shared by all entry points; 'lock' serializes
 * access to the timestamps and counters.
 *
 * Most tunables come in pairs: a p_-prefixed baseline exposed through
 * debugfs, and a working copy that scaling_adjust() may nudge at
 * runtime when p_adjust is nonzero.
 */
struct scale3d_info_rec {
	struct mutex lock; /* lock for timestamps etc */
	int enable;			/* scaling currently enabled */
	int init;			/* one-time init completed */
	ktime_t idle_frame;		/* start of long-term (period) window */
	ktime_t fast_frame;		/* start of short-term window */
	ktime_t last_idle;		/* when the current idle span began */
	ktime_t last_short_term_idle;	/* same, for the short-term window */
	int is_idle;			/* 3d unit currently idle */
	ktime_t last_tweak;		/* last scaling_adjust() run */
	ktime_t last_down;		/* last entry into scaled-down state */
	int fast_up_count;		/* scale-up hints since last adjust */
	int slow_down_count;		/* scale-down hints since last adjust */
	int is_scaled;			/* clocks currently scaled down */
	int fast_responses;		/* short-term evaluations since adjust */
	unsigned long idle_total;	/* idle us in the long-term window */
	unsigned long idle_short_term_total; /* idle us, short-term window */
	unsigned long max_rate_3d;	/* 3d clock ceiling (Hz) */
	/* emc correlation: Remc = S*R3d + O - (Sd*(R3d - Rm)^2 + Od);
	 * see the derivation comment in nvhost_scale3d_init() */
	long emc_slope;			/* S */
	long emc_offset;		/* O */
	long emc_dip_slope;		/* Sd */
	long emc_dip_offset;		/* Od */
	long emc_xmid;			/* Rm -- NOTE(review): set in Hz at
					 * init but compared against a kHz
					 * value in scale3d_clocks(); verify
					 * the intended units */
	unsigned long min_rate_3d;	/* 3d clock floor (Hz) */
	struct work_struct work;	/* applies 'scale' percent to clocks */
	struct delayed_work idle_timer;	/* delayed idle re-check */
	unsigned int scale;		/* target percent for the worker */
	unsigned int p_period;		/* baseline long-term window (us) */
	unsigned int period;		/* working long-term window (us) */
	unsigned int p_idle_min;	/* baseline min idle %: clock up below */
	unsigned int idle_min;		/* working copy */
	unsigned int p_idle_max;	/* baseline max idle %: clock down above */
	unsigned int idle_max;		/* working copy */
	unsigned int p_fast_response;	/* baseline short-term window (us) */
	unsigned int fast_response;	/* working copy */
	unsigned int p_adjust;		/* enable runtime parameter adjust */
	unsigned int p_scale_emc;	/* also scale 3d.emc */
	unsigned int p_emc_dip;		/* apply quadratic dip to emc rate */
	unsigned int p_verbosity;	/* >= 5 / >= 10 enable debug prints */
	struct clk *clk_3d;		/* primary 3d clock */
	struct clk *clk_3d2;		/* second 3d unit (Tegra3 only) */
	struct clk *clk_3d_emc;		/* 3d.emc memory clock */
};

/* the single global scaling-state instance */
static struct scale3d_info_rec scale3d;
| 109 | |||
| 110 | static void scale3d_clocks(unsigned long percent) | ||
| 111 | { | ||
| 112 | unsigned long hz, curr; | ||
| 113 | |||
| 114 | if (!tegra_is_clk_enabled(scale3d.clk_3d)) | ||
| 115 | return; | ||
| 116 | |||
| 117 | if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) | ||
| 118 | if (!tegra_is_clk_enabled(scale3d.clk_3d2)) | ||
| 119 | return; | ||
| 120 | |||
| 121 | curr = clk_get_rate(scale3d.clk_3d); | ||
| 122 | hz = percent * (curr / 100); | ||
| 123 | |||
| 124 | if (!(hz >= scale3d.max_rate_3d && curr == scale3d.max_rate_3d)) { | ||
| 125 | if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) | ||
| 126 | clk_set_rate(scale3d.clk_3d2, 0); | ||
| 127 | clk_set_rate(scale3d.clk_3d, hz); | ||
| 128 | |||
| 129 | if (scale3d.p_scale_emc) { | ||
| 130 | long after = (long) clk_get_rate(scale3d.clk_3d); | ||
| 131 | hz = after * scale3d.emc_slope + scale3d.emc_offset; | ||
| 132 | if (scale3d.p_emc_dip) | ||
| 133 | hz -= | ||
| 134 | (scale3d.emc_dip_slope * | ||
| 135 | POW2(after / 1000 - scale3d.emc_xmid) + | ||
| 136 | scale3d.emc_dip_offset); | ||
| 137 | clk_set_rate(scale3d.clk_3d_emc, hz); | ||
| 138 | } | ||
| 139 | } | ||
| 140 | } | ||
| 141 | |||
| 142 | static void scale3d_clocks_handler(struct work_struct *work) | ||
| 143 | { | ||
| 144 | unsigned int scale; | ||
| 145 | |||
| 146 | mutex_lock(&scale3d.lock); | ||
| 147 | scale = scale3d.scale; | ||
| 148 | mutex_unlock(&scale3d.lock); | ||
| 149 | |||
| 150 | if (scale != 0) | ||
| 151 | scale3d_clocks(scale); | ||
| 152 | } | ||
| 153 | |||
/*
 * Quiesce the scaler for suspend: flush any in-flight clock-change
 * work and cancel the delayed idle check.  Clock rates themselves are
 * left untouched.
 */
void nvhost_scale3d_suspend(struct nvhost_device *dev)
{
	if (!scale3d.enable)
		return;

	cancel_work_sync(&scale3d.work);
	cancel_delayed_work(&scale3d.idle_timer);
}
| 162 | |||
| 163 | /* set 3d clocks to max */ | ||
| 164 | static void reset_3d_clocks(void) | ||
| 165 | { | ||
| 166 | if (clk_get_rate(scale3d.clk_3d) != scale3d.max_rate_3d) { | ||
| 167 | clk_set_rate(scale3d.clk_3d, scale3d.max_rate_3d); | ||
| 168 | if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) | ||
| 169 | clk_set_rate(scale3d.clk_3d2, scale3d.max_rate_3d); | ||
| 170 | if (scale3d.p_scale_emc) | ||
| 171 | clk_set_rate(scale3d.clk_3d_emc, | ||
| 172 | clk_round_rate(scale3d.clk_3d_emc, UINT_MAX)); | ||
| 173 | } | ||
| 174 | } | ||
| 175 | |||
| 176 | static int scale3d_is_enabled(void) | ||
| 177 | { | ||
| 178 | int enable; | ||
| 179 | |||
| 180 | if (!scale3d.enable) | ||
| 181 | return 0; | ||
| 182 | |||
| 183 | mutex_lock(&scale3d.lock); | ||
| 184 | enable = scale3d.enable; | ||
| 185 | mutex_unlock(&scale3d.lock); | ||
| 186 | |||
| 187 | return enable; | ||
| 188 | } | ||
| 189 | |||
| 190 | static void scale3d_enable(int enable) | ||
| 191 | { | ||
| 192 | int disable = 0; | ||
| 193 | |||
| 194 | mutex_lock(&scale3d.lock); | ||
| 195 | |||
| 196 | if (enable) { | ||
| 197 | if (scale3d.max_rate_3d != scale3d.min_rate_3d) | ||
| 198 | scale3d.enable = 1; | ||
| 199 | } else { | ||
| 200 | scale3d.enable = 0; | ||
| 201 | disable = 1; | ||
| 202 | } | ||
| 203 | |||
| 204 | mutex_unlock(&scale3d.lock); | ||
| 205 | |||
| 206 | if (disable) | ||
| 207 | reset_3d_clocks(); | ||
| 208 | } | ||
| 209 | |||
| 210 | static void reset_scaling_counters(ktime_t time) | ||
| 211 | { | ||
| 212 | scale3d.idle_total = 0; | ||
| 213 | scale3d.idle_short_term_total = 0; | ||
| 214 | scale3d.last_idle = time; | ||
| 215 | scale3d.last_short_term_idle = time; | ||
| 216 | scale3d.idle_frame = time; | ||
| 217 | } | ||
| 218 | |||
| 219 | /* scaling_adjust - use scale up / scale down hint counts to adjust scaling | ||
| 220 | * parameters. | ||
| 221 | * | ||
| 222 | * hint_ratio is 100 x the ratio of scale up to scale down hints. Three cases | ||
| 223 | * are distinguished: | ||
| 224 | * | ||
| 225 | * hint_ratio < HINT_RATIO_MIN - set parameters to maximize scaling effect | ||
| 226 | * hint_ratio > HINT_RATIO_MAX - set parameters to minimize scaling effect | ||
| 227 | * hint_ratio between limits - scale parameters linearly | ||
| 228 | * | ||
| 229 | * the parameters adjusted are | ||
| 230 | * | ||
| 231 | * * fast_response time | ||
| 232 | * * period - time for scaling down estimate | ||
| 233 | * * idle_min percentage | ||
| 234 | * * idle_max percentage | ||
| 235 | */ | ||
| 236 | #define SCALING_ADJUST_PERIOD 1000000 | ||
| 237 | #define HINT_RATIO_MAX 400 | ||
| 238 | #define HINT_RATIO_MIN 100 | ||
| 239 | #define HINT_RATIO_MID ((HINT_RATIO_MAX + HINT_RATIO_MIN) / 2) | ||
| 240 | #define HINT_RATIO_DIFF (HINT_RATIO_MAX - HINT_RATIO_MIN) | ||
| 241 | |||
static void scaling_adjust(ktime_t time)
{
	long hint_ratio;
	long fast_response_adjustment;
	long period_adjustment;
	int idle_min_adjustment;
	int idle_max_adjustment;
	unsigned long dt;

	/* re-tune at most once per SCALING_ADJUST_PERIOD microseconds */
	dt = (unsigned long) ktime_us_delta(time, scale3d.last_tweak);
	if (dt < SCALING_ADJUST_PERIOD)
		return;

	/* 100 x (up hints / down hints); the +1 on each side avoids
	 * division by zero when no hints were recorded */
	hint_ratio = (100 * (scale3d.fast_up_count + 1)) /
		(scale3d.slow_down_count + 1);

	if (hint_ratio > HINT_RATIO_MAX) {
		/* dominated by scale-up hints: minimize scaling effect */
		fast_response_adjustment = -((int) scale3d.p_fast_response) / 4;
		period_adjustment = scale3d.p_period / 2;
		idle_min_adjustment = scale3d.p_idle_min;
		idle_max_adjustment = scale3d.p_idle_max;
	} else if (hint_ratio < HINT_RATIO_MIN) {
		/* dominated by scale-down hints: maximize scaling effect */
		fast_response_adjustment = scale3d.p_fast_response / 2;
		period_adjustment = -((int) scale3d.p_period) / 4;
		idle_min_adjustment = -((int) scale3d.p_idle_min) / 2;
		idle_max_adjustment = -((int) scale3d.p_idle_max) / 2;
	} else {
		/* between the limits: interpolate linearly around the
		 * midpoint; 'diff' drives the timing parameters, 'factor'
		 * the idle thresholds (asymmetric on the two sides) */
		int diff;
		int factor;

		diff = HINT_RATIO_MID - hint_ratio;
		if (diff < 0)
			factor = -diff * 2;
		else {
			factor = -diff;
			diff *= 2;
		}

		fast_response_adjustment = diff *
			(scale3d.p_fast_response / (HINT_RATIO_DIFF * 2));
		period_adjustment =
			diff * (scale3d.p_period / HINT_RATIO_DIFF);
		idle_min_adjustment =
			(factor * (int) scale3d.p_idle_min) / HINT_RATIO_DIFF;
		idle_max_adjustment =
			(factor * (int) scale3d.p_idle_max) / HINT_RATIO_DIFF;
	}

	/* apply the adjustments on top of the debugfs baselines */
	scale3d.fast_response =
		scale3d.p_fast_response + fast_response_adjustment;
	scale3d.period = scale3d.p_period + period_adjustment;
	scale3d.idle_min = scale3d.p_idle_min + idle_min_adjustment;
	scale3d.idle_max = scale3d.p_idle_max + idle_max_adjustment;

	if (scale3d.p_verbosity >= 10)
		pr_info("scale3d stats: + %d - %d (/ %d) f %u p %u min %u max %u\n",
			scale3d.fast_up_count, scale3d.slow_down_count,
			scale3d.fast_responses, scale3d.fast_response,
			scale3d.period, scale3d.idle_min, scale3d.idle_max);

	/* restart hint collection for the next adjustment period */
	scale3d.fast_up_count = 0;
	scale3d.slow_down_count = 0;
	scale3d.fast_responses = 0;
	scale3d.last_down = time;
	scale3d.last_tweak = time;
}
| 308 | |||
| 309 | #undef SCALING_ADJUST_PERIOD | ||
| 310 | #undef HINT_RATIO_MAX | ||
| 311 | #undef HINT_RATIO_MIN | ||
| 312 | #undef HINT_RATIO_MID | ||
| 313 | #undef HINT_RATIO_DIFF | ||
| 314 | |||
/*
 * Evaluate the accumulated idle statistics and decide whether to
 * change clock rates.  Called from the idle and busy notifiers with
 * scale3d.lock held.
 */
static void scaling_state_check(ktime_t time)
{
	unsigned long dt;

	/* adjustment: set scale parameters (fast_response, period) +/- 25%
	 * based on ratio of scale up to scale down hints
	 */
	if (scale3d.p_adjust)
		scaling_adjust(time);
	else {
		/* adjustment disabled: track the debugfs baselines */
		scale3d.fast_response = scale3d.p_fast_response;
		scale3d.period = scale3d.p_period;
		scale3d.idle_min = scale3d.p_idle_min;
		scale3d.idle_max = scale3d.p_idle_max;
	}

	/* check for load peaks over the short (fast_response) window */
	dt = (unsigned long) ktime_us_delta(time, scale3d.fast_frame);
	if (dt > scale3d.fast_response) {
		unsigned long idleness =
			(scale3d.idle_short_term_total * 100) / dt;
		scale3d.fast_responses++;
		scale3d.fast_frame = time;
		/* if too busy, scale up */
		if (idleness < scale3d.idle_min) {
			scale3d.is_scaled = 0;
			scale3d.fast_up_count++;
			if (scale3d.p_verbosity >= 5)
				pr_info("scale3d: %ld%% busy\n",
					100 - idleness);

			/* go straight to max and restart accounting */
			reset_3d_clocks();
			reset_scaling_counters(time);
			return;
		}
		scale3d.idle_short_term_total = 0;
		scale3d.last_short_term_idle = time;
	}

	/* long-term (period) window: consider clocking down */
	dt = (unsigned long) ktime_us_delta(time, scale3d.idle_frame);
	if (dt > scale3d.period) {
		unsigned long idleness = (scale3d.idle_total * 100) / dt;

		if (scale3d.p_verbosity >= 5)
			pr_info("scale3d: idle %lu, ~%lu%%\n",
				scale3d.idle_total, idleness);

		if (idleness > scale3d.idle_max) {
			if (!scale3d.is_scaled) {
				scale3d.is_scaled = 1;
				scale3d.last_down = time;
			}
			scale3d.slow_down_count++;
			/* if idle time is high, clock down */
			scale3d.scale = 100 - (idleness - scale3d.idle_min);
			/* clk_set_rate may sleep: defer to the worker */
			schedule_work(&scale3d.work);
		}

		reset_scaling_counters(time);
	}
}
| 376 | |||
/*
 * Called when all outstanding submits have completed (the 3d unit went
 * idle).  Accumulates idle time into both accounting windows, runs the
 * scaling decision, and arms a delayed re-check so a long idle stretch
 * can still clock down without further notifications.
 */
void nvhost_scale3d_notify_idle(struct nvhost_device *dev)
{
	ktime_t t;
	unsigned long dt;

	if (!scale3d.enable)
		return;

	mutex_lock(&scale3d.lock);

	t = ktime_get();

	if (scale3d.is_idle) {
		/* already idle: extend both idle-time windows */
		dt = ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += dt;
		dt = ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += dt;
	} else
		scale3d.is_idle = 1;

	scale3d.last_idle = t;
	scale3d.last_short_term_idle = t;

	scaling_state_check(scale3d.last_idle);

	/* delay idle_max % of 2 * fast_response time (given in microseconds) */
	schedule_delayed_work(&scale3d.idle_timer,
		msecs_to_jiffies((scale3d.idle_max * scale3d.fast_response)
			/ 50000));

	mutex_unlock(&scale3d.lock);
}
| 409 | |||
| 410 | void nvhost_scale3d_notify_busy(struct nvhost_device *dev) | ||
| 411 | { | ||
| 412 | unsigned long idle; | ||
| 413 | unsigned long short_term_idle; | ||
| 414 | ktime_t t; | ||
| 415 | |||
| 416 | if (!scale3d.enable) | ||
| 417 | return; | ||
| 418 | |||
| 419 | mutex_lock(&scale3d.lock); | ||
| 420 | |||
| 421 | cancel_delayed_work(&scale3d.idle_timer); | ||
| 422 | |||
| 423 | t = ktime_get(); | ||
| 424 | |||
| 425 | if (scale3d.is_idle) { | ||
| 426 | idle = (unsigned long) | ||
| 427 | ktime_us_delta(t, scale3d.last_idle); | ||
| 428 | scale3d.idle_total += idle; | ||
| 429 | short_term_idle = | ||
| 430 | ktime_us_delta(t, scale3d.last_short_term_idle); | ||
| 431 | scale3d.idle_short_term_total += short_term_idle; | ||
| 432 | scale3d.is_idle = 0; | ||
| 433 | } | ||
| 434 | |||
| 435 | scaling_state_check(t); | ||
| 436 | |||
| 437 | mutex_unlock(&scale3d.lock); | ||
| 438 | } | ||
| 439 | |||
| 440 | static void scale3d_idle_handler(struct work_struct *work) | ||
| 441 | { | ||
| 442 | int notify_idle = 0; | ||
| 443 | |||
| 444 | if (!scale3d.enable) | ||
| 445 | return; | ||
| 446 | |||
| 447 | mutex_lock(&scale3d.lock); | ||
| 448 | |||
| 449 | if (scale3d.is_idle && tegra_is_clk_enabled(scale3d.clk_3d)) { | ||
| 450 | unsigned long curr = clk_get_rate(scale3d.clk_3d); | ||
| 451 | if (curr > scale3d.min_rate_3d) | ||
| 452 | notify_idle = 1; | ||
| 453 | } | ||
| 454 | |||
| 455 | mutex_unlock(&scale3d.lock); | ||
| 456 | |||
| 457 | if (notify_idle) | ||
| 458 | nvhost_scale3d_notify_idle(NULL); | ||
| 459 | } | ||
| 460 | |||
| 461 | void nvhost_scale3d_reset() | ||
| 462 | { | ||
| 463 | ktime_t t; | ||
| 464 | |||
| 465 | if (!scale3d.enable) | ||
| 466 | return; | ||
| 467 | |||
| 468 | t = ktime_get(); | ||
| 469 | mutex_lock(&scale3d.lock); | ||
| 470 | reset_scaling_counters(t); | ||
| 471 | mutex_unlock(&scale3d.lock); | ||
| 472 | } | ||
| 473 | |||
| 474 | /* | ||
| 475 | * debugfs parameters to control 3d clock scaling | ||
| 476 | */ | ||
| 477 | |||
/*
 * Create the debugfs knobs under <de>/scaling that expose the p_*
 * baseline scaling parameters (see struct scale3d_info_rec).  Stops at
 * the first file that fails to create.
 */
void nvhost_scale3d_debug_init(struct dentry *de)
{
	struct dentry *d, *f;

	d = debugfs_create_dir("scaling", de);
	if (!d) {
		pr_err("scale3d: can\'t create debugfs directory\n");
		return;
	}

/* create one read/write u32 file wired to scale3d.p_<fname> */
#define CREATE_SCALE3D_FILE(fname) \
do {\
	f = debugfs_create_u32(#fname, S_IRUGO | S_IWUSR, d,\
			&scale3d.p_##fname);\
	if (NULL == f) {\
		pr_err("scale3d: can\'t create file " #fname "\n");\
		return;\
	} \
} while (0)

	CREATE_SCALE3D_FILE(fast_response);
	CREATE_SCALE3D_FILE(idle_min);
	CREATE_SCALE3D_FILE(idle_max);
	CREATE_SCALE3D_FILE(period);
	CREATE_SCALE3D_FILE(adjust);
	CREATE_SCALE3D_FILE(scale_emc);
	CREATE_SCALE3D_FILE(emc_dip);
	CREATE_SCALE3D_FILE(verbosity);
#undef CREATE_SCALE3D_FILE
}
| 508 | |||
| 509 | static ssize_t enable_3d_scaling_show(struct device *device, | ||
| 510 | struct device_attribute *attr, char *buf) | ||
| 511 | { | ||
| 512 | ssize_t res; | ||
| 513 | |||
| 514 | res = snprintf(buf, PAGE_SIZE, "%d\n", scale3d_is_enabled()); | ||
| 515 | |||
| 516 | return res; | ||
| 517 | } | ||
| 518 | |||
| 519 | static ssize_t enable_3d_scaling_store(struct device *dev, | ||
| 520 | struct device_attribute *attr, const char *buf, size_t count) | ||
| 521 | { | ||
| 522 | unsigned long val = 0; | ||
| 523 | |||
| 524 | if (strict_strtoul(buf, 10, &val) < 0) | ||
| 525 | return -EINVAL; | ||
| 526 | |||
| 527 | scale3d_enable(val); | ||
| 528 | |||
| 529 | return count; | ||
| 530 | } | ||
| 531 | |||
| 532 | static DEVICE_ATTR(enable_3d_scaling, S_IRUGO | S_IWUSR, | ||
| 533 | enable_3d_scaling_show, enable_3d_scaling_store); | ||
| 534 | |||
| 535 | void nvhost_scale3d_init(struct nvhost_device *d) | ||
| 536 | { | ||
| 537 | if (!scale3d.init) { | ||
| 538 | int error; | ||
| 539 | unsigned long max_emc, min_emc; | ||
| 540 | long correction; | ||
| 541 | mutex_init(&scale3d.lock); | ||
| 542 | |||
| 543 | scale3d.clk_3d = d->clk[0]; | ||
| 544 | if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) { | ||
| 545 | scale3d.clk_3d2 = d->clk[1]; | ||
| 546 | scale3d.clk_3d_emc = d->clk[2]; | ||
| 547 | } else | ||
| 548 | scale3d.clk_3d_emc = d->clk[1]; | ||
| 549 | |||
| 550 | scale3d.max_rate_3d = clk_round_rate(scale3d.clk_3d, UINT_MAX); | ||
| 551 | scale3d.min_rate_3d = clk_round_rate(scale3d.clk_3d, 0); | ||
| 552 | |||
| 553 | if (scale3d.max_rate_3d == scale3d.min_rate_3d) { | ||
| 554 | pr_warn("scale3d: 3d max rate = min rate (%lu), " | ||
| 555 | "disabling\n", scale3d.max_rate_3d); | ||
| 556 | scale3d.enable = 0; | ||
| 557 | return; | ||
| 558 | } | ||
| 559 | |||
| 560 | /* emc scaling: | ||
| 561 | * | ||
| 562 | * Remc = S * R3d + O - (Sd * (R3d - Rm)^2 + Od) | ||
| 563 | * | ||
| 564 | * Remc - 3d.emc rate | ||
| 565 | * R3d - 3d.cbus rate | ||
| 566 | * Rm - 3d.cbus 'middle' rate = (max + min)/2 | ||
| 567 | * S - emc_slope | ||
| 568 | * O - emc_offset | ||
| 569 | * Sd - emc_dip_slope | ||
| 570 | * Od - emc_dip_offset | ||
| 571 | * | ||
| 572 | * this superposes a quadratic dip centered around the middle 3d | ||
| 573 | * frequency over a linear correlation of 3d.emc to 3d clock | ||
| 574 | * rates. | ||
| 575 | * | ||
| 576 | * S, O are chosen so that the maximum 3d rate produces the | ||
| 577 | * maximum 3d.emc rate exactly, and the minimum 3d rate produces | ||
| 578 | * at least the minimum 3d.emc rate. | ||
| 579 | * | ||
| 580 | * Sd and Od are chosen to produce the largest dip that will | ||
| 581 | * keep 3d.emc frequencies monotonously decreasing with 3d | ||
| 582 | * frequencies. To achieve this, the first derivative of Remc | ||
| 583 | * with respect to R3d should be zero for the minimal 3d rate: | ||
| 584 | * | ||
| 585 | * R'emc = S - 2 * Sd * (R3d - Rm) | ||
| 586 | * R'emc(R3d-min) = 0 | ||
| 587 | * S = 2 * Sd * (R3d-min - Rm) | ||
| 588 | * = 2 * Sd * (R3d-min - R3d-max) / 2 | ||
| 589 | * Sd = S / (R3d-min - R3d-max) | ||
| 590 | * | ||
| 591 | * +---------------------------------------------------+ | ||
| 592 | * | Sd = -(emc-max - emc-min) / (R3d-min - R3d-max)^2 | | ||
| 593 | * +---------------------------------------------------+ | ||
| 594 | * | ||
| 595 | * dip = Sd * (R3d - Rm)^2 + Od | ||
| 596 | * | ||
| 597 | * requiring dip(R3d-min) = 0 and dip(R3d-max) = 0 gives | ||
| 598 | * | ||
| 599 | * Sd * (R3d-min - Rm)^2 + Od = 0 | ||
| 600 | * Od = -Sd * ((R3d-min - R3d-max) / 2)^2 | ||
| 601 | * = -Sd * ((R3d-min - R3d-max)^2) / 4 | ||
| 602 | * | ||
| 603 | * +------------------------------+ | ||
| 604 | * | Od = (emc-max - emc-min) / 4 | | ||
| 605 | * +------------------------------+ | ||
| 606 | */ | ||
| 607 | |||
| 608 | max_emc = clk_round_rate(scale3d.clk_3d_emc, UINT_MAX); | ||
| 609 | min_emc = clk_round_rate(scale3d.clk_3d_emc, 0); | ||
| 610 | |||
| 611 | scale3d.emc_slope = (max_emc - min_emc) / | ||
| 612 | (scale3d.max_rate_3d - scale3d.min_rate_3d); | ||
| 613 | scale3d.emc_offset = max_emc - | ||
| 614 | scale3d.emc_slope * scale3d.max_rate_3d; | ||
| 615 | /* guarantee max 3d rate maps to max emc rate */ | ||
| 616 | scale3d.emc_offset += max_emc - | ||
| 617 | (scale3d.emc_slope * scale3d.max_rate_3d + | ||
| 618 | scale3d.emc_offset); | ||
| 619 | |||
| 620 | scale3d.emc_dip_offset = (max_emc - min_emc) / 4; | ||
| 621 | scale3d.emc_dip_slope = | ||
| 622 | -4 * (scale3d.emc_dip_offset / | ||
| 623 | (POW2(scale3d.max_rate_3d - scale3d.min_rate_3d))); | ||
| 624 | scale3d.emc_xmid = | ||
| 625 | (scale3d.max_rate_3d + scale3d.min_rate_3d) / 2; | ||
| 626 | correction = | ||
| 627 | scale3d.emc_dip_offset + | ||
| 628 | scale3d.emc_dip_slope * | ||
| 629 | POW2(scale3d.max_rate_3d - scale3d.emc_xmid); | ||
| 630 | scale3d.emc_dip_offset -= correction; | ||
| 631 | |||
| 632 | INIT_WORK(&scale3d.work, scale3d_clocks_handler); | ||
| 633 | INIT_DELAYED_WORK(&scale3d.idle_timer, scale3d_idle_handler); | ||
| 634 | |||
| 635 | /* set scaling parameter defaults */ | ||
| 636 | scale3d.enable = 1; | ||
| 637 | scale3d.period = scale3d.p_period = 100000; | ||
| 638 | scale3d.idle_min = scale3d.p_idle_min = 10; | ||
| 639 | scale3d.idle_max = scale3d.p_idle_max = 15; | ||
| 640 | scale3d.fast_response = scale3d.p_fast_response = 7000; | ||
| 641 | scale3d.p_scale_emc = 1; | ||
| 642 | scale3d.p_emc_dip = 1; | ||
| 643 | scale3d.p_verbosity = 0; | ||
| 644 | scale3d.p_adjust = 1; | ||
| 645 | |||
| 646 | error = device_create_file(&d->dev, | ||
| 647 | &dev_attr_enable_3d_scaling); | ||
| 648 | if (error) | ||
| 649 | dev_err(&d->dev, "failed to create sysfs attributes"); | ||
| 650 | |||
| 651 | scale3d.init = 1; | ||
| 652 | } | ||
| 653 | |||
| 654 | nvhost_scale3d_reset(); | ||
| 655 | } | ||
| 656 | |||
| 657 | void nvhost_scale3d_deinit(struct nvhost_device *dev) | ||
| 658 | { | ||
| 659 | device_remove_file(&dev->dev, &dev_attr_enable_3d_scaling); | ||
| 660 | scale3d.init = 0; | ||
| 661 | } | ||
diff --git a/drivers/video/tegra/host/gr3d/scale3d.h b/drivers/video/tegra/host/gr3d/scale3d.h new file mode 100644 index 00000000000..f8aae1d591a --- /dev/null +++ b/drivers/video/tegra/host/gr3d/scale3d.h | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/t30/scale3d.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host 3D Clock Scaling | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef NVHOST_T30_SCALE3D_H | ||
| 22 | #define NVHOST_T30_SCALE3D_H | ||
| 23 | |||
| 24 | struct nvhost_device; | ||
| 25 | struct device; | ||
| 26 | struct dentry; | ||
| 27 | |||
| 28 | /* Initialization and de-initialization for module */ | ||
| 29 | void nvhost_scale3d_init(struct nvhost_device *); | ||
| 30 | void nvhost_scale3d_deinit(struct nvhost_device *); | ||
| 31 | |||
| 32 | /* Suspend is called when powering down module */ | ||
| 33 | void nvhost_scale3d_suspend(struct nvhost_device *); | ||
| 34 | |||
| 35 | /* reset 3d module load counters, called on resume */ | ||
| 36 | void nvhost_scale3d_reset(void); | ||
| 37 | |||
| 38 | /* | ||
| 39 | * call when performing submit to notify scaling mechanism that 3d module is | ||
| 40 | * in use | ||
| 41 | */ | ||
| 42 | void nvhost_scale3d_notify_busy(struct nvhost_device *); | ||
| 43 | void nvhost_scale3d_notify_idle(struct nvhost_device *); | ||
| 44 | |||
| 45 | void nvhost_scale3d_debug_init(struct dentry *de); | ||
| 46 | |||
| 47 | #endif | ||
diff --git a/drivers/video/tegra/host/host1x/Makefile b/drivers/video/tegra/host/host1x/Makefile new file mode 100644 index 00000000000..c3214ffe147 --- /dev/null +++ b/drivers/video/tegra/host/host1x/Makefile | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | GCOV_PROFILE := y | ||
| 2 | |||
| 3 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
| 4 | |||
| 5 | nvhost-host1x-objs = \ | ||
| 6 | host1x_syncpt.o \ | ||
| 7 | host1x_channel.o \ | ||
| 8 | host1x_intr.o \ | ||
| 9 | host1x_cdma.o \ | ||
| 10 | host1x_debug.o | ||
| 11 | |||
| 12 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-host1x.o | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.c b/drivers/video/tegra/host/host1x/host1x_cdma.c new file mode 100644 index 00000000000..cdd6026718b --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_cdma.c | |||
| @@ -0,0 +1,665 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_cdma.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Command DMA | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/slab.h> | ||
| 22 | #include "nvhost_cdma.h" | ||
| 23 | #include "dev.h" | ||
| 24 | |||
| 25 | #include "host1x_hardware.h" | ||
| 26 | #include "host1x_syncpt.h" | ||
| 27 | #include "host1x_cdma.h" | ||
| 28 | #include "host1x_hwctx.h" | ||
| 29 | |||
| 30 | static inline u32 host1x_channel_dmactrl(int stop, int get_rst, int init_get) | ||
| 31 | { | ||
| 32 | return HOST1X_CREATE(CHANNEL_DMACTRL, DMASTOP, stop) | ||
| 33 | | HOST1X_CREATE(CHANNEL_DMACTRL, DMAGETRST, get_rst) | ||
| 34 | | HOST1X_CREATE(CHANNEL_DMACTRL, DMAINITGET, init_get); | ||
| 35 | } | ||
| 36 | |||
| 37 | static void cdma_timeout_handler(struct work_struct *work); | ||
| 38 | |||
| 39 | /* | ||
| 40 | * push_buffer | ||
| 41 | * | ||
| 42 | * The push buffer is a circular array of words to be fetched by command DMA. | ||
| 43 | * Note that it works slightly differently to the sync queue; fence == cur | ||
| 44 | * means that the push buffer is full, not empty. | ||
| 45 | */ | ||
| 46 | |||
| 47 | |||
| 48 | /** | ||
| 49 | * Reset to empty push buffer | ||
| 50 | */ | ||
| 51 | static void push_buffer_reset(struct push_buffer *pb) | ||
| 52 | { | ||
| 53 | pb->fence = PUSH_BUFFER_SIZE - 8; | ||
| 54 | pb->cur = 0; | ||
| 55 | } | ||
| 56 | |||
| 57 | /** | ||
| 58 | * Init push buffer resources | ||
| 59 | */ | ||
| 60 | static int push_buffer_init(struct push_buffer *pb) | ||
| 61 | { | ||
| 62 | struct nvhost_cdma *cdma = pb_to_cdma(pb); | ||
| 63 | struct nvmap_client *nvmap = cdma_to_nvmap(cdma); | ||
| 64 | pb->mem = NULL; | ||
| 65 | pb->mapped = NULL; | ||
| 66 | pb->phys = 0; | ||
| 67 | pb->nvmap = NULL; | ||
| 68 | |||
| 69 | BUG_ON(!cdma_pb_op(cdma).reset); | ||
| 70 | cdma_pb_op(cdma).reset(pb); | ||
| 71 | |||
| 72 | /* allocate and map pushbuffer memory */ | ||
| 73 | pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32, | ||
| 74 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
| 75 | if (IS_ERR_OR_NULL(pb->mem)) { | ||
| 76 | pb->mem = NULL; | ||
| 77 | goto fail; | ||
| 78 | } | ||
| 79 | pb->mapped = nvmap_mmap(pb->mem); | ||
| 80 | if (pb->mapped == NULL) | ||
| 81 | goto fail; | ||
| 82 | |||
| 83 | /* pin pushbuffer and get physical address */ | ||
| 84 | pb->phys = nvmap_pin(nvmap, pb->mem); | ||
| 85 | if (pb->phys >= 0xfffff000) { | ||
| 86 | pb->phys = 0; | ||
| 87 | goto fail; | ||
| 88 | } | ||
| 89 | |||
| 90 | /* memory for storing nvmap client and handles for each opcode pair */ | ||
| 91 | pb->nvmap = kzalloc(NVHOST_GATHER_QUEUE_SIZE * | ||
| 92 | sizeof(struct nvmap_client_handle), | ||
| 93 | GFP_KERNEL); | ||
| 94 | if (!pb->nvmap) | ||
| 95 | goto fail; | ||
| 96 | |||
| 97 | /* put the restart at the end of pushbuffer memory */ | ||
| 98 | *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) = | ||
| 99 | nvhost_opcode_restart(pb->phys); | ||
| 100 | |||
| 101 | return 0; | ||
| 102 | |||
| 103 | fail: | ||
| 104 | cdma_pb_op(cdma).destroy(pb); | ||
| 105 | return -ENOMEM; | ||
| 106 | } | ||
| 107 | |||
| 108 | /** | ||
| 109 | * Clean up push buffer resources | ||
| 110 | */ | ||
| 111 | static void push_buffer_destroy(struct push_buffer *pb) | ||
| 112 | { | ||
| 113 | struct nvhost_cdma *cdma = pb_to_cdma(pb); | ||
| 114 | struct nvmap_client *nvmap = cdma_to_nvmap(cdma); | ||
| 115 | if (pb->mapped) | ||
| 116 | nvmap_munmap(pb->mem, pb->mapped); | ||
| 117 | |||
| 118 | if (pb->phys != 0) | ||
| 119 | nvmap_unpin(nvmap, pb->mem); | ||
| 120 | |||
| 121 | if (pb->mem) | ||
| 122 | nvmap_free(nvmap, pb->mem); | ||
| 123 | |||
| 124 | kfree(pb->nvmap); | ||
| 125 | |||
| 126 | pb->mem = NULL; | ||
| 127 | pb->mapped = NULL; | ||
| 128 | pb->phys = 0; | ||
| 129 | pb->nvmap = 0; | ||
| 130 | } | ||
| 131 | |||
| 132 | /** | ||
| 133 | * Push two words to the push buffer | ||
| 134 | * Caller must ensure push buffer is not full | ||
| 135 | */ | ||
| 136 | static void push_buffer_push_to(struct push_buffer *pb, | ||
| 137 | struct nvmap_client *client, | ||
| 138 | struct nvmap_handle *handle, u32 op1, u32 op2) | ||
| 139 | { | ||
| 140 | u32 cur = pb->cur; | ||
| 141 | u32 *p = (u32 *)((u32)pb->mapped + cur); | ||
| 142 | u32 cur_nvmap = (cur/8) & (NVHOST_GATHER_QUEUE_SIZE - 1); | ||
| 143 | BUG_ON(cur == pb->fence); | ||
| 144 | *(p++) = op1; | ||
| 145 | *(p++) = op2; | ||
| 146 | pb->nvmap[cur_nvmap].client = client; | ||
| 147 | pb->nvmap[cur_nvmap].handle = handle; | ||
| 148 | pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1); | ||
| 149 | } | ||
| 150 | |||
| 151 | /** | ||
| 152 | * Pop a number of two word slots from the push buffer | ||
| 153 | * Caller must ensure push buffer is not empty | ||
| 154 | */ | ||
| 155 | static void push_buffer_pop_from(struct push_buffer *pb, | ||
| 156 | unsigned int slots) | ||
| 157 | { | ||
| 158 | /* Clear the nvmap references for old items from pb */ | ||
| 159 | unsigned int i; | ||
| 160 | u32 fence_nvmap = pb->fence/8; | ||
| 161 | for (i = 0; i < slots; i++) { | ||
| 162 | int cur_fence_nvmap = (fence_nvmap+i) | ||
| 163 | & (NVHOST_GATHER_QUEUE_SIZE - 1); | ||
| 164 | struct nvmap_client_handle *h = | ||
| 165 | &pb->nvmap[cur_fence_nvmap]; | ||
| 166 | h->client = NULL; | ||
| 167 | h->handle = NULL; | ||
| 168 | } | ||
| 169 | /* Advance the next write position */ | ||
| 170 | pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1); | ||
| 171 | } | ||
| 172 | |||
| 173 | /** | ||
| 174 | * Return the number of two word slots free in the push buffer | ||
| 175 | */ | ||
| 176 | static u32 push_buffer_space(struct push_buffer *pb) | ||
| 177 | { | ||
| 178 | return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8; | ||
| 179 | } | ||
| 180 | |||
| 181 | static u32 push_buffer_putptr(struct push_buffer *pb) | ||
| 182 | { | ||
| 183 | return pb->phys + pb->cur; | ||
| 184 | } | ||
| 185 | |||
| 186 | /* | ||
| 187 | * The syncpt incr buffer is filled with methods to increment syncpts, which | ||
| 188 | * is later GATHER-ed into the mainline PB. It's used when a timed out context | ||
| 189 | * is interleaved with other work, so needs to inline the syncpt increments | ||
| 190 | * to maintain the count (but otherwise does no work). | ||
| 191 | */ | ||
| 192 | |||
| 193 | /** | ||
| 194 | * Init timeout and syncpt incr buffer resources | ||
| 195 | */ | ||
| 196 | static int cdma_timeout_init(struct nvhost_cdma *cdma, | ||
| 197 | u32 syncpt_id) | ||
| 198 | { | ||
| 199 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
| 200 | struct nvmap_client *nvmap = cdma_to_nvmap(cdma); | ||
| 201 | struct syncpt_buffer *sb = &cdma->syncpt_buffer; | ||
| 202 | struct nvhost_channel *ch = cdma_to_channel(cdma); | ||
| 203 | u32 i = 0; | ||
| 204 | |||
| 205 | if (syncpt_id == NVSYNCPT_INVALID) | ||
| 206 | return -EINVAL; | ||
| 207 | |||
| 208 | /* allocate and map syncpt incr memory */ | ||
| 209 | sb->mem = nvmap_alloc(nvmap, | ||
| 210 | (SYNCPT_INCR_BUFFER_SIZE_WORDS * sizeof(u32)), 32, | ||
| 211 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
| 212 | if (IS_ERR_OR_NULL(sb->mem)) { | ||
| 213 | sb->mem = NULL; | ||
| 214 | goto fail; | ||
| 215 | } | ||
| 216 | sb->mapped = nvmap_mmap(sb->mem); | ||
| 217 | if (sb->mapped == NULL) | ||
| 218 | goto fail; | ||
| 219 | |||
| 220 | /* pin syncpt buffer and get physical address */ | ||
| 221 | sb->phys = nvmap_pin(nvmap, sb->mem); | ||
| 222 | if (sb->phys >= 0xfffff000) { | ||
| 223 | sb->phys = 0; | ||
| 224 | goto fail; | ||
| 225 | } | ||
| 226 | |||
| 227 | dev_dbg(&dev->dev->dev, "%s: SYNCPT_INCR buffer at 0x%x\n", | ||
| 228 | __func__, sb->phys); | ||
| 229 | |||
| 230 | sb->words_per_incr = (syncpt_id == NVSYNCPT_3D) ? 5 : 3; | ||
| 231 | sb->incr_per_buffer = (SYNCPT_INCR_BUFFER_SIZE_WORDS / | ||
| 232 | sb->words_per_incr); | ||
| 233 | |||
| 234 | /* init buffer with SETCL and INCR_SYNCPT methods */ | ||
| 235 | while (i < sb->incr_per_buffer) { | ||
| 236 | sb->mapped[i++] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, | ||
| 237 | 0, 0); | ||
| 238 | sb->mapped[i++] = nvhost_opcode_imm_incr_syncpt( | ||
| 239 | NV_SYNCPT_IMMEDIATE, | ||
| 240 | syncpt_id); | ||
| 241 | if (syncpt_id == NVSYNCPT_3D) { | ||
| 242 | /* also contains base increments */ | ||
| 243 | sb->mapped[i++] = nvhost_opcode_nonincr( | ||
| 244 | NV_CLASS_HOST_INCR_SYNCPT_BASE, | ||
| 245 | 1); | ||
| 246 | sb->mapped[i++] = nvhost_class_host_incr_syncpt_base( | ||
| 247 | NVWAITBASE_3D, 1); | ||
| 248 | } | ||
| 249 | sb->mapped[i++] = nvhost_opcode_setclass(ch->dev->class, | ||
| 250 | 0, 0); | ||
| 251 | } | ||
| 252 | wmb(); | ||
| 253 | |||
| 254 | INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); | ||
| 255 | cdma->timeout.initialized = true; | ||
| 256 | |||
| 257 | return 0; | ||
| 258 | fail: | ||
| 259 | cdma_op(cdma).timeout_destroy(cdma); | ||
| 260 | return -ENOMEM; | ||
| 261 | } | ||
| 262 | |||
| 263 | /** | ||
| 264 | * Clean up timeout syncpt buffer resources | ||
| 265 | */ | ||
| 266 | static void cdma_timeout_destroy(struct nvhost_cdma *cdma) | ||
| 267 | { | ||
| 268 | struct nvmap_client *nvmap = cdma_to_nvmap(cdma); | ||
| 269 | struct syncpt_buffer *sb = &cdma->syncpt_buffer; | ||
| 270 | |||
| 271 | if (sb->mapped) | ||
| 272 | nvmap_munmap(sb->mem, sb->mapped); | ||
| 273 | |||
| 274 | if (sb->phys != 0) | ||
| 275 | nvmap_unpin(nvmap, sb->mem); | ||
| 276 | |||
| 277 | if (sb->mem) | ||
| 278 | nvmap_free(nvmap, sb->mem); | ||
| 279 | |||
| 280 | sb->mem = NULL; | ||
| 281 | sb->mapped = NULL; | ||
| 282 | sb->phys = 0; | ||
| 283 | |||
| 284 | if (cdma->timeout.initialized) | ||
| 285 | cancel_delayed_work(&cdma->timeout.wq); | ||
| 286 | cdma->timeout.initialized = false; | ||
| 287 | } | ||
| 288 | |||
| 289 | /** | ||
| 290 | * Increment timedout buffer's syncpt via CPU. | ||
| 291 | */ | ||
| 292 | static void cdma_timeout_cpu_incr(struct nvhost_cdma *cdma, u32 getptr, | ||
| 293 | u32 syncpt_incrs, u32 syncval, u32 nr_slots) | ||
| 294 | { | ||
| 295 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
| 296 | struct push_buffer *pb = &cdma->push_buffer; | ||
| 297 | u32 i, getidx; | ||
| 298 | |||
| 299 | for (i = 0; i < syncpt_incrs; i++) | ||
| 300 | nvhost_syncpt_cpu_incr(&dev->syncpt, cdma->timeout.syncpt_id); | ||
| 301 | |||
| 302 | /* after CPU incr, ensure shadow is up to date */ | ||
| 303 | nvhost_syncpt_update_min(&dev->syncpt, cdma->timeout.syncpt_id); | ||
| 304 | |||
| 305 | /* update WAITBASE_3D by same number of incrs */ | ||
| 306 | if (cdma->timeout.syncpt_id == NVSYNCPT_3D) { | ||
| 307 | void __iomem *p; | ||
| 308 | p = dev->sync_aperture + HOST1X_SYNC_SYNCPT_BASE_0 + | ||
| 309 | (NVWAITBASE_3D * sizeof(u32)); | ||
| 310 | writel(syncval, p); | ||
| 311 | } | ||
| 312 | |||
| 313 | /* NOP all the PB slots */ | ||
| 314 | getidx = getptr - pb->phys; | ||
| 315 | while (nr_slots--) { | ||
| 316 | u32 *p = (u32 *)((u32)pb->mapped + getidx); | ||
| 317 | *(p++) = NVHOST_OPCODE_NOOP; | ||
| 318 | *(p++) = NVHOST_OPCODE_NOOP; | ||
| 319 | dev_dbg(&dev->dev->dev, "%s: NOP at 0x%x\n", | ||
| 320 | __func__, pb->phys + getidx); | ||
| 321 | getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1); | ||
| 322 | } | ||
| 323 | wmb(); | ||
| 324 | } | ||
| 325 | |||
| 326 | /** | ||
| 327 | * This routine is called at the point we transition back into a timed | ||
| 328 | * ctx. The syncpts are incremented via pushbuffer with a flag indicating | ||
| 329 | * whether there's a CTXSAVE that should be still executed (for the | ||
| 330 | * preceding HW ctx). | ||
| 331 | */ | ||
| 332 | static void cdma_timeout_pb_incr(struct nvhost_cdma *cdma, u32 getptr, | ||
| 333 | u32 syncpt_incrs, u32 nr_slots, | ||
| 334 | bool exec_ctxsave) | ||
| 335 | { | ||
| 336 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
| 337 | struct syncpt_buffer *sb = &cdma->syncpt_buffer; | ||
| 338 | struct push_buffer *pb = &cdma->push_buffer; | ||
| 339 | struct host1x_hwctx *hwctx = to_host1x_hwctx(cdma->timeout.ctx); | ||
| 340 | u32 getidx, *p; | ||
| 341 | |||
| 342 | /* should have enough slots to incr to desired count */ | ||
| 343 | BUG_ON(syncpt_incrs > (nr_slots * sb->incr_per_buffer)); | ||
| 344 | |||
| 345 | getidx = getptr - pb->phys; | ||
| 346 | if (exec_ctxsave) { | ||
| 347 | /* don't disrupt the CTXSAVE of a good/non-timed out ctx */ | ||
| 348 | nr_slots -= hwctx->save_slots; | ||
| 349 | syncpt_incrs -= hwctx->save_incrs; | ||
| 350 | |||
| 351 | getidx += (hwctx->save_slots * 8); | ||
| 352 | getidx &= (PUSH_BUFFER_SIZE - 1); | ||
| 353 | |||
| 354 | dev_dbg(&dev->dev->dev, | ||
| 355 | "%s: exec CTXSAVE of prev ctx (slots %d, incrs %d)\n", | ||
| 356 | __func__, nr_slots, syncpt_incrs); | ||
| 357 | } | ||
| 358 | |||
| 359 | while (syncpt_incrs) { | ||
| 360 | u32 incrs, count; | ||
| 361 | |||
| 362 | /* GATHER count are incrs * number of DWORDs per incr */ | ||
| 363 | incrs = min(syncpt_incrs, sb->incr_per_buffer); | ||
| 364 | count = incrs * sb->words_per_incr; | ||
| 365 | |||
| 366 | p = (u32 *)((u32)pb->mapped + getidx); | ||
| 367 | *(p++) = nvhost_opcode_gather(count); | ||
| 368 | *(p++) = sb->phys; | ||
| 369 | |||
| 370 | dev_dbg(&dev->dev->dev, | ||
| 371 | "%s: GATHER at 0x%x, from 0x%x, dcount = %d\n", | ||
| 372 | __func__, | ||
| 373 | pb->phys + getidx, sb->phys, | ||
| 374 | (incrs * sb->words_per_incr)); | ||
| 375 | |||
| 376 | syncpt_incrs -= incrs; | ||
| 377 | getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1); | ||
| 378 | nr_slots--; | ||
| 379 | } | ||
| 380 | |||
| 381 | /* NOP remaining slots */ | ||
| 382 | while (nr_slots--) { | ||
| 383 | p = (u32 *)((u32)pb->mapped + getidx); | ||
| 384 | *(p++) = NVHOST_OPCODE_NOOP; | ||
| 385 | *(p++) = NVHOST_OPCODE_NOOP; | ||
| 386 | dev_dbg(&dev->dev->dev, "%s: NOP at 0x%x\n", | ||
| 387 | __func__, pb->phys + getidx); | ||
| 388 | getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1); | ||
| 389 | } | ||
| 390 | wmb(); | ||
| 391 | } | ||
| 392 | |||
| 393 | /** | ||
| 394 | * Start channel DMA | ||
| 395 | */ | ||
| 396 | static void cdma_start(struct nvhost_cdma *cdma) | ||
| 397 | { | ||
| 398 | void __iomem *chan_regs = cdma_to_channel(cdma)->aperture; | ||
| 399 | |||
| 400 | if (cdma->running) | ||
| 401 | return; | ||
| 402 | |||
| 403 | BUG_ON(!cdma_pb_op(cdma).putptr); | ||
| 404 | cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer); | ||
| 405 | |||
| 406 | writel(host1x_channel_dmactrl(true, false, false), | ||
| 407 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
| 408 | |||
| 409 | /* set base, put, end pointer (all of memory) */ | ||
| 410 | writel(0, chan_regs + HOST1X_CHANNEL_DMASTART); | ||
| 411 | writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT); | ||
| 412 | writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND); | ||
| 413 | |||
| 414 | /* reset GET */ | ||
| 415 | writel(host1x_channel_dmactrl(true, true, true), | ||
| 416 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
| 417 | |||
| 418 | /* start the command DMA */ | ||
| 419 | writel(host1x_channel_dmactrl(false, false, false), | ||
| 420 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
| 421 | |||
| 422 | cdma->running = true; | ||
| 423 | } | ||
| 424 | |||
| 425 | /** | ||
| 426 | * Similar to cdma_start(), but rather than starting from an idle | ||
| 427 | * state (where DMA GET is set to DMA PUT), on a timeout we restore | ||
| 428 | * DMA GET from an explicit value (so DMA may again be pending). | ||
| 429 | */ | ||
| 430 | static void cdma_timeout_restart(struct nvhost_cdma *cdma, u32 getptr) | ||
| 431 | { | ||
| 432 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
| 433 | void __iomem *chan_regs = cdma_to_channel(cdma)->aperture; | ||
| 434 | |||
| 435 | if (cdma->running) | ||
| 436 | return; | ||
| 437 | |||
| 438 | BUG_ON(!cdma_pb_op(cdma).putptr); | ||
| 439 | cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer); | ||
| 440 | |||
| 441 | writel(host1x_channel_dmactrl(true, false, false), | ||
| 442 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
| 443 | |||
| 444 | /* set base, end pointer (all of memory) */ | ||
| 445 | writel(0, chan_regs + HOST1X_CHANNEL_DMASTART); | ||
| 446 | writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND); | ||
| 447 | |||
| 448 | /* set GET, by loading the value in PUT (then reset GET) */ | ||
| 449 | writel(getptr, chan_regs + HOST1X_CHANNEL_DMAPUT); | ||
| 450 | writel(host1x_channel_dmactrl(true, true, true), | ||
| 451 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
| 452 | |||
| 453 | dev_dbg(&dev->dev->dev, | ||
| 454 | "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", | ||
| 455 | __func__, | ||
| 456 | readl(chan_regs + HOST1X_CHANNEL_DMAGET), | ||
| 457 | readl(chan_regs + HOST1X_CHANNEL_DMAPUT), | ||
| 458 | cdma->last_put); | ||
| 459 | |||
| 460 | /* deassert GET reset and set PUT */ | ||
| 461 | writel(host1x_channel_dmactrl(true, false, false), | ||
| 462 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
| 463 | writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT); | ||
| 464 | |||
| 465 | /* start the command DMA */ | ||
| 466 | writel(host1x_channel_dmactrl(false, false, false), | ||
| 467 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
| 468 | |||
| 469 | cdma->running = true; | ||
| 470 | } | ||
| 471 | |||
| 472 | /** | ||
| 473 | * Kick channel DMA into action by writing its PUT offset (if it has changed) | ||
| 474 | */ | ||
| 475 | static void cdma_kick(struct nvhost_cdma *cdma) | ||
| 476 | { | ||
| 477 | u32 put; | ||
| 478 | BUG_ON(!cdma_pb_op(cdma).putptr); | ||
| 479 | |||
| 480 | put = cdma_pb_op(cdma).putptr(&cdma->push_buffer); | ||
| 481 | |||
| 482 | if (put != cdma->last_put) { | ||
| 483 | void __iomem *chan_regs = cdma_to_channel(cdma)->aperture; | ||
| 484 | wmb(); | ||
| 485 | writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT); | ||
| 486 | cdma->last_put = put; | ||
| 487 | } | ||
| 488 | } | ||
| 489 | |||
| 490 | static void cdma_stop(struct nvhost_cdma *cdma) | ||
| 491 | { | ||
| 492 | void __iomem *chan_regs = cdma_to_channel(cdma)->aperture; | ||
| 493 | |||
| 494 | mutex_lock(&cdma->lock); | ||
| 495 | if (cdma->running) { | ||
| 496 | nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY); | ||
| 497 | writel(host1x_channel_dmactrl(true, false, false), | ||
| 498 | chan_regs + HOST1X_CHANNEL_DMACTRL); | ||
| 499 | cdma->running = false; | ||
| 500 | } | ||
| 501 | mutex_unlock(&cdma->lock); | ||
| 502 | } | ||
| 503 | |||
| 504 | /** | ||
| 505 | * Retrieve the op pair at a slot offset from a DMA address | ||
| 506 | */ | ||
| 507 | void cdma_peek(struct nvhost_cdma *cdma, | ||
| 508 | u32 dmaget, int slot, u32 *out) | ||
| 509 | { | ||
| 510 | u32 offset = dmaget - cdma->push_buffer.phys; | ||
| 511 | u32 *p = cdma->push_buffer.mapped; | ||
| 512 | |||
| 513 | offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2; | ||
| 514 | out[0] = p[offset]; | ||
| 515 | out[1] = p[offset + 1]; | ||
| 516 | } | ||
| 517 | |||
| 518 | /** | ||
| 519 | * Stops both channel's command processor and CDMA immediately. | ||
| 520 | * Also, tears down the channel and resets corresponding module. | ||
| 521 | */ | ||
| 522 | void cdma_timeout_teardown_begin(struct nvhost_cdma *cdma) | ||
| 523 | { | ||
| 524 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
| 525 | struct nvhost_channel *ch = cdma_to_channel(cdma); | ||
| 526 | u32 cmdproc_stop; | ||
| 527 | |||
| 528 | BUG_ON(cdma->torndown); | ||
| 529 | |||
| 530 | dev_dbg(&dev->dev->dev, | ||
| 531 | "begin channel teardown (channel id %d)\n", ch->chid); | ||
| 532 | |||
| 533 | cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
| 534 | cmdproc_stop |= BIT(ch->chid); | ||
| 535 | writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
| 536 | |||
| 537 | dev_dbg(&dev->dev->dev, | ||
| 538 | "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", | ||
| 539 | __func__, | ||
| 540 | readl(ch->aperture + HOST1X_CHANNEL_DMAGET), | ||
| 541 | readl(ch->aperture + HOST1X_CHANNEL_DMAPUT), | ||
| 542 | cdma->last_put); | ||
| 543 | |||
| 544 | writel(host1x_channel_dmactrl(true, false, false), | ||
| 545 | ch->aperture + HOST1X_CHANNEL_DMACTRL); | ||
| 546 | |||
| 547 | writel(BIT(ch->chid), dev->sync_aperture + HOST1X_SYNC_CH_TEARDOWN); | ||
| 548 | nvhost_module_reset(ch->dev); | ||
| 549 | |||
| 550 | cdma->running = false; | ||
| 551 | cdma->torndown = true; | ||
| 552 | } | ||
| 553 | |||
| 554 | void cdma_timeout_teardown_end(struct nvhost_cdma *cdma, u32 getptr) | ||
| 555 | { | ||
| 556 | struct nvhost_master *dev = cdma_to_dev(cdma); | ||
| 557 | struct nvhost_channel *ch = cdma_to_channel(cdma); | ||
| 558 | u32 cmdproc_stop; | ||
| 559 | |||
| 560 | BUG_ON(!cdma->torndown || cdma->running); | ||
| 561 | |||
| 562 | dev_dbg(&dev->dev->dev, | ||
| 563 | "end channel teardown (id %d, DMAGET restart = 0x%x)\n", | ||
| 564 | ch->chid, getptr); | ||
| 565 | |||
| 566 | cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
| 567 | cmdproc_stop &= ~(BIT(ch->chid)); | ||
| 568 | writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
| 569 | |||
| 570 | cdma->torndown = false; | ||
| 571 | cdma_timeout_restart(cdma, getptr); | ||
| 572 | } | ||
| 573 | |||
| 574 | /** | ||
| 575 | * If this timeout fires, it indicates the current sync_queue entry has | ||
| 576 | * exceeded its TTL and the userctx should be timed out and remaining | ||
| 577 | * submits already issued cleaned up (future submits return an error). | ||
| 578 | */ | ||
| 579 | static void cdma_timeout_handler(struct work_struct *work) | ||
| 580 | { | ||
| 581 | struct nvhost_cdma *cdma; | ||
| 582 | struct nvhost_master *dev; | ||
| 583 | struct nvhost_syncpt *sp; | ||
| 584 | struct nvhost_channel *ch; | ||
| 585 | |||
| 586 | u32 syncpt_val; | ||
| 587 | |||
| 588 | u32 prev_cmdproc, cmdproc_stop; | ||
| 589 | |||
| 590 | cdma = container_of(to_delayed_work(work), struct nvhost_cdma, | ||
| 591 | timeout.wq); | ||
| 592 | dev = cdma_to_dev(cdma); | ||
| 593 | sp = &dev->syncpt; | ||
| 594 | ch = cdma_to_channel(cdma); | ||
| 595 | |||
| 596 | mutex_lock(&cdma->lock); | ||
| 597 | |||
| 598 | if (!cdma->timeout.clientid) { | ||
| 599 | dev_dbg(&dev->dev->dev, | ||
| 600 | "cdma_timeout: expired, but has no clientid\n"); | ||
| 601 | mutex_unlock(&cdma->lock); | ||
| 602 | return; | ||
| 603 | } | ||
| 604 | |||
| 605 | /* stop processing to get a clean snapshot */ | ||
| 606 | prev_cmdproc = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
| 607 | cmdproc_stop = prev_cmdproc | BIT(ch->chid); | ||
| 608 | writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
| 609 | |||
| 610 | dev_dbg(&dev->dev->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n", | ||
| 611 | prev_cmdproc, cmdproc_stop); | ||
| 612 | |||
| 613 | syncpt_val = nvhost_syncpt_update_min(&dev->syncpt, | ||
| 614 | cdma->timeout.syncpt_id); | ||
| 615 | |||
| 616 | /* has buffer actually completed? */ | ||
| 617 | if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) { | ||
| 618 | dev_dbg(&dev->dev->dev, | ||
| 619 | "cdma_timeout: expired, but buffer had completed\n"); | ||
| 620 | /* restore */ | ||
| 621 | cmdproc_stop = prev_cmdproc & ~(BIT(ch->chid)); | ||
| 622 | writel(cmdproc_stop, | ||
| 623 | dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP); | ||
| 624 | mutex_unlock(&cdma->lock); | ||
| 625 | return; | ||
| 626 | } | ||
| 627 | |||
| 628 | dev_warn(&dev->dev->dev, | ||
| 629 | "%s: timeout: %d (%s) ctx 0x%p, HW thresh %d, done %d\n", | ||
| 630 | __func__, | ||
| 631 | cdma->timeout.syncpt_id, | ||
| 632 | syncpt_op(sp).name(sp, cdma->timeout.syncpt_id), | ||
| 633 | cdma->timeout.ctx, | ||
| 634 | syncpt_val, cdma->timeout.syncpt_val); | ||
| 635 | |||
| 636 | /* stop HW, resetting channel/module */ | ||
| 637 | cdma_op(cdma).timeout_teardown_begin(cdma); | ||
| 638 | |||
| 639 | nvhost_cdma_update_sync_queue(cdma, sp, &dev->dev->dev); | ||
| 640 | mutex_unlock(&cdma->lock); | ||
| 641 | } | ||
| 642 | |||
| 643 | int host1x_init_cdma_support(struct nvhost_master *host) | ||
| 644 | { | ||
| 645 | host->op.cdma.start = cdma_start; | ||
| 646 | host->op.cdma.stop = cdma_stop; | ||
| 647 | host->op.cdma.kick = cdma_kick; | ||
| 648 | |||
| 649 | host->op.cdma.timeout_init = cdma_timeout_init; | ||
| 650 | host->op.cdma.timeout_destroy = cdma_timeout_destroy; | ||
| 651 | host->op.cdma.timeout_teardown_begin = cdma_timeout_teardown_begin; | ||
| 652 | host->op.cdma.timeout_teardown_end = cdma_timeout_teardown_end; | ||
| 653 | host->op.cdma.timeout_cpu_incr = cdma_timeout_cpu_incr; | ||
| 654 | host->op.cdma.timeout_pb_incr = cdma_timeout_pb_incr; | ||
| 655 | |||
| 656 | host->op.push_buffer.reset = push_buffer_reset; | ||
| 657 | host->op.push_buffer.init = push_buffer_init; | ||
| 658 | host->op.push_buffer.destroy = push_buffer_destroy; | ||
| 659 | host->op.push_buffer.push_to = push_buffer_push_to; | ||
| 660 | host->op.push_buffer.pop_from = push_buffer_pop_from; | ||
| 661 | host->op.push_buffer.space = push_buffer_space; | ||
| 662 | host->op.push_buffer.putptr = push_buffer_putptr; | ||
| 663 | |||
| 664 | return 0; | ||
| 665 | } | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.h b/drivers/video/tegra/host/host1x/host1x_cdma.h new file mode 100644 index 00000000000..60909236a7c --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_cdma.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_cdma.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Channel | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
#ifndef __NVHOST_HOST1X_HOST1X_CDMA_H
#define __NVHOST_HOST1X_HOST1X_CDMA_H

/* Size of the sync queue. If it is too small, we won't be able to queue up
 * many command buffers. If it is too large, we waste memory. */
#define NVHOST_SYNC_QUEUE_SIZE 512

/* Number of gathers we allow to be queued up per channel. Must be a
 * power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
#define NVHOST_GATHER_QUEUE_SIZE 512

/* 8 bytes per slot. (This number does not include the final RESTART.)
 * Each slot holds one opcode/data pair pushed by the push buffer ops. */
#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)

/* 4K page containing GATHERed methods to increment channel syncpts
 * and replaces the original timed out contexts GATHER slots */
#define SYNCPT_INCR_BUFFER_SIZE_WORDS   (4096 / sizeof(u32))

/* Install the host1x CDMA and push-buffer operations; returns 0. */
int host1x_init_cdma_support(struct nvhost_master *);

#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.c b/drivers/video/tegra/host/host1x/host1x_channel.c new file mode 100644 index 00000000000..b16a34f416a --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_channel.c | |||
| @@ -0,0 +1,627 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/channel_host1x.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Channel | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "nvhost_channel.h" | ||
| 22 | #include "dev.h" | ||
| 23 | #include "nvhost_hwctx.h" | ||
| 24 | #include <trace/events/nvhost.h> | ||
| 25 | #include <linux/slab.h> | ||
| 26 | |||
| 27 | #include "host1x_syncpt.h" | ||
| 28 | #include "host1x_channel.h" | ||
| 29 | #include "host1x_hardware.h" | ||
| 30 | #include "host1x_hwctx.h" | ||
| 31 | #include "nvhost_intr.h" | ||
| 32 | |||
| 33 | #define NV_FIFO_READ_TIMEOUT 200000 | ||
| 34 | |||
| 35 | static void sync_waitbases(struct nvhost_channel *ch, u32 syncpt_val) | ||
| 36 | { | ||
| 37 | unsigned long waitbase; | ||
| 38 | unsigned long int waitbase_mask = ch->dev->waitbases; | ||
| 39 | if (ch->dev->waitbasesync) { | ||
| 40 | waitbase = find_first_bit(&waitbase_mask, BITS_PER_LONG); | ||
| 41 | nvhost_cdma_push(&ch->cdma, | ||
| 42 | nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, | ||
| 43 | NV_CLASS_HOST_LOAD_SYNCPT_BASE, | ||
| 44 | 1), | ||
| 45 | nvhost_class_host_load_syncpt_base(waitbase, | ||
| 46 | syncpt_val)); | ||
| 47 | } | ||
| 48 | } | ||
| 49 | |||
| 50 | static void *pre_submit_ctxsave(struct nvhost_job *job, | ||
| 51 | struct nvhost_hwctx *cur_ctx) | ||
| 52 | { | ||
| 53 | struct nvhost_channel *ch = job->ch; | ||
| 54 | void *ctxsave_waiter = NULL; | ||
| 55 | |||
| 56 | /* Is a save needed? */ | ||
| 57 | if (!cur_ctx || ch->cur_ctx == job->hwctx) | ||
| 58 | return NULL; | ||
| 59 | |||
| 60 | if (cur_ctx->has_timedout) { | ||
| 61 | dev_dbg(&ch->dev->dev, | ||
| 62 | "%s: skip save of timed out context (0x%p)\n", | ||
| 63 | __func__, ch->cur_ctx); | ||
| 64 | |||
| 65 | return NULL; | ||
| 66 | } | ||
| 67 | |||
| 68 | /* Allocate save waiter if needed */ | ||
| 69 | if (ch->ctxhandler->save_service) { | ||
| 70 | ctxsave_waiter = nvhost_intr_alloc_waiter(); | ||
| 71 | if (!ctxsave_waiter) | ||
| 72 | return ERR_PTR(-ENOMEM); | ||
| 73 | } | ||
| 74 | |||
| 75 | return ctxsave_waiter; | ||
| 76 | } | ||
| 77 | |||
/*
 * submit_ctxsave - push a context-save sequence ahead of @job
 * @job:            job being submitted (its syncpt_incrs is grown here)
 * @ctxsave_waiter: waiter from pre_submit_ctxsave(), or NULL if the
 *                  save service does not need notification
 * @cur_ctx:        the channel's current context (callers pass ch->cur_ctx)
 *
 * If @cur_ctx must be saved before @job runs, reserves the extra
 * syncpoint increments the save will produce, pushes the save commands
 * into the channel CDMA stream, takes a job reference on the context,
 * and (optionally) schedules a CTXSAVE interrupt at the save threshold.
 */
static void submit_ctxsave(struct nvhost_job *job, void *ctxsave_waiter,
		struct nvhost_hwctx *cur_ctx)
{
	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
	struct nvhost_channel *ch = job->ch;
	u32 syncval;
	int err;
	u32 save_thresh = 0;

	/* Is a save needed? (same conditions as pre_submit_ctxsave) */
	if (!cur_ctx || cur_ctx == job->hwctx || cur_ctx->has_timedout)
		return;

	/* Retrieve save threshold if we have a waiter; the threshold is
	 * relative to the current syncpoint max, before it is raised below */
	if (ctxsave_waiter)
		save_thresh =
			nvhost_syncpt_read_max(&host->syncpt, job->syncpt_id)
			+ to_host1x_hwctx(cur_ctx)->save_thresh;

	/* Adjust the syncpoint max to cover the save's own increments */
	job->syncpt_incrs += to_host1x_hwctx(cur_ctx)->save_incrs;
	syncval = nvhost_syncpt_incr_max(&host->syncpt,
			job->syncpt_id,
			to_host1x_hwctx(cur_ctx)->save_incrs);

	/* Send the save to channel; mark the context restorable */
	cur_ctx->valid = true;
	ch->ctxhandler->save_push(cur_ctx, &ch->cdma);
	nvhost_job_get_hwctx(job, cur_ctx);

	/* Notify save service once the save threshold is reached.
	 * On success the intr code owns the waiter; clear our pointer. */
	if (ctxsave_waiter) {
		err = nvhost_intr_add_action(&host->intr,
			job->syncpt_id,
			save_thresh,
			NVHOST_INTR_ACTION_CTXSAVE, cur_ctx,
			ctxsave_waiter,
			NULL);
		ctxsave_waiter = NULL;
		WARN(err, "Failed to set ctx save interrupt");
	}

	trace_nvhost_channel_context_save(ch->dev->name, cur_ctx);
}
| 122 | |||
/*
 * submit_ctxrestore - push the restore buffer for @job's context
 * @job: job being submitted (its syncpt_incrs is grown here)
 *
 * If the job's context differs from the channel's current one and has a
 * previously saved ("valid") state, reserves the restore's syncpoint
 * increments and gathers the restore buffer into the CDMA stream.
 */
static void submit_ctxrestore(struct nvhost_job *job)
{
	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
	struct nvhost_channel *ch = job->ch;
	u32 syncval;
	struct host1x_hwctx *ctx =
		job->hwctx ? to_host1x_hwctx(job->hwctx) : NULL;

	/* First check if we have a valid context to restore: skip when the
	 * context is already current, absent, or was never saved */
	if (ch->cur_ctx == job->hwctx || !job->hwctx || !job->hwctx->valid)
		return;

	/* Increment syncpt max to cover the restore's own increments */
	job->syncpt_incrs += ctx->restore_incrs;
	syncval = nvhost_syncpt_incr_max(&host->syncpt,
			job->syncpt_id,
			ctx->restore_incrs);

	/* Send restore buffer to channel; the pinned nvmap handle is
	 * stashed so it stays alive until the gather completes */
	nvhost_cdma_push_gather(&ch->cdma,
		host->nvmap,
		nvmap_ref_to_handle(ctx->restore),
		nvhost_opcode_gather(ctx->restore_size),
		ctx->restore_phys);

	trace_nvhost_channel_context_restore(ch->dev->name, &ctx->hwctx);
}
| 150 | |||
/*
 * submit_nullkickoff - replace a job's gathers with bare syncpt increments
 * @job:               job whose work is being nulled out
 * @user_syncpt_incrs: number of increments the nulled commands owed
 *
 * Pushes immediate OP_DONE increments (two per slot, padded with a NOOP
 * for an odd count) so waiters on the job's syncpoint still advance even
 * though no real work is submitted.
 *
 * NOTE(review): the 3D branch dereferences job->hwctx->h without a NULL
 * check — presumably 3D-class null kickoffs always carry a hwctx; verify
 * against callers.
 */
void submit_nullkickoff(struct nvhost_job *job, int user_syncpt_incrs)
{
	struct nvhost_channel *ch = job->ch;
	int incr;
	u32 op_incr;

	/* push increments that correspond to nulled out commands */
	op_incr = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
			job->syncpt_id);
	for (incr = 0; incr < (user_syncpt_incrs >> 1); incr++)
		nvhost_cdma_push(&ch->cdma, op_incr, op_incr);
	if (user_syncpt_incrs & 1)
		nvhost_cdma_push(&ch->cdma, op_incr, NVHOST_OPCODE_NOOP);

	/* for 3d, waitbase needs to be incremented after each submit */
	if (ch->dev->class == NV_GRAPHICS_3D_CLASS_ID) {
		u32 waitbase = to_host1x_hwctx_handler(job->hwctx->h)->waitbase;
		nvhost_cdma_push(&ch->cdma,
			nvhost_opcode_setclass(
				NV_HOST1X_CLASS_ID,
				NV_CLASS_HOST_INCR_SYNCPT_BASE,
				1),
			nvhost_class_host_incr_syncpt_base(
				waitbase,
				user_syncpt_incrs));
	}
}
| 178 | |||
| 179 | void submit_gathers(struct nvhost_job *job) | ||
| 180 | { | ||
| 181 | /* push user gathers */ | ||
| 182 | int i = 0; | ||
| 183 | for ( ; i < job->num_gathers; i++) { | ||
| 184 | u32 op1 = nvhost_opcode_gather(job->gathers[i].words); | ||
| 185 | u32 op2 = job->gathers[i].mem; | ||
| 186 | nvhost_cdma_push_gather(&job->ch->cdma, | ||
| 187 | job->nvmap, job->unpins[i/2], | ||
| 188 | op1, op2); | ||
| 189 | } | ||
| 190 | } | ||
| 191 | |||
/*
 * host1x_channel_submit - submit a job to a host1x channel
 * @job: fully prepared job (gathers pinned, waitchks collected)
 *
 * Powers up the client module, performs any needed context save/restore,
 * pushes the job's gathers (or null-kickoff increments) into CDMA, and
 * schedules a SUBMIT_COMPLETE interrupt at the job's final syncpoint
 * value.  On success the module stays busy until that interrupt fires.
 *
 * Returns 0 on success or a negative errno; job->syncpt_end is set to
 * the channel's current syncpoint max even on early error paths so
 * callers can still wait safely.
 */
int host1x_channel_submit(struct nvhost_job *job)
{
	struct nvhost_channel *ch = job->ch;
	struct nvhost_syncpt *sp = &nvhost_get_host(job->ch->dev)->syncpt;
	u32 user_syncpt_incrs = job->syncpt_incrs;
	u32 prev_max = 0;
	u32 syncval;
	int err;
	void *completed_waiter = NULL, *ctxsave_waiter = NULL;

	/* Bail out on timed out contexts */
	if (job->hwctx && job->hwctx->has_timedout)
		return -ETIMEDOUT;

	/* Turn on the client module and host1x */
	nvhost_module_busy(ch->dev);
	if (ch->dev->busy)
		ch->dev->busy(ch->dev);

	/* before error checks, return current max */
	prev_max = job->syncpt_end =
		nvhost_syncpt_read_max(sp, job->syncpt_id);

	/* get submit lock (interruptible so userspace can be signalled) */
	err = mutex_lock_interruptible(&ch->submitlock);
	if (err) {
		nvhost_module_idle(ch->dev);
		goto error;
	}

	/* Do the needed allocations before touching CDMA state */
	ctxsave_waiter = pre_submit_ctxsave(job, ch->cur_ctx);
	if (IS_ERR(ctxsave_waiter)) {
		err = PTR_ERR(ctxsave_waiter);
		nvhost_module_idle(ch->dev);
		mutex_unlock(&ch->submitlock);
		goto error;
	}

	completed_waiter = nvhost_intr_alloc_waiter();
	if (!completed_waiter) {
		nvhost_module_idle(ch->dev);
		mutex_unlock(&ch->submitlock);
		err = -ENOMEM;
		goto error;
	}

	/* remove stale waits (waits on already-expired syncpt values) */
	if (job->num_waitchk) {
		err = nvhost_syncpt_wait_check(sp,
				job->nvmap,
				job->waitchk_mask,
				job->waitchk,
				job->num_waitchk);
		if (err) {
			dev_warn(&ch->dev->dev,
				"nvhost_syncpt_wait_check failed: %d\n", err);
			mutex_unlock(&ch->submitlock);
			nvhost_module_idle(ch->dev);
			goto error;
		}
	}

	/* begin a CDMA submit */
	err = nvhost_cdma_begin(&ch->cdma, job);
	if (err) {
		mutex_unlock(&ch->submitlock);
		nvhost_module_idle(ch->dev);
		goto error;
	}

	/* context switch: save the outgoing, restore the incoming */
	submit_ctxsave(job, ctxsave_waiter, ch->cur_ctx);
	submit_ctxrestore(job);
	ch->cur_ctx = job->hwctx;

	/* reserve the user's own increments on top of save/restore ones */
	syncval = nvhost_syncpt_incr_max(sp,
			job->syncpt_id, user_syncpt_incrs);

	job->syncpt_end = syncval;

	/* add a setclass for modules that require it */
	if (ch->dev->class)
		nvhost_cdma_push(&ch->cdma,
			nvhost_opcode_setclass(ch->dev->class, 0, 0),
			NVHOST_OPCODE_NOOP);

	if (job->null_kickoff)
		submit_nullkickoff(job, user_syncpt_incrs);
	else
		submit_gathers(job);

	/* keep wait bases in sync with the new syncpoint max */
	sync_waitbases(ch, job->syncpt_end);

	/* end CDMA submit & stash pinned hMems into sync queue */
	nvhost_cdma_end(&ch->cdma, job);

	trace_nvhost_channel_submitted(ch->dev->name,
			prev_max, syncval);

	/* schedule a submit complete interrupt; on success the intr code
	 * owns completed_waiter, so clear our pointer before the error
	 * path could free it */
	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
			job->syncpt_id, syncval,
			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ch,
			completed_waiter,
			NULL);
	completed_waiter = NULL;
	WARN(err, "Failed to set submit complete interrupt");

	mutex_unlock(&ch->submitlock);

	return 0;

error:
	/* kfree(NULL) is a no-op, so unclaimed waiters are freed safely */
	kfree(ctxsave_waiter);
	kfree(completed_waiter);
	return err;
}
| 309 | |||
| 310 | int host1x_channel_read_3d_reg( | ||
| 311 | struct nvhost_channel *channel, | ||
| 312 | struct nvhost_hwctx *hwctx, | ||
| 313 | u32 offset, | ||
| 314 | u32 *value) | ||
| 315 | { | ||
| 316 | struct host1x_hwctx *hwctx_to_save = NULL; | ||
| 317 | struct nvhost_hwctx_handler *h = hwctx->h; | ||
| 318 | struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h); | ||
| 319 | bool need_restore = false; | ||
| 320 | u32 syncpt_incrs = 4; | ||
| 321 | unsigned int pending = 0; | ||
| 322 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); | ||
| 323 | void *ref; | ||
| 324 | void *ctx_waiter, *read_waiter, *completed_waiter; | ||
| 325 | struct nvhost_job *job; | ||
| 326 | u32 syncval; | ||
| 327 | int err; | ||
| 328 | |||
| 329 | if (hwctx && hwctx->has_timedout) | ||
| 330 | return -ETIMEDOUT; | ||
| 331 | |||
| 332 | ctx_waiter = nvhost_intr_alloc_waiter(); | ||
| 333 | read_waiter = nvhost_intr_alloc_waiter(); | ||
| 334 | completed_waiter = nvhost_intr_alloc_waiter(); | ||
| 335 | if (!ctx_waiter || !read_waiter || !completed_waiter) { | ||
| 336 | err = -ENOMEM; | ||
| 337 | goto done; | ||
| 338 | } | ||
| 339 | |||
| 340 | job = nvhost_job_alloc(channel, hwctx, | ||
| 341 | NULL, | ||
| 342 | nvhost_get_host(channel->dev)->nvmap, 0, 0); | ||
| 343 | if (!job) { | ||
| 344 | err = -ENOMEM; | ||
| 345 | goto done; | ||
| 346 | } | ||
| 347 | |||
| 348 | /* keep module powered */ | ||
| 349 | nvhost_module_busy(channel->dev); | ||
| 350 | |||
| 351 | /* get submit lock */ | ||
| 352 | err = mutex_lock_interruptible(&channel->submitlock); | ||
| 353 | if (err) { | ||
| 354 | nvhost_module_idle(channel->dev); | ||
| 355 | return err; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* context switch */ | ||
| 359 | if (channel->cur_ctx != hwctx) { | ||
| 360 | hwctx_to_save = channel->cur_ctx ? | ||
| 361 | to_host1x_hwctx(channel->cur_ctx) : NULL; | ||
| 362 | if (hwctx_to_save) { | ||
| 363 | syncpt_incrs += hwctx_to_save->save_incrs; | ||
| 364 | hwctx_to_save->hwctx.valid = true; | ||
| 365 | channel->ctxhandler->get(&hwctx_to_save->hwctx); | ||
| 366 | } | ||
| 367 | channel->cur_ctx = hwctx; | ||
| 368 | if (channel->cur_ctx && channel->cur_ctx->valid) { | ||
| 369 | need_restore = true; | ||
| 370 | syncpt_incrs += to_host1x_hwctx(channel->cur_ctx) | ||
| 371 | ->restore_incrs; | ||
| 372 | } | ||
| 373 | } | ||
| 374 | |||
| 375 | syncval = nvhost_syncpt_incr_max(&nvhost_get_host(channel->dev)->syncpt, | ||
| 376 | p->syncpt, syncpt_incrs); | ||
| 377 | |||
| 378 | job->syncpt_id = p->syncpt; | ||
| 379 | job->syncpt_incrs = syncpt_incrs; | ||
| 380 | job->syncpt_end = syncval; | ||
| 381 | |||
| 382 | /* begin a CDMA submit */ | ||
| 383 | nvhost_cdma_begin(&channel->cdma, job); | ||
| 384 | |||
| 385 | /* push save buffer (pre-gather setup depends on unit) */ | ||
| 386 | if (hwctx_to_save) | ||
| 387 | h->save_push(&hwctx_to_save->hwctx, &channel->cdma); | ||
| 388 | |||
| 389 | /* gather restore buffer */ | ||
| 390 | if (need_restore) | ||
| 391 | nvhost_cdma_push(&channel->cdma, | ||
| 392 | nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx) | ||
| 393 | ->restore_size), | ||
| 394 | to_host1x_hwctx(channel->cur_ctx)->restore_phys); | ||
| 395 | |||
| 396 | /* Switch to 3D - wait for it to complete what it was doing */ | ||
| 397 | nvhost_cdma_push(&channel->cdma, | ||
| 398 | nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0), | ||
| 399 | nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, | ||
| 400 | p->syncpt)); | ||
| 401 | nvhost_cdma_push(&channel->cdma, | ||
| 402 | nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, | ||
| 403 | NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1), | ||
| 404 | nvhost_class_host_wait_syncpt_base(p->syncpt, | ||
| 405 | p->waitbase, 1)); | ||
| 406 | /* Tell 3D to send register value to FIFO */ | ||
| 407 | nvhost_cdma_push(&channel->cdma, | ||
| 408 | nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1), | ||
| 409 | nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D, | ||
| 410 | offset, false)); | ||
| 411 | nvhost_cdma_push(&channel->cdma, | ||
| 412 | nvhost_opcode_imm(NV_CLASS_HOST_INDDATA, 0), | ||
| 413 | NVHOST_OPCODE_NOOP); | ||
| 414 | /* Increment syncpt to indicate that FIFO can be read */ | ||
| 415 | nvhost_cdma_push(&channel->cdma, | ||
| 416 | nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE, | ||
| 417 | p->syncpt), | ||
| 418 | NVHOST_OPCODE_NOOP); | ||
| 419 | /* Wait for value to be read from FIFO */ | ||
| 420 | nvhost_cdma_push(&channel->cdma, | ||
| 421 | nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1), | ||
| 422 | nvhost_class_host_wait_syncpt_base(p->syncpt, | ||
| 423 | p->waitbase, 3)); | ||
| 424 | /* Indicate submit complete */ | ||
| 425 | nvhost_cdma_push(&channel->cdma, | ||
| 426 | nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1), | ||
| 427 | nvhost_class_host_incr_syncpt_base(p->waitbase, 4)); | ||
| 428 | nvhost_cdma_push(&channel->cdma, | ||
| 429 | NVHOST_OPCODE_NOOP, | ||
| 430 | nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE, | ||
| 431 | p->syncpt)); | ||
| 432 | |||
| 433 | /* end CDMA submit */ | ||
| 434 | nvhost_cdma_end(&channel->cdma, job); | ||
| 435 | nvhost_job_put(job); | ||
| 436 | job = NULL; | ||
| 437 | |||
| 438 | /* | ||
| 439 | * schedule a context save interrupt (to drain the host FIFO | ||
| 440 | * if necessary, and to release the restore buffer) | ||
| 441 | */ | ||
| 442 | if (hwctx_to_save) { | ||
| 443 | err = nvhost_intr_add_action( | ||
| 444 | &nvhost_get_host(channel->dev)->intr, | ||
| 445 | p->syncpt, | ||
| 446 | syncval - syncpt_incrs | ||
| 447 | + hwctx_to_save->save_incrs | ||
| 448 | - 1, | ||
| 449 | NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save, | ||
| 450 | ctx_waiter, | ||
| 451 | NULL); | ||
| 452 | ctx_waiter = NULL; | ||
| 453 | WARN(err, "Failed to set context save interrupt"); | ||
| 454 | } | ||
| 455 | |||
| 456 | /* Wait for FIFO to be ready */ | ||
| 457 | err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr, | ||
| 458 | p->syncpt, syncval - 2, | ||
| 459 | NVHOST_INTR_ACTION_WAKEUP, &wq, | ||
| 460 | read_waiter, | ||
| 461 | &ref); | ||
| 462 | read_waiter = NULL; | ||
| 463 | WARN(err, "Failed to set wakeup interrupt"); | ||
| 464 | wait_event(wq, | ||
| 465 | nvhost_syncpt_is_expired(&nvhost_get_host(channel->dev)->syncpt, | ||
| 466 | p->syncpt, syncval - 2)); | ||
| 467 | nvhost_intr_put_ref(&nvhost_get_host(channel->dev)->intr, ref); | ||
| 468 | |||
| 469 | /* Read the register value from FIFO */ | ||
| 470 | err = host1x_drain_read_fifo(channel->aperture, | ||
| 471 | value, 1, &pending); | ||
| 472 | |||
| 473 | /* Indicate we've read the value */ | ||
| 474 | nvhost_syncpt_cpu_incr(&nvhost_get_host(channel->dev)->syncpt, | ||
| 475 | p->syncpt); | ||
| 476 | |||
| 477 | /* Schedule a submit complete interrupt */ | ||
| 478 | err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr, | ||
| 479 | p->syncpt, syncval, | ||
| 480 | NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel, | ||
| 481 | completed_waiter, NULL); | ||
| 482 | completed_waiter = NULL; | ||
| 483 | WARN(err, "Failed to set submit complete interrupt"); | ||
| 484 | |||
| 485 | mutex_unlock(&channel->submitlock); | ||
| 486 | |||
| 487 | done: | ||
| 488 | kfree(ctx_waiter); | ||
| 489 | kfree(read_waiter); | ||
| 490 | kfree(completed_waiter); | ||
| 491 | return err; | ||
| 492 | } | ||
| 493 | |||
| 494 | |||
| 495 | int host1x_drain_read_fifo(void __iomem *chan_regs, | ||
| 496 | u32 *ptr, unsigned int count, unsigned int *pending) | ||
| 497 | { | ||
| 498 | unsigned int entries = *pending; | ||
| 499 | unsigned long timeout = jiffies + NV_FIFO_READ_TIMEOUT; | ||
| 500 | while (count) { | ||
| 501 | unsigned int num; | ||
| 502 | |||
| 503 | while (!entries && time_before(jiffies, timeout)) { | ||
| 504 | /* query host for number of entries in fifo */ | ||
| 505 | entries = HOST1X_VAL(CHANNEL_FIFOSTAT, OUTFENTRIES, | ||
| 506 | readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT)); | ||
| 507 | if (!entries) | ||
| 508 | cpu_relax(); | ||
| 509 | } | ||
| 510 | |||
| 511 | /* timeout -> return error */ | ||
| 512 | if (!entries) | ||
| 513 | return -EIO; | ||
| 514 | |||
| 515 | num = min(entries, count); | ||
| 516 | entries -= num; | ||
| 517 | count -= num; | ||
| 518 | |||
| 519 | while (num & ~0x3) { | ||
| 520 | u32 arr[4]; | ||
| 521 | arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA); | ||
| 522 | arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA); | ||
| 523 | arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA); | ||
| 524 | arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA); | ||
| 525 | memcpy(ptr, arr, 4*sizeof(u32)); | ||
| 526 | ptr += 4; | ||
| 527 | num -= 4; | ||
| 528 | } | ||
| 529 | while (num--) | ||
| 530 | *ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA); | ||
| 531 | } | ||
| 532 | *pending = entries; | ||
| 533 | |||
| 534 | return 0; | ||
| 535 | } | ||
| 536 | |||
| 537 | int host1x_save_context(struct nvhost_device *dev, u32 syncpt_id) | ||
| 538 | { | ||
| 539 | struct nvhost_channel *ch = dev->channel; | ||
| 540 | struct nvhost_hwctx *hwctx_to_save; | ||
| 541 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); | ||
| 542 | u32 syncpt_incrs, syncpt_val; | ||
| 543 | int err = 0; | ||
| 544 | void *ref; | ||
| 545 | void *ctx_waiter = NULL, *wakeup_waiter = NULL; | ||
| 546 | struct nvhost_job *job; | ||
| 547 | |||
| 548 | ctx_waiter = nvhost_intr_alloc_waiter(); | ||
| 549 | wakeup_waiter = nvhost_intr_alloc_waiter(); | ||
| 550 | if (!ctx_waiter || !wakeup_waiter) { | ||
| 551 | err = -ENOMEM; | ||
| 552 | goto done; | ||
| 553 | } | ||
| 554 | |||
| 555 | if (dev->busy) | ||
| 556 | dev->busy(dev); | ||
| 557 | |||
| 558 | mutex_lock(&ch->submitlock); | ||
| 559 | hwctx_to_save = ch->cur_ctx; | ||
| 560 | if (!hwctx_to_save) { | ||
| 561 | mutex_unlock(&ch->submitlock); | ||
| 562 | goto done; | ||
| 563 | } | ||
| 564 | |||
| 565 | job = nvhost_job_alloc(ch, hwctx_to_save, | ||
| 566 | NULL, | ||
| 567 | nvhost_get_host(ch->dev)->nvmap, 0, 0); | ||
| 568 | if (IS_ERR_OR_NULL(job)) { | ||
| 569 | err = PTR_ERR(job); | ||
| 570 | mutex_unlock(&ch->submitlock); | ||
| 571 | goto done; | ||
| 572 | } | ||
| 573 | |||
| 574 | hwctx_to_save->valid = true; | ||
| 575 | ch->ctxhandler->get(hwctx_to_save); | ||
| 576 | ch->cur_ctx = NULL; | ||
| 577 | |||
| 578 | syncpt_incrs = to_host1x_hwctx(hwctx_to_save)->save_incrs; | ||
| 579 | syncpt_val = nvhost_syncpt_incr_max(&nvhost_get_host(ch->dev)->syncpt, | ||
| 580 | syncpt_id, syncpt_incrs); | ||
| 581 | |||
| 582 | job->syncpt_id = syncpt_id; | ||
| 583 | job->syncpt_incrs = syncpt_incrs; | ||
| 584 | job->syncpt_end = syncpt_val; | ||
| 585 | |||
| 586 | err = nvhost_cdma_begin(&ch->cdma, job); | ||
| 587 | if (err) { | ||
| 588 | mutex_unlock(&ch->submitlock); | ||
| 589 | goto done; | ||
| 590 | } | ||
| 591 | |||
| 592 | ch->ctxhandler->save_push(hwctx_to_save, &ch->cdma); | ||
| 593 | nvhost_cdma_end(&ch->cdma, job); | ||
| 594 | nvhost_job_put(job); | ||
| 595 | job = NULL; | ||
| 596 | |||
| 597 | err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, syncpt_id, | ||
| 598 | syncpt_val - syncpt_incrs + | ||
| 599 | to_host1x_hwctx(hwctx_to_save)->save_thresh, | ||
| 600 | NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save, | ||
| 601 | ctx_waiter, | ||
| 602 | NULL); | ||
| 603 | ctx_waiter = NULL; | ||
| 604 | WARN(err, "Failed to set context save interrupt"); | ||
| 605 | |||
| 606 | err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, | ||
| 607 | syncpt_id, syncpt_val, | ||
| 608 | NVHOST_INTR_ACTION_WAKEUP, &wq, | ||
| 609 | wakeup_waiter, | ||
| 610 | &ref); | ||
| 611 | wakeup_waiter = NULL; | ||
| 612 | WARN(err, "Failed to set wakeup interrupt"); | ||
| 613 | wait_event(wq, | ||
| 614 | nvhost_syncpt_is_expired(&nvhost_get_host(ch->dev)->syncpt, | ||
| 615 | syncpt_id, syncpt_val)); | ||
| 616 | |||
| 617 | nvhost_intr_put_ref(&nvhost_get_host(ch->dev)->intr, ref); | ||
| 618 | |||
| 619 | nvhost_cdma_update(&ch->cdma); | ||
| 620 | |||
| 621 | mutex_unlock(&ch->submitlock); | ||
| 622 | |||
| 623 | done: | ||
| 624 | kfree(ctx_waiter); | ||
| 625 | kfree(wakeup_waiter); | ||
| 626 | return err; | ||
| 627 | } | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.h b/drivers/video/tegra/host/host1x/host1x_channel.h new file mode 100644 index 00000000000..4113dbcada2 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_channel.h | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_channel.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Channel | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
#ifndef __NVHOST_HOST1X_CHANNEL_H
#define __NVHOST_HOST1X_CHANNEL_H

struct nvhost_job;
struct nvhost_channel;
struct nvhost_hwctx;
struct nvhost_device;

/* Submit job to a host1x client; returns 0 or a negative errno */
int host1x_channel_submit(struct nvhost_job *job);

/* Read 3d register via FIFO; returns 0 or a negative errno */
int host1x_channel_read_3d_reg(
	struct nvhost_channel *channel,
	struct nvhost_hwctx *hwctx,
	u32 offset,
	u32 *value);

/* Reads words from FIFO; *pending tracks FIFO entries across calls */
int host1x_drain_read_fifo(void __iomem *chan_regs,
		u32 *ptr, unsigned int count, unsigned int *pending);

/* Save the channel's current context and wait for completion */
int host1x_save_context(struct nvhost_device *dev, u32 syncpt_id);

#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_debug.c b/drivers/video/tegra/host/host1x/host1x_debug.c new file mode 100644 index 00000000000..1a1d764bbd6 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_debug.c | |||
| @@ -0,0 +1,404 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_debug.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2010 Google, Inc. | ||
| 5 | * Author: Erik Gilling <konkers@android.com> | ||
| 6 | * | ||
| 7 | * Copyright (C) 2011 NVIDIA Corporation | ||
| 8 | * | ||
| 9 | * This software is licensed under the terms of the GNU General Public | ||
| 10 | * License version 2, as published by the Free Software Foundation, and | ||
| 11 | * may be copied, distributed, and modified under those terms. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/debugfs.h> | ||
| 21 | #include <linux/seq_file.h> | ||
| 22 | #include <linux/mm.h> | ||
| 23 | |||
| 24 | #include <linux/io.h> | ||
| 25 | |||
| 26 | #include "dev.h" | ||
| 27 | #include "debug.h" | ||
| 28 | #include "nvhost_cdma.h" | ||
| 29 | #include "../../nvmap/nvmap.h" | ||
| 30 | |||
| 31 | #include "host1x_hardware.h" | ||
| 32 | #include "host1x_cdma.h" | ||
| 33 | |||
#define NVHOST_DEBUG_MAX_PAGE_OFFSET 102400

/* State machine for decoding a command stream word by word */
enum {
	NVHOST_DBG_STATE_CMD = 0,	/* next word is an opcode */
	NVHOST_DBG_STATE_DATA = 1,	/* next word is opcode payload */
	NVHOST_DBG_STATE_GATHER = 2	/* next word is a gather address */
};
| 41 | |||
/*
 * show_channel_command - decode and print one command-stream opcode
 * @o:     debug output sink
 * @addr:  address the word was fetched from (unused in decoding)
 * @val:   the opcode word
 * @count: out: number of payload words that follow the opcode
 *
 * Decodes the opcode in bits 31:28 of @val, prints it, and returns the
 * parser state for the following words (CMD for self-contained opcodes,
 * DATA when *count payload words follow, GATHER when a gather address
 * follows).
 */
static int show_channel_command(struct output *o, u32 addr, u32 val, int *count)
{
	unsigned mask;
	unsigned subop;

	switch (val >> 28) {
	case 0x0:
		/* SETCL: class in 15:6, offset in 27:16, write mask in 5:0 */
		mask = val & 0x3f;
		if (mask) {
			nvhost_debug_output(o,
				"SETCL(class=%03x, offset=%03x, mask=%02x, [",
				val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
			*count = hweight8(mask);
			return NVHOST_DBG_STATE_DATA;
		} else {
			nvhost_debug_output(o, "SETCL(class=%03x)\n",
				val >> 6 & 0x3ff);
			return NVHOST_DBG_STATE_CMD;
		}

	case 0x1:
		/* INCR: count sequential register writes */
		nvhost_debug_output(o, "INCR(offset=%03x, [",
			val >> 16 & 0xfff);
		*count = val & 0xffff;
		return NVHOST_DBG_STATE_DATA;

	case 0x2:
		/* NONINCR: count writes to the same register */
		nvhost_debug_output(o, "NONINCR(offset=%03x, [",
			val >> 16 & 0xfff);
		*count = val & 0xffff;
		return NVHOST_DBG_STATE_DATA;

	case 0x3:
		/* MASK: one payload word per set bit in the mask */
		mask = val & 0xffff;
		nvhost_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
			val >> 16 & 0xfff, mask);
		*count = hweight16(mask);
		return NVHOST_DBG_STATE_DATA;

	case 0x4:
		/* IMM: immediate data, no payload */
		nvhost_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
			val >> 16 & 0xfff, val & 0xffff);
		return NVHOST_DBG_STATE_CMD;

	case 0x5:
		/* RESTART: DMA get pointer jump, address in 27:0 << 4 */
		nvhost_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
		return NVHOST_DBG_STATE_CMD;

	case 0x6:
		/* GATHER: indirect buffer fetch, word count in 13:0 */
		nvhost_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
			val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1,
			val & 0x3fff);
		*count = val & 0x3fff; /* TODO: insert */
		return NVHOST_DBG_STATE_GATHER;

	case 0xe:
		/* EXTEND: sub-opcode in 27:24 selects mlock acquire/release */
		subop = val >> 24 & 0xf;
		if (subop == 0)
			nvhost_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
				val & 0xff);
		else if (subop == 1)
			nvhost_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
				val & 0xff);
		else
			nvhost_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
		return NVHOST_DBG_STATE_CMD;

	default:
		/* Unknown opcode: resynchronize on the next word */
		return NVHOST_DBG_STATE_CMD;
	}
}
| 113 | |||
| 114 | static void show_channel_gather(struct output *o, u32 addr, | ||
| 115 | phys_addr_t phys_addr, u32 words, struct nvhost_cdma *cdma); | ||
| 116 | |||
/*
 * show_channel_word - feed one word through the command-stream decoder
 * @o:     debug output sink
 * @state: in/out parser state (NVHOST_DBG_STATE_*)
 * @count: in/out payload words remaining for the current opcode
 * @addr:  address the word was fetched from (0 if unknown)
 * @val:   the word itself
 * @cdma:  CDMA instance, used to resolve and dump gather buffers; may
 *         be NULL to skip gather contents
 *
 * NOTE(review): start_count/dont_print are function-static, so state is
 * shared across all callers — fine for single-threaded debugfs dumps,
 * not reentrant.
 */
static void show_channel_word(struct output *o, int *state, int *count,
		u32 addr, u32 val, struct nvhost_cdma *cdma)
{
	static int start_count, dont_print;

	switch (*state) {
	case NVHOST_DBG_STATE_CMD:
		if (addr)
			nvhost_debug_output(o, "%08x: %08x:", addr, val);
		else
			nvhost_debug_output(o, "%08x:", val);

		*state = show_channel_command(o, addr, val, count);
		dont_print = 0;
		start_count = *count;
		/* Opcode with an empty payload: close the bracket now */
		if (*state == NVHOST_DBG_STATE_DATA && *count == 0) {
			*state = NVHOST_DBG_STATE_CMD;
			nvhost_debug_output(o, "])\n");
		}
		break;

	case NVHOST_DBG_STATE_DATA:
		(*count)--;
		/* Print at most 64 payload words, then truncate */
		if (start_count - *count < 64)
			nvhost_debug_output(o, "%08x%s",
				val, *count > 0 ? ", " : "])\n");
		else if (!dont_print && (*count > 0)) {
			nvhost_debug_output(o, "[truncated; %d more words]\n",
				*count);
			dont_print = 1;
		}
		if (*count == 0)
			*state = NVHOST_DBG_STATE_CMD;
		break;

	case NVHOST_DBG_STATE_GATHER:
		/* val is the gather's physical address; dump its contents */
		*state = NVHOST_DBG_STATE_CMD;
		nvhost_debug_output(o, "%08x]):\n", val);
		if (cdma) {
			show_channel_gather(o, addr, val,
					*count, cdma);
		}
		break;
	}
}
| 162 | |||
| 163 | static void show_channel_gather(struct output *o, u32 addr, | ||
| 164 | phys_addr_t phys_addr, | ||
| 165 | u32 words, struct nvhost_cdma *cdma) | ||
| 166 | { | ||
| 167 | #if defined(CONFIG_TEGRA_NVMAP) | ||
| 168 | /* Map dmaget cursor to corresponding nvmap_handle */ | ||
| 169 | struct push_buffer *pb = &cdma->push_buffer; | ||
| 170 | u32 cur = addr - pb->phys; | ||
| 171 | struct nvmap_client_handle *nvmap = &pb->nvmap[cur/8]; | ||
| 172 | struct nvmap_handle_ref ref; | ||
| 173 | u32 *map_addr, offset; | ||
| 174 | phys_addr_t pin_addr; | ||
| 175 | int state, count, i; | ||
| 176 | |||
| 177 | if ((u32)nvmap->handle == NVHOST_CDMA_PUSH_GATHER_CTXSAVE) { | ||
| 178 | nvhost_debug_output(o, "[context save]\n"); | ||
| 179 | return; | ||
| 180 | } | ||
| 181 | |||
| 182 | if (!nvmap->handle || !nvmap->client | ||
| 183 | || atomic_read(&nvmap->handle->ref) < 1) { | ||
| 184 | nvhost_debug_output(o, "[already deallocated]\n"); | ||
| 185 | return; | ||
| 186 | } | ||
| 187 | |||
| 188 | /* Create a fake nvmap_handle_ref - nvmap requires it | ||
| 189 | * but accesses only the first field - nvmap_handle */ | ||
| 190 | ref.handle = nvmap->handle; | ||
| 191 | |||
| 192 | map_addr = nvmap_mmap(&ref); | ||
| 193 | if (!map_addr) { | ||
| 194 | nvhost_debug_output(o, "[could not mmap]\n"); | ||
| 195 | return; | ||
| 196 | } | ||
| 197 | |||
| 198 | /* Get base address from nvmap */ | ||
| 199 | pin_addr = nvmap_pin(nvmap->client, &ref); | ||
| 200 | if (IS_ERR_VALUE(pin_addr)) { | ||
| 201 | nvhost_debug_output(o, "[couldn't pin]\n"); | ||
| 202 | nvmap_munmap(&ref, map_addr); | ||
| 203 | return; | ||
| 204 | } | ||
| 205 | |||
| 206 | offset = phys_addr - pin_addr; | ||
| 207 | /* | ||
| 208 | * Sometimes we're given different hardware address to the same | ||
| 209 | * page - in these cases the offset will get an invalid number and | ||
| 210 | * we just have to bail out. | ||
| 211 | */ | ||
| 212 | if (offset > NVHOST_DEBUG_MAX_PAGE_OFFSET) { | ||
| 213 | nvhost_debug_output(o, "[address mismatch]\n"); | ||
| 214 | } else { | ||
| 215 | /* GATHER buffer starts always with commands */ | ||
| 216 | state = NVHOST_DBG_STATE_CMD; | ||
| 217 | for (i = 0; i < words; i++) | ||
| 218 | show_channel_word(o, &state, &count, | ||
| 219 | phys_addr + i * 4, | ||
| 220 | *(map_addr + offset/4 + i), | ||
| 221 | cdma); | ||
| 222 | } | ||
| 223 | nvmap_unpin(nvmap->client, &ref); | ||
| 224 | nvmap_munmap(&ref, map_addr); | ||
| 225 | #endif | ||
| 226 | } | ||
| 227 | |||
| 228 | static void show_channel_pair(struct output *o, u32 addr, | ||
| 229 | u32 w0, u32 w1, struct nvhost_cdma *cdma) | ||
| 230 | { | ||
| 231 | int state = NVHOST_DBG_STATE_CMD; | ||
| 232 | int count; | ||
| 233 | |||
| 234 | show_channel_word(o, &state, &count, addr, w0, cdma); | ||
| 235 | show_channel_word(o, &state, &count, addr+4, w1, cdma); | ||
| 236 | } | ||
| 237 | |||
| 238 | /** | ||
| 239 | * Retrieve the op pair at a slot offset from a DMA address | ||
| 240 | */ | ||
| 241 | static void cdma_peek(struct nvhost_cdma *cdma, | ||
| 242 | u32 dmaget, int slot, u32 *out) | ||
| 243 | { | ||
| 244 | u32 offset = dmaget - cdma->push_buffer.phys; | ||
| 245 | u32 *p = cdma->push_buffer.mapped; | ||
| 246 | |||
| 247 | offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2; | ||
| 248 | out[0] = p[offset]; | ||
| 249 | out[1] = p[offset + 1]; | ||
| 250 | } | ||
| 251 | |||
| 252 | u32 previous_oppair(struct nvhost_cdma *cdma, u32 cur) | ||
| 253 | { | ||
| 254 | u32 pb = cdma->push_buffer.phys; | ||
| 255 | u32 prev = cur-8; | ||
| 256 | if (prev < pb) | ||
| 257 | prev += PUSH_BUFFER_SIZE; | ||
| 258 | return prev; | ||
| 259 | } | ||
| 260 | |||
| 261 | static void t20_debug_show_channel_cdma(struct nvhost_master *m, | ||
| 262 | struct nvhost_channel *ch, struct output *o, int chid) | ||
| 263 | { | ||
| 264 | struct nvhost_channel *channel = ch; | ||
| 265 | struct nvhost_cdma *cdma = &channel->cdma; | ||
| 266 | u32 dmaput, dmaget, dmactrl; | ||
| 267 | u32 cbstat, cbread; | ||
| 268 | u32 val, base, baseval; | ||
| 269 | u32 pbw[2]; | ||
| 270 | |||
| 271 | dmaput = readl(channel->aperture + HOST1X_CHANNEL_DMAPUT); | ||
| 272 | dmaget = readl(channel->aperture + HOST1X_CHANNEL_DMAGET); | ||
| 273 | dmactrl = readl(channel->aperture + HOST1X_CHANNEL_DMACTRL); | ||
| 274 | cbread = readl(m->sync_aperture + HOST1X_SYNC_CBREAD_x(chid)); | ||
| 275 | cbstat = readl(m->sync_aperture + HOST1X_SYNC_CBSTAT_x(chid)); | ||
| 276 | |||
| 277 | nvhost_debug_output(o, "%d-%s (%d): ", chid, | ||
| 278 | channel->dev->name, | ||
| 279 | channel->dev->refcount); | ||
| 280 | |||
| 281 | if (HOST1X_VAL(CHANNEL_DMACTRL, DMASTOP, dmactrl) | ||
| 282 | || !channel->cdma.push_buffer.mapped) { | ||
| 283 | nvhost_debug_output(o, "inactive\n\n"); | ||
| 284 | return; | ||
| 285 | } | ||
| 286 | |||
| 287 | switch (cbstat) { | ||
| 288 | case 0x00010008: | ||
| 289 | nvhost_debug_output(o, "waiting on syncpt %d val %d\n", | ||
| 290 | cbread >> 24, cbread & 0xffffff); | ||
| 291 | break; | ||
| 292 | |||
| 293 | case 0x00010009: | ||
| 294 | base = (cbread >> 16) & 0xff; | ||
| 295 | val = readl(m->sync_aperture + | ||
| 296 | HOST1X_SYNC_SYNCPT_BASE_x(base)); | ||
| 297 | baseval = HOST1X_VAL(SYNC_SYNCPT_BASE_0, BASE, val); | ||
| 298 | val = cbread & 0xffff; | ||
| 299 | nvhost_debug_output(o, "waiting on syncpt %d val %d " | ||
| 300 | "(base %d = %d; offset = %d)\n", | ||
| 301 | cbread >> 24, baseval + val, | ||
| 302 | base, baseval, val); | ||
| 303 | break; | ||
| 304 | |||
| 305 | default: | ||
| 306 | nvhost_debug_output(o, | ||
| 307 | "active class %02x, offset %04x, val %08x\n", | ||
| 308 | HOST1X_VAL(SYNC_CBSTAT_0, CBCLASS0, cbstat), | ||
| 309 | HOST1X_VAL(SYNC_CBSTAT_0, CBOFFSET0, cbstat), | ||
| 310 | cbread); | ||
| 311 | break; | ||
| 312 | } | ||
| 313 | |||
| 314 | nvhost_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n", | ||
| 315 | dmaput, dmaget, dmactrl); | ||
| 316 | nvhost_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat); | ||
| 317 | |||
| 318 | cdma_peek(cdma, dmaget, -1, pbw); | ||
| 319 | show_channel_pair(o, previous_oppair(cdma, dmaget), | ||
| 320 | pbw[0], pbw[1], &channel->cdma); | ||
| 321 | nvhost_debug_output(o, "\n"); | ||
| 322 | } | ||
| 323 | |||
| 324 | void t20_debug_show_channel_fifo(struct nvhost_master *m, | ||
| 325 | struct nvhost_channel *ch, struct output *o, int chid) | ||
| 326 | { | ||
| 327 | u32 val, rd_ptr, wr_ptr, start, end; | ||
| 328 | struct nvhost_channel *channel = ch; | ||
| 329 | int state, count; | ||
| 330 | |||
| 331 | nvhost_debug_output(o, "%d: fifo:\n", chid); | ||
| 332 | |||
| 333 | val = readl(channel->aperture + HOST1X_CHANNEL_FIFOSTAT); | ||
| 334 | nvhost_debug_output(o, "FIFOSTAT %08x\n", val); | ||
| 335 | if (HOST1X_VAL(CHANNEL_FIFOSTAT, CFEMPTY, val)) { | ||
| 336 | nvhost_debug_output(o, "[empty]\n"); | ||
| 337 | return; | ||
| 338 | } | ||
| 339 | |||
| 340 | writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL); | ||
| 341 | writel(HOST1X_CREATE(SYNC_CFPEEK_CTRL, ENA, 1) | ||
| 342 | | HOST1X_CREATE(SYNC_CFPEEK_CTRL, CHANNR, chid), | ||
| 343 | m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL); | ||
| 344 | |||
| 345 | val = readl(m->sync_aperture + HOST1X_SYNC_CFPEEK_PTRS); | ||
| 346 | rd_ptr = HOST1X_VAL(SYNC_CFPEEK_PTRS, CF_RD_PTR, val); | ||
| 347 | wr_ptr = HOST1X_VAL(SYNC_CFPEEK_PTRS, CF_WR_PTR, val); | ||
| 348 | |||
| 349 | val = readl(m->sync_aperture + HOST1X_SYNC_CFx_SETUP(chid)); | ||
| 350 | start = HOST1X_VAL(SYNC_CF0_SETUP, BASE, val); | ||
| 351 | end = HOST1X_VAL(SYNC_CF0_SETUP, LIMIT, val); | ||
| 352 | |||
| 353 | state = NVHOST_DBG_STATE_CMD; | ||
| 354 | |||
| 355 | do { | ||
| 356 | writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL); | ||
| 357 | writel(HOST1X_CREATE(SYNC_CFPEEK_CTRL, ENA, 1) | ||
| 358 | | HOST1X_CREATE(SYNC_CFPEEK_CTRL, CHANNR, chid) | ||
| 359 | | HOST1X_CREATE(SYNC_CFPEEK_CTRL, ADDR, rd_ptr), | ||
| 360 | m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL); | ||
| 361 | val = readl(m->sync_aperture + HOST1X_SYNC_CFPEEK_READ); | ||
| 362 | |||
| 363 | show_channel_word(o, &state, &count, 0, val, NULL); | ||
| 364 | |||
| 365 | if (rd_ptr == end) | ||
| 366 | rd_ptr = start; | ||
| 367 | else | ||
| 368 | rd_ptr++; | ||
| 369 | } while (rd_ptr != wr_ptr); | ||
| 370 | |||
| 371 | if (state == NVHOST_DBG_STATE_DATA) | ||
| 372 | nvhost_debug_output(o, ", ...])\n"); | ||
| 373 | nvhost_debug_output(o, "\n"); | ||
| 374 | |||
| 375 | writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL); | ||
| 376 | } | ||
| 377 | |||
| 378 | static void t20_debug_show_mlocks(struct nvhost_master *m, struct output *o) | ||
| 379 | { | ||
| 380 | u32 __iomem *mlo_regs = m->sync_aperture + HOST1X_SYNC_MLOCK_OWNER_0; | ||
| 381 | int i; | ||
| 382 | |||
| 383 | nvhost_debug_output(o, "---- mlocks ----\n"); | ||
| 384 | for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) { | ||
| 385 | u32 owner = readl(mlo_regs + i); | ||
| 386 | if (HOST1X_VAL(SYNC_MLOCK_OWNER_0, CH_OWNS, owner)) | ||
| 387 | nvhost_debug_output(o, "%d: locked by channel %d\n", | ||
| 388 | i, HOST1X_VAL(SYNC_MLOCK_OWNER_0, CHID, owner)); | ||
| 389 | else if (HOST1X_VAL(SYNC_MLOCK_OWNER_0, CPU_OWNS, owner)) | ||
| 390 | nvhost_debug_output(o, "%d: locked by cpu\n", i); | ||
| 391 | else | ||
| 392 | nvhost_debug_output(o, "%d: unlocked\n", i); | ||
| 393 | } | ||
| 394 | nvhost_debug_output(o, "\n"); | ||
| 395 | } | ||
| 396 | |||
| 397 | int nvhost_init_t20_debug_support(struct nvhost_master *host) | ||
| 398 | { | ||
| 399 | host->op.debug.show_channel_cdma = t20_debug_show_channel_cdma; | ||
| 400 | host->op.debug.show_channel_fifo = t20_debug_show_channel_fifo; | ||
| 401 | host->op.debug.show_mlocks = t20_debug_show_mlocks; | ||
| 402 | |||
| 403 | return 0; | ||
| 404 | } | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_hardware.h b/drivers/video/tegra/host/host1x/host1x_hardware.h new file mode 100644 index 00000000000..d13d5752364 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_hardware.h | |||
| @@ -0,0 +1,274 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_hardware.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Register Offsets | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012 NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_HOST1X_HOST1X_HARDWARE_H | ||
| 22 | #define __NVHOST_HOST1X_HOST1X_HARDWARE_H | ||
| 23 | |||
| 24 | #include <linux/types.h> | ||
| 25 | #include <linux/bitops.h> | ||
| 26 | |||
| 27 | /* class ids */ | ||
| 28 | enum { | ||
| 29 | NV_HOST1X_CLASS_ID = 0x1, | ||
| 30 | NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20, | ||
| 31 | NV_GRAPHICS_3D_CLASS_ID = 0x60 | ||
| 32 | }; | ||
| 33 | |||
| 34 | |||
| 35 | /* channel registers */ | ||
| 36 | #define NV_HOST1X_CHANNELS 8 | ||
| 37 | #define NV_HOST1X_CHANNEL0_BASE 0 | ||
| 38 | #define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384 | ||
| 39 | #define NV_HOST1X_SYNC_MLOCK_NUM 16 | ||
| 40 | |||
| 41 | #define HOST1X_VAL(reg, field, regdata) \ | ||
| 42 | ((regdata >> HOST1X_##reg##_##field##_SHIFT) \ | ||
| 43 | & HOST1X_##reg##_##field##_MASK) | ||
| 44 | #define HOST1X_CREATE(reg, field, data) \ | ||
| 45 | ((data & HOST1X_##reg##_##field##_MASK) \ | ||
| 46 | << HOST1X_##reg##_##field##_SHIFT) \ | ||
| 47 | |||
| 48 | #define HOST1X_CHANNEL_FIFOSTAT 0x00 | ||
| 49 | #define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_SHIFT 10 | ||
| 50 | #define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_MASK 0x1 | ||
| 51 | #define HOST1X_CHANNEL_FIFOSTAT_OUTFENTRIES_SHIFT 24 | ||
| 52 | #define HOST1X_CHANNEL_FIFOSTAT_OUTFENTRIES_MASK 0x1f | ||
| 53 | #define HOST1X_CHANNEL_INDDATA 0x0c | ||
| 54 | #define HOST1X_CHANNEL_DMASTART 0x14 | ||
| 55 | #define HOST1X_CHANNEL_DMAPUT 0x18 | ||
| 56 | #define HOST1X_CHANNEL_DMAGET 0x1c | ||
| 57 | #define HOST1X_CHANNEL_DMAEND 0x20 | ||
| 58 | #define HOST1X_CHANNEL_DMACTRL 0x24 | ||
| 59 | #define HOST1X_CHANNEL_DMACTRL_DMASTOP_SHIFT 0 | ||
| 60 | #define HOST1X_CHANNEL_DMACTRL_DMASTOP_MASK 0x1 | ||
| 61 | #define HOST1X_CHANNEL_DMACTRL_DMAGETRST_SHIFT 1 | ||
| 62 | #define HOST1X_CHANNEL_DMACTRL_DMAGETRST_MASK 0x1 | ||
| 63 | #define HOST1X_CHANNEL_DMACTRL_DMAINITGET_SHIFT 2 | ||
| 64 | #define HOST1X_CHANNEL_DMACTRL_DMAINITGET_MASK 0x1 | ||
| 65 | |||
| 66 | #define HOST1X_CHANNEL_SYNC_REG_BASE 0x3000 | ||
| 67 | |||
| 68 | #define HOST1X_SYNC_INTMASK 0x4 | ||
| 69 | #define HOST1X_SYNC_INTC0MASK 0x8 | ||
| 70 | #define HOST1X_SYNC_HINTSTATUS 0x20 | ||
| 71 | #define HOST1X_SYNC_HINTMASK 0x24 | ||
| 72 | #define HOST1X_SYNC_HINTSTATUS_EXT 0x28 | ||
| 73 | #define HOST1X_SYNC_HINTSTATUS_EXT_IP_READ_INT_SHIFT 30 | ||
| 74 | #define HOST1X_SYNC_HINTSTATUS_EXT_IP_READ_INT_MASK 0x1 | ||
| 75 | #define HOST1X_SYNC_HINTSTATUS_EXT_IP_WRITE_INT_SHIFT 31 | ||
| 76 | #define HOST1X_SYNC_HINTSTATUS_EXT_IP_WRITE_INT_MASK 0x1 | ||
| 77 | #define HOST1X_SYNC_HINTMASK_EXT 0x2c | ||
| 78 | #define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS 0x40 | ||
| 79 | #define HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS 0x48 | ||
| 80 | #define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE 0x60 | ||
| 81 | #define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 0x68 | ||
| 82 | #define HOST1X_SYNC_CF0_SETUP 0x80 | ||
| 83 | #define HOST1X_SYNC_CF0_SETUP_BASE_SHIFT 0 | ||
| 84 | #define HOST1X_SYNC_CF0_SETUP_BASE_MASK 0x1ff | ||
| 85 | #define HOST1X_SYNC_CF0_SETUP_LIMIT_SHIFT 16 | ||
| 86 | #define HOST1X_SYNC_CF0_SETUP_LIMIT_MASK 0x1ff | ||
| 87 | #define HOST1X_SYNC_CFx_SETUP(x) (HOST1X_SYNC_CF0_SETUP + (4 * (x))) | ||
| 88 | |||
| 89 | #define HOST1X_SYNC_CMDPROC_STOP 0xac | ||
| 90 | #define HOST1X_SYNC_CH_TEARDOWN 0xb0 | ||
| 91 | #define HOST1X_SYNC_USEC_CLK 0x1a4 | ||
| 92 | #define HOST1X_SYNC_CTXSW_TIMEOUT_CFG 0x1a8 | ||
| 93 | #define HOST1X_SYNC_IP_BUSY_TIMEOUT 0x1bc | ||
| 94 | #define HOST1X_SYNC_IP_READ_TIMEOUT_ADDR 0x1c0 | ||
| 95 | #define HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR 0x1c4 | ||
| 96 | #define HOST1X_SYNC_MLOCK_0 0x2c0 | ||
| 97 | #define HOST1X_SYNC_MLOCK_OWNER_0 0x340 | ||
| 98 | #define HOST1X_SYNC_MLOCK_OWNER_0_CHID_SHIFT 8 | ||
| 99 | #define HOST1X_SYNC_MLOCK_OWNER_0_CHID_MASK 0xf | ||
| 100 | #define HOST1X_SYNC_MLOCK_OWNER_0_CPU_OWNS_SHIFT 1 | ||
| 101 | #define HOST1X_SYNC_MLOCK_OWNER_0_CPU_OWNS_MASK 0x1 | ||
| 102 | #define HOST1X_SYNC_MLOCK_OWNER_0_CH_OWNS_SHIFT 0 | ||
| 103 | #define HOST1X_SYNC_MLOCK_OWNER_0_CH_OWNS_MASK 0x1 | ||
| 104 | #define HOST1X_SYNC_SYNCPT_0 0x400 | ||
| 105 | #define HOST1X_SYNC_SYNCPT_INT_THRESH_0 0x500 | ||
| 106 | |||
| 107 | #define HOST1X_SYNC_SYNCPT_BASE_0 0x600 | ||
| 108 | #define HOST1X_SYNC_SYNCPT_BASE_0_BASE_SHIFT 0 | ||
| 109 | #define HOST1X_SYNC_SYNCPT_BASE_0_BASE_MASK 0xffff | ||
| 110 | #define HOST1X_SYNC_SYNCPT_BASE_x(x) (HOST1X_SYNC_SYNCPT_BASE_0 + (4 * (x))) | ||
| 111 | |||
| 112 | #define HOST1X_SYNC_SYNCPT_CPU_INCR 0x700 | ||
| 113 | |||
| 114 | #define HOST1X_SYNC_CBREAD_0 0x720 | ||
| 115 | #define HOST1X_SYNC_CBREAD_x(x) (HOST1X_SYNC_CBREAD_0 + (4 * (x))) | ||
| 116 | #define HOST1X_SYNC_CFPEEK_CTRL 0x74c | ||
| 117 | #define HOST1X_SYNC_CFPEEK_CTRL_ADDR_SHIFT 0 | ||
| 118 | #define HOST1X_SYNC_CFPEEK_CTRL_ADDR_MASK 0x1ff | ||
| 119 | #define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_SHIFT 16 | ||
| 120 | #define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_MASK 0x7 | ||
| 121 | #define HOST1X_SYNC_CFPEEK_CTRL_ENA_SHIFT 31 | ||
| 122 | #define HOST1X_SYNC_CFPEEK_CTRL_ENA_MASK 0x1 | ||
| 123 | #define HOST1X_SYNC_CFPEEK_READ 0x750 | ||
| 124 | #define HOST1X_SYNC_CFPEEK_PTRS 0x754 | ||
| 125 | #define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_SHIFT 0 | ||
| 126 | #define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_MASK 0x1ff | ||
| 127 | #define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_SHIFT 16 | ||
| 128 | #define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_MASK 0x1ff | ||
| 129 | #define HOST1X_SYNC_CBSTAT_0 0x758 | ||
| 130 | #define HOST1X_SYNC_CBSTAT_0_CBOFFSET0_SHIFT 0 | ||
| 131 | #define HOST1X_SYNC_CBSTAT_0_CBOFFSET0_MASK 0xffff | ||
| 132 | #define HOST1X_SYNC_CBSTAT_0_CBCLASS0_SHIFT 16 | ||
| 133 | #define HOST1X_SYNC_CBSTAT_0_CBCLASS0_MASK 0xffff | ||
| 134 | #define HOST1X_SYNC_CBSTAT_x(x) (HOST1X_SYNC_CBSTAT_0 + (4 * (x))) | ||
| 135 | |||
| 136 | /* sync registers */ | ||
| 137 | #define NV_HOST1X_SYNCPT_NB_PTS 32 | ||
| 138 | #define NV_HOST1X_SYNCPT_NB_BASES 8 | ||
| 139 | #define NV_HOST1X_NB_MLOCKS 16 | ||
| 140 | |||
| 141 | /* host class methods */ | ||
| 142 | enum { | ||
| 143 | NV_CLASS_HOST_INCR_SYNCPT = 0x0, | ||
| 144 | NV_CLASS_HOST_WAIT_SYNCPT = 0x8, | ||
| 145 | NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9, | ||
| 146 | NV_CLASS_HOST_LOAD_SYNCPT_BASE = 0xb, | ||
| 147 | NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc, | ||
| 148 | NV_CLASS_HOST_INDOFF = 0x2d, | ||
| 149 | NV_CLASS_HOST_INDDATA = 0x2e | ||
| 150 | }; | ||
| 151 | /* sync point conditionals */ | ||
| 152 | enum { | ||
| 153 | NV_SYNCPT_IMMEDIATE = 0x0, | ||
| 154 | NV_SYNCPT_OP_DONE = 0x1, | ||
| 155 | NV_SYNCPT_RD_DONE = 0x2, | ||
| 156 | NV_SYNCPT_REG_WR_SAFE = 0x3, | ||
| 157 | }; | ||
| 158 | |||
| 159 | static inline u32 nvhost_class_host_wait_syncpt( | ||
| 160 | unsigned indx, unsigned threshold) | ||
| 161 | { | ||
| 162 | return (indx << 24) | (threshold & 0xffffff); | ||
| 163 | } | ||
| 164 | |||
| 165 | static inline u32 nvhost_class_host_load_syncpt_base( | ||
| 166 | unsigned indx, unsigned threshold) | ||
| 167 | { | ||
| 168 | return (indx << 24) | (threshold & 0xffffff); | ||
| 169 | } | ||
| 170 | |||
| 171 | static inline u32 nvhost_class_host_wait_syncpt_base( | ||
| 172 | unsigned indx, unsigned base_indx, unsigned offset) | ||
| 173 | { | ||
| 174 | return (indx << 24) | (base_indx << 16) | offset; | ||
| 175 | } | ||
| 176 | |||
| 177 | static inline u32 nvhost_class_host_incr_syncpt_base( | ||
| 178 | unsigned base_indx, unsigned offset) | ||
| 179 | { | ||
| 180 | return (base_indx << 24) | offset; | ||
| 181 | } | ||
| 182 | |||
| 183 | static inline u32 nvhost_class_host_incr_syncpt( | ||
| 184 | unsigned cond, unsigned indx) | ||
| 185 | { | ||
| 186 | return (cond << 8) | indx; | ||
| 187 | } | ||
| 188 | |||
| 189 | enum { | ||
| 190 | NV_HOST_MODULE_HOST1X = 0, | ||
| 191 | NV_HOST_MODULE_MPE = 1, | ||
| 192 | NV_HOST_MODULE_GR3D = 6 | ||
| 193 | }; | ||
| 194 | |||
| 195 | static inline u32 nvhost_class_host_indoff_reg_write( | ||
| 196 | unsigned mod_id, unsigned offset, bool auto_inc) | ||
| 197 | { | ||
| 198 | u32 v = (0xf << 28) | (mod_id << 18) | (offset << 2); | ||
| 199 | if (auto_inc) | ||
| 200 | v |= BIT(27); | ||
| 201 | return v; | ||
| 202 | } | ||
| 203 | |||
| 204 | static inline u32 nvhost_class_host_indoff_reg_read( | ||
| 205 | unsigned mod_id, unsigned offset, bool auto_inc) | ||
| 206 | { | ||
| 207 | u32 v = (mod_id << 18) | (offset << 2) | 1; | ||
| 208 | if (auto_inc) | ||
| 209 | v |= BIT(27); | ||
| 210 | return v; | ||
| 211 | } | ||
| 212 | |||
| 213 | |||
| 214 | /* cdma opcodes */ | ||
| 215 | static inline u32 nvhost_opcode_setclass( | ||
| 216 | unsigned class_id, unsigned offset, unsigned mask) | ||
| 217 | { | ||
| 218 | return (0 << 28) | (offset << 16) | (class_id << 6) | mask; | ||
| 219 | } | ||
| 220 | |||
| 221 | static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count) | ||
| 222 | { | ||
| 223 | return (1 << 28) | (offset << 16) | count; | ||
| 224 | } | ||
| 225 | |||
| 226 | static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count) | ||
| 227 | { | ||
| 228 | return (2 << 28) | (offset << 16) | count; | ||
| 229 | } | ||
| 230 | |||
| 231 | static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask) | ||
| 232 | { | ||
| 233 | return (3 << 28) | (offset << 16) | mask; | ||
| 234 | } | ||
| 235 | |||
| 236 | static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value) | ||
| 237 | { | ||
| 238 | return (4 << 28) | (offset << 16) | value; | ||
| 239 | } | ||
| 240 | |||
| 241 | static inline u32 nvhost_opcode_imm_incr_syncpt(unsigned cond, unsigned indx) | ||
| 242 | { | ||
| 243 | return nvhost_opcode_imm(NV_CLASS_HOST_INCR_SYNCPT, | ||
| 244 | nvhost_class_host_incr_syncpt(cond, indx)); | ||
| 245 | } | ||
| 246 | |||
| 247 | static inline u32 nvhost_opcode_restart(unsigned address) | ||
| 248 | { | ||
| 249 | return (5 << 28) | (address >> 4); | ||
| 250 | } | ||
| 251 | |||
| 252 | static inline u32 nvhost_opcode_gather(unsigned count) | ||
| 253 | { | ||
| 254 | return (6 << 28) | count; | ||
| 255 | } | ||
| 256 | |||
| 257 | static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count) | ||
| 258 | { | ||
| 259 | return (6 << 28) | (offset << 16) | BIT(15) | count; | ||
| 260 | } | ||
| 261 | |||
| 262 | static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count) | ||
| 263 | { | ||
| 264 | return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count; | ||
| 265 | } | ||
| 266 | |||
| 267 | #define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0) | ||
| 268 | |||
| 269 | static inline u32 nvhost_mask2(unsigned x, unsigned y) | ||
| 270 | { | ||
| 271 | return 1 | (1 << (y - x)); | ||
| 272 | } | ||
| 273 | |||
| 274 | #endif | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_hwctx.h b/drivers/video/tegra/host/host1x/host1x_hwctx.h new file mode 100644 index 00000000000..7587642d0e1 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_hwctx.h | |||
| @@ -0,0 +1,65 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_hwctx.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host HOST1X Hardware Context Interface | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; either version 2 of the License, or | ||
| 11 | * (at your option) any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License along | ||
| 19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #ifndef __NVHOST_HOST1X_HWCTX_H | ||
| 24 | #define __NVHOST_HOST1X_HWCTX_H | ||
| 25 | |||
| 26 | #include <linux/kref.h> | ||
| 27 | |||
| 28 | struct nvhost_hwctx_handler; | ||
| 29 | struct nvhost_channel; | ||
| 30 | |||
| 31 | #define to_host1x_hwctx_handler(handler) \ | ||
| 32 | container_of((handler), struct host1x_hwctx_handler, h) | ||
| 33 | #define to_host1x_hwctx(h) container_of((h), struct host1x_hwctx, hwctx) | ||
| 34 | #define host1x_hwctx_handler(_hwctx) to_host1x_hwctx_handler((_hwctx)->hwctx.h) | ||
| 35 | |||
| 36 | struct host1x_hwctx { | ||
| 37 | struct nvhost_hwctx hwctx; | ||
| 38 | |||
| 39 | u32 save_incrs; | ||
| 40 | u32 save_thresh; | ||
| 41 | u32 save_slots; | ||
| 42 | |||
| 43 | struct nvmap_handle_ref *restore; | ||
| 44 | u32 *restore_virt; | ||
| 45 | phys_addr_t restore_phys; | ||
| 46 | u32 restore_size; | ||
| 47 | u32 restore_incrs; | ||
| 48 | }; | ||
| 49 | |||
| 50 | struct host1x_hwctx_handler { | ||
| 51 | struct nvhost_hwctx_handler h; | ||
| 52 | |||
| 53 | u32 syncpt; | ||
| 54 | u32 waitbase; | ||
| 55 | u32 restore_size; | ||
| 56 | u32 restore_incrs; | ||
| 57 | struct nvmap_handle_ref *save_buf; | ||
| 58 | u32 save_incrs; | ||
| 59 | u32 save_thresh; | ||
| 60 | u32 save_slots; | ||
| 61 | phys_addr_t save_phys; | ||
| 62 | u32 save_size; | ||
| 63 | }; | ||
| 64 | |||
| 65 | #endif | ||
diff --git a/drivers/video/tegra/host/host1x/host1x_intr.c b/drivers/video/tegra/host/host1x/host1x_intr.c new file mode 100644 index 00000000000..47e984e2943 --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_intr.c | |||
| @@ -0,0 +1,218 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_intr.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Interrupt Management | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/interrupt.h> | ||
| 22 | #include <linux/irq.h> | ||
| 23 | |||
| 24 | #include "nvhost_intr.h" | ||
| 25 | #include "dev.h" | ||
| 26 | #include "host1x_hardware.h" | ||
| 27 | |||
| 28 | |||
| 29 | /*** HW host sync management ***/ | ||
| 30 | |||
| 31 | static void t20_intr_init_host_sync(struct nvhost_intr *intr) | ||
| 32 | { | ||
| 33 | struct nvhost_master *dev = intr_to_dev(intr); | ||
| 34 | void __iomem *sync_regs = dev->sync_aperture; | ||
| 35 | /* disable the ip_busy_timeout. this prevents write drops, etc. | ||
| 36 | * there's no real way to recover from a hung client anyway. | ||
| 37 | */ | ||
| 38 | writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT); | ||
| 39 | |||
| 40 | /* increase the auto-ack timout to the maximum value. 2d will hang | ||
| 41 | * otherwise on ap20. | ||
| 42 | */ | ||
| 43 | writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG); | ||
| 44 | } | ||
| 45 | |||
| 46 | static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm) | ||
| 47 | { | ||
| 48 | struct nvhost_master *dev = intr_to_dev(intr); | ||
| 49 | void __iomem *sync_regs = dev->sync_aperture; | ||
| 50 | /* write microsecond clock register */ | ||
| 51 | writel(cpm, sync_regs + HOST1X_SYNC_USEC_CLK); | ||
| 52 | } | ||
| 53 | |||
| 54 | static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr, | ||
| 55 | u32 id, u32 thresh) | ||
| 56 | { | ||
| 57 | struct nvhost_master *dev = intr_to_dev(intr); | ||
| 58 | void __iomem *sync_regs = dev->sync_aperture; | ||
| 59 | thresh &= 0xffff; | ||
| 60 | writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4)); | ||
| 61 | } | ||
| 62 | |||
| 63 | static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id) | ||
| 64 | { | ||
| 65 | struct nvhost_master *dev = intr_to_dev(intr); | ||
| 66 | void __iomem *sync_regs = dev->sync_aperture; | ||
| 67 | writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0); | ||
| 68 | } | ||
| 69 | |||
| 70 | static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr) | ||
| 71 | { | ||
| 72 | struct nvhost_master *dev = intr_to_dev(intr); | ||
| 73 | void __iomem *sync_regs = dev->sync_aperture; | ||
| 74 | /* disable interrupts for both cpu's */ | ||
| 75 | writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE); | ||
| 76 | |||
| 77 | /* clear status for both cpu's */ | ||
| 78 | writel(0xffffffffu, sync_regs + | ||
| 79 | HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS); | ||
| 80 | writel(0xffffffffu, sync_regs + | ||
| 81 | HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS); | ||
| 82 | } | ||
| 83 | |||
| 84 | /** | ||
| 85 | * Sync point threshold interrupt service function | ||
| 86 | * Handles sync point threshold triggers, in interrupt context | ||
| 87 | */ | ||
| 88 | irqreturn_t t20_intr_syncpt_thresh_isr(int irq, void *dev_id) | ||
| 89 | { | ||
| 90 | struct nvhost_intr_syncpt *syncpt = dev_id; | ||
| 91 | unsigned int id = syncpt->id; | ||
| 92 | struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt); | ||
| 93 | |||
| 94 | void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture; | ||
| 95 | |||
| 96 | writel(BIT(id), | ||
| 97 | sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE); | ||
| 98 | writel(BIT(id), | ||
| 99 | sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS); | ||
| 100 | |||
| 101 | return IRQ_WAKE_THREAD; | ||
| 102 | } | ||
| 103 | |||
| 104 | /** | ||
| 105 | * Host general interrupt service function | ||
| 106 | * Handles read / write failures | ||
| 107 | */ | ||
| 108 | static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id) | ||
| 109 | { | ||
| 110 | struct nvhost_intr *intr = dev_id; | ||
| 111 | void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture; | ||
| 112 | u32 stat; | ||
| 113 | u32 ext_stat; | ||
| 114 | u32 addr; | ||
| 115 | |||
| 116 | stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS); | ||
| 117 | ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT); | ||
| 118 | |||
| 119 | if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_READ_INT, ext_stat)) { | ||
| 120 | addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR); | ||
| 121 | pr_err("Host read timeout at address %x\n", addr); | ||
| 122 | } | ||
| 123 | |||
| 124 | if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_WRITE_INT, ext_stat)) { | ||
| 125 | addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR); | ||
| 126 | pr_err("Host write timeout at address %x\n", addr); | ||
| 127 | } | ||
| 128 | |||
| 129 | writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT); | ||
| 130 | writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS); | ||
| 131 | |||
| 132 | return IRQ_HANDLED; | ||
| 133 | } | ||
/**
 * Request and enable the host general (non-syncpoint) interrupt.
 * Idempotent: returns 0 immediately if the irq is already requested.
 * All sources are masked and stale status cleared before the handler is
 * installed, then sources are enabled bottom-up and the master enable
 * is flipped last.
 */
static int t20_intr_request_host_general_irq(struct nvhost_intr *intr)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	int err;

	if (intr->host_general_irq_requested)
		return 0;

	/* master disable for general (not syncpt) host interrupts */
	writel(0, sync_regs + HOST1X_SYNC_INTMASK);

	/* clear status & extstatus */
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS);

	err = request_irq(intr->host_general_irq, t20_intr_host1x_isr, 0,
			"host_status", intr);
	if (err)
		return err;

	/* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
	writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);

	/* enable extra interrupt sources */
	writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);

	/* enable host module interrupt to CPU0 */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);

	/* master enable for general (not syncpt) host interrupts */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);

	intr->host_general_irq_requested = true;

	/* err is 0 here */
	return err;
}
| 170 | |||
| 171 | static void t20_intr_free_host_general_irq(struct nvhost_intr *intr) | ||
| 172 | { | ||
| 173 | if (intr->host_general_irq_requested) { | ||
| 174 | void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture; | ||
| 175 | |||
| 176 | /* master disable for general (not syncpt) host interrupts */ | ||
| 177 | writel(0, sync_regs + HOST1X_SYNC_INTMASK); | ||
| 178 | |||
| 179 | free_irq(intr->host_general_irq, intr); | ||
| 180 | intr->host_general_irq_requested = false; | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | static int t20_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt) | ||
| 185 | { | ||
| 186 | int err; | ||
| 187 | if (syncpt->irq_requested) | ||
| 188 | return 0; | ||
| 189 | |||
| 190 | err = request_threaded_irq(syncpt->irq, | ||
| 191 | t20_intr_syncpt_thresh_isr, | ||
| 192 | nvhost_syncpt_thresh_fn, | ||
| 193 | 0, syncpt->thresh_irq_name, syncpt); | ||
| 194 | if (err) | ||
| 195 | return err; | ||
| 196 | |||
| 197 | syncpt->irq_requested = 1; | ||
| 198 | return 0; | ||
| 199 | } | ||
| 200 | |||
/**
 * Populate the Tegra2 interrupt op table on the nvhost master.
 * Always succeeds (returns 0).
 */
int nvhost_init_t20_intr_support(struct nvhost_master *host)
{
	host->op.intr.init_host_sync = t20_intr_init_host_sync;
	host->op.intr.set_host_clocks_per_usec =
		t20_intr_set_host_clocks_per_usec;
	host->op.intr.set_syncpt_threshold = t20_intr_set_syncpt_threshold;
	host->op.intr.enable_syncpt_intr = t20_intr_enable_syncpt_intr;
	host->op.intr.disable_all_syncpt_intrs =
		t20_intr_disable_all_syncpt_intrs;
	host->op.intr.request_host_general_irq =
		t20_intr_request_host_general_irq;
	host->op.intr.free_host_general_irq =
		t20_intr_free_host_general_irq;
	host->op.intr.request_syncpt_irq =
		t20_request_syncpt_irq;

	return 0;
}
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.c b/drivers/video/tegra/host/host1x/host1x_syncpt.c new file mode 100644 index 00000000000..b0fd9970aaa --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_syncpt.c | |||
| @@ -0,0 +1,248 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_syncpt.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Syncpoints for HOST1X | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/nvhost_ioctl.h> | ||
| 22 | #include "nvhost_syncpt.h" | ||
| 23 | #include "dev.h" | ||
| 24 | #include "host1x_syncpt.h" | ||
| 25 | #include "host1x_hardware.h" | ||
| 26 | |||
| 27 | /** | ||
| 28 | * Write the current syncpoint value back to hw. | ||
| 29 | */ | ||
| 30 | static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id) | ||
| 31 | { | ||
| 32 | struct nvhost_master *dev = syncpt_to_dev(sp); | ||
| 33 | int min = nvhost_syncpt_read_min(sp, id); | ||
| 34 | writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4)); | ||
| 35 | } | ||
| 36 | |||
| 37 | /** | ||
| 38 | * Write the current waitbase value back to hw. | ||
| 39 | */ | ||
| 40 | static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id) | ||
| 41 | { | ||
| 42 | struct nvhost_master *dev = syncpt_to_dev(sp); | ||
| 43 | writel(sp->base_val[id], | ||
| 44 | dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4)); | ||
| 45 | } | ||
| 46 | |||
| 47 | /** | ||
| 48 | * Read waitbase value from hw. | ||
| 49 | */ | ||
| 50 | static void t20_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id) | ||
| 51 | { | ||
| 52 | struct nvhost_master *dev = syncpt_to_dev(sp); | ||
| 53 | sp->base_val[id] = readl(dev->sync_aperture + | ||
| 54 | (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4)); | ||
| 55 | } | ||
| 56 | |||
| 57 | /** | ||
| 58 | * Updates the last value read from hardware. | ||
| 59 | * (was nvhost_syncpt_update_min) | ||
| 60 | */ | ||
/**
 * Read the live syncpoint value from hardware and fold it into the
 * cached minimum. The cmpxchg loop retries until the cached value is
 * swapped atomically from the value we read it at, so concurrent
 * updaters cannot lose an update. Returns the live hardware value.
 */
static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live;

	do {
		old = nvhost_syncpt_read_min(sp, id);
		live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	/* min must never pass max; a failure here indicates a bug */
	if (!nvhost_syncpt_check_max(sp, id, live))
		dev_err(&syncpt_to_dev(sp)->dev->dev,
				"%s failed: id=%u\n",
				__func__,
				id);

	return live;
}
| 80 | |||
| 81 | /** | ||
| 82 | * Write a cpu syncpoint increment to the hardware, without touching | ||
| 83 | * the cache. Caller is responsible for host being powered. | ||
| 84 | */ | ||
static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	BUG_ON(!nvhost_module_powered(dev->dev));
	/* For host-managed syncpoints, refuse to increment past the
	 * reserved max; dump debug state instead of corrupting tracking. */
	if (!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id)) {
		dev_err(&syncpt_to_dev(sp)->dev->dev,
			"Trying to increment syncpoint id %d beyond max\n",
			id);
		nvhost_debug_dump(syncpt_to_dev(sp));
		return;
	}
	writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
	/* ensure the register write is posted before callers proceed */
	wmb();
}
| 99 | |||
| 100 | /* check for old WAITs to be removed (avoiding a wrap) */ | ||
/* check for old WAITs to be removed (avoiding a wrap) */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
				 struct nvmap_client *nvmap,
				 u32 waitchk_mask,
				 struct nvhost_waitchk *wait,
				 int num_waitchk)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values for every syncpoint in the mask */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask)
			nvhost_syncpt_update_min(sp, idx);
	}

	BUG_ON(!wait && !num_waitchk);

	/* compare syncpt vs wait threshold */
	while (num_waitchk) {
		u32 override;

		BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
		if (nvhost_syncpt_is_expired(sp,
			wait->syncpt_id, wait->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
			    "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
			    wait->syncpt_id,
			    syncpt_op(sp).name(sp, wait->syncpt_id),
			    wait->thresh,
			    nvhost_syncpt_read_min(sp, wait->syncpt_id));

			/* patch the wait in place in the command buffer;
			 * stop at the first patch failure */
			override = nvhost_class_host_wait_syncpt(
					NVSYNCPT_GRAPHICS_HOST, 0);
			err = nvmap_patch_word(nvmap,
					(struct nvmap_handle *)wait->mem,
					wait->offset, override);
			if (err)
				break;
		}

		wait++;
		num_waitchk--;
	}
	/* 0 on success, or the first nvmap_patch_word() error */
	return err;
}
| 155 | |||
| 156 | |||
/* Human-readable names for the 32 hardware syncpoints, indexed by id.
 * Ids 1..7 have no assigned name (empty strings). Ids match the
 * NVSYNCPT_* defines in host1x_syncpt.h. */
static const char *s_syncpt_names[32] = {
	"gfx_host",
	"", "", "", "", "", "", "",
	"disp0_a", "disp1_a", "avp_0",
	"csi_vi_0", "csi_vi_1",
	"vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
	"2d_0", "2d_1",
	"disp0_b", "disp1_b",
	"3d",
	"mpe",
	"disp0_c", "disp1_c",
	"vblank0", "vblank1",
	"mpe_ebm_eof", "mpe_wr_safe",
	"2d_tinyblt",
	"dsi"
};
| 173 | |||
| 174 | static const char *t20_syncpt_name(struct nvhost_syncpt *s, u32 id) | ||
| 175 | { | ||
| 176 | BUG_ON(id >= ARRAY_SIZE(s_syncpt_names)); | ||
| 177 | return s_syncpt_names[id]; | ||
| 178 | } | ||
| 179 | |||
/**
 * Dump every syncpoint (min/max) and every wait base with a non-zero
 * value to the kernel log. Refreshes each min from hardware first.
 */
static void t20_syncpt_debug(struct nvhost_syncpt *sp)
{
	u32 i;
	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
		u32 max = nvhost_syncpt_read_max(sp, i);
		u32 min = nvhost_syncpt_update_min(sp, i);
		/* skip syncpoints that have never been used */
		if (!max && !min)
			continue;
		dev_info(&syncpt_to_dev(sp)->dev->dev,
			"id %d (%s) min %d max %d\n",
			i, syncpt_op(sp).name(sp, i),
			min, max);

	}

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++) {
		u32 base_val;
		/* refresh the cached wait base from hardware */
		t20_syncpt_read_wait_base(sp, i);
		base_val = sp->base_val[i];
		if (base_val)
			dev_info(&syncpt_to_dev(sp)->dev->dev,
				"waitbase id %d val %d\n",
				i, base_val);

	}
}
| 206 | |||
/* Try to take hardware module lock 'idx'. Returns 0 when the lock was
 * acquired by this read, non-zero when it is already held. */
static int syncpt_mutex_try_lock(struct nvhost_syncpt *sp,
		unsigned int idx)
{
	void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
	/* mlock registers returns 0 when the lock is acquired.
	 * writing 0 clears the lock. */
	return !!readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
}
| 215 | |||
| 216 | static void syncpt_mutex_unlock(struct nvhost_syncpt *sp, | ||
| 217 | unsigned int idx) | ||
| 218 | { | ||
| 219 | void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture; | ||
| 220 | |||
| 221 | writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4)); | ||
| 222 | } | ||
| 223 | |||
/**
 * Wire up the host1x syncpoint op table and capacity constants on the
 * nvhost master, and locate the sync register aperture inside the
 * already-mapped host1x aperture. Always succeeds (returns 0).
 */
int host1x_init_syncpt_support(struct nvhost_master *host)
{

	host->sync_aperture = host->aperture +
		(NV_HOST1X_CHANNEL0_BASE +
			HOST1X_CHANNEL_SYNC_REG_BASE);

	host->op.syncpt.reset = t20_syncpt_reset;
	host->op.syncpt.reset_wait_base = t20_syncpt_reset_wait_base;
	host->op.syncpt.read_wait_base = t20_syncpt_read_wait_base;
	host->op.syncpt.update_min = t20_syncpt_update_min;
	host->op.syncpt.cpu_incr = t20_syncpt_cpu_incr;
	host->op.syncpt.wait_check = t20_syncpt_wait_check;
	host->op.syncpt.debug = t20_syncpt_debug;
	host->op.syncpt.name = t20_syncpt_name;
	host->op.syncpt.mutex_try_lock = syncpt_mutex_try_lock;
	host->op.syncpt.mutex_unlock = syncpt_mutex_unlock;

	host->syncpt.nb_pts = NV_HOST1X_SYNCPT_NB_PTS;
	host->syncpt.nb_bases = NV_HOST1X_SYNCPT_NB_BASES;
	host->syncpt.client_managed = NVSYNCPTS_CLIENT_MANAGED;
	host->syncpt.nb_mlocks = NV_HOST1X_SYNC_MLOCK_NUM;

	return 0;
}
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.h b/drivers/video/tegra/host/host1x/host1x_syncpt.h new file mode 100644 index 00000000000..0d263dc92ed --- /dev/null +++ b/drivers/video/tegra/host/host1x/host1x_syncpt.h | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/host1x/host1x_syncpt.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Syncpoints for HOST1X | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
#ifndef __NVHOST_HOST1X_HOST1X_SYNCPT_H
#define __NVHOST_HOST1X_HOST1X_SYNCPT_H

/* Hardware syncpoint id assignments for host1x client modules.
 * These indices match the name table in host1x_syncpt.c. */
#define NVSYNCPT_DISP0_A	(8)
#define NVSYNCPT_DISP1_A	(9)
#define NVSYNCPT_AVP_0		(10)
#define NVSYNCPT_CSI_VI_0	(11)
#define NVSYNCPT_CSI_VI_1	(12)
#define NVSYNCPT_VI_ISP_0	(13)
#define NVSYNCPT_VI_ISP_1	(14)
#define NVSYNCPT_VI_ISP_2	(15)
#define NVSYNCPT_VI_ISP_3	(16)
#define NVSYNCPT_VI_ISP_4	(17)
#define NVSYNCPT_2D_0		(18)
#define NVSYNCPT_2D_1		(19)
#define NVSYNCPT_DISP0_B	(20)
#define NVSYNCPT_DISP1_B	(21)
#define NVSYNCPT_3D		(22)
#define NVSYNCPT_MPE		(23)
#define NVSYNCPT_DISP0_C	(24)
#define NVSYNCPT_DISP1_C	(25)
#define NVSYNCPT_VBLANK0	(26)
#define NVSYNCPT_VBLANK1	(27)
#define NVSYNCPT_MPE_EBM_EOF	(28)
#define NVSYNCPT_MPE_WR_SAFE	(29)
#define NVSYNCPT_DSI		(31)


/* Retired/alternate assignments, kept for reference. */
/*#define NVSYNCPT_2D_CHANNEL2_0	(20) */
/*#define NVSYNCPT_2D_CHANNEL2_1	(21) */
/*#define NVSYNCPT_2D_TINYBLT_WAR		(30)*/
/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID	(30)*/

/* sync points that are wholly managed by the client */
#define NVSYNCPTS_CLIENT_MANAGED ( \
	BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) | \
	BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) | \
	BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) | \
	BIT(NVSYNCPT_DSI) | \
	BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1) | \
	BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) | \
	BIT(NVSYNCPT_VI_ISP_1) | BIT(NVSYNCPT_VI_ISP_2) | \
	BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | \
	BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
	BIT(NVSYNCPT_2D_1) | BIT(NVSYNCPT_AVP_0))


/* Wait base register index assignments. */
#define NVWAITBASE_2D_0	(1)
#define NVWAITBASE_2D_1	(2)
#define NVWAITBASE_3D	(3)
#define NVWAITBASE_MPE	(4)

struct nvhost_master;
int host1x_init_syncpt(struct nvhost_master *host);
int host1x_init_syncpt_support(struct nvhost_master *host);

#endif /* __NVHOST_HOST1X_HOST1X_SYNCPT_H */
diff --git a/drivers/video/tegra/host/isp/Makefile b/drivers/video/tegra/host/isp/Makefile new file mode 100644 index 00000000000..7bcdc33c83d --- /dev/null +++ b/drivers/video/tegra/host/isp/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
# Build the ISP client module as part of the Tegra grhost stack.
GCOV_PROFILE := y
# Client modules include headers from the parent nvhost directory.
EXTRA_CFLAGS += -Idrivers/video/tegra/host

nvhost-isp-objs = \
	isp.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-isp.o
diff --git a/drivers/video/tegra/host/isp/isp.c b/drivers/video/tegra/host/isp/isp.c new file mode 100644 index 00000000000..f39dc644b27 --- /dev/null +++ b/drivers/video/tegra/host/isp/isp.c | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/isp/isp.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics ISP | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "dev.h" | ||
| 22 | #include "bus_client.h" | ||
| 23 | |||
/* Bind the ISP device: only the common nvhost client init is needed. */
static int __devinit isp_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}
| 28 | |||
/* Unbind the ISP device; no teardown is implemented yet. */
static int __exit isp_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}
| 34 | |||
/* Suspend hook: defer to the common nvhost client suspend path. */
static int isp_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}
| 39 | |||
/* Resume hook: nothing to restore, just log the transition. */
static int isp_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
| 45 | |||
/* Device handle obtained from the nvhost bus at init time. */
struct nvhost_device *isp_device;

static struct nvhost_driver isp_driver = {
	.probe = isp_probe,
	.remove = __exit_p(isp_remove),
#ifdef CONFIG_PM
	.suspend = isp_suspend,
	.resume = isp_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "isp",
	}
};
| 60 | |||
/* Module init: look up the "isp" device on the nvhost bus, register it,
 * then register the driver. */
static int __init isp_init(void)
{
	int err;

	isp_device = nvhost_get_device("isp");
	if (!isp_device)
		return -ENXIO;

	err = nvhost_device_register(isp_device);
	if (err)
		return err;

	/* NOTE(review): if this registration fails, isp_device stays
	 * registered — no rollback is performed; confirm whether the
	 * nvhost bus provides an unregister to call here. */
	return nvhost_driver_register(&isp_driver);
}
| 75 | |||
/* Module exit: unregister the driver.
 * NOTE(review): isp_device registered in isp_init() is never
 * unregistered here — verify whether that is intentional. */
static void __exit isp_exit(void)
{
	nvhost_driver_unregister(&isp_driver);
}

module_init(isp_init);
module_exit(isp_exit);
diff --git a/drivers/video/tegra/host/mpe/Makefile b/drivers/video/tegra/host/mpe/Makefile new file mode 100644 index 00000000000..efd77bb88fe --- /dev/null +++ b/drivers/video/tegra/host/mpe/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
# Build the MPE (video encoder) client module of the Tegra grhost stack.
GCOV_PROFILE := y
# Client modules include headers from the parent nvhost directory.
EXTRA_CFLAGS += -Idrivers/video/tegra/host

nvhost-mpe-objs = \
	mpe.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-mpe.o
diff --git a/drivers/video/tegra/host/mpe/mpe.c b/drivers/video/tegra/host/mpe/mpe.c new file mode 100644 index 00000000000..28002aa637a --- /dev/null +++ b/drivers/video/tegra/host/mpe/mpe.c | |||
| @@ -0,0 +1,638 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/mpe/mpe.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host MPE | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "nvhost_hwctx.h" | ||
| 22 | #include "dev.h" | ||
| 23 | #include "host1x/host1x_hardware.h" | ||
| 24 | #include "host1x/host1x_channel.h" | ||
| 25 | #include "host1x/host1x_syncpt.h" | ||
| 26 | #include "host1x/host1x_hwctx.h" | ||
| 27 | #include "t20/t20.h" | ||
| 28 | #include <linux/slab.h> | ||
| 29 | #include "bus_client.h" | ||
| 30 | |||
/* Handling category for each entry in ctxsave_regs_mpe; see the
 * save/restore paths below for how each category is treated. */
enum {
	HWCTX_REGINFO_NORMAL = 0,
	HWCTX_REGINFO_STASH,
	HWCTX_REGINFO_CALCULATE,
	HWCTX_REGINFO_WRITEBACK
};
| 37 | |||
/* MPE context save/restore register table: {offset, count, type}
 * triples consumed by setup_save_regs() and setup_restore_regs().
 * Order matters: it defines the layout of both gathers. */
const struct hwctx_reginfo ctxsave_regs_mpe[] = {
	HWCTX_REGINFO(0x124, 1, STASH),
	HWCTX_REGINFO(0x123, 1, STASH),
	HWCTX_REGINFO(0x103, 1, STASH),
	HWCTX_REGINFO(0x074, 1, STASH),
	HWCTX_REGINFO(0x021, 1, NORMAL),
	HWCTX_REGINFO(0x020, 1, STASH),
	HWCTX_REGINFO(0x024, 2, NORMAL),
	HWCTX_REGINFO(0x0e6, 1, NORMAL),
	HWCTX_REGINFO(0x3fc, 1, NORMAL),
	HWCTX_REGINFO(0x3d0, 1, NORMAL),
	HWCTX_REGINFO(0x3d4, 1, NORMAL),
	HWCTX_REGINFO(0x013, 1, NORMAL),
	HWCTX_REGINFO(0x022, 1, NORMAL),
	HWCTX_REGINFO(0x030, 4, NORMAL),
	HWCTX_REGINFO(0x023, 1, NORMAL),
	HWCTX_REGINFO(0x070, 1, NORMAL),
	HWCTX_REGINFO(0x0a0, 9, NORMAL),
	HWCTX_REGINFO(0x071, 1, NORMAL),
	HWCTX_REGINFO(0x100, 4, NORMAL),
	HWCTX_REGINFO(0x104, 2, NORMAL),
	HWCTX_REGINFO(0x108, 9, NORMAL),
	HWCTX_REGINFO(0x112, 2, NORMAL),
	HWCTX_REGINFO(0x114, 1, STASH),
	HWCTX_REGINFO(0x014, 1, NORMAL),
	HWCTX_REGINFO(0x072, 1, NORMAL),
	HWCTX_REGINFO(0x200, 1, NORMAL),
	HWCTX_REGINFO(0x0d1, 1, NORMAL),
	HWCTX_REGINFO(0x0d0, 1, NORMAL),
	HWCTX_REGINFO(0x0c0, 1, NORMAL),
	HWCTX_REGINFO(0x0c3, 2, NORMAL),
	HWCTX_REGINFO(0x0d2, 1, NORMAL),
	HWCTX_REGINFO(0x0d8, 1, NORMAL),
	HWCTX_REGINFO(0x0e0, 2, NORMAL),
	HWCTX_REGINFO(0x07f, 2, NORMAL),
	HWCTX_REGINFO(0x084, 8, NORMAL),
	HWCTX_REGINFO(0x0d3, 1, NORMAL),
	HWCTX_REGINFO(0x040, 13, NORMAL),
	HWCTX_REGINFO(0x050, 6, NORMAL),
	HWCTX_REGINFO(0x058, 1, NORMAL),
	HWCTX_REGINFO(0x057, 1, NORMAL),
	HWCTX_REGINFO(0x111, 1, NORMAL),
	HWCTX_REGINFO(0x130, 3, NORMAL),
	HWCTX_REGINFO(0x201, 1, NORMAL),
	HWCTX_REGINFO(0x068, 2, NORMAL),
	HWCTX_REGINFO(0x08c, 1, NORMAL),
	HWCTX_REGINFO(0x0cf, 1, NORMAL),
	HWCTX_REGINFO(0x082, 2, NORMAL),
	HWCTX_REGINFO(0x075, 1, NORMAL),
	HWCTX_REGINFO(0x0e8, 1, NORMAL),
	HWCTX_REGINFO(0x056, 1, NORMAL),
	HWCTX_REGINFO(0x057, 1, NORMAL),
	HWCTX_REGINFO(0x073, 1, CALCULATE),
	HWCTX_REGINFO(0x074, 1, NORMAL),
	HWCTX_REGINFO(0x075, 1, NORMAL),
	HWCTX_REGINFO(0x076, 1, STASH),
	HWCTX_REGINFO(0x11a, 9, NORMAL),
	HWCTX_REGINFO(0x123, 1, NORMAL),
	HWCTX_REGINFO(0x124, 1, NORMAL),
	HWCTX_REGINFO(0x12a, 5, NORMAL),
	HWCTX_REGINFO(0x12f, 1, STASH),
	HWCTX_REGINFO(0x125, 2, NORMAL),
	HWCTX_REGINFO(0x034, 1, NORMAL),
	HWCTX_REGINFO(0x133, 2, NORMAL),
	HWCTX_REGINFO(0x127, 1, NORMAL),
	HWCTX_REGINFO(0x106, 1, WRITEBACK),
	HWCTX_REGINFO(0x107, 1, WRITEBACK)
};
| 106 | |||
/* Number of STASH and WRITEBACK entries in ctxsave_regs_mpe above. */
#define NR_STASHES 8
#define NR_WRITEBACKS 2

/* RC RAM: command/data register pair and word count for the first
 * internal MPE RAM saved/restored via indirect access. */
#define RC_RAM_LOAD_CMD 0x115
#define RC_RAM_LOAD_DATA 0x116
#define RC_RAM_READ_CMD 0x128
#define RC_RAM_READ_DATA 0x129
#define RC_RAM_SIZE 692

/* IRFR RAM: second internal MPE RAM, same access scheme. */
#define IRFR_RAM_LOAD_CMD 0xc5
#define IRFR_RAM_LOAD_DATA 0xc6
#define IRFR_RAM_READ_CMD 0xcd
#define IRFR_RAM_READ_DATA 0xce
#define IRFR_RAM_SIZE 408
| 121 | |||
/* Per-save scratch state for the MPE context read path. */
struct mpe_save_info {
	u32 in[NR_STASHES];	/* values captured from STASH registers */
	u32 out[NR_WRITEBACKS];	/* values to write back on restore */
	unsigned in_pos;	/* fill cursor into in[] */
	unsigned out_pos;	/* fill cursor into out[] */
	u32 h264_mode;		/* captured mode flag; consumed outside this chunk */
};
| 129 | |||
| 130 | |||
| 131 | /*** restore ***/ | ||
| 132 | |||
/* Word count of the assembled restore gather; set elsewhere in this file. */
static unsigned int restore_size;

/* Emit the restore preamble: bump the wait base, then switch the
 * channel class to MPE so subsequent writes hit the encoder. */
static void restore_begin(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* set class to host */
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
	/* increment sync point base */
	ptr[1] = nvhost_class_host_incr_syncpt_base(h->waitbase, 1);
	/* set class to MPE */
	ptr[2] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
}
/* Number of words restore_begin() emits. */
#define RESTORE_BEGIN_SIZE 3
| 146 | |||
/* Emit the header for reloading 'words' words of an internal RAM:
 * an immediate write of the word count to cmd_reg, followed by a
 * non-incrementing burst into data_reg (the data words come after). */
static void restore_ram(u32 *ptr, unsigned words,
			unsigned cmd_reg, unsigned data_reg)
{
	ptr[0] = nvhost_opcode_imm(cmd_reg, words);
	ptr[1] = nvhost_opcode_nonincr(data_reg, words);
}
/* Header words emitted by restore_ram(), excluding the data payload. */
#define RESTORE_RAM_SIZE 2
| 154 | |||
static void restore_end(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* syncpt increment to track restore gather. */
	ptr[0] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
			h->syncpt);
}
/* Number of words restore_end() emits. */
#define RESTORE_END_SIZE 1
| 162 | |||
/* Emit incrementing-write headers for every register range in 'regs'.
 * Only the opcodes are written here; 'ptr += count' skips over the
 * value slots — presumably filled in by the save path when the
 * context is captured (TODO confirm against the save code).
 * Returns the advanced write pointer. */
static u32 *setup_restore_regs(u32 *ptr,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;

	for ( ; regs != rend; ++regs) {
		u32 offset = regs->offset;
		u32 count = regs->count;
		*ptr++ = nvhost_opcode_incr(offset, count);
		ptr += count;
	}
	return ptr;
}
| 177 | |||
/* Emit a RAM reload header and skip over the 'words' data slots.
 * Returns the advanced write pointer. */
static u32 *setup_restore_ram(u32 *ptr, unsigned words,
			unsigned cmd_reg, unsigned data_reg)
{
	restore_ram(ptr, words, cmd_reg, data_reg);
	return ptr + (RESTORE_RAM_SIZE + words);
}
| 184 | |||
/* Assemble the full restore gather: preamble, register writes, the two
 * internal RAM reloads, and the closing syncpt increment. The final
 * wmb() ensures the buffer is visible before hardware may fetch it. */
static void setup_restore(struct host1x_hwctx_handler *h, u32 *ptr)
{
	restore_begin(h, ptr);
	ptr += RESTORE_BEGIN_SIZE;

	ptr = setup_restore_regs(ptr, ctxsave_regs_mpe,
			ARRAY_SIZE(ctxsave_regs_mpe));

	ptr = setup_restore_ram(ptr, RC_RAM_SIZE,
			RC_RAM_LOAD_CMD, RC_RAM_LOAD_DATA);

	ptr = setup_restore_ram(ptr, IRFR_RAM_SIZE,
			IRFR_RAM_LOAD_CMD, IRFR_RAM_LOAD_DATA);

	restore_end(h, ptr);

	wmb();
}
| 203 | |||
| 204 | |||
| 205 | /*** save ***/ | ||
/* Accumulator passed through the setup_save_* helpers: the current
 * write pointer (NULL for a sizing-only pass) plus running word counts
 * for the save and restore gathers. */
struct save_info {
	u32 *ptr;
	unsigned int save_count;
	unsigned int restore_count;
};
| 211 | |||
/* Emit the save preamble: synchronize the channel with the context
 * read thread via syncpt increments against the wait base. */
static void __init save_begin(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* MPE: when done, increment syncpt to base+1 */
	ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
	ptr[1] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE, h->syncpt);
	/* host: wait for syncpt base+1 */
	ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
	ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt, h->waitbase, 1);
	/* host: signal context read thread to start reading */
	ptr[4] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE, h->syncpt);
}
/* Number of words save_begin() emits. */
#define SAVE_BEGIN_SIZE 5
| 225 | |||
/* Emit an indirect, auto-incrementing read of 'count' MPE registers
 * starting at start_reg; the values stream out via INDDATA. */
static void __init save_direct(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_INDOFF, 1);
	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
						start_reg, true);
	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
}
/* Header words emitted by save_direct(), excluding the data payload. */
#define SAVE_DIRECT_SIZE 3
| 235 | |||
/* Emit a write of 'count' to the RAM read command register, arming the
 * internal RAM for the reads that follow. */
static void __init save_set_ram_cmd(u32 *ptr, u32 cmd_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
					cmd_reg, 1);
	ptr[1] = count;
}
/* Number of words save_set_ram_cmd() emits. */
#define SAVE_SET_RAM_CMD_SIZE 2
| 243 | |||
/* Emit one indirect (non-incrementing) read of the RAM data register,
 * followed by a dummy write of 0x99 to the same register. */
static void __init save_read_ram_data_nasty(u32 *ptr, u32 data_reg)
{
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_INDOFF, 1);
	ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_MPE,
						data_reg, false);
	ptr[2] = nvhost_opcode_imm(NV_CLASS_HOST_INDDATA, 0);
	/* write junk data to avoid 'cached problem with register memory' */
	ptr[3] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
					data_reg, 1);
	ptr[4] = 0x99;
}
/* Number of words save_read_ram_data_nasty() emits per RAM word. */
#define SAVE_READ_RAM_DATA_NASTY_SIZE 5
| 257 | |||
/* Emit the save epilogue: block until the context read service has
 * finished, advance the wait base, and restore the channel class. */
static void __init save_end(struct host1x_hwctx_handler *h, u32 *ptr)
{
	/* Wait for context read service to finish (cpu incr 3) */
	ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
	ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt, h->waitbase, 3);
	/* Advance syncpoint base */
	ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
	ptr[3] = nvhost_class_host_incr_syncpt_base(h->waitbase, 3);
	/* set class back to the unit */
	ptr[4] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
}
/* Number of words save_end() emits. */
#define SAVE_END_SIZE 5
| 271 | |||
/* Walk a register table, emitting a save_direct read for every non-WRITEBACK
 * entry (when info->ptr is non-NULL) and accounting the save/restore word
 * counts either way.  A NULL info->ptr means "size-only" pass. */
static void __init setup_save_regs(struct save_info *info,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;

	for ( ; regs != rend; ++regs) {
		u32 offset = regs->offset;
		u32 count = regs->count;
		if (regs->type != HWCTX_REGINFO_WRITEBACK) {
			if (ptr) {
				save_direct(ptr, offset, count);
				ptr += SAVE_DIRECT_SIZE;
				/* zero-fill placeholder words; the CPU read
				 * service overwrites them with live data */
				memset(ptr, 0, count * 4);
				ptr += count;
			}
			save_count += (SAVE_DIRECT_SIZE + count);
		}
		/* every entry (including WRITEBACK) costs 1 + count words
		 * on the restore side */
		restore_count += (1 + count);
	}

	info->ptr = ptr;
	info->save_count = save_count;
	info->restore_count = restore_count;
}
| 300 | |||
/* Emit (or, with a NULL info->ptr, just size) the sequence that dumps
 * `words` entries of an internal MPE RAM through cmd_reg/data_reg, and
 * account the corresponding save/restore word counts. */
static void __init setup_save_ram_nasty(struct save_info *info, unsigned words,
					unsigned cmd_reg, unsigned data_reg)
{
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;
	unsigned i;

	if (ptr) {
		save_set_ram_cmd(ptr, cmd_reg, words);
		ptr += SAVE_SET_RAM_CMD_SIZE;
		/* one read sequence per RAM word */
		for (i = words; i; --i) {
			save_read_ram_data_nasty(ptr, data_reg);
			ptr += SAVE_READ_RAM_DATA_NASTY_SIZE;
		}
	}

	save_count += SAVE_SET_RAM_CMD_SIZE;
	save_count += words * SAVE_READ_RAM_DATA_NASTY_SIZE;
	restore_count += (RESTORE_RAM_SIZE + words);

	info->ptr = ptr;
	info->save_count = save_count;
	info->restore_count = restore_count;
}
| 326 | |||
/* Build the context-save command stream.  Called twice by
 * nvhost_mpe_ctxhandler_init(): first with ptr == NULL to compute
 * h->save_size and the file-scope restore_size, then with the mapped
 * save buffer to actually emit the opcodes. */
static void __init setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
{
	struct save_info info = {
		ptr,
		SAVE_BEGIN_SIZE,
		RESTORE_BEGIN_SIZE
	};

	if (info.ptr) {
		save_begin(h, info.ptr);
		info.ptr += SAVE_BEGIN_SIZE;
	}

	setup_save_regs(&info, ctxsave_regs_mpe,
			ARRAY_SIZE(ctxsave_regs_mpe));

	setup_save_ram_nasty(&info, RC_RAM_SIZE,
			RC_RAM_READ_CMD, RC_RAM_READ_DATA);

	setup_save_ram_nasty(&info, IRFR_RAM_SIZE,
			IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);

	if (info.ptr) {
		save_end(h, info.ptr);
		info.ptr += SAVE_END_SIZE;
	}

	/* make the emitted opcodes visible before the buffer is used */
	wmb();

	h->save_size = info.save_count + SAVE_END_SIZE;
	restore_size = info.restore_count + RESTORE_END_SIZE;
}
| 359 | |||
| 360 | |||
| 361 | static u32 calculate_mpe(u32 word, struct mpe_save_info *msi) | ||
| 362 | { | ||
| 363 | u32 buffer_full_read = msi->in[0] & 0x01ffffff; | ||
| 364 | u32 byte_len = msi->in[1]; | ||
| 365 | u32 drain = (msi->in[2] >> 2) & 0x007fffff; | ||
| 366 | u32 rep_frame = msi->in[3] & 0x0000ffff; | ||
| 367 | u32 h264_mode = (msi->in[4] >> 11) & 1; | ||
| 368 | int new_buffer_full; | ||
| 369 | |||
| 370 | if (h264_mode) | ||
| 371 | byte_len >>= 3; | ||
| 372 | new_buffer_full = buffer_full_read + byte_len - (drain * 4); | ||
| 373 | msi->out[0] = max(0, new_buffer_full); | ||
| 374 | msi->out[1] = rep_frame; | ||
| 375 | if (rep_frame == 0) | ||
| 376 | word &= 0xffff0000; | ||
| 377 | return word; | ||
| 378 | } | ||
| 379 | |||
/* Drain saved register values from the channel's read FIFO into the
 * restore buffer at ptr, applying per-entry handling: NORMAL entries are
 * copied verbatim, STASH entries are remembered in msi->in[], WRITEBACK
 * entries come from msi->out[], and the remainder are recomputed through
 * calculate_mpe().  Returns the advanced write pointer. */
static u32 *save_regs(u32 *ptr, unsigned int *pending,
			struct nvhost_channel *channel,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs,
			struct mpe_save_info *msi)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;

	for ( ; regs != rend; ++regs) {
		u32 count = regs->count;
		++ptr; /* restore incr */
		if (regs->type == HWCTX_REGINFO_NORMAL) {
			host1x_drain_read_fifo(channel->aperture,
					ptr, count, pending);
			ptr += count;
		} else {
			u32 word;
			if (regs->type == HWCTX_REGINFO_WRITEBACK) {
				BUG_ON(msi->out_pos >= NR_WRITEBACKS);
				word = msi->out[msi->out_pos++];
			} else {
				host1x_drain_read_fifo(channel->aperture,
						&word, 1, pending);
				if (regs->type == HWCTX_REGINFO_STASH) {
					BUG_ON(msi->in_pos >= NR_STASHES);
					msi->in[msi->in_pos++] = word;
				} else {
					word = calculate_mpe(word, msi);
				}
			}
			*ptr++ = word;
		}
	}
	return ptr;
}
| 415 | |||
| 416 | static u32 *save_ram(u32 *ptr, unsigned int *pending, | ||
| 417 | struct nvhost_channel *channel, | ||
| 418 | unsigned words, unsigned cmd_reg, unsigned data_reg) | ||
| 419 | { | ||
| 420 | int err = 0; | ||
| 421 | ptr += RESTORE_RAM_SIZE; | ||
| 422 | err = host1x_drain_read_fifo(channel->aperture, ptr, words, pending); | ||
| 423 | WARN_ON(err); | ||
| 424 | return ptr + words; | ||
| 425 | } | ||
| 426 | |||
| 427 | |||
| 428 | /*** ctxmpe ***/ | ||
| 429 | |||
/* Allocate an MPE hardware context: a write-combined nvmap buffer sized
 * from the file-scope restore_size, mapped and pinned, pre-filled with the
 * restore command stream.  Returns NULL on any allocation failure. */
static struct nvhost_hwctx *ctxmpe_alloc(struct nvhost_hwctx_handler *h,
		struct nvhost_channel *ch)
{
	struct nvmap_client *nvmap = nvhost_get_host(ch->dev)->nvmap;
	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
	struct host1x_hwctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->restore = nvmap_alloc(nvmap, restore_size * 4, 32,
				NVMAP_HANDLE_WRITE_COMBINE, 0);
	if (IS_ERR_OR_NULL(ctx->restore)) {
		kfree(ctx);
		return NULL;
	}

	ctx->restore_virt = nvmap_mmap(ctx->restore);
	if (!ctx->restore_virt) {
		nvmap_free(nvmap, ctx->restore);
		kfree(ctx);
		return NULL;
	}

	kref_init(&ctx->hwctx.ref);
	ctx->hwctx.h = &p->h;
	ctx->hwctx.channel = ch;
	ctx->hwctx.valid = false;
	/* save uses 3 syncpt increments, with the threshold at 2 */
	ctx->save_incrs = 3;
	ctx->save_thresh = 2;
	/* NOTE(review): nvmap_pin() result is not error-checked here —
	 * confirm it cannot fail for this allocation type */
	ctx->restore_phys = nvmap_pin(nvmap, ctx->restore);
	ctx->restore_size = restore_size;
	ctx->restore_incrs = 1;

	setup_restore(p, ctx->restore_virt);

	return &ctx->hwctx;
}
| 468 | |||
/* Take a reference on a hardware context. */
static void ctxmpe_get(struct nvhost_hwctx *ctx)
{
	kref_get(&ctx->ref);
}
| 473 | |||
/* kref release callback: unmap, unpin, and free the restore buffer, then
 * free the context itself. */
static void ctxmpe_free(struct kref *ref)
{
	struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct nvmap_client *nvmap =
		nvhost_get_host(nctx->channel->dev)->nvmap;

	if (ctx->restore_virt)
		nvmap_munmap(ctx->restore, ctx->restore_virt);
	nvmap_unpin(nvmap, ctx->restore);
	nvmap_free(nvmap, ctx->restore);
	kfree(ctx);
}
| 487 | |||
/* Drop a reference on a hardware context; frees it on the last put. */
static void ctxmpe_put(struct nvhost_hwctx *ctx)
{
	kref_put(&ctx->ref, ctxmpe_free);
}
| 492 | |||
/* Push the pre-built context-save gather (h->save_phys, h->save_size
 * words) onto the channel's command DMA. */
static void ctxmpe_save_push(struct nvhost_hwctx *nctx,
		struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
	nvhost_cdma_push(cdma,
			nvhost_opcode_gather(h->save_size),
			h->save_phys);
}
| 502 | |||
/* CPU-side half of the context save: drain register and RAM values pushed
 * by the save gather into the context's restore buffer, then signal
 * completion with a CPU syncpoint increment (the "cpu incr 3" that
 * save_end() waits for). */
static void ctxmpe_save_service(struct nvhost_hwctx *nctx)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);

	u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
	unsigned int pending = 0;
	struct mpe_save_info msi;

	msi.in_pos = 0;
	msi.out_pos = 0;

	ptr = save_regs(ptr, &pending, nctx->channel,
			ctxsave_regs_mpe, ARRAY_SIZE(ctxsave_regs_mpe), &msi);

	ptr = save_ram(ptr, &pending, nctx->channel,
		RC_RAM_SIZE, RC_RAM_READ_CMD, RC_RAM_READ_DATA);

	ptr = save_ram(ptr, &pending, nctx->channel,
		IRFR_RAM_SIZE, IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);

	/* ensure the restore buffer is fully written before signalling */
	wmb();
	nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
			h->syncpt);
}
| 528 | |||
| 529 | struct nvhost_hwctx_handler * __init nvhost_mpe_ctxhandler_init( | ||
| 530 | u32 syncpt, u32 waitbase, | ||
| 531 | struct nvhost_channel *ch) | ||
| 532 | { | ||
| 533 | struct nvmap_client *nvmap; | ||
| 534 | u32 *save_ptr; | ||
| 535 | struct host1x_hwctx_handler *p; | ||
| 536 | |||
| 537 | p = kmalloc(sizeof(*p), GFP_KERNEL); | ||
| 538 | if (!p) | ||
| 539 | return NULL; | ||
| 540 | |||
| 541 | nvmap = nvhost_get_host(ch->dev)->nvmap; | ||
| 542 | |||
| 543 | p->syncpt = syncpt; | ||
| 544 | p->waitbase = waitbase; | ||
| 545 | |||
| 546 | setup_save(p, NULL); | ||
| 547 | |||
| 548 | p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32, | ||
| 549 | NVMAP_HANDLE_WRITE_COMBINE, 0); | ||
| 550 | if (IS_ERR(p->save_buf)) { | ||
| 551 | p->save_buf = NULL; | ||
| 552 | return NULL; | ||
| 553 | } | ||
| 554 | |||
| 555 | save_ptr = nvmap_mmap(p->save_buf); | ||
| 556 | if (!save_ptr) { | ||
| 557 | nvmap_free(nvmap, p->save_buf); | ||
| 558 | p->save_buf = NULL; | ||
| 559 | return NULL; | ||
| 560 | } | ||
| 561 | |||
| 562 | p->save_phys = nvmap_pin(nvmap, p->save_buf); | ||
| 563 | |||
| 564 | setup_save(p, save_ptr); | ||
| 565 | |||
| 566 | p->h.alloc = ctxmpe_alloc; | ||
| 567 | p->h.save_push = ctxmpe_save_push; | ||
| 568 | p->h.save_service = ctxmpe_save_service; | ||
| 569 | p->h.get = ctxmpe_get; | ||
| 570 | p->h.put = ctxmpe_put; | ||
| 571 | |||
| 572 | return &p->h; | ||
| 573 | } | ||
| 574 | |||
/* Power-off hook: save the current MPE context via the host1x helper,
 * keyed on the MPE syncpoint. */
int nvhost_mpe_prepare_power_off(struct nvhost_device *dev)
{
	return host1x_save_context(dev, NVSYNCPT_MPE);
}
| 579 | |||
/* Probe: defer all initialization to the common client-device setup. */
static int __devinit mpe_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}
| 584 | |||
/* Remove: currently a stub with no teardown. */
static int __exit mpe_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}
| 590 | |||
/* Suspend: defer to the common client-device suspend path. */
static int mpe_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}
| 595 | |||
/* Resume: nothing to restore here; just log the event. */
static int mpe_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
| 601 | |||
/* Device handle looked up in mpe_init(); registered before the driver. */
struct nvhost_device *mpe_device;

/* nvhost driver binding for the "mpe" device. */
static struct nvhost_driver mpe_driver = {
	.probe = mpe_probe,
	.remove = __exit_p(mpe_remove),
#ifdef CONFIG_PM
	.suspend = mpe_suspend,
	.resume = mpe_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "mpe",
	}
};
| 616 | |||
/* Module init: look up the "mpe" platform description, register the
 * device, then the driver. */
static int __init mpe_init(void)
{
	int err;

	mpe_device = nvhost_get_device("mpe");
	if (!mpe_device)
		return -ENXIO;

	err = nvhost_device_register(mpe_device);
	if (err)
		return err;

	/* NOTE(review): if driver registration fails the device stays
	 * registered — confirm whether it should be unregistered here */
	return nvhost_driver_register(&mpe_driver);
}
| 631 | |||
/* Module exit: unregister the driver (the device is left registered,
 * mirroring mpe_init's registration order). */
static void __exit mpe_exit(void)
{
	nvhost_driver_unregister(&mpe_driver);
}

module_init(mpe_init);
module_exit(mpe_exit);
diff --git a/drivers/video/tegra/host/mpe/mpe.h b/drivers/video/tegra/host/mpe/mpe.h new file mode 100644 index 00000000000..1bc2a8a04c1 --- /dev/null +++ b/drivers/video/tegra/host/mpe/mpe.h | |||
| @@ -0,0 +1,32 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/mpe/mpe.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host MPE | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
#ifndef __NVHOST_MPE_MPE_H
#define __NVHOST_MPE_MPE_H

struct nvhost_hwctx_handler;
struct nvhost_device;

/* Create the MPE context-save/restore handler for channel ch. */
struct nvhost_hwctx_handler *nvhost_mpe_ctxhandler_init(
	u32 syncpt, u32 waitbase,
	struct nvhost_channel *ch);
/* Save the MPE context before the unit is power-gated. */
int nvhost_mpe_prepare_power_off(struct nvhost_device *dev);

#endif
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c new file mode 100644 index 00000000000..318f209651a --- /dev/null +++ b/drivers/video/tegra/host/nvhost_acm.c | |||
| @@ -0,0 +1,467 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_acm.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Automatic Clock Management | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "nvhost_acm.h" | ||
| 22 | #include "dev.h" | ||
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/string.h> | ||
| 25 | #include <linux/sched.h> | ||
| 26 | #include <linux/err.h> | ||
| 27 | #include <linux/device.h> | ||
| 28 | #include <linux/delay.h> | ||
| 29 | #include <linux/platform_device.h> | ||
| 30 | #include <mach/powergate.h> | ||
| 31 | #include <mach/clk.h> | ||
| 32 | #include <mach/hardware.h> | ||
| 33 | |||
/* How long suspend waits for a module to go idle before giving up. */
#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ)
/* Microseconds to hold reset asserted in nvhost_module_reset(). */
#define POWERGATE_DELAY 10
/* Buffer size for the "tegra_<name>" clock device id. */
#define MAX_DEVID_LENGTH 16

/* Protects every dev->client_list across all modules. */
DEFINE_MUTEX(client_list_lock);

/* Per-client clock-rate request, linked into dev->client_list;
 * priv identifies the client that registered the request. */
struct nvhost_module_client {
	struct list_head node;
	unsigned long rate[NVHOST_MODULE_MAX_CLOCKS];
	void *priv;
};
| 45 | |||
/* Power-gate partition `id` if it is currently powered.
 * An id of -1 means the module has no powergate partition. */
static void do_powergate_locked(int id)
{
	if (id == -1)
		return;
	if (tegra_powergate_is_powered(id))
		tegra_powergate_partition(id);
}
| 51 | |||
/* Un-power-gate partition `id`; -1 means no partition, so do nothing. */
static void do_unpowergate_locked(int id)
{
	if (id == -1)
		return;
	tegra_unpowergate_partition(id);
}
| 57 | |||
/* Hard-reset a module: assert the memory-client and peripheral resets for
 * both powergate partitions, hold them for POWERGATE_DELAY us, then
 * deassert in reverse order.  Serialized under dev->lock. */
void nvhost_module_reset(struct nvhost_device *dev)
{
	dev_dbg(&dev->dev,
		"%s: asserting %s module reset (id %d, id2 %d)\n",
		__func__, dev->name,
		dev->powergate_ids[0], dev->powergate_ids[1]);

	mutex_lock(&dev->lock);

	/* assert module and mc client reset */
	if (dev->powergate_ids[0] != -1) {
		tegra_powergate_mc_disable(dev->powergate_ids[0]);
		tegra_periph_reset_assert(dev->clk[0]);
		tegra_powergate_mc_flush(dev->powergate_ids[0]);
	}
	if (dev->powergate_ids[1] != -1) {
		tegra_powergate_mc_disable(dev->powergate_ids[1]);
		tegra_periph_reset_assert(dev->clk[1]);
		tegra_powergate_mc_flush(dev->powergate_ids[1]);
	}

	udelay(POWERGATE_DELAY);

	/* deassert reset */
	if (dev->powergate_ids[0] != -1) {
		tegra_powergate_mc_flush_done(dev->powergate_ids[0]);
		tegra_periph_reset_deassert(dev->clk[0]);
		tegra_powergate_mc_enable(dev->powergate_ids[0]);
	}
	if (dev->powergate_ids[1] != -1) {
		tegra_powergate_mc_flush_done(dev->powergate_ids[1]);
		tegra_periph_reset_deassert(dev->clk[1]);
		tegra_powergate_mc_enable(dev->powergate_ids[1]);
	}

	mutex_unlock(&dev->lock);

	dev_dbg(&dev->dev, "%s: module %s out of reset\n",
		__func__, dev->name);
}
| 98 | |||
/* Transition to CLOCKGATED: from RUNNING, disable the clocks and drop the
 * parent's busy ref; from POWERGATED (when allowed), un-power-gate both
 * partitions.  Caller holds dev->lock. */
static void to_state_clockgated_locked(struct nvhost_device *dev)
{
	if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) {
		int i;
		for (i = 0; i < dev->num_clks; i++)
			clk_disable(dev->clk[i]);
		if (dev->dev.parent)
			nvhost_module_idle(to_nvhost_device(dev->dev.parent));
	} else if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED
		&& dev->can_powergate) {
		do_unpowergate_locked(dev->powergate_ids[0]);
		do_unpowergate_locked(dev->powergate_ids[1]);
	}
	dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED;
}
| 114 | |||
/* Transition to RUNNING: first climb out of POWERGATED via CLOCKGATED,
 * then take the parent's busy ref, enable all clocks, and — if we came
 * from POWERGATED — run the module's finalize_poweron hook.
 * Caller holds dev->lock. */
static void to_state_running_locked(struct nvhost_device *dev)
{
	int prev_state = dev->powerstate;
	if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED)
		to_state_clockgated_locked(dev);
	if (dev->powerstate == NVHOST_POWER_STATE_CLOCKGATED) {
		int i;

		if (dev->dev.parent)
			nvhost_module_busy(to_nvhost_device(dev->dev.parent));

		for (i = 0; i < dev->num_clks; i++) {
			int err = clk_enable(dev->clk[i]);
			BUG_ON(err);
		}

		if (prev_state == NVHOST_POWER_STATE_POWERGATED
			&& dev->finalize_poweron)
			dev->finalize_poweron(dev);
	}
	dev->powerstate = NVHOST_POWER_STATE_RUNNING;
}
| 137 | |||
/* This gets called from powergate_handler() and from module suspend.
 * Module suspend is done for all modules, runtime power gating only
 * for modules with can_powergate set.
 *
 * Transition to POWERGATED.  If the module has a prepare_poweroff hook
 * the clocks are briefly turned back on for it; a hook failure aborts the
 * transition.  Caller holds dev->lock.  Returns 0 or the hook's error. */
static int to_state_powergated_locked(struct nvhost_device *dev)
{
	int err = 0;

	if (dev->prepare_poweroff
		&& dev->powerstate != NVHOST_POWER_STATE_POWERGATED) {
		/* Clock needs to be on in prepare_poweroff */
		to_state_running_locked(dev);
		err = dev->prepare_poweroff(dev);
		if (err)
			return err;
	}

	if (dev->powerstate == NVHOST_POWER_STATE_RUNNING)
		to_state_clockgated_locked(dev);

	if (dev->can_powergate) {
		do_powergate_locked(dev->powergate_ids[0]);
		do_powergate_locked(dev->powergate_ids[1]);
	}

	dev->powerstate = NVHOST_POWER_STATE_POWERGATED;
	return 0;
}
| 166 | |||
/* Queue the delayed power-down work after powergate_delay ms — only for
 * modules that may be power-gated.  Caller holds dev->lock. */
static void schedule_powergating_locked(struct nvhost_device *dev)
{
	if (dev->can_powergate)
		schedule_delayed_work(&dev->powerstate_down,
				msecs_to_jiffies(dev->powergate_delay));
}
| 173 | |||
/* Queue the delayed power-down work after clockgate_delay ms.
 * Caller holds dev->lock. */
static void schedule_clockgating_locked(struct nvhost_device *dev)
{
	schedule_delayed_work(&dev->powerstate_down,
			msecs_to_jiffies(dev->clockgate_delay));
}
| 179 | |||
/* Take a busy reference on the module: cancel any pending power-down and
 * bring the module to RUNNING on the 0 -> 1 transition.  The optional
 * per-module busy() hook is invoked before taking the lock. */
void nvhost_module_busy(struct nvhost_device *dev)
{
	if (dev->busy)
		dev->busy(dev);

	mutex_lock(&dev->lock);
	cancel_delayed_work(&dev->powerstate_down);

	dev->refcount++;
	if (dev->refcount > 0 && !nvhost_module_powered(dev))
		to_state_running_locked(dev);
	mutex_unlock(&dev->lock);
}
| 193 | |||
/* Delayed-work handler that steps an idle module one power state down:
 * RUNNING -> CLOCKGATED (and re-arms for power-gating), CLOCKGATED ->
 * POWERGATED (retrying later if prepare_poweroff failed).  Does nothing
 * if the module became busy again. */
static void powerstate_down_handler(struct work_struct *work)
{
	struct nvhost_device *dev;

	dev = container_of(to_delayed_work(work),
			struct nvhost_device,
			powerstate_down);

	mutex_lock(&dev->lock);
	if (dev->refcount == 0) {
		switch (dev->powerstate) {
		case NVHOST_POWER_STATE_RUNNING:
			to_state_clockgated_locked(dev);
			schedule_powergating_locked(dev);
			break;
		case NVHOST_POWER_STATE_CLOCKGATED:
			/* non-zero return: poweroff was refused, retry */
			if (to_state_powergated_locked(dev))
				schedule_powergating_locked(dev);
			break;
		default:
			break;
		}
	}
	mutex_unlock(&dev->lock);
}
| 219 | |||
| 220 | |||
/* Drop `refs` busy references.  On the transition to idle, schedule
 * clock-gating, wake suspend waiters on idle_wq, and invoke the optional
 * per-module idle() hook (outside the lock). */
void nvhost_module_idle_mult(struct nvhost_device *dev, int refs)
{
	bool kick = false;

	mutex_lock(&dev->lock);
	dev->refcount -= refs;
	if (dev->refcount == 0) {
		if (nvhost_module_powered(dev))
			schedule_clockgating_locked(dev);
		kick = true;
	}
	mutex_unlock(&dev->lock);

	if (kick) {
		wake_up(&dev->idle_wq);

		if (dev->idle)
			dev->idle(dev);
	}
}
| 241 | |||
/* Read the current rate of clock `index` into *rate.  The module is made
 * busy around the read so the clock tree reports a live rate.
 * Returns 0 or -EINVAL for a missing clock. */
int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate,
		int index)
{
	struct clk *c;

	c = dev->clk[index];
	if (IS_ERR_OR_NULL(c))
		return -EINVAL;

	/* Need to enable client to get correct rate */
	nvhost_module_busy(dev);
	*rate = clk_get_rate(c);
	nvhost_module_idle(dev);
	return 0;

}
| 258 | |||
/* Re-apply clock `index`: set it to the maximum rate requested by any
 * registered client, falling back to the rounded default rate when no
 * client has a request.  Returns clk_set_rate()'s result. */
static int nvhost_module_update_rate(struct nvhost_device *dev, int index)
{
	unsigned long rate = 0;
	struct nvhost_module_client *m;

	if (!dev->clk[index])
		return -EINVAL;

	list_for_each_entry(m, &dev->client_list, node) {
		rate = max(m->rate[index], rate);
	}
	if (!rate)
		rate = clk_round_rate(dev->clk[index],
				dev->clocks[index].default_rate);

	return clk_set_rate(dev->clk[index], rate);
}
| 276 | |||
/* Record a client's requested rate (rounded per clock) and re-apply every
 * clock of the module.  `priv` identifies the client added earlier with
 * nvhost_module_add_client().  Note: the same rounded `rate` request is
 * stored for all clocks, not just `index`.  Returns 0 or the first
 * clk_set_rate() error. */
int nvhost_module_set_rate(struct nvhost_device *dev, void *priv,
		unsigned long rate, int index)
{
	struct nvhost_module_client *m;
	int i, ret = 0;

	mutex_lock(&client_list_lock);
	list_for_each_entry(m, &dev->client_list, node) {
		if (m->priv == priv) {
			for (i = 0; i < dev->num_clks; i++)
				m->rate[i] = clk_round_rate(dev->clk[i], rate);
			break;
		}
	}

	for (i = 0; i < dev->num_clks; i++) {
		ret = nvhost_module_update_rate(dev, i);
		if (ret < 0)
			break;
	}
	mutex_unlock(&client_list_lock);
	return ret;

}
| 301 | |||
/* Register a clock client identified by `priv`, initialized with each
 * clock's rounded default rate, and link it into dev->client_list.
 * Returns 0 or -ENOMEM. */
int nvhost_module_add_client(struct nvhost_device *dev, void *priv)
{
	int i;
	unsigned long rate;
	struct nvhost_module_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	INIT_LIST_HEAD(&client->node);
	client->priv = priv;

	for (i = 0; i < dev->num_clks; i++) {
		rate = clk_round_rate(dev->clk[i],
				dev->clocks[i].default_rate);
		client->rate[i] = rate;
	}
	mutex_lock(&client_list_lock);
	list_add_tail(&client->node, &dev->client_list);
	mutex_unlock(&client_list_lock);
	return 0;
}
| 325 | |||
| 326 | void nvhost_module_remove_client(struct nvhost_device *dev, void *priv) | ||
| 327 | { | ||
| 328 | int i; | ||
| 329 | struct nvhost_module_client *m; | ||
| 330 | |||
| 331 | mutex_lock(&client_list_lock); | ||
| 332 | list_for_each_entry(m, &dev->client_list, node) { | ||
| 333 | if (priv == m->priv) { | ||
| 334 | list_del(&m->node); | ||
| 335 | break; | ||
| 336 | } | ||
| 337 | } | ||
| 338 | if (m) { | ||
| 339 | kfree(m); | ||
| 340 | for (i = 0; i < dev->num_clks; i++) | ||
| 341 | nvhost_module_update_rate(dev, i); | ||
| 342 | } | ||
| 343 | mutex_unlock(&client_list_lock); | ||
| 344 | } | ||
| 345 | |||
| 346 | int nvhost_module_init(struct nvhost_device *dev) | ||
| 347 | { | ||
| 348 | int i = 0; | ||
| 349 | |||
| 350 | /* initialize clocks to known state */ | ||
| 351 | INIT_LIST_HEAD(&dev->client_list); | ||
| 352 | while (dev->clocks[i].name && i < NVHOST_MODULE_MAX_CLOCKS) { | ||
| 353 | char devname[MAX_DEVID_LENGTH]; | ||
| 354 | long rate = dev->clocks[i].default_rate; | ||
| 355 | struct clk *c; | ||
| 356 | |||
| 357 | snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", dev->name); | ||
| 358 | c = clk_get_sys(devname, dev->clocks[i].name); | ||
| 359 | BUG_ON(IS_ERR_OR_NULL(c)); | ||
| 360 | |||
| 361 | rate = clk_round_rate(c, rate); | ||
| 362 | clk_enable(c); | ||
| 363 | clk_set_rate(c, rate); | ||
| 364 | clk_disable(c); | ||
| 365 | dev->clk[i] = c; | ||
| 366 | i++; | ||
| 367 | } | ||
| 368 | dev->num_clks = i; | ||
| 369 | |||
| 370 | mutex_init(&dev->lock); | ||
| 371 | init_waitqueue_head(&dev->idle_wq); | ||
| 372 | INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler); | ||
| 373 | |||
| 374 | /* power gate units that we can power gate */ | ||
| 375 | if (dev->can_powergate) { | ||
| 376 | do_powergate_locked(dev->powergate_ids[0]); | ||
| 377 | do_powergate_locked(dev->powergate_ids[1]); | ||
| 378 | dev->powerstate = NVHOST_POWER_STATE_POWERGATED; | ||
| 379 | } else { | ||
| 380 | do_unpowergate_locked(dev->powergate_ids[0]); | ||
| 381 | do_unpowergate_locked(dev->powergate_ids[1]); | ||
| 382 | dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED; | ||
| 383 | } | ||
| 384 | |||
| 385 | return 0; | ||
| 386 | } | ||
| 387 | |||
| 388 | static int is_module_idle(struct nvhost_device *dev) | ||
| 389 | { | ||
| 390 | int count; | ||
| 391 | mutex_lock(&dev->lock); | ||
| 392 | count = dev->refcount; | ||
| 393 | mutex_unlock(&dev->lock); | ||
| 394 | return (count == 0); | ||
| 395 | } | ||
| 396 | |||
/* Diagnostic dump used when suspend finds a module still busy: log every
 * channel device's refcount and every held module lock (mlock). */
static void debug_not_idle(struct nvhost_master *host)
{
	int i;
	bool lock_released = true;

	for (i = 0; i < host->nb_channels; i++) {
		struct nvhost_device *dev = host->channels[i].dev;
		mutex_lock(&dev->lock);
		if (dev->name)
			dev_warn(&host->dev->dev,
				"tegra_grhost: %s: refcnt %d\n", dev->name,
				dev->refcount);
		mutex_unlock(&dev->lock);
	}

	for (i = 0; i < host->syncpt.nb_mlocks; i++) {
		int c = atomic_read(&host->syncpt.lock_counts[i]);
		if (c) {
			dev_warn(&host->dev->dev,
				"tegra_grhost: lock id %d: refcnt %d\n",
				i, c);
			lock_released = false;
		}
	}
	if (lock_released)
		dev_dbg(&host->dev->dev, "tegra_grhost: all locks released\n");
}
| 424 | |||
/* Suspend a module: wait (bounded) for it to go idle, then force it to
 * POWERGATED and run the optional suspend() hook.  Returns -EBUSY if the
 * module never went idle within the timeout. */
int nvhost_module_suspend(struct nvhost_device *dev, bool system_suspend)
{
	int ret;
	struct nvhost_master *host = nvhost_get_host(dev);

	if (system_suspend && !is_module_idle(dev))
		debug_not_idle(host);

	ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev),
			ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT);
	if (ret == 0) {
		dev_info(&dev->dev, "%s prevented suspend\n",
				dev->name);
		return -EBUSY;
	}

	if (system_suspend)
		dev_dbg(&dev->dev, "tegra_grhost: entered idle\n");

	mutex_lock(&dev->lock);
	cancel_delayed_work(&dev->powerstate_down);
	to_state_powergated_locked(dev);
	mutex_unlock(&dev->lock);

	if (dev->suspend)
		dev->suspend(dev);

	return 0;
}
| 454 | |||
/* Tear down a module: run the optional deinit() hook, suspend/power-gate
 * it, release every clock, and mark the power state DEINIT. */
void nvhost_module_deinit(struct nvhost_device *dev)
{
	int i;

	if (dev->deinit)
		dev->deinit(dev);

	/* best-effort power-down; return value intentionally ignored */
	nvhost_module_suspend(dev, false);
	for (i = 0; i < dev->num_clks; i++)
		clk_put(dev->clk[i]);
	dev->powerstate = NVHOST_POWER_STATE_DEINIT;
}
| 467 | |||
diff --git a/drivers/video/tegra/host/nvhost_acm.h b/drivers/video/tegra/host/nvhost_acm.h new file mode 100644 index 00000000000..a12c0c3fa32 --- /dev/null +++ b/drivers/video/tegra/host/nvhost_acm.h | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_acm.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Automatic Clock Management | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_ACM_H | ||
| 22 | #define __NVHOST_ACM_H | ||
| 23 | |||
| 24 | #include <linux/workqueue.h> | ||
| 25 | #include <linux/wait.h> | ||
| 26 | #include <linux/mutex.h> | ||
| 27 | #include <linux/clk.h> | ||
| 28 | #include <linux/nvhost.h> | ||
| 29 | |||
| 30 | /* Sets clocks and powergating state for a module */ | ||
| 31 | int nvhost_module_init(struct nvhost_device *ndev); | ||
| 32 | void nvhost_module_deinit(struct nvhost_device *dev); | ||
| 33 | int nvhost_module_suspend(struct nvhost_device *dev, bool system_suspend); | ||
| 34 | |||
| 35 | void nvhost_module_reset(struct nvhost_device *dev); | ||
| 36 | void nvhost_module_busy(struct nvhost_device *dev); | ||
| 37 | void nvhost_module_idle_mult(struct nvhost_device *dev, int refs); | ||
| 38 | int nvhost_module_add_client(struct nvhost_device *dev, | ||
| 39 | void *priv); | ||
| 40 | void nvhost_module_remove_client(struct nvhost_device *dev, | ||
| 41 | void *priv); | ||
| 42 | int nvhost_module_get_rate(struct nvhost_device *dev, | ||
| 43 | unsigned long *rate, | ||
| 44 | int index); | ||
| 45 | int nvhost_module_set_rate(struct nvhost_device *dev, void *priv, | ||
| 46 | unsigned long rate, int index); | ||
| 47 | |||
| 48 | |||
/* Returns true iff the module is fully powered and clocked (RUNNING state) */
static inline bool nvhost_module_powered(struct nvhost_device *dev)
{
	return dev->powerstate == NVHOST_POWER_STATE_RUNNING;
}
| 53 | |||
/* Drop a single busy reference taken with nvhost_module_busy() */
static inline void nvhost_module_idle(struct nvhost_device *dev)
{
	nvhost_module_idle_mult(dev, 1);
}
| 58 | |||
| 59 | |||
| 60 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_cdma.c b/drivers/video/tegra/host/nvhost_cdma.c new file mode 100644 index 00000000000..775d761e65c --- /dev/null +++ b/drivers/video/tegra/host/nvhost_cdma.c | |||
| @@ -0,0 +1,508 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_cdma.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Command DMA | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "nvhost_cdma.h" | ||
| 22 | #include "dev.h" | ||
| 23 | #include <asm/cacheflush.h> | ||
| 24 | |||
| 25 | #include <linux/slab.h> | ||
| 26 | #include <linux/kfifo.h> | ||
| 27 | #include <trace/events/nvhost.h> | ||
| 28 | #include <linux/interrupt.h> | ||
| 29 | |||
| 30 | /* | ||
| 31 | * TODO: | ||
| 32 | * stats | ||
| 33 | * - for figuring out what to optimize further | ||
| 34 | * resizable push buffer | ||
| 35 | * - some channels hardly need any, some channels (3d) could use more | ||
| 36 | */ | ||
| 37 | |||
/**
 * Add an entry to the sync queue.
 * Records where the job begins in the push buffer (@first_get) and how many
 * push buffer slots it occupies, takes a reference on the job, and appends
 * it to the channel's sync queue for later completion processing.
 * Must be called with the cdma lock held (callers hold it across submit).
 */
static void add_to_sync_queue(struct nvhost_cdma *cdma,
			      struct nvhost_job *job,
			      u32 nr_slots,
			      u32 first_get)
{
	BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);

	job->first_get = first_get;
	job->num_slots = nr_slots;
	nvhost_job_get(job);
	list_add_tail(&job->list, &cdma->sync_queue);
}
| 53 | |||
| 54 | /** | ||
| 55 | * Return the status of the cdma's sync queue or push buffer for the given event | ||
| 56 | * - sq empty: returns 1 for empty, 0 for not empty (as in "1 empty queue" :-) | ||
| 57 | * - pb space: returns the number of free slots in the channel's push buffer | ||
| 58 | * Must be called with the cdma lock held. | ||
| 59 | */ | ||
| 60 | static unsigned int cdma_status_locked(struct nvhost_cdma *cdma, | ||
| 61 | enum cdma_event event) | ||
| 62 | { | ||
| 63 | switch (event) { | ||
| 64 | case CDMA_EVENT_SYNC_QUEUE_EMPTY: | ||
| 65 | return list_empty(&cdma->sync_queue) ? 1 : 0; | ||
| 66 | case CDMA_EVENT_PUSH_BUFFER_SPACE: { | ||
| 67 | struct push_buffer *pb = &cdma->push_buffer; | ||
| 68 | BUG_ON(!cdma_pb_op(cdma).space); | ||
| 69 | return cdma_pb_op(cdma).space(pb); | ||
| 70 | } | ||
| 71 | default: | ||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | } | ||
| 75 | |||
/**
 * Sleep (if necessary) until the requested event happens
 * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
 *   - Returns 1
 * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
 *   - Return the amount of space (> 0)
 * Must be called with the cdma lock held; the lock is dropped while
 * sleeping on cdma->sem and re-taken before re-checking the condition.
 */
unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma,
				enum cdma_event event)
{
	for (;;) {
		unsigned int space = cdma_status_locked(cdma, event);
		if (space)
			return space;

		trace_nvhost_wait_cdma(cdma_to_channel(cdma)->dev->name,
				event);

		/* only one waiter at a time may register an event */
		BUG_ON(cdma->event != CDMA_EVENT_NONE);
		cdma->event = event;

		/* drop the lock so update_cdma_locked() can signal us */
		mutex_unlock(&cdma->lock);
		down(&cdma->sem);
		mutex_lock(&cdma->lock);
	}
	/* unreachable: the loop only exits via return */
	return 0;
}
| 104 | |||
/**
 * Start the timeout timer for a buffer submission that hasn't completed yet.
 * No-op if a timer is already running (it covers the oldest pending job).
 * Must be called with the cdma lock held.
 */
static void cdma_start_timer_locked(struct nvhost_cdma *cdma,
		struct nvhost_job *job)
{
	BUG_ON(!job);
	if (cdma->timeout.clientid) {
		/* timer already started */
		return;
	}

	/* snapshot the job's identity so the timeout handler can verify it */
	cdma->timeout.ctx = job->hwctx;
	cdma->timeout.clientid = job->clientid;
	cdma->timeout.syncpt_id = job->syncpt_id;
	cdma->timeout.syncpt_val = job->syncpt_end;
	cdma->timeout.start_ktime = ktime_get();

	schedule_delayed_work(&cdma->timeout.wq,
			msecs_to_jiffies(job->timeout));
}
| 127 | |||
/**
 * Stop the timeout timer when a buffer submission completes.
 * Clearing clientid marks "no timer running" for cdma_start_timer_locked().
 * Must be called with the cdma lock held.
 */
static void stop_cdma_timer_locked(struct nvhost_cdma *cdma)
{
	cancel_delayed_work(&cdma->timeout.wq);
	cdma->timeout.ctx = NULL;
	cdma->timeout.clientid = 0;
}
| 138 | |||
/**
 * For all sync queue entries that have already finished according to the
 * current sync point registers:
 *  - unpin & unref their mems
 *  - pop their push buffer slots
 *  - remove them from the sync queue
 * This is normally called from the host code's worker thread, but can be
 * called manually if necessary.
 * Must be called with the cdma lock held.
 */
static void update_cdma_locked(struct nvhost_cdma *cdma)
{
	bool signal = false;
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct nvhost_syncpt *sp = &dev->syncpt;
	struct nvhost_job *job, *n;

	BUG_ON(!cdma->running);

	/*
	 * Walk the sync queue, reading the sync point registers as necessary,
	 * to consume as many sync queue entries as possible without blocking
	 */
	list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
		BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);

		/* Check whether this syncpt has completed, and bail if not */
		if (!nvhost_syncpt_is_expired(sp,
				job->syncpt_id, job->syncpt_end)) {
			/* Start timer on next pending syncpt */
			if (job->timeout)
				cdma_start_timer_locked(cdma, job);
			break;
		}

		/* Cancel timeout, when a buffer completes */
		if (cdma->timeout.clientid)
			stop_cdma_timer_locked(cdma);

		/* Unpin the memory */
		nvhost_job_unpin(job);

		/* Pop push buffer slots */
		if (job->num_slots) {
			struct push_buffer *pb = &cdma->push_buffer;
			BUG_ON(!cdma_pb_op(cdma).pop_from);
			cdma_pb_op(cdma).pop_from(pb, job->num_slots);
			/* a waiter may be blocked on push buffer space */
			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
				signal = true;
		}

		/* drops the reference taken in add_to_sync_queue() */
		list_del(&job->list);
		nvhost_job_put(job);
	}

	if (list_empty(&cdma->sync_queue) &&
			cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
		signal = true;

	/* Wake up CdmaWait() if the requested event happened */
	if (signal) {
		cdma->event = CDMA_EVENT_NONE;
		up(&cdma->sem);
	}
}
| 204 | |||
/**
 * Recover the sync queue after a channel timeout/teardown.
 * CPU-increments syncpts for pending buffers of the timed-out client,
 * patches interleaved buffers of other clients to increment from the
 * push buffer, then restarts the channel at the computed DMAGET.
 * NOTE(review): appears to assume the caller holds the cdma lock and has
 * already torn the channel down — confirm against the timeout handler.
 */
void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma,
		struct nvhost_syncpt *syncpt, struct device *dev)
{
	u32 get_restart;
	u32 syncpt_incrs;
	bool exec_ctxsave;
	struct nvhost_job *job = NULL;
	u32 syncpt_val;

	syncpt_val = nvhost_syncpt_update_min(syncpt, cdma->timeout.syncpt_id);

	dev_dbg(dev,
		"%s: starting cleanup (thresh %d)\n",
		__func__, syncpt_val);

	/*
	 * Move the sync_queue read pointer to the first entry that hasn't
	 * completed based on the current HW syncpt value. It's likely there
	 * won't be any (i.e. we're still at the head), but covers the case
	 * where a syncpt incr happens just prior/during the teardown.
	 */

	dev_dbg(dev,
		"%s: skip completed buffers still in sync_queue\n",
		__func__);

	list_for_each_entry(job, &cdma->sync_queue, list) {
		if (syncpt_val < job->syncpt_end)
			break;

		nvhost_job_dump(dev, job);
	}

	/*
	 * Walk the sync_queue, first incrementing with the CPU syncpts that
	 * are partially executed (the first buffer) or fully skipped while
	 * still in the current context (slots are also NOP-ed).
	 *
	 * At the point contexts are interleaved, syncpt increments must be
	 * done inline with the pushbuffer from a GATHER buffer to maintain
	 * the order (slots are modified to be a GATHER of syncpt incrs).
	 *
	 * Note: save in get_restart the location where the timed out buffer
	 * started in the PB, so we can start the refetch from there (with the
	 * modified NOP-ed PB slots). This lets things appear to have completed
	 * properly for this buffer and resources are freed.
	 */

	dev_dbg(dev,
		"%s: perform CPU incr on pending same ctx buffers\n",
		__func__);

	/* if the queue is empty, restart from the last DMAPUT instead */
	get_restart = cdma->last_put;
	if (!list_empty(&cdma->sync_queue))
		get_restart = job->first_get;

	/* do CPU increments as long as this context continues */
	list_for_each_entry_from(job, &cdma->sync_queue, list) {
		/* different context, gets us out of this loop */
		if (job->clientid != cdma->timeout.clientid)
			break;

		/* won't need a timeout when replayed */
		job->timeout = 0;

		syncpt_incrs = job->syncpt_end - syncpt_val;
		dev_dbg(dev,
			"%s: CPU incr (%d)\n", __func__, syncpt_incrs);

		nvhost_job_dump(dev, job);

		/* safe to use CPU to incr syncpts */
		cdma_op(cdma).timeout_cpu_incr(cdma,
				job->first_get,
				syncpt_incrs,
				job->syncpt_end,
				job->num_slots);

		syncpt_val += syncpt_incrs;
	}

	dev_dbg(dev,
		"%s: GPU incr blocked interleaved ctx buffers\n",
		__func__);

	exec_ctxsave = false;

	/* setup GPU increments */
	list_for_each_entry_from(job, &cdma->sync_queue, list) {
		/* same context, increment in the pushbuffer */
		if (job->clientid == cdma->timeout.clientid) {
			/* won't need a timeout when replayed */
			job->timeout = 0;

			/* update buffer's syncpts in the pushbuffer */
			cdma_op(cdma).timeout_pb_incr(cdma,
					job->first_get,
					job->syncpt_incrs,
					job->num_slots,
					exec_ctxsave);

			exec_ctxsave = false;
		} else {
			dev_dbg(dev,
				"%s: switch to a different userctx\n",
				__func__);
			/*
			 * If previous context was the timed out context
			 * then clear its CTXSAVE in this slot.
			 */
			exec_ctxsave = true;
		}

		nvhost_job_dump(dev, job);
	}

	dev_dbg(dev,
		"%s: finished sync_queue modification\n", __func__);

	/* roll back DMAGET and start up channel again */
	cdma_op(cdma).timeout_teardown_end(cdma, get_restart);

	/* flag the context so future submits from it are rejected */
	if (cdma->timeout.ctx)
		cdma->timeout.ctx->has_timedout = true;
}
| 330 | |||
| 331 | /** | ||
| 332 | * Create a cdma | ||
| 333 | */ | ||
| 334 | int nvhost_cdma_init(struct nvhost_cdma *cdma) | ||
| 335 | { | ||
| 336 | int err; | ||
| 337 | struct push_buffer *pb = &cdma->push_buffer; | ||
| 338 | BUG_ON(!cdma_pb_op(cdma).init); | ||
| 339 | mutex_init(&cdma->lock); | ||
| 340 | sema_init(&cdma->sem, 0); | ||
| 341 | |||
| 342 | INIT_LIST_HEAD(&cdma->sync_queue); | ||
| 343 | |||
| 344 | cdma->event = CDMA_EVENT_NONE; | ||
| 345 | cdma->running = false; | ||
| 346 | cdma->torndown = false; | ||
| 347 | |||
| 348 | err = cdma_pb_op(cdma).init(pb); | ||
| 349 | if (err) | ||
| 350 | return err; | ||
| 351 | return 0; | ||
| 352 | } | ||
| 353 | |||
/**
 * Destroy a cdma.
 * The channel must already be stopped (cdma->running == false); frees the
 * push buffer and the timeout resources.
 */
void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
{
	struct push_buffer *pb = &cdma->push_buffer;

	BUG_ON(!cdma_pb_op(cdma).destroy);
	BUG_ON(cdma->running);
	cdma_pb_op(cdma).destroy(pb);
	cdma_op(cdma).timeout_destroy(cdma);
}
| 366 | |||
/**
 * Begin a cdma submit.
 * Takes the cdma lock (released later by nvhost_cdma_end()), lazily
 * initializes the timeout machinery on the first timed submit, starts the
 * channel if needed, and records where this submit begins in the PB.
 * Returns 0 on success or the timeout-init error (with the lock released).
 */
int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job)
{
	mutex_lock(&cdma->lock);

	if (job->timeout) {
		/* init state on first submit with timeout value */
		if (!cdma->timeout.initialized) {
			int err;
			BUG_ON(!cdma_op(cdma).timeout_init);
			err = cdma_op(cdma).timeout_init(cdma,
				job->syncpt_id);
			if (err) {
				mutex_unlock(&cdma->lock);
				return err;
			}
		}
	}
	if (!cdma->running) {
		BUG_ON(!cdma_op(cdma).start);
		cdma_op(cdma).start(cdma);
	}
	cdma->slots_free = 0;
	cdma->slots_used = 0;
	/* remember the PB position so the job can be located for teardown */
	cdma->first_get = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
	return 0;
}
| 396 | |||
/**
 * Push two words into a push buffer slot with no associated nvmap handle.
 * Convenience wrapper around nvhost_cdma_push_gather().
 * Blocks as necessary if the push buffer is full.
 */
void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
{
	nvhost_cdma_push_gather(cdma, NULL, NULL, op1, op2);
}
| 405 | |||
/**
 * Push two words into a push buffer slot, tracking the nvmap client/handle
 * backing a referenced gather buffer (both may be NULL for plain opcodes).
 * Blocks as necessary if the push buffer is full.
 * Must be called between nvhost_cdma_begin() and nvhost_cdma_end(), i.e.
 * with the cdma lock held.
 */
void nvhost_cdma_push_gather(struct nvhost_cdma *cdma,
		struct nvmap_client *client,
		struct nvmap_handle *handle, u32 op1, u32 op2)
{
	u32 slots_free = cdma->slots_free;
	struct push_buffer *pb = &cdma->push_buffer;
	BUG_ON(!cdma_pb_op(cdma).push_to);
	BUG_ON(!cdma_op(cdma).kick);
	if (slots_free == 0) {
		/* kick off queued work, then wait for slots to free up */
		cdma_op(cdma).kick(cdma);
		slots_free = nvhost_cdma_wait_locked(cdma,
				CDMA_EVENT_PUSH_BUFFER_SPACE);
	}
	cdma->slots_free = slots_free - 1;
	cdma->slots_used++;
	cdma_pb_op(cdma).push_to(pb, client, handle, op1, op2);
}
| 427 | |||
/**
 * End a cdma submit
 * Kick off DMA, add job to the sync queue, and a number of slots to be freed
 * from the pushbuffer. The handles for a submit must all be pinned at the same
 * time, but they can be unpinned in smaller chunks.
 * Releases the cdma lock taken by nvhost_cdma_begin().
 */
void nvhost_cdma_end(struct nvhost_cdma *cdma,
		struct nvhost_job *job)
{
	/* sample emptiness before queuing: it decides whether to arm timer */
	bool was_idle = list_empty(&cdma->sync_queue);

	BUG_ON(!cdma_op(cdma).kick);
	cdma_op(cdma).kick(cdma);

	BUG_ON(job->syncpt_id == NVSYNCPT_INVALID);

	add_to_sync_queue(cdma,
			job,
			cdma->slots_used,
			cdma->first_get);

	/* start timer on idle -> active transitions */
	if (job->timeout && was_idle)
		cdma_start_timer_locked(cdma, job);

	mutex_unlock(&cdma->lock);
}
| 455 | |||
/**
 * Update cdma state according to current sync point values.
 * Locked wrapper around update_cdma_locked(); safe to call from the
 * worker thread or manually.
 */
void nvhost_cdma_update(struct nvhost_cdma *cdma)
{
	mutex_lock(&cdma->lock);
	update_cdma_locked(cdma);
	mutex_unlock(&cdma->lock);
}
| 465 | |||
| 466 | /** | ||
| 467 | * Wait for push buffer to be empty. | ||
| 468 | * @cdma pointer to channel cdma | ||
| 469 | * @timeout timeout in ms | ||
| 470 | * Returns -ETIME if timeout was reached, zero if push buffer is empty. | ||
| 471 | */ | ||
| 472 | int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout) | ||
| 473 | { | ||
| 474 | unsigned int space, err = 0; | ||
| 475 | unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); | ||
| 476 | |||
| 477 | /* | ||
| 478 | * Wait for at most timeout ms. Recalculate timeout at each iteration | ||
| 479 | * to better keep within given timeout. | ||
| 480 | */ | ||
| 481 | while(!err && time_before(jiffies, end_jiffies)) { | ||
| 482 | int timeout_jiffies = end_jiffies - jiffies; | ||
| 483 | |||
| 484 | mutex_lock(&cdma->lock); | ||
| 485 | space = cdma_status_locked(cdma, | ||
| 486 | CDMA_EVENT_SYNC_QUEUE_EMPTY); | ||
| 487 | if (space) { | ||
| 488 | mutex_unlock(&cdma->lock); | ||
| 489 | return 0; | ||
| 490 | } | ||
| 491 | |||
| 492 | /* | ||
| 493 | * Wait for sync queue to become empty. If there is already | ||
| 494 | * an event pending, we need to poll. | ||
| 495 | */ | ||
| 496 | if (cdma->event != CDMA_EVENT_NONE) { | ||
| 497 | mutex_unlock(&cdma->lock); | ||
| 498 | schedule(); | ||
| 499 | } else { | ||
| 500 | cdma->event = CDMA_EVENT_SYNC_QUEUE_EMPTY; | ||
| 501 | |||
| 502 | mutex_unlock(&cdma->lock); | ||
| 503 | err = down_timeout(&cdma->sem, | ||
| 504 | jiffies_to_msecs(timeout_jiffies)); | ||
| 505 | } | ||
| 506 | } | ||
| 507 | return err; | ||
| 508 | } | ||
diff --git a/drivers/video/tegra/host/nvhost_cdma.h b/drivers/video/tegra/host/nvhost_cdma.h new file mode 100644 index 00000000000..9cb9b827725 --- /dev/null +++ b/drivers/video/tegra/host/nvhost_cdma.h | |||
| @@ -0,0 +1,133 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_cdma.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Command DMA | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_CDMA_H | ||
| 22 | #define __NVHOST_CDMA_H | ||
| 23 | |||
| 24 | #include <linux/sched.h> | ||
| 25 | #include <linux/semaphore.h> | ||
| 26 | |||
| 27 | #include <linux/nvhost.h> | ||
| 28 | #include <mach/nvmap.h> | ||
| 29 | #include <linux/list.h> | ||
| 30 | |||
| 31 | #include "nvhost_acm.h" | ||
| 32 | |||
| 33 | struct nvhost_syncpt; | ||
| 34 | struct nvhost_userctx_timeout; | ||
| 35 | struct nvhost_job; | ||
| 36 | |||
| 37 | /* | ||
| 38 | * cdma | ||
| 39 | * | ||
| 40 | * This is in charge of a host command DMA channel. | ||
| 41 | * Sends ops to a push buffer, and takes responsibility for unpinning | ||
| 42 | * (& possibly freeing) of memory after those ops have completed. | ||
| 43 | * Producer: | ||
| 44 | * begin | ||
| 45 | * push - send ops to the push buffer | ||
| 46 | * end - start command DMA and enqueue handles to be unpinned | ||
| 47 | * Consumer: | ||
| 48 | * update - call to update sync queue and push buffer, unpin memory | ||
| 49 | */ | ||
| 50 | |||
/* Pairing of an nvmap client with one of its handles, kept per PB slot */
struct nvmap_client_handle {
	struct nvmap_client *client;
	struct nvmap_handle *handle;
};

/* Per-channel circular push buffer that command DMA fetches from */
struct push_buffer {
	struct nvmap_handle_ref *mem;	/* handle to pushbuffer memory */
	u32 *mapped;			/* mapped pushbuffer memory */
	u32 phys;			/* physical address of pushbuffer */
	u32 fence;			/* index we've written */
	u32 cur;			/* index to write to */
	struct nvmap_client_handle *nvmap;
					/* nvmap handle for each opcode pair */
};

/* Pre-built gather buffer used to increment syncpts during recovery */
struct syncpt_buffer {
	struct nvmap_handle_ref *mem;	/* handle to pushbuffer memory */
	u32 *mapped;		/* mapped gather buffer (at channel offset) */
	u32 phys;		/* physical address (at channel offset) */
	u32 incr_per_buffer;	/* max # of incrs per GATHER */
	u32 words_per_incr;	/* # of DWORDS in buffer to incr a syncpt */
};

/* Timeout state for detecting and recovering from stuck submits */
struct buffer_timeout {
	struct delayed_work wq;		/* work queue */
	bool initialized;		/* timer one-time setup flag */
	u32 syncpt_id;			/* buffer completion syncpt id */
	u32 syncpt_val;			/* syncpt value when completed */
	ktime_t start_ktime;		/* starting time */
	/* context timeout information */
	struct nvhost_hwctx *ctx;
	int clientid;
};

/* Event a waiter may block on; at most one waiter per cdma at a time */
enum cdma_event {
	CDMA_EVENT_NONE,		/* not waiting for any event */
	CDMA_EVENT_SYNC_QUEUE_EMPTY,	/* wait for empty sync queue */
	CDMA_EVENT_PUSH_BUFFER_SPACE	/* wait for space in push buffer */
};

struct nvhost_cdma {
	struct mutex lock;		/* controls access to shared state */
	struct semaphore sem;		/* signalled when event occurs */
	enum cdma_event event;		/* event that sem is waiting for */
	unsigned int slots_used;	/* pb slots used in current submit */
	unsigned int slots_free;	/* pb slots free in current submit */
	unsigned int first_get;		/* DMAGET value, where submit begins */
	unsigned int last_put;		/* last value written to DMAPUT */
	struct push_buffer push_buffer;	/* channel's push buffer */
	struct syncpt_buffer syncpt_buffer; /* syncpt incr buffer */
	struct list_head sync_queue;	/* job queue */
	struct buffer_timeout timeout;	/* channel's timeout state/wq */
	bool running;
	bool torndown;
};

/* Navigation/dispatch helpers between cdma and its channel/host objects */
#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
#define cdma_to_dev(cdma) nvhost_get_host(cdma_to_channel(cdma)->dev)
#define cdma_op(cdma) (cdma_to_dev(cdma)->op.cdma)
#define cdma_to_nvmap(cdma) ((cdma_to_dev(cdma))->nvmap)
#define pb_to_cdma(pb) container_of(pb, struct nvhost_cdma, push_buffer)
#define cdma_pb_op(cdma) (cdma_to_dev(cdma)->op.push_buffer)
| 113 | |||
| 114 | int nvhost_cdma_init(struct nvhost_cdma *cdma); | ||
| 115 | void nvhost_cdma_deinit(struct nvhost_cdma *cdma); | ||
| 116 | void nvhost_cdma_stop(struct nvhost_cdma *cdma); | ||
| 117 | int nvhost_cdma_begin(struct nvhost_cdma *cdma, struct nvhost_job *job); | ||
| 118 | void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2); | ||
| 119 | #define NVHOST_CDMA_PUSH_GATHER_CTXSAVE 0xffffffff | ||
| 120 | void nvhost_cdma_push_gather(struct nvhost_cdma *cdma, | ||
| 121 | struct nvmap_client *client, | ||
| 122 | struct nvmap_handle *handle, u32 op1, u32 op2); | ||
| 123 | void nvhost_cdma_end(struct nvhost_cdma *cdma, | ||
| 124 | struct nvhost_job *job); | ||
| 125 | void nvhost_cdma_update(struct nvhost_cdma *cdma); | ||
| 126 | int nvhost_cdma_flush(struct nvhost_cdma *cdma, int timeout); | ||
| 127 | void nvhost_cdma_peek(struct nvhost_cdma *cdma, | ||
| 128 | u32 dmaget, int slot, u32 *out); | ||
| 129 | unsigned int nvhost_cdma_wait_locked(struct nvhost_cdma *cdma, | ||
| 130 | enum cdma_event event); | ||
| 131 | void nvhost_cdma_update_sync_queue(struct nvhost_cdma *cdma, | ||
| 132 | struct nvhost_syncpt *syncpt, struct device *dev); | ||
| 133 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_channel.c b/drivers/video/tegra/host/nvhost_channel.c new file mode 100644 index 00000000000..a7c03308134 --- /dev/null +++ b/drivers/video/tegra/host/nvhost_channel.c | |||
| @@ -0,0 +1,158 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_channel.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Channel | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "nvhost_channel.h" | ||
| 22 | #include "dev.h" | ||
| 23 | #include "nvhost_job.h" | ||
| 24 | #include <trace/events/nvhost.h> | ||
| 25 | #include <linux/nvhost_ioctl.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | |||
| 28 | #include <linux/platform_device.h> | ||
| 29 | |||
| 30 | #define NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT 50 | ||
| 31 | |||
| 32 | int nvhost_channel_init(struct nvhost_channel *ch, | ||
| 33 | struct nvhost_master *dev, int index) | ||
| 34 | { | ||
| 35 | int err; | ||
| 36 | struct nvhost_device *ndev; | ||
| 37 | struct resource *r = NULL; | ||
| 38 | void __iomem *regs = NULL; | ||
| 39 | struct resource *reg_mem = NULL; | ||
| 40 | |||
| 41 | /* Link nvhost_device to nvhost_channel */ | ||
| 42 | err = host_channel_op(dev).init(ch, dev, index); | ||
| 43 | if (err < 0) { | ||
| 44 | dev_err(&dev->dev->dev, "failed to init channel %d\n", | ||
| 45 | index); | ||
| 46 | return err; | ||
| 47 | } | ||
| 48 | ndev = ch->dev; | ||
| 49 | ndev->channel = ch; | ||
| 50 | |||
| 51 | /* Map IO memory related to nvhost_device */ | ||
| 52 | if (ndev->moduleid != NVHOST_MODULE_NONE) { | ||
| 53 | /* First one is host1x - skip that */ | ||
| 54 | r = nvhost_get_resource(dev->dev, | ||
| 55 | IORESOURCE_MEM, ndev->moduleid + 1); | ||
| 56 | if (!r) | ||
| 57 | goto fail; | ||
| 58 | |||
| 59 | reg_mem = request_mem_region(r->start, | ||
| 60 | resource_size(r), ndev->name); | ||
| 61 | if (!reg_mem) | ||
| 62 | goto fail; | ||
| 63 | |||
| 64 | regs = ioremap(r->start, resource_size(r)); | ||
| 65 | if (!regs) | ||
| 66 | goto fail; | ||
| 67 | |||
| 68 | ndev->reg_mem = reg_mem; | ||
| 69 | ndev->aperture = regs; | ||
| 70 | } | ||
| 71 | return 0; | ||
| 72 | |||
| 73 | fail: | ||
| 74 | if (reg_mem) | ||
| 75 | release_mem_region(r->start, resource_size(r)); | ||
| 76 | if (regs) | ||
| 77 | iounmap(regs); | ||
| 78 | dev_err(&ndev->dev, "failed to get register memory\n"); | ||
| 79 | return -ENXIO; | ||
| 80 | |||
| 81 | } | ||
| 82 | |||
/**
 * Submit a job to its channel via the chip-specific submit op.
 * Low-priority jobs first yield to in-flight work (see comment below).
 * Returns the chip submit op's result.
 */
int nvhost_channel_submit(struct nvhost_job *job)
{
	/* Low priority submits wait until sync queue is empty. Ignores result
	 * from nvhost_cdma_flush, as we submit either when push buffer is
	 * empty or when we reach the timeout. */
	if (job->priority < NVHOST_PRIORITY_MEDIUM)
		(void)nvhost_cdma_flush(&job->ch->cdma,
				NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT);

	return channel_op(job->ch).submit(job);
}
| 94 | |||
/**
 * Take a reference on a channel, initializing it on the 0 -> 1 transition.
 * Fails with NULL if the channel is exclusive and already in use, or if
 * cdma init fails on first open.
 */
struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
{
	int err = 0;
	mutex_lock(&ch->reflock);
	if (ch->refcount == 0) {
		/* first opener sets the device up */
		if (ch->dev->init)
			ch->dev->init(ch->dev);
		err = nvhost_cdma_init(&ch->cdma);
	} else if (ch->dev->exclusive) {
		/* exclusive devices allow only a single concurrent opener */
		err = -EBUSY;
	}
	if (!err)
		ch->refcount++;

	mutex_unlock(&ch->reflock);

	/* Keep alive modules that needs to be when a channel is open */
	if (!err && ch->dev->keepalive)
		nvhost_module_busy(ch->dev);

	return err ? NULL : ch;
}
| 117 | |||
/**
 * Drop a reference taken with nvhost_getchannel(); on the 1 -> 0 transition
 * stops cdma, tears it down, and suspends the module. If @ctx is this
 * channel's current hw context, it is detached first.
 */
void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
{
	BUG_ON(!channel_cdma_op(ch).stop);

	if (ctx) {
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);
	}

	/* Allow keep-alive'd module to be turned off */
	if (ch->dev->keepalive)
		nvhost_module_idle(ch->dev);

	mutex_lock(&ch->reflock);
	if (ch->refcount == 1) {
		/* last reference: quiesce and release channel resources */
		channel_cdma_op(ch).stop(&ch->cdma);
		nvhost_cdma_deinit(&ch->cdma);
		nvhost_module_suspend(ch->dev, false);
	}
	ch->refcount--;
	mutex_unlock(&ch->reflock);
}
| 142 | |||
| 143 | int nvhost_channel_suspend(struct nvhost_channel *ch) | ||
| 144 | { | ||
| 145 | int ret = 0; | ||
| 146 | |||
| 147 | mutex_lock(&ch->reflock); | ||
| 148 | BUG_ON(!channel_cdma_op(ch).stop); | ||
| 149 | |||
| 150 | if (ch->refcount) { | ||
| 151 | ret = nvhost_module_suspend(ch->dev, false); | ||
| 152 | if (!ret) | ||
| 153 | channel_cdma_op(ch).stop(&ch->cdma); | ||
| 154 | } | ||
| 155 | mutex_unlock(&ch->reflock); | ||
| 156 | |||
| 157 | return ret; | ||
| 158 | } | ||
diff --git a/drivers/video/tegra/host/nvhost_channel.h b/drivers/video/tegra/host/nvhost_channel.h new file mode 100644 index 00000000000..7b946c8ee85 --- /dev/null +++ b/drivers/video/tegra/host/nvhost_channel.h | |||
| @@ -0,0 +1,86 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_channel.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Channel | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_CHANNEL_H | ||
| 22 | #define __NVHOST_CHANNEL_H | ||
| 23 | |||
| 24 | #include "nvhost_cdma.h" | ||
| 25 | #include "nvhost_acm.h" | ||
| 26 | #include "nvhost_hwctx.h" | ||
| 27 | #include "nvhost_job.h" | ||
| 28 | |||
| 29 | #include <linux/cdev.h> | ||
| 30 | #include <linux/io.h> | ||
| 31 | |||
| 32 | #define NVHOST_MAX_WAIT_CHECKS 256 | ||
| 33 | #define NVHOST_MAX_GATHERS 512 | ||
| 34 | #define NVHOST_MAX_HANDLES 1280 | ||
| 35 | #define NVHOST_MAX_POWERGATE_IDS 2 | ||
| 36 | |||
| 37 | struct nvhost_master; | ||
| 38 | struct nvhost_waitchk; | ||
| 39 | struct nvhost_device; | ||
| 40 | |||
/* One gather descriptor: a span of command words inside an nvmap handle. */
struct nvhost_channel_gather {
	u32 words;		/* number of command words to fetch */
	phys_addr_t mem;	/* physical address of the buffer */
	u32 mem_id;		/* nvmap handle id backing the buffer */
	int offset;		/* byte offset of the gather within the handle */
};

/* Per-hardware-channel state. */
struct nvhost_channel {
	int refcount;			/* open count; guarded by reflock */
	int chid;			/* hardware channel index */
	u32 syncpt_id;			/* sync point associated with channel */
	struct mutex reflock;		/* guards refcount / power up-down */
	struct mutex submitlock;	/* serializes submits and cur_ctx */
	void __iomem *aperture;		/* mapped channel registers */
	struct nvhost_hwctx *cur_ctx;	/* context currently on hardware */
	struct device *node;
	struct nvhost_device *dev;
	struct cdev cdev;		/* user-space character device */
	struct nvhost_hwctx_handler *ctxhandler;  /* ctx save/restore ops */
	struct nvhost_cdma cdma;	/* push buffer / command DMA state */
};
| 62 | |||
| 63 | int nvhost_channel_init( | ||
| 64 | struct nvhost_channel *ch, | ||
| 65 | struct nvhost_master *dev, int index); | ||
| 66 | |||
| 67 | int nvhost_channel_submit(struct nvhost_job *job); | ||
| 68 | |||
| 69 | struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch); | ||
| 70 | void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx); | ||
| 71 | int nvhost_channel_suspend(struct nvhost_channel *ch); | ||
| 72 | |||
/*
 * Accessors for the per-SoC operation tables reached through a channel
 * or host pointer.  Arguments are parenthesized so any expression
 * (not just a plain identifier) can be passed safely.
 */
#define channel_cdma_op(ch) (nvhost_get_host((ch)->dev)->op.cdma)
#define channel_op(ch) (nvhost_get_host((ch)->dev)->op.channel)
#define host_channel_op(host) ((host)->op.channel)
| 76 | |||
| 77 | int nvhost_channel_drain_read_fifo(void __iomem *chan_regs, | ||
| 78 | u32 *ptr, unsigned int count, unsigned int *pending); | ||
| 79 | |||
| 80 | int nvhost_channel_read_3d_reg( | ||
| 81 | struct nvhost_channel *channel, | ||
| 82 | struct nvhost_hwctx *hwctx, | ||
| 83 | u32 offset, | ||
| 84 | u32 *value); | ||
| 85 | |||
| 86 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_hwctx.h b/drivers/video/tegra/host/nvhost_hwctx.h new file mode 100644 index 00000000000..02a3976f01c --- /dev/null +++ b/drivers/video/tegra/host/nvhost_hwctx.h | |||
| @@ -0,0 +1,67 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_hwctx.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Hardware Context Interface | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_HWCTX_H | ||
| 22 | #define __NVHOST_HWCTX_H | ||
| 23 | |||
| 24 | #include <linux/string.h> | ||
| 25 | #include <linux/kref.h> | ||
| 26 | |||
| 27 | #include <linux/nvhost.h> | ||
| 28 | #include <mach/nvmap.h> | ||
| 29 | |||
| 30 | struct nvhost_channel; | ||
| 31 | struct nvhost_cdma; | ||
| 32 | |||
/* A saved/restorable hardware context bound to one channel. */
struct nvhost_hwctx {
	struct kref ref;		/* lifetime managed via h->get/put */
	struct nvhost_hwctx_handler *h;	/* ops that created this context */
	struct nvhost_channel *channel;	/* owning channel */
	bool valid;			/* context holds restorable state */
	bool has_timedout;		/* a submit on this ctx timed out */
};

/* Per-engine context management operations. */
struct nvhost_hwctx_handler {
	/* allocate a fresh context for @ch */
	struct nvhost_hwctx * (*alloc) (struct nvhost_hwctx_handler *h,
			struct nvhost_channel *ch);
	void (*get) (struct nvhost_hwctx *ctx);	/* take a reference */
	void (*put) (struct nvhost_hwctx *ctx);	/* drop a reference */
	/* push context-save commands into the CDMA stream */
	void (*save_push) (struct nvhost_hwctx *ctx,
			struct nvhost_cdma *cdma);
	/* optional post-save service hook (may be NULL) */
	void (*save_service) (struct nvhost_hwctx *ctx);
	void *priv;			/* handler-private data */
};


/* Describes one register range in a context save/restore table. */
struct hwctx_reginfo {
	unsigned int offset:12;	/* register offset */
	unsigned int count:16;	/* number of registers */
	unsigned int type:2;	/* one of HWCTX_REGINFO_* below */
};

enum {
	HWCTX_REGINFO_DIRECT = 0,	/* contiguous register range */
	HWCTX_REGINFO_INDIRECT,		/* accessed via indirect window */
	HWCTX_REGINFO_INDIRECT_4X	/* indirect, 4 words per register */
};

#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
| 66 | |||
| 67 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_intr.c b/drivers/video/tegra/host/nvhost_intr.c new file mode 100644 index 00000000000..7c4bdc7bafb --- /dev/null +++ b/drivers/video/tegra/host/nvhost_intr.c | |||
| @@ -0,0 +1,428 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_intr.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Interrupt Management | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "nvhost_intr.h" | ||
| 22 | #include "dev.h" | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/irq.h> | ||
| 26 | #include <trace/events/nvhost.h> | ||
| 27 | |||
| 28 | |||
| 29 | |||
| 30 | |||
| 31 | |||
| 32 | /*** Wait list management ***/ | ||
| 33 | |||
/* One pending wait on a sync point threshold. */
struct nvhost_waitlist {
	struct list_head list;		/* entry in syncpt's wait_head queue */
	struct kref refcount;		/* held by queue and optional caller ref */
	u32 thresh;			/* sync point value waited for */
	enum nvhost_intr_action action;	/* what to run on completion */
	atomic_t state;			/* enum waitlist_state */
	void *data;			/* action-specific payload */
	int count;			/* consolidated completion count */
};

/*
 * Waiter life cycle.  NOTE: the ordering is load-bearing -- a single
 * atomic_inc moves PENDING->REMOVED and CANCELLED->HANDLED, which is
 * exactly what remove_completed_waiters() relies on.
 */
enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};
| 50 | |||
| 51 | static void waiter_release(struct kref *kref) | ||
| 52 | { | ||
| 53 | kfree(container_of(kref, struct nvhost_waitlist, refcount)); | ||
| 54 | } | ||
| 55 | |||
| 56 | /** | ||
| 57 | * add a waiter to a waiter queue, sorted by threshold | ||
| 58 | * returns true if it was added at the head of the queue | ||
| 59 | */ | ||
| 60 | static bool add_waiter_to_queue(struct nvhost_waitlist *waiter, | ||
| 61 | struct list_head *queue) | ||
| 62 | { | ||
| 63 | struct nvhost_waitlist *pos; | ||
| 64 | u32 thresh = waiter->thresh; | ||
| 65 | |||
| 66 | list_for_each_entry_reverse(pos, queue, list) | ||
| 67 | if ((s32)(pos->thresh - thresh) <= 0) { | ||
| 68 | list_add(&waiter->list, &pos->list); | ||
| 69 | return false; | ||
| 70 | } | ||
| 71 | |||
| 72 | list_add(&waiter->list, queue); | ||
| 73 | return true; | ||
| 74 | } | ||
| 75 | |||
| 76 | /** | ||
| 77 | * run through a waiter queue for a single sync point ID | ||
| 78 | * and gather all completed waiters into lists by actions | ||
| 79 | */ | ||
/**
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 *
 * @head    sorted wait queue for one sync point (syncpt->lock held)
 * @sync    current sync point value; waiters at or below it completed
 * @completed one output list per action type
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct nvhost_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		/* queue is sorted: first unexpired waiter ends the scan
		 * (signed diff handles wraparound) */
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups: fold this completion into
		 * the previous waiter for the same channel (dest = NULL
		 * marks the waiter as merged, to be dropped below) */
		if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
			&& !list_empty(dest)) {
			prev = list_entry(dest->prev,
					struct nvhost_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			/* cancelled or merged: drop the queue's reference */
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else {
			/* hand the waiter over to run_handlers() */
			list_move_tail(&waiter->list, dest);
		}
	}
}
| 112 | |||
| 113 | void reset_threshold_interrupt(struct nvhost_intr *intr, | ||
| 114 | struct list_head *head, | ||
| 115 | unsigned int id) | ||
| 116 | { | ||
| 117 | u32 thresh = list_first_entry(head, | ||
| 118 | struct nvhost_waitlist, list)->thresh; | ||
| 119 | BUG_ON(!(intr_op(intr).set_syncpt_threshold && | ||
| 120 | intr_op(intr).enable_syncpt_intr)); | ||
| 121 | |||
| 122 | intr_op(intr).set_syncpt_threshold(intr, id, thresh); | ||
| 123 | intr_op(intr).enable_syncpt_intr(intr, id); | ||
| 124 | } | ||
| 125 | |||
| 126 | |||
| 127 | static void action_submit_complete(struct nvhost_waitlist *waiter) | ||
| 128 | { | ||
| 129 | struct nvhost_channel *channel = waiter->data; | ||
| 130 | int nr_completed = waiter->count; | ||
| 131 | |||
| 132 | /* Add nr_completed to trace */ | ||
| 133 | trace_nvhost_channel_submit_complete(channel->dev->name, | ||
| 134 | nr_completed, waiter->thresh); | ||
| 135 | |||
| 136 | nvhost_cdma_update(&channel->cdma); | ||
| 137 | nvhost_module_idle_mult(channel->dev, nr_completed); | ||
| 138 | } | ||
| 139 | |||
| 140 | static void action_ctxsave(struct nvhost_waitlist *waiter) | ||
| 141 | { | ||
| 142 | struct nvhost_hwctx *hwctx = waiter->data; | ||
| 143 | struct nvhost_channel *channel = hwctx->channel; | ||
| 144 | |||
| 145 | if (channel->ctxhandler->save_service) | ||
| 146 | channel->ctxhandler->save_service(hwctx); | ||
| 147 | } | ||
| 148 | |||
| 149 | static void action_wakeup(struct nvhost_waitlist *waiter) | ||
| 150 | { | ||
| 151 | wait_queue_head_t *wq = waiter->data; | ||
| 152 | |||
| 153 | wake_up(wq); | ||
| 154 | } | ||
| 155 | |||
| 156 | static void action_wakeup_interruptible(struct nvhost_waitlist *waiter) | ||
| 157 | { | ||
| 158 | wait_queue_head_t *wq = waiter->data; | ||
| 159 | |||
| 160 | wake_up_interruptible(wq); | ||
| 161 | } | ||
| 162 | |||
typedef void (*action_handler)(struct nvhost_waitlist *waiter);

/*
 * Dispatch table for completed waiters.  NOTE: entries MUST stay in the
 * same order as enum nvhost_intr_action (nvhost_intr.h), because
 * run_handlers() indexes this array with the action value.
 */
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_ctxsave,
	action_wakeup,
	action_wakeup_interruptible,
};
| 171 | |||
| 172 | static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT]) | ||
| 173 | { | ||
| 174 | struct list_head *head = completed; | ||
| 175 | int i; | ||
| 176 | |||
| 177 | for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) { | ||
| 178 | action_handler handler = action_handlers[i]; | ||
| 179 | struct nvhost_waitlist *waiter, *next; | ||
| 180 | |||
| 181 | list_for_each_entry_safe(waiter, next, head, list) { | ||
| 182 | list_del(&waiter->list); | ||
| 183 | handler(waiter); | ||
| 184 | WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED); | ||
| 185 | kref_put(&waiter->refcount, waiter_release); | ||
| 186 | } | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 190 | /** | ||
| 191 | * Remove & handle all waiters that have completed for the given syncpt | ||
| 192 | */ | ||
/**
 * Remove & handle all waiters that have completed for the given syncpt
 *
 * Gathers completions under syncpt->lock, re-arms the interrupt if
 * waiters remain, then runs the handlers with the lock released.
 * Returns nonzero if the wait queue is now empty.
 */
static int process_wait_list(struct nvhost_intr *intr,
			struct nvhost_intr_syncpt *syncpt,
			u32 threshold)
{
	struct list_head completed[NVHOST_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->lock);

	remove_completed_waiters(&syncpt->wait_head, threshold, completed);

	empty = list_empty(&syncpt->wait_head);
	if (!empty)
		/* program next threshold & re-enable while still locked */
		reset_threshold_interrupt(intr, &syncpt->wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->lock);

	/* handlers may sleep/trace; must run outside the spinlock */
	run_handlers(completed);

	return empty;
}
| 219 | |||
| 220 | /*** host syncpt interrupt service functions ***/ | ||
| 221 | /** | ||
| 222 | * Sync point threshold interrupt service thread function | ||
| 223 | * Handles sync point threshold triggers, in thread context | ||
| 224 | */ | ||
| 225 | irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id) | ||
| 226 | { | ||
| 227 | struct nvhost_intr_syncpt *syncpt = dev_id; | ||
| 228 | unsigned int id = syncpt->id; | ||
| 229 | struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt); | ||
| 230 | struct nvhost_master *dev = intr_to_dev(intr); | ||
| 231 | |||
| 232 | (void)process_wait_list(intr, syncpt, | ||
| 233 | nvhost_syncpt_update_min(&dev->syncpt, id)); | ||
| 234 | |||
| 235 | return IRQ_HANDLED; | ||
| 236 | } | ||
| 237 | |||
| 238 | /** | ||
| 239 | * free a syncpt's irq. syncpt interrupt should be disabled first. | ||
| 240 | */ | ||
| 241 | static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt) | ||
| 242 | { | ||
| 243 | if (syncpt->irq_requested) { | ||
| 244 | free_irq(syncpt->irq, syncpt); | ||
| 245 | syncpt->irq_requested = 0; | ||
| 246 | } | ||
| 247 | } | ||
| 248 | |||
| 249 | |||
| 250 | /*** host general interrupt service functions ***/ | ||
| 251 | |||
| 252 | |||
| 253 | /*** Main API ***/ | ||
| 254 | |||
/*
 * Schedule @action for when sync point @id reaches @thresh.
 * Takes ownership of @_waiter (from nvhost_intr_alloc_waiter); it is
 * freed here on error, otherwise when the action completes or, if @ref
 * was supplied, when the caller drops its reference via
 * nvhost_intr_put_ref().  Non-blocking apart from a possible lazy irq
 * request on the first waiter for a sync point.
 */
int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
			enum nvhost_intr_action action, void *data,
			void *_waiter,
			void **ref)
{
	struct nvhost_waitlist *waiter = _waiter;
	struct nvhost_intr_syncpt *syncpt;
	int queue_was_empty;
	int err;

	BUG_ON(waiter == NULL);

	BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
		 intr_op(intr).enable_syncpt_intr));

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	/* extra reference for the caller's cancellation handle */
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	BUG_ON(id >= intr_to_dev(intr)->syncpt.nb_pts);
	syncpt = intr->syncpt + id;

	spin_lock(&syncpt->lock);

	/* lazily request irq for this sync point */
	if (!syncpt->irq_requested) {
		/* must drop the spinlock: the request path can sleep and
		 * is serialized by intr->mutex instead */
		spin_unlock(&syncpt->lock);

		mutex_lock(&intr->mutex);
		BUG_ON(!(intr_op(intr).request_syncpt_irq));
		err = intr_op(intr).request_syncpt_irq(syncpt);
		mutex_unlock(&intr->mutex);

		if (err) {
			/* we own the waiter on failure */
			kfree(waiter);
			return err;
		}

		spin_lock(&syncpt->lock);
	}

	queue_was_empty = list_empty(&syncpt->wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
		/* added at head of list - new threshold value */
		intr_op(intr).set_syncpt_threshold(intr, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			intr_op(intr).enable_syncpt_intr(intr, id);
	}

	spin_unlock(&syncpt->lock);

	if (ref)
		*ref = waiter;
	return 0;
}
| 320 | |||
| 321 | void *nvhost_intr_alloc_waiter() | ||
| 322 | { | ||
| 323 | return kzalloc(sizeof(struct nvhost_waitlist), | ||
| 324 | GFP_KERNEL|__GFP_REPEAT); | ||
| 325 | } | ||
| 326 | |||
/*
 * Drop the caller's reference obtained via nvhost_intr_add_action(@ref).
 * Tries to cancel the waiter (PENDING -> CANCELLED).  If the cmpxchg
 * observes REMOVED, the interrupt thread is concurrently dispatching
 * this waiter; yield and retry until it reaches HANDLED, then release.
 */
void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
	struct nvhost_waitlist *waiter = ref;

	while (atomic_cmpxchg(&waiter->state,
				WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
		schedule();

	kref_put(&waiter->refcount, waiter_release);
}
| 337 | |||
| 338 | |||
| 339 | /*** Init & shutdown ***/ | ||
| 340 | |||
| 341 | int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync) | ||
| 342 | { | ||
| 343 | unsigned int id; | ||
| 344 | struct nvhost_intr_syncpt *syncpt; | ||
| 345 | struct nvhost_master *host = | ||
| 346 | container_of(intr, struct nvhost_master, intr); | ||
| 347 | u32 nb_pts = host->syncpt.nb_pts; | ||
| 348 | |||
| 349 | mutex_init(&intr->mutex); | ||
| 350 | intr->host_general_irq = irq_gen; | ||
| 351 | intr->host_general_irq_requested = false; | ||
| 352 | |||
| 353 | for (id = 0, syncpt = intr->syncpt; | ||
| 354 | id < nb_pts; | ||
| 355 | ++id, ++syncpt) { | ||
| 356 | syncpt->intr = &host->intr; | ||
| 357 | syncpt->id = id; | ||
| 358 | syncpt->irq = irq_sync + id; | ||
| 359 | syncpt->irq_requested = 0; | ||
| 360 | spin_lock_init(&syncpt->lock); | ||
| 361 | INIT_LIST_HEAD(&syncpt->wait_head); | ||
| 362 | snprintf(syncpt->thresh_irq_name, | ||
| 363 | sizeof(syncpt->thresh_irq_name), | ||
| 364 | "host_sp_%02d", id); | ||
| 365 | } | ||
| 366 | |||
| 367 | return 0; | ||
| 368 | } | ||
| 369 | |||
/* Tear down interrupt handling; all waiters must be cancelled first
 * (nvhost_intr_stop BUGs on any that are not). */
void nvhost_intr_deinit(struct nvhost_intr *intr)
{
	nvhost_intr_stop(intr);
}
| 374 | |||
| 375 | void nvhost_intr_start(struct nvhost_intr *intr, u32 hz) | ||
| 376 | { | ||
| 377 | BUG_ON(!(intr_op(intr).init_host_sync && | ||
| 378 | intr_op(intr).set_host_clocks_per_usec && | ||
| 379 | intr_op(intr).request_host_general_irq)); | ||
| 380 | |||
| 381 | mutex_lock(&intr->mutex); | ||
| 382 | |||
| 383 | intr_op(intr).init_host_sync(intr); | ||
| 384 | intr_op(intr).set_host_clocks_per_usec(intr, | ||
| 385 | (hz + 1000000 - 1)/1000000); | ||
| 386 | |||
| 387 | intr_op(intr).request_host_general_irq(intr); | ||
| 388 | |||
| 389 | mutex_unlock(&intr->mutex); | ||
| 390 | } | ||
| 391 | |||
/*
 * Disable all interrupt delivery and release per-syncpt irqs.
 * Reaps waiters that were cancelled but not yet collected; any waiter
 * still pending at this point is a caller bug and trips BUG_ON.
 */
void nvhost_intr_stop(struct nvhost_intr *intr)
{
	unsigned int id;
	struct nvhost_intr_syncpt *syncpt;
	u32 nb_pts = intr_to_dev(intr)->syncpt.nb_pts;

	BUG_ON(!(intr_op(intr).disable_all_syncpt_intrs &&
		 intr_op(intr).free_host_general_irq));

	mutex_lock(&intr->mutex);

	intr_op(intr).disable_all_syncpt_intrs(intr);

	for (id = 0, syncpt = intr->syncpt;
	     id < nb_pts;
	     ++id, ++syncpt) {
		struct nvhost_waitlist *waiter, *next;
		/* reap waiters already cancelled via nvhost_intr_put_ref */
		list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
			if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
				== WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt->wait_head)) {  /* output diagnostics */
			/* a non-cancelled waiter survived: fatal */
			printk(KERN_DEBUG "%s id=%d\n", __func__, id);
			BUG_ON(1);
		}

		free_syncpt_irq(syncpt);
	}

	intr_op(intr).free_host_general_irq(intr);

	mutex_unlock(&intr->mutex);
}
diff --git a/drivers/video/tegra/host/nvhost_intr.h b/drivers/video/tegra/host/nvhost_intr.h new file mode 100644 index 00000000000..26ab04ebd4a --- /dev/null +++ b/drivers/video/tegra/host/nvhost_intr.h | |||
| @@ -0,0 +1,115 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_intr.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Interrupt Management | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_INTR_H | ||
| 22 | #define __NVHOST_INTR_H | ||
| 23 | |||
| 24 | #include <linux/kthread.h> | ||
| 25 | #include <linux/semaphore.h> | ||
| 26 | #include <linux/interrupt.h> | ||
| 27 | |||
| 28 | struct nvhost_channel; | ||
| 29 | |||
/*
 * Actions runnable when a sync point threshold is reached.
 * NOTE: values index the action_handlers[] table in nvhost_intr.c;
 * keep the two in the same order.
 */
enum nvhost_intr_action {
	/**
	 * Perform cleanup after a submit has completed.
	 * 'data' points to a channel
	 */
	NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,

	/**
	 * Save a HW context.
	 * 'data' points to a context
	 */
	NVHOST_INTR_ACTION_CTXSAVE,

	/**
	 * Wake up a task.
	 * 'data' points to a wait_queue_head_t
	 */
	NVHOST_INTR_ACTION_WAKEUP,

	/**
	 * Wake up an interruptible task.
	 * 'data' points to a wait_queue_head_t
	 */
	NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,

	NVHOST_INTR_ACTION_COUNT
};
| 57 | |||
| 58 | struct nvhost_intr; | ||
| 59 | |||
/* Per-sync-point interrupt state. */
struct nvhost_intr_syncpt {
	struct nvhost_intr *intr;	/* back pointer to owner */
	u8 id;				/* sync point id */
	u8 irq_requested;		/* nonzero once irq lazily requested */
	u16 irq;			/* irq number for this sync point */
	spinlock_t lock;		/* guards wait_head */
	struct list_head wait_head;	/* waiters sorted by threshold */
	char thresh_irq_name[12];	/* "host_sp_NN" irq label */
};

/* Interrupt state for one host1x instance. */
struct nvhost_intr {
	struct nvhost_intr_syncpt *syncpt;	/* array, one per sync point */
	struct mutex mutex;			/* serializes irq (de)request */
	int host_general_irq;
	bool host_general_irq_requested;
};
/*
 * Navigation helpers; arguments are parenthesized so callers may pass
 * arbitrary expressions, not just identifiers.
 */
#define intr_to_dev(x) container_of((x), struct nvhost_master, intr)
#define intr_op(intr) (intr_to_dev(intr)->op.intr)
#define intr_syncpt_to_intr(is) ((is)->intr)
| 79 | |||
| 80 | /** | ||
| 81 | * Schedule an action to be taken when a sync point reaches the given threshold. | ||
| 82 | * | ||
| 83 | * @id the sync point | ||
| 84 | * @thresh the threshold | ||
| 85 | * @action the action to take | ||
| 86 | * @data a pointer to extra data depending on action, see above | ||
| 87 | * @waiter waiter allocated with nvhost_intr_alloc_waiter - assumes ownership | ||
| 88 | * @ref must be passed if cancellation is possible, else NULL | ||
| 89 | * | ||
| 90 | * This is a non-blocking api. | ||
| 91 | */ | ||
| 92 | int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh, | ||
| 93 | enum nvhost_intr_action action, void *data, | ||
| 94 | void *waiter, | ||
| 95 | void **ref); | ||
| 96 | |||
| 97 | /** | ||
| 98 | * Allocate a waiter. | ||
| 99 | */ | ||
| 100 | void *nvhost_intr_alloc_waiter(void); | ||
| 101 | |||
| 102 | /** | ||
| 103 | * Unreference an action submitted to nvhost_intr_add_action(). | ||
| 104 | * You must call this if you passed non-NULL as ref. | ||
| 105 | * @ref the ref returned from nvhost_intr_add_action() | ||
| 106 | */ | ||
| 107 | void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref); | ||
| 108 | |||
| 109 | int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync); | ||
| 110 | void nvhost_intr_deinit(struct nvhost_intr *intr); | ||
| 111 | void nvhost_intr_start(struct nvhost_intr *intr, u32 hz); | ||
| 112 | void nvhost_intr_stop(struct nvhost_intr *intr); | ||
| 113 | |||
| 114 | irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id); | ||
| 115 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_job.c b/drivers/video/tegra/host/nvhost_job.c new file mode 100644 index 00000000000..df7a62d689b --- /dev/null +++ b/drivers/video/tegra/host/nvhost_job.c | |||
| @@ -0,0 +1,339 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_job.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Job | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/slab.h> | ||
| 22 | #include <linux/kref.h> | ||
| 23 | #include <linux/err.h> | ||
| 24 | #include <linux/vmalloc.h> | ||
| 25 | #include <mach/nvmap.h> | ||
| 26 | #include "nvhost_channel.h" | ||
| 27 | #include "nvhost_job.h" | ||
| 28 | #include "dev.h" | ||
| 29 | |||
| 30 | /* Magic to use to fill freed handle slots */ | ||
| 31 | #define BAD_MAGIC 0xdeadbeef | ||
| 32 | |||
| 33 | static int job_size(struct nvhost_submit_hdr_ext *hdr) | ||
| 34 | { | ||
| 35 | int num_pins = hdr ? (hdr->num_relocs + hdr->num_cmdbufs)*2 : 0; | ||
| 36 | int num_waitchks = hdr ? hdr->num_waitchks : 0; | ||
| 37 | |||
| 38 | return sizeof(struct nvhost_job) | ||
| 39 | + num_pins * sizeof(struct nvmap_pinarray_elem) | ||
| 40 | + num_pins * sizeof(struct nvmap_handle *) | ||
| 41 | + num_waitchks * sizeof(struct nvhost_waitchk); | ||
| 42 | } | ||
| 43 | |||
/* Bytes needed for one gather descriptor per command buffer. */
static int gather_size(int num_cmdbufs)
{
	return num_cmdbufs * sizeof(struct nvhost_channel_gather);
}
| 48 | |||
/* Release the gather buffer: unmap first, then free the nvmap handle.
 * Safe to call with either or both fields already NULL. */
static void free_gathers(struct nvhost_job *job)
{
	if (job->gathers) {
		nvmap_munmap(job->gather_mem, job->gathers);
		job->gathers = NULL;
	}
	if (job->gather_mem) {
		nvmap_free(job->nvmap, job->gather_mem);
		job->gather_mem = NULL;
	}
}
| 60 | |||
| 61 | static int alloc_gathers(struct nvhost_job *job, | ||
| 62 | int num_cmdbufs) | ||
| 63 | { | ||
| 64 | int err = 0; | ||
| 65 | |||
| 66 | job->gather_mem = NULL; | ||
| 67 | job->gathers = NULL; | ||
| 68 | job->gather_mem_size = 0; | ||
| 69 | |||
| 70 | if (num_cmdbufs) { | ||
| 71 | /* Allocate memory */ | ||
| 72 | job->gather_mem = nvmap_alloc(job->nvmap, | ||
| 73 | gather_size(num_cmdbufs), | ||
| 74 | 32, NVMAP_HANDLE_CACHEABLE, 0); | ||
| 75 | if (IS_ERR_OR_NULL(job->gather_mem)) { | ||
| 76 | err = PTR_ERR(job->gather_mem); | ||
| 77 | job->gather_mem = NULL; | ||
| 78 | goto error; | ||
| 79 | } | ||
| 80 | job->gather_mem_size = gather_size(num_cmdbufs); | ||
| 81 | |||
| 82 | /* Map memory to kernel */ | ||
| 83 | job->gathers = nvmap_mmap(job->gather_mem); | ||
| 84 | if (IS_ERR_OR_NULL(job->gathers)) { | ||
| 85 | err = PTR_ERR(job->gathers); | ||
| 86 | job->gathers = NULL; | ||
| 87 | goto error; | ||
| 88 | } | ||
| 89 | } | ||
| 90 | |||
| 91 | return 0; | ||
| 92 | |||
| 93 | error: | ||
| 94 | free_gathers(job); | ||
| 95 | return err; | ||
| 96 | } | ||
| 97 | |||
/*
 * Move the gather buffer from @oldjob to @newjob, reusing it when it is
 * large enough and both jobs share the same nvmap client; otherwise the
 * old buffer is freed and a fresh one allocated.  Either way @oldjob
 * no longer owns a gather buffer afterwards.
 */
static int realloc_gathers(struct nvhost_job *oldjob,
		struct nvhost_job *newjob,
		int num_cmdbufs)
{
	int err = 0;

	/* Check if we can reuse gather buffer */
	if (oldjob->gather_mem_size < gather_size(num_cmdbufs)
			|| oldjob->nvmap != newjob->nvmap) {
		free_gathers(oldjob);
		err = alloc_gathers(newjob, num_cmdbufs);
	} else {
		/* transfer ownership of the mapping to the new job */
		newjob->gather_mem = oldjob->gather_mem;
		newjob->gathers = oldjob->gathers;
		newjob->gather_mem_size = oldjob->gather_mem_size;

		oldjob->gather_mem = NULL;
		oldjob->gathers = NULL;
		oldjob->gather_mem_size = 0;
	}
	return err;
}
| 120 | |||
/*
 * Initialize a freshly allocated job and carve the trailing part of its
 * single allocation (sized by job_size()) into the pin array, the unpin
 * handle array, and the waitchk array.
 */
static void init_fields(struct nvhost_job *job,
		struct nvhost_submit_hdr_ext *hdr,
		int priority, int clientid)
{
	/* must mirror the layout computed in job_size() */
	int num_pins = hdr ? (hdr->num_relocs + hdr->num_cmdbufs)*2 : 0;
	int num_waitchks = hdr ? hdr->num_waitchks : 0;
	void *mem = job;

	/* First init state to zero */
	job->num_gathers = 0;
	job->num_pins = 0;
	job->num_unpins = 0;
	job->num_waitchk = 0;
	job->waitchk_mask = 0;
	job->syncpt_id = 0;
	job->syncpt_incrs = 0;
	job->syncpt_end = 0;
	job->priority = priority;
	job->clientid = clientid;
	job->null_kickoff = false;
	job->first_get = 0;
	job->num_slots = 0;

	/* Redistribute memory to the structs */
	mem += sizeof(struct nvhost_job);
	if (num_pins) {
		job->pinarray = mem;
		mem += num_pins * sizeof(struct nvmap_pinarray_elem);
		job->unpins = mem;
		mem += num_pins * sizeof(struct nvmap_handle *);
	} else {
		job->pinarray = NULL;
		job->unpins = NULL;
	}

	/* waitchk array sits after the pin arrays (or at the start
	 * of the tail when there are no pins) */
	job->waitchk = num_waitchks ? mem : NULL;

	/* Copy information from header */
	if (hdr) {
		job->waitchk_mask = hdr->waitchk_mask;
		job->syncpt_id = hdr->syncpt_id;
		job->syncpt_incrs = hdr->syncpt_incrs;
	}
}
| 165 | |||
| 166 | struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch, | ||
| 167 | struct nvhost_hwctx *hwctx, | ||
| 168 | struct nvhost_submit_hdr_ext *hdr, | ||
| 169 | struct nvmap_client *nvmap, | ||
| 170 | int priority, | ||
| 171 | int clientid) | ||
| 172 | { | ||
| 173 | struct nvhost_job *job = NULL; | ||
| 174 | int num_cmdbufs = hdr ? hdr->num_cmdbufs : 0; | ||
| 175 | int err = 0; | ||
| 176 | |||
| 177 | job = vzalloc(job_size(hdr)); | ||
| 178 | if (!job) | ||
| 179 | goto error; | ||
| 180 | |||
| 181 | kref_init(&job->ref); | ||
| 182 | job->ch = ch; | ||
| 183 | job->hwctx = hwctx; | ||
| 184 | if (hwctx) | ||
| 185 | hwctx->h->get(hwctx); | ||
| 186 | job->nvmap = nvmap ? nvmap_client_get(nvmap) : NULL; | ||
| 187 | |||
| 188 | err = alloc_gathers(job, num_cmdbufs); | ||
| 189 | if (err) | ||
| 190 | goto error; | ||
| 191 | |||
| 192 | init_fields(job, hdr, priority, clientid); | ||
| 193 | |||
| 194 | return job; | ||
| 195 | |||
| 196 | error: | ||
| 197 | if (job) | ||
| 198 | nvhost_job_put(job); | ||
| 199 | return NULL; | ||
| 200 | } | ||
| 201 | |||
/*
 * Allocate a replacement job for oldjob, reusing its gather buffer when
 * realloc_gathers() allows. A reference to oldjob is consumed on every
 * path: via nvhost_job_put(oldjob) on success, and in the error path
 * both jobs are put before returning NULL.
 */
struct nvhost_job *nvhost_job_realloc(
		struct nvhost_job *oldjob,
		struct nvhost_hwctx *hwctx,
		struct nvhost_submit_hdr_ext *hdr,
		struct nvmap_client *nvmap,
		int priority, int clientid)
{
	struct nvhost_job *newjob = NULL;
	int num_cmdbufs = hdr ? hdr->num_cmdbufs : 0;
	int err = 0;

	newjob = vzalloc(job_size(hdr));
	if (!newjob)
		goto error;
	kref_init(&newjob->ref);
	newjob->ch = oldjob->ch;
	newjob->hwctx = hwctx;
	if (hwctx)
		newjob->hwctx->h->get(newjob->hwctx);
	/* timeout carries over from the job being replaced */
	newjob->timeout = oldjob->timeout;
	newjob->nvmap = nvmap ? nvmap_client_get(nvmap) : NULL;

	/* May free oldjob's gather buffer or move it into newjob. */
	err = realloc_gathers(oldjob, newjob, num_cmdbufs);
	if (err)
		goto error;

	nvhost_job_put(oldjob);

	init_fields(newjob, hdr, priority, clientid);

	return newjob;

error:
	if (newjob)
		nvhost_job_put(newjob);
	if (oldjob)
		nvhost_job_put(oldjob);
	return NULL;
}
| 241 | |||
/* Take an additional reference on the job. */
void nvhost_job_get(struct nvhost_job *job)
{
	kref_get(&job->ref);
}
| 246 | |||
/*
 * kref release callback: free everything the job owns. Order matters:
 * the gather mapping is torn down before its backing allocation, and the
 * nvmap client reference is dropped only after nvmap_free() used it.
 */
static void job_free(struct kref *ref)
{
	struct nvhost_job *job = container_of(ref, struct nvhost_job, ref);

	if (job->hwctxref)
		job->hwctxref->h->put(job->hwctxref);
	if (job->hwctx)
		job->hwctx->h->put(job->hwctx);
	if (job->gathers)
		nvmap_munmap(job->gather_mem, job->gathers);
	if (job->gather_mem)
		nvmap_free(job->nvmap, job->gather_mem);
	if (job->nvmap)
		nvmap_client_put(job->nvmap);
	vfree(job);
}
| 263 | |||
/* Acquire reference to a hardware context. Used for keeping saved contexts in
 * memory. Only one such reference may be held per job (hwctxref must be
 * unset); it is released in job_free(). */
void nvhost_job_get_hwctx(struct nvhost_job *job, struct nvhost_hwctx *hwctx)
{
	BUG_ON(job->hwctxref);

	job->hwctxref = hwctx;
	hwctx->h->get(hwctx);
}
| 273 | |||
/* Drop a reference on the job; frees it via job_free() when it hits zero. */
void nvhost_job_put(struct nvhost_job *job)
{
	kref_put(&job->ref, job_free);
}
| 278 | |||
/*
 * Append a gather to the job and queue a pinarray entry so that
 * nvhost_job_pin() patches the gather's mem field with the pinned
 * address of mem_id. Caller must ensure the gathers/pinarray arrays
 * have room (sized from the submit header at allocation time).
 */
void nvhost_job_add_gather(struct nvhost_job *job,
		u32 mem_id, u32 words, u32 offset)
{
	struct nvmap_pinarray_elem *pin;
	struct nvhost_channel_gather *cur_gather =
		&job->gathers[job->num_gathers];

	pin = &job->pinarray[job->num_pins++];
	pin->patch_mem = (u32)nvmap_ref_to_handle(job->gather_mem);
	/* byte offset of cur_gather->mem within the gather buffer */
	pin->patch_offset = (void *)&(cur_gather->mem) - (void *)job->gathers;
	pin->pin_mem = nvmap_convert_handle_u2k(mem_id);
	pin->pin_offset = offset;
	cur_gather->words = words;
	cur_gather->mem_id = mem_id;
	cur_gather->offset = offset;
	job->num_gathers += 1;
}
| 296 | |||
| 297 | int nvhost_job_pin(struct nvhost_job *job) | ||
| 298 | { | ||
| 299 | int err = 0; | ||
| 300 | |||
| 301 | /* pin mem handles and patch physical addresses */ | ||
| 302 | job->num_unpins = nvmap_pin_array(job->nvmap, | ||
| 303 | nvmap_ref_to_handle(job->gather_mem), | ||
| 304 | job->pinarray, job->num_pins, | ||
| 305 | job->unpins); | ||
| 306 | if (job->num_unpins < 0) | ||
| 307 | err = job->num_unpins; | ||
| 308 | |||
| 309 | return err; | ||
| 310 | } | ||
| 311 | |||
/* Unpin all handles pinned by nvhost_job_pin(). */
void nvhost_job_unpin(struct nvhost_job *job)
{
	nvmap_unpin_handles(job->nvmap, job->unpins,
			job->num_unpins);
	/* poison the unpin list to make stale reuse easy to spot */
	memset(job->unpins, BAD_MAGIC,
			job->num_unpins * sizeof(struct nvmap_handle *));
}
| 319 | |||
/**
 * Debug routine used to dump job entries to the device's debug log
 * (visible with dynamic debug enabled).
 */
void nvhost_job_dump(struct device *dev, struct nvhost_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n",
		job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n",
		job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n",
		job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n",
		job->timeout);
	dev_dbg(dev, "    CTX 0x%p\n",
		job->hwctx);
	dev_dbg(dev, "    NUM_SLOTS   %d\n",
		job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n",
		job->num_unpins);
}
diff --git a/drivers/video/tegra/host/nvhost_job.h b/drivers/video/tegra/host/nvhost_job.h new file mode 100644 index 00000000000..ad9d1af60da --- /dev/null +++ b/drivers/video/tegra/host/nvhost_job.h | |||
| @@ -0,0 +1,150 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_job.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Interrupt Management | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_JOB_H | ||
| 22 | #define __NVHOST_JOB_H | ||
| 23 | |||
| 24 | #include <linux/nvhost_ioctl.h> | ||
| 25 | |||
| 26 | struct nvhost_channel; | ||
| 27 | struct nvhost_hwctx; | ||
| 28 | struct nvmap_client; | ||
| 29 | struct nvhost_waitchk; | ||
| 30 | struct nvmap_handle; | ||
| 31 | |||
| 32 | /* | ||
| 33 | * Each submit is tracked as a nvhost_job. | ||
| 34 | */ | ||
struct nvhost_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct nvhost_channel *ch;

	/* Hardware context valid for this client */
	struct nvhost_hwctx *hwctx;
	/* Identifier of the submitting client */
	int clientid;

	/* Nvmap to be used for pinning & unpinning memory */
	struct nvmap_client *nvmap;

	/* Gathers and their memory */
	struct nvmap_handle_ref *gather_mem;	/* backing nvmap allocation */
	struct nvhost_channel_gather *gathers;	/* kernel mapping of gather_mem */
	int num_gathers;
	int gather_mem_size;			/* bytes allocated in gather_mem */

	/* Wait checks to be processed at submit time */
	struct nvhost_waitchk *waitchk;
	int num_waitchk;
	u32 waitchk_mask;

	/* Array of handles to be pinned & unpinned */
	struct nvmap_pinarray_elem *pinarray;
	int num_pins;
	struct nvmap_handle **unpins;
	int num_unpins;

	/* Sync point id, number of increments and end related to the submit */
	u32 syncpt_id;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Priority of this submit. */
	int priority;

	/* Maximum time to wait for this job */
	int timeout;

	/* Null kickoff prevents submit from being sent to hardware */
	bool null_kickoff;

	/* Index and number of slots used in the push buffer */
	int first_get;
	int num_slots;

	/* Context to be freed */
	struct nvhost_hwctx *hwctxref;
};
| 90 | |||
| 91 | /* | ||
| 92 | * Allocate memory for a job. Just enough memory will be allocated to | ||
 * accommodate the submit announced in the submit header.
| 94 | */ | ||
| 95 | struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch, | ||
| 96 | struct nvhost_hwctx *hwctx, | ||
| 97 | struct nvhost_submit_hdr_ext *hdr, | ||
| 98 | struct nvmap_client *nvmap, | ||
| 99 | int priority, int clientid); | ||
| 100 | |||
| 101 | /* | ||
| 102 | * Allocate memory for a job. Just enough memory will be allocated to | ||
 * accommodate the submit announced in the submit header. Gather memory from
 * oldjob will be reused, and nvhost_job_put() will be called on it.
| 105 | */ | ||
| 106 | struct nvhost_job *nvhost_job_realloc(struct nvhost_job *oldjob, | ||
| 107 | struct nvhost_hwctx *hwctx, | ||
| 108 | struct nvhost_submit_hdr_ext *hdr, | ||
| 109 | struct nvmap_client *nvmap, | ||
| 110 | int priority, int clientid); | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Add a gather to a job. | ||
| 114 | */ | ||
| 115 | void nvhost_job_add_gather(struct nvhost_job *job, | ||
| 116 | u32 mem_id, u32 words, u32 offset); | ||
| 117 | |||
| 118 | /* | ||
| 119 | * Increment reference going to nvhost_job. | ||
| 120 | */ | ||
| 121 | void nvhost_job_get(struct nvhost_job *job); | ||
| 122 | |||
| 123 | /* | ||
| 124 | * Increment reference for a hardware context. | ||
| 125 | */ | ||
| 126 | void nvhost_job_get_hwctx(struct nvhost_job *job, struct nvhost_hwctx *hwctx); | ||
| 127 | |||
| 128 | /* | ||
| 129 | * Decrement reference job, free if goes to zero. | ||
| 130 | */ | ||
| 131 | void nvhost_job_put(struct nvhost_job *job); | ||
| 132 | |||
| 133 | /* | ||
| 134 | * Pin memory related to job. This handles relocation of addresses to the | ||
| 135 | * host1x address space. Handles both the gather memory and any other memory | ||
| 136 | * referred to from the gather buffers. | ||
| 137 | */ | ||
| 138 | int nvhost_job_pin(struct nvhost_job *job); | ||
| 139 | |||
| 140 | /* | ||
| 141 | * Unpin memory related to job. | ||
| 142 | */ | ||
| 143 | void nvhost_job_unpin(struct nvhost_job *job); | ||
| 144 | |||
| 145 | /* | ||
| 146 | * Dump contents of job to debug output. | ||
| 147 | */ | ||
| 148 | void nvhost_job_dump(struct device *dev, struct nvhost_job *job); | ||
| 149 | |||
| 150 | #endif | ||
diff --git a/drivers/video/tegra/host/nvhost_syncpt.c b/drivers/video/tegra/host/nvhost_syncpt.c new file mode 100644 index 00000000000..eb5176ea1bf --- /dev/null +++ b/drivers/video/tegra/host/nvhost_syncpt.c | |||
| @@ -0,0 +1,319 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_syncpt.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Syncpoints | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/nvhost_ioctl.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include "nvhost_syncpt.h" | ||
| 24 | #include "dev.h" | ||
| 25 | |||
| 26 | #define MAX_STUCK_CHECK_COUNT 15 | ||
| 27 | |||
| 28 | /** | ||
| 29 | * Resets syncpoint and waitbase values to sw shadows | ||
| 30 | */ | ||
| 31 | void nvhost_syncpt_reset(struct nvhost_syncpt *sp) | ||
| 32 | { | ||
| 33 | u32 i; | ||
| 34 | BUG_ON(!(syncpt_op(sp).reset && syncpt_op(sp).reset_wait_base)); | ||
| 35 | |||
| 36 | for (i = 0; i < sp->nb_pts; i++) | ||
| 37 | syncpt_op(sp).reset(sp, i); | ||
| 38 | for (i = 0; i < sp->nb_bases; i++) | ||
| 39 | syncpt_op(sp).reset_wait_base(sp, i); | ||
| 40 | wmb(); | ||
| 41 | } | ||
| 42 | |||
/**
 * Updates sw shadow state for client managed registers (used before
 * powering down the host).
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
	u32 i;
	BUG_ON(!(syncpt_op(sp).update_min && syncpt_op(sp).read_wait_base));

	for (i = 0; i < sp->nb_pts; i++) {
		if (client_managed(i))
			syncpt_op(sp).update_min(sp, i);
		else
			/* host-managed points must be idle: min == max */
			BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
	}

	for (i = 0; i < sp->nb_bases; i++)
		syncpt_op(sp).read_wait_base(sp, i);
}
| 61 | |||
/**
 * Updates the last value read from hardware. Caller is responsible for
 * host being powered (no busy/idle here, unlike nvhost_syncpt_read()).
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	BUG_ON(!syncpt_op(sp).update_min);

	return syncpt_op(sp).update_min(sp, id);
}
| 71 | |||
| 72 | /** | ||
| 73 | * Get the current syncpoint value | ||
| 74 | */ | ||
| 75 | u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id) | ||
| 76 | { | ||
| 77 | u32 val; | ||
| 78 | BUG_ON(!syncpt_op(sp).update_min); | ||
| 79 | nvhost_module_busy(syncpt_to_dev(sp)->dev); | ||
| 80 | val = syncpt_op(sp).update_min(sp, id); | ||
| 81 | nvhost_module_idle(syncpt_to_dev(sp)->dev); | ||
| 82 | return val; | ||
| 83 | } | ||
| 84 | |||
/**
 * Get the current syncpoint base: refreshes the base_val shadow from
 * hardware (under a power ref) and returns the shadowed value.
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	u32 val;
	BUG_ON(!syncpt_op(sp).read_wait_base);
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	syncpt_op(sp).read_wait_base(sp, id);
	val = sp->base_val[id];
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return val;
}
| 98 | |||
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	BUG_ON(!syncpt_op(sp).cpu_incr);
	syncpt_op(sp).cpu_incr(sp, id);
}
| 108 | |||
/**
 * Increment syncpoint value from cpu, updating cache. Only
 * client-managed points bump the max shadow here; for host-managed
 * points max is tracked at submit time.
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
	if (client_managed(id))
		nvhost_syncpt_incr_max(sp, id, 1);
	nvhost_module_busy(syncpt_to_dev(sp)->dev);
	nvhost_syncpt_cpu_incr(sp, id);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
}
| 120 | |||
/**
 * Main entrypoint for syncpoint value waits.
 *
 * @sp       syncpoint state
 * @id       syncpoint id to wait on
 * @thresh   threshold value to wait for
 * @timeout  maximum wait in jiffies; 0 polls only, NVHOST_NO_TIMEOUT
 *           waits forever
 * @value    if non-NULL, receives the syncpoint value on success
 *           (zeroed otherwise)
 *
 * Returns 0 when the threshold is reached, -EAGAIN on timeout (or when
 * timeout is 0 and the threshold has not been reached), -ENOMEM if the
 * waiter cannot be allocated, or a negative error from an interrupted
 * wait. BUG()s out after MAX_STUCK_CHECK_COUNT warning periods.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		return 0;
	}

	/* keep host alive */
	nvhost_module_busy(syncpt_to_dev(sp)->dev);

	/* try to read from register */
	val = syncpt_op(sp).update_min(sp, id);
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* ownership of waiter passes to the intr machinery here */
	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < SYNCPT_CHECK_PERIOD)
		low_timeout = timeout;

	/* wait for the syncpoint, or timeout, or signal; wake up every
	 * SYNCPT_CHECK_PERIOD to warn about stuck waits */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				nvhost_syncpt_is_expired(sp, id, thresh),
				check);
		if (remain > 0) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		/* NVHOST_NO_TIMEOUT never counts down, so we loop forever */
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout) {
			dev_warn(&syncpt_to_dev(sp)->dev->dev,
				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
				current->comm, id, syncpt_op(sp).name(sp, id),
				thresh, timeout);
			syncpt_op(sp).debug(sp);
			if (check_count > MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->dev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
				BUG();
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return err;
}
| 220 | |||
/**
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool nvhost_syncpt_is_expired(
	struct nvhost_syncpt *sp,
	u32 id,
	u32 thresh)
{
	u32 current_val;
	u32 future_val;
	smp_rmb();
	current_val = (u32)atomic_read(&sp->min_val[id]);
	future_val = (u32)atomic_read(&sp->max_val[id]);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val = the current value of the syncpoint.
	 * t = thresh = the value we are checking
	 * f = future_val = max_val = the value c will reach when all
	 *     outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 *  Consider all cases:
	 *
	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
	 *	B) .....c.....f..t..	Dtf > Dtc	expired
	 *	C) ..t..c.....f.....	Dtf > Dtc	expired	   (Dct very large)
	 *
	 *  Any case where f==c: always expired (for any t).	Dtf == Dcf
	 *  Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
	 *  Any case where t==f!=c: always wait.		Dtf <  Dtc (because Dtf==0,
	 *							Dtc!=0)
	 *
	 *  Other cases:
	 *
	 *	A) .....t..f..c.....	Dtf < Dtc	need to wait
	 *	A) .....f..c..t.....	Dtf < Dtc	need to wait
	 *	A) .....f..t..c.....	Dtf > Dtc	expired
	 *
	 *   So:
	 *	   Dtf >= Dtc implies EXPIRED	(return true)
	 *	   Dtf <  Dtc implies WAIT	(return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * If future value is zero, we have a client managed sync point. In
	 * that case we do a direct comparison.
	 * NOTE(review): the code below actually branches on client_managed(id),
	 * not on future_val == 0 - confirm the comment's claim that the two
	 * coincide for client-managed points.
	 */
	if (!client_managed(id))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
| 282 | |||
/* Dump chip-specific syncpoint debug state via the per-chip debug op. */
void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
	syncpt_op(sp).debug(sp);
}
| 287 | |||
/*
 * Try to acquire hardware module mutex idx. On success the host is kept
 * powered (the busy ref is held until nvhost_mutex_unlock()) and the
 * per-mutex lock count is bumped. Returns -EBUSY, with the power ref
 * dropped, if the mutex is already held.
 */
int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
{
	struct nvhost_master *host = syncpt_to_dev(sp);
	u32 reg;

	nvhost_module_busy(host->dev);
	reg = syncpt_op(sp).mutex_try_lock(sp, idx);
	if (reg) {
		/* non-zero register value: mutex held by someone else */
		nvhost_module_idle(host->dev);
		return -EBUSY;
	}
	atomic_inc(&sp->lock_counts[idx]);
	return 0;
}
| 302 | |||
/*
 * Release hardware module mutex idx and drop the power ref taken in
 * nvhost_mutex_try_lock().
 */
void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx)
{
	syncpt_op(sp).mutex_unlock(sp, idx);
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	atomic_dec(&sp->lock_counts[idx]);
}
| 309 | |||
/* check for old WAITs to be removed (avoiding a wrap); dispatches to the
 * chip-specific wait_check implementation */
int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp,
			     struct nvmap_client *nvmap,
			     u32 waitchk_mask,
			     struct nvhost_waitchk *wait,
			     int num_waitchk)
{
	return syncpt_op(sp).wait_check(sp, nvmap,
			waitchk_mask, wait, num_waitchk);
}
diff --git a/drivers/video/tegra/host/nvhost_syncpt.h b/drivers/video/tegra/host/nvhost_syncpt.h new file mode 100644 index 00000000000..5b339178d1e --- /dev/null +++ b/drivers/video/tegra/host/nvhost_syncpt.h | |||
| @@ -0,0 +1,155 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/nvhost_syncpt.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host Syncpoints | ||
| 5 | * | ||
| 6 | * Copyright (c) 2010-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __NVHOST_SYNCPT_H | ||
| 22 | #define __NVHOST_SYNCPT_H | ||
| 23 | |||
| 24 | #include <linux/kernel.h> | ||
| 25 | #include <linux/sched.h> | ||
| 26 | #include <linux/nvhost.h> | ||
| 27 | #include <mach/nvmap.h> | ||
| 28 | #include <linux/atomic.h> | ||
| 29 | |||
| 30 | struct nvhost_syncpt; | ||
| 31 | struct nvhost_waitchk; | ||
| 32 | |||
| 33 | /* host managed and invalid syncpt id */ | ||
| 34 | #define NVSYNCPT_GRAPHICS_HOST (0) | ||
| 35 | #define NVSYNCPT_INVALID (-1) | ||
| 36 | |||
struct nvhost_syncpt {
	atomic_t *min_val;	/* shadow of the current hw value, per point */
	atomic_t *max_val;	/* value hw will reach when all pending
				 * increments complete, per point */
	u32 *base_val;		/* shadow of the wait base registers */
	u32 nb_pts;		/* number of syncpoints */
	u32 nb_bases;		/* number of wait bases */
	u32 client_managed;	/* bitmask of client-managed syncpoint ids */
	atomic_t *lock_counts;	/* outstanding holders per module mutex */
	u32 nb_mlocks;		/* number of module mutexes */
};
| 47 | |||
| 48 | int nvhost_syncpt_init(struct nvhost_syncpt *); | ||
| 49 | #define client_managed(id) (BIT(id) & sp->client_managed) | ||
| 50 | #define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt) | ||
| 51 | #define syncpt_op(sp) (syncpt_to_dev(sp)->op.syncpt) | ||
| 52 | #define SYNCPT_CHECK_PERIOD (2*HZ) | ||
| 53 | |||
| 54 | |||
/**
 * Updates the value sent to hardware: bumps the max shadow by incrs and
 * returns the new max.
 */
static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
					u32 id, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val[id]);
}
| 63 | |||
/**
 * Updates the value sent to hardware: overwrites the max shadow with val.
 */
static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
					u32 id, u32 val)
{
	atomic_set(&sp->max_val[id], val);
	smp_wmb();	/* publish before other CPUs read the shadow */
	return val;
}
| 74 | |||
/* Read the max shadow (value hw will eventually reach) for syncpoint id. */
static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
{
	smp_rmb();
	return (u32)atomic_read(&sp->max_val[id]);
}
| 80 | |||
/* Read the min shadow (last value seen from hw) for syncpoint id. */
static inline u32 nvhost_syncpt_read_min(struct nvhost_syncpt *sp, u32 id)
{
	smp_rmb();
	return (u32)atomic_read(&sp->min_val[id]);
}
| 86 | |||
/*
 * Returns true if real does not exceed the tracked max (always true for
 * client-managed points, whose max is not tracked here).
 */
static inline bool nvhost_syncpt_check_max(struct nvhost_syncpt *sp,
					u32 id, u32 real)
{
	u32 max;
	if (client_managed(id))
		return true;
	max = nvhost_syncpt_read_max(sp, id);
	/* wrap-safe signed comparison */
	return (s32)(max - real) >= 0;
}
| 96 | |||
/**
 * Returns true if syncpoint min == max, i.e. no increments outstanding.
 */
static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
{
	int min, max;
	smp_rmb();
	min = atomic_read(&sp->min_val[id]);
	max = atomic_read(&sp->max_val[id]);
	return (min == max);
}
| 108 | |||
| 109 | void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id); | ||
| 110 | |||
| 111 | u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id); | ||
| 112 | bool nvhost_syncpt_is_expired(struct nvhost_syncpt *sp, u32 id, u32 thresh); | ||
| 113 | |||
| 114 | void nvhost_syncpt_save(struct nvhost_syncpt *sp); | ||
| 115 | |||
| 116 | void nvhost_syncpt_reset(struct nvhost_syncpt *sp); | ||
| 117 | |||
| 118 | u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id); | ||
| 119 | u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id); | ||
| 120 | |||
| 121 | void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id); | ||
| 122 | |||
| 123 | int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh, | ||
| 124 | u32 timeout, u32 *value); | ||
| 125 | |||
/* Wait without a practical timeout (blocks until the threshold is reached). */
static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
{
	return nvhost_syncpt_wait_timeout(sp, id, thresh,
					  MAX_SCHEDULE_TIMEOUT, NULL);
}
| 131 | |||
| 132 | /* | ||
| 133 | * Check driver supplied waitchk structs for syncpt thresholds | ||
| 134 | * that have already been satisfied and NULL the comparison (to | ||
| 135 | * avoid a wrap condition in the HW). | ||
| 136 | * | ||
| 137 | * @param: sp - global shadowed syncpt struct | ||
| 138 | * @param: nvmap - needed to access command buffer | ||
| 139 | * @param: mask - bit mask of syncpt IDs referenced in WAITs | ||
 * @param: wait - start of filled in array of waitchk structs
 * @param: num_waitchk - number of valid waitchk structs in the array
| 142 | */ | ||
| 143 | int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp, | ||
| 144 | struct nvmap_client *nvmap, | ||
| 145 | u32 mask, | ||
| 146 | struct nvhost_waitchk *wait, | ||
| 147 | int num_waitchk); | ||
| 148 | |||
| 149 | void nvhost_syncpt_debug(struct nvhost_syncpt *sp); | ||
| 150 | |||
| 151 | int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx); | ||
| 152 | |||
| 153 | void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx); | ||
| 154 | |||
| 155 | #endif | ||
diff --git a/drivers/video/tegra/host/t20/Makefile b/drivers/video/tegra/host/t20/Makefile new file mode 100644 index 00000000000..c2ade9bf925 --- /dev/null +++ b/drivers/video/tegra/host/t20/Makefile | |||
| @@ -0,0 +1,8 @@ | |||
| 1 | GCOV_PROFILE := y | ||
| 2 | |||
| 3 | EXTRA_CFLAGS += -Idrivers/video/tegra/host | ||
| 4 | |||
| 5 | nvhost-t20-objs = \ | ||
| 6 | t20.o | ||
| 7 | |||
| 8 | obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t20.o | ||
diff --git a/drivers/video/tegra/host/t20/t20.c b/drivers/video/tegra/host/t20/t20.c new file mode 100644 index 00000000000..24ddedc842e --- /dev/null +++ b/drivers/video/tegra/host/t20/t20.c | |||
| @@ -0,0 +1,233 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/t20/t20.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Init for T20 Architecture Chips | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/slab.h> | ||
| 22 | #include <mach/powergate.h> | ||
| 23 | #include "dev.h" | ||
| 24 | #include "t20.h" | ||
| 25 | #include "host1x/host1x_channel.h" | ||
| 26 | #include "host1x/host1x_syncpt.h" | ||
| 27 | #include "host1x/host1x_hardware.h" | ||
| 28 | #include "host1x/host1x_cdma.h" | ||
| 29 | #include "gr3d/gr3d.h" | ||
| 30 | #include "gr3d/gr3d_t20.h" | ||
| 31 | #include "mpe/mpe.h" | ||
| 32 | #include "nvhost_hwctx.h" | ||
| 33 | |||
| 34 | #define NVMODMUTEX_2D_FULL (1) | ||
| 35 | #define NVMODMUTEX_2D_SIMPLE (2) | ||
| 36 | #define NVMODMUTEX_2D_SB_A (3) | ||
| 37 | #define NVMODMUTEX_2D_SB_B (4) | ||
| 38 | #define NVMODMUTEX_3D (5) | ||
| 39 | #define NVMODMUTEX_DISPLAYA (6) | ||
| 40 | #define NVMODMUTEX_DISPLAYB (7) | ||
| 41 | #define NVMODMUTEX_VI (8) | ||
| 42 | #define NVMODMUTEX_DSI (9) | ||
| 43 | |||
| 44 | #define NVHOST_NUMCHANNELS (NV_HOST1X_CHANNELS - 1) | ||
| 45 | |||
/*
 * Static per-channel device table for T20.  Entry order follows the
 * host1x channel index; t20_get_nvhost_device() resolves entries by
 * .name.  An .id of -1 denotes a single anonymous instance.
 */
struct nvhost_device t20_devices[] = {
{
	/* channel 0 */
	.name = "display",
	.id = -1,
	.index = 0,
	/* syncpoints raised by the display heads (A/B/C plus vblanks) */
	.syncpts = BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
		   BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
		   BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
		   BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
	.modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 1 */
	.name = "gr3d",
	.id = -1,
	.index = 1,
	.syncpts = BIT(NVSYNCPT_3D),
	.waitbases = BIT(NVWAITBASE_3D),
	.modulemutexes = BIT(NVMODMUTEX_3D),
	.class = NV_GRAPHICS_3D_CLASS_ID,
	.prepare_poweroff = nvhost_gr3d_prepare_power_off,
	.alloc_hwctx_handler = nvhost_gr3d_t20_ctxhandler_init,
	/* trailing {} terminates the clock list */
	.clocks = {{"gr3d", UINT_MAX}, {"emc", UINT_MAX}, {} },
	.powergate_ids = {TEGRA_POWERGATE_3D, -1},
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 2 */
	.name = "gr2d",
	.id = -1,
	.index = 2,
	.syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
	.waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
	.modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
			 BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
	.clocks = { {"gr2d", UINT_MAX},
		    {"epp", UINT_MAX},
		    {"emc", UINT_MAX} },
	NVHOST_MODULE_NO_POWERGATE_IDS,
	/* 0: presumably gate clocks as soon as the unit idles -- confirm */
	.clockgate_delay = 0,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 3 */
	.name = "isp",
	.id = -1,
	.index = 3,
	/* ISP raises no syncpoints of its own */
	.syncpts = 0,
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_ISP,
},
{
	/* channel 4 */
	.name = "vi",
	.id = -1,
	.index = 4,
	.syncpts = BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
		   BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
		   BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
		   BIT(NVSYNCPT_VI_ISP_4),
	.modulemutexes = BIT(NVMODMUTEX_VI),
	/* NOTE(review): exclusive presumably means single-open channel --
	 * confirm against the channel-open path */
	.exclusive = true,
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_VI,
},
{
	/* channel 5 */
	.name = "mpe",
	.id = -1,
	.index = 5,
	.syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
		   BIT(NVSYNCPT_MPE_WR_SAFE),
	.waitbases = BIT(NVWAITBASE_MPE),
	.class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
	.waitbasesync = true,
	/* NOTE(review): keepalive presumably blocks power management while
	 * idle -- confirm in nvhost_acm */
	.keepalive = true,
	.prepare_poweroff = nvhost_mpe_prepare_power_off,
	.alloc_hwctx_handler = nvhost_mpe_ctxhandler_init,
	.clocks = { {"mpe", UINT_MAX},
		    {"emc", UINT_MAX} },
	.powergate_ids = {TEGRA_POWERGATE_MPE, -1},
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_MPE,
},
{
	/* channel 6 */
	.name = "dsi",
	.id = -1,
	.index = 6,
	.syncpts = BIT(NVSYNCPT_DSI),
	.modulemutexes = BIT(NVMODMUTEX_DSI),
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
} };
| 148 | |||
| 149 | |||
| 150 | static inline void __iomem *t20_channel_aperture(void __iomem *p, int ndx) | ||
| 151 | { | ||
| 152 | p += NV_HOST1X_CHANNEL0_BASE; | ||
| 153 | p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES; | ||
| 154 | return p; | ||
| 155 | } | ||
| 156 | |||
| 157 | static inline int t20_nvhost_hwctx_handler_init(struct nvhost_channel *ch) | ||
| 158 | { | ||
| 159 | int err = 0; | ||
| 160 | unsigned long syncpts = ch->dev->syncpts; | ||
| 161 | unsigned long waitbases = ch->dev->waitbases; | ||
| 162 | u32 syncpt = find_first_bit(&syncpts, BITS_PER_LONG); | ||
| 163 | u32 waitbase = find_first_bit(&waitbases, BITS_PER_LONG); | ||
| 164 | |||
| 165 | if (ch->dev->alloc_hwctx_handler) { | ||
| 166 | ch->ctxhandler = ch->dev->alloc_hwctx_handler(syncpt, | ||
| 167 | waitbase, ch); | ||
| 168 | if (!ch->ctxhandler) | ||
| 169 | err = -ENOMEM; | ||
| 170 | } | ||
| 171 | |||
| 172 | return err; | ||
| 173 | } | ||
| 174 | |||
| 175 | static int t20_channel_init(struct nvhost_channel *ch, | ||
| 176 | struct nvhost_master *dev, int index) | ||
| 177 | { | ||
| 178 | ch->chid = index; | ||
| 179 | mutex_init(&ch->reflock); | ||
| 180 | mutex_init(&ch->submitlock); | ||
| 181 | |||
| 182 | ch->aperture = t20_channel_aperture(dev->aperture, index); | ||
| 183 | |||
| 184 | return t20_nvhost_hwctx_handler_init(ch); | ||
| 185 | } | ||
| 186 | |||
/*
 * Install T20 channel support on the master: fixed channel count plus
 * the generic host1x submit/read3dreg paths and the T20 per-channel
 * init.  Always succeeds.
 */
int nvhost_init_t20_channel_support(struct nvhost_master *host)
{
	host->nb_channels = NVHOST_NUMCHANNELS;

	host->op.channel.init = t20_channel_init;
	host->op.channel.submit = host1x_channel_submit;
	host->op.channel.read3dreg = host1x_channel_read_3d_reg;

	return 0;
}
| 197 | |||
| 198 | struct nvhost_device *t20_get_nvhost_device(struct nvhost_master *host, | ||
| 199 | char *name) | ||
| 200 | { | ||
| 201 | int i; | ||
| 202 | |||
| 203 | for (i = 0; i < host->nb_channels; i++) { | ||
| 204 | if (strcmp(t20_devices[i].name, name) == 0) | ||
| 205 | return &t20_devices[i]; | ||
| 206 | } | ||
| 207 | |||
| 208 | return NULL; | ||
| 209 | } | ||
| 210 | |||
| 211 | int nvhost_init_t20_support(struct nvhost_master *host) | ||
| 212 | { | ||
| 213 | int err; | ||
| 214 | |||
| 215 | /* don't worry about cleaning up on failure... "remove" does it. */ | ||
| 216 | err = nvhost_init_t20_channel_support(host); | ||
| 217 | if (err) | ||
| 218 | return err; | ||
| 219 | err = host1x_init_cdma_support(host); | ||
| 220 | if (err) | ||
| 221 | return err; | ||
| 222 | err = nvhost_init_t20_debug_support(host); | ||
| 223 | if (err) | ||
| 224 | return err; | ||
| 225 | err = host1x_init_syncpt_support(host); | ||
| 226 | if (err) | ||
| 227 | return err; | ||
| 228 | err = nvhost_init_t20_intr_support(host); | ||
| 229 | if (err) | ||
| 230 | return err; | ||
| 231 | host->op.nvhost_dev.get_nvhost_device = t20_get_nvhost_device; | ||
| 232 | return 0; | ||
| 233 | } | ||
diff --git a/drivers/video/tegra/host/t20/t20.h b/drivers/video/tegra/host/t20/t20.h new file mode 100644 index 00000000000..93555a55b58 --- /dev/null +++ b/drivers/video/tegra/host/t20/t20.h | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/t20/t20.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Chip support for T20 | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
#ifndef _NVHOST_T20_H_
#define _NVHOST_T20_H_

/* Forward declarations; full definitions live elsewhere in the driver. */
struct nvhost_master;
struct nvhost_module;

/* Install T20 channel ops (count, init, submit) on the master. */
int nvhost_init_t20_channel_support(struct nvhost_master *);
/* Install T20 debug/dump support. */
int nvhost_init_t20_debug_support(struct nvhost_master *);
/* Install T20 syncpt ops. */
int nvhost_init_t20_syncpt_support(struct nvhost_master *);
/* Install T20 interrupt handling. */
int nvhost_init_t20_intr_support(struct nvhost_master *);
/* One-stop T20 init: channel, cdma, debug, syncpt and intr support. */
int nvhost_init_t20_support(struct nvhost_master *host);
/* Save hardware context for `mod`; presumably completion is signalled
 * via syncpt_id -- see the implementation for exact semantics. */
int nvhost_t20_save_context(struct nvhost_module *mod, u32 syncpt_id);

#endif /* _NVHOST_T20_H_ */
diff --git a/drivers/video/tegra/host/t30/Makefile b/drivers/video/tegra/host/t30/Makefile new file mode 100644 index 00000000000..b343eb4fc7c --- /dev/null +++ b/drivers/video/tegra/host/t30/Makefile | |||
| @@ -0,0 +1,8 @@ | |||
# Enable gcov coverage instrumentation for this directory.
GCOV_PROFILE := y

# Resolve "dev.h", "host1x/..." etc. relative to the nvhost top level.
EXTRA_CFLAGS += -Idrivers/video/tegra/host

nvhost-t30-objs = \
	t30.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-t30.o
diff --git a/drivers/video/tegra/host/t30/t30.c b/drivers/video/tegra/host/t30/t30.c new file mode 100644 index 00000000000..8a8b1f4d924 --- /dev/null +++ b/drivers/video/tegra/host/t30/t30.c | |||
| @@ -0,0 +1,251 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/t30/t30.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Init for T30 Architecture Chips | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/mutex.h> | ||
| 22 | #include <mach/powergate.h> | ||
| 23 | #include <mach/iomap.h> | ||
| 24 | #include "dev.h" | ||
| 25 | #include "t20/t20.h" | ||
| 26 | #include "t30.h" | ||
| 27 | #include "gr3d/gr3d.h" | ||
| 28 | #include "mpe/mpe.h" | ||
| 29 | #include "gr3d/gr3d_t30.h" | ||
| 30 | #include "gr3d/scale3d.h" | ||
| 31 | #include "host1x/host1x_hardware.h" | ||
| 32 | #include "host1x/host1x_cdma.h" | ||
| 33 | #include "host1x/host1x_syncpt.h" | ||
| 34 | #include "chip_support.h" | ||
| 35 | |||
| 36 | #define NVMODMUTEX_2D_FULL (1) | ||
| 37 | #define NVMODMUTEX_2D_SIMPLE (2) | ||
| 38 | #define NVMODMUTEX_2D_SB_A (3) | ||
| 39 | #define NVMODMUTEX_2D_SB_B (4) | ||
| 40 | #define NVMODMUTEX_3D (5) | ||
| 41 | #define NVMODMUTEX_DISPLAYA (6) | ||
| 42 | #define NVMODMUTEX_DISPLAYB (7) | ||
| 43 | #define NVMODMUTEX_VI (8) | ||
| 44 | #define NVMODMUTEX_DSI (9) | ||
| 45 | |||
| 46 | #define NVHOST_CHANNEL_BASE 0 | ||
| 47 | |||
/*
 * Static per-channel device table for T30.  Entry order follows the
 * host1x channel index; t30_get_nvhost_device() resolves entries by
 * .name.  An .id of -1 denotes a single anonymous instance.
 */
struct nvhost_device t30_devices[] = {
{
	/* channel 0 */
	.name = "display",
	.id = -1,
	.index = 0,
	/* syncpoints raised by the display heads (A/B/C plus vblanks) */
	.syncpts = BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) |
		   BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) |
		   BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) |
		   BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
	.modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 1 */
	.name = "gr3d",
	.id = -1,
	.index = 1,
	.syncpts = BIT(NVSYNCPT_3D),
	.waitbases = BIT(NVWAITBASE_3D),
	.modulemutexes = BIT(NVMODMUTEX_3D),
	.class = NV_GRAPHICS_3D_CLASS_ID,
	.prepare_poweroff = nvhost_gr3d_prepare_power_off,
	/* 3d frequency-scaling hooks (see gr3d/scale3d.h) */
	.busy = nvhost_scale3d_notify_busy,
	.idle = nvhost_scale3d_notify_idle,
	.init = nvhost_scale3d_init,
	.deinit = nvhost_scale3d_deinit,
	.suspend = nvhost_scale3d_suspend,
	.alloc_hwctx_handler = nvhost_gr3d_t30_ctxhandler_init,
	.clocks = { {"gr3d", UINT_MAX},
		    {"gr3d2", UINT_MAX},
		    {"emc", UINT_MAX} },
	/* NOTE(review): other entries end powergate_ids with a -1
	 * sentinel; confirm the array holds exactly two slots or this
	 * needs a terminator as well */
	.powergate_ids = { TEGRA_POWERGATE_3D,
			   TEGRA_POWERGATE_3D1 },
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.can_powergate = false,
	.powergate_delay = 250,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 2 */
	.name = "gr2d",
	.id = -1,
	.index = 2,
	.syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
	.waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
	.modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
			 BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
	/* unlike T20, gr2d/epp run at default rate; emc pinned to 300 MHz */
	.clocks = { {"gr2d", 0},
		    {"epp", 0},
		    {"emc", 300000000} },
	NVHOST_MODULE_NO_POWERGATE_IDS,
	/* 0: presumably gate clocks as soon as the unit idles -- confirm */
	.clockgate_delay = 0,
	.moduleid = NVHOST_MODULE_NONE,
},
{
	/* channel 3 */
	.name = "isp",
	.id = -1,
	.index = 3,
	/* ISP raises no syncpoints of its own */
	.syncpts = 0,
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_ISP,
},
{
	/* channel 4 */
	.name = "vi",
	.id = -1,
	.index = 4,
	.syncpts = BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) |
		   BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
		   BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
		   BIT(NVSYNCPT_VI_ISP_4),
	.modulemutexes = BIT(NVMODMUTEX_VI),
	/* NOTE(review): exclusive presumably means single-open channel --
	 * confirm against the channel-open path */
	.exclusive = true,
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_VI,
},
{
	/* channel 5 */
	.name = "mpe",
	.id = -1,
	.index = 5,
	.syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
		   BIT(NVSYNCPT_MPE_WR_SAFE),
	.waitbases = BIT(NVWAITBASE_MPE),
	.class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
	.waitbasesync = true,
	/* NOTE(review): keepalive presumably blocks power management while
	 * idle -- confirm in nvhost_acm */
	.keepalive = true,
	.prepare_poweroff = nvhost_mpe_prepare_power_off,
	.alloc_hwctx_handler = nvhost_mpe_ctxhandler_init,
	.clocks = { {"mpe", UINT_MAX},
		    {"emc", UINT_MAX} },
	.powergate_ids = {TEGRA_POWERGATE_MPE, -1},
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.can_powergate = true,
	.powergate_delay = 100,
	.moduleid = NVHOST_MODULE_MPE,
},
{
	/* channel 6 */
	.name = "dsi",
	.id = -1,
	.index = 6,
	.syncpts = BIT(NVSYNCPT_DSI),
	.modulemutexes = BIT(NVMODMUTEX_DSI),
	NVHOST_MODULE_NO_POWERGATE_IDS,
	NVHOST_DEFAULT_CLOCKGATE_DELAY,
	.moduleid = NVHOST_MODULE_NONE,
} };
| 162 | |||
| 163 | static inline int t30_nvhost_hwctx_handler_init(struct nvhost_channel *ch) | ||
| 164 | { | ||
| 165 | int err = 0; | ||
| 166 | unsigned long syncpts = ch->dev->syncpts; | ||
| 167 | unsigned long waitbases = ch->dev->waitbases; | ||
| 168 | u32 syncpt = find_first_bit(&syncpts, BITS_PER_LONG); | ||
| 169 | u32 waitbase = find_first_bit(&waitbases, BITS_PER_LONG); | ||
| 170 | |||
| 171 | if (ch->dev->alloc_hwctx_handler) { | ||
| 172 | ch->ctxhandler = ch->dev->alloc_hwctx_handler(syncpt, | ||
| 173 | waitbase, ch); | ||
| 174 | if (!ch->ctxhandler) | ||
| 175 | err = -ENOMEM; | ||
| 176 | } | ||
| 177 | |||
| 178 | return err; | ||
| 179 | } | ||
| 180 | |||
| 181 | static inline void __iomem *t30_channel_aperture(void __iomem *p, int ndx) | ||
| 182 | { | ||
| 183 | ndx += NVHOST_CHANNEL_BASE; | ||
| 184 | p += NV_HOST1X_CHANNEL0_BASE; | ||
| 185 | p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES; | ||
| 186 | return p; | ||
| 187 | } | ||
| 188 | |||
| 189 | static int t30_channel_init(struct nvhost_channel *ch, | ||
| 190 | struct nvhost_master *dev, int index) | ||
| 191 | { | ||
| 192 | ch->chid = index; | ||
| 193 | mutex_init(&ch->reflock); | ||
| 194 | mutex_init(&ch->submitlock); | ||
| 195 | |||
| 196 | ch->aperture = t30_channel_aperture(dev->aperture, index); | ||
| 197 | |||
| 198 | return t30_nvhost_hwctx_handler_init(ch); | ||
| 199 | } | ||
| 200 | |||
| 201 | int nvhost_init_t30_channel_support(struct nvhost_master *host) | ||
| 202 | { | ||
| 203 | int result = nvhost_init_t20_channel_support(host); | ||
| 204 | host->op.channel.init = t30_channel_init; | ||
| 205 | |||
| 206 | return result; | ||
| 207 | } | ||
| 208 | int nvhost_init_t30_debug_support(struct nvhost_master *host) | ||
| 209 | { | ||
| 210 | nvhost_init_t20_debug_support(host); | ||
| 211 | host->op.debug.debug_init = nvhost_scale3d_debug_init; | ||
| 212 | |||
| 213 | return 0; | ||
| 214 | } | ||
| 215 | |||
| 216 | struct nvhost_device *t30_get_nvhost_device(struct nvhost_master *host, | ||
| 217 | char *name) | ||
| 218 | { | ||
| 219 | int i; | ||
| 220 | |||
| 221 | for (i = 0; i < host->nb_channels; i++) { | ||
| 222 | if (strcmp(t30_devices[i].name, name) == 0) | ||
| 223 | return &t30_devices[i]; | ||
| 224 | } | ||
| 225 | |||
| 226 | return NULL; | ||
| 227 | } | ||
| 228 | |||
| 229 | int nvhost_init_t30_support(struct nvhost_master *host) | ||
| 230 | { | ||
| 231 | int err; | ||
| 232 | |||
| 233 | /* don't worry about cleaning up on failure... "remove" does it. */ | ||
| 234 | err = nvhost_init_t30_channel_support(host); | ||
| 235 | if (err) | ||
| 236 | return err; | ||
| 237 | err = host1x_init_cdma_support(host); | ||
| 238 | if (err) | ||
| 239 | return err; | ||
| 240 | err = nvhost_init_t30_debug_support(host); | ||
| 241 | if (err) | ||
| 242 | return err; | ||
| 243 | err = host1x_init_syncpt_support(host); | ||
| 244 | if (err) | ||
| 245 | return err; | ||
| 246 | err = nvhost_init_t20_intr_support(host); | ||
| 247 | if (err) | ||
| 248 | return err; | ||
| 249 | host->op.nvhost_dev.get_nvhost_device = t30_get_nvhost_device; | ||
| 250 | return 0; | ||
| 251 | } | ||
diff --git a/drivers/video/tegra/host/t30/t30.h b/drivers/video/tegra/host/t30/t30.h new file mode 100644 index 00000000000..0446dbd19b3 --- /dev/null +++ b/drivers/video/tegra/host/t30/t30.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/t30/t30.h | ||
| 3 | * | ||
| 4 | * Tegra Graphics Chip support for Tegra3 | ||
| 5 | * | ||
| 6 | * Copyright (c) 2011-2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
#ifndef _NVHOST_T30_H_
#define _NVHOST_T30_H_

/* Forward declaration; full definition lives in dev.h. */
struct nvhost_master;

/* Install T30 channel ops (T20 ops with channel init overridden). */
int nvhost_init_t30_channel_support(struct nvhost_master *);
/* Install debug support incl. the 3d scaling debugfs hook. */
int nvhost_init_t30_debug_support(struct nvhost_master *);
/* One-stop T30 init: channel, cdma, debug, syncpt and intr support. */
int nvhost_init_t30_support(struct nvhost_master *host);

#endif /* _NVHOST_T30_H_ */
diff --git a/drivers/video/tegra/host/vi/Makefile b/drivers/video/tegra/host/vi/Makefile new file mode 100644 index 00000000000..8c130e49814 --- /dev/null +++ b/drivers/video/tegra/host/vi/Makefile | |||
| @@ -0,0 +1,7 @@ | |||
# Enable gcov coverage instrumentation for this directory.
GCOV_PROFILE := y
# Resolve "dev.h", "bus_client.h" etc. from the nvhost top level.
EXTRA_CFLAGS += -Idrivers/video/tegra/host

nvhost-vi-objs = \
	vi.o

obj-$(CONFIG_TEGRA_GRHOST) += nvhost-vi.o
diff --git a/drivers/video/tegra/host/vi/vi.c b/drivers/video/tegra/host/vi/vi.c new file mode 100644 index 00000000000..71d517152ad --- /dev/null +++ b/drivers/video/tegra/host/vi/vi.c | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | /* | ||
| 2 | * drivers/video/tegra/host/vi/vi.c | ||
| 3 | * | ||
| 4 | * Tegra Graphics Host VI | ||
| 5 | * | ||
| 6 | * Copyright (c) 2012, NVIDIA Corporation. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms and conditions of the GNU General Public License, | ||
| 10 | * version 2, as published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 15 | * more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "dev.h" | ||
| 22 | #include "bus_client.h" | ||
| 23 | |||
/* Probe: hand off to the common nvhost client-device bring-up. */
static int __devinit vi_probe(struct nvhost_device *dev)
{
	return nvhost_client_device_init(dev);
}
| 28 | |||
/* Remove hook: clean-up is still TODO; currently a no-op that
 * reports success. */
static int __exit vi_remove(struct nvhost_device *dev)
{
	/* Add clean-up */
	return 0;
}
| 34 | |||
/* PM suspend hook: defer to the common nvhost client suspend path
 * (the pm_message_t state is unused). */
static int vi_suspend(struct nvhost_device *dev, pm_message_t state)
{
	return nvhost_client_device_suspend(dev);
}
| 39 | |||
/* PM resume hook: no state to restore; just log the event. */
static int vi_resume(struct nvhost_device *dev)
{
	dev_info(&dev->dev, "resuming\n");
	return 0;
}
| 45 | |||
/* Device handle fetched from the chip-specific table in vi_init(). */
struct nvhost_device *vi_device;

/* nvhost bus driver binding for the "vi" device. */
static struct nvhost_driver vi_driver = {
	.probe = vi_probe,
	.remove = __exit_p(vi_remove),
#ifdef CONFIG_PM
	.suspend = vi_suspend,
	.resume = vi_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = "vi",
	}
};
| 60 | |||
| 61 | static int __init vi_init(void) | ||
| 62 | { | ||
| 63 | int err; | ||
| 64 | |||
| 65 | vi_device = nvhost_get_device("vi"); | ||
| 66 | if (!vi_device) | ||
| 67 | return -ENXIO; | ||
| 68 | |||
| 69 | err = nvhost_device_register(vi_device); | ||
| 70 | if (err) | ||
| 71 | return err; | ||
| 72 | |||
| 73 | return nvhost_driver_register(&vi_driver); | ||
| 74 | } | ||
| 75 | |||
/* Module teardown: unbind the driver.
 * NOTE(review): the device registered in vi_init() is never
 * unregistered here -- confirm whether the bus core reclaims it. */
static void __exit vi_exit(void)
{
	nvhost_driver_unregister(&vi_driver);
}
| 80 | |||
| 81 | module_init(vi_init); | ||
| 82 | module_exit(vi_exit); | ||
