diff options
Diffstat (limited to 'drivers/gpu')
26 files changed, 7016 insertions, 0 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index c4bf9a1cf4a6..b02ac62f5863 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
| @@ -266,3 +266,5 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig" | |||
| 266 | source "drivers/gpu/drm/imx/Kconfig" | 266 | source "drivers/gpu/drm/imx/Kconfig" |
| 267 | 267 | ||
| 268 | source "drivers/gpu/drm/vc4/Kconfig" | 268 | source "drivers/gpu/drm/vc4/Kconfig" |
| 269 | |||
| 270 | source "drivers/gpu/drm/etnaviv/Kconfig" | ||
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 1e9ff4c3e3db..f858aa25fbb2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
| @@ -75,3 +75,4 @@ obj-y += i2c/ | |||
| 75 | obj-y += panel/ | 75 | obj-y += panel/ |
| 76 | obj-y += bridge/ | 76 | obj-y += bridge/ |
| 77 | obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/ | 77 | obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/ |
| 78 | obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/ | ||
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig new file mode 100644 index 000000000000..2cde7a5442fb --- /dev/null +++ b/drivers/gpu/drm/etnaviv/Kconfig | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | |||
| 2 | config DRM_ETNAVIV | ||
| 3 | tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" | ||
| 4 | depends on DRM | ||
| 5 | depends on ARCH_MXC || ARCH_DOVE | ||
| 6 | select SHMEM | ||
| 7 | select TMPFS | ||
| 8 | select IOMMU_API | ||
| 9 | select IOMMU_SUPPORT | ||
| 10 | select WANT_DEV_COREDUMP | ||
| 11 | help | ||
| 12 | DRM driver for Vivante GPUs. | ||
| 13 | |||
| 14 | config DRM_ETNAVIV_REGISTER_LOGGING | ||
| 15 | bool "enable ETNAVIV register logging" | ||
| 16 | depends on DRM_ETNAVIV | ||
| 17 | help | ||
| 18 | Compile in support for logging register reads/writes in a format | ||
| 19 | that can be parsed by envytools demsm tool. If enabled, register | ||
| 20 | logging can be switched on via etnaviv.reglog=y module param. | ||
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile new file mode 100644 index 000000000000..1086e9876f91 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/Makefile | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | etnaviv-y := \ | ||
| 2 | etnaviv_buffer.o \ | ||
| 3 | etnaviv_cmd_parser.o \ | ||
| 4 | etnaviv_drv.o \ | ||
| 5 | etnaviv_dump.o \ | ||
| 6 | etnaviv_gem_prime.o \ | ||
| 7 | etnaviv_gem_submit.o \ | ||
| 8 | etnaviv_gem.o \ | ||
| 9 | etnaviv_gpu.o \ | ||
| 10 | etnaviv_iommu_v2.o \ | ||
| 11 | etnaviv_iommu.o \ | ||
| 12 | etnaviv_mmu.o | ||
| 13 | |||
| 14 | obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o | ||
diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h new file mode 100644 index 000000000000..8c44ba9a694e --- /dev/null +++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h | |||
| @@ -0,0 +1,218 @@ | |||
| 1 | #ifndef CMDSTREAM_XML | ||
| 2 | #define CMDSTREAM_XML | ||
| 3 | |||
| 4 | /* Autogenerated file, DO NOT EDIT manually! | ||
| 5 | |||
| 6 | This file was generated by the rules-ng-ng headergen tool in this git repository: | ||
| 7 | http://0x04.net/cgit/index.cgi/rules-ng-ng | ||
| 8 | git clone git://0x04.net/rules-ng-ng | ||
| 9 | |||
| 10 | The rules-ng-ng source files this header was generated from are: | ||
| 11 | - cmdstream.xml ( 12589 bytes, from 2014-02-17 14:57:56) | ||
| 12 | - common.xml ( 18437 bytes, from 2015-03-25 11:27:41) | ||
| 13 | |||
| 14 | Copyright (C) 2014 | ||
| 15 | */ | ||
| 16 | |||
| 17 | |||
| 18 | #define FE_OPCODE_LOAD_STATE 0x00000001 | ||
| 19 | #define FE_OPCODE_END 0x00000002 | ||
| 20 | #define FE_OPCODE_NOP 0x00000003 | ||
| 21 | #define FE_OPCODE_DRAW_2D 0x00000004 | ||
| 22 | #define FE_OPCODE_DRAW_PRIMITIVES 0x00000005 | ||
| 23 | #define FE_OPCODE_DRAW_INDEXED_PRIMITIVES 0x00000006 | ||
| 24 | #define FE_OPCODE_WAIT 0x00000007 | ||
| 25 | #define FE_OPCODE_LINK 0x00000008 | ||
| 26 | #define FE_OPCODE_STALL 0x00000009 | ||
| 27 | #define FE_OPCODE_CALL 0x0000000a | ||
| 28 | #define FE_OPCODE_RETURN 0x0000000b | ||
| 29 | #define FE_OPCODE_CHIP_SELECT 0x0000000d | ||
| 30 | #define PRIMITIVE_TYPE_POINTS 0x00000001 | ||
| 31 | #define PRIMITIVE_TYPE_LINES 0x00000002 | ||
| 32 | #define PRIMITIVE_TYPE_LINE_STRIP 0x00000003 | ||
| 33 | #define PRIMITIVE_TYPE_TRIANGLES 0x00000004 | ||
| 34 | #define PRIMITIVE_TYPE_TRIANGLE_STRIP 0x00000005 | ||
| 35 | #define PRIMITIVE_TYPE_TRIANGLE_FAN 0x00000006 | ||
| 36 | #define PRIMITIVE_TYPE_LINE_LOOP 0x00000007 | ||
| 37 | #define PRIMITIVE_TYPE_QUADS 0x00000008 | ||
| 38 | #define VIV_FE_LOAD_STATE 0x00000000 | ||
| 39 | |||
| 40 | #define VIV_FE_LOAD_STATE_HEADER 0x00000000 | ||
| 41 | #define VIV_FE_LOAD_STATE_HEADER_OP__MASK 0xf8000000 | ||
| 42 | #define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT 27 | ||
| 43 | #define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE 0x08000000 | ||
| 44 | #define VIV_FE_LOAD_STATE_HEADER_FIXP 0x04000000 | ||
| 45 | #define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK 0x03ff0000 | ||
| 46 | #define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT 16 | ||
| 47 | #define VIV_FE_LOAD_STATE_HEADER_COUNT(x) (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK) | ||
| 48 | #define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK 0x0000ffff | ||
| 49 | #define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT 0 | ||
| 50 | #define VIV_FE_LOAD_STATE_HEADER_OFFSET(x) (((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK) | ||
| 51 | #define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR 2 | ||
| 52 | |||
| 53 | #define VIV_FE_END 0x00000000 | ||
| 54 | |||
| 55 | #define VIV_FE_END_HEADER 0x00000000 | ||
| 56 | #define VIV_FE_END_HEADER_EVENT_ID__MASK 0x0000001f | ||
| 57 | #define VIV_FE_END_HEADER_EVENT_ID__SHIFT 0 | ||
| 58 | #define VIV_FE_END_HEADER_EVENT_ID(x) (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK) | ||
| 59 | #define VIV_FE_END_HEADER_EVENT_ENABLE 0x00000100 | ||
| 60 | #define VIV_FE_END_HEADER_OP__MASK 0xf8000000 | ||
| 61 | #define VIV_FE_END_HEADER_OP__SHIFT 27 | ||
| 62 | #define VIV_FE_END_HEADER_OP_END 0x10000000 | ||
| 63 | |||
| 64 | #define VIV_FE_NOP 0x00000000 | ||
| 65 | |||
| 66 | #define VIV_FE_NOP_HEADER 0x00000000 | ||
| 67 | #define VIV_FE_NOP_HEADER_OP__MASK 0xf8000000 | ||
| 68 | #define VIV_FE_NOP_HEADER_OP__SHIFT 27 | ||
| 69 | #define VIV_FE_NOP_HEADER_OP_NOP 0x18000000 | ||
| 70 | |||
| 71 | #define VIV_FE_DRAW_2D 0x00000000 | ||
| 72 | |||
| 73 | #define VIV_FE_DRAW_2D_HEADER 0x00000000 | ||
| 74 | #define VIV_FE_DRAW_2D_HEADER_COUNT__MASK 0x0000ff00 | ||
| 75 | #define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT 8 | ||
| 76 | #define VIV_FE_DRAW_2D_HEADER_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK) | ||
| 77 | #define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK 0x07ff0000 | ||
| 78 | #define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT 16 | ||
| 79 | #define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK) | ||
| 80 | #define VIV_FE_DRAW_2D_HEADER_OP__MASK 0xf8000000 | ||
| 81 | #define VIV_FE_DRAW_2D_HEADER_OP__SHIFT 27 | ||
| 82 | #define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D 0x20000000 | ||
| 83 | |||
| 84 | #define VIV_FE_DRAW_2D_TOP_LEFT 0x00000008 | ||
| 85 | #define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK 0x0000ffff | ||
| 86 | #define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT 0 | ||
| 87 | #define VIV_FE_DRAW_2D_TOP_LEFT_X(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK) | ||
| 88 | #define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK 0xffff0000 | ||
| 89 | #define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT 16 | ||
| 90 | #define VIV_FE_DRAW_2D_TOP_LEFT_Y(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK) | ||
| 91 | |||
| 92 | #define VIV_FE_DRAW_2D_BOTTOM_RIGHT 0x0000000c | ||
| 93 | #define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK 0x0000ffff | ||
| 94 | #define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT 0 | ||
| 95 | #define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK) | ||
| 96 | #define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK 0xffff0000 | ||
| 97 | #define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT 16 | ||
| 98 | #define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK) | ||
| 99 | |||
| 100 | #define VIV_FE_DRAW_PRIMITIVES 0x00000000 | ||
| 101 | |||
| 102 | #define VIV_FE_DRAW_PRIMITIVES_HEADER 0x00000000 | ||
| 103 | #define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK 0xf8000000 | ||
| 104 | #define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT 27 | ||
| 105 | #define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES 0x28000000 | ||
| 106 | |||
| 107 | #define VIV_FE_DRAW_PRIMITIVES_COMMAND 0x00000004 | ||
| 108 | #define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff | ||
| 109 | #define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT 0 | ||
| 110 | #define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK) | ||
| 111 | |||
| 112 | #define VIV_FE_DRAW_PRIMITIVES_START 0x00000008 | ||
| 113 | |||
| 114 | #define VIV_FE_DRAW_PRIMITIVES_COUNT 0x0000000c | ||
| 115 | |||
| 116 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES 0x00000000 | ||
| 117 | |||
| 118 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER 0x00000000 | ||
| 119 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK 0xf8000000 | ||
| 120 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT 27 | ||
| 121 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES 0x30000000 | ||
| 122 | |||
| 123 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND 0x00000004 | ||
| 124 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff | ||
| 125 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT 0 | ||
| 126 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK) | ||
| 127 | |||
| 128 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_START 0x00000008 | ||
| 129 | |||
| 130 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT 0x0000000c | ||
| 131 | |||
| 132 | #define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET 0x00000010 | ||
| 133 | |||
| 134 | #define VIV_FE_WAIT 0x00000000 | ||
| 135 | |||
| 136 | #define VIV_FE_WAIT_HEADER 0x00000000 | ||
| 137 | #define VIV_FE_WAIT_HEADER_DELAY__MASK 0x0000ffff | ||
| 138 | #define VIV_FE_WAIT_HEADER_DELAY__SHIFT 0 | ||
| 139 | #define VIV_FE_WAIT_HEADER_DELAY(x) (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK) | ||
| 140 | #define VIV_FE_WAIT_HEADER_OP__MASK 0xf8000000 | ||
| 141 | #define VIV_FE_WAIT_HEADER_OP__SHIFT 27 | ||
| 142 | #define VIV_FE_WAIT_HEADER_OP_WAIT 0x38000000 | ||
| 143 | |||
| 144 | #define VIV_FE_LINK 0x00000000 | ||
| 145 | |||
| 146 | #define VIV_FE_LINK_HEADER 0x00000000 | ||
| 147 | #define VIV_FE_LINK_HEADER_PREFETCH__MASK 0x0000ffff | ||
| 148 | #define VIV_FE_LINK_HEADER_PREFETCH__SHIFT 0 | ||
| 149 | #define VIV_FE_LINK_HEADER_PREFETCH(x) (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK) | ||
| 150 | #define VIV_FE_LINK_HEADER_OP__MASK 0xf8000000 | ||
| 151 | #define VIV_FE_LINK_HEADER_OP__SHIFT 27 | ||
| 152 | #define VIV_FE_LINK_HEADER_OP_LINK 0x40000000 | ||
| 153 | |||
| 154 | #define VIV_FE_LINK_ADDRESS 0x00000004 | ||
| 155 | |||
| 156 | #define VIV_FE_STALL 0x00000000 | ||
| 157 | |||
| 158 | #define VIV_FE_STALL_HEADER 0x00000000 | ||
| 159 | #define VIV_FE_STALL_HEADER_OP__MASK 0xf8000000 | ||
| 160 | #define VIV_FE_STALL_HEADER_OP__SHIFT 27 | ||
| 161 | #define VIV_FE_STALL_HEADER_OP_STALL 0x48000000 | ||
| 162 | |||
| 163 | #define VIV_FE_STALL_TOKEN 0x00000004 | ||
| 164 | #define VIV_FE_STALL_TOKEN_FROM__MASK 0x0000001f | ||
| 165 | #define VIV_FE_STALL_TOKEN_FROM__SHIFT 0 | ||
| 166 | #define VIV_FE_STALL_TOKEN_FROM(x) (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK) | ||
| 167 | #define VIV_FE_STALL_TOKEN_TO__MASK 0x00001f00 | ||
| 168 | #define VIV_FE_STALL_TOKEN_TO__SHIFT 8 | ||
| 169 | #define VIV_FE_STALL_TOKEN_TO(x) (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK) | ||
| 170 | |||
| 171 | #define VIV_FE_CALL 0x00000000 | ||
| 172 | |||
| 173 | #define VIV_FE_CALL_HEADER 0x00000000 | ||
| 174 | #define VIV_FE_CALL_HEADER_PREFETCH__MASK 0x0000ffff | ||
| 175 | #define VIV_FE_CALL_HEADER_PREFETCH__SHIFT 0 | ||
| 176 | #define VIV_FE_CALL_HEADER_PREFETCH(x) (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK) | ||
| 177 | #define VIV_FE_CALL_HEADER_OP__MASK 0xf8000000 | ||
| 178 | #define VIV_FE_CALL_HEADER_OP__SHIFT 27 | ||
| 179 | #define VIV_FE_CALL_HEADER_OP_CALL 0x50000000 | ||
| 180 | |||
| 181 | #define VIV_FE_CALL_ADDRESS 0x00000004 | ||
| 182 | |||
| 183 | #define VIV_FE_CALL_RETURN_PREFETCH 0x00000008 | ||
| 184 | |||
| 185 | #define VIV_FE_CALL_RETURN_ADDRESS 0x0000000c | ||
| 186 | |||
| 187 | #define VIV_FE_RETURN 0x00000000 | ||
| 188 | |||
| 189 | #define VIV_FE_RETURN_HEADER 0x00000000 | ||
| 190 | #define VIV_FE_RETURN_HEADER_OP__MASK 0xf8000000 | ||
| 191 | #define VIV_FE_RETURN_HEADER_OP__SHIFT 27 | ||
| 192 | #define VIV_FE_RETURN_HEADER_OP_RETURN 0x58000000 | ||
| 193 | |||
| 194 | #define VIV_FE_CHIP_SELECT 0x00000000 | ||
| 195 | |||
| 196 | #define VIV_FE_CHIP_SELECT_HEADER 0x00000000 | ||
| 197 | #define VIV_FE_CHIP_SELECT_HEADER_OP__MASK 0xf8000000 | ||
| 198 | #define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT 27 | ||
| 199 | #define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT 0x68000000 | ||
| 200 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15 0x00008000 | ||
| 201 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14 0x00004000 | ||
| 202 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13 0x00002000 | ||
| 203 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12 0x00001000 | ||
| 204 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11 0x00000800 | ||
| 205 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10 0x00000400 | ||
| 206 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9 0x00000200 | ||
| 207 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8 0x00000100 | ||
| 208 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7 0x00000080 | ||
| 209 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6 0x00000040 | ||
| 210 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5 0x00000020 | ||
| 211 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4 0x00000010 | ||
| 212 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3 0x00000008 | ||
| 213 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2 0x00000004 | ||
| 214 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002 | ||
| 215 | #define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001 | ||
| 216 | |||
| 217 | |||
| 218 | #endif /* CMDSTREAM_XML */ | ||
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h new file mode 100644 index 000000000000..9e585d51fb78 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/common.xml.h | |||
| @@ -0,0 +1,249 @@ | |||
| 1 | #ifndef COMMON_XML | ||
| 2 | #define COMMON_XML | ||
| 3 | |||
| 4 | /* Autogenerated file, DO NOT EDIT manually! | ||
| 5 | |||
| 6 | This file was generated by the rules-ng-ng headergen tool in this git repository: | ||
| 7 | http://0x04.net/cgit/index.cgi/rules-ng-ng | ||
| 8 | git clone git://0x04.net/rules-ng-ng | ||
| 9 | |||
| 10 | The rules-ng-ng source files this header was generated from are: | ||
| 11 | - state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) | ||
| 12 | - common.xml ( 18437 bytes, from 2015-03-25 11:27:41) | ||
| 13 | |||
| 14 | Copyright (C) 2015 | ||
| 15 | */ | ||
| 16 | |||
| 17 | |||
| 18 | #define PIPE_ID_PIPE_3D 0x00000000 | ||
| 19 | #define PIPE_ID_PIPE_2D 0x00000001 | ||
| 20 | #define SYNC_RECIPIENT_FE 0x00000001 | ||
| 21 | #define SYNC_RECIPIENT_RA 0x00000005 | ||
| 22 | #define SYNC_RECIPIENT_PE 0x00000007 | ||
| 23 | #define SYNC_RECIPIENT_DE 0x0000000b | ||
| 24 | #define SYNC_RECIPIENT_VG 0x0000000f | ||
| 25 | #define SYNC_RECIPIENT_TESSELATOR 0x00000010 | ||
| 26 | #define SYNC_RECIPIENT_VG2 0x00000011 | ||
| 27 | #define SYNC_RECIPIENT_TESSELATOR2 0x00000012 | ||
| 28 | #define SYNC_RECIPIENT_VG3 0x00000013 | ||
| 29 | #define SYNC_RECIPIENT_TESSELATOR3 0x00000014 | ||
| 30 | #define ENDIAN_MODE_NO_SWAP 0x00000000 | ||
| 31 | #define ENDIAN_MODE_SWAP_16 0x00000001 | ||
| 32 | #define ENDIAN_MODE_SWAP_32 0x00000002 | ||
| 33 | #define chipModel_GC300 0x00000300 | ||
| 34 | #define chipModel_GC320 0x00000320 | ||
| 35 | #define chipModel_GC350 0x00000350 | ||
| 36 | #define chipModel_GC355 0x00000355 | ||
| 37 | #define chipModel_GC400 0x00000400 | ||
| 38 | #define chipModel_GC410 0x00000410 | ||
| 39 | #define chipModel_GC420 0x00000420 | ||
| 40 | #define chipModel_GC450 0x00000450 | ||
| 41 | #define chipModel_GC500 0x00000500 | ||
| 42 | #define chipModel_GC530 0x00000530 | ||
| 43 | #define chipModel_GC600 0x00000600 | ||
| 44 | #define chipModel_GC700 0x00000700 | ||
| 45 | #define chipModel_GC800 0x00000800 | ||
| 46 | #define chipModel_GC860 0x00000860 | ||
| 47 | #define chipModel_GC880 0x00000880 | ||
| 48 | #define chipModel_GC1000 0x00001000 | ||
| 49 | #define chipModel_GC2000 0x00002000 | ||
| 50 | #define chipModel_GC2100 0x00002100 | ||
| 51 | #define chipModel_GC4000 0x00004000 | ||
| 52 | #define RGBA_BITS_R 0x00000001 | ||
| 53 | #define RGBA_BITS_G 0x00000002 | ||
| 54 | #define RGBA_BITS_B 0x00000004 | ||
| 55 | #define RGBA_BITS_A 0x00000008 | ||
| 56 | #define chipFeatures_FAST_CLEAR 0x00000001 | ||
| 57 | #define chipFeatures_SPECIAL_ANTI_ALIASING 0x00000002 | ||
| 58 | #define chipFeatures_PIPE_3D 0x00000004 | ||
| 59 | #define chipFeatures_DXT_TEXTURE_COMPRESSION 0x00000008 | ||
| 60 | #define chipFeatures_DEBUG_MODE 0x00000010 | ||
| 61 | #define chipFeatures_Z_COMPRESSION 0x00000020 | ||
| 62 | #define chipFeatures_YUV420_SCALER 0x00000040 | ||
| 63 | #define chipFeatures_MSAA 0x00000080 | ||
| 64 | #define chipFeatures_DC 0x00000100 | ||
| 65 | #define chipFeatures_PIPE_2D 0x00000200 | ||
| 66 | #define chipFeatures_ETC1_TEXTURE_COMPRESSION 0x00000400 | ||
| 67 | #define chipFeatures_FAST_SCALER 0x00000800 | ||
| 68 | #define chipFeatures_HIGH_DYNAMIC_RANGE 0x00001000 | ||
| 69 | #define chipFeatures_YUV420_TILER 0x00002000 | ||
| 70 | #define chipFeatures_MODULE_CG 0x00004000 | ||
| 71 | #define chipFeatures_MIN_AREA 0x00008000 | ||
| 72 | #define chipFeatures_NO_EARLY_Z 0x00010000 | ||
| 73 | #define chipFeatures_NO_422_TEXTURE 0x00020000 | ||
| 74 | #define chipFeatures_BUFFER_INTERLEAVING 0x00040000 | ||
| 75 | #define chipFeatures_BYTE_WRITE_2D 0x00080000 | ||
| 76 | #define chipFeatures_NO_SCALER 0x00100000 | ||
| 77 | #define chipFeatures_YUY2_AVERAGING 0x00200000 | ||
| 78 | #define chipFeatures_HALF_PE_CACHE 0x00400000 | ||
| 79 | #define chipFeatures_HALF_TX_CACHE 0x00800000 | ||
| 80 | #define chipFeatures_YUY2_RENDER_TARGET 0x01000000 | ||
| 81 | #define chipFeatures_MEM32 0x02000000 | ||
| 82 | #define chipFeatures_PIPE_VG 0x04000000 | ||
| 83 | #define chipFeatures_VGTS 0x08000000 | ||
| 84 | #define chipFeatures_FE20 0x10000000 | ||
| 85 | #define chipFeatures_BYTE_WRITE_3D 0x20000000 | ||
| 86 | #define chipFeatures_RS_YUV_TARGET 0x40000000 | ||
| 87 | #define chipFeatures_32_BIT_INDICES 0x80000000 | ||
| 88 | #define chipMinorFeatures0_FLIP_Y 0x00000001 | ||
| 89 | #define chipMinorFeatures0_DUAL_RETURN_BUS 0x00000002 | ||
| 90 | #define chipMinorFeatures0_ENDIANNESS_CONFIG 0x00000004 | ||
| 91 | #define chipMinorFeatures0_TEXTURE_8K 0x00000008 | ||
| 92 | #define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER 0x00000010 | ||
| 93 | #define chipMinorFeatures0_SPECIAL_MSAA_LOD 0x00000020 | ||
| 94 | #define chipMinorFeatures0_FAST_CLEAR_FLUSH 0x00000040 | ||
| 95 | #define chipMinorFeatures0_2DPE20 0x00000080 | ||
| 96 | #define chipMinorFeatures0_CORRECT_AUTO_DISABLE 0x00000100 | ||
| 97 | #define chipMinorFeatures0_RENDERTARGET_8K 0x00000200 | ||
| 98 | #define chipMinorFeatures0_2BITPERTILE 0x00000400 | ||
| 99 | #define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED 0x00000800 | ||
| 100 | #define chipMinorFeatures0_SUPER_TILED 0x00001000 | ||
| 101 | #define chipMinorFeatures0_VG_20 0x00002000 | ||
| 102 | #define chipMinorFeatures0_TS_EXTENDED_COMMANDS 0x00004000 | ||
| 103 | #define chipMinorFeatures0_COMPRESSION_FIFO_FIXED 0x00008000 | ||
| 104 | #define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL 0x00010000 | ||
| 105 | #define chipMinorFeatures0_VG_FILTER 0x00020000 | ||
| 106 | #define chipMinorFeatures0_VG_21 0x00040000 | ||
| 107 | #define chipMinorFeatures0_SHADER_HAS_W 0x00080000 | ||
| 108 | #define chipMinorFeatures0_HAS_SQRT_TRIG 0x00100000 | ||
| 109 | #define chipMinorFeatures0_MORE_MINOR_FEATURES 0x00200000 | ||
| 110 | #define chipMinorFeatures0_MC20 0x00400000 | ||
| 111 | #define chipMinorFeatures0_MSAA_SIDEBAND 0x00800000 | ||
| 112 | #define chipMinorFeatures0_BUG_FIXES0 0x01000000 | ||
| 113 | #define chipMinorFeatures0_VAA 0x02000000 | ||
| 114 | #define chipMinorFeatures0_BYPASS_IN_MSAA 0x04000000 | ||
| 115 | #define chipMinorFeatures0_HZ 0x08000000 | ||
| 116 | #define chipMinorFeatures0_NEW_TEXTURE 0x10000000 | ||
| 117 | #define chipMinorFeatures0_2D_A8_TARGET 0x20000000 | ||
| 118 | #define chipMinorFeatures0_CORRECT_STENCIL 0x40000000 | ||
| 119 | #define chipMinorFeatures0_ENHANCE_VR 0x80000000 | ||
| 120 | #define chipMinorFeatures1_RSUV_SWIZZLE 0x00000001 | ||
| 121 | #define chipMinorFeatures1_V2_COMPRESSION 0x00000002 | ||
| 122 | #define chipMinorFeatures1_VG_DOUBLE_BUFFER 0x00000004 | ||
| 123 | #define chipMinorFeatures1_EXTRA_EVENT_STATES 0x00000008 | ||
| 124 | #define chipMinorFeatures1_NO_STRIPING_NEEDED 0x00000010 | ||
| 125 | #define chipMinorFeatures1_TEXTURE_STRIDE 0x00000020 | ||
| 126 | #define chipMinorFeatures1_BUG_FIXES3 0x00000040 | ||
| 127 | #define chipMinorFeatures1_AUTO_DISABLE 0x00000080 | ||
| 128 | #define chipMinorFeatures1_AUTO_RESTART_TS 0x00000100 | ||
| 129 | #define chipMinorFeatures1_DISABLE_PE_GATING 0x00000200 | ||
| 130 | #define chipMinorFeatures1_L2_WINDOWING 0x00000400 | ||
| 131 | #define chipMinorFeatures1_HALF_FLOAT 0x00000800 | ||
| 132 | #define chipMinorFeatures1_PIXEL_DITHER 0x00001000 | ||
| 133 | #define chipMinorFeatures1_TWO_STENCIL_REFERENCE 0x00002000 | ||
| 134 | #define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT 0x00004000 | ||
| 135 | #define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH 0x00008000 | ||
| 136 | #define chipMinorFeatures1_2D_DITHER 0x00010000 | ||
| 137 | #define chipMinorFeatures1_BUG_FIXES5 0x00020000 | ||
| 138 | #define chipMinorFeatures1_NEW_2D 0x00040000 | ||
| 139 | #define chipMinorFeatures1_NEW_FP 0x00080000 | ||
| 140 | #define chipMinorFeatures1_TEXTURE_HALIGN 0x00100000 | ||
| 141 | #define chipMinorFeatures1_NON_POWER_OF_TWO 0x00200000 | ||
| 142 | #define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT 0x00400000 | ||
| 143 | #define chipMinorFeatures1_HALTI0 0x00800000 | ||
| 144 | #define chipMinorFeatures1_CORRECT_OVERFLOW_VG 0x01000000 | ||
| 145 | #define chipMinorFeatures1_NEGATIVE_LOG_FIX 0x02000000 | ||
| 146 | #define chipMinorFeatures1_RESOLVE_OFFSET 0x04000000 | ||
| 147 | #define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK 0x08000000 | ||
| 148 | #define chipMinorFeatures1_MMU_VERSION 0x10000000 | ||
| 149 | #define chipMinorFeatures1_WIDE_LINE 0x20000000 | ||
| 150 | #define chipMinorFeatures1_BUG_FIXES6 0x40000000 | ||
| 151 | #define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000 | ||
| 152 | #define chipMinorFeatures2_LINE_LOOP 0x00000001 | ||
| 153 | #define chipMinorFeatures2_LOGIC_OP 0x00000002 | ||
| 154 | #define chipMinorFeatures2_UNK2 0x00000004 | ||
| 155 | #define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008 | ||
| 156 | #define chipMinorFeatures2_UNK4 0x00000010 | ||
| 157 | #define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020 | ||
| 158 | #define chipMinorFeatures2_COMPOSITION 0x00000040 | ||
| 159 | #define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 0x00000080 | ||
| 160 | #define chipMinorFeatures2_UNK8 0x00000100 | ||
| 161 | #define chipMinorFeatures2_UNK9 0x00000200 | ||
| 162 | #define chipMinorFeatures2_UNK10 0x00000400 | ||
| 163 | #define chipMinorFeatures2_SAMPLERBASE_16 0x00000800 | ||
| 164 | #define chipMinorFeatures2_UNK12 0x00001000 | ||
| 165 | #define chipMinorFeatures2_UNK13 0x00002000 | ||
| 166 | #define chipMinorFeatures2_UNK14 0x00004000 | ||
| 167 | #define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000 | ||
| 168 | #define chipMinorFeatures2_FULL_DIRECTFB 0x00010000 | ||
| 169 | #define chipMinorFeatures2_2D_TILING 0x00020000 | ||
| 170 | #define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000 | ||
| 171 | #define chipMinorFeatures2_TILE_FILLER 0x00080000 | ||
| 172 | #define chipMinorFeatures2_UNK20 0x00100000 | ||
| 173 | #define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000 | ||
| 174 | #define chipMinorFeatures2_UNK22 0x00400000 | ||
| 175 | #define chipMinorFeatures2_UNK23 0x00800000 | ||
| 176 | #define chipMinorFeatures2_UNK24 0x01000000 | ||
| 177 | #define chipMinorFeatures2_MIXED_STREAMS 0x02000000 | ||
| 178 | #define chipMinorFeatures2_2D_420_L2CACHE 0x04000000 | ||
| 179 | #define chipMinorFeatures2_UNK27 0x08000000 | ||
| 180 | #define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000 | ||
| 181 | #define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000 | ||
| 182 | #define chipMinorFeatures2_UNK30 0x40000000 | ||
| 183 | #define chipMinorFeatures2_UNK31 0x80000000 | ||
| 184 | #define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001 | ||
| 185 | #define chipMinorFeatures3_UNK1 0x00000002 | ||
| 186 | #define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004 | ||
| 187 | #define chipMinorFeatures3_UNK3 0x00000008 | ||
| 188 | #define chipMinorFeatures3_UNK4 0x00000010 | ||
| 189 | #define chipMinorFeatures3_UNK5 0x00000020 | ||
| 190 | #define chipMinorFeatures3_UNK6 0x00000040 | ||
| 191 | #define chipMinorFeatures3_UNK7 0x00000080 | ||
| 192 | #define chipMinorFeatures3_UNK8 0x00000100 | ||
| 193 | #define chipMinorFeatures3_UNK9 0x00000200 | ||
| 194 | #define chipMinorFeatures3_BUG_FIXES10 0x00000400 | ||
| 195 | #define chipMinorFeatures3_UNK11 0x00000800 | ||
| 196 | #define chipMinorFeatures3_BUG_FIXES11 0x00001000 | ||
| 197 | #define chipMinorFeatures3_UNK13 0x00002000 | ||
| 198 | #define chipMinorFeatures3_UNK14 0x00004000 | ||
| 199 | #define chipMinorFeatures3_UNK15 0x00008000 | ||
| 200 | #define chipMinorFeatures3_UNK16 0x00010000 | ||
| 201 | #define chipMinorFeatures3_UNK17 0x00020000 | ||
| 202 | #define chipMinorFeatures3_UNK18 0x00040000 | ||
| 203 | #define chipMinorFeatures3_UNK19 0x00080000 | ||
| 204 | #define chipMinorFeatures3_UNK20 0x00100000 | ||
| 205 | #define chipMinorFeatures3_UNK21 0x00200000 | ||
| 206 | #define chipMinorFeatures3_UNK22 0x00400000 | ||
| 207 | #define chipMinorFeatures3_UNK23 0x00800000 | ||
| 208 | #define chipMinorFeatures3_UNK24 0x01000000 | ||
| 209 | #define chipMinorFeatures3_UNK25 0x02000000 | ||
| 210 | #define chipMinorFeatures3_UNK26 0x04000000 | ||
| 211 | #define chipMinorFeatures3_UNK27 0x08000000 | ||
| 212 | #define chipMinorFeatures3_UNK28 0x10000000 | ||
| 213 | #define chipMinorFeatures3_UNK29 0x20000000 | ||
| 214 | #define chipMinorFeatures3_UNK30 0x40000000 | ||
| 215 | #define chipMinorFeatures3_UNK31 0x80000000 | ||
| 216 | #define chipMinorFeatures4_UNK0 0x00000001 | ||
| 217 | #define chipMinorFeatures4_UNK1 0x00000002 | ||
| 218 | #define chipMinorFeatures4_UNK2 0x00000004 | ||
| 219 | #define chipMinorFeatures4_UNK3 0x00000008 | ||
| 220 | #define chipMinorFeatures4_UNK4 0x00000010 | ||
| 221 | #define chipMinorFeatures4_UNK5 0x00000020 | ||
| 222 | #define chipMinorFeatures4_UNK6 0x00000040 | ||
| 223 | #define chipMinorFeatures4_UNK7 0x00000080 | ||
| 224 | #define chipMinorFeatures4_UNK8 0x00000100 | ||
| 225 | #define chipMinorFeatures4_UNK9 0x00000200 | ||
| 226 | #define chipMinorFeatures4_UNK10 0x00000400 | ||
| 227 | #define chipMinorFeatures4_UNK11 0x00000800 | ||
| 228 | #define chipMinorFeatures4_UNK12 0x00001000 | ||
| 229 | #define chipMinorFeatures4_UNK13 0x00002000 | ||
| 230 | #define chipMinorFeatures4_UNK14 0x00004000 | ||
| 231 | #define chipMinorFeatures4_UNK15 0x00008000 | ||
| 232 | #define chipMinorFeatures4_UNK16 0x00010000 | ||
| 233 | #define chipMinorFeatures4_UNK17 0x00020000 | ||
| 234 | #define chipMinorFeatures4_UNK18 0x00040000 | ||
| 235 | #define chipMinorFeatures4_UNK19 0x00080000 | ||
| 236 | #define chipMinorFeatures4_UNK20 0x00100000 | ||
| 237 | #define chipMinorFeatures4_UNK21 0x00200000 | ||
| 238 | #define chipMinorFeatures4_UNK22 0x00400000 | ||
| 239 | #define chipMinorFeatures4_UNK23 0x00800000 | ||
| 240 | #define chipMinorFeatures4_UNK24 0x01000000 | ||
| 241 | #define chipMinorFeatures4_UNK25 0x02000000 | ||
| 242 | #define chipMinorFeatures4_UNK26 0x04000000 | ||
| 243 | #define chipMinorFeatures4_UNK27 0x08000000 | ||
| 244 | #define chipMinorFeatures4_UNK28 0x10000000 | ||
| 245 | #define chipMinorFeatures4_UNK29 0x20000000 | ||
| 246 | #define chipMinorFeatures4_UNK30 0x40000000 | ||
| 247 | #define chipMinorFeatures4_UNK31 0x80000000 | ||
| 248 | |||
| 249 | #endif /* COMMON_XML */ | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c new file mode 100644 index 000000000000..332c55ebba6d --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c | |||
| @@ -0,0 +1,268 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2014 Etnaviv Project | ||
| 3 | * Author: Christian Gmeiner <christian.gmeiner@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include "etnaviv_gpu.h" | ||
| 19 | #include "etnaviv_gem.h" | ||
| 20 | #include "etnaviv_mmu.h" | ||
| 21 | |||
| 22 | #include "common.xml.h" | ||
| 23 | #include "state.xml.h" | ||
| 24 | #include "cmdstream.xml.h" | ||
| 25 | |||
| 26 | /* | ||
| 27 | * Command Buffer helper: | ||
| 28 | */ | ||
| 29 | |||
| 30 | |||
| 31 | static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data) | ||
| 32 | { | ||
| 33 | u32 *vaddr = (u32 *)buffer->vaddr; | ||
| 34 | |||
| 35 | BUG_ON(buffer->user_size >= buffer->size); | ||
| 36 | |||
| 37 | vaddr[buffer->user_size / 4] = data; | ||
| 38 | buffer->user_size += 4; | ||
| 39 | } | ||
| 40 | |||
| 41 | static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer, | ||
| 42 | u32 reg, u32 value) | ||
| 43 | { | ||
| 44 | u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR; | ||
| 45 | |||
| 46 | buffer->user_size = ALIGN(buffer->user_size, 8); | ||
| 47 | |||
| 48 | /* write a register via cmd stream */ | ||
| 49 | OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE | | ||
| 50 | VIV_FE_LOAD_STATE_HEADER_COUNT(1) | | ||
| 51 | VIV_FE_LOAD_STATE_HEADER_OFFSET(index)); | ||
| 52 | OUT(buffer, value); | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline void CMD_END(struct etnaviv_cmdbuf *buffer) | ||
| 56 | { | ||
| 57 | buffer->user_size = ALIGN(buffer->user_size, 8); | ||
| 58 | |||
| 59 | OUT(buffer, VIV_FE_END_HEADER_OP_END); | ||
| 60 | } | ||
| 61 | |||
/*
 * Emit a WAIT command.  The literal 200 is the delay operand in the low
 * bits of the WAIT header (presumably clock cycles before the FE
 * re-fetches - confirm against the front-end documentation).
 */
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}
| 68 | |||
/*
 * Emit a LINK command jumping to @address.  @prefetch is the amount of
 * command data the FE fetches at the target; the callers compute it in
 * 64-bit units (e.g. size / 8).
 */
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}
| 78 | |||
/*
 * Emit a STALL command: the @from unit waits until the @to unit signals
 * the semaphore token (see VIVS_GL_SEMAPHORE_TOKEN usage in
 * etnaviv_cmd_select_pipe()).
 */
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}
| 87 | |||
| 88 | static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe) | ||
| 89 | { | ||
| 90 | u32 flush; | ||
| 91 | u32 stall; | ||
| 92 | |||
| 93 | /* | ||
| 94 | * This assumes that if we're switching to 2D, we're switching | ||
| 95 | * away from 3D, and vice versa. Hence, if we're switching to | ||
| 96 | * the 2D core, we need to flush the 3D depth and color caches, | ||
| 97 | * otherwise we need to flush the 2D pixel engine cache. | ||
| 98 | */ | ||
| 99 | if (pipe == ETNA_PIPE_2D) | ||
| 100 | flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR; | ||
| 101 | else | ||
| 102 | flush = VIVS_GL_FLUSH_CACHE_PE2D; | ||
| 103 | |||
| 104 | stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) | | ||
| 105 | VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE); | ||
| 106 | |||
| 107 | CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush); | ||
| 108 | CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall); | ||
| 109 | |||
| 110 | CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); | ||
| 111 | |||
| 112 | CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT, | ||
| 113 | VIVS_GL_PIPE_SELECT_PIPE(pipe)); | ||
| 114 | } | ||
| 115 | |||
/* GPU-visible address of a cmdbuf: its physical address relative to the
 * GPU's memory window base.
 */
static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
{
	return buf->paddr - gpu->memory_base;
}
| 120 | |||
| 121 | static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, | ||
| 122 | struct etnaviv_cmdbuf *buf, u32 off, u32 len) | ||
| 123 | { | ||
| 124 | u32 size = buf->size; | ||
| 125 | u32 *ptr = buf->vaddr + off; | ||
| 126 | |||
| 127 | dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n", | ||
| 128 | ptr, gpu_va(gpu, buf) + off, size - len * 4 - off); | ||
| 129 | |||
| 130 | print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4, | ||
| 131 | ptr, len * 4, 0); | ||
| 132 | } | ||
| 133 | |||
| 134 | u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu) | ||
| 135 | { | ||
| 136 | struct etnaviv_cmdbuf *buffer = gpu->buffer; | ||
| 137 | |||
| 138 | /* initialize buffer */ | ||
| 139 | buffer->user_size = 0; | ||
| 140 | |||
| 141 | CMD_WAIT(buffer); | ||
| 142 | CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4); | ||
| 143 | |||
| 144 | return buffer->user_size / 8; | ||
| 145 | } | ||
| 146 | |||
| 147 | void etnaviv_buffer_end(struct etnaviv_gpu *gpu) | ||
| 148 | { | ||
| 149 | struct etnaviv_cmdbuf *buffer = gpu->buffer; | ||
| 150 | |||
| 151 | /* Replace the last WAIT with an END */ | ||
| 152 | buffer->user_size -= 16; | ||
| 153 | |||
| 154 | CMD_END(buffer); | ||
| 155 | mb(); | ||
| 156 | } | ||
| 157 | |||
/*
 * Queue a user command buffer behind the kernel ring buffer.
 *
 * The ring permanently ends in a WAIT/LINK pair the FE spins on.  New
 * commands (optional MMU flush / pipe switch, link to @cmdbuf, EVENT,
 * fresh WAIT/LINK) are appended after that pair; @cmdbuf itself gets a
 * LINK back into the ring.  Finally the *old* WAIT is rewritten into a
 * LINK pointing at the new commands - that patch, ordered by the memory
 * barriers, is what atomically hands the work to the GPU.
 *
 * @event is the event id signalled from the PE when the buffer completed.
 * NOTE(review): assumes the caller holds gpu->lock (ring write pointer
 * and need_flush/switch_context are shared state) - confirm against the
 * submit path.
 */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	/* the WAIT/LINK pair currently parked at the end of the ring */
	u32 *lw = buffer->vaddr + buffer->user_size - 16;
	u32 back, link_target, link_size, reserve_size, extra_size = 0;

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	/*
	 * If an MMU flush and/or pipe switch must precede this buffer,
	 * account for the extra words: 2 for the additional link into
	 * this buffer, 2 for the flush LOAD_STATE, 8 for the pipe switch
	 * sequence emitted by etnaviv_cmd_select_pipe().
	 */
	if (gpu->mmu->need_flush || gpu->switch_context) {
		/* link command */
		extra_size += 2;
		/* flush command */
		if (gpu->mmu->need_flush)
			extra_size += 2;
		/* pipe switch commands */
		if (gpu->switch_context)
			extra_size += 8;
	}

	/* 6 words: EVENT (2) + WAIT (2, aligned) + LINK (2), plus extras */
	reserve_size = (6 + extra_size) * 4;

	/*
	 * if we are going to completely overflow the buffer, we need to wrap.
	 */
	if (buffer->user_size + reserve_size > buffer->size)
		buffer->user_size = 0;

	/* save offset back into main buffer (debug print only, see below) */
	back = buffer->user_size + reserve_size - 6 * 4;
	link_target = gpu_va(gpu, buffer) + buffer->user_size;
	link_size = 6;

	/* Skip over any extra instructions */
	link_target += extra_size * sizeof(u32);

	if (drm_debug & DRM_UT_DRIVER)
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);

	/* jump back from cmd to main buffer */
	CMD_LINK(cmdbuf, link_size, link_target);

	/* default patch target: the user buffer itself */
	link_target = gpu_va(gpu, cmdbuf);
	link_size = cmdbuf->size / 8;

	if (drm_debug & DRM_UT_DRIVER) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", lw);
		pr_info("link addr: %p\n", lw + 1);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
		pr_info("event: %d\n", event);
	}

	if (gpu->mmu->need_flush || gpu->switch_context) {
		/* the extra instructions start at the current ring position */
		u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;

		if (gpu->mmu->need_flush) {
			/* Add the MMU flush */
			CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
				       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
				       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);

			gpu->mmu->need_flush = false;
		}

		if (gpu->switch_context) {
			etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
			gpu->switch_context = false;
		}

		/* And the link to the first buffer */
		CMD_LINK(buffer, link_size, link_target);

		/* Update the link target to point to above instructions */
		link_target = new_target;
		link_size = extra_size;
	}

	/* trigger event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* append WAIT/LINK to main buffer */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));

	/*
	 * Change WAIT into a LINK command; write the address first, with
	 * a barrier in between, so the FE can never see a LINK opcode
	 * with a stale target address.
	 */
	*(lw + 1) = link_target;
	mb();
	*(lw) = VIV_FE_LINK_HEADER_OP_LINK |
		VIV_FE_LINK_HEADER_PREFETCH(link_size);
	mb();

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c new file mode 100644 index 000000000000..dcfd565c88d1 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | |||
| @@ -0,0 +1,209 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/kernel.h> | ||
| 18 | |||
| 19 | #include "etnaviv_gem.h" | ||
| 20 | #include "etnaviv_gpu.h" | ||
| 21 | |||
| 22 | #include "cmdstream.xml.h" | ||
| 23 | |||
| 24 | #define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT) | ||
| 25 | |||
/*
 * Walk state for command stream validation: the GPU being validated
 * for, the remaining user relocation entries (consumed monotonically,
 * so they are expected ordered by submit_offset), and the start of the
 * stream for computing word offsets.
 */
struct etna_validation_state {
	struct etnaviv_gpu *gpu;
	const struct drm_etnaviv_gem_submit_reloc *relocs;
	unsigned int num_relocs;
	u32 *start;
};
| 32 | |||
/*
 * State (register) ranges that user command streams may only write via
 * a relocation entry - presumably because they carry GPU addresses
 * (TODO confirm against the state documentation).  ST() converts byte
 * addresses into state word indices.  Only used at init time to build
 * the etnaviv_states bitmap, hence __initconst.
 */
static const struct {
	u16 offset;	/* state word index (byte address >> 2) */
	u16 size;	/* number of consecutive state words */
} etnaviv_sensitive_states[] __initconst = {
#define ST(start, num) { (start) >> 2, (num) }
	/* 2D */
	ST(0x1200, 1),
	ST(0x1228, 1),
	ST(0x1238, 1),
	ST(0x1284, 1),
	ST(0x128c, 1),
	ST(0x1304, 1),
	ST(0x1310, 1),
	ST(0x1318, 1),
	ST(0x12800, 4),
	ST(0x128a0, 4),
	ST(0x128c0, 4),
	ST(0x12970, 4),
	ST(0x12a00, 8),
	ST(0x12b40, 8),
	ST(0x12b80, 8),
	ST(0x12ce0, 8),
	/* 3D */
	ST(0x0644, 1),
	ST(0x064c, 1),
	ST(0x0680, 8),
	ST(0x1410, 1),
	ST(0x1430, 1),
	ST(0x1458, 1),
	ST(0x1460, 8),
	ST(0x1480, 8),
	ST(0x1500, 8),
	ST(0x1520, 8),
	ST(0x1608, 1),
	ST(0x1610, 1),
	ST(0x1658, 1),
	ST(0x165c, 1),
	ST(0x1664, 1),
	ST(0x1668, 1),
	ST(0x16a4, 1),
	ST(0x16c0, 8),
	ST(0x16e0, 8),
	ST(0x1740, 8),
	ST(0x2400, 14 * 16),
	ST(0x10800, 32 * 16),
#undef ST
};

/* One bit per possible LOAD_STATE offset; a set bit marks it sensitive. */
#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);
| 83 | |||
| 84 | void __init etnaviv_validate_init(void) | ||
| 85 | { | ||
| 86 | unsigned int i; | ||
| 87 | |||
| 88 | for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++) | ||
| 89 | bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset, | ||
| 90 | etnaviv_sensitive_states[i].size); | ||
| 91 | } | ||
| 92 | |||
/*
 * Relocations are consumed in ascending submit_offset order.  Any entry
 * whose offset lies before the current buffer position can no longer
 * match a sensitive state, i.e. it targets a non-sensitive word: warn
 * once about the first such entry, then discard the whole stale run so
 * the walker stays in sync with the stream.
 */
static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
	unsigned int buf_offset, unsigned int state_addr)
{
	if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
		dev_warn_once(state->gpu->dev,
			      "%s: relocation for non-sensitive state 0x%x at offset %u\n",
			      __func__, state_addr,
			      state->relocs->submit_offset);
		while (state->num_relocs &&
		       state->relocs->submit_offset < buf_offset) {
			state->relocs++;
			state->num_relocs--;
		}
	}
}
| 108 | |||
/*
 * Validate the payload of one LOAD_STATE command.
 *
 * @ptr points at the first payload word (just past the header),
 * @state_offset is the starting state word index, @num the number of
 * state words written.  Every sensitive state touched must be matched
 * by a relocation at exactly that buffer byte offset; any unmatched hit
 * rejects the stream.  Returns true when the command is acceptable.
 */
static bool etnaviv_validate_load_state(struct etna_validation_state *state,
	u32 *ptr, unsigned int state_offset, unsigned int num)
{
	/* clamp the scan to the end of the state bitmap */
	unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
	unsigned int st_offset = state_offset, buf_offset;

	/* visit only the sensitive states inside [state_offset, size) */
	for_each_set_bit_from(st_offset, etnaviv_states, size) {
		/* byte offset of this state's payload word in the stream */
		buf_offset = (ptr - state->start +
			      st_offset - state_offset) * 4;

		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4);
		if (state->num_relocs &&
		    state->relocs->submit_offset == buf_offset) {
			/* matched: consume the relocation and move on */
			state->relocs++;
			state->num_relocs--;
			continue;
		}

		dev_warn_ratelimited(state->gpu->dev,
				     "%s: load state touches restricted state 0x%x at offset %u\n",
				     __func__, st_offset * 4, buf_offset);
		return false;
	}

	/* flush relocations that point before the end of this payload */
	if (state->num_relocs) {
		buf_offset = (ptr - state->start + num) * 4;
		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
					      state->relocs->submit_offset -
					      buf_offset);
	}

	return true;
}
| 142 | |||
/*
 * Fixed lengths (in 32-bit words) of the FE opcodes permitted in user
 * streams; a zero entry means the opcode is rejected.  LOAD_STATE and
 * DRAW_2D are variable length and handled explicitly in the validator.
 */
static uint8_t cmd_length[32] = {
	[FE_OPCODE_DRAW_PRIMITIVES] = 4,
	[FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
	[FE_OPCODE_NOP] = 2,
	[FE_OPCODE_STALL] = 2,
};
| 149 | |||
/*
 * Validate one user command stream before it is linked into the ring.
 *
 * @stream/@size: the commands; @size is in 32-bit words.
 * @relocs/@reloc_size: the submit's relocation entries, consumed in
 * ascending submit_offset order.
 *
 * Only whitelisted opcodes are accepted, and LOAD_STATE payloads that
 * touch sensitive states must carry matching relocations.  Returns true
 * when the stream is safe to execute.
 */
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
	unsigned int size,
	struct drm_etnaviv_gem_submit_reloc *relocs,
	unsigned int reloc_size)
{
	struct etna_validation_state state;
	u32 *buf = stream;
	u32 *end = buf + size;

	state.gpu = gpu;
	state.relocs = relocs;
	state.num_relocs = reloc_size;
	state.start = stream;

	while (buf < end) {
		u32 cmd = *buf;
		unsigned int len, n, off;
		unsigned int op = cmd >> 27;	/* opcode is in bits 31:27 */

		switch (op) {
		case FE_OPCODE_LOAD_STATE:
			n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
			/* header + payload, padded to 64-bit */
			len = ALIGN(1 + n, 2);
			/* truncated command: the overflow check below rejects it */
			if (buf + len > end)
				break;

			off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
			if (!etnaviv_validate_load_state(&state, buf + 1,
							 off, n))
				return false;
			break;

		case FE_OPCODE_DRAW_2D:
			n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
			if (n == 0)	/* a count of zero encodes 256 */
				n = 256;
			len = 2 + n * 2;
			break;

		default:
			len = cmd_length[op];
			if (len == 0) {
				dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
					__func__, op, buf - state.start);
				return false;
			}
			break;
		}

		buf += len;
	}

	/* a command running past the buffer end is a malformed stream */
	if (buf > end) {
		dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
			__func__, buf - state.start, size);
		return false;
	}

	return true;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c new file mode 100644 index 000000000000..5c89ebb52fd2 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c | |||
| @@ -0,0 +1,707 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/component.h> | ||
| 18 | #include <linux/of_platform.h> | ||
| 19 | |||
| 20 | #include "etnaviv_drv.h" | ||
| 21 | #include "etnaviv_gpu.h" | ||
| 22 | #include "etnaviv_gem.h" | ||
| 23 | #include "etnaviv_mmu.h" | ||
| 24 | #include "etnaviv_gem.h" | ||
| 25 | |||
/*
 * Register read/write logging, switchable via the etnaviv.reglog module
 * parameter.  When the Kconfig option is off, reglog collapses to the
 * constant 0 so all logging branches are compiled out.
 */
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif
| 33 | |||
| 34 | void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name, | ||
| 35 | const char *dbgname) | ||
| 36 | { | ||
| 37 | struct resource *res; | ||
| 38 | void __iomem *ptr; | ||
| 39 | |||
| 40 | if (name) | ||
| 41 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); | ||
| 42 | else | ||
| 43 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 44 | |||
| 45 | ptr = devm_ioremap_resource(&pdev->dev, res); | ||
| 46 | if (IS_ERR(ptr)) { | ||
| 47 | dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name, | ||
| 48 | PTR_ERR(ptr)); | ||
| 49 | return ptr; | ||
| 50 | } | ||
| 51 | |||
| 52 | if (reglog) | ||
| 53 | dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n", | ||
| 54 | dbgname, ptr, (size_t)resource_size(res)); | ||
| 55 | |||
| 56 | return ptr; | ||
| 57 | } | ||
| 58 | |||
/* writel() wrapper that traces the access when reglog is enabled. */
void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}
| 66 | |||
/* readl() wrapper that traces the access when reglog is enabled. */
u32 etnaviv_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);

	return val;
}
| 76 | |||
| 77 | /* | ||
| 78 | * DRM operations: | ||
| 79 | */ | ||
| 80 | |||
| 81 | |||
| 82 | static void load_gpu(struct drm_device *dev) | ||
| 83 | { | ||
| 84 | struct etnaviv_drm_private *priv = dev->dev_private; | ||
| 85 | unsigned int i; | ||
| 86 | |||
| 87 | for (i = 0; i < ETNA_MAX_PIPES; i++) { | ||
| 88 | struct etnaviv_gpu *g = priv->gpu[i]; | ||
| 89 | |||
| 90 | if (g) { | ||
| 91 | int ret; | ||
| 92 | |||
| 93 | ret = etnaviv_gpu_init(g); | ||
| 94 | if (ret) { | ||
| 95 | dev_err(g->dev, "hw init failed: %d\n", ret); | ||
| 96 | priv->gpu[i] = NULL; | ||
| 97 | } | ||
| 98 | } | ||
| 99 | } | ||
| 100 | } | ||
| 101 | |||
/* Per-open hook: allocate the zero-initialised per-file context. */
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}
| 114 | |||
| 115 | static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file) | ||
| 116 | { | ||
| 117 | struct etnaviv_drm_private *priv = dev->dev_private; | ||
| 118 | struct etnaviv_file_private *ctx = file->driver_priv; | ||
| 119 | unsigned int i; | ||
| 120 | |||
| 121 | for (i = 0; i < ETNA_MAX_PIPES; i++) { | ||
| 122 | struct etnaviv_gpu *gpu = priv->gpu[i]; | ||
| 123 | |||
| 124 | if (gpu) { | ||
| 125 | mutex_lock(&gpu->lock); | ||
| 126 | if (gpu->lastctx == ctx) | ||
| 127 | gpu->lastctx = NULL; | ||
| 128 | mutex_unlock(&gpu->lock); | ||
| 129 | } | ||
| 130 | } | ||
| 131 | |||
| 132 | kfree(ctx); | ||
| 133 | } | ||
| 134 | |||
| 135 | /* | ||
| 136 | * DRM debugfs: | ||
| 137 | */ | ||
| 138 | |||
| 139 | #ifdef CONFIG_DEBUG_FS | ||
/* debugfs "gem": describe all GEM objects of this device. */
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}
| 148 | |||
/* debugfs "mm": dump the mmap-offset address space under its rwlock. */
static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	int ret;

	read_lock(&dev->vma_offset_manager->vm_lock);
	ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return ret;
}
| 159 | |||
/* debugfs "mmu": dump one GPU's MMU address space under its lock. */
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_dump_table(m, &gpu->mmu->mm);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}
| 170 | |||
| 171 | static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m) | ||
| 172 | { | ||
| 173 | struct etnaviv_cmdbuf *buf = gpu->buffer; | ||
| 174 | u32 size = buf->size; | ||
| 175 | u32 *ptr = buf->vaddr; | ||
| 176 | u32 i; | ||
| 177 | |||
| 178 | seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n", | ||
| 179 | buf->vaddr, (u64)buf->paddr, size - buf->user_size); | ||
| 180 | |||
| 181 | for (i = 0; i < size / 4; i++) { | ||
| 182 | if (i && !(i % 4)) | ||
| 183 | seq_puts(m, "\n"); | ||
| 184 | if (i % 4 == 0) | ||
| 185 | seq_printf(m, "\t0x%p: ", ptr + i); | ||
| 186 | seq_printf(m, "%08x ", *(ptr + i)); | ||
| 187 | } | ||
| 188 | seq_puts(m, "\n"); | ||
| 189 | } | ||
| 190 | |||
/* debugfs "ring": dump one GPU's ring buffer under the GPU lock. */
static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
| 201 | |||
/*
 * debugfs trampoline for per-device entries: the actual show callback
 * is stashed in the drm_info_list entry's data pointer.
 */
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}
| 211 | |||
| 212 | static int show_each_gpu(struct seq_file *m, void *arg) | ||
| 213 | { | ||
| 214 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
| 215 | struct drm_device *dev = node->minor->dev; | ||
| 216 | struct etnaviv_drm_private *priv = dev->dev_private; | ||
| 217 | struct etnaviv_gpu *gpu; | ||
| 218 | int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) = | ||
| 219 | node->info_ent->data; | ||
| 220 | unsigned int i; | ||
| 221 | int ret = 0; | ||
| 222 | |||
| 223 | for (i = 0; i < ETNA_MAX_PIPES; i++) { | ||
| 224 | gpu = priv->gpu[i]; | ||
| 225 | if (!gpu) | ||
| 226 | continue; | ||
| 227 | |||
| 228 | ret = show(gpu, m); | ||
| 229 | if (ret < 0) | ||
| 230 | break; | ||
| 231 | } | ||
| 232 | |||
| 233 | return ret; | ||
| 234 | } | ||
| 235 | |||
| 236 | static struct drm_info_list etnaviv_debugfs_list[] = { | ||
| 237 | {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs}, | ||
| 238 | {"gem", show_unlocked, 0, etnaviv_gem_show}, | ||
| 239 | { "mm", show_unlocked, 0, etnaviv_mm_show }, | ||
| 240 | {"mmu", show_each_gpu, 0, etnaviv_mmu_show}, | ||
| 241 | {"ring", show_each_gpu, 0, etnaviv_ring_show}, | ||
| 242 | }; | ||
| 243 | |||
| 244 | static int etnaviv_debugfs_init(struct drm_minor *minor) | ||
| 245 | { | ||
| 246 | struct drm_device *dev = minor->dev; | ||
| 247 | int ret; | ||
| 248 | |||
| 249 | ret = drm_debugfs_create_files(etnaviv_debugfs_list, | ||
| 250 | ARRAY_SIZE(etnaviv_debugfs_list), | ||
| 251 | minor->debugfs_root, minor); | ||
| 252 | |||
| 253 | if (ret) { | ||
| 254 | dev_err(dev->dev, "could not install etnaviv_debugfs_list\n"); | ||
| 255 | return ret; | ||
| 256 | } | ||
| 257 | |||
| 258 | return ret; | ||
| 259 | } | ||
| 260 | |||
/* Remove the debugfs files installed by etnaviv_debugfs_init(). */
static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(etnaviv_debugfs_list,
			ARRAY_SIZE(etnaviv_debugfs_list), minor);
}
| 266 | #endif | ||
| 267 | |||
| 268 | /* | ||
| 269 | * DRM ioctls: | ||
| 270 | */ | ||
| 271 | |||
/*
 * ETNAVIV_GET_PARAM: query a GPU parameter.  -EINVAL for an out-of-range
 * pipe, -ENXIO when the selected pipe has no GPU bound.
 */
static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}
| 288 | |||
/*
 * ETNAVIV_GEM_NEW: allocate a GEM object.  Only the documented cache
 * mode and FORCE_MMU flags are accepted.
 */
static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}
| 301 | |||
/*
 * Copy the sec/nsec fields of a userspace timeout into a struct
 * timespec compound literal.  Callers take its address (&TS(...)), so
 * the literal's lifetime - the enclosing block - matters.
 */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})
| 306 | |||
/*
 * ETNAVIV_GEM_CPU_PREP: prepare a BO for CPU access, waiting (with the
 * supplied timeout) unless NOSYNC is set.  The lookup reference is
 * dropped before returning.
 */
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
| 327 | |||
/*
 * ETNAVIV_GEM_CPU_FINI: end a CPU access period started by CPU_PREP.
 * No flags are currently defined, so any flag bit is rejected.
 */
static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
| 348 | |||
/* ETNAVIV_GEM_INFO: return the mmap offset for a BO handle. */
static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	/* the pad field must be zero for future extensibility */
	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
| 368 | |||
/*
 * ETNAVIV_WAIT_FENCE: wait for a GPU fence.  ETNA_WAIT_NONBLOCK turns
 * the call into a poll by passing a NULL timeout down.
 */
static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	/* address of a compound literal; valid for this whole function */
	struct timespec *timeout = &TS(args->timeout);
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}
| 393 | |||
/*
 * ETNAVIV_GEM_USERPTR: wrap a page-aligned userspace region in a GEM
 * object.  At least one of READ/WRITE must be requested.
 */
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	/*
	 * Start and size must be page aligned and must fit the kernel's
	 * native pointer/size types without truncation.
	 * NOTE(review): the final PAGE_MASK test looks redundant with the
	 * offset_in_page() check above - confirm the u64 promotion of
	 * ~PAGE_MASK on 32-bit before removing either.
	 */
	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	/* reject regions the caller may not actually access */
	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}
| 423 | |||
/*
 * ETNAVIV_GEM_WAIT: wait until a BO is idle on the given GPU pipe.
 * ETNA_WAIT_NONBLOCK polls by passing a NULL timeout down.
 */
static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	/* address of a compound literal; valid for this whole function */
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
| 457 | |||
/*
 * Ioctl dispatch table.  Every entry requires DRM_AUTH and is allowed
 * on render nodes.
 */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
};
| 471 | |||
/* VMA callbacks for mmap'd GEM objects: custom fault, core open/close. */
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
| 477 | |||
/*
 * File operations for the DRM device node.  Everything is delegated to
 * the DRM core except mmap, which is overridden so that GEM mappings
 * get the caching attributes the BO was created with.
 */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};
| 491 | |||
/*
 * DRM driver description.  Etnaviv is a render-only driver (no
 * modesetting): GEM buffer management, PRIME import/export and render
 * nodes are provided; scanout is handled by a separate display driver.
 */
static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER,
	.open               = etnaviv_open,
	.preclose           = etnaviv_preclose,
	.set_busid          = drm_platform_set_busid,
	.gem_free_object    = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	/* PRIME: core handle<->fd plumbing, driver-specific pin/map hooks */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
	.debugfs_cleanup    = etnaviv_debugfs_cleanup,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 0,
};
| 525 | |||
/*
 * Platform driver:
 */

/*
 * Component-master bind callback: runs once all GPU core components have
 * probed.  Allocates the DRM device and driver-private state, creates the
 * ordered workqueue used for retire work, binds the GPU components and
 * finally registers the DRM device.  Errors unwind in reverse order.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (!drm)
		return -ENOMEM;

	drm->platformdev = to_platform_device(dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	/* Ordered: retire/recovery work items must not run concurrently. */
	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	/* Components look the drm_device up via drvdata during bind. */
	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

	/* Note: each label undoes the step *before* its goto site. */
out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}
| 585 | |||
/*
 * Component-master unbind callback: tear down in the reverse order of
 * etnaviv_bind() -- unregister the DRM device, drain and destroy the
 * workqueue, unbind the GPU components, then drop the device.
 *
 * NOTE(review): drm_put_dev() historically also performs an unregister
 * before the final unref; confirm against the DRM core in this tree that
 * calling it after drm_dev_unregister() (and after freeing dev_private)
 * is safe here.
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_put_dev(drm);
}
| 603 | |||
/* Component framework callbacks for the GPU subsystem master device. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
| 608 | |||
| 609 | static int compare_of(struct device *dev, void *data) | ||
| 610 | { | ||
| 611 | struct device_node *np = data; | ||
| 612 | |||
| 613 | return dev->of_node == np; | ||
| 614 | } | ||
| 615 | |||
/* Component match (non-DT): true when @dev's name equals string @data. */
static int compare_str(struct device *dev, void *data)
{
	const char *name = data;

	return strcmp(dev_name(dev), name) == 0;
}
| 620 | |||
| 621 | static int etnaviv_pdev_probe(struct platform_device *pdev) | ||
| 622 | { | ||
| 623 | struct device *dev = &pdev->dev; | ||
| 624 | struct device_node *node = dev->of_node; | ||
| 625 | struct component_match *match = NULL; | ||
| 626 | |||
| 627 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | ||
| 628 | |||
| 629 | if (node) { | ||
| 630 | struct device_node *core_node; | ||
| 631 | int i; | ||
| 632 | |||
| 633 | for (i = 0; ; i++) { | ||
| 634 | core_node = of_parse_phandle(node, "cores", i); | ||
| 635 | if (!core_node) | ||
| 636 | break; | ||
| 637 | |||
| 638 | component_match_add(&pdev->dev, &match, compare_of, | ||
| 639 | core_node); | ||
| 640 | of_node_put(core_node); | ||
| 641 | } | ||
| 642 | } else if (dev->platform_data) { | ||
| 643 | char **names = dev->platform_data; | ||
| 644 | unsigned i; | ||
| 645 | |||
| 646 | for (i = 0; names[i]; i++) | ||
| 647 | component_match_add(dev, &match, compare_str, names[i]); | ||
| 648 | } | ||
| 649 | |||
| 650 | return component_master_add_with_match(dev, &etnaviv_master_ops, match); | ||
| 651 | } | ||
| 652 | |||
| 653 | static int etnaviv_pdev_remove(struct platform_device *pdev) | ||
| 654 | { | ||
| 655 | component_master_del(&pdev->dev, &etnaviv_master_ops); | ||
| 656 | |||
| 657 | return 0; | ||
| 658 | } | ||
| 659 | |||
/* DT compatibles for the virtual "gpu-subsystem" node on supported SoCs. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
| 666 | |||
/*
 * Platform driver for the subsystem device (the component master).
 * NOTE(review): .owner is also set by platform_driver_register(); the
 * explicit assignment is redundant but harmless.
 */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.owner  = THIS_MODULE,
		.name   = "etnaviv",
		.of_match_table = dt_match,
	},
};
| 676 | |||
| 677 | static int __init etnaviv_init(void) | ||
| 678 | { | ||
| 679 | int ret; | ||
| 680 | |||
| 681 | etnaviv_validate_init(); | ||
| 682 | |||
| 683 | ret = platform_driver_register(&etnaviv_gpu_driver); | ||
| 684 | if (ret != 0) | ||
| 685 | return ret; | ||
| 686 | |||
| 687 | ret = platform_driver_register(&etnaviv_platform_driver); | ||
| 688 | if (ret != 0) | ||
| 689 | platform_driver_unregister(&etnaviv_gpu_driver); | ||
| 690 | |||
| 691 | return ret; | ||
| 692 | } | ||
| 693 | module_init(etnaviv_init); | ||
| 694 | |||
| 695 | static void __exit etnaviv_exit(void) | ||
| 696 | { | ||
| 697 | platform_driver_unregister(&etnaviv_gpu_driver); | ||
| 698 | platform_driver_unregister(&etnaviv_platform_driver); | ||
| 699 | } | ||
| 700 | module_exit(etnaviv_exit); | ||
| 701 | |||
/* Module metadata. */
MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h new file mode 100644 index 000000000000..d6bd438bd5be --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h | |||
| @@ -0,0 +1,161 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef __ETNAVIV_DRV_H__ | ||
| 18 | #define __ETNAVIV_DRV_H__ | ||
| 19 | |||
| 20 | #include <linux/kernel.h> | ||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/cpufreq.h> | ||
| 23 | #include <linux/module.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | #include <linux/pm.h> | ||
| 26 | #include <linux/pm_runtime.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | #include <linux/list.h> | ||
| 29 | #include <linux/iommu.h> | ||
| 30 | #include <linux/types.h> | ||
| 31 | #include <linux/sizes.h> | ||
| 32 | |||
| 33 | #include <drm/drmP.h> | ||
| 34 | #include <drm/drm_crtc_helper.h> | ||
| 35 | #include <drm/drm_fb_helper.h> | ||
| 36 | #include <drm/drm_gem.h> | ||
| 37 | #include <drm/etnaviv_drm.h> | ||
| 38 | |||
/* Forward declarations for types defined in other etnaviv headers. */
struct etnaviv_cmdbuf;
struct etnaviv_gpu;
struct etnaviv_mmu;
struct etnaviv_gem_object;
struct etnaviv_gem_submit;
| 44 | |||
/* Per-open-file driver state (allocated in the DRM open callback). */
struct etnaviv_file_private {
	/* currently we don't do anything useful with this.. but when
	 * per-context address spaces are supported we'd keep track of
	 * the context's page-tables here.
	 */
	int dummy;	/* placeholder so the struct is non-empty */
};
| 52 | |||
/* Per-device driver state, hung off drm_device::dev_private. */
struct etnaviv_drm_private {
	int num_gpus;				/* number of bound GPU cores */
	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];	/* indexed by pipe id; NULL when absent */

	/* list of GEM objects: */
	struct mutex gem_lock;			/* protects gem_list */
	struct list_head gem_list;

	struct workqueue_struct *wq;		/* ordered wq for deferred work */
};
| 63 | |||
| 64 | static inline void etnaviv_queue_work(struct drm_device *dev, | ||
| 65 | struct work_struct *w) | ||
| 66 | { | ||
| 67 | struct etnaviv_drm_private *priv = dev->dev_private; | ||
| 68 | |||
| 69 | queue_work(priv->wq, w); | ||
| 70 | } | ||
| 71 | |||
/* Submit ioctl (implemented in etnaviv_gem_submit.c). */
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
	struct drm_file *file);

/* GEM object management (etnaviv_gem.c / etnaviv_gem_prime.c). */
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
	struct drm_gem_object *obj, u32 *iova);
void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
	struct timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle);
struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
	u32 size, u32 flags);
struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
	u32 size, u32 flags);
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle);

/* Ring-buffer handling and command-stream validation (etnaviv_buffer.c /
 * etnaviv_cmd_parser.c). */
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
	u32 *stream, unsigned int size,
	struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);

#ifdef CONFIG_DEBUG_FS
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m);
#endif

/* Register-space helpers; the writel/readl wrappers exist so register
 * logging (DRM_ETNAVIV_REGISTER_LOGGING) can intercept accesses. */
void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
	const char *dbgname);
void etnaviv_writel(u32 data, void __iomem *addr);
u32 etnaviv_readl(const void __iomem *addr);

/* Debug logging; VERB compiles out its body but keeps it type-checked. */
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
| 122 | |||
/*
 * size_vstruct() - storage size of a struct with a trailing flexible array.
 * @nelem:     number of array elements
 * @elem_size: size of one element in bytes
 * @base:      size of the fixed part of the structure
 *
 * Returns base + nelem * elem_size, or 0 if that value would overflow
 * size_t (callers treat 0 as "invalid size").
 */
static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
{
	size_t max_elems;

	if (!elem_size)
		return base;

	/* Largest element count that still fits alongside @base. */
	max_elems = (SIZE_MAX - base) / elem_size;
	if (nelem > max_elems)
		return 0;

	return base + nelem * elem_size;
}
| 134 | |||
/* returns true if fence a comes after fence b */
/* Signed-difference trick: correct across u32 wraparound as long as the
 * two fences are less than 2^31 apart. */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}
| 140 | |||
/* Like fence_after(), but also true when a == b (wrap-safe). */
static inline bool fence_after_eq(u32 a, u32 b)
{
	return (s32)(a - b) >= 0;
}
| 145 | |||
/*
 * Convert a userspace-supplied timeout into remaining jiffies, clamped
 * to zero when the deadline has already passed.
 *
 * NOTE(review): timespec_to_jiffies() converts a *duration*, yet the
 * result is compared against the current jiffies value as if it were an
 * absolute deadline -- confirm whether callers pass an absolute or a
 * relative timespec here.
 */
static inline unsigned long etnaviv_timeout_to_jiffies(
	const struct timespec *timeout)
{
	unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
	unsigned long start_jiffies = jiffies;
	unsigned long remaining_jiffies;

	/* time_after() handles jiffies wraparound. */
	if (time_after(start_jiffies, timeout_jiffies))
		remaining_jiffies = 0;
	else
		remaining_jiffies = timeout_jiffies - start_jiffies;

	return remaining_jiffies;
}
| 160 | |||
| 161 | #endif /* __ETNAVIV_DRV_H__ */ | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c new file mode 100644 index 000000000000..bf8fa859e8be --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c | |||
| @@ -0,0 +1,227 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/devcoredump.h> | ||
| 18 | #include "etnaviv_dump.h" | ||
| 19 | #include "etnaviv_gem.h" | ||
| 20 | #include "etnaviv_gpu.h" | ||
| 21 | #include "etnaviv_mmu.h" | ||
| 22 | #include "state.xml.h" | ||
| 23 | #include "state_hi.xml.h" | ||
| 24 | |||
/*
 * Cursor into the coredump buffer being assembled: headers are written
 * sequentially at the front (hdr), payload data follows after all
 * headers (data); start is the beginning of the whole file.
 */
struct core_dump_iterator {
	void *start;					/* base of the dump file */
	struct etnaviv_dump_object_header *hdr;		/* next header slot */
	void *data;					/* next payload position */
};
| 30 | |||
/* GPU registers captured into the ETDUMP_BUF_REG section of a coredump. */
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
| 70 | |||
| 71 | static void etnaviv_core_dump_header(struct core_dump_iterator *iter, | ||
| 72 | u32 type, void *data_end) | ||
| 73 | { | ||
| 74 | struct etnaviv_dump_object_header *hdr = iter->hdr; | ||
| 75 | |||
| 76 | hdr->magic = cpu_to_le32(ETDUMP_MAGIC); | ||
| 77 | hdr->type = cpu_to_le32(type); | ||
| 78 | hdr->file_offset = cpu_to_le32(iter->data - iter->start); | ||
| 79 | hdr->file_size = cpu_to_le32(data_end - iter->data); | ||
| 80 | |||
| 81 | iter->hdr++; | ||
| 82 | iter->data += hdr->file_size; | ||
| 83 | } | ||
| 84 | |||
| 85 | static void etnaviv_core_dump_registers(struct core_dump_iterator *iter, | ||
| 86 | struct etnaviv_gpu *gpu) | ||
| 87 | { | ||
| 88 | struct etnaviv_dump_registers *reg = iter->data; | ||
| 89 | unsigned int i; | ||
| 90 | |||
| 91 | for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) { | ||
| 92 | reg->reg = etnaviv_dump_registers[i]; | ||
| 93 | reg->value = gpu_read(gpu, etnaviv_dump_registers[i]); | ||
| 94 | } | ||
| 95 | |||
| 96 | etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg); | ||
| 97 | } | ||
| 98 | |||
/* Dump the MMU page tables (mmu_size bytes) into an ETDUMP_BUF_MMU section. */
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu, size_t mmu_size)
{
	etnaviv_iommu_dump(gpu->mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}
| 106 | |||
/*
 * Copy a memory buffer (ring or command buffer) into the dump, recording
 * the GPU address it was mapped at in the section header.
 */
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	/* iova goes into the header slot that dump_header() will finalize */
	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}
| 116 | |||
/*
 * Produce a devcoredump of the GPU state after a hang: registers, MMU
 * page tables, the kernel ring buffer, all active command buffers, a
 * page map (bomap) of active BOs, their contents, and an end marker.
 *
 * Two passes: first compute the total file size, then allocate once and
 * fill the sections in the same order.  The two list walks must therefore
 * observe the same set of objects.
 *
 * NOTE(review): no lock is taken here around gpu->active_cmd_list or
 * gpu->mmu->mappings -- presumably the caller serializes against submits;
 * confirm at the call site.
 */
void etnaviv_core_dump(struct etnaviv_gpu *gpu)
{
	struct core_dump_iterator iter;
	struct etnaviv_vram_mapping *vram;
	struct etnaviv_gem_object *obj;
	struct etnaviv_cmdbuf *cmd;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);

	/* We always dump registers, mmu, ring and end marker */
	n_obj = 4;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer->size;

	/* Add in the active command buffers */
	list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
		file_size += cmd->size;
		n_obj++;
	}

	/* Add in the active buffer objects */
	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		if (!vram->use)
			continue;

		obj = vram->object;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = vmalloc(file_size);
	if (!iter.start) {
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	/* Zero the header area so unwritten fields read as 0. */
	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
			      gpu->buffer->size, gpu->buffer->paddr);

	list_for_each_entry(cmd, &gpu->active_cmd_list, node)
		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
				      cmd->size, cmd->paddr);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		struct page **pages;
		void *vaddr;

		if (vram->use == 0)
			continue;

		obj = vram->object;

		pages = etnaviv_gem_get_pages(obj);
		if (pages) {
			int j;

			/* data[0] = this BO's offset into the bomap table */
			iter.hdr->data[0] = bomap - bomap_start;

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		/* Only copy contents if the BO has a kernel mapping. */
		vaddr = etnaviv_gem_vaddr(&obj->base);
		if (vaddr && !IS_ERR(vaddr))
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	/* Hand the buffer to devcoredump; it takes ownership via vfree. */
	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h new file mode 100644 index 000000000000..97f2f8db9133 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | * | ||
| 16 | * Etnaviv devcoredump file definitions | ||
| 17 | */ | ||
| 18 | #ifndef ETNAVIV_DUMP_H | ||
| 19 | #define ETNAVIV_DUMP_H | ||
| 20 | |||
| 21 | #include <linux/types.h> | ||
| 22 | |||
/* Dump-file constants: magic is "ETNA" in little-endian byte order,
 * followed by the section (object) type codes. */
enum {
	ETDUMP_MAGIC = 0x414e5445,	/* bytes 45 54 4e 41 = "ETNA" (LE) */
	ETDUMP_BUF_REG = 0,		/* register snapshot */
	ETDUMP_BUF_MMU,			/* MMU page tables */
	ETDUMP_BUF_RING,		/* kernel ring buffer */
	ETDUMP_BUF_CMD,			/* userspace command buffer */
	ETDUMP_BUF_BOMAP,		/* page map for BOs */
	ETDUMP_BUF_BO,			/* buffer object contents */
	ETDUMP_BUF_END,			/* end-of-file marker */
};
| 33 | |||
/* Per-section header; all fields are stored little-endian. */
struct etnaviv_dump_object_header {
	__le32 magic;		/* ETDUMP_MAGIC */
	__le32 type;		/* one of ETDUMP_BUF_* */
	__le32 file_offset;	/* payload offset from start of dump file */
	__le32 file_size;	/* payload size in bytes */
	__le64 iova;		/* GPU virtual address, where applicable */
	__le32 data[2];		/* type-specific; data[0] = bomap offset for BOs */
};
| 42 | |||
/* Registers object, an array of these */
struct etnaviv_dump_registers {
	__le32 reg;	/* register offset */
	__le32 value;	/* value read at dump time */
};
| 48 | |||
/* Kernel-only entry point; the header is also usable by userspace tools. */
#ifdef __KERNEL__
struct etnaviv_gpu;
void etnaviv_core_dump(struct etnaviv_gpu *gpu);
#endif
| 53 | |||
| 54 | #endif | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c new file mode 100644 index 000000000000..8d6f859f8200 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
| @@ -0,0 +1,897 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/spinlock.h> | ||
| 18 | #include <linux/shmem_fs.h> | ||
| 19 | |||
| 20 | #include "etnaviv_drv.h" | ||
| 21 | #include "etnaviv_gem.h" | ||
| 22 | #include "etnaviv_gpu.h" | ||
| 23 | #include "etnaviv_mmu.h" | ||
| 24 | |||
/*
 * Map the object's scatterlist for device access when the BO is not
 * CPU-cached.
 *
 * NOTE(review): the return value of dma_map_sg() (0 on mapping failure)
 * is ignored here -- confirm that failure cannot occur or is tolerated.
 */
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}
| 37 | |||
/* Undo etnaviv_gem_scatter_map(): unmap the scatterlist for non-cached BOs. */
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}
| 61 | |||
/* called with etnaviv_obj->lock held */
/* Backing-store hook for shmem BOs: populate etnaviv_obj->pages. */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	/* Pages come from the GEM object's shmem file. */
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}
| 77 | |||
/*
 * Release the object's scatterlist (unmapping it first) and drop the
 * backing pages, marking them dirty and accessed for the shmem layer.
 */
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		/* dirty=true, accessed=false */
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
| 93 | |||
/*
 * Lazily populate and return the object's page array, building the
 * scatterlist (and DMA-mapping it for non-cached BOs) on first use.
 * Returns an ERR_PTR on failure.  Caller must hold etnaviv_obj->lock.
 */
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		/* Delegate to the backing-store-specific hook (shmem/userptr). */
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}
| 125 | |||
/* Counterpart of etnaviv_gem_get_pages(); currently a no-op placeholder.
 * Caller must hold etnaviv_obj->lock. */
void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
| 131 | |||
/*
 * Apply the BO's caching attributes (WC / uncached / cached) to a VMA
 * that the DRM core has already set up.
 */
static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	pgprot_t vm_page_prot;

	/* Pages are inserted individually, so this is a mixed-map VMA. */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
| 163 | |||
| 164 | int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma) | ||
| 165 | { | ||
| 166 | struct etnaviv_gem_object *obj; | ||
| 167 | int ret; | ||
| 168 | |||
| 169 | ret = drm_gem_mmap(filp, vma); | ||
| 170 | if (ret) { | ||
| 171 | DBG("mmap failed: %d", ret); | ||
| 172 | return ret; | ||
| 173 | } | ||
| 174 | |||
| 175 | obj = to_etnaviv_bo(vma->vm_private_data); | ||
| 176 | return etnaviv_gem_mmap_obj(vma->vm_private_data, vma); | ||
| 177 | } | ||
| 178 | |||
/*
 * Page-fault handler for mmap'd objects: ensure the backing pages exist,
 * then insert the faulting page into the VMA.
 *
 * Returns a VM_FAULT_* code; transient errors (EINTR/ERESTARTSYS/EBUSY/
 * EAGAIN) map to VM_FAULT_NOPAGE so the fault is simply retried.
 */
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	/* translate kernel errno into a VM fault code for the MM core */
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
| 234 | |||
| 235 | int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset) | ||
| 236 | { | ||
| 237 | int ret; | ||
| 238 | |||
| 239 | /* Make it mmapable */ | ||
| 240 | ret = drm_gem_create_mmap_offset(obj); | ||
| 241 | if (ret) | ||
| 242 | dev_err(obj->dev->dev, "could not allocate mmap offset\n"); | ||
| 243 | else | ||
| 244 | *offset = drm_vma_node_offset_addr(&obj->vma_node); | ||
| 245 | |||
| 246 | return ret; | ||
| 247 | } | ||
| 248 | |||
| 249 | static struct etnaviv_vram_mapping * | ||
| 250 | etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj, | ||
| 251 | struct etnaviv_iommu *mmu) | ||
| 252 | { | ||
| 253 | struct etnaviv_vram_mapping *mapping; | ||
| 254 | |||
| 255 | list_for_each_entry(mapping, &obj->vram_list, obj_node) { | ||
| 256 | if (mapping->mmu == mmu) | ||
| 257 | return mapping; | ||
| 258 | } | ||
| 259 | |||
| 260 | return NULL; | ||
| 261 | } | ||
| 262 | |||
| 263 | int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, | ||
| 264 | struct drm_gem_object *obj, u32 *iova) | ||
| 265 | { | ||
| 266 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | ||
| 267 | struct etnaviv_vram_mapping *mapping; | ||
| 268 | struct page **pages; | ||
| 269 | int ret = 0; | ||
| 270 | |||
| 271 | mutex_lock(&etnaviv_obj->lock); | ||
| 272 | mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu); | ||
| 273 | if (mapping) { | ||
| 274 | /* | ||
| 275 | * Holding the object lock prevents the use count changing | ||
| 276 | * beneath us. If the use count is zero, the MMU might be | ||
| 277 | * reaping this object, so take the lock and re-check that | ||
| 278 | * the MMU owns this mapping to close this race. | ||
| 279 | */ | ||
| 280 | if (mapping->use == 0) { | ||
| 281 | mutex_lock(&gpu->mmu->lock); | ||
| 282 | if (mapping->mmu == gpu->mmu) | ||
| 283 | mapping->use += 1; | ||
| 284 | else | ||
| 285 | mapping = NULL; | ||
| 286 | mutex_unlock(&gpu->mmu->lock); | ||
| 287 | if (mapping) | ||
| 288 | goto out; | ||
| 289 | } else { | ||
| 290 | mapping->use += 1; | ||
| 291 | goto out; | ||
| 292 | } | ||
| 293 | } | ||
| 294 | |||
| 295 | pages = etnaviv_gem_get_pages(etnaviv_obj); | ||
| 296 | if (IS_ERR(pages)) { | ||
| 297 | ret = PTR_ERR(pages); | ||
| 298 | goto out; | ||
| 299 | } | ||
| 300 | |||
| 301 | /* | ||
| 302 | * See if we have a reaped vram mapping we can re-use before | ||
| 303 | * allocating a fresh mapping. | ||
| 304 | */ | ||
| 305 | mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL); | ||
| 306 | if (!mapping) { | ||
| 307 | mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); | ||
| 308 | if (!mapping) | ||
| 309 | return -ENOMEM; | ||
| 310 | |||
| 311 | INIT_LIST_HEAD(&mapping->scan_node); | ||
| 312 | mapping->object = etnaviv_obj; | ||
| 313 | } else { | ||
| 314 | list_del(&mapping->obj_node); | ||
| 315 | } | ||
| 316 | |||
| 317 | mapping->mmu = gpu->mmu; | ||
| 318 | mapping->use = 1; | ||
| 319 | |||
| 320 | ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base, | ||
| 321 | mapping); | ||
| 322 | if (ret < 0) | ||
| 323 | kfree(mapping); | ||
| 324 | else | ||
| 325 | list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list); | ||
| 326 | |||
| 327 | out: | ||
| 328 | mutex_unlock(&etnaviv_obj->lock); | ||
| 329 | |||
| 330 | if (!ret) { | ||
| 331 | /* Take a reference on the object */ | ||
| 332 | drm_gem_object_reference(obj); | ||
| 333 | *iova = mapping->iova; | ||
| 334 | } | ||
| 335 | |||
| 336 | return ret; | ||
| 337 | } | ||
| 338 | |||
/*
 * Drop one use of the object's mapping on @gpu's MMU and release the
 * object reference taken by etnaviv_gem_get_iova().  The mapping itself
 * stays on the vram list so it can be reused or reaped later.
 *
 * NOTE(review): this dereferences the looked-up mapping unconditionally;
 * it assumes a matching etnaviv_gem_get_iova() succeeded earlier so the
 * mapping cannot be NULL — confirm callers always pair get/put.
 */
void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);

	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	/* drop the reference taken in etnaviv_gem_get_iova() */
	drm_gem_object_unreference_unlocked(obj);
}
| 353 | |||
| 354 | void *etnaviv_gem_vaddr(struct drm_gem_object *obj) | ||
| 355 | { | ||
| 356 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | ||
| 357 | |||
| 358 | mutex_lock(&etnaviv_obj->lock); | ||
| 359 | if (!etnaviv_obj->vaddr) { | ||
| 360 | struct page **pages = etnaviv_gem_get_pages(etnaviv_obj); | ||
| 361 | |||
| 362 | if (IS_ERR(pages)) | ||
| 363 | return ERR_CAST(pages); | ||
| 364 | |||
| 365 | etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | ||
| 366 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | ||
| 367 | } | ||
| 368 | mutex_unlock(&etnaviv_obj->lock); | ||
| 369 | |||
| 370 | return etnaviv_obj->vaddr; | ||
| 371 | } | ||
| 372 | |||
| 373 | static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) | ||
| 374 | { | ||
| 375 | if (op & ETNA_PREP_READ) | ||
| 376 | return DMA_FROM_DEVICE; | ||
| 377 | else if (op & ETNA_PREP_WRITE) | ||
| 378 | return DMA_TO_DEVICE; | ||
| 379 | else | ||
| 380 | return DMA_BIDIRECTIONAL; | ||
| 381 | } | ||
| 382 | |||
/*
 * Prepare the object for CPU access: wait for (or, with ETNA_PREP_NOSYNC,
 * poll) outstanding GPU access via the reservation object, then sync a
 * cached BO's scatterlist for CPU access in the requested direction.
 *
 * Returns 0 on success, -EBUSY for a failed NOSYNC poll, -ETIMEDOUT on
 * timeout, or another negative errno.
 */
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		/* non-blocking: just check whether the fences signalled */
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* make sure pages and sgt exist so there is something to sync */
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		/* remembered so cpu_fini syncs back in the same direction */
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}
| 423 | |||
/*
 * Finish CPU access: for a cached BO, hand the scatterlist back to the
 * device in the direction recorded by etnaviv_gem_cpu_prep().
 * Always returns 0.
 */
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		/*
		 * NOTE(review): if prep never ran, sgt could still be NULL
		 * here and this would oops — confirm callers pair prep/fini.
		 */
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}
| 440 | |||
/*
 * Wait, with optional @timeout, until @gpu is no longer using the
 * object; thin wrapper around etnaviv_gpu_wait_obj_inactive().
 */
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}
| 448 | |||
| 449 | #ifdef CONFIG_DEBUG_FS | ||
| 450 | static void etnaviv_gem_describe_fence(struct fence *fence, | ||
| 451 | const char *type, struct seq_file *m) | ||
| 452 | { | ||
| 453 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
| 454 | seq_printf(m, "\t%9s: %s %s seq %u\n", | ||
| 455 | type, | ||
| 456 | fence->ops->get_driver_name(fence), | ||
| 457 | fence->ops->get_timeline_name(fence), | ||
| 458 | fence->seqno); | ||
| 459 | } | ||
| 460 | |||
/*
 * debugfs helper: print a one-line summary of @obj followed by any
 * unsignalled shared/exclusive fences on its reservation object.
 */
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	/* walk the fence lists under RCU, as the reservation API requires */
	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}
| 490 | |||
/*
 * debugfs helper: describe every GEM object on the driver-wide list and
 * print a total count/size, all under priv->gem_lock.
 */
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
| 511 | #endif | ||
| 512 | |||
| 513 | static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) | ||
| 514 | { | ||
| 515 | if (etnaviv_obj->vaddr) | ||
| 516 | vunmap(etnaviv_obj->vaddr); | ||
| 517 | put_pages(etnaviv_obj); | ||
| 518 | } | ||
| 519 | |||
/* Backing-store ops for ordinary shmem-allocated GEM objects. */
static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
};
| 524 | |||
/*
 * GEM free callback: remove the object from the driver list, tear down
 * all (unused) MMU mappings, release the backing store via the
 * per-object ops, and free the wrapper.  Must not run while the GPU is
 * still using the object (WARNed below).
 */
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	/* unmap and free every remaining VRAM mapping */
	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	/* only tear down the reservation object if it is our embedded one */
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}
| 556 | |||
/*
 * Add the object to the driver-wide object list (walked by the debugfs
 * describe helpers) under priv->gem_lock.  Always returns 0.
 */
int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}
| 568 | |||
| 569 | static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, | ||
| 570 | struct reservation_object *robj, const struct etnaviv_gem_ops *ops, | ||
| 571 | struct drm_gem_object **obj) | ||
| 572 | { | ||
| 573 | struct etnaviv_gem_object *etnaviv_obj; | ||
| 574 | unsigned sz = sizeof(*etnaviv_obj); | ||
| 575 | bool valid = true; | ||
| 576 | |||
| 577 | /* validate flags */ | ||
| 578 | switch (flags & ETNA_BO_CACHE_MASK) { | ||
| 579 | case ETNA_BO_UNCACHED: | ||
| 580 | case ETNA_BO_CACHED: | ||
| 581 | case ETNA_BO_WC: | ||
| 582 | break; | ||
| 583 | default: | ||
| 584 | valid = false; | ||
| 585 | } | ||
| 586 | |||
| 587 | if (!valid) { | ||
| 588 | dev_err(dev->dev, "invalid cache flag: %x\n", | ||
| 589 | (flags & ETNA_BO_CACHE_MASK)); | ||
| 590 | return -EINVAL; | ||
| 591 | } | ||
| 592 | |||
| 593 | etnaviv_obj = kzalloc(sz, GFP_KERNEL); | ||
| 594 | if (!etnaviv_obj) | ||
| 595 | return -ENOMEM; | ||
| 596 | |||
| 597 | etnaviv_obj->flags = flags; | ||
| 598 | etnaviv_obj->ops = ops; | ||
| 599 | if (robj) { | ||
| 600 | etnaviv_obj->resv = robj; | ||
| 601 | } else { | ||
| 602 | etnaviv_obj->resv = &etnaviv_obj->_resv; | ||
| 603 | reservation_object_init(&etnaviv_obj->_resv); | ||
| 604 | } | ||
| 605 | |||
| 606 | mutex_init(&etnaviv_obj->lock); | ||
| 607 | INIT_LIST_HEAD(&etnaviv_obj->vram_list); | ||
| 608 | |||
| 609 | *obj = &etnaviv_obj->base; | ||
| 610 | |||
| 611 | return 0; | ||
| 612 | } | ||
| 613 | |||
/*
 * Allocate a shmem-backed GEM object of page-aligned @size.  Returns
 * the new object or an ERR_PTR; on failure any partially constructed
 * object is released via its normal unreference path.
 */
static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	/* obj may be NULL if new_impl itself failed */
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}
| 653 | |||
| 654 | /* convenience method to construct a GEM buffer object, and userspace handle */ | ||
| 655 | int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, | ||
| 656 | u32 size, u32 flags, u32 *handle) | ||
| 657 | { | ||
| 658 | struct drm_gem_object *obj; | ||
| 659 | int ret; | ||
| 660 | |||
| 661 | obj = __etnaviv_gem_new(dev, size, flags); | ||
| 662 | if (IS_ERR(obj)) | ||
| 663 | return PTR_ERR(obj); | ||
| 664 | |||
| 665 | ret = etnaviv_gem_obj_add(dev, obj); | ||
| 666 | if (ret < 0) { | ||
| 667 | drm_gem_object_unreference_unlocked(obj); | ||
| 668 | return ret; | ||
| 669 | } | ||
| 670 | |||
| 671 | ret = drm_gem_handle_create(file, obj, handle); | ||
| 672 | |||
| 673 | /* drop reference from allocate - handle holds it now */ | ||
| 674 | drm_gem_object_unreference_unlocked(obj); | ||
| 675 | |||
| 676 | return ret; | ||
| 677 | } | ||
| 678 | |||
| 679 | struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev, | ||
| 680 | u32 size, u32 flags) | ||
| 681 | { | ||
| 682 | struct drm_gem_object *obj; | ||
| 683 | int ret; | ||
| 684 | |||
| 685 | obj = __etnaviv_gem_new(dev, size, flags); | ||
| 686 | if (IS_ERR(obj)) | ||
| 687 | return obj; | ||
| 688 | |||
| 689 | ret = etnaviv_gem_obj_add(dev, obj); | ||
| 690 | if (ret < 0) { | ||
| 691 | drm_gem_object_unreference_unlocked(obj); | ||
| 692 | return ERR_PTR(ret); | ||
| 693 | } | ||
| 694 | |||
| 695 | return obj; | ||
| 696 | } | ||
| 697 | |||
| 698 | int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, | ||
| 699 | struct reservation_object *robj, const struct etnaviv_gem_ops *ops, | ||
| 700 | struct etnaviv_gem_object **res) | ||
| 701 | { | ||
| 702 | struct drm_gem_object *obj; | ||
| 703 | int ret; | ||
| 704 | |||
| 705 | ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj); | ||
| 706 | if (ret) | ||
| 707 | return ret; | ||
| 708 | |||
| 709 | drm_gem_private_object_init(dev, obj, size); | ||
| 710 | |||
| 711 | *res = to_etnaviv_bo(obj); | ||
| 712 | |||
| 713 | return 0; | ||
| 714 | } | ||
| 715 | |||
/*
 * Deferred userptr page-pinning request, queued when the pages cannot
 * be grabbed via the fast path; owns references on mm, task and the
 * object until the worker completes.
 */
struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};
| 722 | |||
/*
 * Pin all user pages backing the object from @task's address space @mm.
 * Returns the pinned page array or an ERR_PTR; on failure any pages
 * pinned so far are released again.
 */
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	/* get_user_pages may pin fewer pages per call; loop until done */
	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages(task, mm, ptr, npages - pinned,
				     !etnaviv_obj->userptr.ro, 0,
				     pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		/* unwind the partial pin */
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}
| 758 | |||
/*
 * Workqueue function for the deferred userptr pin: pin the pages, store
 * the result (or error) on the object, then drop the references held by
 * the work item.  userptr.work doubles as the completion/error marker
 * read by etnaviv_gem_userptr_get_pages().
 */
static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		/* leave the error for the next get_pages() call to report */
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	/* release the references taken when the work was queued */
	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}
| 782 | |||
/*
 * get_pages callback for userptr objects.  Tries the lockless fast path
 * (__get_user_pages_fast) when called from the owning mm; otherwise
 * queues a worker to pin the pages and returns -EAGAIN so the caller
 * retries once the worker has finished (or reports its error).
 */
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	/* a previous call queued a worker: report its outcome */
	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			/* worker still in flight */
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		/* lockless fast path; may pin only a prefix of the range */
		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			/* fast path got everything: done */
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	/* fast path failed or partial: drop what we have and go async */
	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	/* the worker needs the task and object to stay alive */
	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}
| 847 | |||
/*
 * Release callback for userptr objects: unmap and free the sg table,
 * unpin and free the user pages, and drop the task reference taken at
 * creation time.
 */
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		/* drop the pin taken by get_user_pages */
		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}
| 863 | |||
/* Backing-store ops for userptr-backed GEM objects. */
static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
};
| 868 | |||
/*
 * Create a GEM object backed by the user memory range [@ptr, @ptr+@size)
 * of the calling task and return a handle for it in *handle.  The
 * object is always ETNA_BO_CACHED; ETNA_USERPTR_WRITE in @flags makes
 * the mapping writable.  Returns 0 or a negative errno.
 */
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	/* released again in etnaviv_gem_userptr_release() */
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret) {
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
		return ret;
	}

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h new file mode 100644 index 000000000000..a300b4b3d545 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h | |||
| @@ -0,0 +1,117 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef __ETNAVIV_GEM_H__ | ||
| 18 | #define __ETNAVIV_GEM_H__ | ||
| 19 | |||
| 20 | #include <linux/reservation.h> | ||
| 21 | #include "etnaviv_drv.h" | ||
| 22 | |||
| 23 | struct etnaviv_gem_ops; | ||
| 24 | struct etnaviv_gem_object; | ||
| 25 | |||
/* State for a userptr-backed object: the user VA, its owning task and
 * a pointer to the in-flight pin worker (or an ERR_PTR on failure). */
struct etnaviv_gem_userptr {
	uintptr_t ptr;		/* userspace start address */
	struct task_struct *task;	/* task owning the mapping */
	struct work_struct *work;	/* deferred pin worker / error marker */
	bool ro;		/* true if pages are pinned read-only */
};
| 32 | |||
/* One mapping of a GEM object into one GPU MMU's address space. */
struct etnaviv_vram_mapping {
	struct list_head obj_node;	/* entry in object's vram_list */
	struct list_head scan_node;
	struct list_head mmu_node;
	struct etnaviv_gem_object *object;
	struct etnaviv_iommu *mmu;	/* NULL once the mapping is reaped */
	struct drm_mm_node vram_node;
	unsigned int use;	/* get_iova/put_iova use count */
	u32 iova;		/* GPU virtual address */
};
| 43 | |||
/* Driver-private GEM object wrapping drm_gem_object. */
struct etnaviv_gem_object {
	struct drm_gem_object base;
	const struct etnaviv_gem_ops *ops;	/* backing-store callbacks */
	struct mutex lock;	/* protects pages/sgt/vaddr/vram_list */

	u32 flags;		/* ETNA_BO_* caching/creation flags */

	struct list_head gem_node;	/* entry in driver-wide gem_list */
	struct etnaviv_gpu *gpu;     /* non-null if active */
	atomic_t gpu_active;	/* nonzero while the GPU uses this BO */
	u32 access;

	struct page **pages;	/* pinned backing pages, lazily filled */
	struct sg_table *sgt;	/* DMA scatterlist over 'pages' */
	void *vaddr;		/* kernel vmap of the pages, or NULL */

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	struct list_head vram_list;	/* etnaviv_vram_mapping list */

	/* cache maintenance: ETNA_PREP_* op recorded by cpu_prep */
	u32 last_cpu_prep_op;

	struct etnaviv_gem_userptr userptr;	/* userptr-backed BOs only */
};
| 71 | |||
/* Downcast a drm_gem_object embedded in an etnaviv_gem_object. */
static inline
struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct etnaviv_gem_object, base);
}
| 77 | |||
/* Backing-store callbacks: populate obj->pages, and release the store. */
struct etnaviv_gem_ops {
	int (*get_pages)(struct etnaviv_gem_object *);
	void (*release)(struct etnaviv_gem_object *);
};
| 82 | |||
/* True while the GPU holds the object active (gpu_active != 0). */
static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
{
	return atomic_read(&etnaviv_obj->gpu_active) != 0;
}
| 87 | |||
#define MAX_CMDS 4

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).  This only
 * lasts for the duration of the submit-ioctl.
 */
struct etnaviv_gem_submit {
	struct drm_device *dev;
	struct etnaviv_gpu *gpu;
	struct ww_acquire_ctx ticket;	/* for locking multiple bo's */
	u32 fence;
	unsigned int nr_bos;	/* number of valid entries in bos[] */
	struct {
		u32 flags;
		struct etnaviv_gem_object *obj;
		u32 iova;	/* GPU address resolved for this bo */
	} bos[0];	/* flexible array, allocated with the submit */
};
| 107 | |||
/* Wait (with optional timeout) until the GPU stops using the object. */
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout);
/* Create a GEM object with a custom (non-shmem) backing store. */
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res);
/* Register the object on the driver-wide list (used by debugfs). */
int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
/* Get/put the object's backing pages; caller holds obj->lock. */
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
| 116 | |||
| 117 | #endif /* __ETNAVIV_GEM_H__ */ | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c new file mode 100644 index 000000000000..e94db4f95770 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | |||
| @@ -0,0 +1,122 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Red Hat | ||
| 3 | * Author: Rob Clark <robdclark@gmail.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
#include <linux/dma-buf.h>
#include <linux/err.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
| 21 | |||
| 22 | |||
| 23 | struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 24 | { | ||
| 25 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | ||
| 26 | |||
| 27 | BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */ | ||
| 28 | |||
| 29 | return etnaviv_obj->sgt; | ||
| 30 | } | ||
| 31 | |||
/* dma-buf vmap hook: return a kernel virtual mapping of the object,
 * as provided by etnaviv_gem_vaddr().
 */
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
{
	void *vaddr = etnaviv_gem_vaddr(obj);

	return vaddr;
}
| 36 | |||
/* dma-buf vunmap hook: intentionally empty.  The kernel mapping is
 * presumably kept until object release (see etnaviv_gem_prime_release()
 * for the imported case) -- confirm before relying on this.
 */
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* TODO msm_gem_vunmap() */
}
| 41 | |||
| 42 | int etnaviv_gem_prime_pin(struct drm_gem_object *obj) | ||
| 43 | { | ||
| 44 | if (!obj->import_attach) { | ||
| 45 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | ||
| 46 | |||
| 47 | mutex_lock(&etnaviv_obj->lock); | ||
| 48 | etnaviv_gem_get_pages(etnaviv_obj); | ||
| 49 | mutex_unlock(&etnaviv_obj->lock); | ||
| 50 | } | ||
| 51 | return 0; | ||
| 52 | } | ||
| 53 | |||
| 54 | void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) | ||
| 55 | { | ||
| 56 | if (!obj->import_attach) { | ||
| 57 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | ||
| 58 | |||
| 59 | mutex_lock(&etnaviv_obj->lock); | ||
| 60 | etnaviv_gem_put_pages(to_etnaviv_bo(obj)); | ||
| 61 | mutex_unlock(&etnaviv_obj->lock); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
/* .release hook for imported (PRIME) objects: tear down everything this
 * driver created around the foreign buffer.
 *
 * Order matters: the dma-buf vmap (if any) is dropped first, then our
 * local page-pointer array, and only then the prime attachment/sgt via
 * drm_prime_gem_destroy().
 *
 * NOTE(review): import_attach is only dereferenced when vaddr is set,
 * which keeps the partially-constructed failure path of
 * etnaviv_gem_prime_import_sg_table() (import_attach still NULL,
 * vaddr NULL) safe.
 */
static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->vaddr)
		dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
			       etnaviv_obj->vaddr);

	/* Don't drop the pages for imported dmabuf, as they are not
	 * ours, just free the array we allocated:
	 */
	if (etnaviv_obj->pages)
		drm_free_large(etnaviv_obj->pages);

	drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
}
| 79 | |||
/* GEM ops for objects imported through PRIME.  Imported objects never
 * allocate their own backing pages, so .get_pages is deliberately left
 * NULL.
 */
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
	/* .get_pages should never be called */
	.release = etnaviv_gem_prime_release,
};
| 84 | |||
/* Import a foreign dma-buf (described by its sg table) as an etnaviv GEM
 * object.
 *
 * A private GEM object is created sharing the exporter's reservation
 * object, then a page-pointer array is rebuilt from the sg list so the
 * rest of the driver can treat imported and native BOs uniformly.
 *
 * On any failure the single object reference is dropped, which routes
 * through etnaviv_gem_prime_release() to free the page array and the
 * prime state.  NOTE(review): that path also calls
 * drm_prime_gem_destroy() on the caller-provided sgt -- confirm the DRM
 * core expects ownership of sgt to transfer even on error.
 */
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	struct etnaviv_gem_object *etnaviv_obj;
	size_t size = PAGE_ALIGN(attach->dmabuf->size);	/* whole pages only */
	int ret, npages;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
				      attach->dmabuf->resv,
				      &etnaviv_gem_prime_ops, &etnaviv_obj);
	if (ret < 0)
		return ERR_PTR(ret);

	npages = size / PAGE_SIZE;

	etnaviv_obj->sgt = sgt;
	etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!etnaviv_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Flatten the sg list into the per-page pointer array. */
	ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
					       NULL, npages);
	if (ret)
		goto fail;

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto fail;

	return &etnaviv_obj->base;

fail:
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c new file mode 100644 index 000000000000..1aba01a999df --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | |||
| @@ -0,0 +1,443 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/reservation.h> | ||
| 18 | #include "etnaviv_drv.h" | ||
| 19 | #include "etnaviv_gpu.h" | ||
| 20 | #include "etnaviv_gem.h" | ||
| 21 | |||
/*
 * Cmdstream submission:
 */

/* Any userspace-supplied BO flag outside READ/WRITE is rejected. */
#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
#define BO_LOCKED 0x4000	/* reservation ww_mutex held for this slot */
#define BO_PINNED 0x2000	/* slot has a GPU iova assigned */
| 30 | |||
| 31 | static inline void __user *to_user_ptr(u64 address) | ||
| 32 | { | ||
| 33 | return (void __user *)(uintptr_t)address; | ||
| 34 | } | ||
| 35 | |||
/* Allocate a submit object with nr BO-tracking slots appended, and
 * initialise its ww acquire context.  Returns NULL on allocation
 * failure; on success the object must be torn down via submit_cleanup().
 *
 * NOTE(review): sz is derived from a userspace-supplied count (nr_bos);
 * this assumes size_vstruct() (not visible here) guards against
 * multiplication overflow -- confirm.
 */
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
	struct etnaviv_gpu *gpu, size_t nr)
{
	struct etnaviv_gem_submit *submit;
	size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));

	/* NOWARN/NORETRY: a large, failing allocation here is userspace's
	 * problem, not worth an OOM splat or reclaim effort.
	 */
	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (submit) {
		submit->dev = dev;
		submit->gpu = gpu;

		/* initially, until copy_from_user() and bo lookup succeeds: */
		submit->nr_bos = 0;

		ww_acquire_init(&submit->ticket, &reservation_ww_class);
	}

	return submit;
}
| 55 | |||
/* Resolve the userspace BO descriptor array into referenced GEM objects.
 *
 * file->table_lock is held across the whole loop so idr_find() can be
 * used in bulk instead of per-handle drm_gem_object_lookup(); the lock
 * also keeps each object alive until our own reference is taken.
 *
 * submit->nr_bos is set to the number of slots actually filled, even on
 * failure, so submit_cleanup() can unwind a partial lookup.
 * Returns 0 or -EINVAL (bad flags or stale handle).
 */
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_reference(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}
| 103 | |||
| 104 | static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i) | ||
| 105 | { | ||
| 106 | if (submit->bos[i].flags & BO_LOCKED) { | ||
| 107 | struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; | ||
| 108 | |||
| 109 | ww_mutex_unlock(&etnaviv_obj->resv->lock); | ||
| 110 | submit->bos[i].flags &= ~BO_LOCKED; | ||
| 111 | } | ||
| 112 | } | ||
| 113 | |||
/* Take the reservation ww_mutex of every BO in the submit using the
 * wound/wait protocol, so concurrent submits sharing BOs cannot
 * deadlock.
 *
 * On -EDEADLK we back off (dropping every lock taken so far), sleep-lock
 * the contended BO with the _slow variant, remember its index in
 * slow_locked and retry the whole loop; the retry skips re-locking that
 * slot via its BO_LOCKED flag.  -EALREADY means userspace listed the
 * same BO twice.  Any other error unwinds all locks and is returned.
 */
static int submit_lock_objects(struct etnaviv_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
					&submit->ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	/* Unwind everything up to and including the failing slot ... */
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	/* ... plus a slow-locked slot beyond i, if any.  Index 0 is always
	 * covered by the loop above, hence the "> 0" test.
	 */
	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct etnaviv_gem_object *etnaviv_obj;

		etnaviv_obj = submit->bos[contended].obj;

		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
| 167 | |||
| 168 | static int submit_fence_sync(const struct etnaviv_gem_submit *submit) | ||
| 169 | { | ||
| 170 | unsigned int context = submit->gpu->fence_context; | ||
| 171 | int i, ret = 0; | ||
| 172 | |||
| 173 | for (i = 0; i < submit->nr_bos; i++) { | ||
| 174 | struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; | ||
| 175 | bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; | ||
| 176 | |||
| 177 | ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write); | ||
| 178 | if (ret) | ||
| 179 | break; | ||
| 180 | } | ||
| 181 | |||
| 182 | return ret; | ||
| 183 | } | ||
| 184 | |||
| 185 | static void submit_unpin_objects(struct etnaviv_gem_submit *submit) | ||
| 186 | { | ||
| 187 | int i; | ||
| 188 | |||
| 189 | for (i = 0; i < submit->nr_bos; i++) { | ||
| 190 | struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; | ||
| 191 | |||
| 192 | if (submit->bos[i].flags & BO_PINNED) | ||
| 193 | etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base); | ||
| 194 | |||
| 195 | submit->bos[i].iova = 0; | ||
| 196 | submit->bos[i].flags &= ~BO_PINNED; | ||
| 197 | } | ||
| 198 | } | ||
| 199 | |||
| 200 | static int submit_pin_objects(struct etnaviv_gem_submit *submit) | ||
| 201 | { | ||
| 202 | int i, ret = 0; | ||
| 203 | |||
| 204 | for (i = 0; i < submit->nr_bos; i++) { | ||
| 205 | struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; | ||
| 206 | u32 iova; | ||
| 207 | |||
| 208 | ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base, | ||
| 209 | &iova); | ||
| 210 | if (ret) | ||
| 211 | break; | ||
| 212 | |||
| 213 | submit->bos[i].flags |= BO_PINNED; | ||
| 214 | submit->bos[i].iova = iova; | ||
| 215 | } | ||
| 216 | |||
| 217 | return ret; | ||
| 218 | } | ||
| 219 | |||
| 220 | static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx, | ||
| 221 | struct etnaviv_gem_object **obj, u32 *iova) | ||
| 222 | { | ||
| 223 | if (idx >= submit->nr_bos) { | ||
| 224 | DRM_ERROR("invalid buffer index: %u (out of %u)\n", | ||
| 225 | idx, submit->nr_bos); | ||
| 226 | return -EINVAL; | ||
| 227 | } | ||
| 228 | |||
| 229 | if (obj) | ||
| 230 | *obj = submit->bos[idx].obj; | ||
| 231 | if (iova) | ||
| 232 | *iova = submit->bos[idx].iova; | ||
| 233 | |||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | |||
| 237 | /* process the reloc's and patch up the cmdstream as needed: */ | ||
| 238 | static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, | ||
| 239 | u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs, | ||
| 240 | u32 nr_relocs) | ||
| 241 | { | ||
| 242 | u32 i, last_offset = 0; | ||
| 243 | u32 *ptr = stream; | ||
| 244 | int ret; | ||
| 245 | |||
| 246 | for (i = 0; i < nr_relocs; i++) { | ||
| 247 | const struct drm_etnaviv_gem_submit_reloc *r = relocs + i; | ||
| 248 | struct etnaviv_gem_object *bobj; | ||
| 249 | u32 iova, off; | ||
| 250 | |||
| 251 | if (unlikely(r->flags)) { | ||
| 252 | DRM_ERROR("invalid reloc flags\n"); | ||
| 253 | return -EINVAL; | ||
| 254 | } | ||
| 255 | |||
| 256 | if (r->submit_offset % 4) { | ||
| 257 | DRM_ERROR("non-aligned reloc offset: %u\n", | ||
| 258 | r->submit_offset); | ||
| 259 | return -EINVAL; | ||
| 260 | } | ||
| 261 | |||
| 262 | /* offset in dwords: */ | ||
| 263 | off = r->submit_offset / 4; | ||
| 264 | |||
| 265 | if ((off >= size ) || | ||
| 266 | (off < last_offset)) { | ||
| 267 | DRM_ERROR("invalid offset %u at reloc %u\n", off, i); | ||
| 268 | return -EINVAL; | ||
| 269 | } | ||
| 270 | |||
| 271 | ret = submit_bo(submit, r->reloc_idx, &bobj, &iova); | ||
| 272 | if (ret) | ||
| 273 | return ret; | ||
| 274 | |||
| 275 | if (r->reloc_offset >= | ||
| 276 | bobj->base.size - sizeof(*ptr)) { | ||
| 277 | DRM_ERROR("relocation %u outside object", i); | ||
| 278 | return -EINVAL; | ||
| 279 | } | ||
| 280 | |||
| 281 | ptr[off] = iova + r->reloc_offset; | ||
| 282 | |||
| 283 | last_offset = off; | ||
| 284 | } | ||
| 285 | |||
| 286 | return 0; | ||
| 287 | } | ||
| 288 | |||
| 289 | static void submit_cleanup(struct etnaviv_gem_submit *submit) | ||
| 290 | { | ||
| 291 | unsigned i; | ||
| 292 | |||
| 293 | for (i = 0; i < submit->nr_bos; i++) { | ||
| 294 | struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; | ||
| 295 | |||
| 296 | submit_unlock_object(submit, i); | ||
| 297 | drm_gem_object_unreference_unlocked(&etnaviv_obj->base); | ||
| 298 | } | ||
| 299 | |||
| 300 | ww_acquire_fini(&submit->ticket); | ||
| 301 | kfree(submit); | ||
| 302 | } | ||
| 303 | |||
/* DRM_ETNAVIV_GEM_SUBMIT ioctl: validate a userspace command stream and
 * hand it to the GPU.
 *
 * Flow: sanity-check args -> bulk-copy bo/reloc/stream arrays from
 * userspace (outside any locks) -> look up and ww-lock all BOs ->
 * validate the stream -> fence-sync -> pin BOs -> apply relocations ->
 * copy the stream into a cmdbuf and call etnaviv_gpu_submit().
 *
 * Ownership: cmdbuf is freed here unless etnaviv_gpu_submit() succeeded
 * (it then takes it, signalled by cmdbuf = NULL); submit is always
 * reclaimed via submit_cleanup(); the temporary copies always freed.
 */
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_cmdbuf *cmdbuf;
	struct etnaviv_gpu *gpu;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	/* Stream must be a whole number of 32-bit command words. */
	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
	stream = drm_malloc_ab(1, args->stream_size);
	/* +8 bytes of slack beyond the aligned stream -- presumably for
	 * commands the kernel appends at submit time; confirm against the
	 * buffer-construction code.
	 */
	cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
					args->nr_bos);
	if (!bos || !relocs || !stream || !cmdbuf) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	cmdbuf->exec_state = args->exec_state;
	cmdbuf->ctx = file->driver_priv;

	/* copy_from_user() returns the number of bytes NOT copied;
	 * any shortfall is reported to userspace as -EFAULT.
	 */
	ret = copy_from_user(bos, to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	submit = submit_create(dev, gpu, args->nr_bos);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;

	ret = submit_lock_objects(submit);
	if (ret)
		goto err_submit_objects;

	/* Reject streams with disallowed commands before touching BOs. */
	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
	}

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto out;

	memcpy(cmdbuf->vaddr, stream, args->stream_size);
	cmdbuf->user_size = ALIGN(args->stream_size, 8);

	ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
	if (ret == 0)
		cmdbuf = NULL;

	/* NOTE(review): fence is copied out even when submit failed;
	 * presumably harmless since ret signals the error -- confirm.
	 */
	args->fence = submit->fence;

out:
	submit_unpin_objects(submit);

	/*
	 * If we're returning -EAGAIN, it may be due to the userptr code
	 * wanting to run its workqueue outside of any locks. Flush our
	 * workqueue to ensure that it is run in a timely manner.
	 */
	if (ret == -EAGAIN)
		flush_workqueue(priv->wq);

err_submit_objects:
	submit_cleanup(submit);

err_submit_cmds:
	/* if we still own the cmdbuf */
	if (cmdbuf)
		etnaviv_gpu_cmdbuf_free(cmdbuf);
	if (stream)
		drm_free_large(stream);
	if (bos)
		drm_free_large(bos);
	if (relocs)
		drm_free_large(relocs);

	return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c new file mode 100644 index 000000000000..d39093dc37e6 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c | |||
| @@ -0,0 +1,1644 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/component.h> | ||
| 18 | #include <linux/fence.h> | ||
| 19 | #include <linux/moduleparam.h> | ||
| 20 | #include <linux/of_device.h> | ||
| 21 | #include "etnaviv_dump.h" | ||
| 22 | #include "etnaviv_gpu.h" | ||
| 23 | #include "etnaviv_gem.h" | ||
| 24 | #include "etnaviv_mmu.h" | ||
| 25 | #include "etnaviv_iommu.h" | ||
| 26 | #include "etnaviv_iommu_v2.h" | ||
| 27 | #include "common.xml.h" | ||
| 28 | #include "state.xml.h" | ||
| 29 | #include "state_hi.xml.h" | ||
| 30 | #include "cmdstream.xml.h" | ||
| 31 | |||
/* Legacy platform-device id table -- presumably for board-file
 * instantiation of a 2D core (DT probing would match on a compatible
 * string instead); confirm against the driver registration code.
 */
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

/* etnaviv.dump_core: presumably gates devcoredump capture on GPU hang
 * (used elsewhere in this file) -- writable at runtime by root (0600).
 */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

/*
 * Driver functions:
 */
| 43 | |||
| 44 | int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) | ||
| 45 | { | ||
| 46 | switch (param) { | ||
| 47 | case ETNAVIV_PARAM_GPU_MODEL: | ||
| 48 | *value = gpu->identity.model; | ||
| 49 | break; | ||
| 50 | |||
| 51 | case ETNAVIV_PARAM_GPU_REVISION: | ||
| 52 | *value = gpu->identity.revision; | ||
| 53 | break; | ||
| 54 | |||
| 55 | case ETNAVIV_PARAM_GPU_FEATURES_0: | ||
| 56 | *value = gpu->identity.features; | ||
| 57 | break; | ||
| 58 | |||
| 59 | case ETNAVIV_PARAM_GPU_FEATURES_1: | ||
| 60 | *value = gpu->identity.minor_features0; | ||
| 61 | break; | ||
| 62 | |||
| 63 | case ETNAVIV_PARAM_GPU_FEATURES_2: | ||
| 64 | *value = gpu->identity.minor_features1; | ||
| 65 | break; | ||
| 66 | |||
| 67 | case ETNAVIV_PARAM_GPU_FEATURES_3: | ||
| 68 | *value = gpu->identity.minor_features2; | ||
| 69 | break; | ||
| 70 | |||
| 71 | case ETNAVIV_PARAM_GPU_FEATURES_4: | ||
| 72 | *value = gpu->identity.minor_features3; | ||
| 73 | break; | ||
| 74 | |||
| 75 | case ETNAVIV_PARAM_GPU_STREAM_COUNT: | ||
| 76 | *value = gpu->identity.stream_count; | ||
| 77 | break; | ||
| 78 | |||
| 79 | case ETNAVIV_PARAM_GPU_REGISTER_MAX: | ||
| 80 | *value = gpu->identity.register_max; | ||
| 81 | break; | ||
| 82 | |||
| 83 | case ETNAVIV_PARAM_GPU_THREAD_COUNT: | ||
| 84 | *value = gpu->identity.thread_count; | ||
| 85 | break; | ||
| 86 | |||
| 87 | case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE: | ||
| 88 | *value = gpu->identity.vertex_cache_size; | ||
| 89 | break; | ||
| 90 | |||
| 91 | case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT: | ||
| 92 | *value = gpu->identity.shader_core_count; | ||
| 93 | break; | ||
| 94 | |||
| 95 | case ETNAVIV_PARAM_GPU_PIXEL_PIPES: | ||
| 96 | *value = gpu->identity.pixel_pipes; | ||
| 97 | break; | ||
| 98 | |||
| 99 | case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE: | ||
| 100 | *value = gpu->identity.vertex_output_buffer_size; | ||
| 101 | break; | ||
| 102 | |||
| 103 | case ETNAVIV_PARAM_GPU_BUFFER_SIZE: | ||
| 104 | *value = gpu->identity.buffer_size; | ||
| 105 | break; | ||
| 106 | |||
| 107 | case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT: | ||
| 108 | *value = gpu->identity.instruction_count; | ||
| 109 | break; | ||
| 110 | |||
| 111 | case ETNAVIV_PARAM_GPU_NUM_CONSTANTS: | ||
| 112 | *value = gpu->identity.num_constants; | ||
| 113 | break; | ||
| 114 | |||
| 115 | default: | ||
| 116 | DBG("%s: invalid param: %u", dev_name(gpu->dev), param); | ||
| 117 | return -EINVAL; | ||
| 118 | } | ||
| 119 | |||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | |||
/* Read the HI_CHIP_SPECS registers (where present) and fill in the
 * identity spec fields, then replace any zero/encoded values with the
 * known per-model defaults.  Several fields are stored by hardware as
 * log2 and are expanded here (register_max, thread_count,
 * vertex_output_buffer_size).  Model/revision numbers used for the
 * defaults come from observed hardware.
 */
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[2];

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);

		gpu->identity.stream_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
		gpu->identity.register_max =
			(specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
				>> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
		gpu->identity.thread_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
		gpu->identity.vertex_cache_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
		gpu->identity.shader_core_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
		gpu->identity.pixel_pipes =
			(specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
				>> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
		gpu->identity.vertex_output_buffer_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;

		gpu->identity.buffer_size =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
		gpu->identity.instruction_count =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
		gpu->identity.num_constants =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
				>> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value (stored as log2) */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == 0x0400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count (stored as log2) */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == 0x0400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == 0x0500 ||
		 gpu->identity.model == 0x0530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size (stored as log2); GC400 defaults
	 * depend on revision.
	 */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == 0x0400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	/* instruction_count is an enum in hardware, decoded here */
	switch (gpu->identity.instruction_count) {
	case 0:
		if ((gpu->identity.model == 0x2000 &&
		     gpu->identity.revision == 0x5108) ||
		    gpu->identity.model == 0x880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;
}
| 246 | |||
/* Identify the GPU core: read model/revision and feature registers,
 * apply per-model quirks, derive the idle mask and finally fill in the
 * spec fields via etnaviv_hw_specs().
 */
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (VIVS_HI_CHIP_IDENTITY_FAMILY(chipIdentity) == 0x01) {
		gpu->identity.model = 0x500; /* gc500 */
		gpu->identity.revision = VIVS_HI_CHIP_IDENTITY_REVISION(chipIdentity);
	} else {

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != 0x0420) {
			/* masks e.g. 0x0410/0x0440 down to plain 0x0400 */
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (gpu->identity.model == 0x300 &&
		    gpu->identity.revision == 0x2201) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == 0x700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
	    (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	/* minor_features1..3 only exist when feature bit says so */
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}
| 339 | |||
| 340 | static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock) | ||
| 341 | { | ||
| 342 | gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock | | ||
| 343 | VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD); | ||
| 344 | gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); | ||
| 345 | } | ||
| 346 | |||
/*
 * Soft-reset the GPU core.
 *
 * Runs the clock-gate / isolate / soft-reset / de-isolate sequence in a
 * loop for up to one second, until both the front end (FE) and the 2D/3D
 * cores report idle.  On success the clock control register is programmed
 * with the value the rest of the driver relies on.
 *
 * Returns 0 on success, -EBUSY if the GPU never became idle.
 */
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* TODO
	 *
	 * - clock gating
	 * - pulse eater
	 * - what about VG?
	 */

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		msleep(1);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if the FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		/* Re-sample the state for the diagnostic message. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}
| 436 | |||
/*
 * Program the freshly-reset GPU with its initial state: apply model and
 * revision specific workarounds, set AXI cache attributes and memory base
 * addresses, restore the MMU page tables, and start the FE command
 * processor on the driver's ring buffer.
 *
 * Callers in this file invoke this with gpu->lock held.
 */
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	/*
	 * GC320 rev 0x5007/0x5220 workaround: adjust the low byte of
	 * VIVS_MC_DEBUG_MEMORY.  The meaning of the 0x0c/0x08 values is
	 * undocumented (taken from the vendor driver).
	 */
	if (gpu->identity.model == chipModel_GC320 &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
	    (gpu->identity.revision == 0x5007 ||
	     gpu->identity.revision == 0x5220)) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* setup the MMU page table pointers */
	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	/* Unmask all interrupts, then point the FE at the ring buffer. */
	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
		  gpu->buffer->paddr - gpu->memory_base);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}
| 495 | |||
| 496 | int etnaviv_gpu_init(struct etnaviv_gpu *gpu) | ||
| 497 | { | ||
| 498 | int ret, i; | ||
| 499 | struct iommu_domain *iommu; | ||
| 500 | enum etnaviv_iommu_version version; | ||
| 501 | bool mmuv2; | ||
| 502 | |||
| 503 | ret = pm_runtime_get_sync(gpu->dev); | ||
| 504 | if (ret < 0) | ||
| 505 | return ret; | ||
| 506 | |||
| 507 | etnaviv_hw_identify(gpu); | ||
| 508 | |||
| 509 | if (gpu->identity.model == 0) { | ||
| 510 | dev_err(gpu->dev, "Unknown GPU model\n"); | ||
| 511 | pm_runtime_put_autosuspend(gpu->dev); | ||
| 512 | return -ENXIO; | ||
| 513 | } | ||
| 514 | |||
| 515 | ret = etnaviv_hw_reset(gpu); | ||
| 516 | if (ret) | ||
| 517 | goto fail; | ||
| 518 | |||
| 519 | /* Setup IOMMU.. eventually we will (I think) do this once per context | ||
| 520 | * and have separate page tables per context. For now, to keep things | ||
| 521 | * simple and to get something working, just use a single address space: | ||
| 522 | */ | ||
| 523 | mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION; | ||
| 524 | dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2); | ||
| 525 | |||
| 526 | if (!mmuv2) { | ||
| 527 | iommu = etnaviv_iommu_domain_alloc(gpu); | ||
| 528 | version = ETNAVIV_IOMMU_V1; | ||
| 529 | } else { | ||
| 530 | iommu = etnaviv_iommu_v2_domain_alloc(gpu); | ||
| 531 | version = ETNAVIV_IOMMU_V2; | ||
| 532 | } | ||
| 533 | |||
| 534 | if (!iommu) { | ||
| 535 | ret = -ENOMEM; | ||
| 536 | goto fail; | ||
| 537 | } | ||
| 538 | |||
| 539 | /* TODO: we will leak here memory - fix it! */ | ||
| 540 | |||
| 541 | gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); | ||
| 542 | if (!gpu->mmu) { | ||
| 543 | ret = -ENOMEM; | ||
| 544 | goto fail; | ||
| 545 | } | ||
| 546 | |||
| 547 | /* Create buffer: */ | ||
| 548 | gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0); | ||
| 549 | if (!gpu->buffer) { | ||
| 550 | ret = -ENOMEM; | ||
| 551 | dev_err(gpu->dev, "could not create command buffer\n"); | ||
| 552 | goto fail; | ||
| 553 | } | ||
| 554 | if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) { | ||
| 555 | ret = -EINVAL; | ||
| 556 | dev_err(gpu->dev, | ||
| 557 | "command buffer outside valid memory window\n"); | ||
| 558 | goto free_buffer; | ||
| 559 | } | ||
| 560 | |||
| 561 | /* Setup event management */ | ||
| 562 | spin_lock_init(&gpu->event_spinlock); | ||
| 563 | init_completion(&gpu->event_free); | ||
| 564 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { | ||
| 565 | gpu->event[i].used = false; | ||
| 566 | complete(&gpu->event_free); | ||
| 567 | } | ||
| 568 | |||
| 569 | /* Now program the hardware */ | ||
| 570 | mutex_lock(&gpu->lock); | ||
| 571 | etnaviv_gpu_hw_init(gpu); | ||
| 572 | mutex_unlock(&gpu->lock); | ||
| 573 | |||
| 574 | pm_runtime_mark_last_busy(gpu->dev); | ||
| 575 | pm_runtime_put_autosuspend(gpu->dev); | ||
| 576 | |||
| 577 | return 0; | ||
| 578 | |||
| 579 | free_buffer: | ||
| 580 | etnaviv_gpu_cmdbuf_free(gpu->buffer); | ||
| 581 | gpu->buffer = NULL; | ||
| 582 | fail: | ||
| 583 | pm_runtime_mark_last_busy(gpu->dev); | ||
| 584 | pm_runtime_put_autosuspend(gpu->dev); | ||
| 585 | |||
| 586 | return ret; | ||
| 587 | } | ||
| 588 | |||
| 589 | #ifdef CONFIG_DEBUG_FS | ||
/*
 * Two successive samples of the FE DMA engine registers, used by
 * verify_dma() to tell a running front end from a stuck one.
 */
struct dma_debug {
	u32 address[2];	/* VIVS_FE_DMA_ADDRESS samples */
	u32 state[2];	/* VIVS_FE_DMA_DEBUG_STATE samples */
};
| 594 | |||
| 595 | static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug) | ||
| 596 | { | ||
| 597 | u32 i; | ||
| 598 | |||
| 599 | debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); | ||
| 600 | debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE); | ||
| 601 | |||
| 602 | for (i = 0; i < 500; i++) { | ||
| 603 | debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); | ||
| 604 | debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE); | ||
| 605 | |||
| 606 | if (debug->address[0] != debug->address[1]) | ||
| 607 | break; | ||
| 608 | |||
| 609 | if (debug->state[0] != debug->state[1]) | ||
| 610 | break; | ||
| 611 | } | ||
| 612 | } | ||
| 613 | |||
| 614 | int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) | ||
| 615 | { | ||
| 616 | struct dma_debug debug; | ||
| 617 | u32 dma_lo, dma_hi, axi, idle; | ||
| 618 | int ret; | ||
| 619 | |||
| 620 | seq_printf(m, "%s Status:\n", dev_name(gpu->dev)); | ||
| 621 | |||
| 622 | ret = pm_runtime_get_sync(gpu->dev); | ||
| 623 | if (ret < 0) | ||
| 624 | return ret; | ||
| 625 | |||
| 626 | dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW); | ||
| 627 | dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH); | ||
| 628 | axi = gpu_read(gpu, VIVS_HI_AXI_STATUS); | ||
| 629 | idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); | ||
| 630 | |||
| 631 | verify_dma(gpu, &debug); | ||
| 632 | |||
| 633 | seq_puts(m, "\tfeatures\n"); | ||
| 634 | seq_printf(m, "\t minor_features0: 0x%08x\n", | ||
| 635 | gpu->identity.minor_features0); | ||
| 636 | seq_printf(m, "\t minor_features1: 0x%08x\n", | ||
| 637 | gpu->identity.minor_features1); | ||
| 638 | seq_printf(m, "\t minor_features2: 0x%08x\n", | ||
| 639 | gpu->identity.minor_features2); | ||
| 640 | seq_printf(m, "\t minor_features3: 0x%08x\n", | ||
| 641 | gpu->identity.minor_features3); | ||
| 642 | |||
| 643 | seq_puts(m, "\tspecs\n"); | ||
| 644 | seq_printf(m, "\t stream_count: %d\n", | ||
| 645 | gpu->identity.stream_count); | ||
| 646 | seq_printf(m, "\t register_max: %d\n", | ||
| 647 | gpu->identity.register_max); | ||
| 648 | seq_printf(m, "\t thread_count: %d\n", | ||
| 649 | gpu->identity.thread_count); | ||
| 650 | seq_printf(m, "\t vertex_cache_size: %d\n", | ||
| 651 | gpu->identity.vertex_cache_size); | ||
| 652 | seq_printf(m, "\t shader_core_count: %d\n", | ||
| 653 | gpu->identity.shader_core_count); | ||
| 654 | seq_printf(m, "\t pixel_pipes: %d\n", | ||
| 655 | gpu->identity.pixel_pipes); | ||
| 656 | seq_printf(m, "\t vertex_output_buffer_size: %d\n", | ||
| 657 | gpu->identity.vertex_output_buffer_size); | ||
| 658 | seq_printf(m, "\t buffer_size: %d\n", | ||
| 659 | gpu->identity.buffer_size); | ||
| 660 | seq_printf(m, "\t instruction_count: %d\n", | ||
| 661 | gpu->identity.instruction_count); | ||
| 662 | seq_printf(m, "\t num_constants: %d\n", | ||
| 663 | gpu->identity.num_constants); | ||
| 664 | |||
| 665 | seq_printf(m, "\taxi: 0x%08x\n", axi); | ||
| 666 | seq_printf(m, "\tidle: 0x%08x\n", idle); | ||
| 667 | idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP; | ||
| 668 | if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) | ||
| 669 | seq_puts(m, "\t FE is not idle\n"); | ||
| 670 | if ((idle & VIVS_HI_IDLE_STATE_DE) == 0) | ||
| 671 | seq_puts(m, "\t DE is not idle\n"); | ||
| 672 | if ((idle & VIVS_HI_IDLE_STATE_PE) == 0) | ||
| 673 | seq_puts(m, "\t PE is not idle\n"); | ||
| 674 | if ((idle & VIVS_HI_IDLE_STATE_SH) == 0) | ||
| 675 | seq_puts(m, "\t SH is not idle\n"); | ||
| 676 | if ((idle & VIVS_HI_IDLE_STATE_PA) == 0) | ||
| 677 | seq_puts(m, "\t PA is not idle\n"); | ||
| 678 | if ((idle & VIVS_HI_IDLE_STATE_SE) == 0) | ||
| 679 | seq_puts(m, "\t SE is not idle\n"); | ||
| 680 | if ((idle & VIVS_HI_IDLE_STATE_RA) == 0) | ||
| 681 | seq_puts(m, "\t RA is not idle\n"); | ||
| 682 | if ((idle & VIVS_HI_IDLE_STATE_TX) == 0) | ||
| 683 | seq_puts(m, "\t TX is not idle\n"); | ||
| 684 | if ((idle & VIVS_HI_IDLE_STATE_VG) == 0) | ||
| 685 | seq_puts(m, "\t VG is not idle\n"); | ||
| 686 | if ((idle & VIVS_HI_IDLE_STATE_IM) == 0) | ||
| 687 | seq_puts(m, "\t IM is not idle\n"); | ||
| 688 | if ((idle & VIVS_HI_IDLE_STATE_FP) == 0) | ||
| 689 | seq_puts(m, "\t FP is not idle\n"); | ||
| 690 | if ((idle & VIVS_HI_IDLE_STATE_TS) == 0) | ||
| 691 | seq_puts(m, "\t TS is not idle\n"); | ||
| 692 | if (idle & VIVS_HI_IDLE_STATE_AXI_LP) | ||
| 693 | seq_puts(m, "\t AXI low power mode\n"); | ||
| 694 | |||
| 695 | if (gpu->identity.features & chipFeatures_DEBUG_MODE) { | ||
| 696 | u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0); | ||
| 697 | u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1); | ||
| 698 | u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE); | ||
| 699 | |||
| 700 | seq_puts(m, "\tMC\n"); | ||
| 701 | seq_printf(m, "\t read0: 0x%08x\n", read0); | ||
| 702 | seq_printf(m, "\t read1: 0x%08x\n", read1); | ||
| 703 | seq_printf(m, "\t write: 0x%08x\n", write); | ||
| 704 | } | ||
| 705 | |||
| 706 | seq_puts(m, "\tDMA "); | ||
| 707 | |||
| 708 | if (debug.address[0] == debug.address[1] && | ||
| 709 | debug.state[0] == debug.state[1]) { | ||
| 710 | seq_puts(m, "seems to be stuck\n"); | ||
| 711 | } else if (debug.address[0] == debug.address[1]) { | ||
| 712 | seq_puts(m, "adress is constant\n"); | ||
| 713 | } else { | ||
| 714 | seq_puts(m, "is runing\n"); | ||
| 715 | } | ||
| 716 | |||
| 717 | seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]); | ||
| 718 | seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]); | ||
| 719 | seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]); | ||
| 720 | seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]); | ||
| 721 | seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n", | ||
| 722 | dma_lo, dma_hi); | ||
| 723 | |||
| 724 | ret = 0; | ||
| 725 | |||
| 726 | pm_runtime_mark_last_busy(gpu->dev); | ||
| 727 | pm_runtime_put_autosuspend(gpu->dev); | ||
| 728 | |||
| 729 | return ret; | ||
| 730 | } | ||
| 731 | #endif | ||
| 732 | |||
| 733 | /* | ||
| 734 | * Power Management: | ||
| 735 | */ | ||
| 736 | static int enable_clk(struct etnaviv_gpu *gpu) | ||
| 737 | { | ||
| 738 | if (gpu->clk_core) | ||
| 739 | clk_prepare_enable(gpu->clk_core); | ||
| 740 | if (gpu->clk_shader) | ||
| 741 | clk_prepare_enable(gpu->clk_shader); | ||
| 742 | |||
| 743 | return 0; | ||
| 744 | } | ||
| 745 | |||
| 746 | static int disable_clk(struct etnaviv_gpu *gpu) | ||
| 747 | { | ||
| 748 | if (gpu->clk_core) | ||
| 749 | clk_disable_unprepare(gpu->clk_core); | ||
| 750 | if (gpu->clk_shader) | ||
| 751 | clk_disable_unprepare(gpu->clk_shader); | ||
| 752 | |||
| 753 | return 0; | ||
| 754 | } | ||
| 755 | |||
| 756 | static int enable_axi(struct etnaviv_gpu *gpu) | ||
| 757 | { | ||
| 758 | if (gpu->clk_bus) | ||
| 759 | clk_prepare_enable(gpu->clk_bus); | ||
| 760 | |||
| 761 | return 0; | ||
| 762 | } | ||
| 763 | |||
| 764 | static int disable_axi(struct etnaviv_gpu *gpu) | ||
| 765 | { | ||
| 766 | if (gpu->clk_bus) | ||
| 767 | clk_disable_unprepare(gpu->clk_bus); | ||
| 768 | |||
| 769 | return 0; | ||
| 770 | } | ||
| 771 | |||
| 772 | /* | ||
| 773 | * Hangcheck detection for locked gpu: | ||
| 774 | */ | ||
/*
 * Recovery worker, queued by hangcheck_handler() when the GPU is deemed
 * locked up: optionally capture a core dump, reset the GPU, complete all
 * outstanding events (the hardware will never signal them after reset),
 * reinitialise the hardware state and retire the stuck buffers.
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	/* Bail out if we can't wake the device; nothing to recover then. */
	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
		/*
		 * Decrement the PM count for each stuck event.  This is safe
		 * even in atomic context as we use ASYNC RPM here.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	/* Treat everything submitted so far as completed. */
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	/* Force a context switch on the next submit after the reset. */
	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}
| 825 | |||
| 826 | static void hangcheck_timer_reset(struct etnaviv_gpu *gpu) | ||
| 827 | { | ||
| 828 | DBG("%s", dev_name(gpu->dev)); | ||
| 829 | mod_timer(&gpu->hangcheck_timer, | ||
| 830 | round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES)); | ||
| 831 | } | ||
| 832 | |||
/*
 * Hangcheck timer callback.  "Progress" means either a newly completed
 * fence since the last check, or the FE DMA address having moved by more
 * than 16 bytes (or backwards, i.e. a jump).  If there is no progress
 * while work is still outstanding, the recovery worker is scheduled.
 */
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	/* First signal: a fence completed since the previous check. */
	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		/*
		 * Second signal: the FE moved meaningfully.  Small forward
		 * movement (<= 16 bytes) is not counted as progress.
		 */
		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	/* Stuck with work pending -> kick off recovery. */
	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}
| 866 | |||
/*
 * Stop hangcheck processing: kill the timer, then wait for a possibly
 * in-flight recovery worker to finish.
 */
static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}
| 872 | |||
/* fence object management */

/* Driver fence: the generic struct fence plus a backpointer to its GPU. */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct fence base;
};
| 878 | |||
/* Convert a generic struct fence back to the enclosing etnaviv fence. */
static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}
| 883 | |||
/* fence_ops: driver name shown in fence debug output. */
static const char *etnaviv_fence_get_driver_name(struct fence *fence)
{
	return "etnaviv";
}
| 888 | |||
| 889 | static const char *etnaviv_fence_get_timeline_name(struct fence *fence) | ||
| 890 | { | ||
| 891 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | ||
| 892 | |||
| 893 | return dev_name(f->gpu->dev); | ||
| 894 | } | ||
| 895 | |||
/*
 * fence_ops: nothing to arm here — fences are signalled from event
 * processing elsewhere in the driver (see e.g. recover_worker()).
 */
static bool etnaviv_fence_enable_signaling(struct fence *fence)
{
	return true;
}
| 900 | |||
| 901 | static bool etnaviv_fence_signaled(struct fence *fence) | ||
| 902 | { | ||
| 903 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | ||
| 904 | |||
| 905 | return fence_completed(f->gpu, f->base.seqno); | ||
| 906 | } | ||
| 907 | |||
| 908 | static void etnaviv_fence_release(struct fence *fence) | ||
| 909 | { | ||
| 910 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | ||
| 911 | |||
| 912 | kfree_rcu(f, base.rcu); | ||
| 913 | } | ||
| 914 | |||
/* Fence callbacks; waiting falls back to the framework's default wait. */
static const struct fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = fence_default_wait,
	.release = etnaviv_fence_release,
};
| 923 | |||
| 924 | static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) | ||
| 925 | { | ||
| 926 | struct etnaviv_fence *f; | ||
| 927 | |||
| 928 | f = kzalloc(sizeof(*f), GFP_KERNEL); | ||
| 929 | if (!f) | ||
| 930 | return NULL; | ||
| 931 | |||
| 932 | f->gpu = gpu; | ||
| 933 | |||
| 934 | fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, | ||
| 935 | gpu->fence_context, ++gpu->next_fence); | ||
| 936 | |||
| 937 | return &f->base; | ||
| 938 | } | ||
| 939 | |||
/*
 * Synchronise a GEM object's reservation object before GPU use.
 *
 * @context:   fence context of the submitting timeline; fences from the
 *             same context are not waited upon.
 * @exclusive: true if the object will be written, in which case all
 *             shared fences must be waited for as well.
 *
 * For a shared (read) access a shared-fence slot is reserved up front.
 * Returns 0 on success or a negative errno from the waits/reservation.
 */
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	/* Readers only need the exclusive fence handled above. */
	if (!exclusive || !fobj)
		return 0;

	/* Writers must additionally wait for every foreign shared fence. */
	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
| 984 | |||
| 985 | /* | ||
| 986 | * event management: | ||
| 987 | */ | ||
| 988 | |||
| 989 | static unsigned int event_alloc(struct etnaviv_gpu *gpu) | ||
| 990 | { | ||
| 991 | unsigned long ret, flags; | ||
| 992 | unsigned int i, event = ~0U; | ||
| 993 | |||
| 994 | ret = wait_for_completion_timeout(&gpu->event_free, | ||
| 995 | msecs_to_jiffies(10 * 10000)); | ||
| 996 | if (!ret) | ||
| 997 | dev_err(gpu->dev, "wait_for_completion_timeout failed"); | ||
| 998 | |||
| 999 | spin_lock_irqsave(&gpu->event_spinlock, flags); | ||
| 1000 | |||
| 1001 | /* find first free event */ | ||
| 1002 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { | ||
| 1003 | if (gpu->event[i].used == false) { | ||
| 1004 | gpu->event[i].used = true; | ||
| 1005 | event = i; | ||
| 1006 | break; | ||
| 1007 | } | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | spin_unlock_irqrestore(&gpu->event_spinlock, flags); | ||
| 1011 | |||
| 1012 | return event; | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | static void event_free(struct etnaviv_gpu *gpu, unsigned int event) | ||
| 1016 | { | ||
| 1017 | unsigned long flags; | ||
| 1018 | |||
| 1019 | spin_lock_irqsave(&gpu->event_spinlock, flags); | ||
| 1020 | |||
| 1021 | if (gpu->event[event].used == false) { | ||
| 1022 | dev_warn(gpu->dev, "event %u is already marked as free", | ||
| 1023 | event); | ||
| 1024 | spin_unlock_irqrestore(&gpu->event_spinlock, flags); | ||
| 1025 | } else { | ||
| 1026 | gpu->event[event].used = false; | ||
| 1027 | spin_unlock_irqrestore(&gpu->event_spinlock, flags); | ||
| 1028 | |||
| 1029 | complete(&gpu->event_free); | ||
| 1030 | } | ||
| 1031 | } | ||
| 1032 | |||
| 1033 | /* | ||
| 1034 | * Cmdstream submission/retirement: | ||
| 1035 | */ | ||
| 1036 | |||
| 1037 | struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size, | ||
| 1038 | size_t nr_bos) | ||
| 1039 | { | ||
| 1040 | struct etnaviv_cmdbuf *cmdbuf; | ||
| 1041 | size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]), | ||
| 1042 | sizeof(*cmdbuf)); | ||
| 1043 | |||
| 1044 | cmdbuf = kzalloc(sz, GFP_KERNEL); | ||
| 1045 | if (!cmdbuf) | ||
| 1046 | return NULL; | ||
| 1047 | |||
| 1048 | cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr, | ||
| 1049 | GFP_KERNEL); | ||
| 1050 | if (!cmdbuf->vaddr) { | ||
| 1051 | kfree(cmdbuf); | ||
| 1052 | return NULL; | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | cmdbuf->gpu = gpu; | ||
| 1056 | cmdbuf->size = size; | ||
| 1057 | |||
| 1058 | return cmdbuf; | ||
| 1059 | } | ||
| 1060 | |||
/* Free a command buffer allocated with etnaviv_gpu_cmdbuf_new(). */
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
			      cmdbuf->vaddr, cmdbuf->paddr);
	kfree(cmdbuf);
}
| 1067 | |||
/*
 * Retirement worker: walk the (ordered) active command buffer list and,
 * for every cmdbuf whose fence has signalled, drop the per-BO references
 * taken at submit time and free the cmdbuf.  Finally record the retired
 * fence and wake anyone waiting in the *_wait_* helpers.
 */
static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		/* List is in submission order; stop at the first unsignalled. */
		if (!fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
		}

		etnaviv_gpu_cmdbuf_free(cmdbuf);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	/* Wake fence/object waiters now that state has advanced. */
	wake_up_all(&gpu->fence_event);
}
| 1101 | |||
/*
 * Wait for a fence number to complete.
 *
 * @timeout: NULL for a non-blocking poll, otherwise the wall-clock wait.
 *
 * Returns 0 when completed, -EBUSY on a failed poll, -ETIMEDOUT when the
 * wait expired, -ERESTARTSYS when interrupted by a signal, or -EINVAL for
 * a fence number that was never handed out.
 */
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
			  fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
			    fence, gpu->retired_fence,
			    gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			/* Positive remaining-jiffies result means success. */
			ret = 0;
		}
	}

	return ret;
}
| 1134 | |||
/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
| 1144 | int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu, | ||
| 1145 | struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout) | ||
| 1146 | { | ||
| 1147 | unsigned long remaining; | ||
| 1148 | long ret; | ||
| 1149 | |||
| 1150 | if (!timeout) | ||
| 1151 | return !is_active(etnaviv_obj) ? 0 : -EBUSY; | ||
| 1152 | |||
| 1153 | remaining = etnaviv_timeout_to_jiffies(timeout); | ||
| 1154 | |||
| 1155 | ret = wait_event_interruptible_timeout(gpu->fence_event, | ||
| 1156 | !is_active(etnaviv_obj), | ||
| 1157 | remaining); | ||
| 1158 | if (ret > 0) { | ||
| 1159 | struct etnaviv_drm_private *priv = gpu->drm->dev_private; | ||
| 1160 | |||
| 1161 | /* Synchronise with the retire worker */ | ||
| 1162 | flush_workqueue(priv->wq); | ||
| 1163 | return 0; | ||
| 1164 | } else if (ret == -ERESTARTSYS) { | ||
| 1165 | return -ERESTARTSYS; | ||
| 1166 | } else { | ||
| 1167 | return -ETIMEDOUT; | ||
| 1168 | } | ||
| 1169 | } | ||
| 1170 | |||
| 1171 | int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu) | ||
| 1172 | { | ||
| 1173 | return pm_runtime_get_sync(gpu->dev); | ||
| 1174 | } | ||
| 1175 | |||
| 1176 | void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu) | ||
| 1177 | { | ||
| 1178 | pm_runtime_mark_last_busy(gpu->dev); | ||
| 1179 | pm_runtime_put_autosuspend(gpu->dev); | ||
| 1180 | } | ||
| 1181 | |||
/*
 * add bo's to gpu's ring, and kick gpu:
 *
 * Queues @cmdbuf for execution under gpu->lock: allocates a hardware event
 * slot and a fence, records the BO->iova references held by the cmdbuf,
 * and attaches the fence to each BO's reservation object.  A runtime-PM
 * reference is taken per submission and released again from the IRQ
 * handler when the matching event fires.
 *
 * Returns 0 on success, -EBUSY if no event slot is free, -ENOMEM on fence
 * allocation failure, or a negative error from the PM resume.
 */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct fence *fence;
	unsigned int event, i;
	int ret;

	/* Ensure the GPU is powered for the duration of the queuing work */
	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	mutex_lock(&gpu->lock);

	/*
	 * TODO
	 *
	 * - flush
	 * - data endian
	 * - prefetch
	 *
	 */

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* The event slot maps this fence to the hardware completion IRQ */
	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;

	/* Context change requires an MMU flush and a context switch on the FE */
	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		u32 iova;

		/* Each cmdbuf takes a refcount on the iova */
		etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
		cmdbuf->bo[i] = etnaviv_obj;
		atomic_inc(&etnaviv_obj->gpu_active);

		/* Writers need exclusive fencing; readers can share */
		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

	/* Balance the pm_get_sync above; the per-cmdbuf noresume ref remains */
	etnaviv_gpu_pm_put(gpu);

	return ret;
}
| 1264 | |||
/*
 * Init/Cleanup:
 */

/*
 * GPU interrupt handler: acknowledges pending interrupt bits, signals the
 * fence attached to each completed event, advances completed_fence, and
 * releases the per-submission runtime-PM reference.  Buffer-object
 * retirement itself is deferred to the retire worker.
 */
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	/* Reading the register also acknowledges the pending interrupts */
	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		/* Each remaining set bit corresponds to one event slot */
		while ((event = ffs(intr)) != 0) {
			struct fence *fence;

			event -= 1;	/* ffs() is 1-based */

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);

			/*
			 * We need to balance the runtime PM count caused by
			 * each submission.  Upon submission, we increment
			 * the runtime PM counter, and allocate one event.
			 * So here, we put the runtime PM count for each
			 * completed event.
			 */
			pm_runtime_put_autosuspend(gpu->dev);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}
| 1332 | |||
/* Enable the core clock, then the AXI bus clock; unwind on failure. */
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		disable_clk(gpu);	/* roll back the core clock */

	return ret;
}
| 1349 | |||
/* Disable AXI first, then the core clock (reverse of enable order). */
static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	int ret = disable_axi(gpu);

	if (ret)
		return ret;

	return disable_clk(gpu);
}
| 1364 | |||
/*
 * Hardware suspend: terminate the front-end command stream, busy-wait
 * (up to 100ms) for the GPU to report idle, then gate the clocks.
 * A failure to reach idle is only warned about, not fatal.
 */
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		unsigned long timeout;

		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		timeout = jiffies + msecs_to_jiffies(100);
		do {
			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

			/* All units named in idle_mask must report idle */
			if ((idle & gpu->idle_mask) == gpu->idle_mask)
				break;

			if (time_is_before_jiffies(timeout)) {
				dev_warn(gpu->dev,
					 "timed out waiting for idle: idle=0x%x\n",
					 idle);
				break;
			}

			udelay(5);
		} while (1);
	}

	return etnaviv_gpu_clk_disable(gpu);
}
| 1398 | |||
#ifdef CONFIG_PM
/*
 * Hardware resume: re-load the clock control register and re-run the full
 * hardware init under gpu->lock.  A context switch is forced since the
 * hardware context was lost across the power cycle.
 * Returns 0, or a negative error if taking the lock was interrupted.
 */
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	/* Previous GPU context state is gone; force a context switch */
	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);

	return 0;
}
#endif
| 1422 | |||
/*
 * Component bind callback: wires this GPU core into the master DRM device.
 * Powers the GPU up (via runtime PM when available, raw clocks otherwise),
 * initialises fencing, work items and the hangcheck timer, and registers
 * the core in the drm_private gpu array.
 */
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	/* Timer is armed per-submission by hangcheck_timer_reset() */
	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
		    (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	/* Allow the device to autosuspend again until first use */
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}
| 1458 | |||
/*
 * Component unbind callback: stop hangcheck, power the hardware down and
 * release the ring buffer and MMU.  The reverse of etnaviv_gpu_bind().
 */
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	/* Resume first so the suspend below runs with powered hardware */
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_gpu_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	/* Mark the GPU as no longer attached to a DRM device */
	gpu->drm = NULL;
}
| 1487 | |||
/* Component callbacks invoked by the etnaviv master driver. */
static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

/* DT match table: one entry per Vivante GC core node. */
static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
| 1499 | |||
| 1500 | static int etnaviv_gpu_platform_probe(struct platform_device *pdev) | ||
| 1501 | { | ||
| 1502 | struct device *dev = &pdev->dev; | ||
| 1503 | struct etnaviv_gpu *gpu; | ||
| 1504 | int err = 0; | ||
| 1505 | |||
| 1506 | gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); | ||
| 1507 | if (!gpu) | ||
| 1508 | return -ENOMEM; | ||
| 1509 | |||
| 1510 | gpu->dev = &pdev->dev; | ||
| 1511 | mutex_init(&gpu->lock); | ||
| 1512 | |||
| 1513 | /* | ||
| 1514 | * Set the GPU base address to the start of physical memory. This | ||
| 1515 | * ensures that if we have up to 2GB, the v1 MMU can address the | ||
| 1516 | * highest memory. This is important as command buffers may be | ||
| 1517 | * allocated outside of this limit. | ||
| 1518 | */ | ||
| 1519 | gpu->memory_base = PHYS_OFFSET; | ||
| 1520 | |||
| 1521 | /* Map registers: */ | ||
| 1522 | gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); | ||
| 1523 | if (IS_ERR(gpu->mmio)) | ||
| 1524 | return PTR_ERR(gpu->mmio); | ||
| 1525 | |||
| 1526 | /* Get Interrupt: */ | ||
| 1527 | gpu->irq = platform_get_irq(pdev, 0); | ||
| 1528 | if (gpu->irq < 0) { | ||
| 1529 | err = gpu->irq; | ||
| 1530 | dev_err(dev, "failed to get irq: %d\n", err); | ||
| 1531 | goto fail; | ||
| 1532 | } | ||
| 1533 | |||
| 1534 | err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0, | ||
| 1535 | dev_name(gpu->dev), gpu); | ||
| 1536 | if (err) { | ||
| 1537 | dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err); | ||
| 1538 | goto fail; | ||
| 1539 | } | ||
| 1540 | |||
| 1541 | /* Get Clocks: */ | ||
| 1542 | gpu->clk_bus = devm_clk_get(&pdev->dev, "bus"); | ||
| 1543 | DBG("clk_bus: %p", gpu->clk_bus); | ||
| 1544 | if (IS_ERR(gpu->clk_bus)) | ||
| 1545 | gpu->clk_bus = NULL; | ||
| 1546 | |||
| 1547 | gpu->clk_core = devm_clk_get(&pdev->dev, "core"); | ||
| 1548 | DBG("clk_core: %p", gpu->clk_core); | ||
| 1549 | if (IS_ERR(gpu->clk_core)) | ||
| 1550 | gpu->clk_core = NULL; | ||
| 1551 | |||
| 1552 | gpu->clk_shader = devm_clk_get(&pdev->dev, "shader"); | ||
| 1553 | DBG("clk_shader: %p", gpu->clk_shader); | ||
| 1554 | if (IS_ERR(gpu->clk_shader)) | ||
| 1555 | gpu->clk_shader = NULL; | ||
| 1556 | |||
| 1557 | /* TODO: figure out max mapped size */ | ||
| 1558 | dev_set_drvdata(dev, gpu); | ||
| 1559 | |||
| 1560 | /* | ||
| 1561 | * We treat the device as initially suspended. The runtime PM | ||
| 1562 | * autosuspend delay is rather arbitary: no measurements have | ||
| 1563 | * yet been performed to determine an appropriate value. | ||
| 1564 | */ | ||
| 1565 | pm_runtime_use_autosuspend(gpu->dev); | ||
| 1566 | pm_runtime_set_autosuspend_delay(gpu->dev, 200); | ||
| 1567 | pm_runtime_enable(gpu->dev); | ||
| 1568 | |||
| 1569 | err = component_add(&pdev->dev, &gpu_ops); | ||
| 1570 | if (err < 0) { | ||
| 1571 | dev_err(&pdev->dev, "failed to register component: %d\n", err); | ||
| 1572 | goto fail; | ||
| 1573 | } | ||
| 1574 | |||
| 1575 | return 0; | ||
| 1576 | |||
| 1577 | fail: | ||
| 1578 | return err; | ||
| 1579 | } | ||
| 1580 | |||
| 1581 | static int etnaviv_gpu_platform_remove(struct platform_device *pdev) | ||
| 1582 | { | ||
| 1583 | component_del(&pdev->dev, &gpu_ops); | ||
| 1584 | pm_runtime_disable(&pdev->dev); | ||
| 1585 | return 0; | ||
| 1586 | } | ||
| 1587 | |||
#ifdef CONFIG_PM
/*
 * Runtime-PM suspend callback.  Refuses to suspend (-EBUSY) while fences
 * are outstanding or any unit other than the front-end is busy; otherwise
 * shuts the hardware down via etnaviv_gpu_hw_suspend().
 */
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}
| 1606 | |||
/*
 * Runtime-PM resume callback: re-enable clocks and, if the GPU was already
 * bound and had a ring buffer, re-initialise the hardware state that was
 * lost while powered down.
 */
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			/* Don't leave clocks running on a failed resume */
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif
| 1628 | |||
/* Runtime-PM hooks (compiled in only when CONFIG_PM is enabled). */
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

/* Per-core platform driver; instantiated once per GC node in the DT. */
struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,	/* presumably defined earlier in this file — not visible here */
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h new file mode 100644 index 000000000000..c75d50359ab0 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h | |||
| @@ -0,0 +1,209 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef __ETNAVIV_GPU_H__ | ||
| 18 | #define __ETNAVIV_GPU_H__ | ||
| 19 | |||
| 20 | #include <linux/clk.h> | ||
| 21 | #include <linux/regulator/consumer.h> | ||
| 22 | |||
| 23 | #include "etnaviv_drv.h" | ||
| 24 | |||
| 25 | struct etnaviv_gem_submit; | ||
| 26 | |||
/*
 * Identification and capability data read from the GPU's ID/feature
 * registers (or a specs table) during etnaviv_gpu_init().
 */
struct etnaviv_chip_identity {
	/* Chip model. */
	u32 model;

	/* Revision value. */
	u32 revision;

	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;

	/* Supported minor feature 1 fields. */
	u32 minor_features1;

	/* Supported minor feature 2 fields. */
	u32 minor_features2;

	/* Supported minor feature 3 fields. */
	u32 minor_features3;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Buffer size */
	u32 buffer_size;
};
| 79 | |||
/*
 * One hardware event slot: 'used' marks slot allocation, 'fence' is the
 * fence signalled by the IRQ handler when the event fires.
 */
struct etnaviv_event {
	bool used;
	struct fence *fence;
};

struct etnaviv_cmdbuf;
| 86 | |||
/*
 * Per-core GPU state.  'lock' serialises submission, hardware init/resume
 * and retirement; fence bookkeeping uses fence_spinlock.
 */
struct etnaviv_gpu {
	struct drm_device *drm;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	struct etnaviv_file_private *lastctx;	/* last submitting context */
	bool switch_context;			/* force FE context switch */

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf *buffer;

	/* bus base address of memory  */
	u32 memory_base;

	/* event management: */
	struct etnaviv_event event[30];
	struct completion event_free;
	spinlock_t event_spinlock;

	/* list of currently in-flight command buffers */
	struct list_head active_cmd_list;

	u32 idle_mask;

	/* Fencing support */
	u32 next_fence;		/* next seqno to hand out */
	u32 active_fence;	/* last submitted seqno */
	u32 completed_fence;	/* last seqno signalled by IRQ */
	u32 retired_fence;	/* last seqno retired by worker */
	wait_queue_head_t fence_event;
	unsigned int fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling active-list retiring: */
	struct work_struct retire_work;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu *mmu;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_core;
	struct clk *clk_shader;

	/* Hang Detection: */
#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;
	u32 hangcheck_fence;
	u32 hangcheck_dma_addr;
	struct work_struct recover_work;
};
| 141 | |||
/*
 * One GPU command buffer, allocated by etnaviv_gpu_cmdbuf_new() with a
 * trailing variable-length array of BO pointers (hence bo[0] last).
 */
struct etnaviv_cmdbuf {
	/* device this cmdbuf is allocated for */
	struct etnaviv_gpu *gpu;
	/* user context key, must be unique between all active users */
	struct etnaviv_file_private *ctx;
	/* cmdbuf properties */
	void *vaddr;
	dma_addr_t paddr;
	u32 size;
	u32 user_size;
	/* fence after which this buffer is to be disposed */
	struct fence *fence;
	/* target exec state */
	u32 exec_state;
	/* per GPU in-flight list */
	struct list_head node;
	/* BOs attached to this command buffer */
	unsigned int nr_bos;
	struct etnaviv_gem_object *bo[0];
};
| 162 | |||
/* MMIO register write at byte offset 'reg' from the mapped register base. */
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	etnaviv_writel(data, gpu->mmio + reg);
}

/* MMIO register read at byte offset 'reg' from the mapped register base. */
static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return etnaviv_readl(gpu->mmio + reg);
}
| 172 | |||
/* True once the hardware has signalled 'fence' (seqno comparison handles wrap). */
static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
{
	return fence_after_eq(gpu->completed_fence, fence);
}

/* True once the retire worker has retired all work up to 'fence'. */
static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
{
	return fence_after_eq(gpu->retired_fence, fence);
}
| 182 | |||
/* Query a driver/hardware parameter for the GET_PARAM ioctl. */
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

/* One-time hardware bring-up: identify the core, set up MMU and ring. */
int etnaviv_gpu_init(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

/* Fence synchronisation against a BO's reservation object. */
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive);

/* Retirement, waiting and submission (see etnaviv_gpu.c). */
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);

/* Command-buffer allocation/free (bo array sized by nr_bos). */
struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
	u32 size, size_t nr_bos);
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);

/* Runtime-PM reference helpers. */
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);

extern struct platform_driver etnaviv_gpu_driver;
| 208 | |||
| 209 | #endif /* __ETNAVIV_GPU_H__ */ | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c new file mode 100644 index 000000000000..522cfd447892 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c | |||
| @@ -0,0 +1,240 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com> | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/iommu.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/sizes.h> | ||
| 20 | #include <linux/slab.h> | ||
| 21 | #include <linux/dma-mapping.h> | ||
| 22 | #include <linux/bitops.h> | ||
| 23 | |||
| 24 | #include "etnaviv_gpu.h" | ||
| 25 | #include "etnaviv_mmu.h" | ||
| 26 | #include "etnaviv_iommu.h" | ||
| 27 | #include "state_hi.xml.h" | ||
| 28 | |||
/* Single-level page table: 2 MiB of 32-bit PTEs = 512K pages = 2 GiB VA. */
#define PT_SIZE		SZ_2M
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

/* Start of the GPU virtual address window covered by the page table. */
#define GPU_MEM_START	0x80000000

/* The flat page table and its bus address, as programmed into the MC. */
struct etnaviv_iommu_domain_pgtable {
	u32 *pgtable;
	dma_addr_t paddr;
};

/*
 * Etnaviv's v1 MMU domain: wraps a generic iommu_domain around the flat
 * page table plus a scratch ("bad") page that unmapped entries point at.
 * map_lock serialises PTE updates.
 */
struct etnaviv_iommu_domain {
	struct iommu_domain domain;
	struct device *dev;
	void *bad_page_cpu;
	dma_addr_t bad_page_dma;
	struct etnaviv_iommu_domain_pgtable pgtable;
	spinlock_t map_lock;
};
| 47 | |||
/* Downcast from the embedded generic iommu_domain to our wrapper. */
static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommu_domain, domain);
}
| 52 | |||
/*
 * Allocate the flat page table as DMA-coherent memory.
 * NOTE(review): passes a NULL device to dma_alloc_coherent(); presumably
 * this should use the GPU's struct device (etnaviv_domain->dev) — confirm,
 * as NULL-device coherent allocations are deprecated and arch-dependent.
 */
static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
	size_t size)
{
	pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
	if (!pgtable->pgtable)
		return -ENOMEM;

	return 0;
}

/* Free a page table allocated by pgtable_alloc() (same NULL-device caveat). */
static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
	size_t size)
{
	dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
}
| 68 | |||
/* Look up the physical address stored in the PTE for 'iova' (no locking). */
static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
	unsigned long iova)
{
	/* calculate index into page table */
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
	phys_addr_t paddr;

	paddr = pgtable->pgtable[index];

	return paddr;
}

/* Store 'paddr' into the PTE for 'iova'; caller holds map_lock. */
static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
	unsigned long iova, phys_addr_t paddr)
{
	/* calculate index into page table */
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	pgtable->pgtable[index] = paddr;
}
| 89 | |||
/*
 * Allocate the scratch ("bad") page and the page table, then point every
 * PTE at the scratch page so stray GPU accesses hit a recognisable
 * 0xdead55aa pattern instead of random memory.
 */
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i;

	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
						  SZ_4K,
						  &etnaviv_domain->bad_page_dma,
						  GFP_KERNEL);
	if (!etnaviv_domain->bad_page_cpu)
		return -ENOMEM;

	/* Fill the scratch page with a marker value for easier debugging */
	p = etnaviv_domain->bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
	if (ret < 0) {
		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
				  etnaviv_domain->bad_page_cpu,
				  etnaviv_domain->bad_page_dma);
		return ret;
	}

	/* Initially every page maps to the scratch page */
	for (i = 0; i < PT_ENTRIES; i++)
		etnaviv_domain->pgtable.pgtable[i] =
			etnaviv_domain->bad_page_dma;

	spin_lock_init(&etnaviv_domain->map_lock);

	return 0;
}
| 122 | |||
/* iommu_ops.domain_free: release page table, scratch page and the domain. */
static void etnaviv_domain_free(struct iommu_domain *domain)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);

	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
			  etnaviv_domain->bad_page_cpu,
			  etnaviv_domain->bad_page_dma);

	kfree(etnaviv_domain);
}
| 135 | |||
/*
 * iommu_ops.map: install a single 4 KiB translation.  Only SZ_4K is
 * accepted, matching the pgsize_bitmap advertised in etnaviv_iommu_ops.
 */
static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
	   phys_addr_t paddr, size_t size, int prot)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	if (size != SZ_4K)
		return -EINVAL;

	spin_lock(&etnaviv_domain->map_lock);
	pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
	spin_unlock(&etnaviv_domain->map_lock);

	return 0;
}
| 150 | |||
| 151 | static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain, | ||
| 152 | unsigned long iova, size_t size) | ||
| 153 | { | ||
| 154 | struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); | ||
| 155 | |||
| 156 | if (size != SZ_4K) | ||
| 157 | return -EINVAL; | ||
| 158 | |||
| 159 | spin_lock(&etnaviv_domain->map_lock); | ||
| 160 | pgtable_write(&etnaviv_domain->pgtable, iova, | ||
| 161 | etnaviv_domain->bad_page_dma); | ||
| 162 | spin_unlock(&etnaviv_domain->map_lock); | ||
| 163 | |||
| 164 | return SZ_4K; | ||
| 165 | } | ||
| 166 | |||
/* iommu_ops.iova_to_phys: translate an IOVA via the flat page table. */
static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
	dma_addr_t iova)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	return pgtable_read(&etnaviv_domain->pgtable, iova);
}
| 174 | |||
/* Size of the page-table snapshot emitted into a devcoredump. */
static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
{
	return PT_SIZE;
}

/* Copy the whole page table into 'buf' (sized by the dump_size above). */
static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
}
| 186 | |||
/*
 * v1 MMU ops: generic iommu_ops plus etnaviv's dump extensions used by
 * the devcoredump code.  pgsize_bitmap limits map/unmap to 4 KiB pages.
 */
static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
	.ops = {
		.domain_free = etnaviv_domain_free,
		.map = etnaviv_iommuv1_map,
		.unmap = etnaviv_iommuv1_unmap,
		.iova_to_phys = etnaviv_iommu_iova_to_phys,
		.pgsize_bitmap = SZ_4K,
	},
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};
| 198 | |||
/*
 * Program the domain's page-table bus address into the memory controller
 * for every engine (FE/TX/PE/PEZ/RA); called on init and after reset.
 */
void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
	struct iommu_domain *domain)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
	u32 pgtable;

	/* set page table address in MC */
	pgtable = (u32)etnaviv_domain->pgtable.paddr;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
| 214 | |||
| 215 | struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu) | ||
| 216 | { | ||
| 217 | struct etnaviv_iommu_domain *etnaviv_domain; | ||
| 218 | int ret; | ||
| 219 | |||
| 220 | etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL); | ||
| 221 | if (!etnaviv_domain) | ||
| 222 | return NULL; | ||
| 223 | |||
| 224 | etnaviv_domain->dev = gpu->dev; | ||
| 225 | |||
| 226 | etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; | ||
| 227 | etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; | ||
| 228 | etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; | ||
| 229 | etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; | ||
| 230 | |||
| 231 | ret = __etnaviv_iommu_init(etnaviv_domain); | ||
| 232 | if (ret) | ||
| 233 | goto out_free; | ||
| 234 | |||
| 235 | return &etnaviv_domain->domain; | ||
| 236 | |||
| 237 | out_free: | ||
| 238 | kfree(etnaviv_domain); | ||
| 239 | return NULL; | ||
| 240 | } | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h new file mode 100644 index 000000000000..cf45503f6b6f --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h | |||
| @@ -0,0 +1,28 @@ | |||
/*
 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_IOMMU_H__
#define __ETNAVIV_IOMMU_H__

#include <linux/iommu.h>
struct etnaviv_gpu;

/* Allocate a v1 GPU MMU domain for @gpu; returns NULL on failure. */
struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
/* Re-program the page table base into the MC, e.g. after GPU reset. */
void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
	struct iommu_domain *domain);
/* v2 MMU variant; currently an unimplemented stub returning NULL. */
struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);

#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c new file mode 100644 index 000000000000..fbb4aed3dc80 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | |||
| @@ -0,0 +1,33 @@ | |||
/*
 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "state_hi.xml.h"


/*
 * Version 2 GPU MMU support is not implemented yet; returning NULL
 * tells the caller that no v2 domain is available.
 */
struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
{
	/* TODO */
	return NULL;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h new file mode 100644 index 000000000000..603ea41c5389 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h | |||
| @@ -0,0 +1,25 @@ | |||
/*
 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_IOMMU_V2_H__
#define __ETNAVIV_IOMMU_V2_H__

#include <linux/iommu.h>
struct etnaviv_gpu;

/*
 * Allocate a v2 GPU MMU domain for @gpu; currently a stub that
 * returns NULL (also declared in etnaviv_iommu.h).
 */
struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);

#endif /* __ETNAVIV_IOMMU_V2_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c new file mode 100644 index 000000000000..6743bc648dc8 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c | |||
| @@ -0,0 +1,299 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Etnaviv Project | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License version 2 as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License along with | ||
| 14 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include "etnaviv_drv.h" | ||
| 18 | #include "etnaviv_gem.h" | ||
| 19 | #include "etnaviv_gpu.h" | ||
| 20 | #include "etnaviv_mmu.h" | ||
| 21 | |||
/*
 * Per-domain IOMMU fault callback: just log the faulting address.
 * NOTE(review): the return value's meaning is defined by the IOMMU
 * core's fault reporting — confirm that returning 0 here produces the
 * intended "handled / no further reporting" behaviour.
 */
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
	unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}
| 28 | |||
| 29 | int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova, | ||
| 30 | struct sg_table *sgt, unsigned len, int prot) | ||
| 31 | { | ||
| 32 | struct iommu_domain *domain = iommu->domain; | ||
| 33 | struct scatterlist *sg; | ||
| 34 | unsigned int da = iova; | ||
| 35 | unsigned int i, j; | ||
| 36 | int ret; | ||
| 37 | |||
| 38 | if (!domain || !sgt) | ||
| 39 | return -EINVAL; | ||
| 40 | |||
| 41 | for_each_sg(sgt->sgl, sg, sgt->nents, i) { | ||
| 42 | u32 pa = sg_dma_address(sg) - sg->offset; | ||
| 43 | size_t bytes = sg_dma_len(sg) + sg->offset; | ||
| 44 | |||
| 45 | VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); | ||
| 46 | |||
| 47 | ret = iommu_map(domain, da, pa, bytes, prot); | ||
| 48 | if (ret) | ||
| 49 | goto fail; | ||
| 50 | |||
| 51 | da += bytes; | ||
| 52 | } | ||
| 53 | |||
| 54 | return 0; | ||
| 55 | |||
| 56 | fail: | ||
| 57 | da = iova; | ||
| 58 | |||
| 59 | for_each_sg(sgt->sgl, sg, i, j) { | ||
| 60 | size_t bytes = sg_dma_len(sg) + sg->offset; | ||
| 61 | |||
| 62 | iommu_unmap(domain, da, bytes); | ||
| 63 | da += bytes; | ||
| 64 | } | ||
| 65 | return ret; | ||
| 66 | } | ||
| 67 | |||
/*
 * Remove the mapping of @sgt from the GPU address space at @iova.
 * Returns 0 on success.
 *
 * NOTE(review): on a short unmap this returns the (non-negative)
 * number of bytes iommu_unmap() actually removed, cast to int — a
 * positive "error" value.  Confirm callers only test for non-zero.
 */
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
	struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;		/* running GPU address */
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* same whole-page widening as in etnaviv_iommu_map() */
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		/* mappings are made in whole pages; anything else is a bug */
		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}
| 93 | |||
/*
 * Drop a GEM object's MMU mapping: unmap its pages from the GPU
 * address space and release the drm_mm node holding its range.
 * Both call sites hold mmu->lock.
 */
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
| 103 | |||
/*
 * Allocate a GPU address range for @etnaviv_obj and map its pages.
 *
 * Fast path: a single-entry (contiguous) scatterlist that fits below
 * 0x80000000 relative to @memory_base is addressed linearly, without
 * consuming page table space (unless ETNA_BO_FORCE_MMU is set).
 * Otherwise a range is carved out of the drm_mm; if the address space
 * is exhausted, unused mappings are evicted via the drm_mm scan API
 * and the allocation is retried.
 *
 * Called with etnaviv_obj->lock held; takes mmu->lock internally.
 * Returns 0 on success or a negative error code.
 */
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_vram_mapping *free = NULL;
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			/* object sits inside the linear window: use as-is */
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;
	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		/* search upwards from last_iova first to limit MMU flushes */
		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
			DRM_MM_SEARCH_DEFAULT);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the get_iova finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed.  Ensure that the MMU will be flushed before the
		 * associated commit requesting this mapping, and retry the
		 * allocation one more time.
		 */
		mmu->need_flush = true;
	}

	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mutex_unlock(&mmu->lock);

	return ret;
}
| 236 | |||
/*
 * Tear down @mapping.  A page-table mapping (vram_node lives in this
 * MMU's drm_mm) is unmapped and its range freed; a linear-window
 * mapping only needs to be unlinked.  The mapping must be unused.
 */
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}
| 251 | |||
/* Release the address space manager, the backing IOMMU domain and @mmu. */
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	iommu_domain_free(mmu->domain);
	kfree(mmu);
}
| 258 | |||
| 259 | struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu, | ||
| 260 | struct iommu_domain *domain, enum etnaviv_iommu_version version) | ||
| 261 | { | ||
| 262 | struct etnaviv_iommu *mmu; | ||
| 263 | |||
| 264 | mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); | ||
| 265 | if (!mmu) | ||
| 266 | return ERR_PTR(-ENOMEM); | ||
| 267 | |||
| 268 | mmu->domain = domain; | ||
| 269 | mmu->gpu = gpu; | ||
| 270 | mmu->version = version; | ||
| 271 | mutex_init(&mmu->lock); | ||
| 272 | INIT_LIST_HEAD(&mmu->mappings); | ||
| 273 | |||
| 274 | drm_mm_init(&mmu->mm, domain->geometry.aperture_start, | ||
| 275 | domain->geometry.aperture_end - | ||
| 276 | domain->geometry.aperture_start + 1); | ||
| 277 | |||
| 278 | iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev); | ||
| 279 | |||
| 280 | return mmu; | ||
| 281 | } | ||
| 282 | |||
| 283 | size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu) | ||
| 284 | { | ||
| 285 | struct etnaviv_iommu_ops *ops; | ||
| 286 | |||
| 287 | ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops); | ||
| 288 | |||
| 289 | return ops->dump_size(iommu->domain); | ||
| 290 | } | ||
| 291 | |||
| 292 | void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf) | ||
| 293 | { | ||
| 294 | struct etnaviv_iommu_ops *ops; | ||
| 295 | |||
| 296 | ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops); | ||
| 297 | |||
| 298 | ops->dump(iommu->domain, buf); | ||
| 299 | } | ||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h new file mode 100644 index 000000000000..fff215a47630 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h | |||
| @@ -0,0 +1,71 @@ | |||
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_MMU_H__
#define __ETNAVIV_MMU_H__

#include <linux/iommu.h>

enum etnaviv_iommu_version {
	ETNAVIV_IOMMU_V1 = 0,
	ETNAVIV_IOMMU_V2,
};

struct etnaviv_gpu;
struct etnaviv_vram_mapping;

/*
 * iommu_ops extended with dump callbacks; etnaviv code recovers this
 * wrapper from a domain's ->ops pointer via container_of().
 */
struct etnaviv_iommu_ops {
	struct iommu_ops ops;
	size_t (*dump_size)(struct iommu_domain *);
	void (*dump)(struct iommu_domain *, void *);
};

/* Per-GPU MMU context: an IOMMU domain plus address space management. */
struct etnaviv_iommu {
	struct etnaviv_gpu *gpu;	/* owning GPU */
	struct iommu_domain *domain;	/* backing IOMMU domain */

	enum etnaviv_iommu_version version;

	/* memory manager for GPU address area */
	struct mutex lock;		/* protects mappings and mm */
	struct list_head mappings;	/* all vram mappings on this MMU */
	struct drm_mm mm;		/* allocator over the aperture */
	u32 last_iova;			/* end of most recent allocation */
	bool need_flush;		/* flush MMU before next commit */
};

struct etnaviv_gem_object;

/* NOTE(review): no definition visible in this file — confirm this is
 * implemented elsewhere or remove the stale declaration. */
int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
	int cnt);
/* Map/unmap a scatterlist at a fixed GPU address. */
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
	struct sg_table *sgt, unsigned len, int prot);
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
	struct sg_table *sgt, unsigned len);
/* Allocate a GPU address for a GEM object and map it (may evict). */
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);

/* Page table dump helpers (for devcoredump support). */
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);

struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
	struct iommu_domain *domain, enum etnaviv_iommu_version version);

#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h new file mode 100644 index 000000000000..368218304566 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/state.xml.h | |||
| @@ -0,0 +1,351 @@ | |||
| 1 | #ifndef STATE_XML | ||
| 2 | #define STATE_XML | ||
| 3 | |||
| 4 | /* Autogenerated file, DO NOT EDIT manually! | ||
| 5 | |||
| 6 | This file was generated by the rules-ng-ng headergen tool in this git repository: | ||
| 7 | http://0x04.net/cgit/index.cgi/rules-ng-ng | ||
| 8 | git clone git://0x04.net/rules-ng-ng | ||
| 9 | |||
| 10 | The rules-ng-ng source files this header was generated from are: | ||
| 11 | - state.xml ( 18882 bytes, from 2015-03-25 11:42:32) | ||
| 12 | - common.xml ( 18437 bytes, from 2015-03-25 11:27:41) | ||
| 13 | - state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) | ||
| 14 | - state_2d.xml ( 51549 bytes, from 2015-03-25 11:25:06) | ||
| 15 | - state_3d.xml ( 54600 bytes, from 2015-03-25 11:25:19) | ||
| 16 | - state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) | ||
| 17 | |||
| 18 | Copyright (C) 2015 | ||
| 19 | */ | ||
| 20 | |||
| 21 | |||
| 22 | #define VARYING_COMPONENT_USE_UNUSED 0x00000000 | ||
| 23 | #define VARYING_COMPONENT_USE_USED 0x00000001 | ||
| 24 | #define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002 | ||
| 25 | #define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003 | ||
| 26 | #define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff | ||
| 27 | #define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0 | ||
| 28 | #define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK) | ||
| 29 | #define VIVS_FE 0x00000000 | ||
| 30 | |||
| 31 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0)) | ||
| 32 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE 0x00000004 | ||
| 33 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010 | ||
| 34 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f | ||
| 35 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0 | ||
| 36 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE 0x00000000 | ||
| 37 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE 0x00000001 | ||
| 38 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT 0x00000002 | ||
| 39 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT 0x00000003 | ||
| 40 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT 0x00000004 | ||
| 41 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT 0x00000005 | ||
| 42 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT 0x00000008 | ||
| 43 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT 0x00000009 | ||
| 44 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED 0x0000000b | ||
| 45 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2 0x0000000c | ||
| 46 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d | ||
| 47 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030 | ||
| 48 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4 | ||
| 49 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK) | ||
| 50 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE 0x00000080 | ||
| 51 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK 0x00000700 | ||
| 52 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT 8 | ||
| 53 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK) | ||
| 54 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK 0x00003000 | ||
| 55 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT 12 | ||
| 56 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK) | ||
| 57 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK 0x0000c000 | ||
| 58 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT 14 | ||
| 59 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF 0x00000000 | ||
| 60 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON 0x00008000 | ||
| 61 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK 0x00ff0000 | ||
| 62 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT 16 | ||
| 63 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK) | ||
| 64 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK 0xff000000 | ||
| 65 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT 24 | ||
| 66 | #define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK) | ||
| 67 | |||
| 68 | #define VIVS_FE_CMD_STREAM_BASE_ADDR 0x00000640 | ||
| 69 | |||
| 70 | #define VIVS_FE_INDEX_STREAM_BASE_ADDR 0x00000644 | ||
| 71 | |||
| 72 | #define VIVS_FE_INDEX_STREAM_CONTROL 0x00000648 | ||
| 73 | #define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK 0x00000003 | ||
| 74 | #define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT 0 | ||
| 75 | #define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000 | ||
| 76 | #define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001 | ||
| 77 | #define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002 | ||
| 78 | |||
| 79 | #define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c | ||
| 80 | |||
| 81 | #define VIVS_FE_VERTEX_STREAM_CONTROL 0x00000650 | ||
| 82 | |||
| 83 | #define VIVS_FE_COMMAND_ADDRESS 0x00000654 | ||
| 84 | |||
| 85 | #define VIVS_FE_COMMAND_CONTROL 0x00000658 | ||
| 86 | #define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff | ||
| 87 | #define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT 0 | ||
| 88 | #define VIVS_FE_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK) | ||
| 89 | #define VIVS_FE_COMMAND_CONTROL_ENABLE 0x00010000 | ||
| 90 | |||
| 91 | #define VIVS_FE_DMA_STATUS 0x0000065c | ||
| 92 | |||
| 93 | #define VIVS_FE_DMA_DEBUG_STATE 0x00000660 | ||
| 94 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK 0x0000001f | ||
| 95 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT 0 | ||
| 96 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE 0x00000000 | ||
| 97 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC 0x00000001 | ||
| 98 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0 0x00000002 | ||
| 99 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0 0x00000003 | ||
| 100 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1 0x00000004 | ||
| 101 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1 0x00000005 | ||
| 102 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR 0x00000006 | ||
| 103 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD 0x00000007 | ||
| 104 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL 0x00000008 | ||
| 105 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL 0x00000009 | ||
| 106 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA 0x0000000a | ||
| 107 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX 0x0000000b | ||
| 108 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW 0x0000000c | ||
| 109 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0 0x0000000d | ||
| 110 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1 0x0000000e | ||
| 111 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0 0x0000000f | ||
| 112 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1 0x00000010 | ||
| 113 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO 0x00000011 | ||
| 114 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT 0x00000012 | ||
| 115 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK 0x00000013 | ||
| 116 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END 0x00000014 | ||
| 117 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL 0x00000015 | ||
| 118 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK 0x00000300 | ||
| 119 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT 8 | ||
| 120 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE 0x00000000 | ||
| 121 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START 0x00000100 | ||
| 122 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ 0x00000200 | ||
| 123 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END 0x00000300 | ||
| 124 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK 0x00000c00 | ||
| 125 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT 10 | ||
| 126 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE 0x00000000 | ||
| 127 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID 0x00000400 | ||
| 128 | #define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID 0x00000800 | ||
| 129 | #define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK 0x00003000 | ||
| 130 | #define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT 12 | ||
| 131 | #define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE 0x00000000 | ||
| 132 | #define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX 0x00001000 | ||
| 133 | #define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL 0x00002000 | ||
| 134 | #define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK 0x0000c000 | ||
| 135 | #define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT 14 | ||
| 136 | #define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE 0x00000000 | ||
| 137 | #define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR 0x00004000 | ||
| 138 | #define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC 0x00008000 | ||
| 139 | #define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK 0x00030000 | ||
| 140 | #define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT 16 | ||
| 141 | #define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE 0x00000000 | ||
| 142 | #define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE 0x00010000 | ||
| 143 | #define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS 0x00020000 | ||
| 144 | |||
| 145 | #define VIVS_FE_DMA_ADDRESS 0x00000664 | ||
| 146 | |||
| 147 | #define VIVS_FE_DMA_LOW 0x00000668 | ||
| 148 | |||
| 149 | #define VIVS_FE_DMA_HIGH 0x0000066c | ||
| 150 | |||
| 151 | #define VIVS_FE_AUTO_FLUSH 0x00000670 | ||
| 152 | |||
| 153 | #define VIVS_FE_UNK00678 0x00000678 | ||
| 154 | |||
| 155 | #define VIVS_FE_UNK0067C 0x0000067c | ||
| 156 | |||
| 157 | #define VIVS_FE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0)) | ||
| 158 | #define VIVS_FE_VERTEX_STREAMS__ESIZE 0x00000004 | ||
| 159 | #define VIVS_FE_VERTEX_STREAMS__LEN 0x00000008 | ||
| 160 | |||
| 161 | #define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00000680 + 0x4*(i0)) | ||
| 162 | |||
| 163 | #define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0)) | ||
| 164 | |||
| 165 | #define VIVS_FE_UNK00700(i0) (0x00000700 + 0x4*(i0)) | ||
| 166 | #define VIVS_FE_UNK00700__ESIZE 0x00000004 | ||
| 167 | #define VIVS_FE_UNK00700__LEN 0x00000010 | ||
| 168 | |||
| 169 | #define VIVS_FE_UNK00740(i0) (0x00000740 + 0x4*(i0)) | ||
| 170 | #define VIVS_FE_UNK00740__ESIZE 0x00000004 | ||
| 171 | #define VIVS_FE_UNK00740__LEN 0x00000010 | ||
| 172 | |||
| 173 | #define VIVS_FE_UNK00780(i0) (0x00000780 + 0x4*(i0)) | ||
| 174 | #define VIVS_FE_UNK00780__ESIZE 0x00000004 | ||
| 175 | #define VIVS_FE_UNK00780__LEN 0x00000010 | ||
| 176 | |||
| 177 | #define VIVS_GL 0x00000000 | ||
| 178 | |||
| 179 | #define VIVS_GL_PIPE_SELECT 0x00003800 | ||
| 180 | #define VIVS_GL_PIPE_SELECT_PIPE__MASK 0x00000001 | ||
| 181 | #define VIVS_GL_PIPE_SELECT_PIPE__SHIFT 0 | ||
| 182 | #define VIVS_GL_PIPE_SELECT_PIPE(x) (((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK) | ||
| 183 | |||
| 184 | #define VIVS_GL_EVENT 0x00003804 | ||
| 185 | #define VIVS_GL_EVENT_EVENT_ID__MASK 0x0000001f | ||
| 186 | #define VIVS_GL_EVENT_EVENT_ID__SHIFT 0 | ||
| 187 | #define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK) | ||
| 188 | #define VIVS_GL_EVENT_FROM_FE 0x00000020 | ||
| 189 | #define VIVS_GL_EVENT_FROM_PE 0x00000040 | ||
| 190 | #define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00 | ||
| 191 | #define VIVS_GL_EVENT_SOURCE__SHIFT 8 | ||
| 192 | #define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK) | ||
| 193 | |||
| 194 | #define VIVS_GL_SEMAPHORE_TOKEN 0x00003808 | ||
| 195 | #define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK 0x0000001f | ||
| 196 | #define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT 0 | ||
| 197 | #define VIVS_GL_SEMAPHORE_TOKEN_FROM(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK) | ||
| 198 | #define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00 | ||
| 199 | #define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8 | ||
| 200 | #define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK) | ||
| 201 | |||
| 202 | #define VIVS_GL_FLUSH_CACHE 0x0000380c | ||
| 203 | #define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001 | ||
| 204 | #define VIVS_GL_FLUSH_CACHE_COLOR 0x00000002 | ||
| 205 | #define VIVS_GL_FLUSH_CACHE_TEXTURE 0x00000004 | ||
| 206 | #define VIVS_GL_FLUSH_CACHE_PE2D 0x00000008 | ||
| 207 | #define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010 | ||
| 208 | #define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020 | ||
| 209 | #define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040 | ||
| 210 | |||
| 211 | #define VIVS_GL_FLUSH_MMU 0x00003810 | ||
| 212 | #define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001 | ||
| 213 | #define VIVS_GL_FLUSH_MMU_FLUSH_UNK1 0x00000002 | ||
| 214 | #define VIVS_GL_FLUSH_MMU_FLUSH_UNK2 0x00000004 | ||
| 215 | #define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU 0x00000008 | ||
| 216 | #define VIVS_GL_FLUSH_MMU_FLUSH_UNK4 0x00000010 | ||
| 217 | |||
| 218 | #define VIVS_GL_VERTEX_ELEMENT_CONFIG 0x00003814 | ||
| 219 | |||
| 220 | #define VIVS_GL_MULTI_SAMPLE_CONFIG 0x00003818 | ||
| 221 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK 0x00000003 | ||
| 222 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT 0 | ||
| 223 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE 0x00000000 | ||
| 224 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X 0x00000001 | ||
| 225 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X 0x00000002 | ||
| 226 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK 0x00000008 | ||
| 227 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK 0x000000f0 | ||
| 228 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT 4 | ||
| 229 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK) | ||
| 230 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK 0x00000100 | ||
| 231 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK 0x00007000 | ||
| 232 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT 12 | ||
| 233 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK) | ||
| 234 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK 0x00008000 | ||
| 235 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK 0x00030000 | ||
| 236 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT 16 | ||
| 237 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK) | ||
| 238 | #define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK 0x00080000 | ||
| 239 | |||
| 240 | #define VIVS_GL_VARYING_TOTAL_COMPONENTS 0x0000381c | ||
| 241 | #define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK 0x000000ff | ||
| 242 | #define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT 0 | ||
| 243 | #define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK) | ||
| 244 | |||
| 245 | #define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820 | ||
| 246 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK 0x00000007 | ||
| 247 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT 0 | ||
| 248 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK) | ||
| 249 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK 0x00000070 | ||
| 250 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT 4 | ||
| 251 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK) | ||
| 252 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK 0x00000700 | ||
| 253 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT 8 | ||
| 254 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK) | ||
| 255 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK 0x00007000 | ||
| 256 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT 12 | ||
| 257 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK) | ||
| 258 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK 0x00070000 | ||
| 259 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT 16 | ||
| 260 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK) | ||
| 261 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK 0x00700000 | ||
| 262 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT 20 | ||
| 263 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK) | ||
| 264 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK 0x07000000 | ||
| 265 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT 24 | ||
| 266 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK) | ||
| 267 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK 0x70000000 | ||
| 268 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT 28 | ||
| 269 | #define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK) | ||
| 270 | |||
| 271 | #define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0)) | ||
| 272 | #define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004 | ||
| 273 | #define VIVS_GL_VARYING_COMPONENT_USE__LEN 0x00000002 | ||
| 274 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK 0x00000003 | ||
| 275 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT 0 | ||
| 276 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK) | ||
| 277 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK 0x0000000c | ||
| 278 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT 2 | ||
| 279 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK) | ||
| 280 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK 0x00000030 | ||
| 281 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT 4 | ||
| 282 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK) | ||
| 283 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK 0x000000c0 | ||
| 284 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT 6 | ||
| 285 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK) | ||
| 286 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK 0x00000300 | ||
| 287 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT 8 | ||
| 288 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK) | ||
| 289 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK 0x00000c00 | ||
| 290 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT 10 | ||
| 291 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK) | ||
| 292 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK 0x00003000 | ||
| 293 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT 12 | ||
| 294 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK) | ||
| 295 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK 0x0000c000 | ||
| 296 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT 14 | ||
| 297 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK) | ||
| 298 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK 0x00030000 | ||
| 299 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT 16 | ||
| 300 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK) | ||
| 301 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK 0x000c0000 | ||
| 302 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT 18 | ||
| 303 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK) | ||
| 304 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK 0x00300000 | ||
| 305 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT 20 | ||
| 306 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK) | ||
| 307 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK 0x00c00000 | ||
| 308 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT 22 | ||
| 309 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK) | ||
| 310 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK 0x03000000 | ||
| 311 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT 24 | ||
| 312 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK) | ||
| 313 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK 0x0c000000 | ||
| 314 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT 26 | ||
| 315 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK) | ||
| 316 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK 0x30000000 | ||
| 317 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT 28 | ||
| 318 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK) | ||
| 319 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK 0xc0000000 | ||
| 320 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30 | ||
| 321 | #define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK) | ||
| 322 | |||
| 323 | #define VIVS_GL_UNK03834 0x00003834 | ||
| 324 | |||
| 325 | #define VIVS_GL_UNK03838 0x00003838 | ||
| 326 | |||
| 327 | #define VIVS_GL_API_MODE 0x0000384c | ||
| 328 | #define VIVS_GL_API_MODE_OPENGL 0x00000000 | ||
| 329 | #define VIVS_GL_API_MODE_OPENVG 0x00000001 | ||
| 330 | #define VIVS_GL_API_MODE_OPENCL 0x00000002 | ||
| 331 | |||
| 332 | #define VIVS_GL_CONTEXT_POINTER 0x00003850 | ||
| 333 | |||
| 334 | #define VIVS_GL_UNK03A00 0x00003a00 | ||
| 335 | |||
| 336 | #define VIVS_GL_STALL_TOKEN 0x00003c00 | ||
| 337 | #define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f | ||
| 338 | #define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0 | ||
| 339 | #define VIVS_GL_STALL_TOKEN_FROM(x) (((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK) | ||
| 340 | #define VIVS_GL_STALL_TOKEN_TO__MASK 0x00001f00 | ||
| 341 | #define VIVS_GL_STALL_TOKEN_TO__SHIFT 8 | ||
| 342 | #define VIVS_GL_STALL_TOKEN_TO(x) (((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK) | ||
| 343 | #define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000 | ||
| 344 | #define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000 | ||
| 345 | |||
| 346 | #define VIVS_DUMMY 0x00000000 | ||
| 347 | |||
| 348 | #define VIVS_DUMMY_DUMMY 0x0003fffc | ||
| 349 | |||
| 350 | |||
| 351 | #endif /* STATE_XML */ | ||
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h new file mode 100644 index 000000000000..0064f2640396 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h | |||
| @@ -0,0 +1,407 @@ | |||
| 1 | #ifndef STATE_HI_XML | ||
| 2 | #define STATE_HI_XML | ||
| 3 | |||
| 4 | /* Autogenerated file, DO NOT EDIT manually! | ||
| 5 | |||
| 6 | This file was generated by the rules-ng-ng headergen tool in this git repository: | ||
| 7 | http://0x04.net/cgit/index.cgi/rules-ng-ng | ||
| 8 | git clone git://0x04.net/rules-ng-ng | ||
| 9 | |||
| 10 | The rules-ng-ng source files this header was generated from are: | ||
| 11 | - state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) | ||
| 12 | - common.xml ( 18437 bytes, from 2015-03-25 11:27:41) | ||
| 13 | |||
| 14 | Copyright (C) 2015 | ||
| 15 | */ | ||
| 16 | |||
| 17 | |||
| 18 | #define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001 | ||
| 19 | #define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002 | ||
| 20 | #define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003 | ||
| 21 | #define VIVS_HI 0x00000000 | ||
| 22 | |||
| 23 | #define VIVS_HI_CLOCK_CONTROL 0x00000000 | ||
| 24 | #define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS 0x00000001 | ||
| 25 | #define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS 0x00000002 | ||
| 26 | #define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK 0x000001fc | ||
| 27 | #define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT 2 | ||
| 28 | #define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x) (((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK) | ||
| 29 | #define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD 0x00000200 | ||
| 30 | #define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING 0x00000400 | ||
| 31 | #define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS 0x00000800 | ||
| 32 | #define VIVS_HI_CLOCK_CONTROL_SOFT_RESET 0x00001000 | ||
| 33 | #define VIVS_HI_CLOCK_CONTROL_IDLE_3D 0x00010000 | ||
| 34 | #define VIVS_HI_CLOCK_CONTROL_IDLE_2D 0x00020000 | ||
| 35 | #define VIVS_HI_CLOCK_CONTROL_IDLE_VG 0x00040000 | ||
| 36 | #define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU 0x00080000 | ||
| 37 | #define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK 0x00f00000 | ||
| 38 | #define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT 20 | ||
| 39 | #define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x) (((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK) | ||
| 40 | |||
| 41 | #define VIVS_HI_IDLE_STATE 0x00000004 | ||
| 42 | #define VIVS_HI_IDLE_STATE_FE 0x00000001 | ||
| 43 | #define VIVS_HI_IDLE_STATE_DE 0x00000002 | ||
| 44 | #define VIVS_HI_IDLE_STATE_PE 0x00000004 | ||
| 45 | #define VIVS_HI_IDLE_STATE_SH 0x00000008 | ||
| 46 | #define VIVS_HI_IDLE_STATE_PA 0x00000010 | ||
| 47 | #define VIVS_HI_IDLE_STATE_SE 0x00000020 | ||
| 48 | #define VIVS_HI_IDLE_STATE_RA 0x00000040 | ||
| 49 | #define VIVS_HI_IDLE_STATE_TX 0x00000080 | ||
| 50 | #define VIVS_HI_IDLE_STATE_VG 0x00000100 | ||
| 51 | #define VIVS_HI_IDLE_STATE_IM 0x00000200 | ||
| 52 | #define VIVS_HI_IDLE_STATE_FP 0x00000400 | ||
| 53 | #define VIVS_HI_IDLE_STATE_TS 0x00000800 | ||
| 54 | #define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000 | ||
| 55 | |||
| 56 | #define VIVS_HI_AXI_CONFIG 0x00000008 | ||
| 57 | #define VIVS_HI_AXI_CONFIG_AWID__MASK 0x0000000f | ||
| 58 | #define VIVS_HI_AXI_CONFIG_AWID__SHIFT 0 | ||
| 59 | #define VIVS_HI_AXI_CONFIG_AWID(x) (((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK) | ||
| 60 | #define VIVS_HI_AXI_CONFIG_ARID__MASK 0x000000f0 | ||
| 61 | #define VIVS_HI_AXI_CONFIG_ARID__SHIFT 4 | ||
| 62 | #define VIVS_HI_AXI_CONFIG_ARID(x) (((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK) | ||
| 63 | #define VIVS_HI_AXI_CONFIG_AWCACHE__MASK 0x00000f00 | ||
| 64 | #define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT 8 | ||
| 65 | #define VIVS_HI_AXI_CONFIG_AWCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK) | ||
| 66 | #define VIVS_HI_AXI_CONFIG_ARCACHE__MASK 0x0000f000 | ||
| 67 | #define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT 12 | ||
| 68 | #define VIVS_HI_AXI_CONFIG_ARCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK) | ||
| 69 | |||
| 70 | #define VIVS_HI_AXI_STATUS 0x0000000c | ||
| 71 | #define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK 0x0000000f | ||
| 72 | #define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT 0 | ||
| 73 | #define VIVS_HI_AXI_STATUS_WR_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK) | ||
| 74 | #define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK 0x000000f0 | ||
| 75 | #define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT 4 | ||
| 76 | #define VIVS_HI_AXI_STATUS_RD_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK) | ||
| 77 | #define VIVS_HI_AXI_STATUS_DET_WR_ERR 0x00000100 | ||
| 78 | #define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200 | ||
| 79 | |||
| 80 | #define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010 | ||
| 81 | #define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x7fffffff | ||
| 82 | #define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0 | ||
| 83 | #define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK) | ||
| 84 | #define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000 | ||
| 85 | |||
| 86 | #define VIVS_HI_INTR_ENBL 0x00000014 | ||
| 87 | #define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK 0xffffffff | ||
| 88 | #define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT 0 | ||
| 89 | #define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x) (((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK) | ||
| 90 | |||
| 91 | #define VIVS_HI_CHIP_IDENTITY 0x00000018 | ||
| 92 | #define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK 0xff000000 | ||
| 93 | #define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT 24 | ||
| 94 | #define VIVS_HI_CHIP_IDENTITY_FAMILY(x) (((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK) | ||
| 95 | #define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK 0x00ff0000 | ||
| 96 | #define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT 16 | ||
| 97 | #define VIVS_HI_CHIP_IDENTITY_PRODUCT(x) (((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK) | ||
| 98 | #define VIVS_HI_CHIP_IDENTITY_REVISION__MASK 0x0000f000 | ||
| 99 | #define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT 12 | ||
| 100 | #define VIVS_HI_CHIP_IDENTITY_REVISION(x) (((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK) | ||
| 101 | |||
| 102 | #define VIVS_HI_CHIP_FEATURE 0x0000001c | ||
| 103 | |||
| 104 | #define VIVS_HI_CHIP_MODEL 0x00000020 | ||
| 105 | |||
| 106 | #define VIVS_HI_CHIP_REV 0x00000024 | ||
| 107 | |||
| 108 | #define VIVS_HI_CHIP_DATE 0x00000028 | ||
| 109 | |||
| 110 | #define VIVS_HI_CHIP_TIME 0x0000002c | ||
| 111 | |||
| 112 | #define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034 | ||
| 113 | |||
| 114 | #define VIVS_HI_CACHE_CONTROL 0x00000038 | ||
| 115 | |||
| 116 | #define VIVS_HI_MEMORY_COUNTER_RESET 0x0000003c | ||
| 117 | |||
| 118 | #define VIVS_HI_PROFILE_READ_BYTES8 0x00000040 | ||
| 119 | |||
| 120 | #define VIVS_HI_PROFILE_WRITE_BYTES8 0x00000044 | ||
| 121 | |||
| 122 | #define VIVS_HI_CHIP_SPECS 0x00000048 | ||
| 123 | #define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK 0x0000000f | ||
| 124 | #define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT 0 | ||
| 125 | #define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) | ||
| 126 | #define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK 0x000000f0 | ||
| 127 | #define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT 4 | ||
| 128 | #define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x) (((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK) | ||
| 129 | #define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK 0x00000f00 | ||
| 130 | #define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT 8 | ||
| 131 | #define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK) | ||
| 132 | #define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK 0x0001f000 | ||
| 133 | #define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT 12 | ||
| 134 | #define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK) | ||
| 135 | #define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK 0x01f00000 | ||
| 136 | #define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT 20 | ||
| 137 | #define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK) | ||
| 138 | #define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK 0x0e000000 | ||
| 139 | #define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT 25 | ||
| 140 | #define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x) (((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK) | ||
| 141 | #define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK 0xf0000000 | ||
| 142 | #define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT 28 | ||
| 143 | #define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK) | ||
| 144 | |||
| 145 | #define VIVS_HI_PROFILE_WRITE_BURSTS 0x0000004c | ||
| 146 | |||
| 147 | #define VIVS_HI_PROFILE_WRITE_REQUESTS 0x00000050 | ||
| 148 | |||
| 149 | #define VIVS_HI_PROFILE_READ_BURSTS 0x00000058 | ||
| 150 | |||
| 151 | #define VIVS_HI_PROFILE_READ_REQUESTS 0x0000005c | ||
| 152 | |||
| 153 | #define VIVS_HI_PROFILE_READ_LASTS 0x00000060 | ||
| 154 | |||
| 155 | #define VIVS_HI_GP_OUT0 0x00000064 | ||
| 156 | |||
| 157 | #define VIVS_HI_GP_OUT1 0x00000068 | ||
| 158 | |||
| 159 | #define VIVS_HI_GP_OUT2 0x0000006c | ||
| 160 | |||
| 161 | #define VIVS_HI_AXI_CONTROL 0x00000070 | ||
| 162 | #define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE 0x00000001 | ||
| 163 | |||
| 164 | #define VIVS_HI_CHIP_MINOR_FEATURE_1 0x00000074 | ||
| 165 | |||
| 166 | #define VIVS_HI_PROFILE_TOTAL_CYCLES 0x00000078 | ||
| 167 | |||
| 168 | #define VIVS_HI_PROFILE_IDLE_CYCLES 0x0000007c | ||
| 169 | |||
| 170 | #define VIVS_HI_CHIP_SPECS_2 0x00000080 | ||
| 171 | #define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK 0x000000ff | ||
| 172 | #define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT 0 | ||
| 173 | #define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK) | ||
| 174 | #define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK 0x0000ff00 | ||
| 175 | #define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT 8 | ||
| 176 | #define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK) | ||
| 177 | #define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK 0xffff0000 | ||
| 178 | #define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT 16 | ||
| 179 | #define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x) (((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK) | ||
| 180 | |||
| 181 | #define VIVS_HI_CHIP_MINOR_FEATURE_2 0x00000084 | ||
| 182 | |||
| 183 | #define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088 | ||
| 184 | |||
| 185 | #define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094 | ||
| 186 | |||
| 187 | #define VIVS_PM 0x00000000 | ||
| 188 | |||
| 189 | #define VIVS_PM_POWER_CONTROLS 0x00000100 | ||
| 190 | #define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING 0x00000001 | ||
| 191 | #define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING 0x00000002 | ||
| 192 | #define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING 0x00000004 | ||
| 193 | #define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK 0x000000f0 | ||
| 194 | #define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT 4 | ||
| 195 | #define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK) | ||
| 196 | #define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK 0xffff0000 | ||
| 197 | #define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT 16 | ||
| 198 | #define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK) | ||
| 199 | |||
| 200 | #define VIVS_PM_MODULE_CONTROLS 0x00000104 | ||
| 201 | #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001 | ||
| 202 | #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002 | ||
| 203 | #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004 | ||
| 204 | |||
| 205 | #define VIVS_PM_MODULE_STATUS 0x00000108 | ||
| 206 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 | ||
| 207 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002 | ||
| 208 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004 | ||
| 209 | |||
| 210 | #define VIVS_PM_PULSE_EATER 0x0000010c | ||
| 211 | |||
| 212 | #define VIVS_MMUv2 0x00000000 | ||
| 213 | |||
| 214 | #define VIVS_MMUv2_SAFE_ADDRESS 0x00000180 | ||
| 215 | |||
| 216 | #define VIVS_MMUv2_CONFIGURATION 0x00000184 | ||
| 217 | #define VIVS_MMUv2_CONFIGURATION_MODE__MASK 0x00000001 | ||
| 218 | #define VIVS_MMUv2_CONFIGURATION_MODE__SHIFT 0 | ||
| 219 | #define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K 0x00000000 | ||
| 220 | #define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K 0x00000001 | ||
| 221 | #define VIVS_MMUv2_CONFIGURATION_MODE_MASK 0x00000008 | ||
| 222 | #define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK 0x00000010 | ||
| 223 | #define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT 4 | ||
| 224 | #define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH 0x00000010 | ||
| 225 | #define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK 0x00000080 | ||
| 226 | #define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK 0x00000100 | ||
| 227 | #define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK 0xfffffc00 | ||
| 228 | #define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT 10 | ||
| 229 | #define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK) | ||
| 230 | |||
| 231 | #define VIVS_MMUv2_STATUS 0x00000188 | ||
| 232 | #define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003 | ||
| 233 | #define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0 | ||
| 234 | #define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK) | ||
| 235 | #define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030 | ||
| 236 | #define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4 | ||
| 237 | #define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK) | ||
| 238 | #define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300 | ||
| 239 | #define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8 | ||
| 240 | #define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK) | ||
| 241 | #define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000 | ||
| 242 | #define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12 | ||
| 243 | #define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK) | ||
| 244 | |||
| 245 | #define VIVS_MMUv2_CONTROL 0x0000018c | ||
| 246 | #define VIVS_MMUv2_CONTROL_ENABLE 0x00000001 | ||
| 247 | |||
| 248 | #define VIVS_MMUv2_EXCEPTION_ADDR(i0) (0x00000190 + 0x4*(i0)) | ||
| 249 | #define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004 | ||
| 250 | #define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004 | ||
| 251 | |||
| 252 | #define VIVS_MC 0x00000000 | ||
| 253 | |||
| 254 | #define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400 | ||
| 255 | |||
| 256 | #define VIVS_MC_MMU_TX_PAGE_TABLE 0x00000404 | ||
| 257 | |||
| 258 | #define VIVS_MC_MMU_PE_PAGE_TABLE 0x00000408 | ||
| 259 | |||
| 260 | #define VIVS_MC_MMU_PEZ_PAGE_TABLE 0x0000040c | ||
| 261 | |||
| 262 | #define VIVS_MC_MMU_RA_PAGE_TABLE 0x00000410 | ||
| 263 | |||
| 264 | #define VIVS_MC_DEBUG_MEMORY 0x00000414 | ||
| 265 | #define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320 0x00000008 | ||
| 266 | #define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS 0x00100000 | ||
| 267 | #define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS 0x00200000 | ||
| 268 | |||
| 269 | #define VIVS_MC_MEMORY_BASE_ADDR_RA 0x00000418 | ||
| 270 | |||
| 271 | #define VIVS_MC_MEMORY_BASE_ADDR_FE 0x0000041c | ||
| 272 | |||
| 273 | #define VIVS_MC_MEMORY_BASE_ADDR_TX 0x00000420 | ||
| 274 | |||
| 275 | #define VIVS_MC_MEMORY_BASE_ADDR_PEZ 0x00000424 | ||
| 276 | |||
| 277 | #define VIVS_MC_MEMORY_BASE_ADDR_PE 0x00000428 | ||
| 278 | |||
| 279 | #define VIVS_MC_MEMORY_TIMING_CONTROL 0x0000042c | ||
| 280 | |||
| 281 | #define VIVS_MC_MEMORY_FLUSH 0x00000430 | ||
| 282 | |||
| 283 | #define VIVS_MC_PROFILE_CYCLE_COUNTER 0x00000438 | ||
| 284 | |||
| 285 | #define VIVS_MC_DEBUG_READ0 0x0000043c | ||
| 286 | |||
| 287 | #define VIVS_MC_DEBUG_READ1 0x00000440 | ||
| 288 | |||
| 289 | #define VIVS_MC_DEBUG_WRITE 0x00000444 | ||
| 290 | |||
| 291 | #define VIVS_MC_PROFILE_RA_READ 0x00000448 | ||
| 292 | |||
| 293 | #define VIVS_MC_PROFILE_TX_READ 0x0000044c | ||
| 294 | |||
| 295 | #define VIVS_MC_PROFILE_FE_READ 0x00000450 | ||
| 296 | |||
| 297 | #define VIVS_MC_PROFILE_PE_READ 0x00000454 | ||
| 298 | |||
| 299 | #define VIVS_MC_PROFILE_DE_READ 0x00000458 | ||
| 300 | |||
| 301 | #define VIVS_MC_PROFILE_SH_READ 0x0000045c | ||
| 302 | |||
| 303 | #define VIVS_MC_PROFILE_PA_READ 0x00000460 | ||
| 304 | |||
| 305 | #define VIVS_MC_PROFILE_SE_READ 0x00000464 | ||
| 306 | |||
| 307 | #define VIVS_MC_PROFILE_MC_READ 0x00000468 | ||
| 308 | |||
| 309 | #define VIVS_MC_PROFILE_HI_READ 0x0000046c | ||
| 310 | |||
| 311 | #define VIVS_MC_PROFILE_CONFIG0 0x00000470 | ||
| 312 | #define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x0000000f | ||
| 313 | #define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0 | ||
| 314 | #define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f | ||
| 315 | #define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x00000f00 | ||
| 316 | #define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8 | ||
| 317 | #define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00 | ||
| 318 | #define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x000f0000 | ||
| 319 | #define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16 | ||
| 320 | #define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000 | ||
| 321 | #define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000 | ||
| 322 | #define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE 0x00020000 | ||
| 323 | #define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000 | ||
| 324 | #define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000 | ||
| 325 | #define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000 | ||
| 326 | #define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0x0f000000 | ||
| 327 | #define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24 | ||
| 328 | #define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000 | ||
| 329 | #define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000 | ||
| 330 | #define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER 0x08000000 | ||
| 331 | #define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER 0x09000000 | ||
| 332 | #define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER 0x0a000000 | ||
| 333 | #define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER 0x0b000000 | ||
| 334 | #define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER 0x0c000000 | ||
| 335 | #define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER 0x0d000000 | ||
| 336 | #define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER 0x0e000000 | ||
| 337 | #define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000 | ||
| 338 | |||
| 339 | #define VIVS_MC_PROFILE_CONFIG1 0x00000474 | ||
| 340 | #define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x0000000f | ||
| 341 | #define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0 | ||
| 342 | #define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003 | ||
| 343 | #define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004 | ||
| 344 | #define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER 0x00000005 | ||
| 345 | #define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006 | ||
| 346 | #define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007 | ||
| 347 | #define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008 | ||
| 348 | #define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f | ||
| 349 | #define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x00000f00 | ||
| 350 | #define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8 | ||
| 351 | #define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000 | ||
| 352 | #define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100 | ||
| 353 | #define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00 | ||
| 354 | #define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x000f0000 | ||
| 355 | #define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16 | ||
| 356 | #define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000 | ||
| 357 | #define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000 | ||
| 358 | #define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z 0x00020000 | ||
| 359 | #define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT 0x00030000 | ||
| 360 | #define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER 0x00090000 | ||
| 361 | #define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000 | ||
| 362 | #define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000 | ||
| 363 | #define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000 | ||
| 364 | #define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0x0f000000 | ||
| 365 | #define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24 | ||
| 366 | #define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000 | ||
| 367 | #define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000 | ||
| 368 | #define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS 0x02000000 | ||
| 369 | #define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS 0x03000000 | ||
| 370 | #define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN 0x04000000 | ||
| 371 | #define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT 0x05000000 | ||
| 372 | #define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT 0x06000000 | ||
| 373 | #define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT 0x07000000 | ||
| 374 | #define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT 0x08000000 | ||
| 375 | #define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT 0x09000000 | ||
| 376 | #define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000 | ||
| 377 | |||
| 378 | #define VIVS_MC_PROFILE_CONFIG2 0x00000478 | ||
| 379 | #define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x0000000f | ||
| 380 | #define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0 | ||
| 381 | #define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001 | ||
| 382 | #define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002 | ||
| 383 | #define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003 | ||
| 384 | #define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f | ||
| 385 | #define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x00000f00 | ||
| 386 | #define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8 | ||
| 387 | #define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000 | ||
| 388 | #define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100 | ||
| 389 | #define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200 | ||
| 390 | #define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00 | ||
| 391 | |||
| 392 | #define VIVS_MC_PROFILE_CONFIG3 0x0000047c | ||
| 393 | |||
| 394 | #define VIVS_MC_BUS_CONFIG 0x00000480 | ||
| 395 | #define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK 0x0000000f | ||
| 396 | #define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT 0 | ||
| 397 | #define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK) | ||
| 398 | #define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK 0x000000f0 | ||
| 399 | #define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT 4 | ||
| 400 | #define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK) | ||
| 401 | |||
| 402 | #define VIVS_MC_START_COMPOSITION 0x00000554 | ||
| 403 | |||
| 404 | #define VIVS_MC_128B_MERGE 0x00000558 | ||
| 405 | |||
| 406 | |||
| 407 | #endif /* STATE_HI_XML */ | ||
