Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
 -rw-r--r--   drivers/gpu/drm/vmwgfx/svga3d_reg.h         | 153
 -rw-r--r--   drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h |  11
 -rw-r--r--   drivers/gpu/drm/vmwgfx/svga_reg.h           |   9
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_context.c     | 141
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         |  10
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.h         |  38
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     | 337
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c       |  96
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_mob.c         |  36
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_resource.c    |  14
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_shader.c      | 469
 -rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_surface.c     |  24
12 files changed, 1126 insertions(+), 212 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d95335cb90bd..f58dc7dd15c5 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -261,12 +261,7 @@ typedef enum SVGA3dSurfaceFormat {
    /* Planar video formats. */
    SVGA3D_YV12 = 121,
 
-   /* Shader constant formats. */
-   SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
-   SVGA3D_SURFACE_SHADERCONST_INT = 123,
-   SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
-
-   SVGA3D_FORMAT_MAX = 125,
+   SVGA3D_FORMAT_MAX = 122,
 } SVGA3dSurfaceFormat;
 
 typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -1223,9 +1218,19 @@ typedef enum {
 #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
 
 #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
-
+#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
+#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
+#define SVGA_3D_CMD_GB_MOB_FENCE 1133
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
 #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
 #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
+#define SVGA_3D_CMD_NOP_ERROR 1137
+
+#define SVGA_3D_CMD_RESERVED1 1138
+#define SVGA_3D_CMD_RESERVED2 1139
+#define SVGA_3D_CMD_RESERVED3 1140
+#define SVGA_3D_CMD_RESERVED4 1141
+#define SVGA_3D_CMD_RESERVED5 1142
 
 #define SVGA_3D_CMD_MAX 1142
 #define SVGA_3D_CMD_FUTURE_MAX 3000
@@ -1973,8 +1978,7 @@ struct {
    uint32 sizeInBytes;
    uint32 validSizeInBytes;
    SVGAMobFormat ptDepth;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
 
 typedef
@@ -1984,15 +1988,13 @@ struct {
    uint32 sizeInBytes;
    uint32 validSizeInBytes;
    SVGAMobFormat ptDepth;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
 
 typedef
 struct {
    SVGAOTableType type;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
 
 /*
@@ -2005,8 +2007,7 @@ struct SVGA3dCmdDefineGBMob {
    SVGAMobFormat ptDepth;
    PPN base;
    uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
 
 
@@ -2017,8 +2018,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
 typedef
 struct SVGA3dCmdDestroyGBMob {
    SVGAMobId mobid;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
 
 /*
@@ -2031,8 +2031,7 @@ struct SVGA3dCmdRedefineGBMob {
    SVGAMobFormat ptDepth;
    PPN base;
    uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
 
 /*
@@ -2045,8 +2044,7 @@ struct SVGA3dCmdDefineGBMob64 {
    SVGAMobFormat ptDepth;
    PPN64 base;
    uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
 
 /*
@@ -2059,8 +2057,7 @@ struct SVGA3dCmdRedefineGBMob64 {
    SVGAMobFormat ptDepth;
    PPN64 base;
    uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
 
 /*
@@ -2070,8 +2067,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
 typedef
 struct SVGA3dCmdUpdateGBMobMapping {
    SVGAMobId mobid;
-}
-__attribute__((__packed__))
+} __packed
 SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
 
 /*
@@ -2087,7 +2083,8 @@ struct SVGA3dCmdDefineGBSurface {
    uint32 multisampleCount;
    SVGA3dTextureFilter autogenFilter;
    SVGA3dSize size;
-} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+} __packed
+SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
 
 /*
  * Destroy a guest-backed surface.
@@ -2096,7 +2093,8 @@ struct SVGA3dCmdDefineGBSurface {
 typedef
 struct SVGA3dCmdDestroyGBSurface {
    uint32 sid;
-} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+} __packed
+SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
 
 /*
  * Bind a guest-backed surface to an object.
@@ -2106,7 +2104,8 @@ typedef
 struct SVGA3dCmdBindGBSurface {
    uint32 sid;
    SVGAMobId mobid;
-} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+} __packed
+SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
 
 /*
  * Conditionally bind a mob to a guest backed surface if testMobid
@@ -2123,7 +2122,7 @@ struct{
    SVGAMobId testMobid;
    SVGAMobId mobid;
    uint32 flags;
-}
+} __packed
 SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
 
 /*
@@ -2135,7 +2134,8 @@ typedef
 struct SVGA3dCmdUpdateGBImage {
    SVGA3dSurfaceImageId image;
    SVGA3dBox box;
-} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+} __packed
+SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
 
 /*
  * Update an entire guest-backed surface.
@@ -2145,7 +2145,8 @@ struct SVGA3dCmdUpdateGBImage {
 typedef
 struct SVGA3dCmdUpdateGBSurface {
    uint32 sid;
-} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+} __packed
+SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
 
 /*
  * Readback an image in a guest-backed surface.
@@ -2155,7 +2156,8 @@ struct SVGA3dCmdUpdateGBSurface {
 typedef
 struct SVGA3dCmdReadbackGBImage {
    SVGA3dSurfaceImageId image;
-} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
+} __packed
+SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
 
 /*
  * Readback an entire guest-backed surface.
@@ -2165,7 +2167,8 @@ struct SVGA3dCmdReadbackGBImage {
 typedef
 struct SVGA3dCmdReadbackGBSurface {
    uint32 sid;
-} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+} __packed
+SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
 
 /*
  * Readback a sub rect of an image in a guest-backed surface. After
@@ -2179,7 +2182,7 @@ struct SVGA3dCmdReadbackGBImagePartial {
    SVGA3dSurfaceImageId image;
    SVGA3dBox box;
    uint32 invertBox;
-}
+} __packed
 SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
 
 /*
@@ -2190,7 +2193,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
 typedef
 struct SVGA3dCmdInvalidateGBImage {
    SVGA3dSurfaceImageId image;
-} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+} __packed
+SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
 
 /*
  * Invalidate an entire guest-backed surface.
@@ -2200,7 +2204,8 @@ struct SVGA3dCmdInvalidateGBImage {
 typedef
 struct SVGA3dCmdInvalidateGBSurface {
    uint32 sid;
-} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+} __packed
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
 
 /*
  * Invalidate a sub rect of an image in a guest-backed surface. After
@@ -2214,7 +2219,7 @@ struct SVGA3dCmdInvalidateGBImagePartial {
    SVGA3dSurfaceImageId image;
    SVGA3dBox box;
    uint32 invertBox;
-}
+} __packed
 SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
 
 /*
@@ -2224,7 +2229,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
 typedef
 struct SVGA3dCmdDefineGBContext {
    uint32 cid;
-} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+} __packed
+SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
 
 /*
  * Destroy a guest-backed context.
@@ -2233,7 +2239,8 @@ struct SVGA3dCmdDefineGBContext {
 typedef
 struct SVGA3dCmdDestroyGBContext {
    uint32 cid;
-} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+} __packed
+SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
 
 /*
  * Bind a guest-backed context.
@@ -2252,7 +2259,8 @@ struct SVGA3dCmdBindGBContext {
    uint32 cid;
    SVGAMobId mobid;
    uint32 validContents;
-} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+} __packed
+SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
 
 /*
  * Readback a guest-backed context.
@@ -2262,7 +2270,8 @@ struct SVGA3dCmdBindGBContext {
 typedef
 struct SVGA3dCmdReadbackGBContext {
    uint32 cid;
-} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+} __packed
+SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
 
 /*
  * Invalidate a guest-backed context.
@@ -2270,7 +2279,8 @@ struct SVGA3dCmdReadbackGBContext {
 typedef
 struct SVGA3dCmdInvalidateGBContext {
    uint32 cid;
-} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+} __packed
+SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
 
 /*
  * Define a guest-backed shader.
@@ -2281,7 +2291,8 @@ struct SVGA3dCmdDefineGBShader {
    uint32 shid;
    SVGA3dShaderType type;
    uint32 sizeInBytes;
-} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+} __packed
+SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
 
 /*
  * Bind a guest-backed shader.
@@ -2291,7 +2302,8 @@ typedef struct SVGA3dCmdBindGBShader {
    uint32 shid;
    SVGAMobId mobid;
    uint32 offsetInBytes;
-} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+} __packed
+SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
 
 /*
  * Destroy a guest-backed shader.
@@ -2299,7 +2311,8 @@ typedef struct SVGA3dCmdBindGBShader {
 
 typedef struct SVGA3dCmdDestroyGBShader {
    uint32 shid;
-} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+} __packed
+SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
 
 typedef
 struct {
@@ -2314,14 +2327,16 @@ struct {
     * Note that FLOAT and INT constants are 4-dwords in length, while
     * BOOL constants are 1-dword in length.
     */
-} SVGA3dCmdSetGBShaderConstInline;
+} __packed
+SVGA3dCmdSetGBShaderConstInline;
 /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
 
 typedef
 struct {
    uint32 cid;
    SVGA3dQueryType type;
-} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+} __packed
+SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
 
 typedef
 struct {
@@ -2329,7 +2344,8 @@ struct {
    SVGA3dQueryType type;
    SVGAMobId mobid;
    uint32 offset;
-} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+} __packed
+SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
 
 
 /*
@@ -2346,21 +2362,22 @@ struct {
    SVGA3dQueryType type;
    SVGAMobId mobid;
    uint32 offset;
-} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+} __packed
+SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
 
 typedef
 struct {
    SVGAMobId mobid;
    uint32 fbOffset;
    uint32 initalized;
-}
+} __packed
 SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
 
 typedef
 struct {
    SVGAMobId mobid;
    uint32 gartOffset;
-}
+} __packed
 SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
 
 
@@ -2368,7 +2385,7 @@ typedef
 struct {
    uint32 gartOffset;
    uint32 numPages;
-}
+} __packed
 SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
 
 
@@ -2385,27 +2402,27 @@ struct {
    int32 xRoot;
    int32 yRoot;
    uint32 flags;
-}
+} __packed
 SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
 
 typedef
 struct {
    uint32 stid;
-}
+} __packed
 SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
 
 typedef
 struct {
    uint32 stid;
    SVGA3dSurfaceImageId image;
-}
+} __packed
 SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
 
 typedef
 struct {
    uint32 stid;
    SVGA3dBox box;
-}
+} __packed
 SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
 
 /*
@@ -2583,4 +2600,28 @@ typedef union {
    float f;
 } SVGA3dDevCapResult;
 
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */
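A note on the __packed conversion above: __packed is simply the kernel's
shorthand for the GCC attribute, defined in include/linux/compiler-gcc.h as

    #define __packed __attribute__((__packed__))

The attribute matters here because these typedefs describe the exact wire
layout of SVGA device commands. A minimal sketch of what it prevents (the
two structures below are hypothetical, not from the driver):

    #include <stdint.h>

    struct unpacked_cmd {           /* natural alignment */
            uint32_t sid;
            uint64_t mobid;         /* compiler inserts 4 padding bytes first */
    };                              /* sizeof == 16 on common 64-bit ABIs */

    struct __attribute__((__packed__)) packed_cmd {
            uint32_t sid;
            uint64_t mobid;         /* no padding; matches the device layout */
    };                              /* sizeof == 12 */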
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
index 8369c3ba10fe..ef3385096145 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -38,8 +38,11 @@
 
 #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
 #define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
 #define surf_size_struct SVGA3dSize
 #define u32 uint32
+#define u64 uint64_t
+#define U32_MAX ((u32)~0U)
 
 #endif /* __KERNEL__ */
 
@@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
 
 static inline u32 clamped_umul32(u32 a, u32 b)
 {
-	uint64_t tmp = (uint64_t) a*b;
-	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+	u64 tmp = (u64) a*b;
+	return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
 }
 
 static inline const struct svga3d_surface_desc *
@@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
 				  bool cubemap)
 {
 	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
-	u32 total_size = 0;
+	u64 total_size = 0;
 	u32 mip;
 
 	for (mip = 0; mip < num_mip_levels; mip++) {
@@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
 	if (cubemap)
 		total_size *= SVGA3D_MAX_SURFACE_FACES;
 
-	return total_size;
+	return (u32) min_t(u64, total_size, (u64) U32_MAX);
 }
 
 
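The svga3d_surfacedefs.h hunks above are overflow hardening: surface size
arithmetic that used to run in 32 bits is widened to 64 bits and then
saturated, so a hostile or malformed surface description cannot wrap around
to a small value. The idiom, shown standalone as a sketch using plain C99
types rather than the header's u32/u64 macros:

    #include <stdint.h>

    #define U32_MAX ((uint32_t)~0U)

    /* Multiply in 64 bits, then clamp to U32_MAX instead of wrapping. */
    static inline uint32_t clamped_umul32(uint32_t a, uint32_t b)
    {
            uint64_t tmp = (uint64_t)a * b;
            return (tmp > (uint64_t)U32_MAX) ? U32_MAX : (uint32_t)tmp;
    }

A caller that then checks the saturated result against a real limit (such as
the new max_mob_size read in the vmwgfx_drv.c hunks below) rejects the
oversized request instead of allocating a truncated buffer.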
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 71defa4d2d75..11323dd5196f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,10 +169,17 @@ enum {
    SVGA_REG_TRACES = 45,         /* Enable trace-based updates even when FIFO is on */
    SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
    SVGA_REG_MEMORY_SIZE = 47,    /* Total dedicated device memory excluding FIFO */
+   SVGA_REG_COMMAND_LOW = 48,    /* Lower 32 bits and submits commands */
+   SVGA_REG_COMMAND_HIGH = 49,   /* Upper 32 bits of command buffer PA */
    SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
    SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
    SVGA_REG_DEV_CAP = 52,        /* Write dev cap index, read value */
-   SVGA_REG_TOP = 53,            /* Must be 1 more than the last register */
+   SVGA_REG_CMD_PREPEND_LOW = 53,
+   SVGA_REG_CMD_PREPEND_HIGH = 54,
+   SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+   SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+   SVGA_REG_MOB_MAX_SIZE = 57,
+   SVGA_REG_TOP = 58,            /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,     /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
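The new command-buffer register pair deserves a gloss: per the comments
above, the 64-bit physical address of a command buffer is split across the
two registers, and it is the write to SVGA_REG_COMMAND_LOW that actually
submits the buffer, which implies the high half must be written first. A
hedged sketch of that sequence, reusing the driver's existing vmw_write()
accessor (the helper name itself is made up):

    static void example_cmdbuf_submit(struct vmw_private *dev_priv, u64 pa)
    {
            vmw_write(dev_priv, SVGA_REG_COMMAND_HIGH, (u32)(pa >> 32));
            vmw_write(dev_priv, SVGA_REG_COMMAND_LOW, (u32)pa); /* kicks the device */
    }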
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 82c41daebc0e..1e80152674b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
 
 
 
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
 static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
 	if (res->func->destroy == vmw_gb_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
+		mutex_lock(&dev_priv->binding_mutex);
+		(void) vmw_context_binding_state_kill
+			(&container_of(res, struct vmw_user_context, res)->cbs);
 		(void) vmw_gb_context_destroy(res);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->binding_mutex);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 		return;
 	}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_kill(&uctx->cbs);
+	vmw_context_binding_state_scrub(&uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBContext body;
 	} *cmd;
-	struct vmw_user_context *uctx =
-		container_of(res, struct vmw_user_context, res);
-
-	BUG_ON(!list_empty(&uctx->cbs.list));
 
 	if (likely(res->id == -1))
 		return 0;
@@ -528,8 +530,9 @@ out_unlock:
  * vmw_context_scrub_shader - scrub a shader binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -548,7 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = SVGA3D_INVALID_ID;
+	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -559,8 +562,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
  * from a context.
 *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -579,7 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	cmd->body.target.face = 0;
 	cmd->body.target.mipmap = 0;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +596,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
  * vmw_context_scrub_texture - scrub a texture binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  *
  * TODO: Possibly complement this function with a function that takes
  * a list of texture bindings and combines them to a single command.
  */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -619,7 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
 	cmd->body.c.cid = bi->ctx->id;
 	cmd->body.s1.stage = bi->i1.texture_stage;
 	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -692,6 +699,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
 		vmw_context_binding_drop(loc);
 
 	loc->bi = *bi;
+	loc->bi.scrubbed = false;
 	list_add_tail(&loc->ctx_list, &cbs->list);
 	INIT_LIST_HEAD(&loc->res_list);
 
@@ -727,12 +735,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 	if (loc->bi.ctx != NULL)
 		vmw_context_binding_drop(loc);
 
-	loc->bi = *bi;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	if (bi->res != NULL)
+	if (bi->res != NULL) {
+		loc->bi = *bi;
+		list_add_tail(&loc->ctx_list, &cbs->list);
 		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	else
-		INIT_LIST_HEAD(&loc->res_list);
+	}
 }
 
 /**
@@ -746,7 +753,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
  */
 static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
 {
-	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	if (!cb->bi.scrubbed) {
+		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+		cb->bi.scrubbed = true;
+	}
 	vmw_context_binding_drop(cb);
 }
 
@@ -768,6 +778,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 }
 
 /**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
  * vmw_context_binding_res_list_kill - Kill all bindings on a
  * resource binding list
  *
@@ -785,6 +816,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
 }
 
 /**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
  * vmw_context_binding_state_transfer - Commit staged binding info
 *
  * @ctx: Pointer to context to commit the staged binding info to.
@@ -803,3 +855,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
 	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
 		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+	struct vmw_ctx_binding *entry;
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->bi.scrubbed))
+			continue;
+
+		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+			    SVGA3D_INVALID_ID))
+			continue;
+
+		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->bi.scrubbed = false;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
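The context changes above split binding teardown into two modes: kill (emit
the unbind command and drop the tracking entry) and scrub (emit the unbind
command but keep the entry, with bi.scrubbed recording the state), so that
vmw_context_rebind_all() can later reinstate everything. A schematic of the
intended eviction/restore cycle; the wrapper names below are illustrative
and not driver functions:

    /* Eviction path: the context's backing store is about to go away. */
    static void example_evict(struct vmw_user_context *uctx)
    {
            /* Emits scrub commands; tracking entries survive with
             * bi.scrubbed == true. */
            vmw_context_binding_state_scrub(&uctx->cbs);
    }

    /* Command-submission path: the context has been validated again. */
    static int example_revalidate(struct vmw_resource *ctx)
    {
            /* Reemits bind commands for every scrubbed entry and clears
             * bi.scrubbed on success. */
            return vmw_context_rebind_all(ctx);
    }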
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..0083cbf99edf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->memory_size = 512*1024*1024;
 	}
 	dev_priv->max_mob_pages = 0;
+	dev_priv->max_mob_size = 0;
 	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
 		uint64_t mem_size =
 			vmw_read(dev_priv,
@@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->prim_bb_mem =
 			vmw_read(dev_priv,
 				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+		dev_priv->max_mob_size =
+			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 	} else
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
 
@@ -941,6 +944,7 @@ static void vmw_postclose(struct drm_device *dev,
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
+	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -960,11 +964,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
+	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+	if (IS_ERR(vmw_fp->shman))
+		goto out_no_shman;
+
 	file_priv->driver_priv = vmw_fp;
 	dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
 	return 0;
 
+out_no_shman:
+	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..07831554dad7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,7 +40,7 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20121114"
+#define VMWGFX_DRIVER_DATE "20140228"
 #define VMWGFX_DRIVER_MAJOR 2
 #define VMWGFX_DRIVER_MINOR 5
 #define VMWGFX_DRIVER_PATCHLEVEL 0
@@ -75,10 +75,14 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
+	bool gb_aware;
+	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
 	struct vmw_resource *ctx;
 	struct vmw_resource *res;
 	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
 	union {
 		SVGA3dShaderType shader_type;
 		SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct ttm_object_file *tfile;
+	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
+	struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -380,6 +386,7 @@ struct vmw_private {
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
 	uint32_t max_mob_pages;
+	uint32_t max_mob_size;
 	uint32_t memory_size;
 	bool has_gmr;
 	bool has_mob;
@@ -569,6 +576,8 @@ struct vmw_user_resource_conv;
 
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +966,9 @@ extern void
 vmw_context_binding_state_transfer(struct vmw_resource *res,
 				   struct vmw_ctx_binding_state *cbs);
 extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
 /*
  * Surface management - vmwgfx_surface.c
@@ -991,6 +1003,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+				    SVGA3dShaderType shader_type,
+				    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+				    u32 user_key,
+				    SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct ttm_object_file *tfile,
+				 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
 
 /**
  * Inline helper functions
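The compat shader manager declared above is a per-open-file object, created
and torn down with the file handle (the vmwgfx_drv.c hunks earlier wire this
into vmw_driver_open() and vmw_postclose()). A minimal sketch of that
lifecycle, assuming only the prototypes above (the example_* names are not
driver functions):

    static int example_open(struct vmw_private *dev_priv,
                            struct vmw_fpriv *vmw_fp)
    {
            vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
            if (IS_ERR(vmw_fp->shman))
                    return PTR_ERR(vmw_fp->shman);
            return 0;
    }

    static void example_close(struct vmw_fpriv *vmw_fp)
    {
            vmw_compat_shader_man_destroy(vmw_fp->shman);
    }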
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 7a5f1eb55c5a..efb575a7996c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list, | |||
| 114 | * persistent context binding tracker. | 114 | * persistent context binding tracker. |
| 115 | */ | 115 | */ |
| 116 | if (unlikely(val->staged_bindings)) { | 116 | if (unlikely(val->staged_bindings)) { |
| 117 | vmw_context_binding_state_transfer | 117 | if (!backoff) { |
| 118 | (val->res, val->staged_bindings); | 118 | vmw_context_binding_state_transfer |
| 119 | (val->res, val->staged_bindings); | ||
| 120 | } | ||
| 119 | kfree(val->staged_bindings); | 121 | kfree(val->staged_bindings); |
| 120 | val->staged_bindings = NULL; | 122 | val->staged_bindings = NULL; |
| 121 | } | 123 | } |
| @@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, | |||
| 178 | } | 180 | } |
| 179 | 181 | ||
| 180 | /** | 182 | /** |
| 183 | * vmw_resource_context_res_add - Put resources previously bound to a context on | ||
| 184 | * the validation list | ||
| 185 | * | ||
| 186 | * @dev_priv: Pointer to a device private structure | ||
| 187 | * @sw_context: Pointer to a software context used for this command submission | ||
| 188 | * @ctx: Pointer to the context resource | ||
| 189 | * | ||
| 190 | * This function puts all resources that were previously bound to @ctx on | ||
| 191 | * the resource validation list. This is part of the context state reemission | ||
| 192 | */ | ||
| 193 | static int vmw_resource_context_res_add(struct vmw_private *dev_priv, | ||
| 194 | struct vmw_sw_context *sw_context, | ||
| 195 | struct vmw_resource *ctx) | ||
| 196 | { | ||
| 197 | struct list_head *binding_list; | ||
| 198 | struct vmw_ctx_binding *entry; | ||
| 199 | int ret = 0; | ||
| 200 | struct vmw_resource *res; | ||
| 201 | |||
| 202 | mutex_lock(&dev_priv->binding_mutex); | ||
| 203 | binding_list = vmw_context_binding_list(ctx); | ||
| 204 | |||
| 205 | list_for_each_entry(entry, binding_list, ctx_list) { | ||
| 206 | res = vmw_resource_reference_unless_doomed(entry->bi.res); | ||
| 207 | if (unlikely(res == NULL)) | ||
| 208 | continue; | ||
| 209 | |||
| 210 | ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); | ||
| 211 | vmw_resource_unreference(&res); | ||
| 212 | if (unlikely(ret != 0)) | ||
| 213 | break; | ||
| 214 | } | ||
| 215 | |||
| 216 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 217 | return ret; | ||
| 218 | } | ||
| 219 | |||
| 220 | /** | ||
| 181 | * vmw_resource_relocation_add - Add a relocation to the relocation list | 221 | * vmw_resource_relocation_add - Add a relocation to the relocation list |
| 182 | * | 222 | * |
| 183 | * @list: Pointer to head of relocation list. | 223 | * @list: Pointer to head of relocation list. |
| @@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb, | |||
| 233 | { | 273 | { |
| 234 | struct vmw_resource_relocation *rel; | 274 | struct vmw_resource_relocation *rel; |
| 235 | 275 | ||
| 236 | list_for_each_entry(rel, list, head) | 276 | list_for_each_entry(rel, list, head) { |
| 237 | cb[rel->offset] = rel->res->id; | 277 | if (likely(rel->res != NULL)) |
| 278 | cb[rel->offset] = rel->res->id; | ||
| 279 | else | ||
| 280 | cb[rel->offset] = SVGA_3D_CMD_NOP; | ||
| 281 | } | ||
| 238 | } | 282 | } |
| 239 | 283 | ||
| 240 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, | 284 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, |
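With the change above, a relocation that carries a NULL resource pointer no longer patches in a resource id; instead the 32-bit slot it points at (the command header id, in the compat-shader paths below) is overwritten with SVGA_3D_CMD_NOP, so the device skips the already-handled command. A standalone sketch of the idea; the NOP value here is a placeholder, not the real constant from svga3d_reg.h:

	#include <stddef.h>
	#include <stdint.h>

	#define CMD_NOP 0xdeadu	/* placeholder for SVGA_3D_CMD_NOP */

	struct reloc {
		size_t offset;		/* in 32-bit words, as in the driver */
		const uint32_t *res_id;	/* NULL: turn the command into a NOP */
	};

	static void apply_relocs(uint32_t *cb, const struct reloc *r, size_t n)
	{
		for (size_t i = 0; i < n; ++i)
			cb[r[i].offset] = r[i].res_id ? *r[i].res_id : CMD_NOP;
	}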
| @@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) | |||
| 379 | } | 423 | } |
| 380 | 424 | ||
| 381 | /** | 425 | /** |
| 382 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | 426 | * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it |
| 383 | * on the resource validate list unless it's already there. | 427 | * on the resource validate list unless it's already there. |
| 384 | * | 428 | * |
| 385 | * @dev_priv: Pointer to a device private structure. | 429 | * @dev_priv: Pointer to a device private structure. |
| 386 | * @sw_context: Pointer to the software context. | 430 | * @sw_context: Pointer to the software context. |
| 387 | * @res_type: Resource type. | 431 | * @res_type: Resource type. |
| 388 | * @converter: User-space visible type specific information. | 432 | * @converter: User-space visible type specific information. |
| 389 | * @id: Pointer to the location in the command buffer currently being | 433 | * @id: User-space resource id handle. |
| 434 | * @id_loc: Pointer to the location in the command buffer currently being | ||
| 390 | * parsed from where the user-space resource id handle is located. | 435 | * parsed from where the user-space resource id handle is located. |
| 436 | * @p_val: Pointer to pointer to resource validation node. Populated | ||
| 437 | * on exit. | ||
| 391 | */ | 438 | */ |
| 392 | static int vmw_cmd_res_check(struct vmw_private *dev_priv, | 439 | static int |
| 393 | struct vmw_sw_context *sw_context, | 440 | vmw_cmd_compat_res_check(struct vmw_private *dev_priv, |
| 394 | enum vmw_res_type res_type, | 441 | struct vmw_sw_context *sw_context, |
| 395 | const struct vmw_user_resource_conv *converter, | 442 | enum vmw_res_type res_type, |
| 396 | uint32_t *id, | 443 | const struct vmw_user_resource_conv *converter, |
| 397 | struct vmw_resource_val_node **p_val) | 444 | uint32_t id, |
| 445 | uint32_t *id_loc, | ||
| 446 | struct vmw_resource_val_node **p_val) | ||
| 398 | { | 447 | { |
| 399 | struct vmw_res_cache_entry *rcache = | 448 | struct vmw_res_cache_entry *rcache = |
| 400 | &sw_context->res_cache[res_type]; | 449 | &sw_context->res_cache[res_type]; |
| @@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 402 | struct vmw_resource_val_node *node; | 451 | struct vmw_resource_val_node *node; |
| 403 | int ret; | 452 | int ret; |
| 404 | 453 | ||
| 405 | if (*id == SVGA3D_INVALID_ID) { | 454 | if (id == SVGA3D_INVALID_ID) { |
| 406 | if (p_val) | 455 | if (p_val) |
| 407 | *p_val = NULL; | 456 | *p_val = NULL; |
| 408 | if (res_type == vmw_res_context) { | 457 | if (res_type == vmw_res_context) { |
| @@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 417 | * resource | 466 | * resource |
| 418 | */ | 467 | */ |
| 419 | 468 | ||
| 420 | if (likely(rcache->valid && *id == rcache->handle)) { | 469 | if (likely(rcache->valid && id == rcache->handle)) { |
| 421 | const struct vmw_resource *res = rcache->res; | 470 | const struct vmw_resource *res = rcache->res; |
| 422 | 471 | ||
| 423 | rcache->node->first_usage = false; | 472 | rcache->node->first_usage = false; |
| @@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 426 | 475 | ||
| 427 | return vmw_resource_relocation_add | 476 | return vmw_resource_relocation_add |
| 428 | (&sw_context->res_relocations, res, | 477 | (&sw_context->res_relocations, res, |
| 429 | id - sw_context->buf_start); | 478 | id_loc - sw_context->buf_start); |
| 430 | } | 479 | } |
| 431 | 480 | ||
| 432 | ret = vmw_user_resource_lookup_handle(dev_priv, | 481 | ret = vmw_user_resource_lookup_handle(dev_priv, |
| 433 | sw_context->tfile, | 482 | sw_context->fp->tfile, |
| 434 | *id, | 483 | id, |
| 435 | converter, | 484 | converter, |
| 436 | &res); | 485 | &res); |
| 437 | if (unlikely(ret != 0)) { | 486 | if (unlikely(ret != 0)) { |
| 438 | DRM_ERROR("Could not find or use resource 0x%08x.\n", | 487 | DRM_ERROR("Could not find or use resource 0x%08x.\n", |
| 439 | (unsigned) *id); | 488 | (unsigned) id); |
| 440 | dump_stack(); | 489 | dump_stack(); |
| 441 | return ret; | 490 | return ret; |
| 442 | } | 491 | } |
| 443 | 492 | ||
| 444 | rcache->valid = true; | 493 | rcache->valid = true; |
| 445 | rcache->res = res; | 494 | rcache->res = res; |
| 446 | rcache->handle = *id; | 495 | rcache->handle = id; |
| 447 | 496 | ||
| 448 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, | 497 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
| 449 | res, | 498 | res, |
| 450 | id - sw_context->buf_start); | 499 | id_loc - sw_context->buf_start); |
| 451 | if (unlikely(ret != 0)) | 500 | if (unlikely(ret != 0)) |
| 452 | goto out_no_reloc; | 501 | goto out_no_reloc; |
| 453 | 502 | ||
| @@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 459 | if (p_val) | 508 | if (p_val) |
| 460 | *p_val = node; | 509 | *p_val = node; |
| 461 | 510 | ||
| 462 | if (node->first_usage && res_type == vmw_res_context) { | 511 | if (dev_priv->has_mob && node->first_usage && |
| 512 | res_type == vmw_res_context) { | ||
| 513 | ret = vmw_resource_context_res_add(dev_priv, sw_context, res); | ||
| 514 | if (unlikely(ret != 0)) | ||
| 515 | goto out_no_reloc; | ||
| 463 | node->staged_bindings = | 516 | node->staged_bindings = |
| 464 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); | 517 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); |
| 465 | if (node->staged_bindings == NULL) { | 518 | if (node->staged_bindings == NULL) { |
| @@ -481,6 +534,59 @@ out_no_reloc: | |||
| 481 | } | 534 | } |
| 482 | 535 | ||
| 483 | /** | 536 | /** |
| 537 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | ||
| 538 | * on the resource validate list unless it's already there. | ||
| 539 | * | ||
| 540 | * @dev_priv: Pointer to a device private structure. | ||
| 541 | * @sw_context: Pointer to the software context. | ||
| 542 | * @res_type: Resource type. | ||
| 543 | * @converter: User-space visible type specific information. | ||
| 544 | * @id_loc: Pointer to the location in the command buffer currently being | ||
| 545 | * parsed from where the user-space resource id handle is located. | ||
| 546 | * @p_val: Pointer to pointer to resource validation node. Populated | ||
| 547 | * on exit. | ||
| 548 | */ | ||
| 549 | static int | ||
| 550 | vmw_cmd_res_check(struct vmw_private *dev_priv, | ||
| 551 | struct vmw_sw_context *sw_context, | ||
| 552 | enum vmw_res_type res_type, | ||
| 553 | const struct vmw_user_resource_conv *converter, | ||
| 554 | uint32_t *id_loc, | ||
| 555 | struct vmw_resource_val_node **p_val) | ||
| 556 | { | ||
| 557 | return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, | ||
| 558 | converter, *id_loc, id_loc, p_val); | ||
| 559 | } | ||
| 560 | |||
| 561 | /** | ||
| 562 | * vmw_rebind_contexts - Rebind all resources previously bound to | ||
| 563 | * referenced contexts. | ||
| 564 | * | ||
| 565 | * @sw_context: Pointer to the software context. | ||
| 566 | * | ||
| 567 | * Rebind context binding points that have been scrubbed because of eviction. | ||
| 568 | */ | ||
| 569 | static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) | ||
| 570 | { | ||
| 571 | struct vmw_resource_val_node *val; | ||
| 572 | int ret; | ||
| 573 | |||
| 574 | list_for_each_entry(val, &sw_context->resource_list, head) { | ||
| 575 | if (likely(!val->staged_bindings)) | ||
| 576 | continue; | ||
| 577 | |||
| 578 | ret = vmw_context_rebind_all(val->res); | ||
| 579 | if (unlikely(ret != 0)) { | ||
| 580 | if (ret != -ERESTARTSYS) | ||
| 581 | DRM_ERROR("Failed to rebind context.\n"); | ||
| 582 | return ret; | ||
| 583 | } | ||
| 584 | } | ||
| 585 | |||
| 586 | return 0; | ||
| 587 | } | ||
| 588 | |||
| 589 | /** | ||
| 484 | * vmw_cmd_cid_check - Check a command header for valid context information. | 590 | * vmw_cmd_cid_check - Check a command header for valid context information. |
| 485 | * | 591 | * |
| 486 | * @dev_priv: Pointer to a device private structure. | 592 | * @dev_priv: Pointer to a device private structure. |
| @@ -496,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | |||
| 496 | { | 602 | { |
| 497 | struct vmw_cid_cmd { | 603 | struct vmw_cid_cmd { |
| 498 | SVGA3dCmdHeader header; | 604 | SVGA3dCmdHeader header; |
| 499 | __le32 cid; | 605 | uint32_t cid; |
| 500 | } *cmd; | 606 | } *cmd; |
| 501 | 607 | ||
| 502 | cmd = container_of(header, struct vmw_cid_cmd, header); | 608 | cmd = container_of(header, struct vmw_cid_cmd, header); |
| @@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
| 767 | struct vmw_relocation *reloc; | 873 | struct vmw_relocation *reloc; |
| 768 | int ret; | 874 | int ret; |
| 769 | 875 | ||
| 770 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 876 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
| 771 | if (unlikely(ret != 0)) { | 877 | if (unlikely(ret != 0)) { |
| 772 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 878 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
| 773 | return -EINVAL; | 879 | return -EINVAL; |
| @@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
| 828 | struct vmw_relocation *reloc; | 934 | struct vmw_relocation *reloc; |
| 829 | int ret; | 935 | int ret; |
| 830 | 936 | ||
| 831 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 937 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
| 832 | if (unlikely(ret != 0)) { | 938 | if (unlikely(ret != 0)) { |
| 833 | DRM_ERROR("Could not find or use GMR region.\n"); | 939 | DRM_ERROR("Could not find or use GMR region.\n"); |
| 834 | return -EINVAL; | 940 | return -EINVAL; |
| @@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
| 1127 | 1233 | ||
| 1128 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); | 1234 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); |
| 1129 | 1235 | ||
| 1130 | vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); | 1236 | vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, |
| 1237 | header); | ||
| 1131 | 1238 | ||
| 1132 | out_no_surface: | 1239 | out_no_surface: |
| 1133 | vmw_dmabuf_unreference(&vmw_bo); | 1240 | vmw_dmabuf_unreference(&vmw_bo); |
| @@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, | |||
| 1478 | &cmd->body.sid, NULL); | 1585 | &cmd->body.sid, NULL); |
| 1479 | } | 1586 | } |
| 1480 | 1587 | ||
| 1588 | |||
| 1589 | /** | ||
| 1590 | * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE | ||
| 1591 | * command | ||
| 1592 | * | ||
| 1593 | * @dev_priv: Pointer to a device private struct. | ||
| 1594 | * @sw_context: The software context being used for this batch. | ||
| 1595 | * @header: Pointer to the command header in the command stream. | ||
| 1596 | */ | ||
| 1597 | static int vmw_cmd_shader_define(struct vmw_private *dev_priv, | ||
| 1598 | struct vmw_sw_context *sw_context, | ||
| 1599 | SVGA3dCmdHeader *header) | ||
| 1600 | { | ||
| 1601 | struct vmw_shader_define_cmd { | ||
| 1602 | SVGA3dCmdHeader header; | ||
| 1603 | SVGA3dCmdDefineShader body; | ||
| 1604 | } *cmd; | ||
| 1605 | int ret; | ||
| 1606 | size_t size; | ||
| 1607 | |||
| 1608 | cmd = container_of(header, struct vmw_shader_define_cmd, | ||
| 1609 | header); | ||
| 1610 | |||
| 1611 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1612 | user_context_converter, &cmd->body.cid, | ||
| 1613 | NULL); | ||
| 1614 | if (unlikely(ret != 0)) | ||
| 1615 | return ret; | ||
| 1616 | |||
| 1617 | if (unlikely(!dev_priv->has_mob)) | ||
| 1618 | return 0; | ||
| 1619 | |||
| 1620 | size = cmd->header.size - sizeof(cmd->body); | ||
| 1621 | ret = vmw_compat_shader_add(sw_context->fp->shman, | ||
| 1622 | cmd->body.shid, cmd + 1, | ||
| 1623 | cmd->body.type, size, | ||
| 1624 | sw_context->fp->tfile, | ||
| 1625 | &sw_context->staged_shaders); | ||
| 1626 | if (unlikely(ret != 0)) | ||
| 1627 | return ret; | ||
| 1628 | |||
| 1629 | return vmw_resource_relocation_add(&sw_context->res_relocations, | ||
| 1630 | NULL, &cmd->header.id - | ||
| 1631 | sw_context->buf_start); | ||
| 1634 | } | ||
| 1635 | |||
| 1636 | /** | ||
| 1637 | * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY | ||
| 1638 | * command | ||
| 1639 | * | ||
| 1640 | * @dev_priv: Pointer to a device private struct. | ||
| 1641 | * @sw_context: The software context being used for this batch. | ||
| 1642 | * @header: Pointer to the command header in the command stream. | ||
| 1643 | */ | ||
| 1644 | static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, | ||
| 1645 | struct vmw_sw_context *sw_context, | ||
| 1646 | SVGA3dCmdHeader *header) | ||
| 1647 | { | ||
| 1648 | struct vmw_shader_destroy_cmd { | ||
| 1649 | SVGA3dCmdHeader header; | ||
| 1650 | SVGA3dCmdDestroyShader body; | ||
| 1651 | } *cmd; | ||
| 1652 | int ret; | ||
| 1653 | |||
| 1654 | cmd = container_of(header, struct vmw_shader_destroy_cmd, | ||
| 1655 | header); | ||
| 1656 | |||
| 1657 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1658 | user_context_converter, &cmd->body.cid, | ||
| 1659 | NULL); | ||
| 1660 | if (unlikely(ret != 0)) | ||
| 1661 | return ret; | ||
| 1662 | |||
| 1663 | if (unlikely(!dev_priv->has_mob)) | ||
| 1664 | return 0; | ||
| 1665 | |||
| 1666 | ret = vmw_compat_shader_remove(sw_context->fp->shman, | ||
| 1667 | cmd->body.shid, | ||
| 1668 | cmd->body.type, | ||
| 1669 | &sw_context->staged_shaders); | ||
| 1670 | if (unlikely(ret != 0)) | ||
| 1671 | return ret; | ||
| 1672 | |||
| 1673 | return vmw_resource_relocation_add(&sw_context->res_relocations, | ||
| 1674 | NULL, &cmd->header.id - | ||
| 1675 | sw_context->buf_start); | ||
| 1678 | } | ||
| 1679 | |||
| 1481 | /** | 1680 | /** |
| 1482 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | 1681 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
| 1483 | * command | 1682 | * command |
| @@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
| 1509 | if (dev_priv->has_mob) { | 1708 | if (dev_priv->has_mob) { |
| 1510 | struct vmw_ctx_bindinfo bi; | 1709 | struct vmw_ctx_bindinfo bi; |
| 1511 | struct vmw_resource_val_node *res_node; | 1710 | struct vmw_resource_val_node *res_node; |
| 1512 | 1711 | u32 shid = cmd->body.shid; | |
| 1513 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, | 1712 | |
| 1514 | user_shader_converter, | 1713 | if (shid != SVGA3D_INVALID_ID) |
| 1515 | &cmd->body.shid, &res_node); | 1714 | (void) vmw_compat_shader_lookup(sw_context->fp->shman, |
| 1715 | cmd->body.type, | ||
| 1716 | &shid); | ||
| 1717 | |||
| 1718 | ret = vmw_cmd_compat_res_check(dev_priv, sw_context, | ||
| 1719 | vmw_res_shader, | ||
| 1720 | user_shader_converter, | ||
| 1721 | shid, | ||
| 1722 | &cmd->body.shid, &res_node); | ||
| 1516 | if (unlikely(ret != 0)) | 1723 | if (unlikely(ret != 0)) |
| 1517 | return ret; | 1724 | return ret; |
| 1518 | 1725 | ||
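The SET_SHADER path above first tries to translate the legacy shader id through the compat shader manager and only then validates the (possibly translated) handle; the device id of the validated resource is later patched into the command at &cmd->body.shid by the relocation pass. A self-contained sketch of that translate-then-validate contract; the toy table below merely stands in for the manager's hash:

	#include <stdint.h>

	#define INVALID_ID 0xffffffffu

	/* Toy map from legacy id to TTM handle; slot 3 is populated. */
	static uint32_t compat_map[16] = { [3] = 0x1234 };

	/* Mirrors vmw_compat_shader_lookup(): returns 0 and rewrites *id
	 * on a hit, non-zero with *id untouched on a miss. */
	static int compat_lookup(uint32_t *id)
	{
		if (*id < 16 && compat_map[*id]) {
			*id = compat_map[*id];
			return 0;
		}
		return -1;
	}

	static uint32_t translate_shid(uint32_t shid)
	{
		if (shid != INVALID_ID)
			(void) compat_lookup(&shid); /* a miss is not an error */
		return shid;
	}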
| @@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
| 1527 | } | 1734 | } |
| 1528 | 1735 | ||
| 1529 | /** | 1736 | /** |
| 1737 | * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST | ||
| 1738 | * command | ||
| 1739 | * | ||
| 1740 | * @dev_priv: Pointer to a device private struct. | ||
| 1741 | * @sw_context: The software context being used for this batch. | ||
| 1742 | * @header: Pointer to the command header in the command stream. | ||
| 1743 | */ | ||
| 1744 | static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, | ||
| 1745 | struct vmw_sw_context *sw_context, | ||
| 1746 | SVGA3dCmdHeader *header) | ||
| 1747 | { | ||
| 1748 | struct vmw_set_shader_const_cmd { | ||
| 1749 | SVGA3dCmdHeader header; | ||
| 1750 | SVGA3dCmdSetShaderConst body; | ||
| 1751 | } *cmd; | ||
| 1752 | int ret; | ||
| 1753 | |||
| 1754 | cmd = container_of(header, struct vmw_set_shader_const_cmd, | ||
| 1755 | header); | ||
| 1756 | |||
| 1757 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1758 | user_context_converter, &cmd->body.cid, | ||
| 1759 | NULL); | ||
| 1760 | if (unlikely(ret != 0)) | ||
| 1761 | return ret; | ||
| 1762 | |||
| 1763 | if (dev_priv->has_mob) | ||
| 1764 | header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; | ||
| 1765 | |||
| 1766 | return 0; | ||
| 1767 | } | ||
| 1768 | |||
| 1769 | /** | ||
| 1530 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER | 1770 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER |
| 1531 | * command | 1771 | * command |
| 1532 | * | 1772 | * |
| @@ -1595,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | |||
| 1595 | return 0; | 1835 | return 0; |
| 1596 | } | 1836 | } |
| 1597 | 1837 | ||
| 1598 | static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | 1838 | static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { |
| 1599 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, | 1839 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, |
| 1600 | false, false, false), | 1840 | false, false, false), |
| 1601 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, | 1841 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, |
| @@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | |||
| 1634 | true, false, false), | 1874 | true, false, false), |
| 1635 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, | 1875 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, |
| 1636 | false, false, false), | 1876 | false, false, false), |
| 1637 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, | 1877 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, |
| 1638 | true, true, false), | 1878 | true, false, false), |
| 1639 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, | 1879 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, |
| 1640 | true, true, false), | 1880 | true, false, false), |
| 1641 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, | 1881 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, |
| 1642 | true, false, false), | 1882 | true, false, false), |
| 1643 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, | 1883 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, |
| 1644 | true, true, false), | 1884 | true, false, false), |
| 1645 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, | 1885 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, |
| 1646 | true, false, false), | 1886 | true, false, false), |
| 1647 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, | 1887 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, |
| @@ -1792,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
| 1792 | goto out_invalid; | 2032 | goto out_invalid; |
| 1793 | 2033 | ||
| 1794 | entry = &vmw_cmd_entries[cmd_id]; | 2034 | entry = &vmw_cmd_entries[cmd_id]; |
| 2035 | if (unlikely(!entry->func)) | ||
| 2036 | goto out_invalid; | ||
| 2037 | |||
| 1795 | if (unlikely(!entry->user_allow && !sw_context->kernel)) | 2038 | if (unlikely(!entry->user_allow && !sw_context->kernel)) |
| 1796 | goto out_privileged; | 2039 | goto out_privileged; |
| 1797 | 2040 | ||
| @@ -2171,7 +2414,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2171 | } else | 2414 | } else |
| 2172 | sw_context->kernel = true; | 2415 | sw_context->kernel = true; |
| 2173 | 2416 | ||
| 2174 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | 2417 | sw_context->fp = vmw_fpriv(file_priv); |
| 2175 | sw_context->cur_reloc = 0; | 2418 | sw_context->cur_reloc = 0; |
| 2176 | sw_context->cur_val_buf = 0; | 2419 | sw_context->cur_val_buf = 0; |
| 2177 | sw_context->fence_flags = 0; | 2420 | sw_context->fence_flags = 0; |
| @@ -2188,16 +2431,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2188 | goto out_unlock; | 2431 | goto out_unlock; |
| 2189 | sw_context->res_ht_initialized = true; | 2432 | sw_context->res_ht_initialized = true; |
| 2190 | } | 2433 | } |
| 2434 | INIT_LIST_HEAD(&sw_context->staged_shaders); | ||
| 2191 | 2435 | ||
| 2192 | INIT_LIST_HEAD(&resource_list); | 2436 | INIT_LIST_HEAD(&resource_list); |
| 2193 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, | 2437 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
| 2194 | command_size); | 2438 | command_size); |
| 2195 | if (unlikely(ret != 0)) | 2439 | if (unlikely(ret != 0)) |
| 2196 | goto out_err; | 2440 | goto out_err_nores; |
| 2197 | 2441 | ||
| 2198 | ret = vmw_resources_reserve(sw_context); | 2442 | ret = vmw_resources_reserve(sw_context); |
| 2199 | if (unlikely(ret != 0)) | 2443 | if (unlikely(ret != 0)) |
| 2200 | goto out_err; | 2444 | goto out_err_nores; |
| 2201 | 2445 | ||
| 2202 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); | 2446 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); |
| 2203 | if (unlikely(ret != 0)) | 2447 | if (unlikely(ret != 0)) |
| @@ -2225,6 +2469,12 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2225 | goto out_err; | 2469 | goto out_err; |
| 2226 | } | 2470 | } |
| 2227 | 2471 | ||
| 2472 | if (dev_priv->has_mob) { | ||
| 2473 | ret = vmw_rebind_contexts(sw_context); | ||
| 2474 | if (unlikely(ret != 0)) | ||
| 2475 | goto out_unlock_binding; | ||
| 2476 | } | ||
| 2477 | |||
| 2228 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 2478 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
| 2229 | if (unlikely(cmd == NULL)) { | 2479 | if (unlikely(cmd == NULL)) { |
| 2230 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 2480 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
| @@ -2276,6 +2526,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2276 | } | 2526 | } |
| 2277 | 2527 | ||
| 2278 | list_splice_init(&sw_context->resource_list, &resource_list); | 2528 | list_splice_init(&sw_context->resource_list, &resource_list); |
| 2529 | vmw_compat_shaders_commit(sw_context->fp->shman, | ||
| 2530 | &sw_context->staged_shaders); | ||
| 2279 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2531 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
| 2280 | 2532 | ||
| 2281 | /* | 2533 | /* |
| @@ -2289,10 +2541,11 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2289 | out_unlock_binding: | 2541 | out_unlock_binding: |
| 2290 | mutex_unlock(&dev_priv->binding_mutex); | 2542 | mutex_unlock(&dev_priv->binding_mutex); |
| 2291 | out_err: | 2543 | out_err: |
| 2292 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
| 2293 | vmw_free_relocations(sw_context); | ||
| 2294 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); | 2544 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); |
| 2545 | out_err_nores: | ||
| 2295 | vmw_resource_list_unreserve(&sw_context->resource_list, true); | 2546 | vmw_resource_list_unreserve(&sw_context->resource_list, true); |
| 2547 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
| 2548 | vmw_free_relocations(sw_context); | ||
| 2296 | vmw_clear_validations(sw_context); | 2549 | vmw_clear_validations(sw_context); |
| 2297 | if (unlikely(dev_priv->pinned_bo != NULL && | 2550 | if (unlikely(dev_priv->pinned_bo != NULL && |
| 2298 | !dev_priv->query_cid_valid)) | 2551 | !dev_priv->query_cid_valid)) |
| @@ -2301,6 +2554,8 @@ out_unlock: | |||
| 2301 | list_splice_init(&sw_context->resource_list, &resource_list); | 2554 | list_splice_init(&sw_context->resource_list, &resource_list); |
| 2302 | error_resource = sw_context->error_resource; | 2555 | error_resource = sw_context->error_resource; |
| 2303 | sw_context->error_resource = NULL; | 2556 | sw_context->error_resource = NULL; |
| 2557 | vmw_compat_shaders_revert(sw_context->fp->shman, | ||
| 2558 | &sw_context->staged_shaders); | ||
| 2304 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2559 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
| 2305 | 2560 | ||
| 2306 | /* | 2561 | /* |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 116c49736763..47b70949bf3a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
| @@ -29,12 +29,18 @@ | |||
| 29 | #include <drm/vmwgfx_drm.h> | 29 | #include <drm/vmwgfx_drm.h> |
| 30 | #include "vmwgfx_kms.h" | 30 | #include "vmwgfx_kms.h" |
| 31 | 31 | ||
| 32 | struct svga_3d_compat_cap { | ||
| 33 | SVGA3dCapsRecordHeader header; | ||
| 34 | SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX]; | ||
| 35 | }; | ||
| 36 | |||
| 32 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, | 37 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, |
| 33 | struct drm_file *file_priv) | 38 | struct drm_file *file_priv) |
| 34 | { | 39 | { |
| 35 | struct vmw_private *dev_priv = vmw_priv(dev); | 40 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 36 | struct drm_vmw_getparam_arg *param = | 41 | struct drm_vmw_getparam_arg *param = |
| 37 | (struct drm_vmw_getparam_arg *)data; | 42 | (struct drm_vmw_getparam_arg *)data; |
| 43 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
| 38 | 44 | ||
| 39 | switch (param->param) { | 45 | switch (param->param) { |
| 40 | case DRM_VMW_PARAM_NUM_STREAMS: | 46 | case DRM_VMW_PARAM_NUM_STREAMS: |
| @@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 60 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 66 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| 61 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; | 67 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; |
| 62 | 68 | ||
| 69 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { | ||
| 70 | param->value = SVGA3D_HWVERSION_WS8_B1; | ||
| 71 | break; | ||
| 72 | } | ||
| 73 | |||
| 63 | param->value = | 74 | param->value = |
| 64 | ioread32(fifo_mem + | 75 | ioread32(fifo_mem + |
| 65 | ((fifo->capabilities & | 76 | ((fifo->capabilities & |
| @@ -69,19 +80,31 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 69 | break; | 80 | break; |
| 70 | } | 81 | } |
| 71 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: | 82 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: |
| 72 | param->value = dev_priv->memory_size; | 83 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && |
| 84 | !vmw_fp->gb_aware) | ||
| 85 | param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2; | ||
| 86 | else | ||
| 87 | param->value = dev_priv->memory_size; | ||
| 73 | break; | 88 | break; |
| 74 | case DRM_VMW_PARAM_3D_CAPS_SIZE: | 89 | case DRM_VMW_PARAM_3D_CAPS_SIZE: |
| 75 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) | 90 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && |
| 76 | param->value = SVGA3D_DEVCAP_MAX; | 91 | vmw_fp->gb_aware) |
| 92 | param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); | ||
| 93 | else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) | ||
| 94 | param->value = sizeof(struct svga_3d_compat_cap) + | ||
| 95 | sizeof(uint32_t); | ||
| 77 | else | 96 | else |
| 78 | param->value = (SVGA_FIFO_3D_CAPS_LAST - | 97 | param->value = (SVGA_FIFO_3D_CAPS_LAST - |
| 79 | SVGA_FIFO_3D_CAPS + 1); | 98 | SVGA_FIFO_3D_CAPS + 1) * |
| 80 | param->value *= sizeof(uint32_t); | 99 | sizeof(uint32_t); |
| 81 | break; | 100 | break; |
| 82 | case DRM_VMW_PARAM_MAX_MOB_MEMORY: | 101 | case DRM_VMW_PARAM_MAX_MOB_MEMORY: |
| 102 | vmw_fp->gb_aware = true; | ||
| 83 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; | 103 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; |
| 84 | break; | 104 | break; |
| 105 | case DRM_VMW_PARAM_MAX_MOB_SIZE: | ||
| 106 | param->value = dev_priv->max_mob_size; | ||
| 107 | break; | ||
| 85 | default: | 108 | default: |
| 86 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 109 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
| 87 | param->param); | 110 | param->param); |
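Note the negotiation the hunk above encodes: querying DRM_VMW_PARAM_MAX_MOB_MEMORY latches the file as guest-backed aware, and from then on the caps-size and max-surface-memory answers change shape. A compact model of the size selection; the constants are stand-ins, not the SVGA values:

	#include <stdbool.h>
	#include <stdint.h>

	#define DEVCAP_MAX	 260u	/* stands in for SVGA3D_DEVCAP_MAX */
	#define COMPAT_CAP_BYTES 128u	/* stands in for sizeof(struct
					 * svga_3d_compat_cap) + 4 */

	static uint64_t caps_size(bool has_gb_objects, bool gb_aware)
	{
		if (has_gb_objects && gb_aware)
			return DEVCAP_MAX * sizeof(uint32_t);
		if (has_gb_objects)
			return COMPAT_CAP_BYTES; /* legacy record + terminator */
		return 0; /* FIFO caps range, elided in this sketch */
	}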
| @@ -91,6 +114,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 91 | return 0; | 114 | return 0; |
| 92 | } | 115 | } |
| 93 | 116 | ||
| 117 | static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, | ||
| 118 | size_t size) | ||
| 119 | { | ||
| 120 | struct svga_3d_compat_cap *compat_cap = | ||
| 121 | (struct svga_3d_compat_cap *) bounce; | ||
| 122 | unsigned int i; | ||
| 123 | size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs); | ||
| 124 | unsigned int max_size; | ||
| 125 | |||
| 126 | if (size < pair_offset) | ||
| 127 | return -EINVAL; | ||
| 128 | |||
| 129 | max_size = (size - pair_offset) / sizeof(SVGA3dCapPair); | ||
| 130 | |||
| 131 | if (max_size > SVGA3D_DEVCAP_MAX) | ||
| 132 | max_size = SVGA3D_DEVCAP_MAX; | ||
| 133 | |||
| 134 | compat_cap->header.length = | ||
| 135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); | ||
| 136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; | ||
| 137 | |||
| 138 | mutex_lock(&dev_priv->hw_mutex); | ||
| 139 | for (i = 0; i < max_size; ++i) { | ||
| 140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | ||
| 141 | compat_cap->pairs[i][0] = i; | ||
| 142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
| 143 | } | ||
| 144 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 145 | |||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
| 94 | 149 | ||
| 95 | int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | 150 | int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, |
| 96 | struct drm_file *file_priv) | 151 | struct drm_file *file_priv) |
| @@ -104,41 +159,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
| 104 | void *bounce; | 159 | void *bounce; |
| 105 | int ret; | 160 | int ret; |
| 106 | bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); | 161 | bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); |
| 162 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
| 107 | 163 | ||
| 108 | if (unlikely(arg->pad64 != 0)) { | 164 | if (unlikely(arg->pad64 != 0)) { |
| 109 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); | 165 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); |
| 110 | return -EINVAL; | 166 | return -EINVAL; |
| 111 | } | 167 | } |
| 112 | 168 | ||
| 113 | if (gb_objects) | 169 | if (gb_objects && vmw_fp->gb_aware) |
| 114 | size = SVGA3D_DEVCAP_MAX; | 170 | size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); |
| 171 | else if (gb_objects) | ||
| 172 | size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t); | ||
| 115 | else | 173 | else |
| 116 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); | 174 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) * |
| 117 | 175 | sizeof(uint32_t); | |
| 118 | size *= sizeof(uint32_t); | ||
| 119 | 176 | ||
| 120 | if (arg->max_size < size) | 177 | if (arg->max_size < size) |
| 121 | size = arg->max_size; | 178 | size = arg->max_size; |
| 122 | 179 | ||
| 123 | bounce = vmalloc(size); | 180 | bounce = vzalloc(size); |
| 124 | if (unlikely(bounce == NULL)) { | 181 | if (unlikely(bounce == NULL)) { |
| 125 | DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); | 182 | DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); |
| 126 | return -ENOMEM; | 183 | return -ENOMEM; |
| 127 | } | 184 | } |
| 128 | 185 | ||
| 129 | if (gb_objects) { | 186 | if (gb_objects && vmw_fp->gb_aware) { |
| 130 | int i; | 187 | int i, num; |
| 131 | uint32_t *bounce32 = (uint32_t *) bounce; | 188 | uint32_t *bounce32 = (uint32_t *) bounce; |
| 132 | 189 | ||
| 190 | num = size / sizeof(uint32_t); | ||
| 191 | if (num > SVGA3D_DEVCAP_MAX) | ||
| 192 | num = SVGA3D_DEVCAP_MAX; | ||
| 193 | |||
| 133 | mutex_lock(&dev_priv->hw_mutex); | 194 | mutex_lock(&dev_priv->hw_mutex); |
| 134 | for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { | 195 | for (i = 0; i < num; ++i) { |
| 135 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | 196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); |
| 136 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
| 137 | } | 198 | } |
| 138 | mutex_unlock(&dev_priv->hw_mutex); | 199 | mutex_unlock(&dev_priv->hw_mutex); |
| 139 | 200 | } else if (gb_objects) { | |
| 201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); | ||
| 202 | if (unlikely(ret != 0)) | ||
| 203 | goto out_err; | ||
| 140 | } else { | 204 | } else { |
| 141 | |||
| 142 | fifo_mem = dev_priv->mmio_virt; | 205 | fifo_mem = dev_priv->mmio_virt; |
| 143 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | 206 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); |
| 144 | } | 207 | } |
| @@ -146,6 +209,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
| 146 | ret = copy_to_user(buffer, bounce, size); | 209 | ret = copy_to_user(buffer, bounce, size); |
| 147 | if (ret) | 210 | if (ret) |
| 148 | ret = -EFAULT; | 211 | ret = -EFAULT; |
| 212 | out_err: | ||
| 149 | vfree(bounce); | 213 | vfree(bounce); |
| 150 | 214 | ||
| 151 | if (unlikely(ret != 0)) | 215 | if (unlikely(ret != 0)) |
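vmw_fill_compat_cap above emits the legacy caps layout: one SVGA3DCAPS_RECORD_DEVCAPS record whose header length counts 32-bit words including the header itself, followed by (index, value) pairs; the extra uint32_t reserved by the size calculations is the zero terminator, which the switch to vzalloc() guarantees. A sketch of how a legacy client would walk such a record, using local stand-in types rather than the SVGA headers:

	#include <stdint.h>
	#include <stdio.h>

	struct cap_hdr { uint32_t length; uint32_t type; }; /* length in words */
	typedef uint32_t cap_pair[2];	/* [0] = devcap index, [1] = value */

	static void dump_caps(const uint32_t *buf)
	{
		const struct cap_hdr *hdr = (const struct cap_hdr *)buf;
		const cap_pair *pairs = (const cap_pair *)(hdr + 1);
		uint32_t npairs = (hdr->length - 2) / 2; /* header is 2 words */

		for (uint32_t i = 0; i < npairs; ++i)
			printf("cap %u = 0x%08x\n", pairs[i][0], pairs[i][1]);
	}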
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 4910e7b81811..04a64b8cd3cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | |||
| @@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, | |||
| 134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 135 | if (unlikely(cmd == NULL)) { | 135 | if (unlikely(cmd == NULL)) { |
| 136 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | 136 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); |
| 137 | ret = -ENOMEM; | ||
| 137 | goto out_no_fifo; | 138 | goto out_no_fifo; |
| 138 | } | 139 | } |
| 139 | 140 | ||
| @@ -187,18 +188,20 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | |||
| 187 | 188 | ||
| 188 | bo = otable->page_table->pt_bo; | 189 | bo = otable->page_table->pt_bo; |
| 189 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 190 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 190 | if (unlikely(cmd == NULL)) | 191 | if (unlikely(cmd == NULL)) { |
| 191 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | 192 | DRM_ERROR("Failed reserving FIFO space for OTable " |
| 192 | 193 | "takedown.\n"); | |
| 193 | memset(cmd, 0, sizeof(*cmd)); | 194 | } else { |
| 194 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; | 195 | memset(cmd, 0, sizeof(*cmd)); |
| 195 | cmd->header.size = sizeof(cmd->body); | 196 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; |
| 196 | cmd->body.type = type; | 197 | cmd->header.size = sizeof(cmd->body); |
| 197 | cmd->body.baseAddress = 0; | 198 | cmd->body.type = type; |
| 198 | cmd->body.sizeInBytes = 0; | 199 | cmd->body.baseAddress = 0; |
| 199 | cmd->body.validSizeInBytes = 0; | 200 | cmd->body.sizeInBytes = 0; |
| 200 | cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; | 201 | cmd->body.validSizeInBytes = 0; |
| 201 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 202 | cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; |
| 203 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 204 | } | ||
| 202 | 205 | ||
| 203 | if (bo) { | 206 | if (bo) { |
| 204 | int ret; | 207 | int ret; |
| @@ -561,11 +564,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, | |||
| 561 | if (unlikely(cmd == NULL)) { | 564 | if (unlikely(cmd == NULL)) { |
| 562 | DRM_ERROR("Failed reserving FIFO space for Memory " | 565 | DRM_ERROR("Failed reserving FIFO space for Memory " |
| 563 | "Object unbinding.\n"); | 566 | "Object unbinding.\n"); |
| 567 | } else { | ||
| 568 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; | ||
| 569 | cmd->header.size = sizeof(cmd->body); | ||
| 570 | cmd->body.mobid = mob->id; | ||
| 571 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 564 | } | 572 | } |
| 565 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; | ||
| 566 | cmd->header.size = sizeof(cmd->body); | ||
| 567 | cmd->body.mobid = mob->id; | ||
| 568 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 569 | if (bo) { | 573 | if (bo) { |
| 570 | vmw_fence_single_bo(bo, NULL); | 574 | vmw_fence_single_bo(bo, NULL); |
| 571 | ttm_bo_unreserve(bo); | 575 | ttm_bo_unreserve(bo); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6fdd82d42f65..9757b57f8388 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | |||
| 88 | return res; | 88 | return res; |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | struct vmw_resource * | ||
| 92 | vmw_resource_reference_unless_doomed(struct vmw_resource *res) | ||
| 93 | { | ||
| 94 | return kref_get_unless_zero(&res->kref) ? res : NULL; | ||
| 95 | } | ||
| 91 | 96 | ||
| 92 | /** | 97 | /** |
| 93 | * vmw_resource_release_id - release a resource id to the id manager. | 98 | * vmw_resource_release_id - release a resource id to the id manager. |
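vmw_resource_reference_unless_doomed() wraps kref_get_unless_zero(): a resource found on a context's binding list may already have dropped its last reference and be partway through vmw_resource_release(), and taking a plain reference there would resurrect a dying object. A user-space model of the pattern, with C11 atomics standing in for the kref API:

	#include <stdatomic.h>
	#include <stddef.h>

	struct res { atomic_int refcount; };

	/* Take a reference only if the count has not already hit zero;
	 * a NULL return means the object is doomed and must be skipped. */
	static struct res *ref_unless_doomed(struct res *r)
	{
		int old = atomic_load(&r->refcount);

		while (old != 0)
			if (atomic_compare_exchange_weak(&r->refcount,
							 &old, old + 1))
				return r;
		return NULL;
	}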
| @@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref) | |||
| 136 | vmw_dmabuf_unreference(&res->backup); | 141 | vmw_dmabuf_unreference(&res->backup); |
| 137 | } | 142 | } |
| 138 | 143 | ||
| 139 | if (likely(res->hw_destroy != NULL)) | 144 | if (likely(res->hw_destroy != NULL)) { |
| 140 | res->hw_destroy(res); | 145 | res->hw_destroy(res); |
| 146 | mutex_lock(&dev_priv->binding_mutex); | ||
| 147 | vmw_context_binding_res_list_kill(&res->binding_head); | ||
| 148 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 149 | } | ||
| 141 | 150 | ||
| 142 | id = res->id; | 151 | id = res->id; |
| 143 | if (res->res_free != NULL) | 152 | if (res->res_free != NULL) |
| @@ -418,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
| 418 | INIT_LIST_HEAD(&vmw_bo->res_list); | 427 | INIT_LIST_HEAD(&vmw_bo->res_list); |
| 419 | 428 | ||
| 420 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | 429 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
| 421 | (user) ? ttm_bo_type_device : | 430 | ttm_bo_type_device, placement, |
| 422 | ttm_bo_type_kernel, placement, | ||
| 423 | 0, interruptible, | 431 | 0, interruptible, |
| 424 | NULL, acc_size, NULL, bo_free); | 432 | NULL, acc_size, NULL, bo_free); |
| 425 | return ret; | 433 | return ret; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 1457ec4b7125..ee3856578a12 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
| @@ -29,6 +29,8 @@ | |||
| 29 | #include "vmwgfx_resource_priv.h" | 29 | #include "vmwgfx_resource_priv.h" |
| 30 | #include "ttm/ttm_placement.h" | 30 | #include "ttm/ttm_placement.h" |
| 31 | 31 | ||
| 32 | #define VMW_COMPAT_SHADER_HT_ORDER 12 | ||
| 33 | |||
| 32 | struct vmw_shader { | 34 | struct vmw_shader { |
| 33 | struct vmw_resource res; | 35 | struct vmw_resource res; |
| 34 | SVGA3dShaderType type; | 36 | SVGA3dShaderType type; |
| @@ -40,6 +42,50 @@ struct vmw_user_shader { | |||
| 40 | struct vmw_shader shader; | 42 | struct vmw_shader shader; |
| 41 | }; | 43 | }; |
| 42 | 44 | ||
| 45 | /** | ||
| 46 | * enum vmw_compat_shader_state - Staging state for compat shaders | ||
| 47 | */ | ||
| 48 | enum vmw_compat_shader_state { | ||
| 49 | VMW_COMPAT_COMMITED, | ||
| 50 | VMW_COMPAT_ADD, | ||
| 51 | VMW_COMPAT_DEL | ||
| 52 | }; | ||
| 53 | |||
| 54 | /** | ||
| 55 | * struct vmw_compat_shader - Metadata for compat shaders. | ||
| 56 | * | ||
| 57 | * @handle: The TTM handle of the guest backed shader. | ||
| 58 | * @tfile: The struct ttm_object_file the guest backed shader is registered | ||
| 59 | * with. | ||
| 60 | * @hash: Hash item for lookup. | ||
| 61 | * @head: List head for staging lists or the compat shader manager list. | ||
| 62 | * @state: Staging state. | ||
| 63 | * | ||
| 64 | * The structure is protected by the cmdbuf mutex. | ||
| 65 | */ | ||
| 66 | struct vmw_compat_shader { | ||
| 67 | u32 handle; | ||
| 68 | struct ttm_object_file *tfile; | ||
| 69 | struct drm_hash_item hash; | ||
| 70 | struct list_head head; | ||
| 71 | enum vmw_compat_shader_state state; | ||
| 72 | }; | ||
| 73 | |||
| 74 | /** | ||
| 75 | * struct vmw_compat_shader_manager - Compat shader manager. | ||
| 76 | * | ||
| 77 | * @shaders: Hash table containing staged and committed compat shaders | ||
| 78 | * @list: List of committed shaders. | ||
| 79 | * @dev_priv: Pointer to a device private structure. | ||
| 80 | * | ||
| 81 | * @shaders and @list are protected by the cmdbuf mutex for now. | ||
| 82 | */ | ||
| 83 | struct vmw_compat_shader_manager { | ||
| 84 | struct drm_open_hash shaders; | ||
| 85 | struct list_head list; | ||
| 86 | struct vmw_private *dev_priv; | ||
| 87 | }; | ||
| 88 | |||
| 43 | static void vmw_user_shader_free(struct vmw_resource *res); | 89 | static void vmw_user_shader_free(struct vmw_resource *res); |
| 44 | static struct vmw_resource * | 90 | static struct vmw_resource * |
| 45 | vmw_user_shader_base_to_res(struct ttm_base_object *base); | 91 | vmw_user_shader_base_to_res(struct ttm_base_object *base); |
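The manager added above keys its hash table on the legacy (user_key, shader_type) pair packed into a single value: the user key occupies the low 24 bits (vmw_compat_shader_add() rejects anything wider) and the type sits above them. A self-contained sketch of the packing:

	#include <stdint.h>

	/* Same packing as the lookup/add/remove paths below. */
	static inline uint32_t compat_key(uint32_t user_key, uint32_t type)
	{
		return user_key | (type << 24);
	}

	static inline uint32_t compat_key_id(uint32_t key)
	{
		return key & 0x00ffffffu;
	}

	static inline uint32_t compat_key_type(uint32_t key)
	{
		return key >> 24;
	}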
| @@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) | |||
| 258 | return 0; | 304 | return 0; |
| 259 | 305 | ||
| 260 | mutex_lock(&dev_priv->binding_mutex); | 306 | mutex_lock(&dev_priv->binding_mutex); |
| 261 | vmw_context_binding_res_list_kill(&res->binding_head); | 307 | vmw_context_binding_res_list_scrub(&res->binding_head); |
| 262 | 308 | ||
| 263 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 309 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 264 | if (unlikely(cmd == NULL)) { | 310 | if (unlikely(cmd == NULL)) { |
| @@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | |||
| 325 | TTM_REF_USAGE); | 371 | TTM_REF_USAGE); |
| 326 | } | 372 | } |
| 327 | 373 | ||
| 374 | static int vmw_shader_alloc(struct vmw_private *dev_priv, | ||
| 375 | struct vmw_dma_buffer *buffer, | ||
| 376 | size_t shader_size, | ||
| 377 | size_t offset, | ||
| 378 | SVGA3dShaderType shader_type, | ||
| 379 | struct ttm_object_file *tfile, | ||
| 380 | u32 *handle) | ||
| 381 | { | ||
| 382 | struct vmw_user_shader *ushader; | ||
| 383 | struct vmw_resource *res, *tmp; | ||
| 384 | int ret; | ||
| 385 | |||
| 386 | /* | ||
| 387 | * Approximate idr memory usage with 128 bytes. It will be limited | ||
| 388 | * by maximum number of shaders anyway. | ||
| 389 | */ | ||
| 390 | if (unlikely(vmw_user_shader_size == 0)) | ||
| 391 | vmw_user_shader_size = | ||
| 392 | ttm_round_pot(sizeof(struct vmw_user_shader)) + 128; | ||
| 393 | |||
| 394 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
| 395 | vmw_user_shader_size, | ||
| 396 | false, true); | ||
| 397 | if (unlikely(ret != 0)) { | ||
| 398 | if (ret != -ERESTARTSYS) | ||
| 399 | DRM_ERROR("Out of graphics memory for shader " | ||
| 400 | "creation.\n"); | ||
| 401 | goto out; | ||
| 402 | } | ||
| 403 | |||
| 404 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); | ||
| 405 | if (unlikely(ushader == NULL)) { | ||
| 406 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
| 407 | vmw_user_shader_size); | ||
| 408 | ret = -ENOMEM; | ||
| 409 | goto out; | ||
| 410 | } | ||
| 411 | |||
| 412 | res = &ushader->shader.res; | ||
| 413 | ushader->base.shareable = false; | ||
| 414 | ushader->base.tfile = NULL; | ||
| 415 | |||
| 416 | /* | ||
| 417 | * From here on, the destructor takes over resource freeing. | ||
| 418 | */ | ||
| 419 | |||
| 420 | ret = vmw_gb_shader_init(dev_priv, res, shader_size, | ||
| 421 | offset, shader_type, buffer, | ||
| 422 | vmw_user_shader_free); | ||
| 423 | if (unlikely(ret != 0)) | ||
| 424 | goto out; | ||
| 425 | |||
| 426 | tmp = vmw_resource_reference(res); | ||
| 427 | ret = ttm_base_object_init(tfile, &ushader->base, false, | ||
| 428 | VMW_RES_SHADER, | ||
| 429 | &vmw_user_shader_base_release, NULL); | ||
| 430 | |||
| 431 | if (unlikely(ret != 0)) { | ||
| 432 | vmw_resource_unreference(&tmp); | ||
| 433 | goto out_err; | ||
| 434 | } | ||
| 435 | |||
| 436 | if (handle) | ||
| 437 | *handle = ushader->base.hash.key; | ||
| 438 | out_err: | ||
| 439 | vmw_resource_unreference(&res); | ||
| 440 | out: | ||
| 441 | return ret; | ||
| 442 | } | ||
| 443 | |||
| 444 | |||
| 328 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | 445 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, |
| 329 | struct drm_file *file_priv) | 446 | struct drm_file *file_priv) |
| 330 | { | 447 | { |
| 331 | struct vmw_private *dev_priv = vmw_priv(dev); | 448 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 332 | struct vmw_user_shader *ushader; | ||
| 333 | struct vmw_resource *res; | ||
| 334 | struct vmw_resource *tmp; | ||
| 335 | struct drm_vmw_shader_create_arg *arg = | 449 | struct drm_vmw_shader_create_arg *arg = |
| 336 | (struct drm_vmw_shader_create_arg *)data; | 450 | (struct drm_vmw_shader_create_arg *)data; |
| 337 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 451 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
| @@ -373,69 +487,326 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | |||
| 373 | goto out_bad_arg; | 487 | goto out_bad_arg; |
| 374 | } | 488 | } |
| 375 | 489 | ||
| 376 | /* | 490 | ret = ttm_read_lock(&vmaster->lock, true); |
| 377 | * Approximate idr memory usage with 128 bytes. It will be limited | 491 | if (unlikely(ret != 0)) |
| 378 | * by maximum number_of shaders anyway. | 492 | goto out_bad_arg; |
| 379 | */ | ||
| 380 | 493 | ||
| 381 | if (unlikely(vmw_user_shader_size == 0)) | 494 | ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, |
| 382 | vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) | 495 | shader_type, tfile, &arg->shader_handle); |
| 383 | + 128; | ||
| 384 | 496 | ||
| 385 | ret = ttm_read_lock(&vmaster->lock, true); | 497 | ttm_read_unlock(&vmaster->lock); |
| 498 | out_bad_arg: | ||
| 499 | vmw_dmabuf_unreference(&buffer); | ||
| 500 | return ret; | ||
| 501 | } | ||
| 502 | |||
| 503 | /** | ||
| 504 | * vmw_compat_shader_lookup - Look up a compat shader | ||
| 505 | * | ||
| 506 | * @man: Pointer to the compat shader manager. | ||
| 507 | * @shader_type: The shader type which, combined with the user_key, | ||
| 508 | * identifies the shader. | ||
| 509 | * @user_key: On entry, points to the user key. On successful exit, it | ||
| 510 | * contains the guest-backed shader's TTM handle. | ||
| 511 | * | ||
| 512 | * Returns 0 on success. Non-zero on failure, in which case the value pointed | ||
| 513 | * to by @user_key is unmodified. | ||
| 514 | */ | ||
| 515 | int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, | ||
| 516 | SVGA3dShaderType shader_type, | ||
| 517 | u32 *user_key) | ||
| 518 | { | ||
| 519 | struct drm_hash_item *hash; | ||
| 520 | int ret; | ||
| 521 | unsigned long key = *user_key | (shader_type << 24); | ||
| 522 | |||
| 523 | ret = drm_ht_find_item(&man->shaders, key, &hash); | ||
| 386 | if (unlikely(ret != 0)) | 524 | if (unlikely(ret != 0)) |
| 387 | return ret; | 525 | return ret; |
| 388 | 526 | ||
| 389 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | 527 | *user_key = drm_hash_entry(hash, struct vmw_compat_shader, |
| 390 | vmw_user_shader_size, | 528 | hash)->handle; |
| 391 | false, true); | 529 | |
| 392 | if (unlikely(ret != 0)) { | 530 | return 0; |
| 393 | if (ret != -ERESTARTSYS) | 531 | } |
| 394 | DRM_ERROR("Out of graphics memory for shader" | 532 | |
| 395 | " creation.\n"); | 533 | /** |
| 396 | goto out_unlock; | 534 | * vmw_compat_shader_free - Free a compat shader. |
| 535 | * | ||
| 536 | * @man: Pointer to the compat shader manager. | ||
| 537 | * @entry: Pointer to a struct vmw_compat_shader. | ||
| 538 | * | ||
| 539 | * Frees a struct vmw_compat_shader entry and drops its reference to the | ||
| 540 | * guest backed shader. | ||
| 541 | */ | ||
| 542 | static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man, | ||
| 543 | struct vmw_compat_shader *entry) | ||
| 544 | { | ||
| 545 | list_del(&entry->head); | ||
| 546 | WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash)); | ||
| 547 | WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle, | ||
| 548 | TTM_REF_USAGE)); | ||
| 549 | kfree(entry); | ||
| 550 | } | ||
| 551 | |||
| 552 | /** | ||
| 553 | * vmw_compat_shaders_commit - Commit a list of compat shader actions. | ||
| 554 | * | ||
| 555 | * @man: Pointer to the compat shader manager. | ||
| 556 | * @list: Caller's list of compat shader actions. | ||
| 557 | * | ||
| 558 | * This function commits a list of compat shader additions or removals. | ||
| 559 | * It is typically called when the execbuf ioctl call triggering these | ||
| 560 | * actions has committed the fifo contents to the device. | ||
| 561 | */ | ||
| 562 | void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, | ||
| 563 | struct list_head *list) | ||
| 564 | { | ||
| 565 | struct vmw_compat_shader *entry, *next; | ||
| 566 | |||
| 567 | list_for_each_entry_safe(entry, next, list, head) { | ||
| 568 | list_del(&entry->head); | ||
| 569 | switch (entry->state) { | ||
| 570 | case VMW_COMPAT_ADD: | ||
| 571 | entry->state = VMW_COMPAT_COMMITED; | ||
| 572 | list_add_tail(&entry->head, &man->list); | ||
| 573 | break; | ||
| 574 | case VMW_COMPAT_DEL: | ||
| 575 | ttm_ref_object_base_unref(entry->tfile, entry->handle, | ||
| 576 | TTM_REF_USAGE); | ||
| 577 | kfree(entry); | ||
| 578 | break; | ||
| 579 | default: | ||
| 580 | BUG(); | ||
| 581 | break; | ||
| 582 | } | ||
| 397 | } | 583 | } |
| 584 | } | ||
| 398 | 585 | ||
| 399 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); | 586 | /** |
| 400 | if (unlikely(ushader == NULL)) { | 587 | * vmw_compat_shaders_revert - Revert a list of compat shader actions |
| 401 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | 588 | * |
| 402 | vmw_user_shader_size); | 589 | * @man: Pointer to the compat shader manager. |
| 403 | ret = -ENOMEM; | 590 | * @list: Caller's list of compat shader actions. |
| 404 | goto out_unlock; | 591 | * |
| 592 | * This function reverts a list of compat shader additions or removals. | ||
| 593 | * It is typically called when the execbuf ioctl call triggering these | ||
| 594 | * actions failed for some reason, and the command stream was never | ||
| 595 | * submitted. | ||
| 596 | */ | ||
| 597 | void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, | ||
| 598 | struct list_head *list) | ||
| 599 | { | ||
| 600 | struct vmw_compat_shader *entry, *next; | ||
| 602 | |||
| 603 | list_for_each_entry_safe(entry, next, list, head) { | ||
| 604 | switch (entry->state) { | ||
| 605 | case VMW_COMPAT_ADD: | ||
| 606 | vmw_compat_shader_free(man, entry); | ||
| 607 | break; | ||
| 608 | case VMW_COMPAT_DEL: | ||
| 609 | (void) drm_ht_insert_item(&man->shaders, &entry->hash); | ||
| 610 | list_del(&entry->head); | ||
| 611 | list_add_tail(&entry->head, &man->list); | ||
| 612 | entry->state = VMW_COMPAT_COMMITED; | ||
| 613 | break; | ||
| 614 | default: | ||
| 615 | BUG(); | ||
| 616 | break; | ||
| 617 | } | ||
| 405 | } | 618 | } |
| 619 | } | ||
| 406 | 620 | ||
| 407 | res = &ushader->shader.res; | 621 | /** |
| 408 | ushader->base.shareable = false; | 622 | * vmw_compat_shader_remove - Stage a compat shader for removal. |
| 409 | ushader->base.tfile = NULL; | 623 | * |
| 624 | * @man: Pointer to the compat shader manager | ||
| 625 | * @user_key: The key that is used to identify the shader. The key is | ||
| 626 | * unique to the shader type. | ||
| 627 | * @shader_type: Shader type. | ||
| 628 | * @list: Caller's list of staged shader actions. | ||
| 629 | * | ||
| 630 | * This function stages a compat shader for removal and removes the key from | ||
| 631 | * the shader manager's hash table. If the shader was previously only staged | ||
| 632 | * for addition it is completely removed (but the execbuf code may keep a | ||
| 633 | * reference if it was bound to a context between addition and removal). If | ||
| 634 | * it was previously committed to the manager, it is staged for removal. | ||
| 635 | */ | ||
| 636 | int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, | ||
| 637 | u32 user_key, SVGA3dShaderType shader_type, | ||
| 638 | struct list_head *list) | ||
| 639 | { | ||
| 640 | struct vmw_compat_shader *entry; | ||
| 641 | struct drm_hash_item *hash; | ||
| 642 | int ret; | ||
| 410 | 643 | ||
| 411 | /* | 644 | ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24), |
| 412 | * From here on, the destructor takes over resource freeing. | 645 | &hash); |
| 413 | */ | 646 | if (likely(ret != 0)) |
| 647 | return -EINVAL; | ||
| 414 | 648 | ||
| 415 | ret = vmw_gb_shader_init(dev_priv, res, arg->size, | 649 | entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); |
| 416 | arg->offset, shader_type, buffer, | 650 | |
| 417 | vmw_user_shader_free); | 651 | switch (entry->state) { |
| 652 | case VMW_COMPAT_ADD: | ||
| 653 | vmw_compat_shader_free(man, entry); | ||
| 654 | break; | ||
| 655 | case VMW_COMPAT_COMMITED: | ||
| 656 | (void) drm_ht_remove_item(&man->shaders, &entry->hash); | ||
| 657 | list_del(&entry->head); | ||
| 658 | entry->state = VMW_COMPAT_DEL; | ||
| 659 | list_add_tail(&entry->head, list); | ||
| 660 | break; | ||
| 661 | default: | ||
| 662 | BUG(); | ||
| 663 | break; | ||
| 664 | } | ||
| 665 | |||
| 666 | return 0; | ||
| 667 | } | ||
| 668 | |||
| 669 | /** | ||
| 670 | * vmw_compat_shader_add - Create a compat shader and add the | ||
| 671 | * key to the manager | ||
| 672 | * | ||
| 673 | * @man: Pointer to the compat shader manager | ||
| 674 | * @user_key: The key that is used to identify the shader. The key is | ||
| 675 | * unique to the shader type. | ||
| 676 | * @bytecode: Pointer to the bytecode of the shader. | ||
| 677 | * @shader_type: Shader type. | ||
| 678 | * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is | ||
| 679 | * to be created with. | ||
| 680 | * @list: Caller's list of staged shader actions. | ||
| 681 | * | ||
| 682 | * Note that only the key is added to the shader manager's hash table. | ||
| 683 | * The shader is not yet added to the shader manager's list of shaders. | ||
| 684 | */ | ||
| 685 | int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, | ||
| 686 | u32 user_key, const void *bytecode, | ||
| 687 | SVGA3dShaderType shader_type, | ||
| 688 | size_t size, | ||
| 689 | struct ttm_object_file *tfile, | ||
| 690 | struct list_head *list) | ||
| 691 | { | ||
| 692 | struct vmw_dma_buffer *buf; | ||
| 693 | struct ttm_bo_kmap_obj map; | ||
| 694 | bool is_iomem; | ||
| 695 | struct vmw_compat_shader *compat; | ||
| 696 | u32 handle; | ||
| 697 | int ret; | ||
| 698 | |||
| 699 | if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) | ||
| 700 | return -EINVAL; | ||
| 701 | |||
| 702 | /* Allocate and pin a DMA buffer */ | ||
| 703 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); | ||
| 704 | if (unlikely(buf == NULL)) | ||
| 705 | return -ENOMEM; | ||
| 706 | |||
| 707 | ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, | ||
| 708 | true, vmw_dmabuf_bo_free); | ||
| 418 | if (unlikely(ret != 0)) | 709 | if (unlikely(ret != 0)) |
| 419 | goto out_unlock; | 710 | goto out; |
| 420 | 711 | ||
| 421 | tmp = vmw_resource_reference(res); | 712 | ret = ttm_bo_reserve(&buf->base, false, true, false, NULL); |
| 422 | ret = ttm_base_object_init(tfile, &ushader->base, false, | 713 | if (unlikely(ret != 0)) |
| 423 | VMW_RES_SHADER, | 714 | goto no_reserve; |
| 424 | &vmw_user_shader_base_release, NULL); | ||
| 425 | 715 | ||
| 716 | /* Map and copy shader bytecode. */ | ||
| 717 | ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT, | ||
| 718 | &map); | ||
| 426 | if (unlikely(ret != 0)) { | 719 | if (unlikely(ret != 0)) { |
| 427 | vmw_resource_unreference(&tmp); | 720 | ttm_bo_unreserve(&buf->base); |
| 428 | goto out_err; | 721 | goto no_reserve; |
| 429 | } | 722 | } |
| 430 | 723 | ||
| 431 | arg->shader_handle = ushader->base.hash.key; | 724 | memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size); |
| 432 | out_err: | 725 | WARN_ON(is_iomem); |
| 433 | vmw_resource_unreference(&res); | 726 | |
| 434 | out_unlock: | 727 | ttm_bo_kunmap(&map); |
| 435 | ttm_read_unlock(&vmaster->lock); | 728 | ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true); |
| 436 | out_bad_arg: | 729 | WARN_ON(ret != 0); |
| 437 | vmw_dmabuf_unreference(&buffer); | 730 | ttm_bo_unreserve(&buf->base); |
| 731 | |||
| 732 | /* Create a guest-backed shader container backed by the dma buffer */ | ||
| 733 | ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type, | ||
| 734 | tfile, &handle); | ||
| 735 | vmw_dmabuf_unreference(&buf); | ||
| 736 | if (unlikely(ret != 0)) | ||
| 737 | goto no_reserve; | ||
| 738 | /* | ||
| 739 | * Create a compat shader structure and stage it for insertion | ||
| 740 | * in the manager | ||
| 741 | */ | ||
| 742 | compat = kzalloc(sizeof(*compat), GFP_KERNEL); | ||
| 743 | if (compat == NULL) { | ||
| | ret = -ENOMEM; /* don't return stale success from vmw_shader_alloc() */ | ||
| 744 | goto no_compat; | ||
| | } | ||
| 745 | |||
| 746 | compat->hash.key = user_key | (shader_type << 24); | ||
| 747 | ret = drm_ht_insert_item(&man->shaders, &compat->hash); | ||
| 748 | if (unlikely(ret != 0)) | ||
| 749 | goto out_invalid_key; | ||
| 750 | |||
| 751 | compat->state = VMW_COMPAT_ADD; | ||
| 752 | compat->handle = handle; | ||
| 753 | compat->tfile = tfile; | ||
| 754 | list_add_tail(&compat->head, list); | ||
| 438 | 755 | ||
| 756 | return 0; | ||
| 757 | |||
| 758 | out_invalid_key: | ||
| 759 | kfree(compat); | ||
| 760 | no_compat: | ||
| 761 | ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); | ||
| 762 | no_reserve: | ||
| 763 | out: | ||
| 439 | return ret; | 764 | return ret; |
| 765 | } | ||
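For context, a hedged usage sketch of the function above: a caller stages the add on a private list, submits whatever command stream references the shader, and only then commits. The wrapper name example_define_legacy_shader is made up, and vmw_compat_shaders_commit is assumed to be the staging counterpart (it is not part of this hunk):

static int example_define_legacy_shader(struct vmw_compat_shader_manager *man,
					u32 user_key, const void *bytecode,
					size_t size,
					struct ttm_object_file *tfile)
{
	struct list_head staged;
	int ret;

	INIT_LIST_HEAD(&staged);

	/* Stage: the key goes into the hash table, the entry onto 'staged'. */
	ret = vmw_compat_shader_add(man, user_key, bytecode,
				    SVGA3D_SHADERTYPE_VS, size, tfile,
				    &staged);
	if (unlikely(ret != 0))
		return ret;

	/* ... submit the command stream that references the shader ... */

	/* Apply: the entry moves from 'staged' to the manager's list. */
	vmw_compat_shaders_commit(man, &staged);
	return 0;
}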
| 766 | |||
| 767 | /** | ||
| 768 | * vmw_compat_shader_man_create - Create a compat shader manager | ||
| 769 | * | ||
| 770 | * @dev_priv: Pointer to a device private structure. | ||
| 771 | * | ||
| 772 | * Typically done at file open time. On success, returns a pointer to a | ||
| 773 | * compat shader manager; otherwise returns an ERR_PTR-encoded error. | ||
| 774 | */ | ||
| 775 | struct vmw_compat_shader_manager * | ||
| 776 | vmw_compat_shader_man_create(struct vmw_private *dev_priv) | ||
| 777 | { | ||
| 778 | struct vmw_compat_shader_manager *man; | ||
| 779 | int ret; | ||
| 780 | |||
| 781 | man = kzalloc(sizeof(*man), GFP_KERNEL); | ||
| 782 | if (man == NULL) | ||
| 783 | return ERR_PTR(-ENOMEM); | ||
| 784 | |||
| 785 | man->dev_priv = dev_priv; | ||
| 786 | INIT_LIST_HEAD(&man->list); | ||
| 787 | ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER); | ||
| 788 | if (ret == 0) | ||
| 789 | return man; | ||
| 790 | |||
| 791 | kfree(man); | ||
| 792 | return ERR_PTR(ret); | ||
| 793 | } | ||
| 794 | |||
| 795 | /** | ||
| 796 | * vmw_compat_shader_man_destroy - Destroy a compat shader manager | ||
| 797 | * | ||
| 798 | * @man: Pointer to the shader manager to destroy. | ||
| 799 | * | ||
| 800 | * Typically done at file close time. | ||
| 801 | */ | ||
| 802 | void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) | ||
| 803 | { | ||
| 804 | struct vmw_compat_shader *entry, *next; | ||
| 805 | |||
| 806 | mutex_lock(&man->dev_priv->cmdbuf_mutex); | ||
| 807 | list_for_each_entry_safe(entry, next, &man->list, head) | ||
| 808 | vmw_compat_shader_free(man, entry); | ||
| 440 | 809 | ||
| 810 | mutex_unlock(&man->dev_priv->cmdbuf_mutex); | ||
| 811 | kfree(man); | ||
| 441 | } | 812 | } |
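Taken together, the two functions above bracket the manager's lifetime: created per open file handle, torn down at close. A minimal sketch of the intended pairing, assuming the per-file private data gains a pointer to the manager (the shman member and the example_* names are illustrative):

static int example_driver_open(struct vmw_private *dev_priv,
			       struct vmw_fpriv *vmw_fp)
{
	struct vmw_compat_shader_manager *man;

	man = vmw_compat_shader_man_create(dev_priv);
	if (IS_ERR(man))
		return PTR_ERR(man);	/* ERR_PTR, never NULL, on failure */

	vmw_fp->shman = man;		/* illustrative member name */
	return 0;
}

static void example_driver_release(struct vmw_fpriv *vmw_fp)
{
	/* Frees any still-committed entries under cmdbuf_mutex. */
	vmw_compat_shader_man_destroy(vmw_fp->shman);
}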
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 979da1c246a5..e7af580ab977 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
| @@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 830 | if (unlikely(ret != 0)) | 830 | if (unlikely(ret != 0)) |
| 831 | goto out_unlock; | 831 | goto out_unlock; |
| 832 | 832 | ||
| 833 | /* | ||
| 834 | * A gb-aware client referencing a shared surface will | ||
| 835 | * expect a backup buffer to be present. | ||
| 836 | */ | ||
| 837 | if (dev_priv->has_mob && req->shareable) { | ||
| 838 | uint32_t backup_handle; | ||
| 839 | |||
| 840 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
| 841 | res->backup_size, | ||
| 842 | true, | ||
| 843 | &backup_handle, | ||
| 844 | &res->backup); | ||
| 845 | if (unlikely(ret != 0)) { | ||
| 846 | vmw_resource_unreference(&res); | ||
| 847 | goto out_unlock; | ||
| 848 | } | ||
| 849 | } | ||
| 850 | |||
| 833 | tmp = vmw_resource_reference(&srf->res); | 851 | tmp = vmw_resource_reference(&srf->res); |
| 834 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | 852 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, |
| 835 | req->shareable, VMW_RES_SURFACE, | 853 | req->shareable, VMW_RES_SURFACE, |
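The new block above guarantees that a shareable surface on a MOB-capable device always carries a user-visible backup buffer before its handle becomes reachable by other clients. As a minimal sketch of the same pattern in isolation (the wrapper name is hypothetical; the vmw_user_dmabuf_alloc call mirrors the hunk):

static int example_attach_shared_backup(struct vmw_private *dev_priv,
					struct ttm_object_file *tfile,
					struct vmw_resource *res)
{
	uint32_t backup_handle;

	/* Shareable (true) so a gb-aware client can reference the MOB. */
	return vmw_user_dmabuf_alloc(dev_priv, tfile, res->backup_size,
				     true, &backup_handle, &res->backup);
}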
| @@ -908,8 +926,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 908 | rep->size_addr; | 926 | rep->size_addr; |
| 909 | 927 | ||
| 910 | if (user_sizes) | 928 | if (user_sizes) |
| 911 | ret = copy_to_user(user_sizes, srf->sizes, | 929 | ret = copy_to_user(user_sizes, &srf->base_size, |
| 912 | srf->num_sizes * sizeof(*srf->sizes)); | 930 | sizeof(srf->base_size)); |
| 913 | if (unlikely(ret != 0)) { | 931 | if (unlikely(ret != 0)) { |
| 914 | DRM_ERROR("copy_to_user failed %p %u\n", | 932 | DRM_ERROR("copy_to_user failed %p %u\n", |
| 915 | user_sizes, srf->num_sizes); | 933 | user_sizes, srf->num_sizes); |
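The hunk above shrinks what the reference ioctl reports: the legacy path copied one struct drm_vmw_size per mip level, while the guest-backed representation tracks only the base size. Since num_sizes is at least one for any valid surface, the single-entry copy can never overrun a buffer sized for the old layout. A compile-time check along these lines (purely illustrative, not part of the patch) would make the type assumption explicit:

static inline void example_size_report_check(void)
{
	/* One entry of the old array is exactly the new payload. */
	BUILD_BUG_ON(sizeof(((struct vmw_surface *)0)->base_size) !=
		     sizeof(struct drm_vmw_size));
}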
| @@ -1111,7 +1129,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) | |||
| 1111 | return 0; | 1129 | return 0; |
| 1112 | 1130 | ||
| 1113 | mutex_lock(&dev_priv->binding_mutex); | 1131 | mutex_lock(&dev_priv->binding_mutex); |
| 1114 | vmw_context_binding_res_list_kill(&res->binding_head); | 1132 | vmw_context_binding_res_list_scrub(&res->binding_head); |
| 1115 | 1133 | ||
| 1116 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 1134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 1117 | if (unlikely(cmd == NULL)) { | 1135 | if (unlikely(cmd == NULL)) { |
