diff options
author | Laurent Pinchart <laurent.pinchart@ideasonboard.com> | 2017-05-08 18:27:11 -0400 |
---|---|---|
committer | Tomi Valkeinen <tomi.valkeinen@ti.com> | 2017-06-02 03:53:43 -0400 |
commit | d6f544f6bf419a5bc65f7f2d070db008d1119097 (patch) | |
tree | 634d4fea720707b99c5cb71153f7ac967db2f0e4 /drivers/gpu/drm/omapdrm/omap_gem.c | |
parent | a9e6f9f7d603ef769b4cd33e0a2b61ba48a1058e (diff) |
drm: omapdrm: Remove legacy buffer synchronization support
The omapdrm driver uses a custom API to synchronize with the SGX GPU.
This is unusable as such in the mainline kernel as the API is only
partially implemented and requires additional out-of-tree patches.
Furthermore, as no SGX driver is available in the mainline kernel, the
API can't be considered as a stable mainline API.
Now that the driver supports synchronization through fences, remove
legacy buffer synchronization support. The two userspace ioctls are
turned into no-ops to avoid breaking userspace and will be removed in
the future.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c')
-rw-r--r-- | drivers/gpu/drm/omapdrm/omap_gem.c | 214 |
1 file changed, 0 insertions, 214 deletions
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 68a75b829b71..4bb52a5f5939 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
@@ -101,19 +101,6 @@ struct omap_gem_object { | |||
101 | * Virtual address, if mapped. | 101 | * Virtual address, if mapped. |
102 | */ | 102 | */ |
103 | void *vaddr; | 103 | void *vaddr; |
104 | |||
105 | /** | ||
106 | * sync-object allocated on demand (if needed) | ||
107 | * | ||
108 | * Per-buffer sync-object for tracking pending and completed hw/dma | ||
109 | * read and write operations. | ||
110 | */ | ||
111 | struct { | ||
112 | uint32_t write_pending; | ||
113 | uint32_t write_complete; | ||
114 | uint32_t read_pending; | ||
115 | uint32_t read_complete; | ||
116 | } *sync; | ||
117 | }; | 104 | }; |
118 | 105 | ||
119 | #define to_omap_bo(x) container_of(x, struct omap_gem_object, base) | 106 | #define to_omap_bo(x) container_of(x, struct omap_gem_object, base) |
@@ -1071,205 +1058,6 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m) | |||
1071 | #endif | 1058 | #endif |
1072 | 1059 | ||
1073 | /* ----------------------------------------------------------------------------- | 1060 | /* ----------------------------------------------------------------------------- |
1074 | * Buffer Synchronization | ||
1075 | */ | ||
1076 | |||
1077 | static DEFINE_SPINLOCK(sync_lock); | ||
1078 | |||
1079 | struct omap_gem_sync_waiter { | ||
1080 | struct list_head list; | ||
1081 | struct omap_gem_object *omap_obj; | ||
1082 | enum omap_gem_op op; | ||
1083 | uint32_t read_target, write_target; | ||
1084 | /* notify called w/ sync_lock held */ | ||
1085 | void (*notify)(void *arg); | ||
1086 | void *arg; | ||
1087 | }; | ||
1088 | |||
1089 | /* list of omap_gem_sync_waiter.. the notify fxn gets called back when | ||
1090 | * the read and/or write target count is achieved which can call a user | ||
1091 | * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for | ||
1092 | * cpu access), etc. | ||
1093 | */ | ||
1094 | static LIST_HEAD(waiters); | ||
1095 | |||
1096 | static inline bool is_waiting(struct omap_gem_sync_waiter *waiter) | ||
1097 | { | ||
1098 | struct omap_gem_object *omap_obj = waiter->omap_obj; | ||
1099 | if ((waiter->op & OMAP_GEM_READ) && | ||
1100 | (omap_obj->sync->write_complete < waiter->write_target)) | ||
1101 | return true; | ||
1102 | if ((waiter->op & OMAP_GEM_WRITE) && | ||
1103 | (omap_obj->sync->read_complete < waiter->read_target)) | ||
1104 | return true; | ||
1105 | return false; | ||
1106 | } | ||
1107 | |||
1108 | /* macro for sync debug.. */ | ||
1109 | #define SYNCDBG 0 | ||
1110 | #define SYNC(fmt, ...) do { if (SYNCDBG) \ | ||
1111 | pr_err("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \ | ||
1112 | } while (0) | ||
1113 | |||
1114 | |||
1115 | static void sync_op_update(void) | ||
1116 | { | ||
1117 | struct omap_gem_sync_waiter *waiter, *n; | ||
1118 | list_for_each_entry_safe(waiter, n, &waiters, list) { | ||
1119 | if (!is_waiting(waiter)) { | ||
1120 | list_del(&waiter->list); | ||
1121 | SYNC("notify: %p", waiter); | ||
1122 | waiter->notify(waiter->arg); | ||
1123 | kfree(waiter); | ||
1124 | } | ||
1125 | } | ||
1126 | } | ||
1127 | |||
1128 | static inline int sync_op(struct drm_gem_object *obj, | ||
1129 | enum omap_gem_op op, bool start) | ||
1130 | { | ||
1131 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1132 | int ret = 0; | ||
1133 | |||
1134 | spin_lock(&sync_lock); | ||
1135 | |||
1136 | if (!omap_obj->sync) { | ||
1137 | omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC); | ||
1138 | if (!omap_obj->sync) { | ||
1139 | ret = -ENOMEM; | ||
1140 | goto unlock; | ||
1141 | } | ||
1142 | } | ||
1143 | |||
1144 | if (start) { | ||
1145 | if (op & OMAP_GEM_READ) | ||
1146 | omap_obj->sync->read_pending++; | ||
1147 | if (op & OMAP_GEM_WRITE) | ||
1148 | omap_obj->sync->write_pending++; | ||
1149 | } else { | ||
1150 | if (op & OMAP_GEM_READ) | ||
1151 | omap_obj->sync->read_complete++; | ||
1152 | if (op & OMAP_GEM_WRITE) | ||
1153 | omap_obj->sync->write_complete++; | ||
1154 | sync_op_update(); | ||
1155 | } | ||
1156 | |||
1157 | unlock: | ||
1158 | spin_unlock(&sync_lock); | ||
1159 | |||
1160 | return ret; | ||
1161 | } | ||
1162 | |||
1163 | /* mark the start of read and/or write operation */ | ||
1164 | int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op) | ||
1165 | { | ||
1166 | return sync_op(obj, op, true); | ||
1167 | } | ||
1168 | |||
1169 | int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op) | ||
1170 | { | ||
1171 | return sync_op(obj, op, false); | ||
1172 | } | ||
1173 | |||
1174 | static DECLARE_WAIT_QUEUE_HEAD(sync_event); | ||
1175 | |||
1176 | static void sync_notify(void *arg) | ||
1177 | { | ||
1178 | struct task_struct **waiter_task = arg; | ||
1179 | *waiter_task = NULL; | ||
1180 | wake_up_all(&sync_event); | ||
1181 | } | ||
1182 | |||
1183 | int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op) | ||
1184 | { | ||
1185 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1186 | int ret = 0; | ||
1187 | if (omap_obj->sync) { | ||
1188 | struct task_struct *waiter_task = current; | ||
1189 | struct omap_gem_sync_waiter *waiter = | ||
1190 | kzalloc(sizeof(*waiter), GFP_KERNEL); | ||
1191 | |||
1192 | if (!waiter) | ||
1193 | return -ENOMEM; | ||
1194 | |||
1195 | waiter->omap_obj = omap_obj; | ||
1196 | waiter->op = op; | ||
1197 | waiter->read_target = omap_obj->sync->read_pending; | ||
1198 | waiter->write_target = omap_obj->sync->write_pending; | ||
1199 | waiter->notify = sync_notify; | ||
1200 | waiter->arg = &waiter_task; | ||
1201 | |||
1202 | spin_lock(&sync_lock); | ||
1203 | if (is_waiting(waiter)) { | ||
1204 | SYNC("waited: %p", waiter); | ||
1205 | list_add_tail(&waiter->list, &waiters); | ||
1206 | spin_unlock(&sync_lock); | ||
1207 | ret = wait_event_interruptible(sync_event, | ||
1208 | (waiter_task == NULL)); | ||
1209 | spin_lock(&sync_lock); | ||
1210 | if (waiter_task) { | ||
1211 | SYNC("interrupted: %p", waiter); | ||
1212 | /* we were interrupted */ | ||
1213 | list_del(&waiter->list); | ||
1214 | waiter_task = NULL; | ||
1215 | } else { | ||
1216 | /* freed in sync_op_update() */ | ||
1217 | waiter = NULL; | ||
1218 | } | ||
1219 | } | ||
1220 | spin_unlock(&sync_lock); | ||
1221 | kfree(waiter); | ||
1222 | } | ||
1223 | return ret; | ||
1224 | } | ||
1225 | |||
1226 | /* call fxn(arg), either synchronously or asynchronously if the op | ||
1227 | * is currently blocked.. fxn() can be called from any context | ||
1228 | * | ||
1229 | * (TODO for now fxn is called back from whichever context calls | ||
1230 | * omap_gem_op_finish().. but this could be better defined later | ||
1231 | * if needed) | ||
1232 | * | ||
1233 | * TODO more code in common w/ _sync().. | ||
1234 | */ | ||
1235 | int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op, | ||
1236 | void (*fxn)(void *arg), void *arg) | ||
1237 | { | ||
1238 | struct omap_gem_object *omap_obj = to_omap_bo(obj); | ||
1239 | if (omap_obj->sync) { | ||
1240 | struct omap_gem_sync_waiter *waiter = | ||
1241 | kzalloc(sizeof(*waiter), GFP_ATOMIC); | ||
1242 | |||
1243 | if (!waiter) | ||
1244 | return -ENOMEM; | ||
1245 | |||
1246 | waiter->omap_obj = omap_obj; | ||
1247 | waiter->op = op; | ||
1248 | waiter->read_target = omap_obj->sync->read_pending; | ||
1249 | waiter->write_target = omap_obj->sync->write_pending; | ||
1250 | waiter->notify = fxn; | ||
1251 | waiter->arg = arg; | ||
1252 | |||
1253 | spin_lock(&sync_lock); | ||
1254 | if (is_waiting(waiter)) { | ||
1255 | SYNC("waited: %p", waiter); | ||
1256 | list_add_tail(&waiter->list, &waiters); | ||
1257 | spin_unlock(&sync_lock); | ||
1258 | return 0; | ||
1259 | } | ||
1260 | |||
1261 | spin_unlock(&sync_lock); | ||
1262 | |||
1263 | kfree(waiter); | ||
1264 | } | ||
1265 | |||
1266 | /* no waiting.. */ | ||
1267 | fxn(arg); | ||
1268 | |||
1269 | return 0; | ||
1270 | } | ||
1271 | |||
1272 | /* ----------------------------------------------------------------------------- | ||
1273 | * Constructor & Destructor | 1061 | * Constructor & Destructor |
1274 | */ | 1062 | */ |
1275 | 1063 | ||
@@ -1308,8 +1096,6 @@ void omap_gem_free_object(struct drm_gem_object *obj) | |||
1308 | drm_prime_gem_destroy(obj, omap_obj->sgt); | 1096 | drm_prime_gem_destroy(obj, omap_obj->sgt); |
1309 | } | 1097 | } |
1310 | 1098 | ||
1311 | kfree(omap_obj->sync); | ||
1312 | |||
1313 | drm_gem_object_release(obj); | 1099 | drm_gem_object_release(obj); |
1314 | 1100 | ||
1315 | kfree(omap_obj); | 1101 | kfree(omap_obj); |