Diffstat (limited to 'drivers')
35 files changed, 2050 insertions, 774 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 54bfae1f09a4..ebee55537a05 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ | |||
61 | 61 | ||
62 | obj-$(CONFIG_PARPORT) += parport/ | 62 | obj-$(CONFIG_PARPORT) += parport/ |
63 | obj-y += base/ block/ misc/ mfd/ nfc/ | 63 | obj-y += base/ block/ misc/ mfd/ nfc/ |
64 | obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ | ||
64 | obj-$(CONFIG_NUBUS) += nubus/ | 65 | obj-$(CONFIG_NUBUS) += nubus/ |
65 | obj-y += macintosh/ | 66 | obj-y += macintosh/ |
66 | obj-$(CONFIG_IDE) += ide/ | 67 | obj-$(CONFIG_IDE) += ide/ |
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 23b8726962af..88500fed3c7a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -149,15 +149,21 @@ config EXTRA_FIRMWARE_DIR | |||
149 | some other directory containing the firmware files. | 149 | some other directory containing the firmware files. |
150 | 150 | ||
151 | config FW_LOADER_USER_HELPER | 151 | config FW_LOADER_USER_HELPER |
152 | bool | ||
153 | |||
154 | config FW_LOADER_USER_HELPER_FALLBACK | ||
152 | bool "Fallback user-helper invocation for firmware loading" | 155 | bool "Fallback user-helper invocation for firmware loading" |
153 | depends on FW_LOADER | 156 | depends on FW_LOADER |
154 | default y | 157 | select FW_LOADER_USER_HELPER |
155 | help | 158 | help |
156 | This option enables / disables the invocation of user-helper | 159 | This option enables / disables the invocation of user-helper |
157 | (e.g. udev) for loading firmware files as a fallback after the | 160 | (e.g. udev) for loading firmware files as a fallback after the |
158 | direct file loading in kernel fails. The user-mode helper is | 161 | direct file loading in kernel fails. The user-mode helper is |
159 | no longer required unless you have a special firmware file that | 162 | no longer required unless you have a special firmware file that |
160 | resides in a non-standard path. | 163 | resides in a non-standard path. Moreover, the udev support has |
164 | been deprecated upstream. | ||
165 | |||
166 | If you are unsure about this, say N here. | ||
161 | 167 | ||
162 | config DEBUG_DRIVER | 168 | config DEBUG_DRIVER |
163 | bool "Driver Core verbose debug messages" | 169 | bool "Driver Core verbose debug messages" |
@@ -208,6 +214,15 @@ config DMA_SHARED_BUFFER | |||
208 | APIs extension; the file's descriptor can then be passed on to other | 214 | APIs extension; the file's descriptor can then be passed on to other |
209 | driver. | 215 | driver. |
210 | 216 | ||
217 | config FENCE_TRACE | ||
218 | bool "Enable verbose FENCE_TRACE messages" | ||
219 | depends on DMA_SHARED_BUFFER | ||
220 | help | ||
221 | Enable the FENCE_TRACE printks. This will add extra | ||
222 | spam to the console log, but will make it easier to diagnose | ||
223 | lockup related problems for dma-buffers shared across multiple | ||
224 | devices. | ||
225 | |||
211 | config DMA_CMA | 226 | config DMA_CMA |
212 | bool "DMA Contiguous Memory Allocator" | 227 | bool "DMA Contiguous Memory Allocator" |
213 | depends on HAVE_DMA_CONTIGUOUS && CMA | 228 | depends on HAVE_DMA_CONTIGUOUS && CMA |
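The FENCE_TRACE option above only gates extra printks. As a rough illustration (not part of this diff), a driver-side debug call could look like the sketch below, assuming the FENCE_TRACE() helper and fence_is_signaled() that linux/fence.h introduces alongside the fence code further down; my_debug_point is a made-up name:

#include <linux/fence.h>

/* one-line trace for a fence we appear to be stuck on; this is a no-op
 * unless CONFIG_FENCE_TRACE is enabled */
static void my_debug_point(struct fence *f)
{
	FENCE_TRACE(f, "still waiting, signaled=%d\n", fence_is_signaled(f));
}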
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 04b314e0fa51..4aab26ec0292 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o | |||
10 | obj-y += power/ | 10 | obj-y += power/ |
11 | obj-$(CONFIG_HAS_DMA) += dma-mapping.o | 11 | obj-$(CONFIG_HAS_DMA) += dma-mapping.o |
12 | obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o | 12 | obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o |
13 | obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o | ||
14 | obj-$(CONFIG_ISA) += isa.o | 13 | obj-$(CONFIG_ISA) += isa.o |
15 | obj-$(CONFIG_FW_LOADER) += firmware_class.o | 14 | obj-$(CONFIG_FW_LOADER) += firmware_class.o |
16 | obj-$(CONFIG_NUMA) += node.o | 15 | obj-$(CONFIG_NUMA) += node.o |
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c4778995cd72..f748430bb654 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -18,6 +18,15 @@ | |||
18 | #include <linux/mutex.h> | 18 | #include <linux/mutex.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | 20 | ||
21 | struct component_match { | ||
22 | size_t alloc; | ||
23 | size_t num; | ||
24 | struct { | ||
25 | void *data; | ||
26 | int (*fn)(struct device *, void *); | ||
27 | } compare[0]; | ||
28 | }; | ||
29 | |||
21 | struct master { | 30 | struct master { |
22 | struct list_head node; | 31 | struct list_head node; |
23 | struct list_head components; | 32 | struct list_head components; |
@@ -25,6 +34,7 @@ struct master { | |||
25 | 34 | ||
26 | const struct component_master_ops *ops; | 35 | const struct component_master_ops *ops; |
27 | struct device *dev; | 36 | struct device *dev; |
37 | struct component_match *match; | ||
28 | }; | 38 | }; |
29 | 39 | ||
30 | struct component { | 40 | struct component { |
@@ -69,6 +79,11 @@ static void component_detach_master(struct master *master, struct component *c) | |||
69 | c->master = NULL; | 79 | c->master = NULL; |
70 | } | 80 | } |
71 | 81 | ||
82 | /* | ||
83 | * Add a component to a master, finding the component via the compare | ||
84 | * function and compare data. This is safe to call for duplicate matches | ||
85 | * and will not result in the same component being added multiple times. | ||
86 | */ | ||
72 | int component_master_add_child(struct master *master, | 87 | int component_master_add_child(struct master *master, |
73 | int (*compare)(struct device *, void *), void *compare_data) | 88 | int (*compare)(struct device *, void *), void *compare_data) |
74 | { | 89 | { |
@@ -76,11 +91,12 @@ int component_master_add_child(struct master *master, | |||
76 | int ret = -ENXIO; | 91 | int ret = -ENXIO; |
77 | 92 | ||
78 | list_for_each_entry(c, &component_list, node) { | 93 | list_for_each_entry(c, &component_list, node) { |
79 | if (c->master) | 94 | if (c->master && c->master != master) |
80 | continue; | 95 | continue; |
81 | 96 | ||
82 | if (compare(c->dev, compare_data)) { | 97 | if (compare(c->dev, compare_data)) { |
83 | component_attach_master(master, c); | 98 | if (!c->master) |
99 | component_attach_master(master, c); | ||
84 | ret = 0; | 100 | ret = 0; |
85 | break; | 101 | break; |
86 | } | 102 | } |
@@ -90,6 +106,34 @@ int component_master_add_child(struct master *master, | |||
90 | } | 106 | } |
91 | EXPORT_SYMBOL_GPL(component_master_add_child); | 107 | EXPORT_SYMBOL_GPL(component_master_add_child); |
92 | 108 | ||
109 | static int find_components(struct master *master) | ||
110 | { | ||
111 | struct component_match *match = master->match; | ||
112 | size_t i; | ||
113 | int ret = 0; | ||
114 | |||
115 | if (!match) { | ||
116 | /* | ||
117 | * Search the list of components, looking for components that | ||
118 | * belong to this master, and attach them to the master. | ||
119 | */ | ||
120 | return master->ops->add_components(master->dev, master); | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * Scan the array of match functions and attach | ||
125 | * any components which are found to this master. | ||
126 | */ | ||
127 | for (i = 0; i < match->num; i++) { | ||
128 | ret = component_master_add_child(master, | ||
129 | match->compare[i].fn, | ||
130 | match->compare[i].data); | ||
131 | if (ret) | ||
132 | break; | ||
133 | } | ||
134 | return ret; | ||
135 | } | ||
136 | |||
93 | /* Detach all attached components from this master */ | 137 | /* Detach all attached components from this master */ |
94 | static void master_remove_components(struct master *master) | 138 | static void master_remove_components(struct master *master) |
95 | { | 139 | { |
@@ -113,44 +157,44 @@ static void master_remove_components(struct master *master) | |||
113 | static int try_to_bring_up_master(struct master *master, | 157 | static int try_to_bring_up_master(struct master *master, |
114 | struct component *component) | 158 | struct component *component) |
115 | { | 159 | { |
116 | int ret = 0; | 160 | int ret; |
117 | 161 | ||
118 | if (!master->bound) { | 162 | if (master->bound) |
119 | /* | 163 | return 0; |
120 | * Search the list of components, looking for components that | ||
121 | * belong to this master, and attach them to the master. | ||
122 | */ | ||
123 | if (master->ops->add_components(master->dev, master)) { | ||
124 | /* Failed to find all components */ | ||
125 | master_remove_components(master); | ||
126 | ret = 0; | ||
127 | goto out; | ||
128 | } | ||
129 | 164 | ||
130 | if (component && component->master != master) { | 165 | /* |
131 | master_remove_components(master); | 166 | * Search the list of components, looking for components that |
132 | ret = 0; | 167 | * belong to this master, and attach them to the master. |
133 | goto out; | 168 | */ |
134 | } | 169 | if (find_components(master)) { |
170 | /* Failed to find all components */ | ||
171 | ret = 0; | ||
172 | goto out; | ||
173 | } | ||
135 | 174 | ||
136 | if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { | 175 | if (component && component->master != master) { |
137 | ret = -ENOMEM; | 176 | ret = 0; |
138 | goto out; | 177 | goto out; |
139 | } | 178 | } |
140 | 179 | ||
141 | /* Found all components */ | 180 | if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { |
142 | ret = master->ops->bind(master->dev); | 181 | ret = -ENOMEM; |
143 | if (ret < 0) { | 182 | goto out; |
144 | devres_release_group(master->dev, NULL); | 183 | } |
145 | dev_info(master->dev, "master bind failed: %d\n", ret); | ||
146 | master_remove_components(master); | ||
147 | goto out; | ||
148 | } | ||
149 | 184 | ||
150 | master->bound = true; | 185 | /* Found all components */ |
151 | ret = 1; | 186 | ret = master->ops->bind(master->dev); |
187 | if (ret < 0) { | ||
188 | devres_release_group(master->dev, NULL); | ||
189 | dev_info(master->dev, "master bind failed: %d\n", ret); | ||
190 | goto out; | ||
152 | } | 191 | } |
192 | |||
193 | master->bound = true; | ||
194 | return 1; | ||
195 | |||
153 | out: | 196 | out: |
197 | master_remove_components(master); | ||
154 | 198 | ||
155 | return ret; | 199 | return ret; |
156 | } | 200 | } |
@@ -180,18 +224,89 @@ static void take_down_master(struct master *master) | |||
180 | master_remove_components(master); | 224 | master_remove_components(master); |
181 | } | 225 | } |
182 | 226 | ||
183 | int component_master_add(struct device *dev, | 227 | static size_t component_match_size(size_t num) |
184 | const struct component_master_ops *ops) | 228 | { |
229 | return offsetof(struct component_match, compare[num]); | ||
230 | } | ||
231 | |||
232 | static struct component_match *component_match_realloc(struct device *dev, | ||
233 | struct component_match *match, size_t num) | ||
234 | { | ||
235 | struct component_match *new; | ||
236 | |||
237 | if (match && match->alloc == num) | ||
238 | return match; | ||
239 | |||
240 | new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL); | ||
241 | if (!new) | ||
242 | return ERR_PTR(-ENOMEM); | ||
243 | |||
244 | if (match) { | ||
245 | memcpy(new, match, component_match_size(min(match->num, num))); | ||
246 | devm_kfree(dev, match); | ||
247 | } else { | ||
248 | new->num = 0; | ||
249 | } | ||
250 | |||
251 | new->alloc = num; | ||
252 | |||
253 | return new; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Add a component to be matched. | ||
258 | * | ||
259 | * The match array is first created or extended if necessary. | ||
260 | */ | ||
261 | void component_match_add(struct device *dev, struct component_match **matchptr, | ||
262 | int (*compare)(struct device *, void *), void *compare_data) | ||
263 | { | ||
264 | struct component_match *match = *matchptr; | ||
265 | |||
266 | if (IS_ERR(match)) | ||
267 | return; | ||
268 | |||
269 | if (!match || match->num == match->alloc) { | ||
270 | size_t new_size = match ? match->alloc + 16 : 15; | ||
271 | |||
272 | match = component_match_realloc(dev, match, new_size); | ||
273 | |||
274 | *matchptr = match; | ||
275 | |||
276 | if (IS_ERR(match)) | ||
277 | return; | ||
278 | } | ||
279 | |||
280 | match->compare[match->num].fn = compare; | ||
281 | match->compare[match->num].data = compare_data; | ||
282 | match->num++; | ||
283 | } | ||
284 | EXPORT_SYMBOL(component_match_add); | ||
285 | |||
286 | int component_master_add_with_match(struct device *dev, | ||
287 | const struct component_master_ops *ops, | ||
288 | struct component_match *match) | ||
185 | { | 289 | { |
186 | struct master *master; | 290 | struct master *master; |
187 | int ret; | 291 | int ret; |
188 | 292 | ||
293 | if (ops->add_components && match) | ||
294 | return -EINVAL; | ||
295 | |||
296 | if (match) { | ||
297 | /* Reallocate the match array for its true size */ | ||
298 | match = component_match_realloc(dev, match, match->num); | ||
299 | if (IS_ERR(match)) | ||
300 | return PTR_ERR(match); | ||
301 | } | ||
302 | |||
189 | master = kzalloc(sizeof(*master), GFP_KERNEL); | 303 | master = kzalloc(sizeof(*master), GFP_KERNEL); |
190 | if (!master) | 304 | if (!master) |
191 | return -ENOMEM; | 305 | return -ENOMEM; |
192 | 306 | ||
193 | master->dev = dev; | 307 | master->dev = dev; |
194 | master->ops = ops; | 308 | master->ops = ops; |
309 | master->match = match; | ||
195 | INIT_LIST_HEAD(&master->components); | 310 | INIT_LIST_HEAD(&master->components); |
196 | 311 | ||
197 | /* Add to the list of available masters. */ | 312 | /* Add to the list of available masters. */ |
@@ -209,6 +324,13 @@ int component_master_add(struct device *dev, | |||
209 | 324 | ||
210 | return ret < 0 ? ret : 0; | 325 | return ret < 0 ? ret : 0; |
211 | } | 326 | } |
327 | EXPORT_SYMBOL_GPL(component_master_add_with_match); | ||
328 | |||
329 | int component_master_add(struct device *dev, | ||
330 | const struct component_master_ops *ops) | ||
331 | { | ||
332 | return component_master_add_with_match(dev, ops, NULL); | ||
333 | } | ||
212 | EXPORT_SYMBOL_GPL(component_master_add); | 334 | EXPORT_SYMBOL_GPL(component_master_add); |
213 | 335 | ||
214 | void component_master_del(struct device *dev, | 336 | void component_master_del(struct device *dev, |
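To illustrate the new match API (this example is not part of the diff): instead of implementing ->add_components(), a master driver can now build a component_match list in probe and hand it to component_master_add_with_match(). A rough sketch, with every my_* name invented and error handling kept minimal:

#include <linux/component.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* compare callback: match a registered component by its device_node */
static int my_compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int my_master_bind(struct device *dev)
{
	/* all matched children are present; bind them here,
	 * e.g. via component_bind_all() */
	return 0;
}

static void my_master_unbind(struct device *dev)
{
}

static const struct component_master_ops my_master_ops = {
	/* note: no .add_components -- the match list replaces it */
	.bind	= my_master_bind,
	.unbind	= my_master_unbind,
};

static int my_master_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *np;

	/* one match entry per child node we expect to show up */
	for_each_available_child_of_node(pdev->dev.of_node, np)
		component_match_add(&pdev->dev, &match, my_compare_of, np);

	if (IS_ERR(match))
		return PTR_ERR(match);
	if (!match)		/* no children described at all */
		return -ENODEV;

	return component_master_add_with_match(&pdev->dev, &my_master_ops,
					       match);
}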
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d276e33880be..da77791793f1 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -100,10 +100,16 @@ static inline long firmware_loading_timeout(void) | |||
100 | #define FW_OPT_UEVENT (1U << 0) | 100 | #define FW_OPT_UEVENT (1U << 0) |
101 | #define FW_OPT_NOWAIT (1U << 1) | 101 | #define FW_OPT_NOWAIT (1U << 1) |
102 | #ifdef CONFIG_FW_LOADER_USER_HELPER | 102 | #ifdef CONFIG_FW_LOADER_USER_HELPER |
103 | #define FW_OPT_FALLBACK (1U << 2) | 103 | #define FW_OPT_USERHELPER (1U << 2) |
104 | #else | 104 | #else |
105 | #define FW_OPT_FALLBACK 0 | 105 | #define FW_OPT_USERHELPER 0 |
106 | #endif | 106 | #endif |
107 | #ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK | ||
108 | #define FW_OPT_FALLBACK FW_OPT_USERHELPER | ||
109 | #else | ||
110 | #define FW_OPT_FALLBACK 0 | ||
111 | #endif | ||
112 | #define FW_OPT_NO_WARN (1U << 3) | ||
107 | 113 | ||
108 | struct firmware_cache { | 114 | struct firmware_cache { |
109 | /* firmware_buf instance will be added into the below list */ | 115 | /* firmware_buf instance will be added into the below list */ |
@@ -279,26 +285,15 @@ static const char * const fw_path[] = { | |||
279 | module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); | 285 | module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); |
280 | MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); | 286 | MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); |
281 | 287 | ||
282 | /* Don't inline this: 'struct kstat' is biggish */ | ||
283 | static noinline_for_stack int fw_file_size(struct file *file) | ||
284 | { | ||
285 | struct kstat st; | ||
286 | if (vfs_getattr(&file->f_path, &st)) | ||
287 | return -1; | ||
288 | if (!S_ISREG(st.mode)) | ||
289 | return -1; | ||
290 | if (st.size != (int)st.size) | ||
291 | return -1; | ||
292 | return st.size; | ||
293 | } | ||
294 | |||
295 | static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) | 288 | static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) |
296 | { | 289 | { |
297 | int size; | 290 | int size; |
298 | char *buf; | 291 | char *buf; |
299 | int rc; | 292 | int rc; |
300 | 293 | ||
301 | size = fw_file_size(file); | 294 | if (!S_ISREG(file_inode(file)->i_mode)) |
295 | return -EINVAL; | ||
296 | size = i_size_read(file_inode(file)); | ||
302 | if (size <= 0) | 297 | if (size <= 0) |
303 | return -EINVAL; | 298 | return -EINVAL; |
304 | buf = vmalloc(size); | 299 | buf = vmalloc(size); |
@@ -718,7 +713,7 @@ out: | |||
718 | static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) | 713 | static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) |
719 | { | 714 | { |
720 | struct firmware_buf *buf = fw_priv->buf; | 715 | struct firmware_buf *buf = fw_priv->buf; |
721 | int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; | 716 | int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT; |
722 | 717 | ||
723 | /* If the array of pages is too small, grow it... */ | 718 | /* If the array of pages is too small, grow it... */ |
724 | if (buf->page_array_size < pages_needed) { | 719 | if (buf->page_array_size < pages_needed) { |
@@ -911,7 +906,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, | |||
911 | wait_for_completion(&buf->completion); | 906 | wait_for_completion(&buf->completion); |
912 | 907 | ||
913 | cancel_delayed_work_sync(&fw_priv->timeout_work); | 908 | cancel_delayed_work_sync(&fw_priv->timeout_work); |
914 | if (!buf->data) | 909 | if (is_fw_load_aborted(buf)) |
910 | retval = -EAGAIN; | ||
911 | else if (!buf->data) | ||
915 | retval = -ENOMEM; | 912 | retval = -ENOMEM; |
916 | 913 | ||
917 | device_remove_file(f_dev, &dev_attr_loading); | 914 | device_remove_file(f_dev, &dev_attr_loading); |
@@ -1111,10 +1108,11 @@ _request_firmware(const struct firmware **firmware_p, const char *name, | |||
1111 | 1108 | ||
1112 | ret = fw_get_filesystem_firmware(device, fw->priv); | 1109 | ret = fw_get_filesystem_firmware(device, fw->priv); |
1113 | if (ret) { | 1110 | if (ret) { |
1114 | if (opt_flags & FW_OPT_FALLBACK) { | 1111 | if (!(opt_flags & FW_OPT_NO_WARN)) |
1115 | dev_warn(device, | 1112 | dev_warn(device, |
1116 | "Direct firmware load failed with error %d\n", | 1113 | "Direct firmware load for %s failed with error %d\n", |
1117 | ret); | 1114 | name, ret); |
1115 | if (opt_flags & FW_OPT_USERHELPER) { | ||
1118 | dev_warn(device, "Falling back to user helper\n"); | 1116 | dev_warn(device, "Falling back to user helper\n"); |
1119 | ret = fw_load_from_user_helper(fw, name, device, | 1117 | ret = fw_load_from_user_helper(fw, name, device, |
1120 | opt_flags, timeout); | 1118 | opt_flags, timeout); |
@@ -1171,7 +1169,6 @@ request_firmware(const struct firmware **firmware_p, const char *name, | |||
1171 | } | 1169 | } |
1172 | EXPORT_SYMBOL(request_firmware); | 1170 | EXPORT_SYMBOL(request_firmware); |
1173 | 1171 | ||
1174 | #ifdef CONFIG_FW_LOADER_USER_HELPER | ||
1175 | /** | 1172 | /** |
1176 | * request_firmware: - load firmware directly without usermode helper | 1173 | * request_firmware: - load firmware directly without usermode helper |
1177 | * @firmware_p: pointer to firmware image | 1174 | * @firmware_p: pointer to firmware image |
@@ -1188,12 +1185,12 @@ int request_firmware_direct(const struct firmware **firmware_p, | |||
1188 | { | 1185 | { |
1189 | int ret; | 1186 | int ret; |
1190 | __module_get(THIS_MODULE); | 1187 | __module_get(THIS_MODULE); |
1191 | ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT); | 1188 | ret = _request_firmware(firmware_p, name, device, |
1189 | FW_OPT_UEVENT | FW_OPT_NO_WARN); | ||
1192 | module_put(THIS_MODULE); | 1190 | module_put(THIS_MODULE); |
1193 | return ret; | 1191 | return ret; |
1194 | } | 1192 | } |
1195 | EXPORT_SYMBOL_GPL(request_firmware_direct); | 1193 | EXPORT_SYMBOL_GPL(request_firmware_direct); |
1196 | #endif | ||
1197 | 1194 | ||
1198 | /** | 1195 | /** |
1199 | * release_firmware: - release the resource associated with a firmware image | 1196 | * release_firmware: - release the resource associated with a firmware image |
@@ -1277,7 +1274,7 @@ request_firmware_nowait( | |||
1277 | fw_work->context = context; | 1274 | fw_work->context = context; |
1278 | fw_work->cont = cont; | 1275 | fw_work->cont = cont; |
1279 | fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | | 1276 | fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | |
1280 | (uevent ? FW_OPT_UEVENT : 0); | 1277 | (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER); |
1281 | 1278 | ||
1282 | if (!try_module_get(module)) { | 1279 | if (!try_module_get(module)) { |
1283 | kfree(fw_work); | 1280 | kfree(fw_work); |
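With FW_OPT_NO_WARN and the now always-built request_firmware_direct(), a driver can probe for purely optional firmware without emitting warnings or waking the (now opt-in) user-helper fallback. A hedged sketch, not taken from the patch; the device pointer and file name are placeholders:

#include <linux/device.h>
#include <linux/firmware.h>

static int my_load_optional_patch(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* quiet lookup: no "Direct firmware load failed" warning and
	 * no fall back to the user-mode helper */
	err = request_firmware_direct(&fw, "vendor/optional-patch.bin", dev);
	if (err)
		return 0;	/* missing image is not fatal here */

	/* ... push fw->data / fw->size into the hardware ... */

	release_firmware(fw);
	return 0;
}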
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 00f2208949d1..ab4f4ce02722 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/idr.h> | 24 | #include <linux/idr.h> |
25 | #include <linux/acpi.h> | 25 | #include <linux/acpi.h> |
26 | #include <linux/clk/clk-conf.h> | 26 | #include <linux/clk/clk-conf.h> |
27 | #include <linux/limits.h> | ||
27 | 28 | ||
28 | #include "base.h" | 29 | #include "base.h" |
29 | #include "power/power.h" | 30 | #include "power/power.h" |
@@ -176,7 +177,7 @@ EXPORT_SYMBOL_GPL(platform_add_devices); | |||
176 | 177 | ||
177 | struct platform_object { | 178 | struct platform_object { |
178 | struct platform_device pdev; | 179 | struct platform_device pdev; |
179 | char name[1]; | 180 | char name[]; |
180 | }; | 181 | }; |
181 | 182 | ||
182 | /** | 183 | /** |
@@ -202,6 +203,7 @@ static void platform_device_release(struct device *dev) | |||
202 | kfree(pa->pdev.dev.platform_data); | 203 | kfree(pa->pdev.dev.platform_data); |
203 | kfree(pa->pdev.mfd_cell); | 204 | kfree(pa->pdev.mfd_cell); |
204 | kfree(pa->pdev.resource); | 205 | kfree(pa->pdev.resource); |
206 | kfree(pa->pdev.driver_override); | ||
205 | kfree(pa); | 207 | kfree(pa); |
206 | } | 208 | } |
207 | 209 | ||
@@ -217,7 +219,7 @@ struct platform_device *platform_device_alloc(const char *name, int id) | |||
217 | { | 219 | { |
218 | struct platform_object *pa; | 220 | struct platform_object *pa; |
219 | 221 | ||
220 | pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL); | 222 | pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); |
221 | if (pa) { | 223 | if (pa) { |
222 | strcpy(pa->name, name); | 224 | strcpy(pa->name, name); |
223 | pa->pdev.name = pa->name; | 225 | pa->pdev.name = pa->name; |
@@ -713,8 +715,49 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, | |||
713 | } | 715 | } |
714 | static DEVICE_ATTR_RO(modalias); | 716 | static DEVICE_ATTR_RO(modalias); |
715 | 717 | ||
718 | static ssize_t driver_override_store(struct device *dev, | ||
719 | struct device_attribute *attr, | ||
720 | const char *buf, size_t count) | ||
721 | { | ||
722 | struct platform_device *pdev = to_platform_device(dev); | ||
723 | char *driver_override, *old = pdev->driver_override, *cp; | ||
724 | |||
725 | if (count > PATH_MAX) | ||
726 | return -EINVAL; | ||
727 | |||
728 | driver_override = kstrndup(buf, count, GFP_KERNEL); | ||
729 | if (!driver_override) | ||
730 | return -ENOMEM; | ||
731 | |||
732 | cp = strchr(driver_override, '\n'); | ||
733 | if (cp) | ||
734 | *cp = '\0'; | ||
735 | |||
736 | if (strlen(driver_override)) { | ||
737 | pdev->driver_override = driver_override; | ||
738 | } else { | ||
739 | kfree(driver_override); | ||
740 | pdev->driver_override = NULL; | ||
741 | } | ||
742 | |||
743 | kfree(old); | ||
744 | |||
745 | return count; | ||
746 | } | ||
747 | |||
748 | static ssize_t driver_override_show(struct device *dev, | ||
749 | struct device_attribute *attr, char *buf) | ||
750 | { | ||
751 | struct platform_device *pdev = to_platform_device(dev); | ||
752 | |||
753 | return sprintf(buf, "%s\n", pdev->driver_override); | ||
754 | } | ||
755 | static DEVICE_ATTR_RW(driver_override); | ||
756 | |||
757 | |||
716 | static struct attribute *platform_dev_attrs[] = { | 758 | static struct attribute *platform_dev_attrs[] = { |
717 | &dev_attr_modalias.attr, | 759 | &dev_attr_modalias.attr, |
760 | &dev_attr_driver_override.attr, | ||
718 | NULL, | 761 | NULL, |
719 | }; | 762 | }; |
720 | ATTRIBUTE_GROUPS(platform_dev); | 763 | ATTRIBUTE_GROUPS(platform_dev); |
@@ -770,6 +813,10 @@ static int platform_match(struct device *dev, struct device_driver *drv) | |||
770 | struct platform_device *pdev = to_platform_device(dev); | 813 | struct platform_device *pdev = to_platform_device(dev); |
771 | struct platform_driver *pdrv = to_platform_driver(drv); | 814 | struct platform_driver *pdrv = to_platform_driver(drv); |
772 | 815 | ||
816 | /* When driver_override is set, only bind to the matching driver */ | ||
817 | if (pdev->driver_override) | ||
818 | return !strcmp(pdev->driver_override, drv->name); | ||
819 | |||
773 | /* Attempt an OF style match first */ | 820 | /* Attempt an OF style match first */ |
774 | if (of_driver_match_device(dev, drv)) | 821 | if (of_driver_match_device(dev, drv)) |
775 | return 1; | 822 | return 1; |
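The new driver_override attribute is consumed from user space. A small illustrative C program (not from the patch; the device and driver names are invented) that forces a platform device to bind to a specific driver and then asks the bus to re-probe it (unbind the device first if it is already bound):

#include <stdio.h>

int main(void)
{
	const char *dev = "fffd0000.example";	/* platform device name */
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/platform/devices/%s/driver_override", dev);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "my_driver\n");	/* an empty string clears the override */
	fclose(f);

	/* trigger a (re)probe so the override takes effect */
	f = fopen("/sys/bus/platform/drivers_probe", "w");
	if (!f)
		return 1;
	fprintf(f, "%s\n", dev);
	fclose(f);
	return 0;
}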
diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c
deleted file mode 100644
index a73fbf3b8e56..000000000000
--- a/drivers/base/reservation.c
+++ /dev/null
@@ -1,39 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012-2013 Canonical Ltd | ||
3 | * | ||
4 | * Based on bo.c which bears the following copyright notice, | ||
5 | * but is dual licensed: | ||
6 | * | ||
7 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | ||
8 | * All Rights Reserved. | ||
9 | * | ||
10 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
11 | * copy of this software and associated documentation files (the | ||
12 | * "Software"), to deal in the Software without restriction, including | ||
13 | * without limitation the rights to use, copy, modify, merge, publish, | ||
14 | * distribute, sub license, and/or sell copies of the Software, and to | ||
15 | * permit persons to whom the Software is furnished to do so, subject to | ||
16 | * the following conditions: | ||
17 | * | ||
18 | * The above copyright notice and this permission notice (including the | ||
19 | * next paragraph) shall be included in all copies or substantial portions | ||
20 | * of the Software. | ||
21 | * | ||
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
24 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
25 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
26 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
27 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
28 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
29 | * | ||
30 | **************************************************************************/ | ||
31 | /* | ||
32 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
33 | */ | ||
34 | |||
35 | #include <linux/reservation.h> | ||
36 | #include <linux/export.h> | ||
37 | |||
38 | DEFINE_WW_CLASS(reservation_ww_class); | ||
39 | EXPORT_SYMBOL(reservation_ww_class); | ||
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 6159b7752a64..f2cd6a2d40b4 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -212,9 +212,9 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev) | |||
212 | mutex_init(&gdev->lock); | 212 | mutex_init(&gdev->lock); |
213 | INIT_LIST_HEAD(&gdev->next); | 213 | INIT_LIST_HEAD(&gdev->next); |
214 | 214 | ||
215 | gdev->base = devm_request_and_ioremap(&pdev->dev, r); | 215 | gdev->base = devm_ioremap_resource(&pdev->dev, r); |
216 | if (!gdev->base) | 216 | if (IS_ERR(gdev->base)) |
217 | return -ENOMEM; | 217 | return PTR_ERR(gdev->base); |
218 | 218 | ||
219 | err = devm_request_irq(&pdev->dev, timeout_irq, | 219 | err = devm_request_irq(&pdev->dev, timeout_irq, |
220 | brcmstb_gisb_timeout_handler, 0, pdev->name, | 220 | brcmstb_gisb_timeout_handler, 0, pdev->name, |
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
new file mode 100644
index 000000000000..57a675f90cd0
--- /dev/null
+++ b/drivers/dma-buf/Makefile
@@ -0,0 +1 @@ | |||
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o | |||
diff --git a/drivers/base/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 840c7fa80983..f3014c448e1e 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,10 +25,13 @@ | |||
25 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/dma-buf.h> | 27 | #include <linux/dma-buf.h> |
28 | #include <linux/fence.h> | ||
28 | #include <linux/anon_inodes.h> | 29 | #include <linux/anon_inodes.h> |
29 | #include <linux/export.h> | 30 | #include <linux/export.h> |
30 | #include <linux/debugfs.h> | 31 | #include <linux/debugfs.h> |
31 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
33 | #include <linux/poll.h> | ||
34 | #include <linux/reservation.h> | ||
32 | 35 | ||
33 | static inline int is_dma_buf_file(struct file *); | 36 | static inline int is_dma_buf_file(struct file *); |
34 | 37 | ||
@@ -50,12 +53,25 @@ static int dma_buf_release(struct inode *inode, struct file *file) | |||
50 | 53 | ||
51 | BUG_ON(dmabuf->vmapping_counter); | 54 | BUG_ON(dmabuf->vmapping_counter); |
52 | 55 | ||
56 | /* | ||
57 | * Any fences that a dma-buf poll can wait on should be signaled | ||
58 | * before releasing dma-buf. This is the responsibility of each | ||
59 | * driver that uses the reservation objects. | ||
60 | * | ||
61 | * If you hit this BUG() it means someone dropped their ref to the | ||
62 | * dma-buf while still having pending operation to the buffer. | ||
63 | */ | ||
64 | BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); | ||
65 | |||
53 | dmabuf->ops->release(dmabuf); | 66 | dmabuf->ops->release(dmabuf); |
54 | 67 | ||
55 | mutex_lock(&db_list.lock); | 68 | mutex_lock(&db_list.lock); |
56 | list_del(&dmabuf->list_node); | 69 | list_del(&dmabuf->list_node); |
57 | mutex_unlock(&db_list.lock); | 70 | mutex_unlock(&db_list.lock); |
58 | 71 | ||
72 | if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) | ||
73 | reservation_object_fini(dmabuf->resv); | ||
74 | |||
59 | kfree(dmabuf); | 75 | kfree(dmabuf); |
60 | return 0; | 76 | return 0; |
61 | } | 77 | } |
@@ -103,10 +119,141 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) | |||
103 | return base + offset; | 119 | return base + offset; |
104 | } | 120 | } |
105 | 121 | ||
122 | static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb) | ||
123 | { | ||
124 | struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; | ||
125 | unsigned long flags; | ||
126 | |||
127 | spin_lock_irqsave(&dcb->poll->lock, flags); | ||
128 | wake_up_locked_poll(dcb->poll, dcb->active); | ||
129 | dcb->active = 0; | ||
130 | spin_unlock_irqrestore(&dcb->poll->lock, flags); | ||
131 | } | ||
132 | |||
133 | static unsigned int dma_buf_poll(struct file *file, poll_table *poll) | ||
134 | { | ||
135 | struct dma_buf *dmabuf; | ||
136 | struct reservation_object *resv; | ||
137 | struct reservation_object_list *fobj; | ||
138 | struct fence *fence_excl; | ||
139 | unsigned long events; | ||
140 | unsigned shared_count, seq; | ||
141 | |||
142 | dmabuf = file->private_data; | ||
143 | if (!dmabuf || !dmabuf->resv) | ||
144 | return POLLERR; | ||
145 | |||
146 | resv = dmabuf->resv; | ||
147 | |||
148 | poll_wait(file, &dmabuf->poll, poll); | ||
149 | |||
150 | events = poll_requested_events(poll) & (POLLIN | POLLOUT); | ||
151 | if (!events) | ||
152 | return 0; | ||
153 | |||
154 | retry: | ||
155 | seq = read_seqcount_begin(&resv->seq); | ||
156 | rcu_read_lock(); | ||
157 | |||
158 | fobj = rcu_dereference(resv->fence); | ||
159 | if (fobj) | ||
160 | shared_count = fobj->shared_count; | ||
161 | else | ||
162 | shared_count = 0; | ||
163 | fence_excl = rcu_dereference(resv->fence_excl); | ||
164 | if (read_seqcount_retry(&resv->seq, seq)) { | ||
165 | rcu_read_unlock(); | ||
166 | goto retry; | ||
167 | } | ||
168 | |||
169 | if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) { | ||
170 | struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; | ||
171 | unsigned long pevents = POLLIN; | ||
172 | |||
173 | if (shared_count == 0) | ||
174 | pevents |= POLLOUT; | ||
175 | |||
176 | spin_lock_irq(&dmabuf->poll.lock); | ||
177 | if (dcb->active) { | ||
178 | dcb->active |= pevents; | ||
179 | events &= ~pevents; | ||
180 | } else | ||
181 | dcb->active = pevents; | ||
182 | spin_unlock_irq(&dmabuf->poll.lock); | ||
183 | |||
184 | if (events & pevents) { | ||
185 | if (!fence_get_rcu(fence_excl)) { | ||
186 | /* force a recheck */ | ||
187 | events &= ~pevents; | ||
188 | dma_buf_poll_cb(NULL, &dcb->cb); | ||
189 | } else if (!fence_add_callback(fence_excl, &dcb->cb, | ||
190 | dma_buf_poll_cb)) { | ||
191 | events &= ~pevents; | ||
192 | fence_put(fence_excl); | ||
193 | } else { | ||
194 | /* | ||
195 | * No callback queued, wake up any additional | ||
196 | * waiters. | ||
197 | */ | ||
198 | fence_put(fence_excl); | ||
199 | dma_buf_poll_cb(NULL, &dcb->cb); | ||
200 | } | ||
201 | } | ||
202 | } | ||
203 | |||
204 | if ((events & POLLOUT) && shared_count > 0) { | ||
205 | struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; | ||
206 | int i; | ||
207 | |||
208 | /* Only queue a new callback if no event has fired yet */ | ||
209 | spin_lock_irq(&dmabuf->poll.lock); | ||
210 | if (dcb->active) | ||
211 | events &= ~POLLOUT; | ||
212 | else | ||
213 | dcb->active = POLLOUT; | ||
214 | spin_unlock_irq(&dmabuf->poll.lock); | ||
215 | |||
216 | if (!(events & POLLOUT)) | ||
217 | goto out; | ||
218 | |||
219 | for (i = 0; i < shared_count; ++i) { | ||
220 | struct fence *fence = rcu_dereference(fobj->shared[i]); | ||
221 | |||
222 | if (!fence_get_rcu(fence)) { | ||
223 | /* | ||
224 | * fence refcount dropped to zero, this means | ||
225 | * that fobj has been freed | ||
226 | * | ||
227 | * call dma_buf_poll_cb and force a recheck! | ||
228 | */ | ||
229 | events &= ~POLLOUT; | ||
230 | dma_buf_poll_cb(NULL, &dcb->cb); | ||
231 | break; | ||
232 | } | ||
233 | if (!fence_add_callback(fence, &dcb->cb, | ||
234 | dma_buf_poll_cb)) { | ||
235 | fence_put(fence); | ||
236 | events &= ~POLLOUT; | ||
237 | break; | ||
238 | } | ||
239 | fence_put(fence); | ||
240 | } | ||
241 | |||
242 | /* No callback queued, wake up any additional waiters. */ | ||
243 | if (i == shared_count) | ||
244 | dma_buf_poll_cb(NULL, &dcb->cb); | ||
245 | } | ||
246 | |||
247 | out: | ||
248 | rcu_read_unlock(); | ||
249 | return events; | ||
250 | } | ||
251 | |||
106 | static const struct file_operations dma_buf_fops = { | 252 | static const struct file_operations dma_buf_fops = { |
107 | .release = dma_buf_release, | 253 | .release = dma_buf_release, |
108 | .mmap = dma_buf_mmap_internal, | 254 | .mmap = dma_buf_mmap_internal, |
109 | .llseek = dma_buf_llseek, | 255 | .llseek = dma_buf_llseek, |
256 | .poll = dma_buf_poll, | ||
110 | }; | 257 | }; |
111 | 258 | ||
112 | /* | 259 | /* |
@@ -128,6 +275,7 @@ static inline int is_dma_buf_file(struct file *file) | |||
128 | * @size: [in] Size of the buffer | 275 | * @size: [in] Size of the buffer |
129 | * @flags: [in] mode flags for the file. | 276 | * @flags: [in] mode flags for the file. |
130 | * @exp_name: [in] name of the exporting module - useful for debugging. | 277 | * @exp_name: [in] name of the exporting module - useful for debugging. |
278 | * @resv: [in] reservation-object, NULL to allocate default one. | ||
131 | * | 279 | * |
132 | * Returns, on success, a newly created dma_buf object, which wraps the | 280 | * Returns, on success, a newly created dma_buf object, which wraps the |
133 | * supplied private data and operations for dma_buf_ops. On either missing | 281 | * supplied private data and operations for dma_buf_ops. On either missing |
@@ -135,10 +283,17 @@ static inline int is_dma_buf_file(struct file *file) | |||
135 | * | 283 | * |
136 | */ | 284 | */ |
137 | struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, | 285 | struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, |
138 | size_t size, int flags, const char *exp_name) | 286 | size_t size, int flags, const char *exp_name, |
287 | struct reservation_object *resv) | ||
139 | { | 288 | { |
140 | struct dma_buf *dmabuf; | 289 | struct dma_buf *dmabuf; |
141 | struct file *file; | 290 | struct file *file; |
291 | size_t alloc_size = sizeof(struct dma_buf); | ||
292 | if (!resv) | ||
293 | alloc_size += sizeof(struct reservation_object); | ||
294 | else | ||
295 | /* prevent &dma_buf[1] == dma_buf->resv */ | ||
296 | alloc_size += 1; | ||
142 | 297 | ||
143 | if (WARN_ON(!priv || !ops | 298 | if (WARN_ON(!priv || !ops |
144 | || !ops->map_dma_buf | 299 | || !ops->map_dma_buf |
@@ -150,7 +305,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, | |||
150 | return ERR_PTR(-EINVAL); | 305 | return ERR_PTR(-EINVAL); |
151 | } | 306 | } |
152 | 307 | ||
153 | dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL); | 308 | dmabuf = kzalloc(alloc_size, GFP_KERNEL); |
154 | if (dmabuf == NULL) | 309 | if (dmabuf == NULL) |
155 | return ERR_PTR(-ENOMEM); | 310 | return ERR_PTR(-ENOMEM); |
156 | 311 | ||
@@ -158,6 +313,15 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, | |||
158 | dmabuf->ops = ops; | 313 | dmabuf->ops = ops; |
159 | dmabuf->size = size; | 314 | dmabuf->size = size; |
160 | dmabuf->exp_name = exp_name; | 315 | dmabuf->exp_name = exp_name; |
316 | init_waitqueue_head(&dmabuf->poll); | ||
317 | dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; | ||
318 | dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; | ||
319 | |||
320 | if (!resv) { | ||
321 | resv = (struct reservation_object *)&dmabuf[1]; | ||
322 | reservation_object_init(resv); | ||
323 | } | ||
324 | dmabuf->resv = resv; | ||
161 | 325 | ||
162 | file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); | 326 | file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); |
163 | if (IS_ERR(file)) { | 327 | if (IS_ERR(file)) { |
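With the new .poll handler, an exported dma-buf file descriptor becomes pollable on the fences in its reservation object: POLLIN waits for the exclusive fence, POLLOUT for all fences. A user-space sketch, not part of the patch; how the fd is obtained (for example from some exporter's PRIME ioctl) is assumed:

#include <poll.h>

/* returns >0 when the buffer is idle, 0 on timeout, <0 on error */
static int wait_for_dmabuf_idle(int dmabuf_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLOUT,	/* wait for every pending fence */
	};

	return poll(&pfd, 1, timeout_ms);
}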
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
new file mode 100644
index 000000000000..4222cb2aa96a
--- /dev/null
+++ b/drivers/dma-buf/fence.c
@@ -0,0 +1,431 @@ | |||
1 | /* | ||
2 | * Fence mechanism for dma-buf and to allow for asynchronous dma access | ||
3 | * | ||
4 | * Copyright (C) 2012 Canonical Ltd | ||
5 | * Copyright (C) 2012 Texas Instruments | ||
6 | * | ||
7 | * Authors: | ||
8 | * Rob Clark <robdclark@gmail.com> | ||
9 | * Maarten Lankhorst <maarten.lankhorst@canonical.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License version 2 as published by | ||
13 | * the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
18 | * more details. | ||
19 | */ | ||
20 | |||
21 | #include <linux/slab.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/atomic.h> | ||
24 | #include <linux/fence.h> | ||
25 | |||
26 | #define CREATE_TRACE_POINTS | ||
27 | #include <trace/events/fence.h> | ||
28 | |||
29 | EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on); | ||
30 | EXPORT_TRACEPOINT_SYMBOL(fence_emit); | ||
31 | |||
32 | /** | ||
33 | * fence context counter: each execution context should have its own | ||
34 | * fence context, this allows checking if fences belong to the same | ||
35 | * context or not. One device can have multiple separate contexts, | ||
36 | * and they're used if some engine can run independently of another. | ||
37 | */ | ||
38 | static atomic_t fence_context_counter = ATOMIC_INIT(0); | ||
39 | |||
40 | /** | ||
41 | * fence_context_alloc - allocate an array of fence contexts | ||
42 | * @num: [in] amount of contexts to allocate | ||
43 | * | ||
44 | * This function will return the first index of the number of fences allocated. | ||
45 | * The fence context is used for setting fence->context to a unique number. | ||
46 | */ | ||
47 | unsigned fence_context_alloc(unsigned num) | ||
48 | { | ||
49 | BUG_ON(!num); | ||
50 | return atomic_add_return(num, &fence_context_counter) - num; | ||
51 | } | ||
52 | EXPORT_SYMBOL(fence_context_alloc); | ||
53 | |||
54 | /** | ||
55 | * fence_signal_locked - signal completion of a fence | ||
56 | * @fence: the fence to signal | ||
57 | * | ||
58 | * Signal completion for software callbacks on a fence, this will unblock | ||
59 | * fence_wait() calls and run all the callbacks added with | ||
60 | * fence_add_callback(). Can be called multiple times, but since a fence | ||
61 | * can only go from unsignaled to signaled state, it will only be effective | ||
62 | * the first time. | ||
63 | * | ||
64 | * Unlike fence_signal, this function must be called with fence->lock held. | ||
65 | */ | ||
66 | int fence_signal_locked(struct fence *fence) | ||
67 | { | ||
68 | struct fence_cb *cur, *tmp; | ||
69 | int ret = 0; | ||
70 | |||
71 | if (WARN_ON(!fence)) | ||
72 | return -EINVAL; | ||
73 | |||
74 | if (!ktime_to_ns(fence->timestamp)) { | ||
75 | fence->timestamp = ktime_get(); | ||
76 | smp_mb__before_atomic(); | ||
77 | } | ||
78 | |||
79 | if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | ||
80 | ret = -EINVAL; | ||
81 | |||
82 | /* | ||
83 | * we might have raced with the unlocked fence_signal, | ||
84 | * still run through all callbacks | ||
85 | */ | ||
86 | } else | ||
87 | trace_fence_signaled(fence); | ||
88 | |||
89 | list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { | ||
90 | list_del_init(&cur->node); | ||
91 | cur->func(fence, cur); | ||
92 | } | ||
93 | return ret; | ||
94 | } | ||
95 | EXPORT_SYMBOL(fence_signal_locked); | ||
96 | |||
97 | /** | ||
98 | * fence_signal - signal completion of a fence | ||
99 | * @fence: the fence to signal | ||
100 | * | ||
101 | * Signal completion for software callbacks on a fence, this will unblock | ||
102 | * fence_wait() calls and run all the callbacks added with | ||
103 | * fence_add_callback(). Can be called multiple times, but since a fence | ||
104 | * can only go from unsignaled to signaled state, it will only be effective | ||
105 | * the first time. | ||
106 | */ | ||
107 | int fence_signal(struct fence *fence) | ||
108 | { | ||
109 | unsigned long flags; | ||
110 | |||
111 | if (!fence) | ||
112 | return -EINVAL; | ||
113 | |||
114 | if (!ktime_to_ns(fence->timestamp)) { | ||
115 | fence->timestamp = ktime_get(); | ||
116 | smp_mb__before_atomic(); | ||
117 | } | ||
118 | |||
119 | if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
120 | return -EINVAL; | ||
121 | |||
122 | trace_fence_signaled(fence); | ||
123 | |||
124 | if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { | ||
125 | struct fence_cb *cur, *tmp; | ||
126 | |||
127 | spin_lock_irqsave(fence->lock, flags); | ||
128 | list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { | ||
129 | list_del_init(&cur->node); | ||
130 | cur->func(fence, cur); | ||
131 | } | ||
132 | spin_unlock_irqrestore(fence->lock, flags); | ||
133 | } | ||
134 | return 0; | ||
135 | } | ||
136 | EXPORT_SYMBOL(fence_signal); | ||
137 | |||
138 | /** | ||
139 | * fence_wait_timeout - sleep until the fence gets signaled | ||
140 | * or until timeout elapses | ||
141 | * @fence: [in] the fence to wait on | ||
142 | * @intr: [in] if true, do an interruptible wait | ||
143 | * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT | ||
144 | * | ||
145 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the | ||
146 | * remaining timeout in jiffies on success. Other error values may be | ||
147 | * returned on custom implementations. | ||
148 | * | ||
149 | * Performs a synchronous wait on this fence. It is assumed the caller | ||
150 | * directly or indirectly (buf-mgr between reservation and committing) | ||
151 | * holds a reference to the fence, otherwise the fence might be | ||
152 | * freed before return, resulting in undefined behavior. | ||
153 | */ | ||
154 | signed long | ||
155 | fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) | ||
156 | { | ||
157 | signed long ret; | ||
158 | |||
159 | if (WARN_ON(timeout < 0)) | ||
160 | return -EINVAL; | ||
161 | |||
162 | trace_fence_wait_start(fence); | ||
163 | ret = fence->ops->wait(fence, intr, timeout); | ||
164 | trace_fence_wait_end(fence); | ||
165 | return ret; | ||
166 | } | ||
167 | EXPORT_SYMBOL(fence_wait_timeout); | ||
168 | |||
169 | void fence_release(struct kref *kref) | ||
170 | { | ||
171 | struct fence *fence = | ||
172 | container_of(kref, struct fence, refcount); | ||
173 | |||
174 | trace_fence_destroy(fence); | ||
175 | |||
176 | BUG_ON(!list_empty(&fence->cb_list)); | ||
177 | |||
178 | if (fence->ops->release) | ||
179 | fence->ops->release(fence); | ||
180 | else | ||
181 | fence_free(fence); | ||
182 | } | ||
183 | EXPORT_SYMBOL(fence_release); | ||
184 | |||
185 | void fence_free(struct fence *fence) | ||
186 | { | ||
187 | kfree_rcu(fence, rcu); | ||
188 | } | ||
189 | EXPORT_SYMBOL(fence_free); | ||
190 | |||
191 | /** | ||
192 | * fence_enable_sw_signaling - enable signaling on fence | ||
193 | * @fence: [in] the fence to enable | ||
194 | * | ||
195 | * this will request for sw signaling to be enabled, to make the fence | ||
196 | * complete as soon as possible | ||
197 | */ | ||
198 | void fence_enable_sw_signaling(struct fence *fence) | ||
199 | { | ||
200 | unsigned long flags; | ||
201 | |||
202 | if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && | ||
203 | !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | ||
204 | trace_fence_enable_signal(fence); | ||
205 | |||
206 | spin_lock_irqsave(fence->lock, flags); | ||
207 | |||
208 | if (!fence->ops->enable_signaling(fence)) | ||
209 | fence_signal_locked(fence); | ||
210 | |||
211 | spin_unlock_irqrestore(fence->lock, flags); | ||
212 | } | ||
213 | } | ||
214 | EXPORT_SYMBOL(fence_enable_sw_signaling); | ||
215 | |||
216 | /** | ||
217 | * fence_add_callback - add a callback to be called when the fence | ||
218 | * is signaled | ||
219 | * @fence: [in] the fence to wait on | ||
220 | * @cb: [in] the callback to register | ||
221 | * @func: [in] the function to call | ||
222 | * | ||
223 | * cb will be initialized by fence_add_callback, no initialization | ||
224 | * by the caller is required. Any number of callbacks can be registered | ||
225 | * to a fence, but a callback can only be registered to one fence at a time. | ||
226 | * | ||
227 | * Note that the callback can be called from an atomic context. If | ||
228 | * fence is already signaled, this function will return -ENOENT (and | ||
229 | * *not* call the callback) | ||
230 | * | ||
231 | * Add a software callback to the fence. Same restrictions apply to | ||
232 | * refcount as it does to fence_wait, however the caller doesn't need to | ||
233 | * keep a refcount to fence afterwards: when software access is enabled, | ||
234 | * the creator of the fence is required to keep the fence alive until | ||
235 | * after it signals with fence_signal. The callback itself can be called | ||
236 | * from irq context. | ||
237 | * | ||
238 | */ | ||
239 | int fence_add_callback(struct fence *fence, struct fence_cb *cb, | ||
240 | fence_func_t func) | ||
241 | { | ||
242 | unsigned long flags; | ||
243 | int ret = 0; | ||
244 | bool was_set; | ||
245 | |||
246 | if (WARN_ON(!fence || !func)) | ||
247 | return -EINVAL; | ||
248 | |||
249 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { | ||
250 | INIT_LIST_HEAD(&cb->node); | ||
251 | return -ENOENT; | ||
252 | } | ||
253 | |||
254 | spin_lock_irqsave(fence->lock, flags); | ||
255 | |||
256 | was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); | ||
257 | |||
258 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
259 | ret = -ENOENT; | ||
260 | else if (!was_set) { | ||
261 | trace_fence_enable_signal(fence); | ||
262 | |||
263 | if (!fence->ops->enable_signaling(fence)) { | ||
264 | fence_signal_locked(fence); | ||
265 | ret = -ENOENT; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | if (!ret) { | ||
270 | cb->func = func; | ||
271 | list_add_tail(&cb->node, &fence->cb_list); | ||
272 | } else | ||
273 | INIT_LIST_HEAD(&cb->node); | ||
274 | spin_unlock_irqrestore(fence->lock, flags); | ||
275 | |||
276 | return ret; | ||
277 | } | ||
278 | EXPORT_SYMBOL(fence_add_callback); | ||
279 | |||
280 | /** | ||
281 | * fence_remove_callback - remove a callback from the signaling list | ||
282 | * @fence: [in] the fence to wait on | ||
283 | * @cb: [in] the callback to remove | ||
284 | * | ||
285 | * Remove a previously queued callback from the fence. This function returns | ||
286 | * true if the callback is succesfully removed, or false if the fence has | ||
287 | * already been signaled. | ||
288 | * | ||
289 | * *WARNING*: | ||
290 | * Cancelling a callback should only be done if you really know what you're | ||
291 | * doing, since deadlocks and race conditions could occur all too easily. For | ||
292 | * this reason, it should only ever be done on hardware lockup recovery, | ||
293 | * with a reference held to the fence. | ||
294 | */ | ||
295 | bool | ||
296 | fence_remove_callback(struct fence *fence, struct fence_cb *cb) | ||
297 | { | ||
298 | unsigned long flags; | ||
299 | bool ret; | ||
300 | |||
301 | spin_lock_irqsave(fence->lock, flags); | ||
302 | |||
303 | ret = !list_empty(&cb->node); | ||
304 | if (ret) | ||
305 | list_del_init(&cb->node); | ||
306 | |||
307 | spin_unlock_irqrestore(fence->lock, flags); | ||
308 | |||
309 | return ret; | ||
310 | } | ||
311 | EXPORT_SYMBOL(fence_remove_callback); | ||
312 | |||
313 | struct default_wait_cb { | ||
314 | struct fence_cb base; | ||
315 | struct task_struct *task; | ||
316 | }; | ||
317 | |||
318 | static void | ||
319 | fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) | ||
320 | { | ||
321 | struct default_wait_cb *wait = | ||
322 | container_of(cb, struct default_wait_cb, base); | ||
323 | |||
324 | wake_up_state(wait->task, TASK_NORMAL); | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * fence_default_wait - default sleep until the fence gets signaled | ||
329 | * or until timeout elapses | ||
330 | * @fence: [in] the fence to wait on | ||
331 | * @intr: [in] if true, do an interruptible wait | ||
332 | * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT | ||
333 | * | ||
334 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the | ||
335 | * remaining timeout in jiffies on success. | ||
336 | */ | ||
337 | signed long | ||
338 | fence_default_wait(struct fence *fence, bool intr, signed long timeout) | ||
339 | { | ||
340 | struct default_wait_cb cb; | ||
341 | unsigned long flags; | ||
342 | signed long ret = timeout; | ||
343 | bool was_set; | ||
344 | |||
345 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
346 | return timeout; | ||
347 | |||
348 | spin_lock_irqsave(fence->lock, flags); | ||
349 | |||
350 | if (intr && signal_pending(current)) { | ||
351 | ret = -ERESTARTSYS; | ||
352 | goto out; | ||
353 | } | ||
354 | |||
355 | was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); | ||
356 | |||
357 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
358 | goto out; | ||
359 | |||
360 | if (!was_set) { | ||
361 | trace_fence_enable_signal(fence); | ||
362 | |||
363 | if (!fence->ops->enable_signaling(fence)) { | ||
364 | fence_signal_locked(fence); | ||
365 | goto out; | ||
366 | } | ||
367 | } | ||
368 | |||
369 | cb.base.func = fence_default_wait_cb; | ||
370 | cb.task = current; | ||
371 | list_add(&cb.base.node, &fence->cb_list); | ||
372 | |||
373 | while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { | ||
374 | if (intr) | ||
375 | __set_current_state(TASK_INTERRUPTIBLE); | ||
376 | else | ||
377 | __set_current_state(TASK_UNINTERRUPTIBLE); | ||
378 | spin_unlock_irqrestore(fence->lock, flags); | ||
379 | |||
380 | ret = schedule_timeout(ret); | ||
381 | |||
382 | spin_lock_irqsave(fence->lock, flags); | ||
383 | if (ret > 0 && intr && signal_pending(current)) | ||
384 | ret = -ERESTARTSYS; | ||
385 | } | ||
386 | |||
387 | if (!list_empty(&cb.base.node)) | ||
388 | list_del(&cb.base.node); | ||
389 | __set_current_state(TASK_RUNNING); | ||
390 | |||
391 | out: | ||
392 | spin_unlock_irqrestore(fence->lock, flags); | ||
393 | return ret; | ||
394 | } | ||
395 | EXPORT_SYMBOL(fence_default_wait); | ||
396 | |||
397 | /** | ||
398 | * fence_init - Initialize a custom fence. | ||
399 | * @fence: [in] the fence to initialize | ||
400 | * @ops: [in] the fence_ops for operations on this fence | ||
401 | * @lock: [in] the irqsafe spinlock to use for locking this fence | ||
402 | * @context: [in] the execution context this fence is run on | ||
403 | * @seqno: [in] a linear increasing sequence number for this context | ||
404 | * | ||
405 | * Initializes an allocated fence, the caller doesn't have to keep its | ||
406 | * refcount after committing with this fence, but it will need to hold a | ||
407 | * refcount again if fence_ops.enable_signaling gets called. This can | ||
408 | * be used for other implementing other types of fence. | ||
409 | * | ||
410 | * context and seqno are used for easy comparison between fences, allowing | ||
411 | * to check which fence is later by simply using fence_later. | ||
412 | */ | ||
413 | void | ||
414 | fence_init(struct fence *fence, const struct fence_ops *ops, | ||
415 | spinlock_t *lock, unsigned context, unsigned seqno) | ||
416 | { | ||
417 | BUG_ON(!lock); | ||
418 | BUG_ON(!ops || !ops->wait || !ops->enable_signaling || | ||
419 | !ops->get_driver_name || !ops->get_timeline_name); | ||
420 | |||
421 | kref_init(&fence->refcount); | ||
422 | fence->ops = ops; | ||
423 | INIT_LIST_HEAD(&fence->cb_list); | ||
424 | fence->lock = lock; | ||
425 | fence->context = context; | ||
426 | fence->seqno = seqno; | ||
427 | fence->flags = 0UL; | ||
428 | |||
429 | trace_fence_init(fence); | ||
430 | } | ||
431 | EXPORT_SYMBOL(fence_init); | ||
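Putting the exported fence API together, a driver wrapping its own hardware sync point might look roughly like the sketch below. It is not part of the patch; every my_* identifier is invented, and enable_signaling() would normally arm a real completion interrupt:

#include <linux/fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_fence_lock);
static unsigned my_fence_context;	/* from fence_context_alloc(1) at init */

static const char *my_get_driver_name(struct fence *f)
{
	return "my-driver";
}

static const char *my_get_timeline_name(struct fence *f)
{
	return "my-ring-0";
}

static bool my_enable_signaling(struct fence *f)
{
	/* arm the interrupt that will eventually call fence_signal() */
	return true;	/* false would mean "already signaled" */
}

static const struct fence_ops my_fence_ops = {
	.get_driver_name	= my_get_driver_name,
	.get_timeline_name	= my_get_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= fence_default_wait,
};

static struct fence *my_fence_create(unsigned seqno)
{
	struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	fence_init(f, &my_fence_ops, &my_fence_lock,
		   my_fence_context, seqno);
	return f;
}

/* called from the completion interrupt once the hardware is done */
static void my_fence_complete(struct fence *f)
{
	fence_signal(f);
	fence_put(f);	/* drop the driver's reference */
}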
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
new file mode 100644
index 000000000000..3c97c8fa8d02
--- /dev/null
+++ b/drivers/dma-buf/reservation.c
@@ -0,0 +1,477 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst) | ||
3 | * | ||
4 | * Based on bo.c which bears the following copyright notice, | ||
5 | * but is dual licensed: | ||
6 | * | ||
7 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | ||
8 | * All Rights Reserved. | ||
9 | * | ||
10 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
11 | * copy of this software and associated documentation files (the | ||
12 | * "Software"), to deal in the Software without restriction, including | ||
13 | * without limitation the rights to use, copy, modify, merge, publish, | ||
14 | * distribute, sub license, and/or sell copies of the Software, and to | ||
15 | * permit persons to whom the Software is furnished to do so, subject to | ||
16 | * the following conditions: | ||
17 | * | ||
18 | * The above copyright notice and this permission notice (including the | ||
19 | * next paragraph) shall be included in all copies or substantial portions | ||
20 | * of the Software. | ||
21 | * | ||
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
24 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
25 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
26 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
27 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
28 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
29 | * | ||
30 | **************************************************************************/ | ||
31 | /* | ||
32 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
33 | */ | ||
34 | |||
35 | #include <linux/reservation.h> | ||
36 | #include <linux/export.h> | ||
37 | |||
38 | DEFINE_WW_CLASS(reservation_ww_class); | ||
39 | EXPORT_SYMBOL(reservation_ww_class); | ||
40 | |||
41 | struct lock_class_key reservation_seqcount_class; | ||
42 | EXPORT_SYMBOL(reservation_seqcount_class); | ||
43 | |||
44 | const char reservation_seqcount_string[] = "reservation_seqcount"; | ||
45 | EXPORT_SYMBOL(reservation_seqcount_string); | ||
46 | /* | ||
47 | * Reserve space to add a shared fence to a reservation_object, | ||
48 | * must be called with obj->lock held. | ||
49 | */ | ||
50 | int reservation_object_reserve_shared(struct reservation_object *obj) | ||
51 | { | ||
52 | struct reservation_object_list *fobj, *old; | ||
53 | u32 max; | ||
54 | |||
55 | old = reservation_object_get_list(obj); | ||
56 | |||
57 | if (old && old->shared_max) { | ||
58 | if (old->shared_count < old->shared_max) { | ||
59 | /* perform an in-place update */ | ||
60 | kfree(obj->staged); | ||
61 | obj->staged = NULL; | ||
62 | return 0; | ||
63 | } else | ||
64 | max = old->shared_max * 2; | ||
65 | } else | ||
66 | max = 4; | ||
67 | |||
68 | /* | ||
69 | * resize obj->staged or allocate if it doesn't exist, | ||
70 | * noop if already correct size | ||
71 | */ | ||
72 | fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]), | ||
73 | GFP_KERNEL); | ||
74 | if (!fobj) | ||
75 | return -ENOMEM; | ||
76 | |||
77 | obj->staged = fobj; | ||
78 | fobj->shared_max = max; | ||
79 | return 0; | ||
80 | } | ||
81 | EXPORT_SYMBOL(reservation_object_reserve_shared); | ||
82 | |||
83 | static void | ||
84 | reservation_object_add_shared_inplace(struct reservation_object *obj, | ||
85 | struct reservation_object_list *fobj, | ||
86 | struct fence *fence) | ||
87 | { | ||
88 | u32 i; | ||
89 | |||
90 | fence_get(fence); | ||
91 | |||
92 | preempt_disable(); | ||
93 | write_seqcount_begin(&obj->seq); | ||
94 | |||
95 | for (i = 0; i < fobj->shared_count; ++i) { | ||
96 | struct fence *old_fence; | ||
97 | |||
98 | old_fence = rcu_dereference_protected(fobj->shared[i], | ||
99 | reservation_object_held(obj)); | ||
100 | |||
101 | if (old_fence->context == fence->context) { | ||
102 | /* memory barrier is added by write_seqcount_begin */ | ||
103 | RCU_INIT_POINTER(fobj->shared[i], fence); | ||
104 | write_seqcount_end(&obj->seq); | ||
105 | preempt_enable(); | ||
106 | |||
107 | fence_put(old_fence); | ||
108 | return; | ||
109 | } | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * memory barrier is added by write_seqcount_begin, | ||
114 | * fobj->shared_count is protected by this lock too | ||
115 | */ | ||
116 | RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); | ||
117 | fobj->shared_count++; | ||
118 | |||
119 | write_seqcount_end(&obj->seq); | ||
120 | preempt_enable(); | ||
121 | } | ||
122 | |||
123 | static void | ||
124 | reservation_object_add_shared_replace(struct reservation_object *obj, | ||
125 | struct reservation_object_list *old, | ||
126 | struct reservation_object_list *fobj, | ||
127 | struct fence *fence) | ||
128 | { | ||
129 | unsigned i; | ||
130 | struct fence *old_fence = NULL; | ||
131 | |||
132 | fence_get(fence); | ||
133 | |||
134 | if (!old) { | ||
135 | RCU_INIT_POINTER(fobj->shared[0], fence); | ||
136 | fobj->shared_count = 1; | ||
137 | goto done; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * no need to bump fence refcounts, rcu_read access | ||
142 | * requires the use of kref_get_unless_zero, and the | ||
143 | * references from the old struct are carried over to | ||
144 | * the new. | ||
145 | */ | ||
146 | fobj->shared_count = old->shared_count; | ||
147 | |||
148 | for (i = 0; i < old->shared_count; ++i) { | ||
149 | struct fence *check; | ||
150 | |||
151 | check = rcu_dereference_protected(old->shared[i], | ||
152 | reservation_object_held(obj)); | ||
153 | |||
154 | if (!old_fence && check->context == fence->context) { | ||
155 | old_fence = check; | ||
156 | RCU_INIT_POINTER(fobj->shared[i], fence); | ||
157 | } else | ||
158 | RCU_INIT_POINTER(fobj->shared[i], check); | ||
159 | } | ||
160 | if (!old_fence) { | ||
161 | RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); | ||
162 | fobj->shared_count++; | ||
163 | } | ||
164 | |||
165 | done: | ||
166 | preempt_disable(); | ||
167 | write_seqcount_begin(&obj->seq); | ||
168 | /* | ||
169 | * RCU_INIT_POINTER can be used here, | ||
170 | * seqcount provides the necessary barriers | ||
171 | */ | ||
172 | RCU_INIT_POINTER(obj->fence, fobj); | ||
173 | write_seqcount_end(&obj->seq); | ||
174 | preempt_enable(); | ||
175 | |||
176 | if (old) | ||
177 | kfree_rcu(old, rcu); | ||
178 | |||
179 | if (old_fence) | ||
180 | fence_put(old_fence); | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * Add a fence to a shared slot, obj->lock must be held, and | ||
185 | * reservation_object_reserve_shared() has been called. | ||
186 | */ | ||
187 | void reservation_object_add_shared_fence(struct reservation_object *obj, | ||
188 | struct fence *fence) | ||
189 | { | ||
190 | struct reservation_object_list *old, *fobj = obj->staged; | ||
191 | |||
192 | old = reservation_object_get_list(obj); | ||
193 | obj->staged = NULL; | ||
194 | |||
195 | if (!fobj) { | ||
196 | BUG_ON(old->shared_count >= old->shared_max); | ||
197 | reservation_object_add_shared_inplace(obj, old, fence); | ||
198 | } else | ||
199 | reservation_object_add_shared_replace(obj, old, fobj, fence); | ||
200 | } | ||
201 | EXPORT_SYMBOL(reservation_object_add_shared_fence); | ||
202 | |||
203 | void reservation_object_add_excl_fence(struct reservation_object *obj, | ||
204 | struct fence *fence) | ||
205 | { | ||
206 | struct fence *old_fence = reservation_object_get_excl(obj); | ||
207 | struct reservation_object_list *old; | ||
208 | u32 i = 0; | ||
209 | |||
210 | old = reservation_object_get_list(obj); | ||
211 | if (old) | ||
212 | i = old->shared_count; | ||
213 | |||
214 | if (fence) | ||
215 | fence_get(fence); | ||
216 | |||
217 | preempt_disable(); | ||
218 | write_seqcount_begin(&obj->seq); | ||
219 | /* write_seqcount_begin provides the necessary memory barrier */ | ||
220 | RCU_INIT_POINTER(obj->fence_excl, fence); | ||
221 | if (old) | ||
222 | old->shared_count = 0; | ||
223 | write_seqcount_end(&obj->seq); | ||
224 | preempt_enable(); | ||
225 | |||
226 | /* inplace update, no shared fences */ | ||
227 | while (i--) | ||
228 | fence_put(rcu_dereference_protected(old->shared[i], | ||
229 | reservation_object_held(obj))); | ||
230 | |||
231 | if (old_fence) | ||
232 | fence_put(old_fence); | ||
233 | } | ||
234 | EXPORT_SYMBOL(reservation_object_add_excl_fence); | ||
235 | |||
236 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | ||
237 | struct fence **pfence_excl, | ||
238 | unsigned *pshared_count, | ||
239 | struct fence ***pshared) | ||
240 | { | ||
241 | unsigned shared_count = 0; | ||
242 | unsigned retry = 1; | ||
243 | struct fence **shared = NULL, *fence_excl = NULL; | ||
244 | int ret = 0; | ||
245 | |||
246 | while (retry) { | ||
247 | struct reservation_object_list *fobj; | ||
248 | unsigned seq; | ||
249 | |||
250 | seq = read_seqcount_begin(&obj->seq); | ||
251 | |||
252 | rcu_read_lock(); | ||
253 | |||
254 | fobj = rcu_dereference(obj->fence); | ||
255 | if (fobj) { | ||
256 | struct fence **nshared; | ||
257 | size_t sz = sizeof(*shared) * fobj->shared_max; | ||
258 | |||
259 | nshared = krealloc(shared, sz, | ||
260 | GFP_NOWAIT | __GFP_NOWARN); | ||
261 | if (!nshared) { | ||
262 | rcu_read_unlock(); | ||
263 | nshared = krealloc(shared, sz, GFP_KERNEL); | ||
264 | if (nshared) { | ||
265 | shared = nshared; | ||
266 | continue; | ||
267 | } | ||
268 | |||
269 | ret = -ENOMEM; | ||
270 | shared_count = 0; | ||
271 | break; | ||
272 | } | ||
273 | shared = nshared; | ||
274 | memcpy(shared, fobj->shared, sz); | ||
275 | shared_count = fobj->shared_count; | ||
276 | } else | ||
277 | shared_count = 0; | ||
278 | fence_excl = rcu_dereference(obj->fence_excl); | ||
279 | |||
280 | retry = read_seqcount_retry(&obj->seq, seq); | ||
281 | if (retry) | ||
282 | goto unlock; | ||
283 | |||
284 | if (!fence_excl || fence_get_rcu(fence_excl)) { | ||
285 | unsigned i; | ||
286 | |||
287 | for (i = 0; i < shared_count; ++i) { | ||
288 | if (fence_get_rcu(shared[i])) | ||
289 | continue; | ||
290 | |||
291 | /* uh oh, refcount failed, abort and retry */ | ||
292 | while (i--) | ||
293 | fence_put(shared[i]); | ||
294 | |||
295 | if (fence_excl) { | ||
296 | fence_put(fence_excl); | ||
297 | fence_excl = NULL; | ||
298 | } | ||
299 | |||
300 | retry = 1; | ||
301 | break; | ||
302 | } | ||
303 | } else | ||
304 | retry = 1; | ||
305 | |||
306 | unlock: | ||
307 | rcu_read_unlock(); | ||
308 | } | ||
309 | *pshared_count = shared_count; | ||
310 | if (shared_count) | ||
311 | *pshared = shared; | ||
312 | else { | ||
313 | *pshared = NULL; | ||
314 | kfree(shared); | ||
315 | } | ||
316 | *pfence_excl = fence_excl; | ||
317 | |||
318 | return ret; | ||
319 | } | ||
320 | EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); | ||
321 | |||
322 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | ||
323 | bool wait_all, bool intr, | ||
324 | unsigned long timeout) | ||
325 | { | ||
326 | struct fence *fence; | ||
327 | unsigned seq, shared_count, i = 0; | ||
328 | long ret = timeout; | ||
329 | |||
330 | retry: | ||
331 | fence = NULL; | ||
332 | shared_count = 0; | ||
333 | seq = read_seqcount_begin(&obj->seq); | ||
334 | rcu_read_lock(); | ||
335 | |||
336 | if (wait_all) { | ||
337 | struct reservation_object_list *fobj = rcu_dereference(obj->fence); | ||
338 | |||
339 | if (fobj) | ||
340 | shared_count = fobj->shared_count; | ||
341 | |||
342 | if (read_seqcount_retry(&obj->seq, seq)) | ||
343 | goto unlock_retry; | ||
344 | |||
345 | for (i = 0; i < shared_count; ++i) { | ||
346 | struct fence *lfence = rcu_dereference(fobj->shared[i]); | ||
347 | |||
348 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) | ||
349 | continue; | ||
350 | |||
351 | if (!fence_get_rcu(lfence)) | ||
352 | goto unlock_retry; | ||
353 | |||
354 | if (fence_is_signaled(lfence)) { | ||
355 | fence_put(lfence); | ||
356 | continue; | ||
357 | } | ||
358 | |||
359 | fence = lfence; | ||
360 | break; | ||
361 | } | ||
362 | } | ||
363 | |||
364 | if (!shared_count) { | ||
365 | struct fence *fence_excl = rcu_dereference(obj->fence_excl); | ||
366 | |||
367 | if (read_seqcount_retry(&obj->seq, seq)) | ||
368 | goto unlock_retry; | ||
369 | |||
370 | if (fence_excl && | ||
371 | !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) { | ||
372 | if (!fence_get_rcu(fence_excl)) | ||
373 | goto unlock_retry; | ||
374 | |||
375 | if (fence_is_signaled(fence_excl)) | ||
376 | fence_put(fence_excl); | ||
377 | else | ||
378 | fence = fence_excl; | ||
379 | } | ||
380 | } | ||
381 | |||
382 | rcu_read_unlock(); | ||
383 | if (fence) { | ||
384 | ret = fence_wait_timeout(fence, intr, ret); | ||
385 | fence_put(fence); | ||
386 | if (ret > 0 && wait_all && (i + 1 < shared_count)) | ||
387 | goto retry; | ||
388 | } | ||
389 | return ret; | ||
390 | |||
391 | unlock_retry: | ||
392 | rcu_read_unlock(); | ||
393 | goto retry; | ||
394 | } | ||
395 | EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); | ||
396 | |||
397 | |||
398 | static inline int | ||
399 | reservation_object_test_signaled_single(struct fence *passed_fence) | ||
400 | { | ||
401 | struct fence *fence, *lfence = passed_fence; | ||
402 | int ret = 1; | ||
403 | |||
404 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { | ||
405 | |||
406 | |||
407 | fence = fence_get_rcu(lfence); | ||
408 | if (!fence) | ||
409 | return -1; | ||
410 | |||
411 | ret = !!fence_is_signaled(fence); | ||
412 | fence_put(fence); | ||
413 | } | ||
414 | return ret; | ||
415 | } | ||
416 | |||
417 | bool reservation_object_test_signaled_rcu(struct reservation_object *obj, | ||
418 | bool test_all) | ||
419 | { | ||
420 | unsigned seq, shared_count; | ||
421 | int ret = true; | ||
422 | |||
423 | retry: | ||
424 | shared_count = 0; | ||
425 | seq = read_seqcount_begin(&obj->seq); | ||
426 | rcu_read_lock(); | ||
427 | |||
428 | if (test_all) { | ||
429 | unsigned i; | ||
430 | |||
431 | struct reservation_object_list *fobj = rcu_dereference(obj->fence); | ||
432 | |||
433 | if (fobj) | ||
434 | shared_count = fobj->shared_count; | ||
435 | |||
436 | if (read_seqcount_retry(&obj->seq, seq)) | ||
437 | goto unlock_retry; | ||
438 | |||
439 | for (i = 0; i < shared_count; ++i) { | ||
440 | struct fence *fence = rcu_dereference(fobj->shared[i]); | ||
441 | |||
442 | ret = reservation_object_test_signaled_single(fence); | ||
443 | if (ret < 0) | ||
444 | goto unlock_retry; | ||
445 | else if (!ret) | ||
446 | break; | ||
447 | } | ||
448 | |||
449 | /* | ||
450 | * There could be a read_seqcount_retry here, but nothing cares | ||
451 | * about whether it's the old or newer fence pointers that are | ||
452 | * signaled. That race could still have happened after checking | ||
453 | * read_seqcount_retry. If you care, use ww_mutex_lock. | ||
454 | */ | ||
455 | } | ||
456 | |||
457 | if (!shared_count) { | ||
458 | struct fence *fence_excl = rcu_dereference(obj->fence_excl); | ||
459 | |||
460 | if (read_seqcount_retry(&obj->seq, seq)) | ||
461 | goto unlock_retry; | ||
462 | |||
463 | if (fence_excl) { | ||
464 | ret = reservation_object_test_signaled_single(fence_excl); | ||
465 | if (ret < 0) | ||
466 | goto unlock_retry; | ||
467 | } | ||
468 | } | ||
469 | |||
470 | rcu_read_unlock(); | ||
471 | return ret; | ||
472 | |||
473 | unlock_retry: | ||
474 | rcu_read_unlock(); | ||
475 | goto retry; | ||
476 | } | ||
477 | EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu); | ||
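As a rough illustration (not part of this changeset), the helpers above are meant to be used roughly as sketched below: a writer takes obj->lock (the ww_mutex of reservation_ww_class), reserves a shared slot, and only then publishes its fence, while readers that cannot take the lock block on the fences through the _rcu variants. The my_* names are made up for the example.

	/* illustrative only -- attach a shared fence while holding the lock */
	static int my_attach_shared_fence(struct reservation_object *resv,
					  struct fence *fence,
					  struct ww_acquire_ctx *ctx)
	{
		int ret;

		/* -EDEADLK backoff handling omitted for brevity */
		ret = ww_mutex_lock(&resv->lock, ctx);
		if (ret)
			return ret;

		/* guarantee a free shared slot before publishing the fence */
		ret = reservation_object_reserve_shared(resv);
		if (!ret)
			reservation_object_add_shared_fence(resv, fence);

		ww_mutex_unlock(&resv->lock);
		return ret;
	}

	/* readers that cannot take the lock wait on the fences via RCU */
	static long my_wait_idle(struct reservation_object *resv, bool intr)
	{
		return reservation_object_wait_timeout_rcu(resv,
						true /* wait_all */, intr,
						MAX_SCHEDULE_TIMEOUT);
	}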
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c new file mode 100644 index 000000000000..7d12a39a4b57 --- /dev/null +++ b/drivers/dma-buf/seqno-fence.c | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * seqno-fence, using a dma-buf to synchronize fencing | ||
3 | * | ||
4 | * Copyright (C) 2012 Texas Instruments | ||
5 | * Copyright (C) 2012-2014 Canonical Ltd | ||
6 | * Authors: | ||
7 | * Rob Clark <robdclark@gmail.com> | ||
8 | * Maarten Lankhorst <maarten.lankhorst@canonical.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published by | ||
12 | * the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | */ | ||
19 | |||
20 | #include <linux/slab.h> | ||
21 | #include <linux/export.h> | ||
22 | #include <linux/seqno-fence.h> | ||
23 | |||
24 | static const char *seqno_fence_get_driver_name(struct fence *fence) | ||
25 | { | ||
26 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | ||
27 | return seqno_fence->ops->get_driver_name(fence); | ||
28 | } | ||
29 | |||
30 | static const char *seqno_fence_get_timeline_name(struct fence *fence) | ||
31 | { | ||
32 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | ||
33 | return seqno_fence->ops->get_timeline_name(fence); | ||
34 | } | ||
35 | |||
36 | static bool seqno_enable_signaling(struct fence *fence) | ||
37 | { | ||
38 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | ||
39 | return seqno_fence->ops->enable_signaling(fence); | ||
40 | } | ||
41 | |||
42 | static bool seqno_signaled(struct fence *fence) | ||
43 | { | ||
44 | struct seqno_fence *seqno_fence = to_seqno_fence(fence); | ||
45 | return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); | ||
46 | } | ||
47 | |||
48 | static void seqno_release(struct fence *fence) | ||
49 | { | ||
50 | struct seqno_fence *f = to_seqno_fence(fence); | ||
51 | |||
52 | dma_buf_put(f->sync_buf); | ||
53 | if (f->ops->release) | ||
54 | f->ops->release(fence); | ||
55 | else | ||
56 | fence_free(&f->base); | ||
57 | } | ||
58 | |||
59 | static signed long seqno_wait(struct fence *fence, bool intr, signed long timeout) | ||
60 | { | ||
61 | struct seqno_fence *f = to_seqno_fence(fence); | ||
62 | return f->ops->wait(fence, intr, timeout); | ||
63 | } | ||
64 | |||
65 | const struct fence_ops seqno_fence_ops = { | ||
66 | .get_driver_name = seqno_fence_get_driver_name, | ||
67 | .get_timeline_name = seqno_fence_get_timeline_name, | ||
68 | .enable_signaling = seqno_enable_signaling, | ||
69 | .signaled = seqno_signaled, | ||
70 | .wait = seqno_wait, | ||
71 | .release = seqno_release, | ||
72 | }; | ||
73 | EXPORT_SYMBOL(seqno_fence_ops); | ||
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 81c34f949dfc..3aedf9e993e6 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c | |||
@@ -1039,11 +1039,9 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num, | |||
1039 | if (ret) | 1039 | if (ret) |
1040 | return ret; | 1040 | return ret; |
1041 | 1041 | ||
1042 | base = devm_request_and_ioremap(dev->dev, res); | 1042 | base = devm_ioremap_resource(dev->dev, res); |
1043 | if (!base) { | 1043 | if (IS_ERR(base)) |
1044 | DRM_ERROR("failed to ioremap register\n"); | 1044 | return PTR_ERR(base); |
1045 | return -ENOMEM; | ||
1046 | } | ||
1047 | 1045 | ||
1048 | dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL); | 1046 | dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL); |
1049 | if (!dcrtc) { | 1047 | if (!dcrtc) { |
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index bb9b642d8485..7496f55611a5 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c | |||
@@ -539,7 +539,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj, | |||
539 | int flags) | 539 | int flags) |
540 | { | 540 | { |
541 | return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size, | 541 | return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size, |
542 | O_RDWR); | 542 | O_RDWR, NULL); |
543 | } | 543 | } |
544 | 544 | ||
545 | struct drm_gem_object * | 545 | struct drm_gem_object * |
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 304ca8cacbc4..99d578bad17e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c | |||
@@ -336,7 +336,13 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { | |||
336 | struct dma_buf *drm_gem_prime_export(struct drm_device *dev, | 336 | struct dma_buf *drm_gem_prime_export(struct drm_device *dev, |
337 | struct drm_gem_object *obj, int flags) | 337 | struct drm_gem_object *obj, int flags) |
338 | { | 338 | { |
339 | return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); | 339 | struct reservation_object *robj = NULL; |
340 | |||
341 | if (dev->driver->gem_prime_res_obj) | ||
342 | robj = dev->driver->gem_prime_res_obj(obj); | ||
343 | |||
344 | return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, | ||
345 | flags, robj); | ||
340 | } | 346 | } |
341 | EXPORT_SYMBOL(drm_gem_prime_export); | 347 | EXPORT_SYMBOL(drm_gem_prime_export); |
342 | 348 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 2a3ad24276f8..60192ed544f0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | |||
@@ -187,7 +187,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, | |||
187 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | 187 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); |
188 | 188 | ||
189 | return dma_buf_export(obj, &exynos_dmabuf_ops, | 189 | return dma_buf_export(obj, &exynos_dmabuf_ops, |
190 | exynos_gem_obj->base.size, flags); | 190 | exynos_gem_obj->base.size, flags, NULL); |
191 | } | 191 | } |
192 | 192 | ||
193 | struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, | 193 | struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, |
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 580aa42443ed..82a1f4b57778 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c | |||
@@ -237,7 +237,8 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, | |||
237 | return ERR_PTR(ret); | 237 | return ERR_PTR(ret); |
238 | } | 238 | } |
239 | 239 | ||
240 | return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags); | 240 | return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags, |
241 | NULL); | ||
241 | } | 242 | } |
242 | 243 | ||
243 | static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) | 244 | static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 5425ffe3931d..c9428c943afb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -845,6 +845,7 @@ driver = { | |||
845 | .gem_prime_export = drm_gem_prime_export, | 845 | .gem_prime_export = drm_gem_prime_export, |
846 | .gem_prime_import = drm_gem_prime_import, | 846 | .gem_prime_import = drm_gem_prime_import, |
847 | .gem_prime_pin = nouveau_gem_prime_pin, | 847 | .gem_prime_pin = nouveau_gem_prime_pin, |
848 | .gem_prime_res_obj = nouveau_gem_prime_res_obj, | ||
848 | .gem_prime_unpin = nouveau_gem_prime_unpin, | 849 | .gem_prime_unpin = nouveau_gem_prime_unpin, |
849 | .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table, | 850 | .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table, |
850 | .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, | 851 | .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h index 7caca057bc38..ddab762d81fe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.h +++ b/drivers/gpu/drm/nouveau/nouveau_gem.h | |||
@@ -35,6 +35,7 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *, | |||
35 | struct drm_file *); | 35 | struct drm_file *); |
36 | 36 | ||
37 | extern int nouveau_gem_prime_pin(struct drm_gem_object *); | 37 | extern int nouveau_gem_prime_pin(struct drm_gem_object *); |
38 | struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *); | ||
38 | extern void nouveau_gem_prime_unpin(struct drm_gem_object *); | 39 | extern void nouveau_gem_prime_unpin(struct drm_gem_object *); |
39 | extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); | 40 | extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); |
40 | extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( | 41 | extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( |
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index 51a2cb102b44..1f51008e4d26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c | |||
@@ -102,3 +102,10 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj) | |||
102 | 102 | ||
103 | nouveau_bo_unpin(nvbo); | 103 | nouveau_bo_unpin(nvbo); |
104 | } | 104 | } |
105 | |||
106 | struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj) | ||
107 | { | ||
108 | struct nouveau_bo *nvbo = nouveau_gem_object(obj); | ||
109 | |||
110 | return nvbo->bo.resv; | ||
111 | } | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 4fcca8d42796..a2dbfb1737b4 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | |||
@@ -171,7 +171,7 @@ static struct dma_buf_ops omap_dmabuf_ops = { | |||
171 | struct dma_buf *omap_gem_prime_export(struct drm_device *dev, | 171 | struct dma_buf *omap_gem_prime_export(struct drm_device *dev, |
172 | struct drm_gem_object *obj, int flags) | 172 | struct drm_gem_object *obj, int flags) |
173 | { | 173 | { |
174 | return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags); | 174 | return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL); |
175 | } | 175 | } |
176 | 176 | ||
177 | struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, | 177 | struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e9e361084249..959f0866d993 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -132,6 +132,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, | |||
132 | struct sg_table *sg); | 132 | struct sg_table *sg); |
133 | int radeon_gem_prime_pin(struct drm_gem_object *obj); | 133 | int radeon_gem_prime_pin(struct drm_gem_object *obj); |
134 | void radeon_gem_prime_unpin(struct drm_gem_object *obj); | 134 | void radeon_gem_prime_unpin(struct drm_gem_object *obj); |
135 | struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *); | ||
135 | void *radeon_gem_prime_vmap(struct drm_gem_object *obj); | 136 | void *radeon_gem_prime_vmap(struct drm_gem_object *obj); |
136 | void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | 137 | void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
137 | extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, | 138 | extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, |
@@ -566,6 +567,7 @@ static struct drm_driver kms_driver = { | |||
566 | .gem_prime_import = drm_gem_prime_import, | 567 | .gem_prime_import = drm_gem_prime_import, |
567 | .gem_prime_pin = radeon_gem_prime_pin, | 568 | .gem_prime_pin = radeon_gem_prime_pin, |
568 | .gem_prime_unpin = radeon_gem_prime_unpin, | 569 | .gem_prime_unpin = radeon_gem_prime_unpin, |
570 | .gem_prime_res_obj = radeon_gem_prime_res_obj, | ||
569 | .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table, | 571 | .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table, |
570 | .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, | 572 | .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, |
571 | .gem_prime_vmap = radeon_gem_prime_vmap, | 573 | .gem_prime_vmap = radeon_gem_prime_vmap, |
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 20074560fc25..28d71070c389 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c | |||
@@ -103,3 +103,11 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj) | |||
103 | radeon_bo_unpin(bo); | 103 | radeon_bo_unpin(bo); |
104 | radeon_bo_unreserve(bo); | 104 | radeon_bo_unreserve(bo); |
105 | } | 105 | } |
106 | |||
107 | |||
108 | struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj) | ||
109 | { | ||
110 | struct radeon_bo *bo = gem_to_radeon_bo(obj); | ||
111 | |||
112 | return bo->tbo.resv; | ||
113 | } | ||
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index aa85b7b26f10..78cc8143760a 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c | |||
@@ -420,7 +420,7 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm, | |||
420 | int flags) | 420 | int flags) |
421 | { | 421 | { |
422 | return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size, | 422 | return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size, |
423 | flags); | 423 | flags, NULL); |
424 | } | 424 | } |
425 | 425 | ||
426 | struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, | 426 | struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, |
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index d2a053352789..12c87110db3a 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
@@ -695,7 +695,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, | |||
695 | } | 695 | } |
696 | 696 | ||
697 | dma_buf = dma_buf_export(prime, &tdev->ops, | 697 | dma_buf = dma_buf_export(prime, &tdev->ops, |
698 | prime->size, flags); | 698 | prime->size, flags, NULL); |
699 | if (IS_ERR(dma_buf)) { | 699 | if (IS_ERR(dma_buf)) { |
700 | ret = PTR_ERR(dma_buf); | 700 | ret = PTR_ERR(dma_buf); |
701 | ttm_mem_global_free(tdev->mem_glob, | 701 | ttm_mem_global_free(tdev->mem_glob, |
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 880be0782dd9..c4e4dfa8123a 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c | |||
@@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags) | |||
404 | if (WARN_ON(!buf->sgt_base)) | 404 | if (WARN_ON(!buf->sgt_base)) |
405 | return NULL; | 405 | return NULL; |
406 | 406 | ||
407 | dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags); | 407 | dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL); |
408 | if (IS_ERR(dbuf)) | 408 | if (IS_ERR(dbuf)) |
409 | return NULL; | 409 | return NULL; |
410 | 410 | ||
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 99e484f845f2..51607e9aa049 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig | |||
@@ -88,6 +88,7 @@ config SYNC | |||
88 | bool "Synchronization framework" | 88 | bool "Synchronization framework" |
89 | default n | 89 | default n |
90 | select ANON_INODES | 90 | select ANON_INODES |
91 | select DMA_SHARED_BUFFER | ||
91 | ---help--- | 92 | ---help--- |
92 | This option enables the framework for synchronization between multiple | 93 | This option enables the framework for synchronization between multiple |
93 | drivers. Sync implementations can take advantage of hardware | 94 | drivers. Sync implementations can take advantage of hardware |
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index 0a01e1914905..517ad5ffa429 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile | |||
@@ -9,5 +9,5 @@ obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o | |||
9 | obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o | 9 | obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o |
10 | obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o | 10 | obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o |
11 | obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o | 11 | obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o |
12 | obj-$(CONFIG_SYNC) += sync.o | 12 | obj-$(CONFIG_SYNC) += sync.o sync_debug.o |
13 | obj-$(CONFIG_SW_SYNC) += sw_sync.o | 13 | obj-$(CONFIG_SW_SYNC) += sw_sync.o |
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 389b8f67a2ec..270360912b2c 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c | |||
@@ -1120,7 +1120,8 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client, | |||
1120 | ion_buffer_get(buffer); | 1120 | ion_buffer_get(buffer); |
1121 | mutex_unlock(&client->lock); | 1121 | mutex_unlock(&client->lock); |
1122 | 1122 | ||
1123 | dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); | 1123 | dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR, |
1124 | NULL); | ||
1124 | if (IS_ERR(dmabuf)) { | 1125 | if (IS_ERR(dmabuf)) { |
1125 | ion_buffer_put(buffer); | 1126 | ion_buffer_put(buffer); |
1126 | return dmabuf; | 1127 | return dmabuf; |
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c index 12a136ec1cec..a76db3ff87cb 100644 --- a/drivers/staging/android/sw_sync.c +++ b/drivers/staging/android/sw_sync.c | |||
@@ -50,7 +50,7 @@ static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) | |||
50 | { | 50 | { |
51 | struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; | 51 | struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; |
52 | struct sw_sync_timeline *obj = | 52 | struct sw_sync_timeline *obj = |
53 | (struct sw_sync_timeline *)sync_pt->parent; | 53 | (struct sw_sync_timeline *)sync_pt_parent(sync_pt); |
54 | 54 | ||
55 | return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); | 55 | return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); |
56 | } | 56 | } |
@@ -59,7 +59,7 @@ static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) | |||
59 | { | 59 | { |
60 | struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; | 60 | struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; |
61 | struct sw_sync_timeline *obj = | 61 | struct sw_sync_timeline *obj = |
62 | (struct sw_sync_timeline *)sync_pt->parent; | 62 | (struct sw_sync_timeline *)sync_pt_parent(sync_pt); |
63 | 63 | ||
64 | return sw_sync_cmp(obj->value, pt->value) >= 0; | 64 | return sw_sync_cmp(obj->value, pt->value) >= 0; |
65 | } | 65 | } |
@@ -97,7 +97,6 @@ static void sw_sync_pt_value_str(struct sync_pt *sync_pt, | |||
97 | char *str, int size) | 97 | char *str, int size) |
98 | { | 98 | { |
99 | struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; | 99 | struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; |
100 | |||
101 | snprintf(str, size, "%d", pt->value); | 100 | snprintf(str, size, "%d", pt->value); |
102 | } | 101 | } |
103 | 102 | ||
@@ -157,7 +156,6 @@ static int sw_sync_open(struct inode *inode, struct file *file) | |||
157 | static int sw_sync_release(struct inode *inode, struct file *file) | 156 | static int sw_sync_release(struct inode *inode, struct file *file) |
158 | { | 157 | { |
159 | struct sw_sync_timeline *obj = file->private_data; | 158 | struct sw_sync_timeline *obj = file->private_data; |
160 | |||
161 | sync_timeline_destroy(&obj->obj); | 159 | sync_timeline_destroy(&obj->obj); |
162 | return 0; | 160 | return 0; |
163 | } | 161 | } |
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 18174f7c871c..c9a0c2cdc81a 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c | |||
@@ -31,22 +31,13 @@ | |||
31 | #define CREATE_TRACE_POINTS | 31 | #define CREATE_TRACE_POINTS |
32 | #include "trace/sync.h" | 32 | #include "trace/sync.h" |
33 | 33 | ||
34 | static void sync_fence_signal_pt(struct sync_pt *pt); | 34 | static const struct fence_ops android_fence_ops; |
35 | static int _sync_pt_has_signaled(struct sync_pt *pt); | 35 | static const struct file_operations sync_fence_fops; |
36 | static void sync_fence_free(struct kref *kref); | ||
37 | static void sync_dump(void); | ||
38 | |||
39 | static LIST_HEAD(sync_timeline_list_head); | ||
40 | static DEFINE_SPINLOCK(sync_timeline_list_lock); | ||
41 | |||
42 | static LIST_HEAD(sync_fence_list_head); | ||
43 | static DEFINE_SPINLOCK(sync_fence_list_lock); | ||
44 | 36 | ||
45 | struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, | 37 | struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, |
46 | int size, const char *name) | 38 | int size, const char *name) |
47 | { | 39 | { |
48 | struct sync_timeline *obj; | 40 | struct sync_timeline *obj; |
49 | unsigned long flags; | ||
50 | 41 | ||
51 | if (size < sizeof(struct sync_timeline)) | 42 | if (size < sizeof(struct sync_timeline)) |
52 | return NULL; | 43 | return NULL; |
@@ -57,17 +48,14 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, | |||
57 | 48 | ||
58 | kref_init(&obj->kref); | 49 | kref_init(&obj->kref); |
59 | obj->ops = ops; | 50 | obj->ops = ops; |
51 | obj->context = fence_context_alloc(1); | ||
60 | strlcpy(obj->name, name, sizeof(obj->name)); | 52 | strlcpy(obj->name, name, sizeof(obj->name)); |
61 | 53 | ||
62 | INIT_LIST_HEAD(&obj->child_list_head); | 54 | INIT_LIST_HEAD(&obj->child_list_head); |
63 | spin_lock_init(&obj->child_list_lock); | ||
64 | |||
65 | INIT_LIST_HEAD(&obj->active_list_head); | 55 | INIT_LIST_HEAD(&obj->active_list_head); |
66 | spin_lock_init(&obj->active_list_lock); | 56 | spin_lock_init(&obj->child_list_lock); |
67 | 57 | ||
68 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | 58 | sync_timeline_debug_add(obj); |
69 | list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); | ||
70 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
71 | 59 | ||
72 | return obj; | 60 | return obj; |
73 | } | 61 | } |
@@ -77,11 +65,8 @@ static void sync_timeline_free(struct kref *kref) | |||
77 | { | 65 | { |
78 | struct sync_timeline *obj = | 66 | struct sync_timeline *obj = |
79 | container_of(kref, struct sync_timeline, kref); | 67 | container_of(kref, struct sync_timeline, kref); |
80 | unsigned long flags; | ||
81 | 68 | ||
82 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | 69 | sync_timeline_debug_remove(obj); |
83 | list_del(&obj->sync_timeline_list); | ||
84 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
85 | 70 | ||
86 | if (obj->ops->release_obj) | 71 | if (obj->ops->release_obj) |
87 | obj->ops->release_obj(obj); | 72 | obj->ops->release_obj(obj); |
@@ -89,6 +74,16 @@ static void sync_timeline_free(struct kref *kref) | |||
89 | kfree(obj); | 74 | kfree(obj); |
90 | } | 75 | } |
91 | 76 | ||
77 | static void sync_timeline_get(struct sync_timeline *obj) | ||
78 | { | ||
79 | kref_get(&obj->kref); | ||
80 | } | ||
81 | |||
82 | static void sync_timeline_put(struct sync_timeline *obj) | ||
83 | { | ||
84 | kref_put(&obj->kref, sync_timeline_free); | ||
85 | } | ||
86 | |||
92 | void sync_timeline_destroy(struct sync_timeline *obj) | 87 | void sync_timeline_destroy(struct sync_timeline *obj) |
93 | { | 88 | { |
94 | obj->destroyed = true; | 89 | obj->destroyed = true; |
@@ -102,75 +97,33 @@ void sync_timeline_destroy(struct sync_timeline *obj) | |||
102 | * signal any children that their parent is going away. | 97 | * signal any children that their parent is going away. |
103 | */ | 98 | */ |
104 | sync_timeline_signal(obj); | 99 | sync_timeline_signal(obj); |
105 | 100 | sync_timeline_put(obj); | |
106 | kref_put(&obj->kref, sync_timeline_free); | ||
107 | } | 101 | } |
108 | EXPORT_SYMBOL(sync_timeline_destroy); | 102 | EXPORT_SYMBOL(sync_timeline_destroy); |
109 | 103 | ||
110 | static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) | ||
111 | { | ||
112 | unsigned long flags; | ||
113 | |||
114 | pt->parent = obj; | ||
115 | |||
116 | spin_lock_irqsave(&obj->child_list_lock, flags); | ||
117 | list_add_tail(&pt->child_list, &obj->child_list_head); | ||
118 | spin_unlock_irqrestore(&obj->child_list_lock, flags); | ||
119 | } | ||
120 | |||
121 | static void sync_timeline_remove_pt(struct sync_pt *pt) | ||
122 | { | ||
123 | struct sync_timeline *obj = pt->parent; | ||
124 | unsigned long flags; | ||
125 | |||
126 | spin_lock_irqsave(&obj->active_list_lock, flags); | ||
127 | if (!list_empty(&pt->active_list)) | ||
128 | list_del_init(&pt->active_list); | ||
129 | spin_unlock_irqrestore(&obj->active_list_lock, flags); | ||
130 | |||
131 | spin_lock_irqsave(&obj->child_list_lock, flags); | ||
132 | if (!list_empty(&pt->child_list)) | ||
133 | list_del_init(&pt->child_list); | ||
134 | |||
135 | spin_unlock_irqrestore(&obj->child_list_lock, flags); | ||
136 | } | ||
137 | |||
138 | void sync_timeline_signal(struct sync_timeline *obj) | 104 | void sync_timeline_signal(struct sync_timeline *obj) |
139 | { | 105 | { |
140 | unsigned long flags; | 106 | unsigned long flags; |
141 | LIST_HEAD(signaled_pts); | 107 | LIST_HEAD(signaled_pts); |
142 | struct list_head *pos, *n; | 108 | struct sync_pt *pt, *next; |
143 | 109 | ||
144 | trace_sync_timeline(obj); | 110 | trace_sync_timeline(obj); |
145 | 111 | ||
146 | spin_lock_irqsave(&obj->active_list_lock, flags); | 112 | spin_lock_irqsave(&obj->child_list_lock, flags); |
147 | |||
148 | list_for_each_safe(pos, n, &obj->active_list_head) { | ||
149 | struct sync_pt *pt = | ||
150 | container_of(pos, struct sync_pt, active_list); | ||
151 | 113 | ||
152 | if (_sync_pt_has_signaled(pt)) { | 114 | list_for_each_entry_safe(pt, next, &obj->active_list_head, |
153 | list_del_init(pos); | 115 | active_list) { |
154 | list_add(&pt->signaled_list, &signaled_pts); | 116 | if (fence_is_signaled_locked(&pt->base)) |
155 | kref_get(&pt->fence->kref); | 117 | list_del(&pt->active_list); |
156 | } | ||
157 | } | 118 | } |
158 | 119 | ||
159 | spin_unlock_irqrestore(&obj->active_list_lock, flags); | 120 | spin_unlock_irqrestore(&obj->child_list_lock, flags); |
160 | |||
161 | list_for_each_safe(pos, n, &signaled_pts) { | ||
162 | struct sync_pt *pt = | ||
163 | container_of(pos, struct sync_pt, signaled_list); | ||
164 | |||
165 | list_del_init(pos); | ||
166 | sync_fence_signal_pt(pt); | ||
167 | kref_put(&pt->fence->kref, sync_fence_free); | ||
168 | } | ||
169 | } | 121 | } |
170 | EXPORT_SYMBOL(sync_timeline_signal); | 122 | EXPORT_SYMBOL(sync_timeline_signal); |
171 | 123 | ||
172 | struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) | 124 | struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size) |
173 | { | 125 | { |
126 | unsigned long flags; | ||
174 | struct sync_pt *pt; | 127 | struct sync_pt *pt; |
175 | 128 | ||
176 | if (size < sizeof(struct sync_pt)) | 129 | if (size < sizeof(struct sync_pt)) |
@@ -180,87 +133,28 @@ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) | |||
180 | if (pt == NULL) | 133 | if (pt == NULL) |
181 | return NULL; | 134 | return NULL; |
182 | 135 | ||
136 | spin_lock_irqsave(&obj->child_list_lock, flags); | ||
137 | sync_timeline_get(obj); | ||
138 | fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock, | ||
139 | obj->context, ++obj->value); | ||
140 | list_add_tail(&pt->child_list, &obj->child_list_head); | ||
183 | INIT_LIST_HEAD(&pt->active_list); | 141 | INIT_LIST_HEAD(&pt->active_list); |
184 | kref_get(&parent->kref); | 142 | spin_unlock_irqrestore(&obj->child_list_lock, flags); |
185 | sync_timeline_add_pt(parent, pt); | ||
186 | |||
187 | return pt; | 143 | return pt; |
188 | } | 144 | } |
189 | EXPORT_SYMBOL(sync_pt_create); | 145 | EXPORT_SYMBOL(sync_pt_create); |
190 | 146 | ||
191 | void sync_pt_free(struct sync_pt *pt) | 147 | void sync_pt_free(struct sync_pt *pt) |
192 | { | 148 | { |
193 | if (pt->parent->ops->free_pt) | 149 | fence_put(&pt->base); |
194 | pt->parent->ops->free_pt(pt); | ||
195 | |||
196 | sync_timeline_remove_pt(pt); | ||
197 | |||
198 | kref_put(&pt->parent->kref, sync_timeline_free); | ||
199 | |||
200 | kfree(pt); | ||
201 | } | 150 | } |
202 | EXPORT_SYMBOL(sync_pt_free); | 151 | EXPORT_SYMBOL(sync_pt_free); |
203 | 152 | ||
204 | /* call with pt->parent->active_list_lock held */ | 153 | static struct sync_fence *sync_fence_alloc(int size, const char *name) |
205 | static int _sync_pt_has_signaled(struct sync_pt *pt) | ||
206 | { | ||
207 | int old_status = pt->status; | ||
208 | |||
209 | if (!pt->status) | ||
210 | pt->status = pt->parent->ops->has_signaled(pt); | ||
211 | |||
212 | if (!pt->status && pt->parent->destroyed) | ||
213 | pt->status = -ENOENT; | ||
214 | |||
215 | if (pt->status != old_status) | ||
216 | pt->timestamp = ktime_get(); | ||
217 | |||
218 | return pt->status; | ||
219 | } | ||
220 | |||
221 | static struct sync_pt *sync_pt_dup(struct sync_pt *pt) | ||
222 | { | ||
223 | return pt->parent->ops->dup(pt); | ||
224 | } | ||
225 | |||
226 | /* Adds a sync pt to the active queue. Called when added to a fence */ | ||
227 | static void sync_pt_activate(struct sync_pt *pt) | ||
228 | { | ||
229 | struct sync_timeline *obj = pt->parent; | ||
230 | unsigned long flags; | ||
231 | int err; | ||
232 | |||
233 | spin_lock_irqsave(&obj->active_list_lock, flags); | ||
234 | |||
235 | err = _sync_pt_has_signaled(pt); | ||
236 | if (err != 0) | ||
237 | goto out; | ||
238 | |||
239 | list_add_tail(&pt->active_list, &obj->active_list_head); | ||
240 | |||
241 | out: | ||
242 | spin_unlock_irqrestore(&obj->active_list_lock, flags); | ||
243 | } | ||
244 | |||
245 | static int sync_fence_release(struct inode *inode, struct file *file); | ||
246 | static unsigned int sync_fence_poll(struct file *file, poll_table *wait); | ||
247 | static long sync_fence_ioctl(struct file *file, unsigned int cmd, | ||
248 | unsigned long arg); | ||
249 | |||
250 | |||
251 | static const struct file_operations sync_fence_fops = { | ||
252 | .release = sync_fence_release, | ||
253 | .poll = sync_fence_poll, | ||
254 | .unlocked_ioctl = sync_fence_ioctl, | ||
255 | .compat_ioctl = sync_fence_ioctl, | ||
256 | }; | ||
257 | |||
258 | static struct sync_fence *sync_fence_alloc(const char *name) | ||
259 | { | 154 | { |
260 | struct sync_fence *fence; | 155 | struct sync_fence *fence; |
261 | unsigned long flags; | ||
262 | 156 | ||
263 | fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); | 157 | fence = kzalloc(size, GFP_KERNEL); |
264 | if (fence == NULL) | 158 | if (fence == NULL) |
265 | return NULL; | 159 | return NULL; |
266 | 160 | ||
@@ -272,16 +166,8 @@ static struct sync_fence *sync_fence_alloc(const char *name) | |||
272 | kref_init(&fence->kref); | 166 | kref_init(&fence->kref); |
273 | strlcpy(fence->name, name, sizeof(fence->name)); | 167 | strlcpy(fence->name, name, sizeof(fence->name)); |
274 | 168 | ||
275 | INIT_LIST_HEAD(&fence->pt_list_head); | ||
276 | INIT_LIST_HEAD(&fence->waiter_list_head); | ||
277 | spin_lock_init(&fence->waiter_list_lock); | ||
278 | |||
279 | init_waitqueue_head(&fence->wq); | 169 | init_waitqueue_head(&fence->wq); |
280 | 170 | ||
281 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
282 | list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); | ||
283 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
284 | |||
285 | return fence; | 171 | return fence; |
286 | 172 | ||
287 | err: | 173 | err: |
@@ -289,120 +175,42 @@ err: | |||
289 | return NULL; | 175 | return NULL; |
290 | } | 176 | } |
291 | 177 | ||
292 | /* TODO: implement a create which takes more that one sync_pt */ | 178 | static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) |
293 | struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) | ||
294 | { | 179 | { |
180 | struct sync_fence_cb *check; | ||
295 | struct sync_fence *fence; | 181 | struct sync_fence *fence; |
296 | 182 | ||
297 | if (pt->fence) | 183 | check = container_of(cb, struct sync_fence_cb, cb); |
298 | return NULL; | 184 | fence = check->fence; |
299 | |||
300 | fence = sync_fence_alloc(name); | ||
301 | if (fence == NULL) | ||
302 | return NULL; | ||
303 | 185 | ||
304 | pt->fence = fence; | 186 | if (atomic_dec_and_test(&fence->status)) |
305 | list_add(&pt->pt_list, &fence->pt_list_head); | 187 | wake_up_all(&fence->wq); |
306 | sync_pt_activate(pt); | ||
307 | |||
308 | /* | ||
309 | * signal the fence in case pt was activated before | ||
310 | * sync_pt_activate(pt) was called | ||
311 | */ | ||
312 | sync_fence_signal_pt(pt); | ||
313 | |||
314 | return fence; | ||
315 | } | ||
316 | EXPORT_SYMBOL(sync_fence_create); | ||
317 | |||
318 | static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) | ||
319 | { | ||
320 | struct list_head *pos; | ||
321 | |||
322 | list_for_each(pos, &src->pt_list_head) { | ||
323 | struct sync_pt *orig_pt = | ||
324 | container_of(pos, struct sync_pt, pt_list); | ||
325 | struct sync_pt *new_pt = sync_pt_dup(orig_pt); | ||
326 | |||
327 | if (new_pt == NULL) | ||
328 | return -ENOMEM; | ||
329 | |||
330 | new_pt->fence = dst; | ||
331 | list_add(&new_pt->pt_list, &dst->pt_list_head); | ||
332 | } | ||
333 | |||
334 | return 0; | ||
335 | } | 188 | } |
336 | 189 | ||
337 | static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) | 190 | /* TODO: implement a create which takes more than one sync_pt */
338 | { | 191 | struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) |
339 | struct list_head *src_pos, *dst_pos, *n; | ||
340 | |||
341 | list_for_each(src_pos, &src->pt_list_head) { | ||
342 | struct sync_pt *src_pt = | ||
343 | container_of(src_pos, struct sync_pt, pt_list); | ||
344 | bool collapsed = false; | ||
345 | |||
346 | list_for_each_safe(dst_pos, n, &dst->pt_list_head) { | ||
347 | struct sync_pt *dst_pt = | ||
348 | container_of(dst_pos, struct sync_pt, pt_list); | ||
349 | /* collapse two sync_pts on the same timeline | ||
350 | * to a single sync_pt that will signal at | ||
351 | * the later of the two | ||
352 | */ | ||
353 | if (dst_pt->parent == src_pt->parent) { | ||
354 | if (dst_pt->parent->ops->compare(dst_pt, src_pt) | ||
355 | == -1) { | ||
356 | struct sync_pt *new_pt = | ||
357 | sync_pt_dup(src_pt); | ||
358 | if (new_pt == NULL) | ||
359 | return -ENOMEM; | ||
360 | |||
361 | new_pt->fence = dst; | ||
362 | list_replace(&dst_pt->pt_list, | ||
363 | &new_pt->pt_list); | ||
364 | sync_pt_free(dst_pt); | ||
365 | } | ||
366 | collapsed = true; | ||
367 | break; | ||
368 | } | ||
369 | } | ||
370 | |||
371 | if (!collapsed) { | ||
372 | struct sync_pt *new_pt = sync_pt_dup(src_pt); | ||
373 | |||
374 | if (new_pt == NULL) | ||
375 | return -ENOMEM; | ||
376 | |||
377 | new_pt->fence = dst; | ||
378 | list_add(&new_pt->pt_list, &dst->pt_list_head); | ||
379 | } | ||
380 | } | ||
381 | |||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | static void sync_fence_detach_pts(struct sync_fence *fence) | ||
386 | { | 192 | { |
387 | struct list_head *pos, *n; | 193 | struct sync_fence *fence; |
388 | 194 | ||
389 | list_for_each_safe(pos, n, &fence->pt_list_head) { | 195 | fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name); |
390 | struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); | 196 | if (fence == NULL) |
197 | return NULL; | ||
391 | 198 | ||
392 | sync_timeline_remove_pt(pt); | 199 | fence->num_fences = 1; |
393 | } | 200 | atomic_set(&fence->status, 1); |
394 | } | ||
395 | 201 | ||
396 | static void sync_fence_free_pts(struct sync_fence *fence) | 202 | fence_get(&pt->base); |
397 | { | 203 | fence->cbs[0].sync_pt = &pt->base; |
398 | struct list_head *pos, *n; | 204 | fence->cbs[0].fence = fence; |
205 | if (fence_add_callback(&pt->base, &fence->cbs[0].cb, | ||
206 | fence_check_cb_func)) | ||
207 | atomic_dec(&fence->status); | ||
399 | 208 | ||
400 | list_for_each_safe(pos, n, &fence->pt_list_head) { | 209 | sync_fence_debug_add(fence); |
401 | struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); | ||
402 | 210 | ||
403 | sync_pt_free(pt); | 211 | return fence; |
404 | } | ||
405 | } | 212 | } |
213 | EXPORT_SYMBOL(sync_fence_create); | ||
406 | 214 | ||
407 | struct sync_fence *sync_fence_fdget(int fd) | 215 | struct sync_fence *sync_fence_fdget(int fd) |
408 | { | 216 | { |
@@ -434,197 +242,155 @@ void sync_fence_install(struct sync_fence *fence, int fd) | |||
434 | } | 242 | } |
435 | EXPORT_SYMBOL(sync_fence_install); | 243 | EXPORT_SYMBOL(sync_fence_install); |
436 | 244 | ||
437 | static int sync_fence_get_status(struct sync_fence *fence) | 245 | static void sync_fence_add_pt(struct sync_fence *fence, |
246 | int *i, struct fence *pt) | ||
438 | { | 247 | { |
439 | struct list_head *pos; | 248 | fence->cbs[*i].sync_pt = pt; |
440 | int status = 1; | 249 | fence->cbs[*i].fence = fence; |
441 | |||
442 | list_for_each(pos, &fence->pt_list_head) { | ||
443 | struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); | ||
444 | int pt_status = pt->status; | ||
445 | |||
446 | if (pt_status < 0) { | ||
447 | status = pt_status; | ||
448 | break; | ||
449 | } else if (status == 1) { | ||
450 | status = pt_status; | ||
451 | } | ||
452 | } | ||
453 | 250 | ||
454 | return status; | 251 | if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) { |
252 | fence_get(pt); | ||
253 | (*i)++; | ||
254 | } | ||
455 | } | 255 | } |
456 | 256 | ||
457 | struct sync_fence *sync_fence_merge(const char *name, | 257 | struct sync_fence *sync_fence_merge(const char *name, |
458 | struct sync_fence *a, struct sync_fence *b) | 258 | struct sync_fence *a, struct sync_fence *b) |
459 | { | 259 | { |
260 | int num_fences = a->num_fences + b->num_fences; | ||
460 | struct sync_fence *fence; | 261 | struct sync_fence *fence; |
461 | struct list_head *pos; | 262 | int i, i_a, i_b; |
462 | int err; | 263 | unsigned long size = offsetof(struct sync_fence, cbs[num_fences]); |
463 | 264 | ||
464 | fence = sync_fence_alloc(name); | 265 | fence = sync_fence_alloc(size, name); |
465 | if (fence == NULL) | 266 | if (fence == NULL) |
466 | return NULL; | 267 | return NULL; |
467 | 268 | ||
468 | err = sync_fence_copy_pts(fence, a); | 269 | atomic_set(&fence->status, num_fences); |
469 | if (err < 0) | ||
470 | goto err; | ||
471 | 270 | ||
472 | err = sync_fence_merge_pts(fence, b); | 271 | /* |
473 | if (err < 0) | 272 | * Assume sync_fence a and b are both ordered and have no |
474 | goto err; | 273 | * duplicates with the same context. |
274 | * | ||
275 | * If a sync_fence can only be created with sync_fence_merge | ||
276 | * and sync_fence_create, this is a reasonable assumption. | ||
277 | */ | ||
278 | for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) { | ||
279 | struct fence *pt_a = a->cbs[i_a].sync_pt; | ||
280 | struct fence *pt_b = b->cbs[i_b].sync_pt; | ||
281 | |||
282 | if (pt_a->context < pt_b->context) { | ||
283 | sync_fence_add_pt(fence, &i, pt_a); | ||
284 | |||
285 | i_a++; | ||
286 | } else if (pt_a->context > pt_b->context) { | ||
287 | sync_fence_add_pt(fence, &i, pt_b); | ||
475 | 288 | ||
476 | list_for_each(pos, &fence->pt_list_head) { | 289 | i_b++; |
477 | struct sync_pt *pt = | 290 | } else { |
478 | container_of(pos, struct sync_pt, pt_list); | 291 | if (pt_a->seqno - pt_b->seqno <= INT_MAX) |
479 | sync_pt_activate(pt); | 292 | sync_fence_add_pt(fence, &i, pt_a); |
293 | else | ||
294 | sync_fence_add_pt(fence, &i, pt_b); | ||
295 | |||
296 | i_a++; | ||
297 | i_b++; | ||
298 | } | ||
480 | } | 299 | } |
481 | 300 | ||
482 | /* | 301 | for (; i_a < a->num_fences; i_a++) |
483 | * signal the fence in case one of it's pts were activated before | 302 | sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt); |
484 | * they were activated | ||
485 | */ | ||
486 | sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, | ||
487 | struct sync_pt, | ||
488 | pt_list)); | ||
489 | 303 | ||
304 | for (; i_b < b->num_fences; i_b++) | ||
305 | sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt); | ||
306 | |||
307 | if (num_fences > i) | ||
308 | atomic_sub(num_fences - i, &fence->status); | ||
309 | fence->num_fences = i; | ||
310 | |||
311 | sync_fence_debug_add(fence); | ||
490 | return fence; | 312 | return fence; |
491 | err: | ||
492 | sync_fence_free_pts(fence); | ||
493 | kfree(fence); | ||
494 | return NULL; | ||
495 | } | 313 | } |
496 | EXPORT_SYMBOL(sync_fence_merge); | 314 | EXPORT_SYMBOL(sync_fence_merge); |
497 | 315 | ||
498 | static void sync_fence_signal_pt(struct sync_pt *pt) | 316 | int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, |
317 | int wake_flags, void *key) | ||
499 | { | 318 | { |
500 | LIST_HEAD(signaled_waiters); | 319 | struct sync_fence_waiter *wait; |
501 | struct sync_fence *fence = pt->fence; | ||
502 | struct list_head *pos; | ||
503 | struct list_head *n; | ||
504 | unsigned long flags; | ||
505 | int status; | ||
506 | |||
507 | status = sync_fence_get_status(fence); | ||
508 | |||
509 | spin_lock_irqsave(&fence->waiter_list_lock, flags); | ||
510 | /* | ||
511 | * this should protect against two threads racing on the signaled | ||
512 | * false -> true transition | ||
513 | */ | ||
514 | if (status && !fence->status) { | ||
515 | list_for_each_safe(pos, n, &fence->waiter_list_head) | ||
516 | list_move(pos, &signaled_waiters); | ||
517 | |||
518 | fence->status = status; | ||
519 | } else { | ||
520 | status = 0; | ||
521 | } | ||
522 | spin_unlock_irqrestore(&fence->waiter_list_lock, flags); | ||
523 | 320 | ||
524 | if (status) { | 321 | wait = container_of(curr, struct sync_fence_waiter, work); |
525 | list_for_each_safe(pos, n, &signaled_waiters) { | 322 | list_del_init(&wait->work.task_list); |
526 | struct sync_fence_waiter *waiter = | ||
527 | container_of(pos, struct sync_fence_waiter, | ||
528 | waiter_list); | ||
529 | 323 | ||
530 | list_del(pos); | 324 | wait->callback(wait->work.private, wait); |
531 | waiter->callback(fence, waiter); | 325 | return 1; |
532 | } | ||
533 | wake_up(&fence->wq); | ||
534 | } | ||
535 | } | 326 | } |
536 | 327 | ||
537 | int sync_fence_wait_async(struct sync_fence *fence, | 328 | int sync_fence_wait_async(struct sync_fence *fence, |
538 | struct sync_fence_waiter *waiter) | 329 | struct sync_fence_waiter *waiter) |
539 | { | 330 | { |
331 | int err = atomic_read(&fence->status); | ||
540 | unsigned long flags; | 332 | unsigned long flags; |
541 | int err = 0; | ||
542 | 333 | ||
543 | spin_lock_irqsave(&fence->waiter_list_lock, flags); | 334 | if (err < 0) |
335 | return err; | ||
544 | 336 | ||
545 | if (fence->status) { | 337 | if (!err) |
546 | err = fence->status; | 338 | return 1; |
547 | goto out; | ||
548 | } | ||
549 | 339 | ||
550 | list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); | 340 | init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq); |
551 | out: | 341 | waiter->work.private = fence; |
552 | spin_unlock_irqrestore(&fence->waiter_list_lock, flags); | ||
553 | 342 | ||
554 | return err; | 343 | spin_lock_irqsave(&fence->wq.lock, flags); |
344 | err = atomic_read(&fence->status); | ||
345 | if (err > 0) | ||
346 | __add_wait_queue_tail(&fence->wq, &waiter->work); | ||
347 | spin_unlock_irqrestore(&fence->wq.lock, flags); | ||
348 | |||
349 | if (err < 0) | ||
350 | return err; | ||
351 | |||
352 | return !err; | ||
555 | } | 353 | } |
556 | EXPORT_SYMBOL(sync_fence_wait_async); | 354 | EXPORT_SYMBOL(sync_fence_wait_async); |
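sync_fence_wait_async() now keys entirely off the fence's atomic status, which the merge path above initialises to the number of sync_pts and which is decremented as each point signals: a positive value means still active (queue the waiter on the fence's own wait queue), zero means already signaled (return 1 immediately), negative means error. A plain-int user-space model of that counting convention follows; it is only a sketch, and the model_* names are hypothetical.

    /* User-space model of the atomic status convention assumed above. */
    #include <assert.h>

    struct model_fence { int status; }; /* >0 active, 0 signaled, <0 error */

    static void model_pt_signal(struct model_fence *f, int error)
    {
            if (error)
                    f->status = error;  /* an error code sticks, e.g. a negative errno */
            else if (f->status > 0)
                    f->status--;        /* one more pt has signaled */
    }

    int main(void)
    {
            struct model_fence f = { .status = 2 }; /* two pts, both pending */

            model_pt_signal(&f, 0);
            assert(f.status == 1);  /* still active: a waiter would be queued */
            model_pt_signal(&f, 0);
            assert(f.status == 0);  /* signaled: wait_async would return 1 */
            return 0;
    }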
557 | 355 | ||
558 | int sync_fence_cancel_async(struct sync_fence *fence, | 356 | int sync_fence_cancel_async(struct sync_fence *fence, |
559 | struct sync_fence_waiter *waiter) | 357 | struct sync_fence_waiter *waiter) |
560 | { | 358 | { |
561 | struct list_head *pos; | ||
562 | struct list_head *n; | ||
563 | unsigned long flags; | 359 | unsigned long flags; |
564 | int ret = -ENOENT; | 360 | int ret = 0; |
565 | 361 | ||
566 | spin_lock_irqsave(&fence->waiter_list_lock, flags); | 362 | spin_lock_irqsave(&fence->wq.lock, flags); |
567 | /* | 363 | if (!list_empty(&waiter->work.task_list)) |
568 | * Make sure waiter is still in waiter_list because it is possible for | 364 | list_del_init(&waiter->work.task_list); |
569 | * the waiter to be removed from the list while the callback is still | 365 | else |
570 | * pending. | 366 | ret = -ENOENT; |
571 | */ | 367 | spin_unlock_irqrestore(&fence->wq.lock, flags); |
572 | list_for_each_safe(pos, n, &fence->waiter_list_head) { | ||
573 | struct sync_fence_waiter *list_waiter = | ||
574 | container_of(pos, struct sync_fence_waiter, | ||
575 | waiter_list); | ||
576 | if (list_waiter == waiter) { | ||
577 | list_del(pos); | ||
578 | ret = 0; | ||
579 | break; | ||
580 | } | ||
581 | } | ||
582 | spin_unlock_irqrestore(&fence->waiter_list_lock, flags); | ||
583 | return ret; | 368 | return ret; |
584 | } | 369 | } |
585 | EXPORT_SYMBOL(sync_fence_cancel_async); | 370 | EXPORT_SYMBOL(sync_fence_cancel_async); |
586 | 371 | ||
587 | static bool sync_fence_check(struct sync_fence *fence) | ||
588 | { | ||
589 | /* | ||
590 | * Make sure that reads to fence->status are ordered with the | ||
591 | * wait queue event triggering | ||
592 | */ | ||
593 | smp_rmb(); | ||
594 | return fence->status != 0; | ||
595 | } | ||
596 | |||
597 | int sync_fence_wait(struct sync_fence *fence, long timeout) | 372 | int sync_fence_wait(struct sync_fence *fence, long timeout) |
598 | { | 373 | { |
599 | int err = 0; | 374 | long ret; |
600 | struct sync_pt *pt; | 375 | int i; |
601 | |||
602 | trace_sync_wait(fence, 1); | ||
603 | list_for_each_entry(pt, &fence->pt_list_head, pt_list) | ||
604 | trace_sync_pt(pt); | ||
605 | 376 | ||
606 | if (timeout > 0) { | 377 | if (timeout < 0) |
378 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
379 | else | ||
607 | timeout = msecs_to_jiffies(timeout); | 380 | timeout = msecs_to_jiffies(timeout); |
608 | err = wait_event_interruptible_timeout(fence->wq, | ||
609 | sync_fence_check(fence), | ||
610 | timeout); | ||
611 | } else if (timeout < 0) { | ||
612 | err = wait_event_interruptible(fence->wq, | ||
613 | sync_fence_check(fence)); | ||
614 | } | ||
615 | trace_sync_wait(fence, 0); | ||
616 | 381 | ||
617 | if (err < 0) | 382 | trace_sync_wait(fence, 1); |
618 | return err; | 383 | for (i = 0; i < fence->num_fences; ++i) |
619 | 384 | trace_sync_pt(fence->cbs[i].sync_pt); | |
620 | if (fence->status < 0) { | 385 | ret = wait_event_interruptible_timeout(fence->wq, |
621 | pr_info("fence error %d on [%p]\n", fence->status, fence); | 386 | atomic_read(&fence->status) <= 0, |
622 | sync_dump(); | 387 | timeout); |
623 | return fence->status; | 388 | trace_sync_wait(fence, 0); |
624 | } | ||
625 | 389 | ||
626 | if (fence->status == 0) { | 390 | if (ret < 0) |
627 | if (timeout > 0) { | 391 | return ret; |
392 | else if (ret == 0) { | ||
393 | if (timeout) { | ||
628 | pr_info("fence timeout on [%p] after %dms\n", fence, | 394 | pr_info("fence timeout on [%p] after %dms\n", fence, |
629 | jiffies_to_msecs(timeout)); | 395 | jiffies_to_msecs(timeout)); |
630 | sync_dump(); | 396 | sync_dump(); |
@@ -632,15 +398,136 @@ int sync_fence_wait(struct sync_fence *fence, long timeout) | |||
632 | return -ETIME; | 398 | return -ETIME; |
633 | } | 399 | } |
634 | 400 | ||
635 | return 0; | 401 | ret = atomic_read(&fence->status); |
402 | if (ret) { | ||
403 | pr_info("fence error %ld on [%p]\n", ret, fence); | ||
404 | sync_dump(); | ||
405 | } | ||
406 | return ret; | ||
636 | } | 407 | } |
637 | EXPORT_SYMBOL(sync_fence_wait); | 408 | EXPORT_SYMBOL(sync_fence_wait); |
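The reworked sync_fence_wait() folds the old two-branch wait into one call: a negative timeout now means wait indefinitely (MAX_SCHEDULE_TIMEOUT), other values are converted from milliseconds to jiffies, and the result of the wait is mapped to a return code afterwards (the timeout log line is only emitted when the timeout was non-zero). A small user-space sketch of just that result mapping, with wait_ret standing in for the wait_event_interruptible_timeout() return value and status for the final atomic status; this is an illustration, not the kernel code.

    #include <errno.h>

    static long model_wait_result(long wait_ret, int status)
    {
            if (wait_ret < 0)
                    return wait_ret;    /* interrupted, e.g. -ERESTARTSYS */
            if (wait_ret == 0)
                    return -ETIME;      /* wait ran out while still active */
            return status;              /* 0 on success, <0 on fence error */
    }

    int main(void)
    {
            if (model_wait_result(0, 1) != -ETIME)
                    return 1;
            if (model_wait_result(1, 0) != 0)
                    return 1;
            return 0;
    }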
638 | 409 | ||
410 | static const char *android_fence_get_driver_name(struct fence *fence) | ||
411 | { | ||
412 | struct sync_pt *pt = container_of(fence, struct sync_pt, base); | ||
413 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
414 | |||
415 | return parent->ops->driver_name; | ||
416 | } | ||
417 | |||
418 | static const char *android_fence_get_timeline_name(struct fence *fence) | ||
419 | { | ||
420 | struct sync_pt *pt = container_of(fence, struct sync_pt, base); | ||
421 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
422 | |||
423 | return parent->name; | ||
424 | } | ||
425 | |||
426 | static void android_fence_release(struct fence *fence) | ||
427 | { | ||
428 | struct sync_pt *pt = container_of(fence, struct sync_pt, base); | ||
429 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
430 | unsigned long flags; | ||
431 | |||
432 | spin_lock_irqsave(fence->lock, flags); | ||
433 | list_del(&pt->child_list); | ||
434 | if (WARN_ON_ONCE(!list_empty(&pt->active_list))) | ||
435 | list_del(&pt->active_list); | ||
436 | spin_unlock_irqrestore(fence->lock, flags); | ||
437 | |||
438 | if (parent->ops->free_pt) | ||
439 | parent->ops->free_pt(pt); | ||
440 | |||
441 | sync_timeline_put(parent); | ||
442 | fence_free(&pt->base); | ||
443 | } | ||
444 | |||
445 | static bool android_fence_signaled(struct fence *fence) | ||
446 | { | ||
447 | struct sync_pt *pt = container_of(fence, struct sync_pt, base); | ||
448 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
449 | int ret; | ||
450 | |||
451 | ret = parent->ops->has_signaled(pt); | ||
452 | if (ret < 0) | ||
453 | fence->status = ret; | ||
454 | return ret; | ||
455 | } | ||
456 | |||
457 | static bool android_fence_enable_signaling(struct fence *fence) | ||
458 | { | ||
459 | struct sync_pt *pt = container_of(fence, struct sync_pt, base); | ||
460 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
461 | |||
462 | if (android_fence_signaled(fence)) | ||
463 | return false; | ||
464 | |||
465 | list_add_tail(&pt->active_list, &parent->active_list_head); | ||
466 | return true; | ||
467 | } | ||
468 | |||
469 | static int android_fence_fill_driver_data(struct fence *fence, | ||
470 | void *data, int size) | ||
471 | { | ||
472 | struct sync_pt *pt = container_of(fence, struct sync_pt, base); | ||
473 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
474 | |||
475 | if (!parent->ops->fill_driver_data) | ||
476 | return 0; | ||
477 | return parent->ops->fill_driver_data(pt, data, size); | ||
478 | } | ||
479 | |||
480 | static void android_fence_value_str(struct fence *fence, | ||
481 | char *str, int size) | ||
482 | { | ||
483 | struct sync_pt *pt = container_of(fence, struct sync_pt, base); | ||
484 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
485 | |||
486 | if (!parent->ops->pt_value_str) { | ||
487 | if (size) | ||
488 | *str = 0; | ||
489 | return; | ||
490 | } | ||
491 | parent->ops->pt_value_str(pt, str, size); | ||
492 | } | ||
493 | |||
494 | static void android_fence_timeline_value_str(struct fence *fence, | ||
495 | char *str, int size) | ||
496 | { | ||
497 | struct sync_pt *pt = container_of(fence, struct sync_pt, base); | ||
498 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
499 | |||
500 | if (!parent->ops->timeline_value_str) { | ||
501 | if (size) | ||
502 | *str = 0; | ||
503 | return; | ||
504 | } | ||
505 | parent->ops->timeline_value_str(parent, str, size); | ||
506 | } | ||
507 | |||
508 | static const struct fence_ops android_fence_ops = { | ||
509 | .get_driver_name = android_fence_get_driver_name, | ||
510 | .get_timeline_name = android_fence_get_timeline_name, | ||
511 | .enable_signaling = android_fence_enable_signaling, | ||
512 | .signaled = android_fence_signaled, | ||
513 | .wait = fence_default_wait, | ||
514 | .release = android_fence_release, | ||
515 | .fill_driver_data = android_fence_fill_driver_data, | ||
516 | .fence_value_str = android_fence_value_str, | ||
517 | .timeline_value_str = android_fence_timeline_value_str, | ||
518 | }; | ||
519 | |||
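All of the android_fence_* callbacks above recover the wrapping sync_pt from their struct fence argument with container_of(), which works because struct sync_pt embeds its struct fence by value as the member named base (see the sync.h hunk further down). A self-contained user-space illustration of that embed-and-downcast pattern, using hypothetical model_* types:

    #include <assert.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct model_fence { int status; };

    struct model_sync_pt {
            struct model_fence base;    /* base object, embedded by value */
            int value;                  /* driver-specific payload */
    };

    int main(void)
    {
            struct model_sync_pt pt = { .base = { .status = 0 }, .value = 42 };
            struct model_fence *f = &pt.base;

            /* what android_fence_signaled() etc. do to get back to the sync_pt */
            struct model_sync_pt *back =
                    container_of(f, struct model_sync_pt, base);

            assert(back == &pt && back->value == 42);
            return 0;
    }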
639 | static void sync_fence_free(struct kref *kref) | 520 | static void sync_fence_free(struct kref *kref) |
640 | { | 521 | { |
641 | struct sync_fence *fence = container_of(kref, struct sync_fence, kref); | 522 | struct sync_fence *fence = container_of(kref, struct sync_fence, kref); |
523 | int i, status = atomic_read(&fence->status); | ||
642 | 524 | ||
643 | sync_fence_free_pts(fence); | 525 | for (i = 0; i < fence->num_fences; ++i) { |
526 | if (status) | ||
527 | fence_remove_callback(fence->cbs[i].sync_pt, | ||
528 | &fence->cbs[i].cb); | ||
529 | fence_put(fence->cbs[i].sync_pt); | ||
530 | } | ||
644 | 531 | ||
645 | kfree(fence); | 532 | kfree(fence); |
646 | } | 533 | } |
@@ -648,44 +535,25 @@ static void sync_fence_free(struct kref *kref) | |||
648 | static int sync_fence_release(struct inode *inode, struct file *file) | 535 | static int sync_fence_release(struct inode *inode, struct file *file) |
649 | { | 536 | { |
650 | struct sync_fence *fence = file->private_data; | 537 | struct sync_fence *fence = file->private_data; |
651 | unsigned long flags; | ||
652 | |||
653 | /* | ||
654 | * We need to remove all ways to access this fence before dropping | ||
655 | * our ref. | ||
656 | * | ||
657 | * start with its membership in the global fence list | ||
658 | */ | ||
659 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
660 | list_del(&fence->sync_fence_list); | ||
661 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
662 | 538 | ||
663 | /* | 539 | sync_fence_debug_remove(fence); |
664 | * remove its pts from their parents so that sync_timeline_signal() | ||
665 | * can't reference the fence. | ||
666 | */ | ||
667 | sync_fence_detach_pts(fence); | ||
668 | 540 | ||
669 | kref_put(&fence->kref, sync_fence_free); | 541 | kref_put(&fence->kref, sync_fence_free); |
670 | |||
671 | return 0; | 542 | return 0; |
672 | } | 543 | } |
673 | 544 | ||
674 | static unsigned int sync_fence_poll(struct file *file, poll_table *wait) | 545 | static unsigned int sync_fence_poll(struct file *file, poll_table *wait) |
675 | { | 546 | { |
676 | struct sync_fence *fence = file->private_data; | 547 | struct sync_fence *fence = file->private_data; |
548 | int status; | ||
677 | 549 | ||
678 | poll_wait(file, &fence->wq, wait); | 550 | poll_wait(file, &fence->wq, wait); |
679 | 551 | ||
680 | /* | 552 | status = atomic_read(&fence->status); |
681 | * Make sure that reads to fence->status are ordered with the | ||
682 | * wait queue event triggering | ||
683 | */ | ||
684 | smp_rmb(); | ||
685 | 553 | ||
686 | if (fence->status == 1) | 554 | if (!status) |
687 | return POLLIN; | 555 | return POLLIN; |
688 | else if (fence->status < 0) | 556 | else if (status < 0) |
689 | return POLLERR; | 557 | return POLLERR; |
690 | else | 558 | else |
691 | return 0; | 559 | return 0; |
@@ -750,7 +618,7 @@ err_put_fd: | |||
750 | return err; | 618 | return err; |
751 | } | 619 | } |
752 | 620 | ||
753 | static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) | 621 | static int sync_fill_pt_info(struct fence *fence, void *data, int size) |
754 | { | 622 | { |
755 | struct sync_pt_info *info = data; | 623 | struct sync_pt_info *info = data; |
756 | int ret; | 624 | int ret; |
@@ -760,20 +628,24 @@ static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) | |||
760 | 628 | ||
761 | info->len = sizeof(struct sync_pt_info); | 629 | info->len = sizeof(struct sync_pt_info); |
762 | 630 | ||
763 | if (pt->parent->ops->fill_driver_data) { | 631 | if (fence->ops->fill_driver_data) { |
764 | ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, | 632 | ret = fence->ops->fill_driver_data(fence, info->driver_data, |
765 | size - sizeof(*info)); | 633 | size - sizeof(*info)); |
766 | if (ret < 0) | 634 | if (ret < 0) |
767 | return ret; | 635 | return ret; |
768 | 636 | ||
769 | info->len += ret; | 637 | info->len += ret; |
770 | } | 638 | } |
771 | 639 | ||
772 | strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); | 640 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), |
773 | strlcpy(info->driver_name, pt->parent->ops->driver_name, | 641 | sizeof(info->obj_name)); |
642 | strlcpy(info->driver_name, fence->ops->get_driver_name(fence), | ||
774 | sizeof(info->driver_name)); | 643 | sizeof(info->driver_name)); |
775 | info->status = pt->status; | 644 | if (fence_is_signaled(fence)) |
776 | info->timestamp_ns = ktime_to_ns(pt->timestamp); | 645 | info->status = fence->status >= 0 ? 1 : fence->status; |
646 | else | ||
647 | info->status = 0; | ||
648 | info->timestamp_ns = ktime_to_ns(fence->timestamp); | ||
777 | 649 | ||
778 | return info->len; | 650 | return info->len; |
779 | } | 651 | } |
@@ -782,10 +654,9 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence, | |||
782 | unsigned long arg) | 654 | unsigned long arg) |
783 | { | 655 | { |
784 | struct sync_fence_info_data *data; | 656 | struct sync_fence_info_data *data; |
785 | struct list_head *pos; | ||
786 | __u32 size; | 657 | __u32 size; |
787 | __u32 len = 0; | 658 | __u32 len = 0; |
788 | int ret; | 659 | int ret, i; |
789 | 660 | ||
790 | if (copy_from_user(&size, (void __user *)arg, sizeof(size))) | 661 | if (copy_from_user(&size, (void __user *)arg, sizeof(size))) |
791 | return -EFAULT; | 662 | return -EFAULT; |
@@ -801,12 +672,14 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence, | |||
801 | return -ENOMEM; | 672 | return -ENOMEM; |
802 | 673 | ||
803 | strlcpy(data->name, fence->name, sizeof(data->name)); | 674 | strlcpy(data->name, fence->name, sizeof(data->name)); |
804 | data->status = fence->status; | 675 | data->status = atomic_read(&fence->status); |
676 | if (data->status >= 0) | ||
677 | data->status = !data->status; | ||
678 | |||
805 | len = sizeof(struct sync_fence_info_data); | 679 | len = sizeof(struct sync_fence_info_data); |
806 | 680 | ||
807 | list_for_each(pos, &fence->pt_list_head) { | 681 | for (i = 0; i < fence->num_fences; ++i) { |
808 | struct sync_pt *pt = | 682 | struct fence *pt = fence->cbs[i].sync_pt; |
809 | container_of(pos, struct sync_pt, pt_list); | ||
810 | 683 | ||
811 | ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); | 684 | ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); |
812 | 685 | ||
@@ -833,7 +706,6 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd, | |||
833 | unsigned long arg) | 706 | unsigned long arg) |
834 | { | 707 | { |
835 | struct sync_fence *fence = file->private_data; | 708 | struct sync_fence *fence = file->private_data; |
836 | |||
837 | switch (cmd) { | 709 | switch (cmd) { |
838 | case SYNC_IOC_WAIT: | 710 | case SYNC_IOC_WAIT: |
839 | return sync_fence_ioctl_wait(fence, arg); | 711 | return sync_fence_ioctl_wait(fence, arg); |
@@ -849,181 +721,10 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd, | |||
849 | } | 721 | } |
850 | } | 722 | } |
851 | 723 | ||
852 | #ifdef CONFIG_DEBUG_FS | 724 | static const struct file_operations sync_fence_fops = { |
853 | static const char *sync_status_str(int status) | 725 | .release = sync_fence_release, |
854 | { | 726 | .poll = sync_fence_poll, |
855 | if (status > 0) | 727 | .unlocked_ioctl = sync_fence_ioctl, |
856 | return "signaled"; | 728 | .compat_ioctl = sync_fence_ioctl, |
857 | else if (status == 0) | ||
858 | return "active"; | ||
859 | else | ||
860 | return "error"; | ||
861 | } | ||
862 | |||
863 | static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) | ||
864 | { | ||
865 | int status = pt->status; | ||
866 | |||
867 | seq_printf(s, " %s%spt %s", | ||
868 | fence ? pt->parent->name : "", | ||
869 | fence ? "_" : "", | ||
870 | sync_status_str(status)); | ||
871 | if (pt->status) { | ||
872 | struct timeval tv = ktime_to_timeval(pt->timestamp); | ||
873 | |||
874 | seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); | ||
875 | } | ||
876 | |||
877 | if (pt->parent->ops->timeline_value_str && | ||
878 | pt->parent->ops->pt_value_str) { | ||
879 | char value[64]; | ||
880 | |||
881 | pt->parent->ops->pt_value_str(pt, value, sizeof(value)); | ||
882 | seq_printf(s, ": %s", value); | ||
883 | if (fence) { | ||
884 | pt->parent->ops->timeline_value_str(pt->parent, value, | ||
885 | sizeof(value)); | ||
886 | seq_printf(s, " / %s", value); | ||
887 | } | ||
888 | } else if (pt->parent->ops->print_pt) { | ||
889 | seq_puts(s, ": "); | ||
890 | pt->parent->ops->print_pt(s, pt); | ||
891 | } | ||
892 | |||
893 | seq_puts(s, "\n"); | ||
894 | } | ||
895 | |||
896 | static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) | ||
897 | { | ||
898 | struct list_head *pos; | ||
899 | unsigned long flags; | ||
900 | |||
901 | seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); | ||
902 | |||
903 | if (obj->ops->timeline_value_str) { | ||
904 | char value[64]; | ||
905 | |||
906 | obj->ops->timeline_value_str(obj, value, sizeof(value)); | ||
907 | seq_printf(s, ": %s", value); | ||
908 | } else if (obj->ops->print_obj) { | ||
909 | seq_puts(s, ": "); | ||
910 | obj->ops->print_obj(s, obj); | ||
911 | } | ||
912 | |||
913 | seq_puts(s, "\n"); | ||
914 | |||
915 | spin_lock_irqsave(&obj->child_list_lock, flags); | ||
916 | list_for_each(pos, &obj->child_list_head) { | ||
917 | struct sync_pt *pt = | ||
918 | container_of(pos, struct sync_pt, child_list); | ||
919 | sync_print_pt(s, pt, false); | ||
920 | } | ||
921 | spin_unlock_irqrestore(&obj->child_list_lock, flags); | ||
922 | } | ||
923 | |||
924 | static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) | ||
925 | { | ||
926 | struct list_head *pos; | ||
927 | unsigned long flags; | ||
928 | |||
929 | seq_printf(s, "[%p] %s: %s\n", fence, fence->name, | ||
930 | sync_status_str(fence->status)); | ||
931 | |||
932 | list_for_each(pos, &fence->pt_list_head) { | ||
933 | struct sync_pt *pt = | ||
934 | container_of(pos, struct sync_pt, pt_list); | ||
935 | sync_print_pt(s, pt, true); | ||
936 | } | ||
937 | |||
938 | spin_lock_irqsave(&fence->waiter_list_lock, flags); | ||
939 | list_for_each(pos, &fence->waiter_list_head) { | ||
940 | struct sync_fence_waiter *waiter = | ||
941 | container_of(pos, struct sync_fence_waiter, | ||
942 | waiter_list); | ||
943 | |||
944 | seq_printf(s, "waiter %pF\n", waiter->callback); | ||
945 | } | ||
946 | spin_unlock_irqrestore(&fence->waiter_list_lock, flags); | ||
947 | } | ||
948 | |||
949 | static int sync_debugfs_show(struct seq_file *s, void *unused) | ||
950 | { | ||
951 | unsigned long flags; | ||
952 | struct list_head *pos; | ||
953 | |||
954 | seq_puts(s, "objs:\n--------------\n"); | ||
955 | |||
956 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | ||
957 | list_for_each(pos, &sync_timeline_list_head) { | ||
958 | struct sync_timeline *obj = | ||
959 | container_of(pos, struct sync_timeline, | ||
960 | sync_timeline_list); | ||
961 | |||
962 | sync_print_obj(s, obj); | ||
963 | seq_puts(s, "\n"); | ||
964 | } | ||
965 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
966 | |||
967 | seq_puts(s, "fences:\n--------------\n"); | ||
968 | |||
969 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
970 | list_for_each(pos, &sync_fence_list_head) { | ||
971 | struct sync_fence *fence = | ||
972 | container_of(pos, struct sync_fence, sync_fence_list); | ||
973 | |||
974 | sync_print_fence(s, fence); | ||
975 | seq_puts(s, "\n"); | ||
976 | } | ||
977 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | static int sync_debugfs_open(struct inode *inode, struct file *file) | ||
982 | { | ||
983 | return single_open(file, sync_debugfs_show, inode->i_private); | ||
984 | } | ||
985 | |||
986 | static const struct file_operations sync_debugfs_fops = { | ||
987 | .open = sync_debugfs_open, | ||
988 | .read = seq_read, | ||
989 | .llseek = seq_lseek, | ||
990 | .release = single_release, | ||
991 | }; | 729 | }; |
992 | 730 | ||
993 | static __init int sync_debugfs_init(void) | ||
994 | { | ||
995 | debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); | ||
996 | return 0; | ||
997 | } | ||
998 | late_initcall(sync_debugfs_init); | ||
999 | |||
1000 | #define DUMP_CHUNK 256 | ||
1001 | static char sync_dump_buf[64 * 1024]; | ||
1002 | static void sync_dump(void) | ||
1003 | { | ||
1004 | struct seq_file s = { | ||
1005 | .buf = sync_dump_buf, | ||
1006 | .size = sizeof(sync_dump_buf) - 1, | ||
1007 | }; | ||
1008 | int i; | ||
1009 | |||
1010 | sync_debugfs_show(&s, NULL); | ||
1011 | |||
1012 | for (i = 0; i < s.count; i += DUMP_CHUNK) { | ||
1013 | if ((s.count - i) > DUMP_CHUNK) { | ||
1014 | char c = s.buf[i + DUMP_CHUNK]; | ||
1015 | |||
1016 | s.buf[i + DUMP_CHUNK] = 0; | ||
1017 | pr_cont("%s", s.buf + i); | ||
1018 | s.buf[i + DUMP_CHUNK] = c; | ||
1019 | } else { | ||
1020 | s.buf[s.count] = 0; | ||
1021 | pr_cont("%s", s.buf + i); | ||
1022 | } | ||
1023 | } | ||
1024 | } | ||
1025 | #else | ||
1026 | static void sync_dump(void) | ||
1027 | { | ||
1028 | } | ||
1029 | #endif | ||
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index eaf57cccf626..66b0f431f63e 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/wait.h> | 21 | #include <linux/wait.h> |
22 | #include <linux/fence.h> | ||
22 | 23 | ||
23 | #include "uapi/sync.h" | 24 | #include "uapi/sync.h" |
24 | 25 | ||
@@ -40,8 +41,6 @@ struct sync_fence; | |||
40 | * -1 if a will signal before b | 41 | * -1 if a will signal before b |
41 | * @free_pt: called before sync_pt is freed | 42 | * @free_pt: called before sync_pt is freed |
42 | * @release_obj: called before sync_timeline is freed | 43 | * @release_obj: called before sync_timeline is freed |
43 | * @print_obj: deprecated | ||
44 | * @print_pt: deprecated | ||
45 | * @fill_driver_data: write implementation specific driver data to data. | 44 | * @fill_driver_data: write implementation specific driver data to data. |
46 | * should return an error if there is not enough room | 45 | * should return an error if there is not enough room |
47 | * as specified by size. This information is returned | 46 | * as specified by size. This information is returned |
@@ -67,13 +66,6 @@ struct sync_timeline_ops { | |||
67 | /* optional */ | 66 | /* optional */ |
68 | void (*release_obj)(struct sync_timeline *sync_timeline); | 67 | void (*release_obj)(struct sync_timeline *sync_timeline); |
69 | 68 | ||
70 | /* deprecated */ | ||
71 | void (*print_obj)(struct seq_file *s, | ||
72 | struct sync_timeline *sync_timeline); | ||
73 | |||
74 | /* deprecated */ | ||
75 | void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt); | ||
76 | |||
77 | /* optional */ | 69 | /* optional */ |
78 | int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); | 70 | int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); |
79 | 71 | ||
@@ -104,19 +96,21 @@ struct sync_timeline { | |||
104 | 96 | ||
105 | /* protected by child_list_lock */ | 97 | /* protected by child_list_lock */ |
106 | bool destroyed; | 98 | bool destroyed; |
99 | int context, value; | ||
107 | 100 | ||
108 | struct list_head child_list_head; | 101 | struct list_head child_list_head; |
109 | spinlock_t child_list_lock; | 102 | spinlock_t child_list_lock; |
110 | 103 | ||
111 | struct list_head active_list_head; | 104 | struct list_head active_list_head; |
112 | spinlock_t active_list_lock; | ||
113 | 105 | ||
106 | #ifdef CONFIG_DEBUG_FS | ||
114 | struct list_head sync_timeline_list; | 107 | struct list_head sync_timeline_list; |
108 | #endif | ||
115 | }; | 109 | }; |
116 | 110 | ||
117 | /** | 111 | /** |
118 | * struct sync_pt - sync point | 112 | * struct sync_pt - sync point |
119 | * @parent: sync_timeline to which this sync_pt belongs | 113 | * @fence: base fence class |
120 | * @child_list: membership in sync_timeline.child_list_head | 114 | * @child_list: membership in sync_timeline.child_list_head |
121 | * @active_list: membership in sync_timeline.active_list_head | 115 | * @active_list: membership in sync_timeline.active_list_head |
122 | * @signaled_list: membership in temporary signaled_list on stack | 116 | * @signaled_list: membership in temporary signaled_list on stack |
@@ -127,19 +121,22 @@ struct sync_timeline { | |||
127 | * signaled or error. | 121 | * signaled or error. |
128 | */ | 122 | */ |
129 | struct sync_pt { | 123 | struct sync_pt { |
130 | struct sync_timeline *parent; | 124 | struct fence base; |
131 | struct list_head child_list; | ||
132 | 125 | ||
126 | struct list_head child_list; | ||
133 | struct list_head active_list; | 127 | struct list_head active_list; |
134 | struct list_head signaled_list; | 128 | }; |
135 | |||
136 | struct sync_fence *fence; | ||
137 | struct list_head pt_list; | ||
138 | 129 | ||
139 | /* protected by parent->active_list_lock */ | 130 | static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) |
140 | int status; | 131 | { |
132 | return container_of(pt->base.lock, struct sync_timeline, | ||
133 | child_list_lock); | ||
134 | } | ||
141 | 135 | ||
142 | ktime_t timestamp; | 136 | struct sync_fence_cb { |
137 | struct fence_cb cb; | ||
138 | struct fence *sync_pt; | ||
139 | struct sync_fence *fence; | ||
143 | }; | 140 | }; |
144 | 141 | ||
145 | /** | 142 | /** |
@@ -149,9 +146,7 @@ struct sync_pt { | |||
149 | * @name: name of sync_fence. Useful for debugging | 146 | * @name: name of sync_fence. Useful for debugging |
150 | * @pt_list_head: list of sync_pts in the fence. immutable once fence | 147 | * @pt_list_head: list of sync_pts in the fence. immutable once fence |
151 | * is created | 148 | * is created |
152 | * @waiter_list_head: list of asynchronous waiters on this fence | 149 | * @status: 0: signaled, >0:active, <0: error |
153 | * @waiter_list_lock: lock protecting @waiter_list_head and @status | ||
154 | * @status: 1: signaled, 0:active, <0: error | ||
155 | * | 150 | * |
156 | * @wq: wait queue for fence signaling | 151 | * @wq: wait queue for fence signaling |
157 | * @sync_fence_list: membership in global fence list | 152 | * @sync_fence_list: membership in global fence list |
@@ -160,17 +155,15 @@ struct sync_fence { | |||
160 | struct file *file; | 155 | struct file *file; |
161 | struct kref kref; | 156 | struct kref kref; |
162 | char name[32]; | 157 | char name[32]; |
163 | 158 | #ifdef CONFIG_DEBUG_FS | |
164 | /* this list is immutable once the fence is created */ | 159 | struct list_head sync_fence_list; |
165 | struct list_head pt_list_head; | 160 | #endif |
166 | 161 | int num_fences; | |
167 | struct list_head waiter_list_head; | ||
168 | spinlock_t waiter_list_lock; /* also protects status */ | ||
169 | int status; | ||
170 | 162 | ||
171 | wait_queue_head_t wq; | 163 | wait_queue_head_t wq; |
164 | atomic_t status; | ||
172 | 165 | ||
173 | struct list_head sync_fence_list; | 166 | struct sync_fence_cb cbs[]; |
174 | }; | 167 | }; |
175 | 168 | ||
176 | struct sync_fence_waiter; | 169 | struct sync_fence_waiter; |
@@ -184,14 +177,14 @@ typedef void (*sync_callback_t)(struct sync_fence *fence, | |||
184 | * @callback_data: pointer to pass to @callback | 177 | * @callback_data: pointer to pass to @callback |
185 | */ | 178 | */ |
186 | struct sync_fence_waiter { | 179 | struct sync_fence_waiter { |
187 | struct list_head waiter_list; | 180 | wait_queue_t work; |
188 | 181 | sync_callback_t callback; | |
189 | sync_callback_t callback; | ||
190 | }; | 182 | }; |
191 | 183 | ||
192 | static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, | 184 | static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, |
193 | sync_callback_t callback) | 185 | sync_callback_t callback) |
194 | { | 186 | { |
187 | INIT_LIST_HEAD(&waiter->work.task_list); | ||
195 | waiter->callback = callback; | 188 | waiter->callback = callback; |
196 | } | 189 | } |
197 | 190 | ||
@@ -341,4 +334,22 @@ int sync_fence_cancel_async(struct sync_fence *fence, | |||
341 | */ | 334 | */ |
342 | int sync_fence_wait(struct sync_fence *fence, long timeout); | 335 | int sync_fence_wait(struct sync_fence *fence, long timeout); |
343 | 336 | ||
337 | #ifdef CONFIG_DEBUG_FS | ||
338 | |||
339 | extern void sync_timeline_debug_add(struct sync_timeline *obj); | ||
340 | extern void sync_timeline_debug_remove(struct sync_timeline *obj); | ||
341 | extern void sync_fence_debug_add(struct sync_fence *fence); | ||
342 | extern void sync_fence_debug_remove(struct sync_fence *fence); | ||
343 | extern void sync_dump(void); | ||
344 | |||
345 | #else | ||
346 | # define sync_timeline_debug_add(obj) | ||
347 | # define sync_timeline_debug_remove(obj) | ||
348 | # define sync_fence_debug_add(fence) | ||
349 | # define sync_fence_debug_remove(fence) | ||
350 | # define sync_dump() | ||
351 | #endif | ||
352 | int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, | ||
353 | int wake_flags, void *key); | ||
354 | |||
344 | #endif /* _LINUX_SYNC_H */ | 355 | #endif /* _LINUX_SYNC_H */ |
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c new file mode 100644 index 000000000000..257fc91bf02b --- /dev/null +++ b/drivers/staging/android/sync_debug.c | |||
@@ -0,0 +1,252 @@ | |||
1 | /* | ||
2 | * drivers/base/sync.c | ||
3 | * | ||
4 | * Copyright (C) 2012 Google, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/debugfs.h> | ||
18 | #include <linux/export.h> | ||
19 | #include <linux/file.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/poll.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/seq_file.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | #include <linux/anon_inodes.h> | ||
28 | #include "sync.h" | ||
29 | |||
30 | #ifdef CONFIG_DEBUG_FS | ||
31 | |||
32 | static LIST_HEAD(sync_timeline_list_head); | ||
33 | static DEFINE_SPINLOCK(sync_timeline_list_lock); | ||
34 | static LIST_HEAD(sync_fence_list_head); | ||
35 | static DEFINE_SPINLOCK(sync_fence_list_lock); | ||
36 | |||
37 | void sync_timeline_debug_add(struct sync_timeline *obj) | ||
38 | { | ||
39 | unsigned long flags; | ||
40 | |||
41 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | ||
42 | list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); | ||
43 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
44 | } | ||
45 | |||
46 | void sync_timeline_debug_remove(struct sync_timeline *obj) | ||
47 | { | ||
48 | unsigned long flags; | ||
49 | |||
50 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | ||
51 | list_del(&obj->sync_timeline_list); | ||
52 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
53 | } | ||
54 | |||
55 | void sync_fence_debug_add(struct sync_fence *fence) | ||
56 | { | ||
57 | unsigned long flags; | ||
58 | |||
59 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
60 | list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); | ||
61 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
62 | } | ||
63 | |||
64 | void sync_fence_debug_remove(struct sync_fence *fence) | ||
65 | { | ||
66 | unsigned long flags; | ||
67 | |||
68 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
69 | list_del(&fence->sync_fence_list); | ||
70 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
71 | } | ||
72 | |||
73 | static const char *sync_status_str(int status) | ||
74 | { | ||
75 | if (status == 0) | ||
76 | return "signaled"; | ||
77 | |||
78 | if (status > 0) | ||
79 | return "active"; | ||
80 | |||
81 | return "error"; | ||
82 | } | ||
83 | |||
84 | static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) | ||
85 | { | ||
86 | int status = 1; | ||
87 | struct sync_timeline *parent = sync_pt_parent(pt); | ||
88 | |||
89 | if (fence_is_signaled_locked(&pt->base)) | ||
90 | status = pt->base.status; | ||
91 | |||
92 | seq_printf(s, " %s%spt %s", | ||
93 | fence ? parent->name : "", | ||
94 | fence ? "_" : "", | ||
95 | sync_status_str(status)); | ||
96 | |||
97 | if (status <= 0) { | ||
98 | struct timeval tv = ktime_to_timeval(pt->base.timestamp); | ||
99 | |||
100 | seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); | ||
101 | } | ||
102 | |||
103 | if (parent->ops->timeline_value_str && | ||
104 | parent->ops->pt_value_str) { | ||
105 | char value[64]; | ||
106 | |||
107 | parent->ops->pt_value_str(pt, value, sizeof(value)); | ||
108 | seq_printf(s, ": %s", value); | ||
109 | if (fence) { | ||
110 | parent->ops->timeline_value_str(parent, value, | ||
111 | sizeof(value)); | ||
112 | seq_printf(s, " / %s", value); | ||
113 | } | ||
114 | } | ||
115 | |||
116 | seq_puts(s, "\n"); | ||
117 | } | ||
118 | |||
119 | static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) | ||
120 | { | ||
121 | struct list_head *pos; | ||
122 | unsigned long flags; | ||
123 | |||
124 | seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); | ||
125 | |||
126 | if (obj->ops->timeline_value_str) { | ||
127 | char value[64]; | ||
128 | |||
129 | obj->ops->timeline_value_str(obj, value, sizeof(value)); | ||
130 | seq_printf(s, ": %s", value); | ||
131 | } | ||
132 | |||
133 | seq_puts(s, "\n"); | ||
134 | |||
135 | spin_lock_irqsave(&obj->child_list_lock, flags); | ||
136 | list_for_each(pos, &obj->child_list_head) { | ||
137 | struct sync_pt *pt = | ||
138 | container_of(pos, struct sync_pt, child_list); | ||
139 | sync_print_pt(s, pt, false); | ||
140 | } | ||
141 | spin_unlock_irqrestore(&obj->child_list_lock, flags); | ||
142 | } | ||
143 | |||
144 | static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) | ||
145 | { | ||
146 | wait_queue_t *pos; | ||
147 | unsigned long flags; | ||
148 | int i; | ||
149 | |||
150 | seq_printf(s, "[%p] %s: %s\n", fence, fence->name, | ||
151 | sync_status_str(atomic_read(&fence->status))); | ||
152 | |||
153 | for (i = 0; i < fence->num_fences; ++i) { | ||
154 | struct sync_pt *pt = | ||
155 | container_of(fence->cbs[i].sync_pt, | ||
156 | struct sync_pt, base); | ||
157 | |||
158 | sync_print_pt(s, pt, true); | ||
159 | } | ||
160 | |||
161 | spin_lock_irqsave(&fence->wq.lock, flags); | ||
162 | list_for_each_entry(pos, &fence->wq.task_list, task_list) { | ||
163 | struct sync_fence_waiter *waiter; | ||
164 | |||
165 | if (pos->func != &sync_fence_wake_up_wq) | ||
166 | continue; | ||
167 | |||
168 | waiter = container_of(pos, struct sync_fence_waiter, work); | ||
169 | |||
170 | seq_printf(s, "waiter %pF\n", waiter->callback); | ||
171 | } | ||
172 | spin_unlock_irqrestore(&fence->wq.lock, flags); | ||
173 | } | ||
174 | |||
175 | static int sync_debugfs_show(struct seq_file *s, void *unused) | ||
176 | { | ||
177 | unsigned long flags; | ||
178 | struct list_head *pos; | ||
179 | |||
180 | seq_puts(s, "objs:\n--------------\n"); | ||
181 | |||
182 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | ||
183 | list_for_each(pos, &sync_timeline_list_head) { | ||
184 | struct sync_timeline *obj = | ||
185 | container_of(pos, struct sync_timeline, | ||
186 | sync_timeline_list); | ||
187 | |||
188 | sync_print_obj(s, obj); | ||
189 | seq_puts(s, "\n"); | ||
190 | } | ||
191 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | ||
192 | |||
193 | seq_puts(s, "fences:\n--------------\n"); | ||
194 | |||
195 | spin_lock_irqsave(&sync_fence_list_lock, flags); | ||
196 | list_for_each(pos, &sync_fence_list_head) { | ||
197 | struct sync_fence *fence = | ||
198 | container_of(pos, struct sync_fence, sync_fence_list); | ||
199 | |||
200 | sync_print_fence(s, fence); | ||
201 | seq_puts(s, "\n"); | ||
202 | } | ||
203 | spin_unlock_irqrestore(&sync_fence_list_lock, flags); | ||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | static int sync_debugfs_open(struct inode *inode, struct file *file) | ||
208 | { | ||
209 | return single_open(file, sync_debugfs_show, inode->i_private); | ||
210 | } | ||
211 | |||
212 | static const struct file_operations sync_debugfs_fops = { | ||
213 | .open = sync_debugfs_open, | ||
214 | .read = seq_read, | ||
215 | .llseek = seq_lseek, | ||
216 | .release = single_release, | ||
217 | }; | ||
218 | |||
219 | static __init int sync_debugfs_init(void) | ||
220 | { | ||
221 | debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); | ||
222 | return 0; | ||
223 | } | ||
224 | late_initcall(sync_debugfs_init); | ||
225 | |||
226 | #define DUMP_CHUNK 256 | ||
227 | static char sync_dump_buf[64 * 1024]; | ||
228 | void sync_dump(void) | ||
229 | { | ||
230 | struct seq_file s = { | ||
231 | .buf = sync_dump_buf, | ||
232 | .size = sizeof(sync_dump_buf) - 1, | ||
233 | }; | ||
234 | int i; | ||
235 | |||
236 | sync_debugfs_show(&s, NULL); | ||
237 | |||
238 | for (i = 0; i < s.count; i += DUMP_CHUNK) { | ||
239 | if ((s.count - i) > DUMP_CHUNK) { | ||
240 | char c = s.buf[i + DUMP_CHUNK]; | ||
241 | |||
242 | s.buf[i + DUMP_CHUNK] = 0; | ||
243 | pr_cont("%s", s.buf + i); | ||
244 | s.buf[i + DUMP_CHUNK] = c; | ||
245 | } else { | ||
246 | s.buf[s.count] = 0; | ||
247 | pr_cont("%s", s.buf + i); | ||
248 | } | ||
249 | } | ||
250 | } | ||
251 | |||
252 | #endif | ||
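sync_dump() in the new sync_debug.c pushes a potentially very large debugfs rendering through pr_cont() in DUMP_CHUNK-sized pieces, temporarily NUL-terminating each chunk and restoring the overwritten byte afterwards. The same trick as a small stand-alone user-space program, with printf() standing in for pr_cont(); an illustrative sketch only.

    #include <stdio.h>
    #include <string.h>

    #define DUMP_CHUNK 256

    static void model_dump(char *buf, int count)
    {
            int i;

            for (i = 0; i < count; i += DUMP_CHUNK) {
                    if (count - i > DUMP_CHUNK) {
                            char c = buf[i + DUMP_CHUNK];

                            buf[i + DUMP_CHUNK] = 0;   /* terminate this chunk */
                            printf("%s", buf + i);
                            buf[i + DUMP_CHUNK] = c;   /* restore the byte */
                    } else {
                            buf[count] = 0;            /* final, short chunk */
                            printf("%s", buf + i);
                    }
            }
    }

    int main(void)
    {
            char buf[1024];

            memset(buf, 'x', sizeof(buf) - 1);
            model_dump(buf, (int)sizeof(buf) - 1);
            putchar('\n');
            return 0;
    }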
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h index 95462359ba57..77edb977a7bf 100644 --- a/drivers/staging/android/trace/sync.h +++ b/drivers/staging/android/trace/sync.h | |||
@@ -45,7 +45,7 @@ TRACE_EVENT(sync_wait, | |||
45 | 45 | ||
46 | TP_fast_assign( | 46 | TP_fast_assign( |
47 | __assign_str(name, fence->name); | 47 | __assign_str(name, fence->name); |
48 | __entry->status = fence->status; | 48 | __entry->status = atomic_read(&fence->status); |
49 | __entry->begin = begin; | 49 | __entry->begin = begin; |
50 | ), | 50 | ), |
51 | 51 | ||
@@ -54,19 +54,19 @@ TRACE_EVENT(sync_wait, | |||
54 | ); | 54 | ); |
55 | 55 | ||
56 | TRACE_EVENT(sync_pt, | 56 | TRACE_EVENT(sync_pt, |
57 | TP_PROTO(struct sync_pt *pt), | 57 | TP_PROTO(struct fence *pt), |
58 | 58 | ||
59 | TP_ARGS(pt), | 59 | TP_ARGS(pt), |
60 | 60 | ||
61 | TP_STRUCT__entry( | 61 | TP_STRUCT__entry( |
62 | __string(timeline, pt->parent->name) | 62 | __string(timeline, pt->ops->get_timeline_name(pt)) |
63 | __array(char, value, 32) | 63 | __array(char, value, 32) |
64 | ), | 64 | ), |
65 | 65 | ||
66 | TP_fast_assign( | 66 | TP_fast_assign( |
67 | __assign_str(timeline, pt->parent->name); | 67 | __assign_str(timeline, pt->ops->get_timeline_name(pt)); |
68 | if (pt->parent->ops->pt_value_str) { | 68 | if (pt->ops->fence_value_str) { |
69 | pt->parent->ops->pt_value_str(pt, __entry->value, | 69 | pt->ops->fence_value_str(pt, __entry->value, |
70 | sizeof(__entry->value)); | 70 | sizeof(__entry->value)); |
71 | } else { | 71 | } else { |
72 | __entry->value[0] = '\0'; | 72 | __entry->value[0] = '\0'; |