Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/dma-buf.c                 | 165
-rw-r--r--  drivers/base/firmware_class.c          | 208
-rw-r--r--  drivers/base/power/runtime.c           |   3
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c  |  10
-rw-r--r--  drivers/base/regmap/regcache.c         |   1
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c   |  12
6 files changed, 263 insertions(+), 136 deletions(-)
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index e38ad243b4bb..07cbbc6fddb4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -71,7 +71,7 @@ static inline int is_dma_buf_file(struct file *file)
  * ops, or error in allocating struct dma_buf, will return negative error.
  *
  */
-struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
                                 size_t size, int flags)
 {
         struct dma_buf *dmabuf;
@@ -80,7 +80,9 @@ struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
         if (WARN_ON(!priv || !ops
                           || !ops->map_dma_buf
                           || !ops->unmap_dma_buf
-                          || !ops->release)) {
+                          || !ops->release
+                          || !ops->kmap_atomic
+                          || !ops->kmap)) {
                 return ERR_PTR(-EINVAL);
         }
 
@@ -107,17 +109,18 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
 /**
  * dma_buf_fd - returns a file descriptor for the given dma_buf
  * @dmabuf:     [in]    pointer to dma_buf for which fd is required.
+ * @flags:      [in]    flags to give to fd
  *
  * On success, returns an associated 'fd'. Else, returns error.
  */
-int dma_buf_fd(struct dma_buf *dmabuf)
+int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 {
         int error, fd;
 
         if (!dmabuf || !dmabuf->file)
                 return -EINVAL;
 
-        error = get_unused_fd();
+        error = get_unused_fd_flags(flags);
         if (error < 0)
                 return error;
         fd = error;
@@ -185,17 +188,18 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
         struct dma_buf_attachment *attach;
         int ret;
 
-        if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+        if (WARN_ON(!dmabuf || !dev))
                 return ERR_PTR(-EINVAL);
 
         attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
         if (attach == NULL)
-                goto err_alloc;
-
-        mutex_lock(&dmabuf->lock);
+                return ERR_PTR(-ENOMEM);
 
         attach->dev = dev;
         attach->dmabuf = dmabuf;
+
+        mutex_lock(&dmabuf->lock);
+
         if (dmabuf->ops->attach) {
                 ret = dmabuf->ops->attach(dmabuf, dev, attach);
                 if (ret)
@@ -206,8 +210,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
         mutex_unlock(&dmabuf->lock);
         return attach;
 
-err_alloc:
-        return ERR_PTR(-ENOMEM);
 err_attach:
         kfree(attach);
         mutex_unlock(&dmabuf->lock);
@@ -224,7 +226,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
-        if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+        if (WARN_ON(!dmabuf || !attach))
                 return;
 
         mutex_lock(&dmabuf->lock);
@@ -255,13 +257,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 
         might_sleep();
 
-        if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+        if (WARN_ON(!attach || !attach->dmabuf))
                 return ERR_PTR(-EINVAL);
 
-        mutex_lock(&attach->dmabuf->lock);
-        if (attach->dmabuf->ops->map_dma_buf)
-                sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
-        mutex_unlock(&attach->dmabuf->lock);
+        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 
         return sg_table;
 }
@@ -273,19 +272,137 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
  * dma_buf_ops.
  * @attach:     [in]    attachment to unmap buffer from
  * @sg_table:   [in]    scatterlist info of the buffer to unmap
+ * @direction:  [in]    direction of DMA transfer
  *
  */
 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-                                struct sg_table *sg_table)
+                                struct sg_table *sg_table,
+                                enum dma_data_direction direction)
 {
-        if (WARN_ON(!attach || !attach->dmabuf || !sg_table
-                        || !attach->dmabuf->ops))
+        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                 return;
 
-        mutex_lock(&attach->dmabuf->lock);
-        if (attach->dmabuf->ops->unmap_dma_buf)
-                attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
-        mutex_unlock(&attach->dmabuf->lock);
-
+        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+                                                direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:     [in]    buffer to prepare cpu access for.
+ * @start:      [in]    start of range for cpu access.
+ * @len:        [in]    length of range for cpu access.
+ * @direction:  [in]    direction of cpu access.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+                             enum dma_data_direction direction)
+{
+        int ret = 0;
+
+        if (WARN_ON(!dmabuf))
+                return -EINVAL;
+
+        if (dmabuf->ops->begin_cpu_access)
+                ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dmabuf:     [in]    buffer to complete cpu access for.
+ * @start:      [in]    start of range for cpu access.
+ * @len:        [in]    length of range for cpu access.
+ * @direction:  [in]    direction of cpu access.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+                            enum dma_data_direction direction)
+{
+        WARN_ON(!dmabuf);
+
+        if (dmabuf->ops->end_cpu_access)
+                dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
+/**
+ * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
+ * space. The same restrictions as for kmap_atomic and friends apply.
+ * @dmabuf:     [in]    buffer to map page from.
+ * @page_num:   [in]    page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
+{
+        WARN_ON(!dmabuf);
+
+        return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
+
+/**
+ * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
+ * @dmabuf:     [in]    buffer to unmap page from.
+ * @page_num:   [in]    page in PAGE_SIZE units to unmap.
+ * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
+                           void *vaddr)
+{
+        WARN_ON(!dmabuf);
+
+        if (dmabuf->ops->kunmap_atomic)
+                dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
+
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dmabuf:     [in]    buffer to map page from.
+ * @page_num:   [in]    page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+        WARN_ON(!dmabuf);
+
+        return dmabuf->ops->kmap(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dmabuf:     [in]    buffer to unmap page from.
+ * @page_num:   [in]    page in PAGE_SIZE units to unmap.
+ * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+                    void *vaddr)
+{
+        WARN_ON(!dmabuf);
+
+        if (dmabuf->ops->kunmap)
+                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
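
The hunks above make the kmap and kmap_atomic hooks mandatory exporter ops and wrap them, together with begin_cpu_access/end_cpu_access, in an importer-facing API. A minimal sketch of how an importer might use that API (the function name and the pgoff variable are illustrative, not part of the patch):

/* Sketch only: read one page of a dma_buf from the CPU.
 * Assumes "dmabuf" was obtained elsewhere (e.g. via dma_buf_get()). */
static int example_read_page(struct dma_buf *dmabuf, unsigned long pgoff)
{
        void *vaddr;
        int ret;

        /* Let the exporter prepare caches for the accessed range. */
        ret = dma_buf_begin_cpu_access(dmabuf, pgoff << PAGE_SHIFT,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
        if (ret)
                return ret;

        vaddr = dma_buf_kmap(dmabuf, pgoff);    /* must not fail */
        /* ... read up to PAGE_SIZE bytes at vaddr ... */
        dma_buf_kunmap(dmabuf, pgoff, vaddr);

        dma_buf_end_cpu_access(dmabuf, pgoff << PAGE_SHIFT,
                               PAGE_SIZE, DMA_FROM_DEVICE);
        return 0;
}

Note also that dma_buf_fd() now takes a flags argument (e.g. O_CLOEXEC), which is passed straight through to get_unused_fd_flags().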
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 6c9387d646ec..5401814c874d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -16,10 +16,11 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/highmem.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #define to_dev(obj) container_of(obj, struct device, kobj)
 
@@ -81,6 +82,11 @@ enum {
 
 static int loading_timeout = 60;        /* In seconds */
 
+static inline long firmware_loading_timeout(void)
+{
+        return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
+}
+
 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
  * guarding for corner cases a global lock should be OK */
 static DEFINE_MUTEX(fw_lock);
@@ -440,13 +446,11 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
 {
         struct firmware_priv *fw_priv;
         struct device *f_dev;
-        int error;
 
         fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
         if (!fw_priv) {
                 dev_err(device, "%s: kmalloc failed\n", __func__);
-                error = -ENOMEM;
-                goto err_out;
+                return ERR_PTR(-ENOMEM);
         }
 
         fw_priv->fw = firmware;
@@ -463,98 +467,80 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
         f_dev->parent = device;
         f_dev->class = &firmware_class;
 
-        dev_set_uevent_suppress(f_dev, true);
-
-        /* Need to pin this module until class device is destroyed */
-        __module_get(THIS_MODULE);
-
-        error = device_add(f_dev);
-        if (error) {
-                dev_err(device, "%s: device_register failed\n", __func__);
-                goto err_put_dev;
-        }
-
-        error = device_create_bin_file(f_dev, &firmware_attr_data);
-        if (error) {
-                dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
-                goto err_del_dev;
-        }
-
-        error = device_create_file(f_dev, &dev_attr_loading);
-        if (error) {
-                dev_err(device, "%s: device_create_file failed\n", __func__);
-                goto err_del_bin_attr;
-        }
-
-        if (uevent)
-                dev_set_uevent_suppress(f_dev, false);
-
         return fw_priv;
-
-err_del_bin_attr:
-        device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
-        device_del(f_dev);
-err_put_dev:
-        put_device(f_dev);
-err_out:
-        return ERR_PTR(error);
 }
 
-static void fw_destroy_instance(struct firmware_priv *fw_priv)
-{
-        struct device *f_dev = &fw_priv->dev;
-
-        device_remove_file(f_dev, &dev_attr_loading);
-        device_remove_bin_file(f_dev, &firmware_attr_data);
-        device_unregister(f_dev);
-}
-
-static int _request_firmware(const struct firmware **firmware_p,
-                             const char *name, struct device *device,
-                             bool uevent, bool nowait)
+static struct firmware_priv *
+_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
+                          struct device *device, bool uevent, bool nowait)
 {
-        struct firmware_priv *fw_priv;
         struct firmware *firmware;
-        int retval = 0;
+        struct firmware_priv *fw_priv;
 
         if (!firmware_p)
-                return -EINVAL;
+                return ERR_PTR(-EINVAL);
 
         *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
         if (!firmware) {
                 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
                         __func__);
-                return -ENOMEM;
+                return ERR_PTR(-ENOMEM);
         }
 
         if (fw_get_builtin_firmware(firmware, name)) {
                 dev_dbg(device, "firmware: using built-in firmware %s\n", name);
-                return 0;
+                return NULL;
+        }
+
+        fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+        if (IS_ERR(fw_priv)) {
+                release_firmware(firmware);
+                *firmware_p = NULL;
         }
+        return fw_priv;
+}
 
-        read_lock_usermodehelper();
+static void _request_firmware_cleanup(const struct firmware **firmware_p)
+{
+        release_firmware(*firmware_p);
+        *firmware_p = NULL;
+}
 
-        if (WARN_ON(usermodehelper_is_disabled())) {
-                dev_err(device, "firmware: %s will not be loaded\n", name);
-                retval = -EBUSY;
-                goto out;
+static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
+                                  long timeout)
+{
+        int retval = 0;
+        struct device *f_dev = &fw_priv->dev;
+
+        dev_set_uevent_suppress(f_dev, true);
+
+        /* Need to pin this module until class device is destroyed */
+        __module_get(THIS_MODULE);
+
+        retval = device_add(f_dev);
+        if (retval) {
+                dev_err(f_dev, "%s: device_register failed\n", __func__);
+                goto err_put_dev;
         }
 
-        if (uevent)
-                dev_dbg(device, "firmware: requesting %s\n", name);
+        retval = device_create_bin_file(f_dev, &firmware_attr_data);
+        if (retval) {
+                dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
+                goto err_del_dev;
+        }
 
-        fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
-        if (IS_ERR(fw_priv)) {
-                retval = PTR_ERR(fw_priv);
-                goto out;
+        retval = device_create_file(f_dev, &dev_attr_loading);
+        if (retval) {
+                dev_err(f_dev, "%s: device_create_file failed\n", __func__);
+                goto err_del_bin_attr;
         }
 
         if (uevent) {
-                if (loading_timeout > 0)
+                dev_set_uevent_suppress(f_dev, false);
+                dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id);
+                if (timeout != MAX_SCHEDULE_TIMEOUT)
                         mod_timer(&fw_priv->timeout,
-                                  round_jiffies_up(jiffies +
-                                                   loading_timeout * HZ));
+                                  round_jiffies_up(jiffies + timeout));
 
                 kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
         }
@@ -570,16 +556,13 @@ static int _request_firmware(const struct firmware **firmware_p,
         fw_priv->fw = NULL;
         mutex_unlock(&fw_lock);
 
-        fw_destroy_instance(fw_priv);
-
-out:
-        read_unlock_usermodehelper();
-
-        if (retval) {
-                release_firmware(firmware);
-                *firmware_p = NULL;
-        }
-
+        device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+        device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+        device_del(f_dev);
+err_put_dev:
+        put_device(f_dev);
         return retval;
 }
 
@@ -602,7 +585,26 @@ int
 request_firmware(const struct firmware **firmware_p, const char *name,
                  struct device *device)
 {
-        return _request_firmware(firmware_p, name, device, true, false);
+        struct firmware_priv *fw_priv;
+        int ret;
+
+        fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
+                                            false);
+        if (IS_ERR_OR_NULL(fw_priv))
+                return PTR_RET(fw_priv);
+
+        ret = usermodehelper_read_trylock();
+        if (WARN_ON(ret)) {
+                dev_err(device, "firmware: %s will not be loaded\n", name);
+        } else {
+                ret = _request_firmware_load(fw_priv, true,
+                                             firmware_loading_timeout());
+                usermodehelper_read_unlock();
+        }
+        if (ret)
+                _request_firmware_cleanup(firmware_p);
+
+        return ret;
 }
 
 /**
@@ -629,25 +631,39 @@ struct firmware_work {
         bool uevent;
 };
 
-static int request_firmware_work_func(void *arg)
+static void request_firmware_work_func(struct work_struct *work)
 {
-        struct firmware_work *fw_work = arg;
+        struct firmware_work *fw_work;
         const struct firmware *fw;
+        struct firmware_priv *fw_priv;
+        long timeout;
         int ret;
 
-        if (!arg) {
-                WARN_ON(1);
-                return 0;
+        fw_work = container_of(work, struct firmware_work, work);
+        fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
+                        fw_work->uevent, true);
+        if (IS_ERR_OR_NULL(fw_priv)) {
+                ret = PTR_RET(fw_priv);
+                goto out;
+        }
+
+        timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
+        if (timeout) {
+                ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
+                usermodehelper_read_unlock();
+        } else {
+                dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
+                        fw_work->name);
+                ret = -EAGAIN;
         }
+        if (ret)
+                _request_firmware_cleanup(&fw);
 
-        ret = _request_firmware(&fw, fw_work->name, fw_work->device,
-                                fw_work->uevent, true);
+ out:
         fw_work->cont(fw, fw_work->context);
 
         module_put(fw_work->module);
         kfree(fw_work);
-
-        return ret;
 }
 
 /**
653 | /** | 669 | /** |
@@ -673,7 +689,6 @@ request_firmware_nowait( | |||
673 | const char *name, struct device *device, gfp_t gfp, void *context, | 689 | const char *name, struct device *device, gfp_t gfp, void *context, |
674 | void (*cont)(const struct firmware *fw, void *context)) | 690 | void (*cont)(const struct firmware *fw, void *context)) |
675 | { | 691 | { |
676 | struct task_struct *task; | ||
677 | struct firmware_work *fw_work; | 692 | struct firmware_work *fw_work; |
678 | 693 | ||
679 | fw_work = kzalloc(sizeof (struct firmware_work), gfp); | 694 | fw_work = kzalloc(sizeof (struct firmware_work), gfp); |
@@ -692,15 +707,8 @@ request_firmware_nowait( | |||
692 | return -EFAULT; | 707 | return -EFAULT; |
693 | } | 708 | } |
694 | 709 | ||
695 | task = kthread_run(request_firmware_work_func, fw_work, | 710 | INIT_WORK(&fw_work->work, request_firmware_work_func); |
696 | "firmware/%s", name); | 711 | schedule_work(&fw_work->work); |
697 | if (IS_ERR(task)) { | ||
698 | fw_work->cont(NULL, fw_work->context); | ||
699 | module_put(fw_work->module); | ||
700 | kfree(fw_work); | ||
701 | return PTR_ERR(task); | ||
702 | } | ||
703 | |||
704 | return 0; | 712 | return 0; |
705 | } | 713 | } |
706 | 714 | ||
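
With the kthread gone, an asynchronous request is just a work item on the shared workqueue, and the usermodehelper lock is held only while the sysfs loader is actually in use. A hedged sketch of a caller follows; the driver structure and firmware path are made-up names, while the request_firmware_nowait() signature itself is unchanged by this patch:

/* Illustrative async load; the callback now runs from the shared workqueue. */
static void example_fw_cont(const struct firmware *fw, void *context)
{
        struct example_dev *edev = context;     /* hypothetical driver data */

        if (!fw)
                return;         /* load failed or timed out */
        /* ... program fw->data (fw->size bytes) into edev's hardware ... */
        release_firmware(fw);
}

static int example_load(struct device *dev, struct example_dev *edev)
{
        return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                       "example/fw.bin", dev, GFP_KERNEL,
                                       edev, example_fw_cont);
}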
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 541f821d4ea6..bd0f3949bcf9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -532,6 +532,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
                 dev->power.suspend_time = ktime_set(0, 0);
                 dev->power.max_time_suspended_ns = -1;
                 dev->power.deferred_resume = false;
+                wake_up_all(&dev->power.wait_queue);
+
                 if (retval == -EAGAIN || retval == -EBUSY) {
                         dev->power.runtime_error = 0;
 
@@ -547,7 +549,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
                 } else {
                         pm_runtime_cancel_pending(dev);
                 }
-                wake_up_all(&dev->power.wait_queue);
                 goto out;
         }
 
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 5157fa04c2f0..92b779ee002b 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -138,6 +138,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
         unsigned int base, top;
         int nodes = 0;
         int registers = 0;
+        int average;
 
         mutex_lock(&map->lock);
 
@@ -152,8 +153,13 @@ static int rbtree_show(struct seq_file *s, void *ignored)
                 registers += top - base + 1;
         }
 
+        if (nodes)
+                average = registers / nodes;
+        else
+                average = 0;
+
         seq_printf(s, "%d nodes, %d registers, average %d registers\n",
-                   nodes, registers, registers / nodes);
+                   nodes, registers, average);
 
         mutex_unlock(&map->lock);
 
@@ -396,7 +402,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                                map->cache_word_size);
 
                 /* Is this the hardware default?  If so skip. */
-                ret = regcache_lookup_reg(map, i);
+                ret = regcache_lookup_reg(map, regtmp);
                 if (ret >= 0 && val == map->reg_defaults[ret].def)
                         continue;
 
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 87f54dbf601b..74b69095def6 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -346,6 +346,7 @@ out:
 
         return ret;
 }
+EXPORT_SYMBOL_GPL(regcache_sync_region);
 
 /**
  * regcache_cache_only: Put a register map into cache only mode
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 58517a5dac13..251eb70f83e7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -27,12 +27,6 @@ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
         return strlen(buf);
 }
 
-static int regmap_open_file(struct inode *inode, struct file *file)
-{
-        file->private_data = inode->i_private;
-        return 0;
-}
-
 static ssize_t regmap_name_read_file(struct file *file,
                                      char __user *user_buf, size_t count,
                                      loff_t *ppos)
@@ -57,7 +51,7 @@ static ssize_t regmap_name_read_file(struct file *file,
 }
 
 static const struct file_operations regmap_name_fops = {
-        .open = regmap_open_file,
+        .open = simple_open,
         .read = regmap_name_read_file,
         .llseek = default_llseek,
 };
@@ -174,7 +168,7 @@ static ssize_t regmap_map_write_file(struct file *file,
 #endif
 
 static const struct file_operations regmap_map_fops = {
-        .open = regmap_open_file,
+        .open = simple_open,
         .read = regmap_map_read_file,
         .write = regmap_map_write_file,
         .llseek = default_llseek,
@@ -243,7 +237,7 @@ out:
 }
 
 static const struct file_operations regmap_access_fops = {
-        .open = regmap_open_file,
+        .open = simple_open,
         .read = regmap_access_read_file,
         .llseek = default_llseek,
 };
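
The removed .open handler was identical boilerplate repeated for all three fops. For reference, simple_open() in fs/libfs.c provides the same behavior, copying inode->i_private into file->private_data (it merely skips the store when i_private is NULL):

/* fs/libfs.c, for reference; this is what now backs the three fops: */
int simple_open(struct inode *inode, struct file *file)
{
        if (inode->i_private)
                file->private_data = inode->i_private;
        return 0;
}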