aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2019-03-06 17:52:48 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2019-03-06 17:52:48 -0500
commite431f2d74e1b91e00e71e97cadcadffc4cda8a9b (patch)
treef034a1380709ffa4fb44ff4855ba9b3037eea9e3 /drivers
parent45763bf4bc1ebdf8eb95697607e1fd042a3e1221 (diff)
parent36cf3b1363f464c40f6ce647d3ac0ae9617d5fbc (diff)
Merge tag 'driver-core-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
Pull driver core updates from Greg KH: "Here is the big driver core patchset for 5.1-rc1 More patches than "normal" here this merge window, due to some work in the driver core by Alexander Duyck to rework the async probe functionality to work better for a number of devices, and independant work from Rafael for the device link functionality to make it work "correctly". Also in here is: - lots of BUS_ATTR() removals, the macro is about to go away - firmware test fixups - ihex fixups and simplification - component additions (also includes i915 patches) - lots of minor coding style fixups and cleanups. All of these have been in linux-next for a while with no reported issues" * tag 'driver-core-5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (65 commits) driver core: platform: remove misleading err_alloc label platform: set of_node in platform_device_register_full() firmware: hardcode the debug message for -ENOENT driver core: Add missing description of new struct device_link field driver core: Fix PM-runtime for links added during consumer probe drivers/component: kerneldoc polish async: Add cmdline option to specify drivers to be async probed driver core: Fix possible supplier PM-usage counter imbalance PM-runtime: Fix __pm_runtime_set_status() race with runtime resume driver: platform: Support parsing GpioInt 0 in platform_get_irq() selftests: firmware: fix verify_reqs() return value Revert "selftests: firmware: remove use of non-standard diff -Z option" Revert "selftests: firmware: add CONFIG_FW_LOADER_USER_HELPER_FALLBACK to config" device: Fix comment for driver_data in struct device kernfs: Allocating memory for kernfs_iattrs with kmem_cache. 
sysfs: remove unused include of kernfs-internal.h driver core: Postpone DMA tear-down until after devres release driver core: Document limitation related to DL_FLAG_RPM_ACTIVE PM-runtime: Take suppliers into account in __pm_runtime_set_status() device.h: Add __cold to dev_<level> logging functions ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/base/base.h12
-rw-r--r--drivers/base/bus.c66
-rw-r--r--drivers/base/class.c14
-rw-r--r--drivers/base/component.c14
-rw-r--r--drivers/base/core.c246
-rw-r--r--drivers/base/cpu.c1
-rw-r--r--drivers/base/dd.c188
-rw-r--r--drivers/base/firmware_loader/Makefile4
-rw-r--r--drivers/base/firmware_loader/builtin/.gitignore1
-rw-r--r--drivers/base/firmware_loader/builtin/Makefile40
-rw-r--r--drivers/base/firmware_loader/fallback_table.c5
-rw-r--r--drivers/base/firmware_loader/main.c8
-rw-r--r--drivers/base/platform.c21
-rw-r--r--drivers/base/power/main.c12
-rw-r--r--drivers/base/power/runtime.c101
-rw-r--r--drivers/base/test/test_async_driver_probe.c261
-rw-r--r--drivers/block/rbd.c45
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/iommu/exynos-iommu.c1
-rw-r--r--drivers/iommu/rockchip-iommu.c3
-rw-r--r--drivers/nvdimm/bus.c11
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.c7
-rw-r--r--drivers/rapidio/rio-sysfs.c5
24 files changed, 790 insertions, 283 deletions
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 7a419a7a6235..b405436ee28e 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -60,12 +60,17 @@ struct driver_private {
60 * @knode_parent - node in sibling list 60 * @knode_parent - node in sibling list
61 * @knode_driver - node in driver list 61 * @knode_driver - node in driver list
62 * @knode_bus - node in bus list 62 * @knode_bus - node in bus list
63 * @knode_class - node in class list
63 * @deferred_probe - entry in deferred_probe_list which is used to retry the 64 * @deferred_probe - entry in deferred_probe_list which is used to retry the
64 * binding of drivers which were unable to get all the resources needed by 65 * binding of drivers which were unable to get all the resources needed by
65 * the device; typically because it depends on another driver getting 66 * the device; typically because it depends on another driver getting
66 * probed first. 67 * probed first.
68 * @async_driver - pointer to device driver awaiting probe via async_probe
67 * @device - pointer back to the struct device that this structure is 69 * @device - pointer back to the struct device that this structure is
68 * associated with. 70 * associated with.
71 * @dead - This device is currently either in the process of or has been
72 * removed from the system. Any asynchronous events scheduled for this
73 * device should exit without taking any action.
69 * 74 *
70 * Nothing outside of the driver core should ever touch these fields. 75 * Nothing outside of the driver core should ever touch these fields.
71 */ 76 */
@@ -74,8 +79,11 @@ struct device_private {
74 struct klist_node knode_parent; 79 struct klist_node knode_parent;
75 struct klist_node knode_driver; 80 struct klist_node knode_driver;
76 struct klist_node knode_bus; 81 struct klist_node knode_bus;
82 struct klist_node knode_class;
77 struct list_head deferred_probe; 83 struct list_head deferred_probe;
84 struct device_driver *async_driver;
78 struct device *device; 85 struct device *device;
86 u8 dead:1;
79}; 87};
80#define to_device_private_parent(obj) \ 88#define to_device_private_parent(obj) \
81 container_of(obj, struct device_private, knode_parent) 89 container_of(obj, struct device_private, knode_parent)
@@ -83,6 +91,8 @@ struct device_private {
83 container_of(obj, struct device_private, knode_driver) 91 container_of(obj, struct device_private, knode_driver)
84#define to_device_private_bus(obj) \ 92#define to_device_private_bus(obj) \
85 container_of(obj, struct device_private, knode_bus) 93 container_of(obj, struct device_private, knode_bus)
94#define to_device_private_class(obj) \
95 container_of(obj, struct device_private, knode_class)
86 96
87/* initialisation functions */ 97/* initialisation functions */
88extern int devices_init(void); 98extern int devices_init(void);
@@ -124,6 +134,8 @@ extern int driver_add_groups(struct device_driver *drv,
124 const struct attribute_group **groups); 134 const struct attribute_group **groups);
125extern void driver_remove_groups(struct device_driver *drv, 135extern void driver_remove_groups(struct device_driver *drv,
126 const struct attribute_group **groups); 136 const struct attribute_group **groups);
137int device_driver_attach(struct device_driver *drv, struct device *dev);
138void device_driver_detach(struct device *dev);
127 139
128extern char *make_class_name(const char *name, struct kobject *kobj); 140extern char *make_class_name(const char *name, struct kobject *kobj);
129 141
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index e06a57936cc9..0a58e969f8b7 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -187,11 +187,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
187 187
188 dev = bus_find_device_by_name(bus, NULL, buf); 188 dev = bus_find_device_by_name(bus, NULL, buf);
189 if (dev && dev->driver == drv) { 189 if (dev && dev->driver == drv) {
190 if (dev->parent && dev->bus->need_parent_lock) 190 device_driver_detach(dev);
191 device_lock(dev->parent);
192 device_release_driver(dev);
193 if (dev->parent && dev->bus->need_parent_lock)
194 device_unlock(dev->parent);
195 err = count; 191 err = count;
196 } 192 }
197 put_device(dev); 193 put_device(dev);
@@ -214,13 +210,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
214 210
215 dev = bus_find_device_by_name(bus, NULL, buf); 211 dev = bus_find_device_by_name(bus, NULL, buf);
216 if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { 212 if (dev && dev->driver == NULL && driver_match_device(drv, dev)) {
217 if (dev->parent && bus->need_parent_lock) 213 err = device_driver_attach(drv, dev);
218 device_lock(dev->parent);
219 device_lock(dev);
220 err = driver_probe_device(drv, dev);
221 device_unlock(dev);
222 if (dev->parent && bus->need_parent_lock)
223 device_unlock(dev->parent);
224 214
225 if (err > 0) { 215 if (err > 0) {
226 /* success */ 216 /* success */
@@ -236,12 +226,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
236} 226}
237static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store); 227static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
238 228
239static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) 229static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf)
240{ 230{
241 return sprintf(buf, "%d\n", bus->p->drivers_autoprobe); 231 return sprintf(buf, "%d\n", bus->p->drivers_autoprobe);
242} 232}
243 233
244static ssize_t store_drivers_autoprobe(struct bus_type *bus, 234static ssize_t drivers_autoprobe_store(struct bus_type *bus,
245 const char *buf, size_t count) 235 const char *buf, size_t count)
246{ 236{
247 if (buf[0] == '0') 237 if (buf[0] == '0')
@@ -251,7 +241,7 @@ static ssize_t store_drivers_autoprobe(struct bus_type *bus,
251 return count; 241 return count;
252} 242}
253 243
254static ssize_t store_drivers_probe(struct bus_type *bus, 244static ssize_t drivers_probe_store(struct bus_type *bus,
255 const char *buf, size_t count) 245 const char *buf, size_t count)
256{ 246{
257 struct device *dev; 247 struct device *dev;
@@ -586,9 +576,8 @@ static void remove_bind_files(struct device_driver *drv)
586 driver_remove_file(drv, &driver_attr_unbind); 576 driver_remove_file(drv, &driver_attr_unbind);
587} 577}
588 578
589static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe); 579static BUS_ATTR_WO(drivers_probe);
590static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO, 580static BUS_ATTR_RW(drivers_autoprobe);
591 show_drivers_autoprobe, store_drivers_autoprobe);
592 581
593static int add_probe_files(struct bus_type *bus) 582static int add_probe_files(struct bus_type *bus)
594{ 583{
@@ -621,17 +610,6 @@ static ssize_t uevent_store(struct device_driver *drv, const char *buf,
621} 610}
622static DRIVER_ATTR_WO(uevent); 611static DRIVER_ATTR_WO(uevent);
623 612
624static void driver_attach_async(void *_drv, async_cookie_t cookie)
625{
626 struct device_driver *drv = _drv;
627 int ret;
628
629 ret = driver_attach(drv);
630
631 pr_debug("bus: '%s': driver %s async attach completed: %d\n",
632 drv->bus->name, drv->name, ret);
633}
634
635/** 613/**
636 * bus_add_driver - Add a driver to the bus. 614 * bus_add_driver - Add a driver to the bus.
637 * @drv: driver. 615 * @drv: driver.
@@ -664,15 +642,9 @@ int bus_add_driver(struct device_driver *drv)
664 642
665 klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); 643 klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
666 if (drv->bus->p->drivers_autoprobe) { 644 if (drv->bus->p->drivers_autoprobe) {
667 if (driver_allows_async_probing(drv)) { 645 error = driver_attach(drv);
668 pr_debug("bus: '%s': probing driver %s asynchronously\n", 646 if (error)
669 drv->bus->name, drv->name); 647 goto out_unregister;
670 async_schedule(driver_attach_async, drv);
671 } else {
672 error = driver_attach(drv);
673 if (error)
674 goto out_unregister;
675 }
676 } 648 }
677 module_add_driver(drv->owner, drv); 649 module_add_driver(drv->owner, drv);
678 650
@@ -774,13 +746,8 @@ EXPORT_SYMBOL_GPL(bus_rescan_devices);
774 */ 746 */
775int device_reprobe(struct device *dev) 747int device_reprobe(struct device *dev)
776{ 748{
777 if (dev->driver) { 749 if (dev->driver)
778 if (dev->parent && dev->bus->need_parent_lock) 750 device_driver_detach(dev);
779 device_lock(dev->parent);
780 device_release_driver(dev);
781 if (dev->parent && dev->bus->need_parent_lock)
782 device_unlock(dev->parent);
783 }
784 return bus_rescan_devices_helper(dev, NULL); 751 return bus_rescan_devices_helper(dev, NULL);
785} 752}
786EXPORT_SYMBOL_GPL(device_reprobe); 753EXPORT_SYMBOL_GPL(device_reprobe);
@@ -838,7 +805,14 @@ static ssize_t bus_uevent_store(struct bus_type *bus,
838 rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); 805 rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
839 return rc ? rc : count; 806 return rc ? rc : count;
840} 807}
841static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); 808/*
809 * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO()
810 * here, but can not use it as earlier in the file we have
811 * DEVICE_ATTR_WO(uevent), which would cause a clash with the with the store
812 * function name.
813 */
814static struct bus_attribute bus_attr_uevent = __ATTR(uevent, S_IWUSR, NULL,
815 bus_uevent_store);
842 816
843/** 817/**
844 * bus_register - register a driver-core subsystem 818 * bus_register - register a driver-core subsystem
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 54def4e02f00..d8a6a5864c2e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -117,16 +117,22 @@ static void class_put(struct class *cls)
117 kset_put(&cls->p->subsys); 117 kset_put(&cls->p->subsys);
118} 118}
119 119
120static struct device *klist_class_to_dev(struct klist_node *n)
121{
122 struct device_private *p = to_device_private_class(n);
123 return p->device;
124}
125
120static void klist_class_dev_get(struct klist_node *n) 126static void klist_class_dev_get(struct klist_node *n)
121{ 127{
122 struct device *dev = container_of(n, struct device, knode_class); 128 struct device *dev = klist_class_to_dev(n);
123 129
124 get_device(dev); 130 get_device(dev);
125} 131}
126 132
127static void klist_class_dev_put(struct klist_node *n) 133static void klist_class_dev_put(struct klist_node *n)
128{ 134{
129 struct device *dev = container_of(n, struct device, knode_class); 135 struct device *dev = klist_class_to_dev(n);
130 136
131 put_device(dev); 137 put_device(dev);
132} 138}
@@ -277,7 +283,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
277 struct klist_node *start_knode = NULL; 283 struct klist_node *start_knode = NULL;
278 284
279 if (start) 285 if (start)
280 start_knode = &start->knode_class; 286 start_knode = &start->p->knode_class;
281 klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); 287 klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
282 iter->type = type; 288 iter->type = type;
283} 289}
@@ -304,7 +310,7 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
304 knode = klist_next(&iter->ki); 310 knode = klist_next(&iter->ki);
305 if (!knode) 311 if (!knode)
306 return NULL; 312 return NULL;
307 dev = container_of(knode, struct device, knode_class); 313 dev = klist_class_to_dev(knode);
308 if (!iter->type || iter->type == dev->type) 314 if (!iter->type || iter->type == dev->type)
309 return dev; 315 return dev;
310 } 316 }
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 7dbc41cccd58..532a3a5d8f63 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -27,7 +27,7 @@
27 * helper fills the niche of aggregate drivers for specific hardware, where 27 * helper fills the niche of aggregate drivers for specific hardware, where
28 * further standardization into a subsystem would not be practical. The common 28 * further standardization into a subsystem would not be practical. The common
29 * example is when a logical device (e.g. a DRM display driver) is spread around 29 * example is when a logical device (e.g. a DRM display driver) is spread around
30 * the SoC on various component (scanout engines, blending blocks, transcoders 30 * the SoC on various components (scanout engines, blending blocks, transcoders
31 * for various outputs and so on). 31 * for various outputs and so on).
32 * 32 *
33 * The component helper also doesn't solve runtime dependencies, e.g. for system 33 * The component helper also doesn't solve runtime dependencies, e.g. for system
@@ -378,7 +378,7 @@ static void __component_match_add(struct device *master,
378} 378}
379 379
380/** 380/**
381 * component_match_add_release - add a component match with release callback 381 * component_match_add_release - add a component match entry with release callback
382 * @master: device with the aggregate driver 382 * @master: device with the aggregate driver
383 * @matchptr: pointer to the list of component matches 383 * @matchptr: pointer to the list of component matches
384 * @release: release function for @compare_data 384 * @release: release function for @compare_data
@@ -408,7 +408,7 @@ void component_match_add_release(struct device *master,
408EXPORT_SYMBOL(component_match_add_release); 408EXPORT_SYMBOL(component_match_add_release);
409 409
410/** 410/**
411 * component_match_add_typed - add a compent match for a typed component 411 * component_match_add_typed - add a component match entry for a typed component
412 * @master: device with the aggregate driver 412 * @master: device with the aggregate driver
413 * @matchptr: pointer to the list of component matches 413 * @matchptr: pointer to the list of component matches
414 * @compare_typed: compare function to match against all typed components 414 * @compare_typed: compare function to match against all typed components
@@ -537,11 +537,11 @@ static void component_unbind(struct component *component,
537} 537}
538 538
539/** 539/**
540 * component_unbind_all - unbind all component to an aggregate driver 540 * component_unbind_all - unbind all components of an aggregate driver
541 * @master_dev: device with the aggregate driver 541 * @master_dev: device with the aggregate driver
542 * @data: opaque pointer, passed to all components 542 * @data: opaque pointer, passed to all components
543 * 543 *
544 * Unbinds all components to the aggregate @dev by passing @data to their 544 * Unbinds all components of the aggregate @dev by passing @data to their
545 * &component_ops.unbind functions. Should be called from 545 * &component_ops.unbind functions. Should be called from
546 * &component_master_ops.unbind. 546 * &component_master_ops.unbind.
547 */ 547 */
@@ -619,11 +619,11 @@ static int component_bind(struct component *component, struct master *master,
619} 619}
620 620
621/** 621/**
622 * component_bind_all - bind all component to an aggregate driver 622 * component_bind_all - bind all components of an aggregate driver
623 * @master_dev: device with the aggregate driver 623 * @master_dev: device with the aggregate driver
624 * @data: opaque pointer, passed to all components 624 * @data: opaque pointer, passed to all components
625 * 625 *
626 * Binds all components to the aggregate @dev by passing @data to their 626 * Binds all components of the aggregate @dev by passing @data to their
627 * &component_ops.bind functions. Should be called from 627 * &component_ops.bind functions. Should be called from
628 * &component_master_ops.bind. 628 * &component_master_ops.bind.
629 */ 629 */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 0073b09bb99f..4aeaa0c92bda 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -179,10 +179,31 @@ void device_pm_move_to_tail(struct device *dev)
179 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be 179 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
180 * ignored. 180 * ignored.
181 * 181 *
182 * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed 182 * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by
183 * automatically when the consumer device driver unbinds from it. 183 * the driver core and, in particular, the caller of this function is expected
184 * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS 184 * to drop the reference to the link acquired by it directly.
185 * set is invalid and will cause NULL to be returned. 185 *
186 * If that flag is not set, however, the caller of this function is handing the
187 * management of the link over to the driver core entirely and its return value
188 * can only be used to check whether or not the link is present. In that case,
189 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
190 * flags can be used to indicate to the driver core when the link can be safely
191 * deleted. Namely, setting one of them in @flags indicates to the driver core
192 * that the link is not going to be used (by the given caller of this function)
193 * after unbinding the consumer or supplier driver, respectively, from its
194 * device, so the link can be deleted at that point. If none of them is set,
195 * the link will be maintained until one of the devices pointed to by it (either
196 * the consumer or the supplier) is unregistered.
197 *
198 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
199 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
200 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
201 * be used to request the driver core to automaticall probe for a consmer
202 * driver after successfully binding a driver to the supplier device.
203 *
204 * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
205 * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
206 * will cause NULL to be returned upfront.
186 * 207 *
187 * A side effect of the link creation is re-ordering of dpm_list and the 208 * A side effect of the link creation is re-ordering of dpm_list and the
188 * devices_kset list by moving the consumer device and all devices depending 209 * devices_kset list by moving the consumer device and all devices depending
@@ -199,10 +220,22 @@ struct device_link *device_link_add(struct device *consumer,
199 struct device_link *link; 220 struct device_link *link;
200 221
201 if (!consumer || !supplier || 222 if (!consumer || !supplier ||
202 ((flags & DL_FLAG_STATELESS) && 223 (flags & DL_FLAG_STATELESS &&
203 (flags & DL_FLAG_AUTOREMOVE_CONSUMER))) 224 flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
225 DL_FLAG_AUTOREMOVE_SUPPLIER |
226 DL_FLAG_AUTOPROBE_CONSUMER)) ||
227 (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
228 flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
229 DL_FLAG_AUTOREMOVE_SUPPLIER)))
204 return NULL; 230 return NULL;
205 231
232 if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
233 if (pm_runtime_get_sync(supplier) < 0) {
234 pm_runtime_put_noidle(supplier);
235 return NULL;
236 }
237 }
238
206 device_links_write_lock(); 239 device_links_write_lock();
207 device_pm_lock(); 240 device_pm_lock();
208 241
@@ -217,35 +250,71 @@ struct device_link *device_link_add(struct device *consumer,
217 goto out; 250 goto out;
218 } 251 }
219 252
220 list_for_each_entry(link, &supplier->links.consumers, s_node) 253 /*
221 if (link->consumer == consumer) { 254 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
255 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
256 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
257 */
258 if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
259 flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
260
261 list_for_each_entry(link, &supplier->links.consumers, s_node) {
262 if (link->consumer != consumer)
263 continue;
264
265 /*
266 * Don't return a stateless link if the caller wants a stateful
267 * one and vice versa.
268 */
269 if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
270 link = NULL;
271 goto out;
272 }
273
274 if (flags & DL_FLAG_PM_RUNTIME) {
275 if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
276 pm_runtime_new_link(consumer);
277 link->flags |= DL_FLAG_PM_RUNTIME;
278 }
279 if (flags & DL_FLAG_RPM_ACTIVE)
280 refcount_inc(&link->rpm_active);
281 }
282
283 if (flags & DL_FLAG_STATELESS) {
222 kref_get(&link->kref); 284 kref_get(&link->kref);
223 goto out; 285 goto out;
224 } 286 }
225 287
288 /*
289 * If the life time of the link following from the new flags is
290 * longer than indicated by the flags of the existing link,
291 * update the existing link to stay around longer.
292 */
293 if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
294 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
295 link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
296 link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
297 }
298 } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
299 link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
300 DL_FLAG_AUTOREMOVE_SUPPLIER);
301 }
302 goto out;
303 }
304
226 link = kzalloc(sizeof(*link), GFP_KERNEL); 305 link = kzalloc(sizeof(*link), GFP_KERNEL);
227 if (!link) 306 if (!link)
228 goto out; 307 goto out;
229 308
309 refcount_set(&link->rpm_active, 1);
310
230 if (flags & DL_FLAG_PM_RUNTIME) { 311 if (flags & DL_FLAG_PM_RUNTIME) {
231 if (flags & DL_FLAG_RPM_ACTIVE) { 312 if (flags & DL_FLAG_RPM_ACTIVE)
232 if (pm_runtime_get_sync(supplier) < 0) { 313 refcount_inc(&link->rpm_active);
233 pm_runtime_put_noidle(supplier); 314
234 kfree(link);
235 link = NULL;
236 goto out;
237 }
238 link->rpm_active = true;
239 }
240 pm_runtime_new_link(consumer); 315 pm_runtime_new_link(consumer);
241 /*
242 * If the link is being added by the consumer driver at probe
243 * time, balance the decrementation of the supplier's runtime PM
244 * usage counter after consumer probe in driver_probe_device().
245 */
246 if (consumer->links.status == DL_DEV_PROBING)
247 pm_runtime_get_noresume(supplier);
248 } 316 }
317
249 get_device(supplier); 318 get_device(supplier);
250 link->supplier = supplier; 319 link->supplier = supplier;
251 INIT_LIST_HEAD(&link->s_node); 320 INIT_LIST_HEAD(&link->s_node);
@@ -260,17 +329,26 @@ struct device_link *device_link_add(struct device *consumer,
260 link->status = DL_STATE_NONE; 329 link->status = DL_STATE_NONE;
261 } else { 330 } else {
262 switch (supplier->links.status) { 331 switch (supplier->links.status) {
263 case DL_DEV_DRIVER_BOUND: 332 case DL_DEV_PROBING:
264 switch (consumer->links.status) { 333 switch (consumer->links.status) {
265 case DL_DEV_PROBING: 334 case DL_DEV_PROBING:
266 /* 335 /*
267 * Some callers expect the link creation during 336 * A consumer driver can create a link to a
268 * consumer driver probe to resume the supplier 337 * supplier that has not completed its probing
269 * even without DL_FLAG_RPM_ACTIVE. 338 * yet as long as it knows that the supplier is
339 * already functional (for example, it has just
340 * acquired some resources from the supplier).
270 */ 341 */
271 if (flags & DL_FLAG_PM_RUNTIME) 342 link->status = DL_STATE_CONSUMER_PROBE;
272 pm_runtime_resume(supplier); 343 break;
273 344 default:
345 link->status = DL_STATE_DORMANT;
346 break;
347 }
348 break;
349 case DL_DEV_DRIVER_BOUND:
350 switch (consumer->links.status) {
351 case DL_DEV_PROBING:
274 link->status = DL_STATE_CONSUMER_PROBE; 352 link->status = DL_STATE_CONSUMER_PROBE;
275 break; 353 break;
276 case DL_DEV_DRIVER_BOUND: 354 case DL_DEV_DRIVER_BOUND:
@@ -291,6 +369,14 @@ struct device_link *device_link_add(struct device *consumer,
291 } 369 }
292 370
293 /* 371 /*
372 * Some callers expect the link creation during consumer driver probe to
373 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
374 */
375 if (link->status == DL_STATE_CONSUMER_PROBE &&
376 flags & DL_FLAG_PM_RUNTIME)
377 pm_runtime_resume(supplier);
378
379 /*
294 * Move the consumer and all of the devices depending on it to the end 380 * Move the consumer and all of the devices depending on it to the end
295 * of dpm_list and the devices_kset list. 381 * of dpm_list and the devices_kset list.
296 * 382 *
@@ -302,17 +388,24 @@ struct device_link *device_link_add(struct device *consumer,
302 list_add_tail_rcu(&link->s_node, &supplier->links.consumers); 388 list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
303 list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); 389 list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
304 390
305 dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); 391 dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
306 392
307 out: 393 out:
308 device_pm_unlock(); 394 device_pm_unlock();
309 device_links_write_unlock(); 395 device_links_write_unlock();
396
397 if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
398 pm_runtime_put(supplier);
399
310 return link; 400 return link;
311} 401}
312EXPORT_SYMBOL_GPL(device_link_add); 402EXPORT_SYMBOL_GPL(device_link_add);
313 403
314static void device_link_free(struct device_link *link) 404static void device_link_free(struct device_link *link)
315{ 405{
406 while (refcount_dec_not_one(&link->rpm_active))
407 pm_runtime_put(link->supplier);
408
316 put_device(link->consumer); 409 put_device(link->consumer);
317 put_device(link->supplier); 410 put_device(link->supplier);
318 kfree(link); 411 kfree(link);
@@ -328,8 +421,8 @@ static void __device_link_del(struct kref *kref)
328{ 421{
329 struct device_link *link = container_of(kref, struct device_link, kref); 422 struct device_link *link = container_of(kref, struct device_link, kref);
330 423
331 dev_info(link->consumer, "Dropping the link to %s\n", 424 dev_dbg(link->consumer, "Dropping the link to %s\n",
332 dev_name(link->supplier)); 425 dev_name(link->supplier));
333 426
334 if (link->flags & DL_FLAG_PM_RUNTIME) 427 if (link->flags & DL_FLAG_PM_RUNTIME)
335 pm_runtime_drop_link(link->consumer); 428 pm_runtime_drop_link(link->consumer);
@@ -355,8 +448,16 @@ static void __device_link_del(struct kref *kref)
355} 448}
356#endif /* !CONFIG_SRCU */ 449#endif /* !CONFIG_SRCU */
357 450
451static void device_link_put_kref(struct device_link *link)
452{
453 if (link->flags & DL_FLAG_STATELESS)
454 kref_put(&link->kref, __device_link_del);
455 else
456 WARN(1, "Unable to drop a managed device link reference\n");
457}
458
358/** 459/**
359 * device_link_del - Delete a link between two devices. 460 * device_link_del - Delete a stateless link between two devices.
360 * @link: Device link to delete. 461 * @link: Device link to delete.
361 * 462 *
362 * The caller must ensure proper synchronization of this function with runtime 463 * The caller must ensure proper synchronization of this function with runtime
@@ -368,14 +469,14 @@ void device_link_del(struct device_link *link)
368{ 469{
369 device_links_write_lock(); 470 device_links_write_lock();
370 device_pm_lock(); 471 device_pm_lock();
371 kref_put(&link->kref, __device_link_del); 472 device_link_put_kref(link);
372 device_pm_unlock(); 473 device_pm_unlock();
373 device_links_write_unlock(); 474 device_links_write_unlock();
374} 475}
375EXPORT_SYMBOL_GPL(device_link_del); 476EXPORT_SYMBOL_GPL(device_link_del);
376 477
377/** 478/**
378 * device_link_remove - remove a link between two devices. 479 * device_link_remove - Delete a stateless link between two devices.
379 * @consumer: Consumer end of the link. 480 * @consumer: Consumer end of the link.
380 * @supplier: Supplier end of the link. 481 * @supplier: Supplier end of the link.
381 * 482 *
@@ -394,7 +495,7 @@ void device_link_remove(void *consumer, struct device *supplier)
394 495
395 list_for_each_entry(link, &supplier->links.consumers, s_node) { 496 list_for_each_entry(link, &supplier->links.consumers, s_node) {
396 if (link->consumer == consumer) { 497 if (link->consumer == consumer) {
397 kref_put(&link->kref, __device_link_del); 498 device_link_put_kref(link);
398 break; 499 break;
399 } 500 }
400 } 501 }
@@ -474,8 +575,21 @@ void device_links_driver_bound(struct device *dev)
474 if (link->flags & DL_FLAG_STATELESS) 575 if (link->flags & DL_FLAG_STATELESS)
475 continue; 576 continue;
476 577
578 /*
579 * Links created during consumer probe may be in the "consumer
580 * probe" state to start with if the supplier is still probing
581 * when they are created and they may become "active" if the
582 * consumer probe returns first. Skip them here.
583 */
584 if (link->status == DL_STATE_CONSUMER_PROBE ||
585 link->status == DL_STATE_ACTIVE)
586 continue;
587
477 WARN_ON(link->status != DL_STATE_DORMANT); 588 WARN_ON(link->status != DL_STATE_DORMANT);
478 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 589 WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
590
591 if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
592 driver_deferred_probe_add(link->consumer);
479 } 593 }
480 594
481 list_for_each_entry(link, &dev->links.suppliers, c_node) { 595 list_for_each_entry(link, &dev->links.suppliers, c_node) {
@@ -512,18 +626,49 @@ static void __device_links_no_driver(struct device *dev)
512 continue; 626 continue;
513 627
514 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) 628 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
515 kref_put(&link->kref, __device_link_del); 629 __device_link_del(&link->kref);
516 else if (link->status != DL_STATE_SUPPLIER_UNBIND) 630 else if (link->status == DL_STATE_CONSUMER_PROBE ||
631 link->status == DL_STATE_ACTIVE)
517 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 632 WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
518 } 633 }
519 634
520 dev->links.status = DL_DEV_NO_DRIVER; 635 dev->links.status = DL_DEV_NO_DRIVER;
521} 636}
522 637
638/**
639 * device_links_no_driver - Update links after failing driver probe.
640 * @dev: Device whose driver has just failed to probe.
641 *
642 * Clean up leftover links to consumers for @dev and invoke
643 * %__device_links_no_driver() to update links to suppliers for it as
644 * appropriate.
645 *
646 * Links with the DL_FLAG_STATELESS flag set are ignored.
647 */
523void device_links_no_driver(struct device *dev) 648void device_links_no_driver(struct device *dev)
524{ 649{
650 struct device_link *link;
651
525 device_links_write_lock(); 652 device_links_write_lock();
653
654 list_for_each_entry(link, &dev->links.consumers, s_node) {
655 if (link->flags & DL_FLAG_STATELESS)
656 continue;
657
658 /*
659 * The probe has failed, so if the status of the link is
660 * "consumer probe" or "active", it must have been added by
661 * a probing consumer while this device was still probing.
662 * Change its state to "dormant", as it represents a valid
663 * relationship, but it is not functionally meaningful.
664 */
665 if (link->status == DL_STATE_CONSUMER_PROBE ||
666 link->status == DL_STATE_ACTIVE)
667 WRITE_ONCE(link->status, DL_STATE_DORMANT);
668 }
669
526 __device_links_no_driver(dev); 670 __device_links_no_driver(dev);
671
527 device_links_write_unlock(); 672 device_links_write_unlock();
528} 673}
529 674
@@ -539,11 +684,11 @@ void device_links_no_driver(struct device *dev)
539 */ 684 */
540void device_links_driver_cleanup(struct device *dev) 685void device_links_driver_cleanup(struct device *dev)
541{ 686{
542 struct device_link *link; 687 struct device_link *link, *ln;
543 688
544 device_links_write_lock(); 689 device_links_write_lock();
545 690
546 list_for_each_entry(link, &dev->links.consumers, s_node) { 691 list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
547 if (link->flags & DL_FLAG_STATELESS) 692 if (link->flags & DL_FLAG_STATELESS)
548 continue; 693 continue;
549 694
@@ -557,7 +702,7 @@ void device_links_driver_cleanup(struct device *dev)
557 */ 702 */
558 if (link->status == DL_STATE_SUPPLIER_UNBIND && 703 if (link->status == DL_STATE_SUPPLIER_UNBIND &&
559 link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) 704 link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
560 kref_put(&link->kref, __device_link_del); 705 __device_link_del(&link->kref);
561 706
562 WRITE_ONCE(link->status, DL_STATE_DORMANT); 707 WRITE_ONCE(link->status, DL_STATE_DORMANT);
563 } 708 }
@@ -1966,7 +2111,7 @@ int device_add(struct device *dev)
1966 if (dev->class) { 2111 if (dev->class) {
1967 mutex_lock(&dev->class->p->mutex); 2112 mutex_lock(&dev->class->p->mutex);
1968 /* tie the class to the device */ 2113 /* tie the class to the device */
1969 klist_add_tail(&dev->knode_class, 2114 klist_add_tail(&dev->p->knode_class,
1970 &dev->class->p->klist_devices); 2115 &dev->class->p->klist_devices);
1971 2116
1972 /* notify any interfaces that the device is here */ 2117 /* notify any interfaces that the device is here */
@@ -2080,6 +2225,17 @@ void device_del(struct device *dev)
2080 struct kobject *glue_dir = NULL; 2225 struct kobject *glue_dir = NULL;
2081 struct class_interface *class_intf; 2226 struct class_interface *class_intf;
2082 2227
2228 /*
2229 * Hold the device lock and set the "dead" flag to guarantee that
2230 * the update behavior is consistent with the other bitfields near
2231 * it and that we cannot have an asynchronous probe routine trying
2232 * to run while we are tearing out the bus/class/sysfs from
2233 * underneath the device.
2234 */
2235 device_lock(dev);
2236 dev->p->dead = true;
2237 device_unlock(dev);
2238
2083 /* Notify clients of device removal. This call must come 2239 /* Notify clients of device removal. This call must come
2084 * before dpm_sysfs_remove(). 2240 * before dpm_sysfs_remove().
2085 */ 2241 */
@@ -2105,7 +2261,7 @@ void device_del(struct device *dev)
2105 if (class_intf->remove_dev) 2261 if (class_intf->remove_dev)
2106 class_intf->remove_dev(dev, class_intf); 2262 class_intf->remove_dev(dev, class_intf);
2107 /* remove the device from the class list */ 2263 /* remove the device from the class list */
2108 klist_del(&dev->knode_class); 2264 klist_del(&dev->p->knode_class);
2109 mutex_unlock(&dev->class->p->mutex); 2265 mutex_unlock(&dev->class->p->mutex);
2110 } 2266 }
2111 device_remove_file(dev, &dev_attr_uevent); 2267 device_remove_file(dev, &dev_attr_uevent);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 6ce93a52bf3f..668139cfa664 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -409,6 +409,7 @@ static void device_create_release(struct device *dev)
409 kfree(dev); 409 kfree(dev);
410} 410}
411 411
412__printf(4, 0)
412static struct device * 413static struct device *
413__cpu_device_create(struct device *parent, void *drvdata, 414__cpu_device_create(struct device *parent, void *drvdata,
414 const struct attribute_group **groups, 415 const struct attribute_group **groups,
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 8ac10af17c00..a823f469e53f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -57,6 +57,10 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
57static struct dentry *deferred_devices; 57static struct dentry *deferred_devices;
58static bool initcalls_done; 58static bool initcalls_done;
59 59
60/* Save the async probe drivers' name from kernel cmdline */
61#define ASYNC_DRV_NAMES_MAX_LEN 256
62static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
63
60/* 64/*
61 * In some cases, like suspend to RAM or hibernation, It might be reasonable 65 * In some cases, like suspend to RAM or hibernation, It might be reasonable
62 * to prohibit probing of devices as it could be unsafe. 66 * to prohibit probing of devices as it could be unsafe.
@@ -116,7 +120,7 @@ static void deferred_probe_work_func(struct work_struct *work)
116} 120}
117static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func); 121static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
118 122
119static void driver_deferred_probe_add(struct device *dev) 123void driver_deferred_probe_add(struct device *dev)
120{ 124{
121 mutex_lock(&deferred_probe_mutex); 125 mutex_lock(&deferred_probe_mutex);
122 if (list_empty(&dev->p->deferred_probe)) { 126 if (list_empty(&dev->p->deferred_probe)) {
@@ -674,6 +678,23 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
674 return ret; 678 return ret;
675} 679}
676 680
681static inline bool cmdline_requested_async_probing(const char *drv_name)
682{
683 return parse_option_str(async_probe_drv_names, drv_name);
684}
685
686/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
687static int __init save_async_options(char *buf)
688{
689 if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
690 printk(KERN_WARNING
691 "Too long list of driver names for 'driver_async_probe'!\n");
692
693 strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
694 return 0;
695}
696__setup("driver_async_probe=", save_async_options);
697
677bool driver_allows_async_probing(struct device_driver *drv) 698bool driver_allows_async_probing(struct device_driver *drv)
678{ 699{
679 switch (drv->probe_type) { 700 switch (drv->probe_type) {
@@ -684,6 +705,9 @@ bool driver_allows_async_probing(struct device_driver *drv)
684 return false; 705 return false;
685 706
686 default: 707 default:
708 if (cmdline_requested_async_probing(drv->name))
709 return true;
710
687 if (module_requested_async_probing(drv->owner)) 711 if (module_requested_async_probing(drv->owner))
688 return true; 712 return true;
689 713
@@ -731,15 +755,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
731 bool async_allowed; 755 bool async_allowed;
732 int ret; 756 int ret;
733 757
734 /*
735 * Check if device has already been claimed. This may
736 * happen with driver loading, device discovery/registration,
737 * and deferred probe processing happens all at once with
738 * multiple threads.
739 */
740 if (dev->driver)
741 return -EBUSY;
742
743 ret = driver_match_device(drv, dev); 758 ret = driver_match_device(drv, dev);
744 if (ret == 0) { 759 if (ret == 0) {
745 /* no match */ 760 /* no match */
@@ -774,6 +789,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
774 789
775 device_lock(dev); 790 device_lock(dev);
776 791
792 /*
793 * Check if device has already been removed or claimed. This may
794 * happen with driver loading, device discovery/registration,
795 * and deferred probe processing happens all at once with
796 * multiple threads.
797 */
798 if (dev->p->dead || dev->driver)
799 goto out_unlock;
800
777 if (dev->parent) 801 if (dev->parent)
778 pm_runtime_get_sync(dev->parent); 802 pm_runtime_get_sync(dev->parent);
779 803
@@ -784,7 +808,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
784 808
785 if (dev->parent) 809 if (dev->parent)
786 pm_runtime_put(dev->parent); 810 pm_runtime_put(dev->parent);
787 811out_unlock:
788 device_unlock(dev); 812 device_unlock(dev);
789 813
790 put_device(dev); 814 put_device(dev);
@@ -829,7 +853,7 @@ static int __device_attach(struct device *dev, bool allow_async)
829 */ 853 */
830 dev_dbg(dev, "scheduling asynchronous probe\n"); 854 dev_dbg(dev, "scheduling asynchronous probe\n");
831 get_device(dev); 855 get_device(dev);
832 async_schedule(__device_attach_async_helper, dev); 856 async_schedule_dev(__device_attach_async_helper, dev);
833 } else { 857 } else {
834 pm_request_idle(dev); 858 pm_request_idle(dev);
835 } 859 }
@@ -867,6 +891,88 @@ void device_initial_probe(struct device *dev)
867 __device_attach(dev, true); 891 __device_attach(dev, true);
868} 892}
869 893
894/*
895 * __device_driver_lock - acquire locks needed to manipulate dev->drv
896 * @dev: Device we will update driver info for
897 * @parent: Parent device. Needed if the bus requires parent lock
898 *
899 * This function will take the required locks for manipulating dev->drv.
900 * Normally this will just be the @dev lock, but when called for a USB
901 * interface, @parent lock will be held as well.
902 */
903static void __device_driver_lock(struct device *dev, struct device *parent)
904{
905 if (parent && dev->bus->need_parent_lock)
906 device_lock(parent);
907 device_lock(dev);
908}
909
910/*
911 * __device_driver_unlock - release locks needed to manipulate dev->drv
912 * @dev: Device we will update driver info for
913 * @parent: Parent device. Needed if the bus requires parent lock
914 *
915 * This function will release the required locks for manipulating dev->drv.
916 * Normally this will just be the the @dev lock, but when called for a
917 * USB interface, @parent lock will be released as well.
918 */
919static void __device_driver_unlock(struct device *dev, struct device *parent)
920{
921 device_unlock(dev);
922 if (parent && dev->bus->need_parent_lock)
923 device_unlock(parent);
924}
925
926/**
927 * device_driver_attach - attach a specific driver to a specific device
928 * @drv: Driver to attach
929 * @dev: Device to attach it to
930 *
931 * Manually attach driver to a device. Will acquire both @dev lock and
932 * @dev->parent lock if needed.
933 */
934int device_driver_attach(struct device_driver *drv, struct device *dev)
935{
936 int ret = 0;
937
938 __device_driver_lock(dev, dev->parent);
939
940 /*
941 * If device has been removed or someone has already successfully
942 * bound a driver before us just skip the driver probe call.
943 */
944 if (!dev->p->dead && !dev->driver)
945 ret = driver_probe_device(drv, dev);
946
947 __device_driver_unlock(dev, dev->parent);
948
949 return ret;
950}
951
952static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
953{
954 struct device *dev = _dev;
955 struct device_driver *drv;
956 int ret = 0;
957
958 __device_driver_lock(dev, dev->parent);
959
960 drv = dev->p->async_driver;
961
962 /*
963 * If device has been removed or someone has already successfully
964 * bound a driver before us just skip the driver probe call.
965 */
966 if (!dev->p->dead && !dev->driver)
967 ret = driver_probe_device(drv, dev);
968
969 __device_driver_unlock(dev, dev->parent);
970
971 dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret);
972
973 put_device(dev);
974}
975
870static int __driver_attach(struct device *dev, void *data) 976static int __driver_attach(struct device *dev, void *data)
871{ 977{
872 struct device_driver *drv = data; 978 struct device_driver *drv = data;
@@ -894,14 +1000,26 @@ static int __driver_attach(struct device *dev, void *data)
894 return ret; 1000 return ret;
895 } /* ret > 0 means positive match */ 1001 } /* ret > 0 means positive match */
896 1002
897 if (dev->parent && dev->bus->need_parent_lock) 1003 if (driver_allows_async_probing(drv)) {
898 device_lock(dev->parent); 1004 /*
899 device_lock(dev); 1005 * Instead of probing the device synchronously we will
900 if (!dev->driver) 1006 * probe it asynchronously to allow for more parallelism.
901 driver_probe_device(drv, dev); 1007 *
902 device_unlock(dev); 1008 * We only take the device lock here in order to guarantee
903 if (dev->parent && dev->bus->need_parent_lock) 1009 * that the dev->driver and async_driver fields are protected
904 device_unlock(dev->parent); 1010 */
1011 dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
1012 device_lock(dev);
1013 if (!dev->driver) {
1014 get_device(dev);
1015 dev->p->async_driver = drv;
1016 async_schedule_dev(__driver_attach_async_helper, dev);
1017 }
1018 device_unlock(dev);
1019 return 0;
1020 }
1021
1022 device_driver_attach(drv, dev);
905 1023
906 return 0; 1024 return 0;
907} 1025}
@@ -932,15 +1050,11 @@ static void __device_release_driver(struct device *dev, struct device *parent)
932 drv = dev->driver; 1050 drv = dev->driver;
933 if (drv) { 1051 if (drv) {
934 while (device_links_busy(dev)) { 1052 while (device_links_busy(dev)) {
935 device_unlock(dev); 1053 __device_driver_unlock(dev, parent);
936 if (parent && dev->bus->need_parent_lock)
937 device_unlock(parent);
938 1054
939 device_links_unbind_consumers(dev); 1055 device_links_unbind_consumers(dev);
940 if (parent && dev->bus->need_parent_lock)
941 device_lock(parent);
942 1056
943 device_lock(dev); 1057 __device_driver_lock(dev, parent);
944 /* 1058 /*
945 * A concurrent invocation of the same function might 1059 * A concurrent invocation of the same function might
946 * have released the driver successfully while this one 1060 * have released the driver successfully while this one
@@ -968,9 +1082,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
968 drv->remove(dev); 1082 drv->remove(dev);
969 1083
970 device_links_driver_cleanup(dev); 1084 device_links_driver_cleanup(dev);
971 arch_teardown_dma_ops(dev);
972 1085
973 devres_release_all(dev); 1086 devres_release_all(dev);
1087 arch_teardown_dma_ops(dev);
974 dev->driver = NULL; 1088 dev->driver = NULL;
975 dev_set_drvdata(dev, NULL); 1089 dev_set_drvdata(dev, NULL);
976 if (dev->pm_domain && dev->pm_domain->dismiss) 1090 if (dev->pm_domain && dev->pm_domain->dismiss)
@@ -993,16 +1107,12 @@ void device_release_driver_internal(struct device *dev,
993 struct device_driver *drv, 1107 struct device_driver *drv,
994 struct device *parent) 1108 struct device *parent)
995{ 1109{
996 if (parent && dev->bus->need_parent_lock) 1110 __device_driver_lock(dev, parent);
997 device_lock(parent);
998 1111
999 device_lock(dev);
1000 if (!drv || drv == dev->driver) 1112 if (!drv || drv == dev->driver)
1001 __device_release_driver(dev, parent); 1113 __device_release_driver(dev, parent);
1002 1114
1003 device_unlock(dev); 1115 __device_driver_unlock(dev, parent);
1004 if (parent && dev->bus->need_parent_lock)
1005 device_unlock(parent);
1006} 1116}
1007 1117
1008/** 1118/**
@@ -1028,6 +1138,18 @@ void device_release_driver(struct device *dev)
1028EXPORT_SYMBOL_GPL(device_release_driver); 1138EXPORT_SYMBOL_GPL(device_release_driver);
1029 1139
1030/** 1140/**
1141 * device_driver_detach - detach driver from a specific device
1142 * @dev: device to detach driver from
1143 *
1144 * Detach driver from device. Will acquire both @dev lock and @dev->parent
1145 * lock if needed.
1146 */
1147void device_driver_detach(struct device *dev)
1148{
1149 device_release_driver_internal(dev, NULL, dev->parent);
1150}
1151
1152/**
1031 * driver_detach - detach driver from all devices it controls. 1153 * driver_detach - detach driver from all devices it controls.
1032 * @drv: driver. 1154 * @drv: driver.
1033 */ 1155 */
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
index a97eeb0be1d8..0b2dfa6259c9 100644
--- a/drivers/base/firmware_loader/Makefile
+++ b/drivers/base/firmware_loader/Makefile
@@ -1,7 +1,9 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Makefile for the Linux firmware loader 2# Makefile for the Linux firmware loader
3 3
4obj-y := fallback_table.o 4obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o
5obj-$(CONFIG_FW_LOADER) += firmware_class.o 5obj-$(CONFIG_FW_LOADER) += firmware_class.o
6firmware_class-objs := main.o 6firmware_class-objs := main.o
7firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o 7firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
8
9obj-y += builtin/
diff --git a/drivers/base/firmware_loader/builtin/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore
new file mode 100644
index 000000000000..9c8bdb9fdcc3
--- /dev/null
+++ b/drivers/base/firmware_loader/builtin/.gitignore
@@ -0,0 +1 @@
*.gen.S
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
new file mode 100644
index 000000000000..37e5ae387400
--- /dev/null
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -0,0 +1,40 @@
1# SPDX-License-Identifier: GPL-2.0
2
3# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a
4# leading /, it's relative to $(srctree).
5fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR))
6fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
7
8obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
9
10FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
11FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME))))
12ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long)
13ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
14PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
15
16filechk_fwbin = \
17 echo "/* Generated by $(src)/Makefile */" ;\
18 echo " .section .rodata" ;\
19 echo " .p2align $(ASM_ALIGN)" ;\
20 echo "_fw_$(FWSTR)_bin:" ;\
21 echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\
22 echo "_fw_end:" ;\
23 echo " .section .rodata.str,\"aMS\",$(PROGBITS),1" ;\
24 echo " .p2align $(ASM_ALIGN)" ;\
25 echo "_fw_$(FWSTR)_name:" ;\
26 echo " .string \"$(FWNAME)\"" ;\
27 echo " .section .builtin_fw,\"a\",$(PROGBITS)" ;\
28 echo " .p2align $(ASM_ALIGN)" ;\
29 echo " $(ASM_WORD) _fw_$(FWSTR)_name" ;\
30 echo " $(ASM_WORD) _fw_$(FWSTR)_bin" ;\
31 echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin"
32
33$(obj)/%.gen.S: FORCE
34 $(call filechk,fwbin)
35
36# The .o files depend on the binaries directly; the .S files don't.
37$(addprefix $(obj)/, $(obj-y)): $(obj)/%.gen.o: $(fwdir)/%
38
39targets := $(patsubst $(obj)/%,%, \
40 $(shell find $(obj) -name \*.gen.S 2>/dev/null))
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 7428659d8df9..776dd69cf5be 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -16,9 +16,6 @@
16 * firmware fallback configuration table 16 * firmware fallback configuration table
17 */ 17 */
18 18
19/* Module or buit-in */
20#ifdef CONFIG_FW_LOADER_USER_HELPER
21
22static unsigned int zero; 19static unsigned int zero;
23static unsigned int one = 1; 20static unsigned int one = 1;
24 21
@@ -51,5 +48,3 @@ struct ctl_table firmware_config_table[] = {
51 { } 48 { }
52}; 49};
53EXPORT_SYMBOL_GPL(firmware_config_table); 50EXPORT_SYMBOL_GPL(firmware_config_table);
54
55#endif
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 8e9213b36e31..7eaaf5ee5ba6 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -328,12 +328,12 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
328 rc = kernel_read_file_from_path(path, &fw_priv->data, &size, 328 rc = kernel_read_file_from_path(path, &fw_priv->data, &size,
329 msize, id); 329 msize, id);
330 if (rc) { 330 if (rc) {
331 if (rc == -ENOENT) 331 if (rc != -ENOENT)
332 dev_dbg(device, "loading %s failed with error %d\n",
333 path, rc);
334 else
335 dev_warn(device, "loading %s failed with error %d\n", 332 dev_warn(device, "loading %s failed with error %d\n",
336 path, rc); 333 path, rc);
334 else
335 dev_dbg(device, "loading %s failed for no such file or directory.\n",
336 path);
337 continue; 337 continue;
338 } 338 }
339 dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name); 339 dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1c958eb33ef4..4e45ac21d672 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -127,7 +127,20 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
127 irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); 127 irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
128 } 128 }
129 129
130 return r ? r->start : -ENXIO; 130 if (r)
131 return r->start;
132
133 /*
134 * For the index 0 interrupt, allow falling back to GpioInt
135 * resources. While a device could have both Interrupt and GpioInt
136 * resources, making this fallback ambiguous, in many common cases
137 * the device will only expose one IRQ, and this fallback
138 * allows a common code path across either kind of resource.
139 */
140 if (num == 0 && has_acpi_companion(&dev->dev))
141 return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
142
143 return -ENXIO;
131#endif 144#endif
132} 145}
133EXPORT_SYMBOL_GPL(platform_get_irq); 146EXPORT_SYMBOL_GPL(platform_get_irq);
@@ -508,10 +521,12 @@ struct platform_device *platform_device_register_full(
508 521
509 pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); 522 pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
510 if (!pdev) 523 if (!pdev)
511 goto err_alloc; 524 return ERR_PTR(-ENOMEM);
512 525
513 pdev->dev.parent = pdevinfo->parent; 526 pdev->dev.parent = pdevinfo->parent;
514 pdev->dev.fwnode = pdevinfo->fwnode; 527 pdev->dev.fwnode = pdevinfo->fwnode;
528 pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
529 pdev->dev.of_node_reused = pdevinfo->of_node_reused;
515 530
516 if (pdevinfo->dma_mask) { 531 if (pdevinfo->dma_mask) {
517 /* 532 /*
@@ -553,8 +568,6 @@ struct platform_device *platform_device_register_full(
553err: 568err:
554 ACPI_COMPANION_SET(&pdev->dev, NULL); 569 ACPI_COMPANION_SET(&pdev->dev, NULL);
555 kfree(pdev->dev.dma_mask); 570 kfree(pdev->dev.dma_mask);
556
557err_alloc:
558 platform_device_put(pdev); 571 platform_device_put(pdev);
559 return ERR_PTR(ret); 572 return ERR_PTR(ret);
560 } 573 }
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 893ae464bfd6..5a8149829ab3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -734,7 +734,7 @@ void dpm_noirq_resume_devices(pm_message_t state)
734 reinit_completion(&dev->power.completion); 734 reinit_completion(&dev->power.completion);
735 if (is_async(dev)) { 735 if (is_async(dev)) {
736 get_device(dev); 736 get_device(dev);
737 async_schedule(async_resume_noirq, dev); 737 async_schedule_dev(async_resume_noirq, dev);
738 } 738 }
739 } 739 }
740 740
@@ -891,7 +891,7 @@ void dpm_resume_early(pm_message_t state)
891 reinit_completion(&dev->power.completion); 891 reinit_completion(&dev->power.completion);
892 if (is_async(dev)) { 892 if (is_async(dev)) {
893 get_device(dev); 893 get_device(dev);
894 async_schedule(async_resume_early, dev); 894 async_schedule_dev(async_resume_early, dev);
895 } 895 }
896 } 896 }
897 897
@@ -1055,7 +1055,7 @@ void dpm_resume(pm_message_t state)
1055 reinit_completion(&dev->power.completion); 1055 reinit_completion(&dev->power.completion);
1056 if (is_async(dev)) { 1056 if (is_async(dev)) {
1057 get_device(dev); 1057 get_device(dev);
1058 async_schedule(async_resume, dev); 1058 async_schedule_dev(async_resume, dev);
1059 } 1059 }
1060 } 1060 }
1061 1061
@@ -1375,7 +1375,7 @@ static int device_suspend_noirq(struct device *dev)
1375 1375
1376 if (is_async(dev)) { 1376 if (is_async(dev)) {
1377 get_device(dev); 1377 get_device(dev);
1378 async_schedule(async_suspend_noirq, dev); 1378 async_schedule_dev(async_suspend_noirq, dev);
1379 return 0; 1379 return 0;
1380 } 1380 }
1381 return __device_suspend_noirq(dev, pm_transition, false); 1381 return __device_suspend_noirq(dev, pm_transition, false);
@@ -1578,7 +1578,7 @@ static int device_suspend_late(struct device *dev)
1578 1578
1579 if (is_async(dev)) { 1579 if (is_async(dev)) {
1580 get_device(dev); 1580 get_device(dev);
1581 async_schedule(async_suspend_late, dev); 1581 async_schedule_dev(async_suspend_late, dev);
1582 return 0; 1582 return 0;
1583 } 1583 }
1584 1584
@@ -1844,7 +1844,7 @@ static int device_suspend(struct device *dev)
1844 1844
1845 if (is_async(dev)) { 1845 if (is_async(dev)) {
1846 get_device(dev); 1846 get_device(dev);
1847 async_schedule(async_suspend, dev); 1847 async_schedule_dev(async_suspend, dev);
1848 return 0; 1848 return 0;
1849 } 1849 }
1850 1850
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 78937c45278c..a80dbf08a99c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -282,11 +282,8 @@ static int rpm_get_suppliers(struct device *dev)
282 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { 282 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
283 int retval; 283 int retval;
284 284
285 if (!(link->flags & DL_FLAG_PM_RUNTIME)) 285 if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
286 continue; 286 READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
287
288 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
289 link->rpm_active)
290 continue; 287 continue;
291 288
292 retval = pm_runtime_get_sync(link->supplier); 289 retval = pm_runtime_get_sync(link->supplier);
@@ -295,7 +292,7 @@ static int rpm_get_suppliers(struct device *dev)
295 pm_runtime_put_noidle(link->supplier); 292 pm_runtime_put_noidle(link->supplier);
296 return retval; 293 return retval;
297 } 294 }
298 link->rpm_active = true; 295 refcount_inc(&link->rpm_active);
299 } 296 }
300 return 0; 297 return 0;
301} 298}
@@ -304,12 +301,13 @@ static void rpm_put_suppliers(struct device *dev)
304{ 301{
305 struct device_link *link; 302 struct device_link *link;
306 303
307 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) 304 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
308 if (link->rpm_active && 305 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
309 READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) { 306 continue;
307
308 while (refcount_dec_not_one(&link->rpm_active))
310 pm_runtime_put(link->supplier); 309 pm_runtime_put(link->supplier);
311 link->rpm_active = false; 310 }
312 }
313} 311}
314 312
315/** 313/**
@@ -1114,24 +1112,57 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
1114 * and the device parent's counter of unsuspended children is modified to 1112 * and the device parent's counter of unsuspended children is modified to
1115 * reflect the new status. If the new status is RPM_SUSPENDED, an idle 1113 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
1116 * notification request for the parent is submitted. 1114 * notification request for the parent is submitted.
1115 *
1116 * If @dev has any suppliers (as reflected by device links to them), and @status
1117 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1118 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1119 * of the @status value) and the suppliers will be deacticated on exit. The
1120 * error returned by the failing supplier activation will be returned in that
1121 * case.
1117 */ 1122 */
1118int __pm_runtime_set_status(struct device *dev, unsigned int status) 1123int __pm_runtime_set_status(struct device *dev, unsigned int status)
1119{ 1124{
1120 struct device *parent = dev->parent; 1125 struct device *parent = dev->parent;
1121 unsigned long flags;
1122 bool notify_parent = false; 1126 bool notify_parent = false;
1123 int error = 0; 1127 int error = 0;
1124 1128
1125 if (status != RPM_ACTIVE && status != RPM_SUSPENDED) 1129 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1126 return -EINVAL; 1130 return -EINVAL;
1127 1131
1128 spin_lock_irqsave(&dev->power.lock, flags); 1132 spin_lock_irq(&dev->power.lock);
1129 1133
1130 if (!dev->power.runtime_error && !dev->power.disable_depth) { 1134 /*
1135 * Prevent PM-runtime from being enabled for the device or return an
1136 * error if it is enabled already and working.
1137 */
1138 if (dev->power.runtime_error || dev->power.disable_depth)
1139 dev->power.disable_depth++;
1140 else
1131 error = -EAGAIN; 1141 error = -EAGAIN;
1132 goto out; 1142
1143 spin_unlock_irq(&dev->power.lock);
1144
1145 if (error)
1146 return error;
1147
1148 /*
1149 * If the new status is RPM_ACTIVE, the suppliers can be activated
1150 * upfront regardless of the current status, because next time
1151 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1152 * involved will be dropped down to one anyway.
1153 */
1154 if (status == RPM_ACTIVE) {
1155 int idx = device_links_read_lock();
1156
1157 error = rpm_get_suppliers(dev);
1158 if (error)
1159 status = RPM_SUSPENDED;
1160
1161 device_links_read_unlock(idx);
1133 } 1162 }
1134 1163
1164 spin_lock_irq(&dev->power.lock);
1165
1135 if (dev->power.runtime_status == status || !parent) 1166 if (dev->power.runtime_status == status || !parent)
1136 goto out_set; 1167 goto out_set;
1137 1168
@@ -1159,19 +1190,33 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
1159 1190
1160 spin_unlock(&parent->power.lock); 1191 spin_unlock(&parent->power.lock);
1161 1192
1162 if (error) 1193 if (error) {
1194 status = RPM_SUSPENDED;
1163 goto out; 1195 goto out;
1196 }
1164 } 1197 }
1165 1198
1166 out_set: 1199 out_set:
1167 __update_runtime_status(dev, status); 1200 __update_runtime_status(dev, status);
1168 dev->power.runtime_error = 0; 1201 if (!error)
1202 dev->power.runtime_error = 0;
1203
1169 out: 1204 out:
1170 spin_unlock_irqrestore(&dev->power.lock, flags); 1205 spin_unlock_irq(&dev->power.lock);
1171 1206
1172 if (notify_parent) 1207 if (notify_parent)
1173 pm_request_idle(parent); 1208 pm_request_idle(parent);
1174 1209
1210 if (status == RPM_SUSPENDED) {
1211 int idx = device_links_read_lock();
1212
1213 rpm_put_suppliers(dev);
1214
1215 device_links_read_unlock(idx);
1216 }
1217
1218 pm_runtime_enable(dev);
1219
1175 return error; 1220 return error;
1176} 1221}
1177EXPORT_SYMBOL_GPL(__pm_runtime_set_status); 1222EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
@@ -1569,7 +1614,7 @@ void pm_runtime_remove(struct device *dev)
1569 * 1614 *
1570 * Check links from this device to any consumers and if any of them have active 1615 * Check links from this device to any consumers and if any of them have active
1571 * runtime PM references to the device, drop the usage counter of the device 1616 * runtime PM references to the device, drop the usage counter of the device
1572 * (once per link). 1617 * (as many times as needed).
1573 * 1618 *
1574 * Links with the DL_FLAG_STATELESS flag set are ignored. 1619 * Links with the DL_FLAG_STATELESS flag set are ignored.
1575 * 1620 *
@@ -1591,10 +1636,8 @@ void pm_runtime_clean_up_links(struct device *dev)
1591 if (link->flags & DL_FLAG_STATELESS) 1636 if (link->flags & DL_FLAG_STATELESS)
1592 continue; 1637 continue;
1593 1638
1594 if (link->rpm_active) { 1639 while (refcount_dec_not_one(&link->rpm_active))
1595 pm_runtime_put_noidle(dev); 1640 pm_runtime_put_noidle(dev);
1596 link->rpm_active = false;
1597 }
1598 } 1641 }
1599 1642
1600 device_links_read_unlock(idx); 1643 device_links_read_unlock(idx);
@@ -1612,8 +1655,11 @@ void pm_runtime_get_suppliers(struct device *dev)
1612 idx = device_links_read_lock(); 1655 idx = device_links_read_lock();
1613 1656
1614 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) 1657 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1615 if (link->flags & DL_FLAG_PM_RUNTIME) 1658 if (link->flags & DL_FLAG_PM_RUNTIME) {
1659 link->supplier_preactivated = true;
1660 refcount_inc(&link->rpm_active);
1616 pm_runtime_get_sync(link->supplier); 1661 pm_runtime_get_sync(link->supplier);
1662 }
1617 1663
1618 device_links_read_unlock(idx); 1664 device_links_read_unlock(idx);
1619} 1665}
@@ -1630,8 +1676,11 @@ void pm_runtime_put_suppliers(struct device *dev)
1630 idx = device_links_read_lock(); 1676 idx = device_links_read_lock();
1631 1677
1632 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) 1678 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1633 if (link->flags & DL_FLAG_PM_RUNTIME) 1679 if (link->supplier_preactivated) {
1634 pm_runtime_put(link->supplier); 1680 link->supplier_preactivated = false;
1681 if (refcount_dec_not_one(&link->rpm_active))
1682 pm_runtime_put(link->supplier);
1683 }
1635 1684
1636 device_links_read_unlock(idx); 1685 device_links_read_unlock(idx);
1637} 1686}
@@ -1645,8 +1694,6 @@ void pm_runtime_new_link(struct device *dev)
1645 1694
1646void pm_runtime_drop_link(struct device *dev) 1695void pm_runtime_drop_link(struct device *dev)
1647{ 1696{
1648 rpm_put_suppliers(dev);
1649
1650 spin_lock_irq(&dev->power.lock); 1697 spin_lock_irq(&dev->power.lock);
1651 WARN_ON(dev->power.links_count == 0); 1698 WARN_ON(dev->power.links_count == 0);
1652 dev->power.links_count--; 1699 dev->power.links_count--;
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index e7f145d662f0..f4b1d8e54daf 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -11,16 +11,47 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/time.h> 13#include <linux/time.h>
14#include <linux/numa.h>
15#include <linux/nodemask.h>
16#include <linux/topology.h>
14 17
15#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */ 18#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
16#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2) 19#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
17 20
21static atomic_t warnings, errors, timeout, async_completed;
22
18static int test_probe(struct platform_device *pdev) 23static int test_probe(struct platform_device *pdev)
19{ 24{
20 dev_info(&pdev->dev, "sleeping for %d msecs in probe\n", 25 struct device *dev = &pdev->dev;
21 TEST_PROBE_DELAY); 26
22 msleep(TEST_PROBE_DELAY); 27 /*
23 dev_info(&pdev->dev, "done sleeping\n"); 28 * Determine if we have hit the "timeout" limit for the test if we
 29 * have then report it as an error, otherwise we will sleep for the
30 * required amount of time and then report completion.
31 */
32 if (atomic_read(&timeout)) {
33 dev_err(dev, "async probe took too long\n");
34 atomic_inc(&errors);
35 } else {
36 dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n",
37 TEST_PROBE_DELAY);
38 msleep(TEST_PROBE_DELAY);
39 dev_dbg(&pdev->dev, "done sleeping\n");
40 }
41
42 /*
43 * Report NUMA mismatch if device node is set and we are not
44 * performing an async init on that node.
45 */
46 if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
47 if (dev_to_node(dev) != numa_node_id()) {
48 dev_warn(dev, "NUMA node mismatch %d != %d\n",
49 dev_to_node(dev), numa_node_id());
50 atomic_inc(&warnings);
51 }
52
53 atomic_inc(&async_completed);
54 }
24 55
25 return 0; 56 return 0;
26} 57}
@@ -41,31 +72,64 @@ static struct platform_driver sync_driver = {
41 .probe = test_probe, 72 .probe = test_probe,
42}; 73};
43 74
44static struct platform_device *async_dev_1, *async_dev_2; 75static struct platform_device *async_dev[NR_CPUS * 2];
45static struct platform_device *sync_dev_1; 76static struct platform_device *sync_dev[2];
77
78static struct platform_device *
79test_platform_device_register_node(char *name, int id, int nid)
80{
81 struct platform_device *pdev;
82 int ret;
83
84 pdev = platform_device_alloc(name, id);
85 if (!pdev)
86 return NULL;
87
88 if (nid != NUMA_NO_NODE)
89 set_dev_node(&pdev->dev, nid);
90
91 ret = platform_device_add(pdev);
92 if (ret) {
93 platform_device_put(pdev);
94 return ERR_PTR(ret);
95 }
96
97 return pdev;
98
99}
46 100
47static int __init test_async_probe_init(void) 101static int __init test_async_probe_init(void)
48{ 102{
49 ktime_t calltime, delta; 103 struct platform_device **pdev = NULL;
104 int async_id = 0, sync_id = 0;
50 unsigned long long duration; 105 unsigned long long duration;
51 int error; 106 ktime_t calltime, delta;
107 int err, nid, cpu;
108
109 pr_info("registering first set of asynchronous devices...\n");
52 110
53 pr_info("registering first asynchronous device...\n"); 111 for_each_online_cpu(cpu) {
112 nid = cpu_to_node(cpu);
113 pdev = &async_dev[async_id];
114 *pdev = test_platform_device_register_node("test_async_driver",
115 async_id,
116 nid);
117 if (IS_ERR(*pdev)) {
118 err = PTR_ERR(*pdev);
119 *pdev = NULL;
120 pr_err("failed to create async_dev: %d\n", err);
121 goto err_unregister_async_devs;
122 }
54 123
55 async_dev_1 = platform_device_register_simple("test_async_driver", 1, 124 async_id++;
56 NULL, 0);
57 if (IS_ERR(async_dev_1)) {
58 error = PTR_ERR(async_dev_1);
59 pr_err("failed to create async_dev_1: %d\n", error);
60 return error;
61 } 125 }
62 126
63 pr_info("registering asynchronous driver...\n"); 127 pr_info("registering asynchronous driver...\n");
64 calltime = ktime_get(); 128 calltime = ktime_get();
65 error = platform_driver_register(&async_driver); 129 err = platform_driver_register(&async_driver);
66 if (error) { 130 if (err) {
67 pr_err("Failed to register async_driver: %d\n", error); 131 pr_err("Failed to register async_driver: %d\n", err);
68 goto err_unregister_async_dev_1; 132 goto err_unregister_async_devs;
69 } 133 }
70 134
71 delta = ktime_sub(ktime_get(), calltime); 135 delta = ktime_sub(ktime_get(), calltime);
@@ -73,86 +137,163 @@ static int __init test_async_probe_init(void)
73 pr_info("registration took %lld msecs\n", duration); 137 pr_info("registration took %lld msecs\n", duration);
74 if (duration > TEST_PROBE_THRESHOLD) { 138 if (duration > TEST_PROBE_THRESHOLD) {
75 pr_err("test failed: probe took too long\n"); 139 pr_err("test failed: probe took too long\n");
76 error = -ETIMEDOUT; 140 err = -ETIMEDOUT;
77 goto err_unregister_async_driver; 141 goto err_unregister_async_driver;
78 } 142 }
79 143
80 pr_info("registering second asynchronous device...\n"); 144 pr_info("registering second set of asynchronous devices...\n");
81 calltime = ktime_get(); 145 calltime = ktime_get();
82 async_dev_2 = platform_device_register_simple("test_async_driver", 2, 146 for_each_online_cpu(cpu) {
83 NULL, 0); 147 nid = cpu_to_node(cpu);
84 if (IS_ERR(async_dev_2)) { 148 pdev = &sync_dev[sync_id];
85 error = PTR_ERR(async_dev_2); 149
86 pr_err("failed to create async_dev_2: %d\n", error); 150 *pdev = test_platform_device_register_node("test_async_driver",
87 goto err_unregister_async_driver; 151 async_id,
152 nid);
153 if (IS_ERR(*pdev)) {
154 err = PTR_ERR(*pdev);
155 *pdev = NULL;
156 pr_err("failed to create async_dev: %d\n", err);
157 goto err_unregister_async_driver;
158 }
159
160 async_id++;
88 } 161 }
89 162
90 delta = ktime_sub(ktime_get(), calltime); 163 delta = ktime_sub(ktime_get(), calltime);
91 duration = (unsigned long long) ktime_to_ms(delta); 164 duration = (unsigned long long) ktime_to_ms(delta);
92 pr_info("registration took %lld msecs\n", duration); 165 dev_info(&(*pdev)->dev,
166 "registration took %lld msecs\n", duration);
93 if (duration > TEST_PROBE_THRESHOLD) { 167 if (duration > TEST_PROBE_THRESHOLD) {
94 pr_err("test failed: probe took too long\n"); 168 dev_err(&(*pdev)->dev,
95 error = -ETIMEDOUT; 169 "test failed: probe took too long\n");
96 goto err_unregister_async_dev_2; 170 err = -ETIMEDOUT;
171 goto err_unregister_async_driver;
97 } 172 }
98 173
99 pr_info("registering synchronous driver...\n");
100 174
101 error = platform_driver_register(&sync_driver); 175 pr_info("registering first synchronous device...\n");
102 if (error) { 176 nid = cpu_to_node(cpu);
103 pr_err("Failed to register async_driver: %d\n", error); 177 pdev = &sync_dev[sync_id];
104 goto err_unregister_async_dev_2; 178
179 *pdev = test_platform_device_register_node("test_sync_driver",
180 sync_id,
181 NUMA_NO_NODE);
182 if (IS_ERR(*pdev)) {
183 err = PTR_ERR(*pdev);
184 *pdev = NULL;
185 pr_err("failed to create sync_dev: %d\n", err);
186 goto err_unregister_async_driver;
105 } 187 }
106 188
107 pr_info("registering synchronous device...\n"); 189 sync_id++;
190
191 pr_info("registering synchronous driver...\n");
108 calltime = ktime_get(); 192 calltime = ktime_get();
109 sync_dev_1 = platform_device_register_simple("test_sync_driver", 1, 193 err = platform_driver_register(&sync_driver);
110 NULL, 0); 194 if (err) {
111 if (IS_ERR(sync_dev_1)) { 195 pr_err("Failed to register async_driver: %d\n", err);
112 error = PTR_ERR(sync_dev_1); 196 goto err_unregister_sync_devs;
113 pr_err("failed to create sync_dev_1: %d\n", error);
114 goto err_unregister_sync_driver;
115 } 197 }
116 198
117 delta = ktime_sub(ktime_get(), calltime); 199 delta = ktime_sub(ktime_get(), calltime);
118 duration = (unsigned long long) ktime_to_ms(delta); 200 duration = (unsigned long long) ktime_to_ms(delta);
119 pr_info("registration took %lld msecs\n", duration); 201 pr_info("registration took %lld msecs\n", duration);
120 if (duration < TEST_PROBE_THRESHOLD) { 202 if (duration < TEST_PROBE_THRESHOLD) {
121 pr_err("test failed: probe was too quick\n"); 203 dev_err(&(*pdev)->dev,
122 error = -ETIMEDOUT; 204 "test failed: probe was too quick\n");
123 goto err_unregister_sync_dev_1; 205 err = -ETIMEDOUT;
206 goto err_unregister_sync_driver;
124 } 207 }
125 208
126 pr_info("completed successfully"); 209 pr_info("registering second synchronous device...\n");
210 pdev = &sync_dev[sync_id];
211 calltime = ktime_get();
127 212
128 return 0; 213 *pdev = test_platform_device_register_node("test_sync_driver",
214 sync_id,
215 NUMA_NO_NODE);
216 if (IS_ERR(*pdev)) {
217 err = PTR_ERR(*pdev);
218 *pdev = NULL;
219 pr_err("failed to create sync_dev: %d\n", err);
220 goto err_unregister_sync_driver;
221 }
129 222
130err_unregister_sync_dev_1: 223 sync_id++;
131 platform_device_unregister(sync_dev_1);
132 224
133err_unregister_sync_driver: 225 delta = ktime_sub(ktime_get(), calltime);
134 platform_driver_unregister(&sync_driver); 226 duration = (unsigned long long) ktime_to_ms(delta);
227 dev_info(&(*pdev)->dev,
228 "registration took %lld msecs\n", duration);
229 if (duration < TEST_PROBE_THRESHOLD) {
230 dev_err(&(*pdev)->dev,
231 "test failed: probe was too quick\n");
232 err = -ETIMEDOUT;
233 goto err_unregister_sync_driver;
234 }
135 235
136err_unregister_async_dev_2: 236 /*
137 platform_device_unregister(async_dev_2); 237 * The async events should have completed while we were taking care
238 * of the synchronous events. We will now terminate any outstanding
239 * asynchronous probe calls remaining by forcing timeout and remove
240 * the driver before we return which should force the flush of the
241 * pending asynchronous probe calls.
242 *
243 * Otherwise if they completed without errors or warnings then
244 * report successful completion.
245 */
246 if (atomic_read(&async_completed) != async_id) {
247 pr_err("async events still pending, forcing timeout\n");
248 atomic_inc(&timeout);
249 err = -ETIMEDOUT;
250 } else if (!atomic_read(&errors) && !atomic_read(&warnings)) {
251 pr_info("completed successfully\n");
252 return 0;
253 }
138 254
255err_unregister_sync_driver:
256 platform_driver_unregister(&sync_driver);
257err_unregister_sync_devs:
258 while (sync_id--)
259 platform_device_unregister(sync_dev[sync_id]);
139err_unregister_async_driver: 260err_unregister_async_driver:
140 platform_driver_unregister(&async_driver); 261 platform_driver_unregister(&async_driver);
262err_unregister_async_devs:
263 while (async_id--)
264 platform_device_unregister(async_dev[async_id]);
265
266 /*
267 * If err is already set then count that as an additional error for
268 * the test. Otherwise we will report an invalid argument error and
269 * not count that as we should have reached here as a result of
270 * errors or warnings being reported by the probe routine.
271 */
272 if (err)
273 atomic_inc(&errors);
274 else
275 err = -EINVAL;
141 276
142err_unregister_async_dev_1: 277 pr_err("Test failed with %d errors and %d warnings\n",
143 platform_device_unregister(async_dev_1); 278 atomic_read(&errors), atomic_read(&warnings));
144 279
145 return error; 280 return err;
146} 281}
147module_init(test_async_probe_init); 282module_init(test_async_probe_init);
148 283
149static void __exit test_async_probe_exit(void) 284static void __exit test_async_probe_exit(void)
150{ 285{
286 int id = 2;
287
151 platform_driver_unregister(&async_driver); 288 platform_driver_unregister(&async_driver);
152 platform_driver_unregister(&sync_driver); 289 platform_driver_unregister(&sync_driver);
153 platform_device_unregister(async_dev_1); 290
154 platform_device_unregister(async_dev_2); 291 while (id--)
155 platform_device_unregister(sync_dev_1); 292 platform_device_unregister(sync_dev[id]);
293
294 id = NR_CPUS * 2;
295 while (id--)
296 platform_device_unregister(async_dev[id]);
156} 297}
157module_exit(test_async_probe_exit); 298module_exit(test_async_probe_exit);
158 299
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1e92b61d0bd5..282e2e82d849 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -428,14 +428,13 @@ static bool single_major = true;
428module_param(single_major, bool, 0444); 428module_param(single_major, bool, 0444);
429MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)"); 429MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
430 430
431static ssize_t rbd_add(struct bus_type *bus, const char *buf, 431static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
432 size_t count); 432static ssize_t remove_store(struct bus_type *bus, const char *buf,
433static ssize_t rbd_remove(struct bus_type *bus, const char *buf, 433 size_t count);
434 size_t count); 434static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
435static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf, 435 size_t count);
436 size_t count); 436static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
437static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf, 437 size_t count);
438 size_t count);
439static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth); 438static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
440 439
441static int rbd_dev_id_to_minor(int dev_id) 440static int rbd_dev_id_to_minor(int dev_id)
@@ -464,16 +463,16 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
464 return is_lock_owner; 463 return is_lock_owner;
465} 464}
466 465
467static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf) 466static ssize_t supported_features_show(struct bus_type *bus, char *buf)
468{ 467{
469 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); 468 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
470} 469}
471 470
472static BUS_ATTR(add, 0200, NULL, rbd_add); 471static BUS_ATTR_WO(add);
473static BUS_ATTR(remove, 0200, NULL, rbd_remove); 472static BUS_ATTR_WO(remove);
474static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major); 473static BUS_ATTR_WO(add_single_major);
475static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major); 474static BUS_ATTR_WO(remove_single_major);
476static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL); 475static BUS_ATTR_RO(supported_features);
477 476
478static struct attribute *rbd_bus_attrs[] = { 477static struct attribute *rbd_bus_attrs[] = {
479 &bus_attr_add.attr, 478 &bus_attr_add.attr,
@@ -5934,9 +5933,7 @@ err_out_args:
5934 goto out; 5933 goto out;
5935} 5934}
5936 5935
5937static ssize_t rbd_add(struct bus_type *bus, 5936static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
5938 const char *buf,
5939 size_t count)
5940{ 5937{
5941 if (single_major) 5938 if (single_major)
5942 return -EINVAL; 5939 return -EINVAL;
@@ -5944,9 +5941,8 @@ static ssize_t rbd_add(struct bus_type *bus,
5944 return do_rbd_add(bus, buf, count); 5941 return do_rbd_add(bus, buf, count);
5945} 5942}
5946 5943
5947static ssize_t rbd_add_single_major(struct bus_type *bus, 5944static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
5948 const char *buf, 5945 size_t count)
5949 size_t count)
5950{ 5946{
5951 return do_rbd_add(bus, buf, count); 5947 return do_rbd_add(bus, buf, count);
5952} 5948}
@@ -6049,9 +6045,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
6049 return count; 6045 return count;
6050} 6046}
6051 6047
6052static ssize_t rbd_remove(struct bus_type *bus, 6048static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
6053 const char *buf,
6054 size_t count)
6055{ 6049{
6056 if (single_major) 6050 if (single_major)
6057 return -EINVAL; 6051 return -EINVAL;
@@ -6059,9 +6053,8 @@ static ssize_t rbd_remove(struct bus_type *bus,
6059 return do_rbd_remove(bus, buf, count); 6053 return do_rbd_remove(bus, buf, count);
6060} 6054}
6061 6055
6062static ssize_t rbd_remove_single_major(struct bus_type *bus, 6056static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
6063 const char *buf, 6057 size_t count)
6064 size_t count)
6065{ 6058{
6066 return do_rbd_remove(bus, buf, count); 6059 return do_rbd_remove(bus, buf, count);
6067} 6060}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index f754578414f0..cac16c4b0df3 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -218,7 +218,7 @@ config FW_CFG_SYSFS_CMDLINE
218 218
219config INTEL_STRATIX10_SERVICE 219config INTEL_STRATIX10_SERVICE
220 tristate "Intel Stratix10 Service Layer" 220 tristate "Intel Stratix10 Service Layer"
221 depends on HAVE_ARM_SMCCC 221 depends on ARCH_STRATIX10 && HAVE_ARM_SMCCC
222 default n 222 default n
223 help 223 help
224 Intel Stratix10 service layer runs at privileged exception level, 224 Intel Stratix10 service layer runs at privileged exception level,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 1bd0cd7168df..05c6bc099d62 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1260,6 +1260,7 @@ static int exynos_iommu_add_device(struct device *dev)
1260 * direct calls to pm_runtime_get/put in this driver. 1260 * direct calls to pm_runtime_get/put in this driver.
1261 */ 1261 */
1262 data->link = device_link_add(dev, data->sysmmu, 1262 data->link = device_link_add(dev, data->sysmmu,
1263 DL_FLAG_STATELESS |
1263 DL_FLAG_PM_RUNTIME); 1264 DL_FLAG_PM_RUNTIME);
1264 } 1265 }
1265 iommu_group_put(group); 1266 iommu_group_put(group);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c9ba9f377f63..77d4bd93fe4b 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1071,7 +1071,8 @@ static int rk_iommu_add_device(struct device *dev)
1071 iommu_group_put(group); 1071 iommu_group_put(group);
1072 1072
1073 iommu_device_link(&iommu->iommu, dev); 1073 iommu_device_link(&iommu->iommu, dev);
1074 data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME); 1074 data->link = device_link_add(dev, iommu->dev,
1075 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
1075 1076
1076 return 0; 1077 return 0;
1077} 1078}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index dca5f7a805cb..7bbff0af29b2 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -23,6 +23,7 @@
23#include <linux/ndctl.h> 23#include <linux/ndctl.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/cpu.h>
26#include <linux/fs.h> 27#include <linux/fs.h>
27#include <linux/io.h> 28#include <linux/io.h>
28#include <linux/mm.h> 29#include <linux/mm.h>
@@ -534,11 +535,15 @@ void __nd_device_register(struct device *dev)
534 set_dev_node(dev, to_nd_region(dev)->numa_node); 535 set_dev_node(dev, to_nd_region(dev)->numa_node);
535 536
536 dev->bus = &nvdimm_bus_type; 537 dev->bus = &nvdimm_bus_type;
537 if (dev->parent) 538 if (dev->parent) {
538 get_device(dev->parent); 539 get_device(dev->parent);
540 if (dev_to_node(dev) == NUMA_NO_NODE)
541 set_dev_node(dev, dev_to_node(dev->parent));
542 }
539 get_device(dev); 543 get_device(dev);
540 async_schedule_domain(nd_async_device_register, dev, 544
541 &nd_async_domain); 545 async_schedule_dev_domain(nd_async_device_register, dev,
546 &nd_async_domain);
542} 547}
543 548
544void nd_device_register(struct device *dev) 549void nd_device_register(struct device *dev)
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9ecfe13157c0..25794c27c7a4 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -412,8 +412,7 @@ static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
412} 412}
413static DEVICE_ATTR_RW(msi_bus); 413static DEVICE_ATTR_RW(msi_bus);
414 414
415static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, 415static ssize_t rescan_store(struct bus_type *bus, const char *buf, size_t count)
416 size_t count)
417{ 416{
418 unsigned long val; 417 unsigned long val;
419 struct pci_bus *b = NULL; 418 struct pci_bus *b = NULL;
@@ -429,7 +428,7 @@ static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
429 } 428 }
430 return count; 429 return count;
431} 430}
432static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store); 431static BUS_ATTR_WO(rescan);
433 432
434static struct attribute *pci_bus_attrs[] = { 433static struct attribute *pci_bus_attrs[] = {
435 &bus_attr_rescan.attr, 434 &bus_attr_rescan.attr,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c25acace7d91..db55acf32a7e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6034,19 +6034,18 @@ static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
6034 return count; 6034 return count;
6035} 6035}
6036 6036
6037static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf) 6037static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6038{ 6038{
6039 return pci_get_resource_alignment_param(buf, PAGE_SIZE); 6039 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
6040} 6040}
6041 6041
6042static ssize_t pci_resource_alignment_store(struct bus_type *bus, 6042static ssize_t resource_alignment_store(struct bus_type *bus,
6043 const char *buf, size_t count) 6043 const char *buf, size_t count)
6044{ 6044{
6045 return pci_set_resource_alignment_param(buf, count); 6045 return pci_set_resource_alignment_param(buf, count);
6046} 6046}
6047 6047
6048static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, 6048static BUS_ATTR_RW(resource_alignment);
6049 pci_resource_alignment_store);
6050 6049
6051static int __init pci_resource_alignment_sysfs_init(void) 6050static int __init pci_resource_alignment_sysfs_init(void)
6052{ 6051{
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 73e4b407f162..ad5e303dda05 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -290,8 +290,7 @@ const struct attribute_group *rio_dev_groups[] = {
290 NULL, 290 NULL,
291}; 291};
292 292
293static ssize_t bus_scan_store(struct bus_type *bus, const char *buf, 293static ssize_t scan_store(struct bus_type *bus, const char *buf, size_t count)
294 size_t count)
295{ 294{
296 long val; 295 long val;
297 int rc; 296 int rc;
@@ -314,7 +313,7 @@ exit:
314 313
315 return rc; 314 return rc;
316} 315}
317static BUS_ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store); 316static BUS_ATTR_WO(scan);
318 317
319static struct attribute *rio_bus_attrs[] = { 318static struct attribute *rio_bus_attrs[] = {
320 &bus_attr_scan.attr, 319 &bus_attr_scan.attr,