aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-08-04 21:34:04 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-04 21:34:04 -0400
commit29b88e23a9212136d39b0161a39afe587d0170a5 (patch)
tree48d9f857b137222e35f853004973e12a515314f5
parent2521129a6d2fd8a81f99cf95055eddea3df914ff (diff)
parent4e3a25b0274b8474f5ad46215a270785dd18265e (diff)
Merge tag 'driver-core-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
Pull driver core updates from Greg KH: "Here's the big driver-core pull request for 3.17-rc1. Largest thing in here is the dma-buf rework and fence code, that touched many different subsystems so it was agreed it should go through this tree to handle merge issues. There's also some firmware loading updates, as well as tests added, and a few other tiny changes, the changelog has the details. All have been in linux-next for a long time" * tag 'driver-core-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (32 commits) ARM: imx: Remove references to platform_bus in mxc code firmware loader: Fix _request_firmware_load() return val for fw load abort platform: Remove most references to platform_bus device test: add firmware_class loader test doc: fix minor typos in firmware_class README staging: android: Cleanup style issues Documentation: devres: Sort managed interfaces Documentation: devres: Add devm_kmalloc() et al fs: debugfs: remove trailing whitespace kernfs: kernel-doc warning fix debugfs: Fix corrupted loop in debugfs_remove_recursive stable_kernel_rules: Add pointer to netdev-FAQ for network patches driver core: platform: add device binding path 'driver_override' driver core/platform: remove unused implicit padding in platform_object firmware loader: inform direct failure when udev loader is disabled firmware: replace ALIGN(PAGE_SIZE) by PAGE_ALIGN firmware: read firmware size using i_size_read() firmware loader: allow disabling of udev as firmware loader reservation: add suppport for read-only access using rcu reservation: update api and add some helpers ... Conflicts: drivers/base/platform.c
-rw-r--r--Documentation/ABI/testing/sysfs-bus-platform20
-rw-r--r--Documentation/DocBook/device-drivers.tmpl8
-rw-r--r--Documentation/driver-model/devres.txt112
-rw-r--r--Documentation/firmware_class/README6
-rw-r--r--Documentation/stable_kernel_rules.txt3
-rw-r--r--MAINTAINERS4
-rw-r--r--arch/arm/mach-bcm/board_bcm21664.c3
-rw-r--r--arch/arm/mach-bcm/board_bcm281xx.c3
-rw-r--r--arch/arm/mach-clps711x/board-edb7211.c6
-rw-r--r--arch/arm/mach-clps711x/board-p720t.c6
-rw-r--r--arch/arm/mach-imx/devices/devices.c2
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c2
-rw-r--r--arch/arm/mach-shmobile/board-ape6evm.c14
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c19
-rw-r--r--arch/arm/mach-shmobile/board-genmai.c5
-rw-r--r--arch/arm/mach-shmobile/board-koelsch.c26
-rw-r--r--arch/arm/mach-shmobile/board-lager.c34
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c1
-rw-r--r--arch/arm/mach-shmobile/setup-r7s72100.c2
-rw-r--r--arch/arm/mach-shmobile/setup-r8a73a4.c8
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c13
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c6
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7790.c10
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7791.c8
-rw-r--r--arch/unicore32/kernel/puv3-core.c2
-rw-r--r--arch/unicore32/kernel/puv3-nb0916.c6
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/base/Kconfig19
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/component.c192
-rw-r--r--drivers/base/firmware_class.c47
-rw-r--r--drivers/base/platform.c51
-rw-r--r--drivers/base/reservation.c39
-rw-r--r--drivers/bus/brcmstb_gisb.c6
-rw-r--r--drivers/dma-buf/Makefile1
-rw-r--r--drivers/dma-buf/dma-buf.c (renamed from drivers/base/dma-buf.c)168
-rw-r--r--drivers/dma-buf/fence.c431
-rw-r--r--drivers/dma-buf/reservation.c477
-rw-r--r--drivers/dma-buf/seqno-fence.c73
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c8
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c2
-rw-r--r--drivers/gpu/drm/drm_prime.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c8
-rw-r--r--drivers/gpu/drm/tegra/gem.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c2
-rw-r--r--drivers/staging/android/Kconfig1
-rw-r--r--drivers/staging/android/Makefile2
-rw-r--r--drivers/staging/android/ion/ion.c3
-rw-r--r--drivers/staging/android/sw_sync.c6
-rw-r--r--drivers/staging/android/sync.c913
-rw-r--r--drivers/staging/android/sync.h79
-rw-r--r--drivers/staging/android/sync_debug.c252
-rw-r--r--drivers/staging/android/trace/sync.h12
-rw-r--r--fs/debugfs/file.c2
-rw-r--r--fs/debugfs/inode.c39
-rw-r--r--fs/kernfs/file.c2
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/linux/component.h7
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/dma-buf.h21
-rw-r--r--include/linux/fence.h360
-rw-r--r--include/linux/firmware.h15
-rw-r--r--include/linux/platform_device.h1
-rw-r--r--include/linux/reservation.h82
-rw-r--r--include/linux/seqno-fence.h116
-rw-r--r--include/trace/events/fence.h128
-rw-r--r--lib/Kconfig.debug13
-rw-r--r--lib/Makefile1
-rw-r--r--lib/devres.c28
-rw-r--r--lib/test_firmware.c117
-rw-r--r--scripts/coccinelle/api/devm_ioremap_resource.cocci90
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/firmware/Makefile27
-rw-r--r--tools/testing/selftests/firmware/fw_filesystem.sh62
-rw-r--r--tools/testing/selftests/firmware/fw_userhelper.sh89
83 files changed, 3283 insertions, 1076 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-platform b/Documentation/ABI/testing/sysfs-bus-platform
new file mode 100644
index 000000000000..5172a6124b27
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-platform
@@ -0,0 +1,20 @@
1What: /sys/bus/platform/devices/.../driver_override
2Date: April 2014
3Contact: Kim Phillips <kim.phillips@freescale.com>
4Description:
5 This file allows the driver for a device to be specified which
6 will override standard OF, ACPI, ID table, and name matching.
7 When specified, only a driver with a name matching the value
8 written to driver_override will have an opportunity to bind
9 to the device. The override is specified by writing a string
10 to the driver_override file (echo vfio-platform > \
11 driver_override) and may be cleared with an empty string
12 (echo > driver_override). This returns the device to standard
13 matching rules binding. Writing to driver_override does not
14 automatically unbind the device from its current driver or make
15 any attempt to automatically load the specified driver. If no
16 driver with a matching name is currently loaded in the kernel,
17 the device will not bind to any driver. This also allows
18 devices to opt-out of driver binding using a driver_override
19 name such as "none". Only a single driver may be specified in
20 the override, there is no support for parsing delimiters.
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index cc63f30de166..dd3f278faa8a 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -128,8 +128,12 @@ X!Edrivers/base/interface.c
128!Edrivers/base/bus.c 128!Edrivers/base/bus.c
129 </sect1> 129 </sect1>
130 <sect1><title>Device Drivers DMA Management</title> 130 <sect1><title>Device Drivers DMA Management</title>
131!Edrivers/base/dma-buf.c 131!Edrivers/dma-buf/dma-buf.c
132!Edrivers/base/reservation.c 132!Edrivers/dma-buf/fence.c
133!Edrivers/dma-buf/seqno-fence.c
134!Iinclude/linux/fence.h
135!Iinclude/linux/seqno-fence.h
136!Edrivers/dma-buf/reservation.c
133!Iinclude/linux/reservation.h 137!Iinclude/linux/reservation.h
134!Edrivers/base/dma-coherent.c 138!Edrivers/base/dma-coherent.c
135!Edrivers/base/dma-mapping.c 139!Edrivers/base/dma-mapping.c
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 1525e30483fd..d14710b04439 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -233,66 +233,78 @@ certainly invest a bit more effort into libata core layer).
233 6. List of managed interfaces 233 6. List of managed interfaces
234 ----------------------------- 234 -----------------------------
235 235
236MEM 236CLOCK
237 devm_kzalloc() 237 devm_clk_get()
238 devm_kfree() 238 devm_clk_put()
239 devm_kmemdup() 239
240 devm_get_free_pages() 240DMA
241 devm_free_pages() 241 dmam_alloc_coherent()
242 dmam_alloc_noncoherent()
243 dmam_declare_coherent_memory()
244 dmam_free_coherent()
245 dmam_free_noncoherent()
246 dmam_pool_create()
247 dmam_pool_destroy()
248
249GPIO
250 devm_gpiod_get()
251 devm_gpiod_get_index()
252 devm_gpiod_get_index_optional()
253 devm_gpiod_get_optional()
254 devm_gpiod_put()
242 255
243IIO 256IIO
244 devm_iio_device_alloc() 257 devm_iio_device_alloc()
245 devm_iio_device_free() 258 devm_iio_device_free()
246 devm_iio_trigger_alloc()
247 devm_iio_trigger_free()
248 devm_iio_device_register() 259 devm_iio_device_register()
249 devm_iio_device_unregister() 260 devm_iio_device_unregister()
261 devm_iio_trigger_alloc()
262 devm_iio_trigger_free()
250 263
251IO region 264IO region
252 devm_request_region()
253 devm_request_mem_region()
254 devm_release_region()
255 devm_release_mem_region() 265 devm_release_mem_region()
256 266 devm_release_region()
257IRQ 267 devm_request_mem_region()
258 devm_request_irq() 268 devm_request_region()
259 devm_free_irq()
260
261DMA
262 dmam_alloc_coherent()
263 dmam_free_coherent()
264 dmam_alloc_noncoherent()
265 dmam_free_noncoherent()
266 dmam_declare_coherent_memory()
267 dmam_pool_create()
268 dmam_pool_destroy()
269
270PCI
271 pcim_enable_device() : after success, all PCI ops become managed
272 pcim_pin_device() : keep PCI device enabled after release
273 269
274IOMAP 270IOMAP
275 devm_ioport_map() 271 devm_ioport_map()
276 devm_ioport_unmap() 272 devm_ioport_unmap()
277 devm_ioremap() 273 devm_ioremap()
278 devm_ioremap_nocache() 274 devm_ioremap_nocache()
279 devm_iounmap()
280 devm_ioremap_resource() : checks resource, requests memory region, ioremaps 275 devm_ioremap_resource() : checks resource, requests memory region, ioremaps
281 devm_request_and_ioremap() : obsoleted by devm_ioremap_resource() 276 devm_iounmap()
282 pcim_iomap() 277 pcim_iomap()
283 pcim_iounmap()
284 pcim_iomap_table() : array of mapped addresses indexed by BAR
285 pcim_iomap_regions() : do request_region() and iomap() on multiple BARs 278 pcim_iomap_regions() : do request_region() and iomap() on multiple BARs
279 pcim_iomap_table() : array of mapped addresses indexed by BAR
280 pcim_iounmap()
286 281
287REGULATOR 282IRQ
288 devm_regulator_get() 283 devm_free_irq()
289 devm_regulator_put() 284 devm_request_irq()
290 devm_regulator_bulk_get()
291 devm_regulator_register()
292 285
293CLOCK 286MDIO
294 devm_clk_get() 287 devm_mdiobus_alloc()
295 devm_clk_put() 288 devm_mdiobus_alloc_size()
289 devm_mdiobus_free()
290
291MEM
292 devm_free_pages()
293 devm_get_free_pages()
294 devm_kcalloc()
295 devm_kfree()
296 devm_kmalloc()
297 devm_kmalloc_array()
298 devm_kmemdup()
299 devm_kzalloc()
300
301PCI
302 pcim_enable_device() : after success, all PCI ops become managed
303 pcim_pin_device() : keep PCI device enabled after release
304
305PHY
306 devm_usb_get_phy()
307 devm_usb_put_phy()
296 308
297PINCTRL 309PINCTRL
298 devm_pinctrl_get() 310 devm_pinctrl_get()
@@ -302,24 +314,14 @@ PWM
302 devm_pwm_get() 314 devm_pwm_get()
303 devm_pwm_put() 315 devm_pwm_put()
304 316
305PHY 317REGULATOR
306 devm_usb_get_phy() 318 devm_regulator_bulk_get()
307 devm_usb_put_phy() 319 devm_regulator_get()
320 devm_regulator_put()
321 devm_regulator_register()
308 322
309SLAVE DMA ENGINE 323SLAVE DMA ENGINE
310 devm_acpi_dma_controller_register() 324 devm_acpi_dma_controller_register()
311 325
312SPI 326SPI
313 devm_spi_register_master() 327 devm_spi_register_master()
314
315GPIO
316 devm_gpiod_get()
317 devm_gpiod_get_index()
318 devm_gpiod_get_optional()
319 devm_gpiod_get_index_optional()
320 devm_gpiod_put()
321
322MDIO
323 devm_mdiobus_alloc()
324 devm_mdiobus_alloc_size()
325 devm_mdiobus_free()
diff --git a/Documentation/firmware_class/README b/Documentation/firmware_class/README
index 43fada989e65..71f86859d7d8 100644
--- a/Documentation/firmware_class/README
+++ b/Documentation/firmware_class/README
@@ -64,7 +64,7 @@
64 64
65 if(request_firmware(&fw_entry, $FIRMWARE, device) == 0) 65 if(request_firmware(&fw_entry, $FIRMWARE, device) == 0)
66 copy_fw_to_device(fw_entry->data, fw_entry->size); 66 copy_fw_to_device(fw_entry->data, fw_entry->size);
67 release(fw_entry); 67 release_firmware(fw_entry);
68 68
69 Sample/simple hotplug script: 69 Sample/simple hotplug script:
70 ============================ 70 ============================
@@ -74,7 +74,7 @@
74 HOTPLUG_FW_DIR=/usr/lib/hotplug/firmware/ 74 HOTPLUG_FW_DIR=/usr/lib/hotplug/firmware/
75 75
76 echo 1 > /sys/$DEVPATH/loading 76 echo 1 > /sys/$DEVPATH/loading
77 cat $HOTPLUG_FW_DIR/$FIRMWARE > /sysfs/$DEVPATH/data 77 cat $HOTPLUG_FW_DIR/$FIRMWARE > /sys/$DEVPATH/data
78 echo 0 > /sys/$DEVPATH/loading 78 echo 0 > /sys/$DEVPATH/loading
79 79
80 Random notes: 80 Random notes:
@@ -123,6 +123,6 @@
123 -------------------- 123 --------------------
124 After firmware cache mechanism is introduced during system sleep, 124 After firmware cache mechanism is introduced during system sleep,
125 request_firmware can be called safely inside device's suspend and 125 request_firmware can be called safely inside device's suspend and
126 resume callback, and callers need't cache the firmware by 126 resume callback, and callers needn't cache the firmware by
127 themselves any more for dealing with firmware loss during system 127 themselves any more for dealing with firmware loss during system
128 resume. 128 resume.
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index cbc2f03056bd..aee73e78c7d4 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -29,6 +29,9 @@ Rules on what kind of patches are accepted, and which ones are not, into the
29 29
30Procedure for submitting patches to the -stable tree: 30Procedure for submitting patches to the -stable tree:
31 31
32 - If the patch covers files in net/ or drivers/net please follow netdev stable
33 submission guidelines as described in
34 Documentation/networking/netdev-FAQ.txt
32 - Send the patch, after verifying that it follows the above rules, to 35 - Send the patch, after verifying that it follows the above rules, to
33 stable@vger.kernel.org. You must note the upstream commit ID in the 36 stable@vger.kernel.org. You must note the upstream commit ID in the
34 changelog of your submission, as well as the kernel version you wish 37 changelog of your submission, as well as the kernel version you wish
diff --git a/MAINTAINERS b/MAINTAINERS
index 211389b6182f..8ea208b3ea24 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2915,8 +2915,8 @@ S: Maintained
2915L: linux-media@vger.kernel.org 2915L: linux-media@vger.kernel.org
2916L: dri-devel@lists.freedesktop.org 2916L: dri-devel@lists.freedesktop.org
2917L: linaro-mm-sig@lists.linaro.org 2917L: linaro-mm-sig@lists.linaro.org
2918F: drivers/base/dma-buf* 2918F: drivers/dma-buf/
2919F: include/linux/dma-buf* 2919F: include/linux/dma-buf* include/linux/reservation.h include/linux/*fence.h
2920F: Documentation/dma-buf-sharing.txt 2920F: Documentation/dma-buf-sharing.txt
2921T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git 2921T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
2922 2922
diff --git a/arch/arm/mach-bcm/board_bcm21664.c b/arch/arm/mach-bcm/board_bcm21664.c
index f0521cc0640d..82ad5687771f 100644
--- a/arch/arm/mach-bcm/board_bcm21664.c
+++ b/arch/arm/mach-bcm/board_bcm21664.c
@@ -60,8 +60,7 @@ static void bcm21664_restart(enum reboot_mode mode, const char *cmd)
60 60
61static void __init bcm21664_init(void) 61static void __init bcm21664_init(void)
62{ 62{
63 of_platform_populate(NULL, of_default_bus_match_table, NULL, 63 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
64 &platform_bus);
65 kona_l2_cache_init(); 64 kona_l2_cache_init();
66} 65}
67 66
diff --git a/arch/arm/mach-bcm/board_bcm281xx.c b/arch/arm/mach-bcm/board_bcm281xx.c
index 1ac59fc0cb15..2e367bd7c600 100644
--- a/arch/arm/mach-bcm/board_bcm281xx.c
+++ b/arch/arm/mach-bcm/board_bcm281xx.c
@@ -58,8 +58,7 @@ static void bcm281xx_restart(enum reboot_mode mode, const char *cmd)
58 58
59static void __init bcm281xx_init(void) 59static void __init bcm281xx_init(void)
60{ 60{
61 of_platform_populate(NULL, of_default_bus_match_table, NULL, 61 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
62 &platform_bus);
63 kona_l2_cache_init(); 62 kona_l2_cache_init();
64} 63}
65 64
diff --git a/arch/arm/mach-clps711x/board-edb7211.c b/arch/arm/mach-clps711x/board-edb7211.c
index f9828f89972a..6144fb5cdc36 100644
--- a/arch/arm/mach-clps711x/board-edb7211.c
+++ b/arch/arm/mach-clps711x/board-edb7211.c
@@ -158,16 +158,16 @@ static void __init edb7211_init_late(void)
158 gpio_request_array(edb7211_gpios, ARRAY_SIZE(edb7211_gpios)); 158 gpio_request_array(edb7211_gpios, ARRAY_SIZE(edb7211_gpios));
159 159
160 platform_device_register(&edb7211_flash_pdev); 160 platform_device_register(&edb7211_flash_pdev);
161 platform_device_register_data(&platform_bus, "platform-lcd", 0, 161 platform_device_register_data(NULL, "platform-lcd", 0,
162 &edb7211_lcd_power_pdata, 162 &edb7211_lcd_power_pdata,
163 sizeof(edb7211_lcd_power_pdata)); 163 sizeof(edb7211_lcd_power_pdata));
164 platform_device_register_data(&platform_bus, "generic-bl", 0, 164 platform_device_register_data(NULL, "generic-bl", 0,
165 &edb7211_lcd_backlight_pdata, 165 &edb7211_lcd_backlight_pdata,
166 sizeof(edb7211_lcd_backlight_pdata)); 166 sizeof(edb7211_lcd_backlight_pdata));
167 platform_device_register_simple("video-clps711x", 0, NULL, 0); 167 platform_device_register_simple("video-clps711x", 0, NULL, 0);
168 platform_device_register_simple("cs89x0", 0, edb7211_cs8900_resource, 168 platform_device_register_simple("cs89x0", 0, edb7211_cs8900_resource,
169 ARRAY_SIZE(edb7211_cs8900_resource)); 169 ARRAY_SIZE(edb7211_cs8900_resource));
170 platform_device_register_data(&platform_bus, "i2c-gpio", 0, 170 platform_device_register_data(NULL, "i2c-gpio", 0,
171 &edb7211_i2c_pdata, 171 &edb7211_i2c_pdata,
172 sizeof(edb7211_i2c_pdata)); 172 sizeof(edb7211_i2c_pdata));
173} 173}
diff --git a/arch/arm/mach-clps711x/board-p720t.c b/arch/arm/mach-clps711x/board-p720t.c
index 0cf0e51e6546..96bcc76c4437 100644
--- a/arch/arm/mach-clps711x/board-p720t.c
+++ b/arch/arm/mach-clps711x/board-p720t.c
@@ -348,14 +348,14 @@ static void __init p720t_init_late(void)
348{ 348{
349 WARN_ON(gpio_request_array(p720t_gpios, ARRAY_SIZE(p720t_gpios))); 349 WARN_ON(gpio_request_array(p720t_gpios, ARRAY_SIZE(p720t_gpios)));
350 350
351 platform_device_register_data(&platform_bus, "platform-lcd", 0, 351 platform_device_register_data(NULL, "platform-lcd", 0,
352 &p720t_lcd_power_pdata, 352 &p720t_lcd_power_pdata,
353 sizeof(p720t_lcd_power_pdata)); 353 sizeof(p720t_lcd_power_pdata));
354 platform_device_register_data(&platform_bus, "generic-bl", 0, 354 platform_device_register_data(NULL, "generic-bl", 0,
355 &p720t_lcd_backlight_pdata, 355 &p720t_lcd_backlight_pdata,
356 sizeof(p720t_lcd_backlight_pdata)); 356 sizeof(p720t_lcd_backlight_pdata));
357 platform_device_register_simple("video-clps711x", 0, NULL, 0); 357 platform_device_register_simple("video-clps711x", 0, NULL, 0);
358 platform_device_register_data(&platform_bus, "leds-gpio", 0, 358 platform_device_register_data(NULL, "leds-gpio", 0,
359 &p720t_gpio_led_pdata, 359 &p720t_gpio_led_pdata,
360 sizeof(p720t_gpio_led_pdata)); 360 sizeof(p720t_gpio_led_pdata));
361} 361}
diff --git a/arch/arm/mach-imx/devices/devices.c b/arch/arm/mach-imx/devices/devices.c
index 1b4366a0e7c0..8eab5440da28 100644
--- a/arch/arm/mach-imx/devices/devices.c
+++ b/arch/arm/mach-imx/devices/devices.c
@@ -24,12 +24,10 @@
24 24
25struct device mxc_aips_bus = { 25struct device mxc_aips_bus = {
26 .init_name = "mxc_aips", 26 .init_name = "mxc_aips",
27 .parent = &platform_bus,
28}; 27};
29 28
30struct device mxc_ahb_bus = { 29struct device mxc_ahb_bus = {
31 .init_name = "mxc_ahb", 30 .init_name = "mxc_ahb",
32 .parent = &platform_bus,
33}; 31};
34 32
35int __init mxc_device_init(void) 33int __init mxc_device_init(void)
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index 2f834ce8f39c..eb1c3477c48a 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -245,7 +245,7 @@ static void __init mx27ads_regulator_init(void)
245 vchip->set = vgpio_set; 245 vchip->set = vgpio_set;
246 gpiochip_add(vchip); 246 gpiochip_add(vchip);
247 247
248 platform_device_register_data(&platform_bus, "reg-fixed-voltage", 248 platform_device_register_data(NULL, "reg-fixed-voltage",
249 PLATFORM_DEVID_AUTO, 249 PLATFORM_DEVID_AUTO,
250 &mx27ads_lcd_regulator_pdata, 250 &mx27ads_lcd_regulator_pdata,
251 sizeof(mx27ads_lcd_regulator_pdata)); 251 sizeof(mx27ads_lcd_regulator_pdata));
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
index fe071a9130b7..7ab99a4972a0 100644
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ b/arch/arm/mach-shmobile/board-ape6evm.c
@@ -248,29 +248,29 @@ static void __init ape6evm_add_standard_devices(void)
248 248
249 regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); 249 regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
250 250
251 platform_device_register_resndata(&platform_bus, "smsc911x", -1, 251 platform_device_register_resndata(NULL, "smsc911x", -1,
252 lan9220_res, ARRAY_SIZE(lan9220_res), 252 lan9220_res, ARRAY_SIZE(lan9220_res),
253 &lan9220_data, sizeof(lan9220_data)); 253 &lan9220_data, sizeof(lan9220_data));
254 254
255 regulator_register_always_on(1, "MMC0 Vcc", vcc_mmc0_consumers, 255 regulator_register_always_on(1, "MMC0 Vcc", vcc_mmc0_consumers,
256 ARRAY_SIZE(vcc_mmc0_consumers), 2800000); 256 ARRAY_SIZE(vcc_mmc0_consumers), 2800000);
257 platform_device_register_resndata(&platform_bus, "sh_mmcif", 0, 257 platform_device_register_resndata(NULL, "sh_mmcif", 0,
258 mmcif0_resources, ARRAY_SIZE(mmcif0_resources), 258 mmcif0_resources, ARRAY_SIZE(mmcif0_resources),
259 &mmcif0_pdata, sizeof(mmcif0_pdata)); 259 &mmcif0_pdata, sizeof(mmcif0_pdata));
260 platform_device_register_data(&platform_bus, "reg-fixed-voltage", 2, 260 platform_device_register_data(NULL, "reg-fixed-voltage", 2,
261 &vcc_sdhi0_info, sizeof(vcc_sdhi0_info)); 261 &vcc_sdhi0_info, sizeof(vcc_sdhi0_info));
262 platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 0, 262 platform_device_register_resndata(NULL, "sh_mobile_sdhi", 0,
263 sdhi0_resources, ARRAY_SIZE(sdhi0_resources), 263 sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
264 &sdhi0_pdata, sizeof(sdhi0_pdata)); 264 &sdhi0_pdata, sizeof(sdhi0_pdata));
265 regulator_register_always_on(3, "SDHI1 Vcc", vcc_sdhi1_consumers, 265 regulator_register_always_on(3, "SDHI1 Vcc", vcc_sdhi1_consumers,
266 ARRAY_SIZE(vcc_sdhi1_consumers), 3300000); 266 ARRAY_SIZE(vcc_sdhi1_consumers), 3300000);
267 platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 1, 267 platform_device_register_resndata(NULL, "sh_mobile_sdhi", 1,
268 sdhi1_resources, ARRAY_SIZE(sdhi1_resources), 268 sdhi1_resources, ARRAY_SIZE(sdhi1_resources),
269 &sdhi1_pdata, sizeof(sdhi1_pdata)); 269 &sdhi1_pdata, sizeof(sdhi1_pdata));
270 platform_device_register_data(&platform_bus, "gpio-keys", -1, 270 platform_device_register_data(NULL, "gpio-keys", -1,
271 &ape6evm_keys_pdata, 271 &ape6evm_keys_pdata,
272 sizeof(ape6evm_keys_pdata)); 272 sizeof(ape6evm_keys_pdata));
273 platform_device_register_data(&platform_bus, "leds-gpio", -1, 273 platform_device_register_data(NULL, "leds-gpio", -1,
274 &ape6evm_leds_pdata, 274 &ape6evm_leds_pdata,
275 sizeof(ape6evm_leds_pdata)); 275 sizeof(ape6evm_leds_pdata));
276} 276}
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index f444be2f241e..3ec82a4c35c5 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -177,7 +177,7 @@ static struct renesas_usbhs_platform_info usbhs_info __initdata = {
177#define USB1_DEVICE "renesas_usbhs" 177#define USB1_DEVICE "renesas_usbhs"
178#define ADD_USB_FUNC_DEVICE_IF_POSSIBLE() \ 178#define ADD_USB_FUNC_DEVICE_IF_POSSIBLE() \
179 platform_device_register_resndata( \ 179 platform_device_register_resndata( \
180 &platform_bus, "renesas_usbhs", -1, \ 180 NULL, "renesas_usbhs", -1, \
181 usbhsf_resources, \ 181 usbhsf_resources, \
182 ARRAY_SIZE(usbhsf_resources), \ 182 ARRAY_SIZE(usbhsf_resources), \
183 &usbhs_info, sizeof(struct renesas_usbhs_platform_info)) 183 &usbhs_info, sizeof(struct renesas_usbhs_platform_info))
@@ -236,7 +236,6 @@ static struct sh_eth_plat_data ether_platform_data __initdata = {
236}; 236};
237 237
238static struct platform_device_info ether_info __initdata = { 238static struct platform_device_info ether_info __initdata = {
239 .parent = &platform_bus,
240 .name = "r8a777x-ether", 239 .name = "r8a777x-ether",
241 .id = -1, 240 .id = -1,
242 .res = ether_resources, 241 .res = ether_resources,
@@ -322,7 +321,6 @@ static struct resource vin##idx##_resources[] __initdata = { \
322}; \ 321}; \
323 \ 322 \
324static struct platform_device_info vin##idx##_info __initdata = { \ 323static struct platform_device_info vin##idx##_info __initdata = { \
325 .parent = &platform_bus, \
326 .name = "r8a7778-vin", \ 324 .name = "r8a7778-vin", \
327 .id = idx, \ 325 .id = idx, \
328 .res = vin##idx##_resources, \ 326 .res = vin##idx##_resources, \
@@ -621,10 +619,10 @@ static void __init bockw_init(void)
621 /* VIN1 has a pin conflict with Ether */ 619 /* VIN1 has a pin conflict with Ether */
622 if (!IS_ENABLED(CONFIG_SH_ETH)) 620 if (!IS_ENABLED(CONFIG_SH_ETH))
623 platform_device_register_full(&vin1_info); 621 platform_device_register_full(&vin1_info);
624 platform_device_register_data(&platform_bus, "soc-camera-pdrv", 0, 622 platform_device_register_data(NULL, "soc-camera-pdrv", 0,
625 &iclink0_ml86v7667, 623 &iclink0_ml86v7667,
626 sizeof(iclink0_ml86v7667)); 624 sizeof(iclink0_ml86v7667));
627 platform_device_register_data(&platform_bus, "soc-camera-pdrv", 1, 625 platform_device_register_data(NULL, "soc-camera-pdrv", 1,
628 &iclink1_ml86v7667, 626 &iclink1_ml86v7667,
629 sizeof(iclink1_ml86v7667)); 627 sizeof(iclink1_ml86v7667));
630 628
@@ -637,12 +635,12 @@ static void __init bockw_init(void)
637 r8a7778_pinmux_init(); 635 r8a7778_pinmux_init();
638 636
639 platform_device_register_resndata( 637 platform_device_register_resndata(
640 &platform_bus, "sh_mmcif", -1, 638 NULL, "sh_mmcif", -1,
641 mmc_resources, ARRAY_SIZE(mmc_resources), 639 mmc_resources, ARRAY_SIZE(mmc_resources),
642 &sh_mmcif_plat, sizeof(struct sh_mmcif_plat_data)); 640 &sh_mmcif_plat, sizeof(struct sh_mmcif_plat_data));
643 641
644 platform_device_register_resndata( 642 platform_device_register_resndata(
645 &platform_bus, "rcar_usb_phy", -1, 643 NULL, "rcar_usb_phy", -1,
646 usb_phy_resources, 644 usb_phy_resources,
647 ARRAY_SIZE(usb_phy_resources), 645 ARRAY_SIZE(usb_phy_resources),
648 &usb_phy_platform_data, 646 &usb_phy_platform_data,
@@ -668,7 +666,7 @@ static void __init bockw_init(void)
668 iowrite16(val, fpga + IRQ0MR); 666 iowrite16(val, fpga + IRQ0MR);
669 667
670 platform_device_register_resndata( 668 platform_device_register_resndata(
671 &platform_bus, "smsc911x", -1, 669 NULL, "smsc911x", -1,
672 smsc911x_resources, ARRAY_SIZE(smsc911x_resources), 670 smsc911x_resources, ARRAY_SIZE(smsc911x_resources),
673 &smsc911x_data, sizeof(smsc911x_data)); 671 &smsc911x_data, sizeof(smsc911x_data));
674 } 672 }
@@ -685,7 +683,7 @@ static void __init bockw_init(void)
685 iounmap(base); 683 iounmap(base);
686 684
687 platform_device_register_resndata( 685 platform_device_register_resndata(
688 &platform_bus, "sh_mobile_sdhi", 0, 686 NULL, "sh_mobile_sdhi", 0,
689 sdhi0_resources, ARRAY_SIZE(sdhi0_resources), 687 sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
690 &sdhi0_info, sizeof(struct sh_mobile_sdhi_info)); 688 &sdhi0_info, sizeof(struct sh_mobile_sdhi_info));
691 } 689 }
@@ -700,7 +698,7 @@ static void __init bockw_init(void)
700 "ak4554-adc-dac", 1, NULL, 0); 698 "ak4554-adc-dac", 1, NULL, 0);
701 699
702 pdev = platform_device_register_resndata( 700 pdev = platform_device_register_resndata(
703 &platform_bus, "rcar_sound", -1, 701 NULL, "rcar_sound", -1,
704 rsnd_resources, ARRAY_SIZE(rsnd_resources), 702 rsnd_resources, ARRAY_SIZE(rsnd_resources),
705 &rsnd_info, sizeof(rsnd_info)); 703 &rsnd_info, sizeof(rsnd_info));
706 704
@@ -710,7 +708,6 @@ static void __init bockw_init(void)
710 708
711 for (i = 0; i < ARRAY_SIZE(rsnd_card_info); i++) { 709 for (i = 0; i < ARRAY_SIZE(rsnd_card_info); i++) {
712 struct platform_device_info cardinfo = { 710 struct platform_device_info cardinfo = {
713 .parent = &platform_bus,
714 .name = "asoc-simple-card", 711 .name = "asoc-simple-card",
715 .id = i, 712 .id = i,
716 .data = &rsnd_card_info[i], 713 .data = &rsnd_card_info[i],
diff --git a/arch/arm/mach-shmobile/board-genmai.c b/arch/arm/mach-shmobile/board-genmai.c
index c94201ee8596..37184ff8c5c2 100644
--- a/arch/arm/mach-shmobile/board-genmai.c
+++ b/arch/arm/mach-shmobile/board-genmai.c
@@ -46,7 +46,6 @@ static const struct resource ether_resources[] __initconst = {
46}; 46};
47 47
48static const struct platform_device_info ether_info __initconst = { 48static const struct platform_device_info ether_info __initconst = {
49 .parent = &platform_bus,
50 .name = "r7s72100-ether", 49 .name = "r7s72100-ether",
51 .id = -1, 50 .id = -1,
52 .res = ether_resources, 51 .res = ether_resources,
@@ -76,7 +75,7 @@ static const struct rspi_plat_data rspi_pdata __initconst = {
76}; 75};
77 76
78#define r7s72100_register_rspi(idx) \ 77#define r7s72100_register_rspi(idx) \
79 platform_device_register_resndata(&platform_bus, "rspi-rz", idx, \ 78 platform_device_register_resndata(NULL, "rspi-rz", idx, \
80 rspi##idx##_resources, \ 79 rspi##idx##_resources, \
81 ARRAY_SIZE(rspi##idx##_resources), \ 80 ARRAY_SIZE(rspi##idx##_resources), \
82 &rspi_pdata, sizeof(rspi_pdata)) 81 &rspi_pdata, sizeof(rspi_pdata))
@@ -118,7 +117,7 @@ R7S72100_SCIF(6, 0xe800a000, gic_iid(245));
118R7S72100_SCIF(7, 0xe800a800, gic_iid(249)); 117R7S72100_SCIF(7, 0xe800a800, gic_iid(249));
119 118
120#define r7s72100_register_scif(index) \ 119#define r7s72100_register_scif(index) \
121 platform_device_register_resndata(&platform_bus, "sh-sci", index, \ 120 platform_device_register_resndata(NULL, "sh-sci", index, \
122 scif##index##_resources, \ 121 scif##index##_resources, \
123 ARRAY_SIZE(scif##index##_resources), \ 122 ARRAY_SIZE(scif##index##_resources), \
124 &scif##index##_platform_data, \ 123 &scif##index##_platform_data, \
diff --git a/arch/arm/mach-shmobile/board-koelsch.c b/arch/arm/mach-shmobile/board-koelsch.c
index c6c68892caa3..d3aa6ae05eeb 100644
--- a/arch/arm/mach-shmobile/board-koelsch.c
+++ b/arch/arm/mach-shmobile/board-koelsch.c
@@ -118,7 +118,6 @@ static const struct resource ether_resources[] __initconst = {
118}; 118};
119 119
120static const struct platform_device_info ether_info __initconst = { 120static const struct platform_device_info ether_info __initconst = {
121 .parent = &platform_bus,
122 .name = "r8a7791-ether", 121 .name = "r8a7791-ether",
123 .id = -1, 122 .id = -1,
124 .res = ether_resources, 123 .res = ether_resources,
@@ -230,7 +229,6 @@ static const struct resource sata0_resources[] __initconst = {
230}; 229};
231 230
232static const struct platform_device_info sata0_info __initconst = { 231static const struct platform_device_info sata0_info __initconst = {
233 .parent = &platform_bus,
234 .name = "sata-r8a7791", 232 .name = "sata-r8a7791",
235 .id = 0, 233 .id = 0,
236 .res = sata0_resources, 234 .res = sata0_resources,
@@ -439,13 +437,13 @@ static void __init koelsch_add_standard_devices(void)
439 r8a7791_pinmux_init(); 437 r8a7791_pinmux_init();
440 r8a7791_add_standard_devices(); 438 r8a7791_add_standard_devices();
441 platform_device_register_full(&ether_info); 439 platform_device_register_full(&ether_info);
442 platform_device_register_data(&platform_bus, "leds-gpio", -1, 440 platform_device_register_data(NULL, "leds-gpio", -1,
443 &koelsch_leds_pdata, 441 &koelsch_leds_pdata,
444 sizeof(koelsch_leds_pdata)); 442 sizeof(koelsch_leds_pdata));
445 platform_device_register_data(&platform_bus, "gpio-keys", -1, 443 platform_device_register_data(NULL, "gpio-keys", -1,
446 &koelsch_keys_pdata, 444 &koelsch_keys_pdata,
447 sizeof(koelsch_keys_pdata)); 445 sizeof(koelsch_keys_pdata));
448 platform_device_register_resndata(&platform_bus, "qspi", 0, 446 platform_device_register_resndata(NULL, "qspi", 0,
449 qspi_resources, 447 qspi_resources,
450 ARRAY_SIZE(qspi_resources), 448 ARRAY_SIZE(qspi_resources),
451 &qspi_pdata, sizeof(qspi_pdata)); 449 &qspi_pdata, sizeof(qspi_pdata));
@@ -460,28 +458,28 @@ static void __init koelsch_add_standard_devices(void)
460 koelsch_add_i2c(4); 458 koelsch_add_i2c(4);
461 koelsch_add_i2c(5); 459 koelsch_add_i2c(5);
462 460
463 platform_device_register_data(&platform_bus, "reg-fixed-voltage", 0, 461 platform_device_register_data(NULL, "reg-fixed-voltage", 0,
464 &vcc_sdhi0_info, sizeof(struct fixed_voltage_config)); 462 &vcc_sdhi0_info, sizeof(struct fixed_voltage_config));
465 platform_device_register_data(&platform_bus, "reg-fixed-voltage", 1, 463 platform_device_register_data(NULL, "reg-fixed-voltage", 1,
466 &vcc_sdhi1_info, sizeof(struct fixed_voltage_config)); 464 &vcc_sdhi1_info, sizeof(struct fixed_voltage_config));
467 platform_device_register_data(&platform_bus, "reg-fixed-voltage", 2, 465 platform_device_register_data(NULL, "reg-fixed-voltage", 2,
468 &vcc_sdhi2_info, sizeof(struct fixed_voltage_config)); 466 &vcc_sdhi2_info, sizeof(struct fixed_voltage_config));
469 platform_device_register_data(&platform_bus, "gpio-regulator", 0, 467 platform_device_register_data(NULL, "gpio-regulator", 0,
470 &vccq_sdhi0_info, sizeof(struct gpio_regulator_config)); 468 &vccq_sdhi0_info, sizeof(struct gpio_regulator_config));
471 platform_device_register_data(&platform_bus, "gpio-regulator", 1, 469 platform_device_register_data(NULL, "gpio-regulator", 1,
472 &vccq_sdhi1_info, sizeof(struct gpio_regulator_config)); 470 &vccq_sdhi1_info, sizeof(struct gpio_regulator_config));
473 platform_device_register_data(&platform_bus, "gpio-regulator", 2, 471 platform_device_register_data(NULL, "gpio-regulator", 2,
474 &vccq_sdhi2_info, sizeof(struct gpio_regulator_config)); 472 &vccq_sdhi2_info, sizeof(struct gpio_regulator_config));
475 473
476 platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 0, 474 platform_device_register_resndata(NULL, "sh_mobile_sdhi", 0,
477 sdhi0_resources, ARRAY_SIZE(sdhi0_resources), 475 sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
478 &sdhi0_info, sizeof(struct sh_mobile_sdhi_info)); 476 &sdhi0_info, sizeof(struct sh_mobile_sdhi_info));
479 477
480 platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 1, 478 platform_device_register_resndata(NULL, "sh_mobile_sdhi", 1,
481 sdhi1_resources, ARRAY_SIZE(sdhi1_resources), 479 sdhi1_resources, ARRAY_SIZE(sdhi1_resources),
482 &sdhi1_info, sizeof(struct sh_mobile_sdhi_info)); 480 &sdhi1_info, sizeof(struct sh_mobile_sdhi_info));
483 481
484 platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 2, 482 platform_device_register_resndata(NULL, "sh_mobile_sdhi", 2,
485 sdhi2_resources, ARRAY_SIZE(sdhi2_resources), 483 sdhi2_resources, ARRAY_SIZE(sdhi2_resources),
486 &sdhi2_info, sizeof(struct sh_mobile_sdhi_info)); 484 &sdhi2_info, sizeof(struct sh_mobile_sdhi_info));
487 485
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index f8b1e05463cc..d18296164e89 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -277,7 +277,6 @@ static const struct resource ether_resources[] __initconst = {
277}; 277};
278 278
279static const struct platform_device_info ether_info __initconst = { 279static const struct platform_device_info ether_info __initconst = {
280 .parent = &platform_bus,
281 .name = "r8a7790-ether", 280 .name = "r8a7790-ether",
282 .id = -1, 281 .id = -1,
283 .res = ether_resources, 282 .res = ether_resources,
@@ -354,7 +353,6 @@ static void __init lager_add_vin_device(unsigned idx,
354 struct rcar_vin_platform_data *pdata) 353 struct rcar_vin_platform_data *pdata)
355{ 354{
356 struct platform_device_info vin_info = { 355 struct platform_device_info vin_info = {
357 .parent = &platform_bus,
358 .name = "r8a7790-vin", 356 .name = "r8a7790-vin",
359 .id = idx, 357 .id = idx,
360 .res = &vin_resources[idx * 2], 358 .res = &vin_resources[idx * 2],
@@ -391,7 +389,7 @@ LAGER_CAMERA(1, "adv7180", 0x20, NULL, RCAR_VIN_BT656);
391 389
392static void __init lager_add_camera1_device(void) 390static void __init lager_add_camera1_device(void)
393{ 391{
394 platform_device_register_data(&platform_bus, "soc-camera-pdrv", 1, 392 platform_device_register_data(NULL, "soc-camera-pdrv", 1,
395 &cam1_link, sizeof(cam1_link)); 393 &cam1_link, sizeof(cam1_link));
396 lager_add_vin_device(1, &vin1_pdata); 394 lager_add_vin_device(1, &vin1_pdata);
397} 395}
@@ -403,7 +401,6 @@ static const struct resource sata1_resources[] __initconst = {
403}; 401};
404 402
405static const struct platform_device_info sata1_info __initconst = { 403static const struct platform_device_info sata1_info __initconst = {
406 .parent = &platform_bus,
407 .name = "sata-r8a7790", 404 .name = "sata-r8a7790",
408 .id = 1, 405 .id = 1,
409 .res = sata1_resources, 406 .res = sata1_resources,
@@ -533,7 +530,7 @@ static struct usbhs_private usbhs_priv __initdata = {
533static void __init lager_register_usbhs(void) 530static void __init lager_register_usbhs(void)
534{ 531{
535 usb_bind_phy("renesas_usbhs", 0, "usb_phy_rcar_gen2"); 532 usb_bind_phy("renesas_usbhs", 0, "usb_phy_rcar_gen2");
536 platform_device_register_resndata(&platform_bus, 533 platform_device_register_resndata(NULL,
537 "renesas_usbhs", -1, 534 "renesas_usbhs", -1,
538 usbhs_resources, 535 usbhs_resources,
539 ARRAY_SIZE(usbhs_resources), 536 ARRAY_SIZE(usbhs_resources),
@@ -608,7 +605,6 @@ static struct asoc_simple_card_info rsnd_card_info = {
608static void __init lager_add_rsnd_device(void) 605static void __init lager_add_rsnd_device(void)
609{ 606{
610 struct platform_device_info cardinfo = { 607 struct platform_device_info cardinfo = {
611 .parent = &platform_bus,
612 .name = "asoc-simple-card", 608 .name = "asoc-simple-card",
613 .id = -1, 609 .id = -1,
614 .data = &rsnd_card_info, 610 .data = &rsnd_card_info,
@@ -620,7 +616,7 @@ static void __init lager_add_rsnd_device(void)
620 ARRAY_SIZE(i2c2_devices)); 616 ARRAY_SIZE(i2c2_devices));
621 617
622 platform_device_register_resndata( 618 platform_device_register_resndata(
623 &platform_bus, "rcar_sound", -1, 619 NULL, "rcar_sound", -1,
624 rsnd_resources, ARRAY_SIZE(rsnd_resources), 620 rsnd_resources, ARRAY_SIZE(rsnd_resources),
625 &rsnd_info, sizeof(rsnd_info)); 621 &rsnd_info, sizeof(rsnd_info));
626 622
@@ -663,7 +659,6 @@ static const struct resource pci1_resources[] __initconst = {
663}; 659};
664 660
665static const struct platform_device_info pci1_info __initconst = { 661static const struct platform_device_info pci1_info __initconst = {
666 .parent = &platform_bus,
667 .name = "pci-rcar-gen2", 662 .name = "pci-rcar-gen2",
668 .id = 1, 663 .id = 1,
669 .res = pci1_resources, 664 .res = pci1_resources,
@@ -684,7 +679,6 @@ static const struct resource pci2_resources[] __initconst = {
684}; 679};
685 680
686static const struct platform_device_info pci2_info __initconst = { 681static const struct platform_device_info pci2_info __initconst = {
687 .parent = &platform_bus,
688 .name = "pci-rcar-gen2", 682 .name = "pci-rcar-gen2",
689 .id = 2, 683 .id = 2,
690 .res = pci2_resources, 684 .res = pci2_resources,
@@ -795,16 +789,16 @@ static void __init lager_add_standard_devices(void)
795 r8a7790_pinmux_init(); 789 r8a7790_pinmux_init();
796 790
797 r8a7790_add_standard_devices(); 791 r8a7790_add_standard_devices();
798 platform_device_register_data(&platform_bus, "leds-gpio", -1, 792 platform_device_register_data(NULL, "leds-gpio", -1,
799 &lager_leds_pdata, 793 &lager_leds_pdata,
800 sizeof(lager_leds_pdata)); 794 sizeof(lager_leds_pdata));
801 platform_device_register_data(&platform_bus, "gpio-keys", -1, 795 platform_device_register_data(NULL, "gpio-keys", -1,
802 &lager_keys_pdata, 796 &lager_keys_pdata,
803 sizeof(lager_keys_pdata)); 797 sizeof(lager_keys_pdata));
804 regulator_register_always_on(fixed_regulator_idx++, 798 regulator_register_always_on(fixed_regulator_idx++,
805 "fixed-3.3V", fixed3v3_power_consumers, 799 "fixed-3.3V", fixed3v3_power_consumers,
806 ARRAY_SIZE(fixed3v3_power_consumers), 3300000); 800 ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
807 platform_device_register_resndata(&platform_bus, "sh_mmcif", 1, 801 platform_device_register_resndata(NULL, "sh_mmcif", 1,
808 mmcif1_resources, ARRAY_SIZE(mmcif1_resources), 802 mmcif1_resources, ARRAY_SIZE(mmcif1_resources),
809 &mmcif1_pdata, sizeof(mmcif1_pdata)); 803 &mmcif1_pdata, sizeof(mmcif1_pdata));
810 804
@@ -812,27 +806,27 @@ static void __init lager_add_standard_devices(void)
812 806
813 lager_add_du_device(); 807 lager_add_du_device();
814 808
815 platform_device_register_resndata(&platform_bus, "qspi", 0, 809 platform_device_register_resndata(NULL, "qspi", 0,
816 qspi_resources, 810 qspi_resources,
817 ARRAY_SIZE(qspi_resources), 811 ARRAY_SIZE(qspi_resources),
818 &qspi_pdata, sizeof(qspi_pdata)); 812 &qspi_pdata, sizeof(qspi_pdata));
819 spi_register_board_info(spi_info, ARRAY_SIZE(spi_info)); 813 spi_register_board_info(spi_info, ARRAY_SIZE(spi_info));
820 814
821 platform_device_register_data(&platform_bus, "reg-fixed-voltage", fixed_regulator_idx++, 815 platform_device_register_data(NULL, "reg-fixed-voltage", fixed_regulator_idx++,
822 &vcc_sdhi0_info, sizeof(struct fixed_voltage_config)); 816 &vcc_sdhi0_info, sizeof(struct fixed_voltage_config));
823 platform_device_register_data(&platform_bus, "reg-fixed-voltage", fixed_regulator_idx++, 817 platform_device_register_data(NULL, "reg-fixed-voltage", fixed_regulator_idx++,
824 &vcc_sdhi2_info, sizeof(struct fixed_voltage_config)); 818 &vcc_sdhi2_info, sizeof(struct fixed_voltage_config));
825 819
826 platform_device_register_data(&platform_bus, "gpio-regulator", gpio_regulator_idx++, 820 platform_device_register_data(NULL, "gpio-regulator", gpio_regulator_idx++,
827 &vccq_sdhi0_info, sizeof(struct gpio_regulator_config)); 821 &vccq_sdhi0_info, sizeof(struct gpio_regulator_config));
828 platform_device_register_data(&platform_bus, "gpio-regulator", gpio_regulator_idx++, 822 platform_device_register_data(NULL, "gpio-regulator", gpio_regulator_idx++,
829 &vccq_sdhi2_info, sizeof(struct gpio_regulator_config)); 823 &vccq_sdhi2_info, sizeof(struct gpio_regulator_config));
830 824
831 lager_add_camera1_device(); 825 lager_add_camera1_device();
832 826
833 platform_device_register_full(&sata1_info); 827 platform_device_register_full(&sata1_info);
834 828
835 platform_device_register_resndata(&platform_bus, "usb_phy_rcar_gen2", 829 platform_device_register_resndata(NULL, "usb_phy_rcar_gen2",
836 -1, usbhs_phy_resources, 830 -1, usbhs_phy_resources,
837 ARRAY_SIZE(usbhs_phy_resources), 831 ARRAY_SIZE(usbhs_phy_resources),
838 &usbhs_phy_pdata, 832 &usbhs_phy_pdata,
@@ -843,10 +837,10 @@ static void __init lager_add_standard_devices(void)
843 837
844 lager_add_rsnd_device(); 838 lager_add_rsnd_device();
845 839
846 platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 0, 840 platform_device_register_resndata(NULL, "sh_mobile_sdhi", 0,
847 sdhi0_resources, ARRAY_SIZE(sdhi0_resources), 841 sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
848 &sdhi0_info, sizeof(struct sh_mobile_sdhi_info)); 842 &sdhi0_info, sizeof(struct sh_mobile_sdhi_info));
849 platform_device_register_resndata(&platform_bus, "sh_mobile_sdhi", 2, 843 platform_device_register_resndata(NULL, "sh_mobile_sdhi", 2,
850 sdhi2_resources, ARRAY_SIZE(sdhi2_resources), 844 sdhi2_resources, ARRAY_SIZE(sdhi2_resources),
851 &sdhi2_info, sizeof(struct sh_mobile_sdhi_info)); 845 &sdhi2_info, sizeof(struct sh_mobile_sdhi_info));
852} 846}
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index d832a4477b4b..6ed324ce848f 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -272,7 +272,6 @@ static struct resource vin##idx##_resources[] __initdata = { \
272}; \ 272}; \
273 \ 273 \
274static struct platform_device_info vin##idx##_info __initdata = { \ 274static struct platform_device_info vin##idx##_info __initdata = { \
275 .parent = &platform_bus, \
276 .name = "r8a7779-vin", \ 275 .name = "r8a7779-vin", \
277 .id = idx, \ 276 .id = idx, \
278 .res = vin##idx##_resources, \ 277 .res = vin##idx##_resources, \
diff --git a/arch/arm/mach-shmobile/setup-r7s72100.c b/arch/arm/mach-shmobile/setup-r7s72100.c
index 412e179429cd..3885a598c66b 100644
--- a/arch/arm/mach-shmobile/setup-r7s72100.c
+++ b/arch/arm/mach-shmobile/setup-r7s72100.c
@@ -33,7 +33,7 @@ static struct resource mtu2_resources[] __initdata = {
33}; 33};
34 34
35#define r7s72100_register_mtu2() \ 35#define r7s72100_register_mtu2() \
36 platform_device_register_resndata(&platform_bus, "sh-mtu2", \ 36 platform_device_register_resndata(NULL, "sh-mtu2", \
37 -1, mtu2_resources, \ 37 -1, mtu2_resources, \
38 ARRAY_SIZE(mtu2_resources), \ 38 ARRAY_SIZE(mtu2_resources), \
39 NULL, 0) 39 NULL, 0)
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index 9333770cfac2..aaaaf6e8b706 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -68,7 +68,7 @@ R8A73A4_SCIFB(4, 0xe6ce0000, gic_spi(150)); /* SCIFB2 */
68R8A73A4_SCIFB(5, 0xe6cf0000, gic_spi(151)); /* SCIFB3 */ 68R8A73A4_SCIFB(5, 0xe6cf0000, gic_spi(151)); /* SCIFB3 */
69 69
70#define r8a73a4_register_scif(index) \ 70#define r8a73a4_register_scif(index) \
71 platform_device_register_resndata(&platform_bus, "sh-sci", index, \ 71 platform_device_register_resndata(NULL, "sh-sci", index, \
72 scif##index##_resources, \ 72 scif##index##_resources, \
73 ARRAY_SIZE(scif##index##_resources), \ 73 ARRAY_SIZE(scif##index##_resources), \
74 &scif##index##_platform_data, \ 74 &scif##index##_platform_data, \
@@ -149,7 +149,7 @@ static const struct resource irqc1_resources[] = {
149}; 149};
150 150
151#define r8a73a4_register_irqc(idx) \ 151#define r8a73a4_register_irqc(idx) \
152 platform_device_register_resndata(&platform_bus, "renesas_irqc", \ 152 platform_device_register_resndata(NULL, "renesas_irqc", \
153 idx, irqc##idx##_resources, \ 153 idx, irqc##idx##_resources, \
154 ARRAY_SIZE(irqc##idx##_resources), \ 154 ARRAY_SIZE(irqc##idx##_resources), \
155 &irqc##idx##_data, \ 155 &irqc##idx##_data, \
@@ -179,7 +179,7 @@ static struct resource cmt1_resources[] = {
179}; 179};
180 180
181#define r8a7790_register_cmt(idx) \ 181#define r8a7790_register_cmt(idx) \
182 platform_device_register_resndata(&platform_bus, "sh-cmt-48-gen2", \ 182 platform_device_register_resndata(NULL, "sh-cmt-48-gen2", \
183 idx, cmt##idx##_resources, \ 183 idx, cmt##idx##_resources, \
184 ARRAY_SIZE(cmt##idx##_resources), \ 184 ARRAY_SIZE(cmt##idx##_resources), \
185 &cmt##idx##_platform_data, \ 185 &cmt##idx##_platform_data, \
@@ -280,7 +280,7 @@ static struct resource dma_resources[] = {
280}; 280};
281 281
282#define r8a73a4_register_dmac() \ 282#define r8a73a4_register_dmac() \
283 platform_device_register_resndata(&platform_bus, "sh-dma-engine", 0, \ 283 platform_device_register_resndata(NULL, "sh-dma-engine", 0, \
284 dma_resources, ARRAY_SIZE(dma_resources), \ 284 dma_resources, ARRAY_SIZE(dma_resources), \
285 &dma_pdata, sizeof(dma_pdata)) 285 &dma_pdata, sizeof(dma_pdata))
286 286
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index d311ef903b39..5de7b33295d4 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -64,7 +64,7 @@ R8A7778_SCIF(4, 0xffe44000, gic_iid(0x6a));
64R8A7778_SCIF(5, 0xffe45000, gic_iid(0x6b)); 64R8A7778_SCIF(5, 0xffe45000, gic_iid(0x6b));
65 65
66#define r8a7778_register_scif(index) \ 66#define r8a7778_register_scif(index) \
67 platform_device_register_resndata(&platform_bus, "sh-sci", index, \ 67 platform_device_register_resndata(NULL, "sh-sci", index, \
68 scif##index##_resources, \ 68 scif##index##_resources, \
69 ARRAY_SIZE(scif##index##_resources), \ 69 ARRAY_SIZE(scif##index##_resources), \
70 &scif##index##_platform_data, \ 70 &scif##index##_platform_data, \
@@ -84,7 +84,7 @@ static struct resource sh_tmu0_resources[] = {
84 84
85#define r8a7778_register_tmu(idx) \ 85#define r8a7778_register_tmu(idx) \
86 platform_device_register_resndata( \ 86 platform_device_register_resndata( \
87 &platform_bus, "sh-tmu", idx, \ 87 NULL, "sh-tmu", idx, \
88 sh_tmu##idx##_resources, \ 88 sh_tmu##idx##_resources, \
89 ARRAY_SIZE(sh_tmu##idx##_resources), \ 89 ARRAY_SIZE(sh_tmu##idx##_resources), \
90 &sh_tmu##idx##_platform_data, \ 90 &sh_tmu##idx##_platform_data, \
@@ -173,7 +173,6 @@ static struct resource ohci_resources[] __initdata = {
173 173
174#define USB_PLATFORM_INFO(hci) \ 174#define USB_PLATFORM_INFO(hci) \
175static struct platform_device_info hci##_info __initdata = { \ 175static struct platform_device_info hci##_info __initdata = { \
176 .parent = &platform_bus, \
177 .name = #hci "-platform", \ 176 .name = #hci "-platform", \
178 .id = -1, \ 177 .id = -1, \
179 .res = hci##_resources, \ 178 .res = hci##_resources, \
@@ -212,7 +211,7 @@ R8A7778_GPIO(4);
212 211
213#define r8a7778_register_gpio(idx) \ 212#define r8a7778_register_gpio(idx) \
214 platform_device_register_resndata( \ 213 platform_device_register_resndata( \
215 &platform_bus, "gpio_rcar", idx, \ 214 NULL, "gpio_rcar", idx, \
216 r8a7778_gpio##idx##_resources, \ 215 r8a7778_gpio##idx##_resources, \
217 ARRAY_SIZE(r8a7778_gpio##idx##_resources), \ 216 ARRAY_SIZE(r8a7778_gpio##idx##_resources), \
218 &r8a7778_gpio##idx##_platform_data, \ 217 &r8a7778_gpio##idx##_platform_data, \
@@ -496,8 +495,8 @@ static struct resource hpb_dmae_resources[] __initdata = {
496 495
497static void __init r8a7778_register_hpb_dmae(void) 496static void __init r8a7778_register_hpb_dmae(void)
498{ 497{
499 platform_device_register_resndata(&platform_bus, "hpb-dma-engine", -1, 498 platform_device_register_resndata(NULL, "hpb-dma-engine",
500 hpb_dmae_resources, 499 -1, hpb_dmae_resources,
501 ARRAY_SIZE(hpb_dmae_resources), 500 ARRAY_SIZE(hpb_dmae_resources),
502 &dma_platform_data, 501 &dma_platform_data,
503 sizeof(dma_platform_data)); 502 sizeof(dma_platform_data));
@@ -565,7 +564,7 @@ void __init r8a7778_init_irq_extpin(int irlm)
565 r8a7778_init_irq_extpin_dt(irlm); 564 r8a7778_init_irq_extpin_dt(irlm);
566 if (irlm) 565 if (irlm)
567 platform_device_register_resndata( 566 platform_device_register_resndata(
568 &platform_bus, "renesas_intc_irqpin", -1, 567 NULL, "renesas_intc_irqpin", -1,
569 irqpin_resources, ARRAY_SIZE(irqpin_resources), 568 irqpin_resources, ARRAY_SIZE(irqpin_resources),
570 &irqpin_platform_data, sizeof(irqpin_platform_data)); 569 &irqpin_platform_data, sizeof(irqpin_platform_data));
571} 570}
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index aba4ed652d54..9c79182d0568 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -123,7 +123,7 @@ void __init r8a7779_init_irq_extpin(int irlm)
123 r8a7779_init_irq_extpin_dt(irlm); 123 r8a7779_init_irq_extpin_dt(irlm);
124 if (irlm) 124 if (irlm)
125 platform_device_register_resndata( 125 platform_device_register_resndata(
126 &platform_bus, "renesas_intc_irqpin", -1, 126 NULL, "renesas_intc_irqpin", -1,
127 irqpin0_resources, ARRAY_SIZE(irqpin0_resources), 127 irqpin0_resources, ARRAY_SIZE(irqpin0_resources),
128 &irqpin0_platform_data, sizeof(irqpin0_platform_data)); 128 &irqpin0_platform_data, sizeof(irqpin0_platform_data));
129} 129}
@@ -632,8 +632,8 @@ static struct resource hpb_dmae_resources[] __initdata = {
632 632
633static void __init r8a7779_register_hpb_dmae(void) 633static void __init r8a7779_register_hpb_dmae(void)
634{ 634{
635 platform_device_register_resndata(&platform_bus, "hpb-dma-engine", -1, 635 platform_device_register_resndata(NULL, "hpb-dma-engine",
636 hpb_dmae_resources, 636 -1, hpb_dmae_resources,
637 ARRAY_SIZE(hpb_dmae_resources), 637 ARRAY_SIZE(hpb_dmae_resources),
638 &dma_platform_data, 638 &dma_platform_data,
639 sizeof(dma_platform_data)); 639 sizeof(dma_platform_data));
diff --git a/arch/arm/mach-shmobile/setup-r8a7790.c b/arch/arm/mach-shmobile/setup-r8a7790.c
index 6bd08b127fa4..10e6768968f7 100644
--- a/arch/arm/mach-shmobile/setup-r8a7790.c
+++ b/arch/arm/mach-shmobile/setup-r8a7790.c
@@ -113,7 +113,7 @@ static struct resource r8a7790_audio_dmac_resources[] = {
113 113
114#define r8a7790_register_audio_dmac(id) \ 114#define r8a7790_register_audio_dmac(id) \
115 platform_device_register_resndata( \ 115 platform_device_register_resndata( \
116 &platform_bus, "sh-dma-engine", id, \ 116 NULL, "sh-dma-engine", id, \
117 &r8a7790_audio_dmac_resources[id * 3], 3, \ 117 &r8a7790_audio_dmac_resources[id * 3], 3, \
118 &r8a7790_audio_dmac_platform_data, \ 118 &r8a7790_audio_dmac_platform_data, \
119 sizeof(r8a7790_audio_dmac_platform_data)) 119 sizeof(r8a7790_audio_dmac_platform_data))
@@ -149,7 +149,7 @@ R8A7790_GPIO(4);
149R8A7790_GPIO(5); 149R8A7790_GPIO(5);
150 150
151#define r8a7790_register_gpio(idx) \ 151#define r8a7790_register_gpio(idx) \
152 platform_device_register_resndata(&platform_bus, "gpio_rcar", idx, \ 152 platform_device_register_resndata(NULL, "gpio_rcar", idx, \
153 r8a7790_gpio##idx##_resources, \ 153 r8a7790_gpio##idx##_resources, \
154 ARRAY_SIZE(r8a7790_gpio##idx##_resources), \ 154 ARRAY_SIZE(r8a7790_gpio##idx##_resources), \
155 &r8a7790_gpio##idx##_platform_data, \ 155 &r8a7790_gpio##idx##_platform_data, \
@@ -227,7 +227,7 @@ R8A7790_HSCIF(8, 0xe62c0000, gic_spi(154)); /* HSCIF0 */
227R8A7790_HSCIF(9, 0xe62c8000, gic_spi(155)); /* HSCIF1 */ 227R8A7790_HSCIF(9, 0xe62c8000, gic_spi(155)); /* HSCIF1 */
228 228
229#define r8a7790_register_scif(index) \ 229#define r8a7790_register_scif(index) \
230 platform_device_register_resndata(&platform_bus, "sh-sci", index, \ 230 platform_device_register_resndata(NULL, "sh-sci", index, \
231 scif##index##_resources, \ 231 scif##index##_resources, \
232 ARRAY_SIZE(scif##index##_resources), \ 232 ARRAY_SIZE(scif##index##_resources), \
233 &scif##index##_platform_data, \ 233 &scif##index##_platform_data, \
@@ -246,7 +246,7 @@ static const struct resource irqc0_resources[] __initconst = {
246}; 246};
247 247
248#define r8a7790_register_irqc(idx) \ 248#define r8a7790_register_irqc(idx) \
249 platform_device_register_resndata(&platform_bus, "renesas_irqc", \ 249 platform_device_register_resndata(NULL, "renesas_irqc", \
250 idx, irqc##idx##_resources, \ 250 idx, irqc##idx##_resources, \
251 ARRAY_SIZE(irqc##idx##_resources), \ 251 ARRAY_SIZE(irqc##idx##_resources), \
252 &irqc##idx##_data, \ 252 &irqc##idx##_data, \
@@ -273,7 +273,7 @@ static struct resource cmt0_resources[] = {
273}; 273};
274 274
275#define r8a7790_register_cmt(idx) \ 275#define r8a7790_register_cmt(idx) \
276 platform_device_register_resndata(&platform_bus, "sh-cmt-48-gen2", \ 276 platform_device_register_resndata(NULL, "sh-cmt-48-gen2", \
277 idx, cmt##idx##_resources, \ 277 idx, cmt##idx##_resources, \
278 ARRAY_SIZE(cmt##idx##_resources), \ 278 ARRAY_SIZE(cmt##idx##_resources), \
279 &cmt##idx##_platform_data, \ 279 &cmt##idx##_platform_data, \
diff --git a/arch/arm/mach-shmobile/setup-r8a7791.c b/arch/arm/mach-shmobile/setup-r8a7791.c
index 04a96ddb3224..fd5443715b8d 100644
--- a/arch/arm/mach-shmobile/setup-r8a7791.c
+++ b/arch/arm/mach-shmobile/setup-r8a7791.c
@@ -65,7 +65,7 @@ R8A7791_GPIO(6, 0xe6055400, 32);
65R8A7791_GPIO(7, 0xe6055800, 26); 65R8A7791_GPIO(7, 0xe6055800, 26);
66 66
67#define r8a7791_register_gpio(idx) \ 67#define r8a7791_register_gpio(idx) \
68 platform_device_register_resndata(&platform_bus, "gpio_rcar", idx, \ 68 platform_device_register_resndata(NULL, "gpio_rcar", idx, \
69 r8a7791_gpio##idx##_resources, \ 69 r8a7791_gpio##idx##_resources, \
70 ARRAY_SIZE(r8a7791_gpio##idx##_resources), \ 70 ARRAY_SIZE(r8a7791_gpio##idx##_resources), \
71 &r8a7791_gpio##idx##_platform_data, \ 71 &r8a7791_gpio##idx##_platform_data, \
@@ -122,7 +122,7 @@ R8A7791_SCIFA(13, 0xe6c78000, gic_spi(30)); /* SCIFA4 */
122R8A7791_SCIFA(14, 0xe6c80000, gic_spi(31)); /* SCIFA5 */ 122R8A7791_SCIFA(14, 0xe6c80000, gic_spi(31)); /* SCIFA5 */
123 123
124#define r8a7791_register_scif(index) \ 124#define r8a7791_register_scif(index) \
125 platform_device_register_resndata(&platform_bus, "sh-sci", index, \ 125 platform_device_register_resndata(NULL, "sh-sci", index, \
126 scif##index##_resources, \ 126 scif##index##_resources, \
127 ARRAY_SIZE(scif##index##_resources), \ 127 ARRAY_SIZE(scif##index##_resources), \
128 &scif##index##_platform_data, \ 128 &scif##index##_platform_data, \
@@ -138,7 +138,7 @@ static struct resource cmt0_resources[] = {
138}; 138};
139 139
140#define r8a7791_register_cmt(idx) \ 140#define r8a7791_register_cmt(idx) \
141 platform_device_register_resndata(&platform_bus, "sh-cmt-48-gen2", \ 141 platform_device_register_resndata(NULL, "sh-cmt-48-gen2", \
142 idx, cmt##idx##_resources, \ 142 idx, cmt##idx##_resources, \
143 ARRAY_SIZE(cmt##idx##_resources), \ 143 ARRAY_SIZE(cmt##idx##_resources), \
144 &cmt##idx##_platform_data, \ 144 &cmt##idx##_platform_data, \
@@ -163,7 +163,7 @@ static struct resource irqc0_resources[] = {
163}; 163};
164 164
165#define r8a7791_register_irqc(idx) \ 165#define r8a7791_register_irqc(idx) \
166 platform_device_register_resndata(&platform_bus, "renesas_irqc", \ 166 platform_device_register_resndata(NULL, "renesas_irqc", \
167 idx, irqc##idx##_resources, \ 167 idx, irqc##idx##_resources, \
168 ARRAY_SIZE(irqc##idx##_resources), \ 168 ARRAY_SIZE(irqc##idx##_resources), \
169 &irqc##idx##_data, \ 169 &irqc##idx##_data, \
diff --git a/arch/unicore32/kernel/puv3-core.c b/arch/unicore32/kernel/puv3-core.c
index 254adeecc61a..438dd2edba4f 100644
--- a/arch/unicore32/kernel/puv3-core.c
+++ b/arch/unicore32/kernel/puv3-core.c
@@ -272,7 +272,7 @@ void __init puv3_core_init(void)
272 platform_device_register_simple("PKUnity-v3-UART", 1, 272 platform_device_register_simple("PKUnity-v3-UART", 1,
273 puv3_uart1_resources, ARRAY_SIZE(puv3_uart1_resources)); 273 puv3_uart1_resources, ARRAY_SIZE(puv3_uart1_resources));
274 platform_device_register_simple("PKUnity-v3-AC97", -1, NULL, 0); 274 platform_device_register_simple("PKUnity-v3-AC97", -1, NULL, 0);
275 platform_device_register_resndata(&platform_bus, "musb_hdrc", -1, 275 platform_device_register_resndata(NULL, "musb_hdrc", -1,
276 puv3_usb_resources, ARRAY_SIZE(puv3_usb_resources), 276 puv3_usb_resources, ARRAY_SIZE(puv3_usb_resources),
277 &puv3_usb_plat, sizeof(puv3_usb_plat)); 277 &puv3_usb_plat, sizeof(puv3_usb_plat));
278} 278}
diff --git a/arch/unicore32/kernel/puv3-nb0916.c b/arch/unicore32/kernel/puv3-nb0916.c
index 0c6618e71897..46ebfdccbc31 100644
--- a/arch/unicore32/kernel/puv3-nb0916.c
+++ b/arch/unicore32/kernel/puv3-nb0916.c
@@ -112,13 +112,13 @@ int __init mach_nb0916_init(void)
112 platform_device_register_simple("PKUnity-v3-I2C", -1, 112 platform_device_register_simple("PKUnity-v3-I2C", -1,
113 puv3_i2c_resources, ARRAY_SIZE(puv3_i2c_resources)); 113 puv3_i2c_resources, ARRAY_SIZE(puv3_i2c_resources));
114 114
115 platform_device_register_data(&platform_bus, "pwm-backlight", -1, 115 platform_device_register_data(NULL, "pwm-backlight", -1,
116 &nb0916_backlight_data, sizeof(nb0916_backlight_data)); 116 &nb0916_backlight_data, sizeof(nb0916_backlight_data));
117 117
118 platform_device_register_data(&platform_bus, "gpio-keys", -1, 118 platform_device_register_data(NULL, "gpio-keys", -1,
119 &nb0916_gpio_button_data, sizeof(nb0916_gpio_button_data)); 119 &nb0916_gpio_button_data, sizeof(nb0916_gpio_button_data));
120 120
121 platform_device_register_resndata(&platform_bus, "physmap-flash", -1, 121 platform_device_register_resndata(NULL, "physmap-flash", -1,
122 &physmap_flash_resource, 1, 122 &physmap_flash_resource, 1,
123 &physmap_flash_data, sizeof(physmap_flash_data)); 123 &physmap_flash_data, sizeof(physmap_flash_data));
124 124
diff --git a/drivers/Makefile b/drivers/Makefile
index 54bfae1f09a4..ebee55537a05 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/
61 61
62obj-$(CONFIG_PARPORT) += parport/ 62obj-$(CONFIG_PARPORT) += parport/
63obj-y += base/ block/ misc/ mfd/ nfc/ 63obj-y += base/ block/ misc/ mfd/ nfc/
64obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
64obj-$(CONFIG_NUBUS) += nubus/ 65obj-$(CONFIG_NUBUS) += nubus/
65obj-y += macintosh/ 66obj-y += macintosh/
66obj-$(CONFIG_IDE) += ide/ 67obj-$(CONFIG_IDE) += ide/
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 23b8726962af..88500fed3c7a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -149,15 +149,21 @@ config EXTRA_FIRMWARE_DIR
149 some other directory containing the firmware files. 149 some other directory containing the firmware files.
150 150
151config FW_LOADER_USER_HELPER 151config FW_LOADER_USER_HELPER
152 bool
153
154config FW_LOADER_USER_HELPER_FALLBACK
152 bool "Fallback user-helper invocation for firmware loading" 155 bool "Fallback user-helper invocation for firmware loading"
153 depends on FW_LOADER 156 depends on FW_LOADER
154 default y 157 select FW_LOADER_USER_HELPER
155 help 158 help
156 This option enables / disables the invocation of user-helper 159 This option enables / disables the invocation of user-helper
157 (e.g. udev) for loading firmware files as a fallback after the 160 (e.g. udev) for loading firmware files as a fallback after the
158 direct file loading in kernel fails. The user-mode helper is 161 direct file loading in kernel fails. The user-mode helper is
159 no longer required unless you have a special firmware file that 162 no longer required unless you have a special firmware file that
160 resides in a non-standard path. 163 resides in a non-standard path. Moreover, the udev support has
164 been deprecated upstream.
165
166 If you are unsure about this, say N here.
161 167
162config DEBUG_DRIVER 168config DEBUG_DRIVER
163 bool "Driver Core verbose debug messages" 169 bool "Driver Core verbose debug messages"
@@ -208,6 +214,15 @@ config DMA_SHARED_BUFFER
208 APIs extension; the file's descriptor can then be passed on to other 214 APIs extension; the file's descriptor can then be passed on to other
209 driver. 215 driver.
210 216
217config FENCE_TRACE
218 bool "Enable verbose FENCE_TRACE messages"
219 depends on DMA_SHARED_BUFFER
220 help
221 Enable the FENCE_TRACE printks. This will add extra
222 spam to the console log, but will make it easier to diagnose
223 lockup related problems for dma-buffers shared across multiple
224 devices.
225
211config DMA_CMA 226config DMA_CMA
212 bool "DMA Contiguous Memory Allocator" 227 bool "DMA Contiguous Memory Allocator"
213 depends on HAVE_DMA_CONTIGUOUS && CMA 228 depends on HAVE_DMA_CONTIGUOUS && CMA
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 04b314e0fa51..4aab26ec0292 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
10obj-y += power/ 10obj-y += power/
11obj-$(CONFIG_HAS_DMA) += dma-mapping.o 11obj-$(CONFIG_HAS_DMA) += dma-mapping.o
12obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o 12obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
13obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o
14obj-$(CONFIG_ISA) += isa.o 13obj-$(CONFIG_ISA) += isa.o
15obj-$(CONFIG_FW_LOADER) += firmware_class.o 14obj-$(CONFIG_FW_LOADER) += firmware_class.o
16obj-$(CONFIG_NUMA) += node.o 15obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c4778995cd72..f748430bb654 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -18,6 +18,15 @@
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20 20
21struct component_match {
22 size_t alloc;
23 size_t num;
24 struct {
25 void *data;
26 int (*fn)(struct device *, void *);
27 } compare[0];
28};
29
21struct master { 30struct master {
22 struct list_head node; 31 struct list_head node;
23 struct list_head components; 32 struct list_head components;
@@ -25,6 +34,7 @@ struct master {
25 34
26 const struct component_master_ops *ops; 35 const struct component_master_ops *ops;
27 struct device *dev; 36 struct device *dev;
37 struct component_match *match;
28}; 38};
29 39
30struct component { 40struct component {
@@ -69,6 +79,11 @@ static void component_detach_master(struct master *master, struct component *c)
69 c->master = NULL; 79 c->master = NULL;
70} 80}
71 81
82/*
83 * Add a component to a master, finding the component via the compare
84 * function and compare data. This is safe to call for duplicate matches
85 * and will not result in the same component being added multiple times.
86 */
72int component_master_add_child(struct master *master, 87int component_master_add_child(struct master *master,
73 int (*compare)(struct device *, void *), void *compare_data) 88 int (*compare)(struct device *, void *), void *compare_data)
74{ 89{
@@ -76,11 +91,12 @@ int component_master_add_child(struct master *master,
76 int ret = -ENXIO; 91 int ret = -ENXIO;
77 92
78 list_for_each_entry(c, &component_list, node) { 93 list_for_each_entry(c, &component_list, node) {
79 if (c->master) 94 if (c->master && c->master != master)
80 continue; 95 continue;
81 96
82 if (compare(c->dev, compare_data)) { 97 if (compare(c->dev, compare_data)) {
83 component_attach_master(master, c); 98 if (!c->master)
99 component_attach_master(master, c);
84 ret = 0; 100 ret = 0;
85 break; 101 break;
86 } 102 }
@@ -90,6 +106,34 @@ int component_master_add_child(struct master *master,
90} 106}
91EXPORT_SYMBOL_GPL(component_master_add_child); 107EXPORT_SYMBOL_GPL(component_master_add_child);
92 108
109static int find_components(struct master *master)
110{
111 struct component_match *match = master->match;
112 size_t i;
113 int ret = 0;
114
115 if (!match) {
116 /*
117 * Search the list of components, looking for components that
118 * belong to this master, and attach them to the master.
119 */
120 return master->ops->add_components(master->dev, master);
121 }
122
123 /*
124 * Scan the array of match functions and attach
125 * any components which are found to this master.
126 */
127 for (i = 0; i < match->num; i++) {
128 ret = component_master_add_child(master,
129 match->compare[i].fn,
130 match->compare[i].data);
131 if (ret)
132 break;
133 }
134 return ret;
135}
136
93/* Detach all attached components from this master */ 137/* Detach all attached components from this master */
94static void master_remove_components(struct master *master) 138static void master_remove_components(struct master *master)
95{ 139{
@@ -113,44 +157,44 @@ static void master_remove_components(struct master *master)
113static int try_to_bring_up_master(struct master *master, 157static int try_to_bring_up_master(struct master *master,
114 struct component *component) 158 struct component *component)
115{ 159{
116 int ret = 0; 160 int ret;
117 161
118 if (!master->bound) { 162 if (master->bound)
119 /* 163 return 0;
120 * Search the list of components, looking for components that
121 * belong to this master, and attach them to the master.
122 */
123 if (master->ops->add_components(master->dev, master)) {
124 /* Failed to find all components */
125 master_remove_components(master);
126 ret = 0;
127 goto out;
128 }
129 164
130 if (component && component->master != master) { 165 /*
131 master_remove_components(master); 166 * Search the list of components, looking for components that
132 ret = 0; 167 * belong to this master, and attach them to the master.
133 goto out; 168 */
134 } 169 if (find_components(master)) {
170 /* Failed to find all components */
171 ret = 0;
172 goto out;
173 }
135 174
136 if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { 175 if (component && component->master != master) {
137 ret = -ENOMEM; 176 ret = 0;
138 goto out; 177 goto out;
139 } 178 }
140 179
141 /* Found all components */ 180 if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
142 ret = master->ops->bind(master->dev); 181 ret = -ENOMEM;
143 if (ret < 0) { 182 goto out;
144 devres_release_group(master->dev, NULL); 183 }
145 dev_info(master->dev, "master bind failed: %d\n", ret);
146 master_remove_components(master);
147 goto out;
148 }
149 184
150 master->bound = true; 185 /* Found all components */
151 ret = 1; 186 ret = master->ops->bind(master->dev);
187 if (ret < 0) {
188 devres_release_group(master->dev, NULL);
189 dev_info(master->dev, "master bind failed: %d\n", ret);
190 goto out;
152 } 191 }
192
193 master->bound = true;
194 return 1;
195
153out: 196out:
197 master_remove_components(master);
154 198
155 return ret; 199 return ret;
156} 200}
@@ -180,18 +224,89 @@ static void take_down_master(struct master *master)
180 master_remove_components(master); 224 master_remove_components(master);
181} 225}
182 226
183int component_master_add(struct device *dev, 227static size_t component_match_size(size_t num)
184 const struct component_master_ops *ops) 228{
229 return offsetof(struct component_match, compare[num]);
230}
231
232static struct component_match *component_match_realloc(struct device *dev,
233 struct component_match *match, size_t num)
234{
235 struct component_match *new;
236
237 if (match && match->alloc == num)
238 return match;
239
240 new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL);
241 if (!new)
242 return ERR_PTR(-ENOMEM);
243
244 if (match) {
245 memcpy(new, match, component_match_size(min(match->num, num)));
246 devm_kfree(dev, match);
247 } else {
248 new->num = 0;
249 }
250
251 new->alloc = num;
252
253 return new;
254}
255
256/*
257 * Add a component to be matched.
258 *
259 * The match array is first created or extended if necessary.
260 */
261void component_match_add(struct device *dev, struct component_match **matchptr,
262 int (*compare)(struct device *, void *), void *compare_data)
263{
264 struct component_match *match = *matchptr;
265
266 if (IS_ERR(match))
267 return;
268
269 if (!match || match->num == match->alloc) {
270 size_t new_size = match ? match->alloc + 16 : 15;
271
272 match = component_match_realloc(dev, match, new_size);
273
274 *matchptr = match;
275
276 if (IS_ERR(match))
277 return;
278 }
279
280 match->compare[match->num].fn = compare;
281 match->compare[match->num].data = compare_data;
282 match->num++;
283}
284EXPORT_SYMBOL(component_match_add);
285
286int component_master_add_with_match(struct device *dev,
287 const struct component_master_ops *ops,
288 struct component_match *match)
185{ 289{
186 struct master *master; 290 struct master *master;
187 int ret; 291 int ret;
188 292
293 if (ops->add_components && match)
294 return -EINVAL;
295
296 if (match) {
297 /* Reallocate the match array for its true size */
298 match = component_match_realloc(dev, match, match->num);
299 if (IS_ERR(match))
300 return PTR_ERR(match);
301 }
302
189 master = kzalloc(sizeof(*master), GFP_KERNEL); 303 master = kzalloc(sizeof(*master), GFP_KERNEL);
190 if (!master) 304 if (!master)
191 return -ENOMEM; 305 return -ENOMEM;
192 306
193 master->dev = dev; 307 master->dev = dev;
194 master->ops = ops; 308 master->ops = ops;
309 master->match = match;
195 INIT_LIST_HEAD(&master->components); 310 INIT_LIST_HEAD(&master->components);
196 311
197 /* Add to the list of available masters. */ 312 /* Add to the list of available masters. */
@@ -209,6 +324,13 @@ int component_master_add(struct device *dev,
209 324
210 return ret < 0 ? ret : 0; 325 return ret < 0 ? ret : 0;
211} 326}
327EXPORT_SYMBOL_GPL(component_master_add_with_match);
328
329int component_master_add(struct device *dev,
330 const struct component_master_ops *ops)
331{
332 return component_master_add_with_match(dev, ops, NULL);
333}
212EXPORT_SYMBOL_GPL(component_master_add); 334EXPORT_SYMBOL_GPL(component_master_add);
213 335
214void component_master_del(struct device *dev, 336void component_master_del(struct device *dev,
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d276e33880be..da77791793f1 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -100,10 +100,16 @@ static inline long firmware_loading_timeout(void)
100#define FW_OPT_UEVENT (1U << 0) 100#define FW_OPT_UEVENT (1U << 0)
101#define FW_OPT_NOWAIT (1U << 1) 101#define FW_OPT_NOWAIT (1U << 1)
102#ifdef CONFIG_FW_LOADER_USER_HELPER 102#ifdef CONFIG_FW_LOADER_USER_HELPER
103#define FW_OPT_FALLBACK (1U << 2) 103#define FW_OPT_USERHELPER (1U << 2)
104#else 104#else
105#define FW_OPT_FALLBACK 0 105#define FW_OPT_USERHELPER 0
106#endif 106#endif
107#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
108#define FW_OPT_FALLBACK FW_OPT_USERHELPER
109#else
110#define FW_OPT_FALLBACK 0
111#endif
112#define FW_OPT_NO_WARN (1U << 3)
107 113
108struct firmware_cache { 114struct firmware_cache {
109 /* firmware_buf instance will be added into the below list */ 115 /* firmware_buf instance will be added into the below list */
@@ -279,26 +285,15 @@ static const char * const fw_path[] = {
279module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); 285module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
280MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); 286MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
281 287
282/* Don't inline this: 'struct kstat' is biggish */
283static noinline_for_stack int fw_file_size(struct file *file)
284{
285 struct kstat st;
286 if (vfs_getattr(&file->f_path, &st))
287 return -1;
288 if (!S_ISREG(st.mode))
289 return -1;
290 if (st.size != (int)st.size)
291 return -1;
292 return st.size;
293}
294
295static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) 288static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
296{ 289{
297 int size; 290 int size;
298 char *buf; 291 char *buf;
299 int rc; 292 int rc;
300 293
301 size = fw_file_size(file); 294 if (!S_ISREG(file_inode(file)->i_mode))
295 return -EINVAL;
296 size = i_size_read(file_inode(file));
302 if (size <= 0) 297 if (size <= 0)
303 return -EINVAL; 298 return -EINVAL;
304 buf = vmalloc(size); 299 buf = vmalloc(size);
@@ -718,7 +713,7 @@ out:
718static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) 713static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
719{ 714{
720 struct firmware_buf *buf = fw_priv->buf; 715 struct firmware_buf *buf = fw_priv->buf;
721 int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; 716 int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
722 717
723 /* If the array of pages is too small, grow it... */ 718 /* If the array of pages is too small, grow it... */
724 if (buf->page_array_size < pages_needed) { 719 if (buf->page_array_size < pages_needed) {
@@ -911,7 +906,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
911 wait_for_completion(&buf->completion); 906 wait_for_completion(&buf->completion);
912 907
913 cancel_delayed_work_sync(&fw_priv->timeout_work); 908 cancel_delayed_work_sync(&fw_priv->timeout_work);
914 if (!buf->data) 909 if (is_fw_load_aborted(buf))
910 retval = -EAGAIN;
911 else if (!buf->data)
915 retval = -ENOMEM; 912 retval = -ENOMEM;
916 913
917 device_remove_file(f_dev, &dev_attr_loading); 914 device_remove_file(f_dev, &dev_attr_loading);
@@ -1111,10 +1108,11 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
1111 1108
1112 ret = fw_get_filesystem_firmware(device, fw->priv); 1109 ret = fw_get_filesystem_firmware(device, fw->priv);
1113 if (ret) { 1110 if (ret) {
1114 if (opt_flags & FW_OPT_FALLBACK) { 1111 if (!(opt_flags & FW_OPT_NO_WARN))
1115 dev_warn(device, 1112 dev_warn(device,
1116 "Direct firmware load failed with error %d\n", 1113 "Direct firmware load for %s failed with error %d\n",
1117 ret); 1114 name, ret);
1115 if (opt_flags & FW_OPT_USERHELPER) {
1118 dev_warn(device, "Falling back to user helper\n"); 1116 dev_warn(device, "Falling back to user helper\n");
1119 ret = fw_load_from_user_helper(fw, name, device, 1117 ret = fw_load_from_user_helper(fw, name, device,
1120 opt_flags, timeout); 1118 opt_flags, timeout);
@@ -1171,7 +1169,6 @@ request_firmware(const struct firmware **firmware_p, const char *name,
1171} 1169}
1172EXPORT_SYMBOL(request_firmware); 1170EXPORT_SYMBOL(request_firmware);
1173 1171
1174#ifdef CONFIG_FW_LOADER_USER_HELPER
1175/** 1172/**
1176 * request_firmware: - load firmware directly without usermode helper 1173 * request_firmware: - load firmware directly without usermode helper
1177 * @firmware_p: pointer to firmware image 1174 * @firmware_p: pointer to firmware image
@@ -1188,12 +1185,12 @@ int request_firmware_direct(const struct firmware **firmware_p,
1188{ 1185{
1189 int ret; 1186 int ret;
1190 __module_get(THIS_MODULE); 1187 __module_get(THIS_MODULE);
1191 ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT); 1188 ret = _request_firmware(firmware_p, name, device,
1189 FW_OPT_UEVENT | FW_OPT_NO_WARN);
1192 module_put(THIS_MODULE); 1190 module_put(THIS_MODULE);
1193 return ret; 1191 return ret;
1194} 1192}
1195EXPORT_SYMBOL_GPL(request_firmware_direct); 1193EXPORT_SYMBOL_GPL(request_firmware_direct);
1196#endif
1197 1194
1198/** 1195/**
1199 * release_firmware: - release the resource associated with a firmware image 1196 * release_firmware: - release the resource associated with a firmware image
@@ -1277,7 +1274,7 @@ request_firmware_nowait(
1277 fw_work->context = context; 1274 fw_work->context = context;
1278 fw_work->cont = cont; 1275 fw_work->cont = cont;
1279 fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | 1276 fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
1280 (uevent ? FW_OPT_UEVENT : 0); 1277 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1281 1278
1282 if (!try_module_get(module)) { 1279 if (!try_module_get(module)) {
1283 kfree(fw_work); 1280 kfree(fw_work);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 00f2208949d1..ab4f4ce02722 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -24,6 +24,7 @@
24#include <linux/idr.h> 24#include <linux/idr.h>
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/clk/clk-conf.h> 26#include <linux/clk/clk-conf.h>
27#include <linux/limits.h>
27 28
28#include "base.h" 29#include "base.h"
29#include "power/power.h" 30#include "power/power.h"
@@ -176,7 +177,7 @@ EXPORT_SYMBOL_GPL(platform_add_devices);
176 177
177struct platform_object { 178struct platform_object {
178 struct platform_device pdev; 179 struct platform_device pdev;
179 char name[1]; 180 char name[];
180}; 181};
181 182
182/** 183/**
@@ -202,6 +203,7 @@ static void platform_device_release(struct device *dev)
202 kfree(pa->pdev.dev.platform_data); 203 kfree(pa->pdev.dev.platform_data);
203 kfree(pa->pdev.mfd_cell); 204 kfree(pa->pdev.mfd_cell);
204 kfree(pa->pdev.resource); 205 kfree(pa->pdev.resource);
206 kfree(pa->pdev.driver_override);
205 kfree(pa); 207 kfree(pa);
206} 208}
207 209
@@ -217,7 +219,7 @@ struct platform_device *platform_device_alloc(const char *name, int id)
217{ 219{
218 struct platform_object *pa; 220 struct platform_object *pa;
219 221
220 pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL); 222 pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
221 if (pa) { 223 if (pa) {
222 strcpy(pa->name, name); 224 strcpy(pa->name, name);
223 pa->pdev.name = pa->name; 225 pa->pdev.name = pa->name;
@@ -713,8 +715,49 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
713} 715}
714static DEVICE_ATTR_RO(modalias); 716static DEVICE_ATTR_RO(modalias);
715 717
718static ssize_t driver_override_store(struct device *dev,
719 struct device_attribute *attr,
720 const char *buf, size_t count)
721{
722 struct platform_device *pdev = to_platform_device(dev);
723 char *driver_override, *old = pdev->driver_override, *cp;
724
725 if (count > PATH_MAX)
726 return -EINVAL;
727
728 driver_override = kstrndup(buf, count, GFP_KERNEL);
729 if (!driver_override)
730 return -ENOMEM;
731
732 cp = strchr(driver_override, '\n');
733 if (cp)
734 *cp = '\0';
735
736 if (strlen(driver_override)) {
737 pdev->driver_override = driver_override;
738 } else {
739 kfree(driver_override);
740 pdev->driver_override = NULL;
741 }
742
743 kfree(old);
744
745 return count;
746}
747
748static ssize_t driver_override_show(struct device *dev,
749 struct device_attribute *attr, char *buf)
750{
751 struct platform_device *pdev = to_platform_device(dev);
752
753 return sprintf(buf, "%s\n", pdev->driver_override);
754}
755static DEVICE_ATTR_RW(driver_override);
756
757
716static struct attribute *platform_dev_attrs[] = { 758static struct attribute *platform_dev_attrs[] = {
717 &dev_attr_modalias.attr, 759 &dev_attr_modalias.attr,
760 &dev_attr_driver_override.attr,
718 NULL, 761 NULL,
719}; 762};
720ATTRIBUTE_GROUPS(platform_dev); 763ATTRIBUTE_GROUPS(platform_dev);
@@ -770,6 +813,10 @@ static int platform_match(struct device *dev, struct device_driver *drv)
770 struct platform_device *pdev = to_platform_device(dev); 813 struct platform_device *pdev = to_platform_device(dev);
771 struct platform_driver *pdrv = to_platform_driver(drv); 814 struct platform_driver *pdrv = to_platform_driver(drv);
772 815
816 /* When driver_override is set, only bind to the matching driver */
817 if (pdev->driver_override)
818 return !strcmp(pdev->driver_override, drv->name);
819
773 /* Attempt an OF style match first */ 820 /* Attempt an OF style match first */
774 if (of_driver_match_device(dev, drv)) 821 if (of_driver_match_device(dev, drv))
775 return 1; 822 return 1;
diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c
deleted file mode 100644
index a73fbf3b8e56..000000000000
--- a/drivers/base/reservation.c
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright (C) 2012-2013 Canonical Ltd
3 *
4 * Based on bo.c which bears the following copyright notice,
5 * but is dual licensed:
6 *
7 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
8 * All Rights Reserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a
11 * copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sub license, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial portions
20 * of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
26 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
28 * USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 **************************************************************************/
31/*
32 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
33 */
34
35#include <linux/reservation.h>
36#include <linux/export.h>
37
38DEFINE_WW_CLASS(reservation_ww_class);
39EXPORT_SYMBOL(reservation_ww_class);
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 6159b7752a64..f2cd6a2d40b4 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -212,9 +212,9 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
212 mutex_init(&gdev->lock); 212 mutex_init(&gdev->lock);
213 INIT_LIST_HEAD(&gdev->next); 213 INIT_LIST_HEAD(&gdev->next);
214 214
215 gdev->base = devm_request_and_ioremap(&pdev->dev, r); 215 gdev->base = devm_ioremap_resource(&pdev->dev, r);
216 if (!gdev->base) 216 if (IS_ERR(gdev->base))
217 return -ENOMEM; 217 return PTR_ERR(gdev->base);
218 218
219 err = devm_request_irq(&pdev->dev, timeout_irq, 219 err = devm_request_irq(&pdev->dev, timeout_irq,
220 brcmstb_gisb_timeout_handler, 0, pdev->name, 220 brcmstb_gisb_timeout_handler, 0, pdev->name,
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
new file mode 100644
index 000000000000..57a675f90cd0
--- /dev/null
+++ b/drivers/dma-buf/Makefile
@@ -0,0 +1 @@
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
diff --git a/drivers/base/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 840c7fa80983..f3014c448e1e 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,10 +25,13 @@
25#include <linux/fs.h> 25#include <linux/fs.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/dma-buf.h> 27#include <linux/dma-buf.h>
28#include <linux/fence.h>
28#include <linux/anon_inodes.h> 29#include <linux/anon_inodes.h>
29#include <linux/export.h> 30#include <linux/export.h>
30#include <linux/debugfs.h> 31#include <linux/debugfs.h>
31#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include <linux/poll.h>
34#include <linux/reservation.h>
32 35
33static inline int is_dma_buf_file(struct file *); 36static inline int is_dma_buf_file(struct file *);
34 37
@@ -50,12 +53,25 @@ static int dma_buf_release(struct inode *inode, struct file *file)
50 53
51 BUG_ON(dmabuf->vmapping_counter); 54 BUG_ON(dmabuf->vmapping_counter);
52 55
56 /*
57 * Any fences that a dma-buf poll can wait on should be signaled
58 * before releasing dma-buf. This is the responsibility of each
59 * driver that uses the reservation objects.
60 *
61 * If you hit this BUG() it means someone dropped their ref to the
62 * dma-buf while still having pending operation to the buffer.
63 */
64 BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
65
53 dmabuf->ops->release(dmabuf); 66 dmabuf->ops->release(dmabuf);
54 67
55 mutex_lock(&db_list.lock); 68 mutex_lock(&db_list.lock);
56 list_del(&dmabuf->list_node); 69 list_del(&dmabuf->list_node);
57 mutex_unlock(&db_list.lock); 70 mutex_unlock(&db_list.lock);
58 71
72 if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
73 reservation_object_fini(dmabuf->resv);
74
59 kfree(dmabuf); 75 kfree(dmabuf);
60 return 0; 76 return 0;
61} 77}
@@ -103,10 +119,141 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
103 return base + offset; 119 return base + offset;
104} 120}
105 121
122static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
123{
124 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
125 unsigned long flags;
126
127 spin_lock_irqsave(&dcb->poll->lock, flags);
128 wake_up_locked_poll(dcb->poll, dcb->active);
129 dcb->active = 0;
130 spin_unlock_irqrestore(&dcb->poll->lock, flags);
131}
132
133static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
134{
135 struct dma_buf *dmabuf;
136 struct reservation_object *resv;
137 struct reservation_object_list *fobj;
138 struct fence *fence_excl;
139 unsigned long events;
140 unsigned shared_count, seq;
141
142 dmabuf = file->private_data;
143 if (!dmabuf || !dmabuf->resv)
144 return POLLERR;
145
146 resv = dmabuf->resv;
147
148 poll_wait(file, &dmabuf->poll, poll);
149
150 events = poll_requested_events(poll) & (POLLIN | POLLOUT);
151 if (!events)
152 return 0;
153
154retry:
155 seq = read_seqcount_begin(&resv->seq);
156 rcu_read_lock();
157
158 fobj = rcu_dereference(resv->fence);
159 if (fobj)
160 shared_count = fobj->shared_count;
161 else
162 shared_count = 0;
163 fence_excl = rcu_dereference(resv->fence_excl);
164 if (read_seqcount_retry(&resv->seq, seq)) {
165 rcu_read_unlock();
166 goto retry;
167 }
168
169 if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
170 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
171 unsigned long pevents = POLLIN;
172
173 if (shared_count == 0)
174 pevents |= POLLOUT;
175
176 spin_lock_irq(&dmabuf->poll.lock);
177 if (dcb->active) {
178 dcb->active |= pevents;
179 events &= ~pevents;
180 } else
181 dcb->active = pevents;
182 spin_unlock_irq(&dmabuf->poll.lock);
183
184 if (events & pevents) {
185 if (!fence_get_rcu(fence_excl)) {
186 /* force a recheck */
187 events &= ~pevents;
188 dma_buf_poll_cb(NULL, &dcb->cb);
189 } else if (!fence_add_callback(fence_excl, &dcb->cb,
190 dma_buf_poll_cb)) {
191 events &= ~pevents;
192 fence_put(fence_excl);
193 } else {
194 /*
195 * No callback queued, wake up any additional
196 * waiters.
197 */
198 fence_put(fence_excl);
199 dma_buf_poll_cb(NULL, &dcb->cb);
200 }
201 }
202 }
203
204 if ((events & POLLOUT) && shared_count > 0) {
205 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
206 int i;
207
208 /* Only queue a new callback if no event has fired yet */
209 spin_lock_irq(&dmabuf->poll.lock);
210 if (dcb->active)
211 events &= ~POLLOUT;
212 else
213 dcb->active = POLLOUT;
214 spin_unlock_irq(&dmabuf->poll.lock);
215
216 if (!(events & POLLOUT))
217 goto out;
218
219 for (i = 0; i < shared_count; ++i) {
220 struct fence *fence = rcu_dereference(fobj->shared[i]);
221
222 if (!fence_get_rcu(fence)) {
223 /*
224 * fence refcount dropped to zero, this means
225 * that fobj has been freed
226 *
227 * call dma_buf_poll_cb and force a recheck!
228 */
229 events &= ~POLLOUT;
230 dma_buf_poll_cb(NULL, &dcb->cb);
231 break;
232 }
233 if (!fence_add_callback(fence, &dcb->cb,
234 dma_buf_poll_cb)) {
235 fence_put(fence);
236 events &= ~POLLOUT;
237 break;
238 }
239 fence_put(fence);
240 }
241
242 /* No callback queued, wake up any additional waiters. */
243 if (i == shared_count)
244 dma_buf_poll_cb(NULL, &dcb->cb);
245 }
246
247out:
248 rcu_read_unlock();
249 return events;
250}
251
106static const struct file_operations dma_buf_fops = { 252static const struct file_operations dma_buf_fops = {
107 .release = dma_buf_release, 253 .release = dma_buf_release,
108 .mmap = dma_buf_mmap_internal, 254 .mmap = dma_buf_mmap_internal,
109 .llseek = dma_buf_llseek, 255 .llseek = dma_buf_llseek,
256 .poll = dma_buf_poll,
110}; 257};
111 258
112/* 259/*
@@ -128,6 +275,7 @@ static inline int is_dma_buf_file(struct file *file)
128 * @size: [in] Size of the buffer 275 * @size: [in] Size of the buffer
129 * @flags: [in] mode flags for the file. 276 * @flags: [in] mode flags for the file.
130 * @exp_name: [in] name of the exporting module - useful for debugging. 277 * @exp_name: [in] name of the exporting module - useful for debugging.
278 * @resv: [in] reservation-object, NULL to allocate default one.
131 * 279 *
132 * Returns, on success, a newly created dma_buf object, which wraps the 280 * Returns, on success, a newly created dma_buf object, which wraps the
133 * supplied private data and operations for dma_buf_ops. On either missing 281 * supplied private data and operations for dma_buf_ops. On either missing
@@ -135,10 +283,17 @@ static inline int is_dma_buf_file(struct file *file)
135 * 283 *
136 */ 284 */
137struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, 285struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
138 size_t size, int flags, const char *exp_name) 286 size_t size, int flags, const char *exp_name,
287 struct reservation_object *resv)
139{ 288{
140 struct dma_buf *dmabuf; 289 struct dma_buf *dmabuf;
141 struct file *file; 290 struct file *file;
291 size_t alloc_size = sizeof(struct dma_buf);
292 if (!resv)
293 alloc_size += sizeof(struct reservation_object);
294 else
295 /* prevent &dma_buf[1] == dma_buf->resv */
296 alloc_size += 1;
142 297
143 if (WARN_ON(!priv || !ops 298 if (WARN_ON(!priv || !ops
144 || !ops->map_dma_buf 299 || !ops->map_dma_buf
@@ -150,7 +305,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
150 return ERR_PTR(-EINVAL); 305 return ERR_PTR(-EINVAL);
151 } 306 }
152 307
153 dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL); 308 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
154 if (dmabuf == NULL) 309 if (dmabuf == NULL)
155 return ERR_PTR(-ENOMEM); 310 return ERR_PTR(-ENOMEM);
156 311
@@ -158,6 +313,15 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
158 dmabuf->ops = ops; 313 dmabuf->ops = ops;
159 dmabuf->size = size; 314 dmabuf->size = size;
160 dmabuf->exp_name = exp_name; 315 dmabuf->exp_name = exp_name;
316 init_waitqueue_head(&dmabuf->poll);
317 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
318 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
319
320 if (!resv) {
321 resv = (struct reservation_object *)&dmabuf[1];
322 reservation_object_init(resv);
323 }
324 dmabuf->resv = resv;
161 325
162 file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); 326 file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
163 if (IS_ERR(file)) { 327 if (IS_ERR(file)) {
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
new file mode 100644
index 000000000000..4222cb2aa96a
--- /dev/null
+++ b/drivers/dma-buf/fence.c
@@ -0,0 +1,431 @@
1/*
2 * Fence mechanism for dma-buf and to allow for asynchronous dma access
3 *
4 * Copyright (C) 2012 Canonical Ltd
5 * Copyright (C) 2012 Texas Instruments
6 *
7 * Authors:
8 * Rob Clark <robdclark@gmail.com>
9 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published by
13 * the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 */
20
21#include <linux/slab.h>
22#include <linux/export.h>
23#include <linux/atomic.h>
24#include <linux/fence.h>
25
26#define CREATE_TRACE_POINTS
27#include <trace/events/fence.h>
28
29EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
30EXPORT_TRACEPOINT_SYMBOL(fence_emit);
31
32/**
33 * fence context counter: each execution context should have its own
34 * fence context, this allows checking if fences belong to the same
35 * context or not. One device can have multiple separate contexts,
36 * and they're used if some engine can run independently of another.
37 */
38static atomic_t fence_context_counter = ATOMIC_INIT(0);
39
40/**
41 * fence_context_alloc - allocate an array of fence contexts
42 * @num: [in] amount of contexts to allocate
43 *
44 * This function will return the first index of the number of fences allocated.
45 * The fence context is used for setting fence->context to a unique number.
46 */
47unsigned fence_context_alloc(unsigned num)
48{
49 BUG_ON(!num);
50 return atomic_add_return(num, &fence_context_counter) - num;
51}
52EXPORT_SYMBOL(fence_context_alloc);
53
/**
 * fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Unlike fence_signal, this function must be called with fence->lock held.
 *
 * Returns 0 on the first (effective) signal, -EINVAL if @fence is NULL
 * or was already signaled.
 */
int fence_signal_locked(struct fence *fence)
{
	struct fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	/* record the time of the first signal; barrier orders the store
	 * before the signaled-bit update below */
	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked fence_signal,
		 * still run through all callbacks
		 */
	} else
		trace_fence_signaled(fence);

	/* detach and invoke every pending callback under fence->lock */
	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(fence_signal_locked);
96
/**
 * fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Takes fence->lock only when callbacks may need to run. Returns 0 on the
 * first (effective) signal, -EINVAL if @fence is NULL or already signaled.
 */
int fence_signal(struct fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	/* record the time of the first signal before setting the bit */
	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	trace_fence_signaled(fence);

	/*
	 * fence_add_callback sets ENABLE_SIGNAL_BIT before queueing a
	 * callback, so cb_list only needs draining when the bit is set
	 */
	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(fence_signal);
137
/**
 * fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: [in] the fence to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 */
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	/* the actual wait is delegated to the fence implementation;
	 * fence_default_wait is the stock choice for ops->wait */
	trace_fence_wait_start(fence);
	ret = fence->ops->wait(fence, intr, timeout);
	trace_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);
168
169void fence_release(struct kref *kref)
170{
171 struct fence *fence =
172 container_of(kref, struct fence, refcount);
173
174 trace_fence_destroy(fence);
175
176 BUG_ON(!list_empty(&fence->cb_list));
177
178 if (fence->ops->release)
179 fence->ops->release(fence);
180 else
181 fence_free(fence);
182}
183EXPORT_SYMBOL(fence_release);
184
/**
 * fence_free - default fence destructor
 * @fence: the fence to free
 *
 * Frees the fence after an RCU grace period so concurrent RCU readers
 * never dereference freed memory.
 */
void fence_free(struct fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
190
/**
 * fence_enable_sw_signaling - enable signaling on fence
 * @fence: [in] the fence to enable
 *
 * this will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible
 */
void fence_enable_sw_signaling(struct fence *fence)
{
	unsigned long flags;

	/* only the first caller flips the bit, and only when not signaled */
	if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		trace_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		/* a false return from enable_signaling means the fence
		 * is already done; signal it right away under the lock */
		if (!fence->ops->enable_signaling(fence))
			fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(fence_enable_sw_signaling);
215
/**
 * fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: [in] the fence to wait on
 * @cb: [in] the callback to register
 * @func: [in] the function to call
 *
 * cb will be initialized by fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback)
 *
 * Add a software callback to the fence. Same restrictions apply to
 * refcount as it does to fence_wait, however the caller doesn't need to
 * keep a refcount to fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
 * after it signals with fence_signal. The callback itself can be called
 * from irq context.
 *
 */
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
		       fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	/* unlocked fast path: already signaled, never queue the callback */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	/* recheck under the lock; the fence may have signaled meanwhile */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set) {
		trace_fence_enable_signal(fence);

		/* first enabling of signaling; false means already done */
		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		/* leave cb detached so fence_remove_callback stays safe */
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(fence_add_callback);
279
280/**
281 * fence_remove_callback - remove a callback from the signaling list
282 * @fence: [in] the fence to wait on
283 * @cb: [in] the callback to remove
284 *
285 * Remove a previously queued callback from the fence. This function returns
286 * true if the callback is succesfully removed, or false if the fence has
287 * already been signaled.
288 *
289 * *WARNING*:
290 * Cancelling a callback should only be done if you really know what you're
291 * doing, since deadlocks and race conditions could occur all too easily. For
292 * this reason, it should only ever be done on hardware lockup recovery,
293 * with a reference held to the fence.
294 */
295bool
296fence_remove_callback(struct fence *fence, struct fence_cb *cb)
297{
298 unsigned long flags;
299 bool ret;
300
301 spin_lock_irqsave(fence->lock, flags);
302
303 ret = !list_empty(&cb->node);
304 if (ret)
305 list_del_init(&cb->node);
306
307 spin_unlock_irqrestore(fence->lock, flags);
308
309 return ret;
310}
311EXPORT_SYMBOL(fence_remove_callback);
312
313struct default_wait_cb {
314 struct fence_cb base;
315 struct task_struct *task;
316};
317
318static void
319fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
320{
321 struct default_wait_cb *wait =
322 container_of(cb, struct default_wait_cb, base);
323
324 wake_up_state(wait->task, TASK_NORMAL);
325}
326
/**
 * fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: [in] the fence to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success.
 */
signed long
fence_default_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout;
	bool was_set;

	/* unlocked fast path: nothing to wait for */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	/* signaled while we took the lock: full timeout remains */
	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set) {
		trace_fence_enable_signal(fence);

		/* enable_signaling returning false means already signaled */
		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			goto out;
		}
	}

	/* queue a wakeup callback pointing at the current task */
	cb.base.func = fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	/* sleep until signaled, timed out, or (if intr) interrupted;
	 * the lock is dropped across schedule_timeout and retaken */
	while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	/* the signal path may already have unlinked our callback */
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(fence_default_wait);
396
397/**
398 * fence_init - Initialize a custom fence.
399 * @fence: [in] the fence to initialize
400 * @ops: [in] the fence_ops for operations on this fence
401 * @lock: [in] the irqsafe spinlock to use for locking this fence
402 * @context: [in] the execution context this fence is run on
403 * @seqno: [in] a linear increasing sequence number for this context
404 *
405 * Initializes an allocated fence, the caller doesn't have to keep its
406 * refcount after committing with this fence, but it will need to hold a
407 * refcount again if fence_ops.enable_signaling gets called. This can
408 * be used for other implementing other types of fence.
409 *
410 * context and seqno are used for easy comparison between fences, allowing
411 * to check which fence is later by simply using fence_later.
412 */
413void
414fence_init(struct fence *fence, const struct fence_ops *ops,
415 spinlock_t *lock, unsigned context, unsigned seqno)
416{
417 BUG_ON(!lock);
418 BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
419 !ops->get_driver_name || !ops->get_timeline_name);
420
421 kref_init(&fence->refcount);
422 fence->ops = ops;
423 INIT_LIST_HEAD(&fence->cb_list);
424 fence->lock = lock;
425 fence->context = context;
426 fence->seqno = seqno;
427 fence->flags = 0UL;
428
429 trace_fence_init(fence);
430}
431EXPORT_SYMBOL(fence_init);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
new file mode 100644
index 000000000000..3c97c8fa8d02
--- /dev/null
+++ b/drivers/dma-buf/reservation.c
@@ -0,0 +1,477 @@
1/*
2 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
3 *
4 * Based on bo.c which bears the following copyright notice,
5 * but is dual licensed:
6 *
7 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
8 * All Rights Reserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a
11 * copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sub license, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial portions
20 * of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
26 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
28 * USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 **************************************************************************/
31/*
32 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
33 */
34
35#include <linux/reservation.h>
36#include <linux/export.h>
37
/* ww_mutex class shared by every reservation_object, so cross-object
 * locking can deadlock-detect and back off */
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* lockdep class key for the seqcount embedded in reservation_object */
struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

/* lockdep name for that seqcount */
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
46/*
47 * Reserve space to add a shared fence to a reservation_object,
48 * must be called with obj->lock held.
49 */
50int reservation_object_reserve_shared(struct reservation_object *obj)
51{
52 struct reservation_object_list *fobj, *old;
53 u32 max;
54
55 old = reservation_object_get_list(obj);
56
57 if (old && old->shared_max) {
58 if (old->shared_count < old->shared_max) {
59 /* perform an in-place update */
60 kfree(obj->staged);
61 obj->staged = NULL;
62 return 0;
63 } else
64 max = old->shared_max * 2;
65 } else
66 max = 4;
67
68 /*
69 * resize obj->staged or allocate if it doesn't exist,
70 * noop if already correct size
71 */
72 fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
73 GFP_KERNEL);
74 if (!fobj)
75 return -ENOMEM;
76
77 obj->staged = fobj;
78 fobj->shared_max = max;
79 return 0;
80}
81EXPORT_SYMBOL(reservation_object_reserve_shared);
82
/*
 * Install @fence in the existing shared list @fobj: replace a fence
 * from the same context in place, or append into the free slot
 * guaranteed by reservation_object_reserve_shared. Called with
 * obj->lock held; the seqcount write section lets lockless RCU
 * readers detect the update and retry.
 */
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	/* the list owns a reference to the new fence */
	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			/* drop the replaced fence outside the write section */
			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}
122
/*
 * Publish the staged list @fobj (carrying @fence) in place of @old.
 * Entries are copied over from @old, with at most one entry from the
 * same context as @fence replaced. Called with obj->lock held; the old
 * list is freed after an RCU grace period.
 */
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	/* the list owns a reference to the new fence */
	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		/* replace at most one fence sharing @fence's context */
		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		/* nothing replaced: append into the reserved slot */
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* old list and replaced fence are released after the switch-over */
	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}
182
183/*
184 * Add a fence to a shared slot, obj->lock must be held, and
185 * reservation_object_reserve_shared_fence has been called.
186 */
187void reservation_object_add_shared_fence(struct reservation_object *obj,
188 struct fence *fence)
189{
190 struct reservation_object_list *old, *fobj = obj->staged;
191
192 old = reservation_object_get_list(obj);
193 obj->staged = NULL;
194
195 if (!fobj) {
196 BUG_ON(old->shared_count >= old->shared_max);
197 reservation_object_add_shared_inplace(obj, old, fence);
198 } else
199 reservation_object_add_shared_replace(obj, old, fobj, fence);
200}
201EXPORT_SYMBOL(reservation_object_add_shared_fence);
202
/*
 * Install @fence (may be NULL) as the exclusive fence of @obj and drop
 * all shared fences. Must be called with obj->lock held; the seqcount
 * write section makes the excl-set + shared-clear appear atomic to
 * lockless readers.
 */
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	/* drop the previous exclusive fence, if any */
	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
235
/*
 * Lockless snapshot of all fences in @obj. On success *pfence_excl
 * holds a reference to the exclusive fence (or NULL), *pshared a
 * kmalloc'ed array of *pshared_count referenced shared fences (NULL
 * when there are none; caller frees it and drops all references).
 * Returns 0 or -ENOMEM. Retries whenever the seqcount shows a
 * concurrent writer or a fence refcount has already hit zero.
 */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			/* try atomically first; on failure drop the RCU
			 * lock and retry the allocation sleeping */
			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			shared = nshared;
			/* NOTE(review): copies shared_max slots though only
			 * shared_count are valid; in-bounds, and the extra
			 * stale pointers are never dereferenced below */
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		/* a concurrent writer invalidates the snapshot: retry */
		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
321
/*
 * Wait, without holding obj->lock, until the fences of @obj signal or
 * @timeout (jiffies) elapses. With @wait_all the shared fences are
 * waited on one at a time (retrying for the rest), otherwise only the
 * exclusive fence is considered. @intr selects an interruptible wait.
 * Returns the remaining timeout (> 0), 0 on timeout, or a negative
 * error (e.g. -ERESTARTSYS) from fence_wait_timeout.
 */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		/* snapshot invalidated by a concurrent writer: retry */
		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		/* pick the first shared fence that is not yet signaled */
		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			/* refcount already zero: fence is going away, retry */
			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	/* no shared fences (or !wait_all): consider the exclusive fence */
	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		/* wait outside the RCU section; ret carries the remaining
		 * timeout across retries for the remaining fences */
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
396
397
398static inline int
399reservation_object_test_signaled_single(struct fence *passed_fence)
400{
401 struct fence *fence, *lfence = passed_fence;
402 int ret = 1;
403
404 if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
405 int ret;
406
407 fence = fence_get_rcu(lfence);
408 if (!fence)
409 return -1;
410
411 ret = !!fence_is_signaled(fence);
412 fence_put(fence);
413 }
414 return ret;
415}
416
/*
 * Lockless test whether the fences of @obj have signaled. With
 * @test_all every shared fence must be signaled; otherwise (or when no
 * shared fences exist) only the exclusive fence is checked. Retries on
 * concurrent modification or a dying fence reference.
 */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;

		struct reservation_object_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		/* snapshot invalidated by a concurrent writer: retry */
		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			/* -1 means the fence is being destroyed: retry */
			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	/* no shared fences (or !test_all): check the exclusive fence */
	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
new file mode 100644
index 000000000000..7d12a39a4b57
--- /dev/null
+++ b/drivers/dma-buf/seqno-fence.c
@@ -0,0 +1,73 @@
1/*
2 * seqno-fence, using a dma-buf to synchronize fencing
3 *
4 * Copyright (C) 2012 Texas Instruments
5 * Copyright (C) 2012-2014 Canonical Ltd
6 * Authors:
7 * Rob Clark <robdclark@gmail.com>
8 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 */
19
20#include <linux/slab.h>
21#include <linux/export.h>
22#include <linux/seqno-fence.h>
23
24static const char *seqno_fence_get_driver_name(struct fence *fence)
25{
26 struct seqno_fence *seqno_fence = to_seqno_fence(fence);
27 return seqno_fence->ops->get_driver_name(fence);
28}
29
30static const char *seqno_fence_get_timeline_name(struct fence *fence)
31{
32 struct seqno_fence *seqno_fence = to_seqno_fence(fence);
33 return seqno_fence->ops->get_timeline_name(fence);
34}
35
36static bool seqno_enable_signaling(struct fence *fence)
37{
38 struct seqno_fence *seqno_fence = to_seqno_fence(fence);
39 return seqno_fence->ops->enable_signaling(fence);
40}
41
42static bool seqno_signaled(struct fence *fence)
43{
44 struct seqno_fence *seqno_fence = to_seqno_fence(fence);
45 return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
46}
47
48static void seqno_release(struct fence *fence)
49{
50 struct seqno_fence *f = to_seqno_fence(fence);
51
52 dma_buf_put(f->sync_buf);
53 if (f->ops->release)
54 f->ops->release(fence);
55 else
56 fence_free(&f->base);
57}
58
59static signed long seqno_wait(struct fence *fence, bool intr, signed long timeout)
60{
61 struct seqno_fence *f = to_seqno_fence(fence);
62 return f->ops->wait(fence, intr, timeout);
63}
64
/*
 * fence_ops used by all seqno fences: every operation trampolines to
 * the driver-supplied seqno_fence->ops, and release additionally drops
 * the reference on the backing sync_buf dma-buf.
 */
const struct fence_ops seqno_fence_ops = {
	.get_driver_name = seqno_fence_get_driver_name,
	.get_timeline_name = seqno_fence_get_timeline_name,
	.enable_signaling = seqno_enable_signaling,
	.signaled = seqno_signaled,
	.wait = seqno_wait,
	.release = seqno_release,
};
EXPORT_SYMBOL(seqno_fence_ops);
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 81c34f949dfc..3aedf9e993e6 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -1039,11 +1039,9 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
1039 if (ret) 1039 if (ret)
1040 return ret; 1040 return ret;
1041 1041
1042 base = devm_request_and_ioremap(dev->dev, res); 1042 base = devm_ioremap_resource(dev->dev, res);
1043 if (!base) { 1043 if (IS_ERR(base))
1044 DRM_ERROR("failed to ioremap register\n"); 1044 return PTR_ERR(base);
1045 return -ENOMEM;
1046 }
1047 1045
1048 dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL); 1046 dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
1049 if (!dcrtc) { 1047 if (!dcrtc) {
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index bb9b642d8485..7496f55611a5 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -539,7 +539,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
539 int flags) 539 int flags)
540{ 540{
541 return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size, 541 return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
542 O_RDWR); 542 O_RDWR, NULL);
543} 543}
544 544
545struct drm_gem_object * 545struct drm_gem_object *
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 304ca8cacbc4..99d578bad17e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -336,7 +336,13 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
336struct dma_buf *drm_gem_prime_export(struct drm_device *dev, 336struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
337 struct drm_gem_object *obj, int flags) 337 struct drm_gem_object *obj, int flags)
338{ 338{
339 return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); 339 struct reservation_object *robj = NULL;
340
341 if (dev->driver->gem_prime_res_obj)
342 robj = dev->driver->gem_prime_res_obj(obj);
343
344 return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
345 flags, robj);
340} 346}
341EXPORT_SYMBOL(drm_gem_prime_export); 347EXPORT_SYMBOL(drm_gem_prime_export);
342 348
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 2a3ad24276f8..60192ed544f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -187,7 +187,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
187 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 187 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
188 188
189 return dma_buf_export(obj, &exynos_dmabuf_ops, 189 return dma_buf_export(obj, &exynos_dmabuf_ops,
190 exynos_gem_obj->base.size, flags); 190 exynos_gem_obj->base.size, flags, NULL);
191} 191}
192 192
193struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, 193struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 580aa42443ed..82a1f4b57778 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -237,7 +237,8 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
237 return ERR_PTR(ret); 237 return ERR_PTR(ret);
238 } 238 }
239 239
240 return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags); 240 return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
241 NULL);
241} 242}
242 243
243static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) 244static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5425ffe3931d..c9428c943afb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -845,6 +845,7 @@ driver = {
845 .gem_prime_export = drm_gem_prime_export, 845 .gem_prime_export = drm_gem_prime_export,
846 .gem_prime_import = drm_gem_prime_import, 846 .gem_prime_import = drm_gem_prime_import,
847 .gem_prime_pin = nouveau_gem_prime_pin, 847 .gem_prime_pin = nouveau_gem_prime_pin,
848 .gem_prime_res_obj = nouveau_gem_prime_res_obj,
848 .gem_prime_unpin = nouveau_gem_prime_unpin, 849 .gem_prime_unpin = nouveau_gem_prime_unpin,
849 .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table, 850 .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
850 .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, 851 .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 7caca057bc38..ddab762d81fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -35,6 +35,7 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
35 struct drm_file *); 35 struct drm_file *);
36 36
37extern int nouveau_gem_prime_pin(struct drm_gem_object *); 37extern int nouveau_gem_prime_pin(struct drm_gem_object *);
38struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
38extern void nouveau_gem_prime_unpin(struct drm_gem_object *); 39extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
39extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); 40extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
40extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( 41extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 51a2cb102b44..1f51008e4d26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -102,3 +102,10 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
102 102
103 nouveau_bo_unpin(nvbo); 103 nouveau_bo_unpin(nvbo);
104} 104}
105
106struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj)
107{
108 struct nouveau_bo *nvbo = nouveau_gem_object(obj);
109
110 return nvbo->bo.resv;
111}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 4fcca8d42796..a2dbfb1737b4 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -171,7 +171,7 @@ static struct dma_buf_ops omap_dmabuf_ops = {
171struct dma_buf *omap_gem_prime_export(struct drm_device *dev, 171struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
172 struct drm_gem_object *obj, int flags) 172 struct drm_gem_object *obj, int flags)
173{ 173{
174 return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags); 174 return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL);
175} 175}
176 176
177struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, 177struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e9e361084249..959f0866d993 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -132,6 +132,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
132 struct sg_table *sg); 132 struct sg_table *sg);
133int radeon_gem_prime_pin(struct drm_gem_object *obj); 133int radeon_gem_prime_pin(struct drm_gem_object *obj);
134void radeon_gem_prime_unpin(struct drm_gem_object *obj); 134void radeon_gem_prime_unpin(struct drm_gem_object *obj);
135struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
135void *radeon_gem_prime_vmap(struct drm_gem_object *obj); 136void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
136void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 137void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
137extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, 138extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -566,6 +567,7 @@ static struct drm_driver kms_driver = {
566 .gem_prime_import = drm_gem_prime_import, 567 .gem_prime_import = drm_gem_prime_import,
567 .gem_prime_pin = radeon_gem_prime_pin, 568 .gem_prime_pin = radeon_gem_prime_pin,
568 .gem_prime_unpin = radeon_gem_prime_unpin, 569 .gem_prime_unpin = radeon_gem_prime_unpin,
570 .gem_prime_res_obj = radeon_gem_prime_res_obj,
569 .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table, 571 .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
570 .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, 572 .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
571 .gem_prime_vmap = radeon_gem_prime_vmap, 573 .gem_prime_vmap = radeon_gem_prime_vmap,
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 20074560fc25..28d71070c389 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -103,3 +103,11 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
103 radeon_bo_unpin(bo); 103 radeon_bo_unpin(bo);
104 radeon_bo_unreserve(bo); 104 radeon_bo_unreserve(bo);
105} 105}
106
107
108struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
109{
110 struct radeon_bo *bo = gem_to_radeon_bo(obj);
111
112 return bo->tbo.resv;
113}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index aa85b7b26f10..78cc8143760a 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -420,7 +420,7 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
420 int flags) 420 int flags)
421{ 421{
422 return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size, 422 return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
423 flags); 423 flags, NULL);
424} 424}
425 425
426struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, 426struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index d2a053352789..12c87110db3a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -695,7 +695,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
695 } 695 }
696 696
697 dma_buf = dma_buf_export(prime, &tdev->ops, 697 dma_buf = dma_buf_export(prime, &tdev->ops,
698 prime->size, flags); 698 prime->size, flags, NULL);
699 if (IS_ERR(dma_buf)) { 699 if (IS_ERR(dma_buf)) {
700 ret = PTR_ERR(dma_buf); 700 ret = PTR_ERR(dma_buf);
701 ttm_mem_global_free(tdev->mem_glob, 701 ttm_mem_global_free(tdev->mem_glob,
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 880be0782dd9..c4e4dfa8123a 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
404 if (WARN_ON(!buf->sgt_base)) 404 if (WARN_ON(!buf->sgt_base))
405 return NULL; 405 return NULL;
406 406
407 dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags); 407 dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
408 if (IS_ERR(dbuf)) 408 if (IS_ERR(dbuf))
409 return NULL; 409 return NULL;
410 410
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 99e484f845f2..51607e9aa049 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -88,6 +88,7 @@ config SYNC
88 bool "Synchronization framework" 88 bool "Synchronization framework"
89 default n 89 default n
90 select ANON_INODES 90 select ANON_INODES
91 select DMA_SHARED_BUFFER
91 ---help--- 92 ---help---
92 This option enables the framework for synchronization between multiple 93 This option enables the framework for synchronization between multiple
93 drivers. Sync implementations can take advantage of hardware 94 drivers. Sync implementations can take advantage of hardware
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 0a01e1914905..517ad5ffa429 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -9,5 +9,5 @@ obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
9obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o 9obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
10obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o 10obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
11obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o 11obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
12obj-$(CONFIG_SYNC) += sync.o 12obj-$(CONFIG_SYNC) += sync.o sync_debug.o
13obj-$(CONFIG_SW_SYNC) += sw_sync.o 13obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 389b8f67a2ec..270360912b2c 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1120,7 +1120,8 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1120 ion_buffer_get(buffer); 1120 ion_buffer_get(buffer);
1121 mutex_unlock(&client->lock); 1121 mutex_unlock(&client->lock);
1122 1122
1123 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); 1123 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR,
1124 NULL);
1124 if (IS_ERR(dmabuf)) { 1125 if (IS_ERR(dmabuf)) {
1125 ion_buffer_put(buffer); 1126 ion_buffer_put(buffer);
1126 return dmabuf; 1127 return dmabuf;
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index 12a136ec1cec..a76db3ff87cb 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -50,7 +50,7 @@ static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
50{ 50{
51 struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; 51 struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
52 struct sw_sync_timeline *obj = 52 struct sw_sync_timeline *obj =
53 (struct sw_sync_timeline *)sync_pt->parent; 53 (struct sw_sync_timeline *)sync_pt_parent(sync_pt);
54 54
55 return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); 55 return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
56} 56}
@@ -59,7 +59,7 @@ static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
59{ 59{
60 struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; 60 struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
61 struct sw_sync_timeline *obj = 61 struct sw_sync_timeline *obj =
62 (struct sw_sync_timeline *)sync_pt->parent; 62 (struct sw_sync_timeline *)sync_pt_parent(sync_pt);
63 63
64 return sw_sync_cmp(obj->value, pt->value) >= 0; 64 return sw_sync_cmp(obj->value, pt->value) >= 0;
65} 65}
@@ -97,7 +97,6 @@ static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
97 char *str, int size) 97 char *str, int size)
98{ 98{
99 struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; 99 struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
100
101 snprintf(str, size, "%d", pt->value); 100 snprintf(str, size, "%d", pt->value);
102} 101}
103 102
@@ -157,7 +156,6 @@ static int sw_sync_open(struct inode *inode, struct file *file)
157static int sw_sync_release(struct inode *inode, struct file *file) 156static int sw_sync_release(struct inode *inode, struct file *file)
158{ 157{
159 struct sw_sync_timeline *obj = file->private_data; 158 struct sw_sync_timeline *obj = file->private_data;
160
161 sync_timeline_destroy(&obj->obj); 159 sync_timeline_destroy(&obj->obj);
162 return 0; 160 return 0;
163} 161}
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 18174f7c871c..c9a0c2cdc81a 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -31,22 +31,13 @@
31#define CREATE_TRACE_POINTS 31#define CREATE_TRACE_POINTS
32#include "trace/sync.h" 32#include "trace/sync.h"
33 33
34static void sync_fence_signal_pt(struct sync_pt *pt); 34static const struct fence_ops android_fence_ops;
35static int _sync_pt_has_signaled(struct sync_pt *pt); 35static const struct file_operations sync_fence_fops;
36static void sync_fence_free(struct kref *kref);
37static void sync_dump(void);
38
39static LIST_HEAD(sync_timeline_list_head);
40static DEFINE_SPINLOCK(sync_timeline_list_lock);
41
42static LIST_HEAD(sync_fence_list_head);
43static DEFINE_SPINLOCK(sync_fence_list_lock);
44 36
45struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, 37struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
46 int size, const char *name) 38 int size, const char *name)
47{ 39{
48 struct sync_timeline *obj; 40 struct sync_timeline *obj;
49 unsigned long flags;
50 41
51 if (size < sizeof(struct sync_timeline)) 42 if (size < sizeof(struct sync_timeline))
52 return NULL; 43 return NULL;
@@ -57,17 +48,14 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
57 48
58 kref_init(&obj->kref); 49 kref_init(&obj->kref);
59 obj->ops = ops; 50 obj->ops = ops;
51 obj->context = fence_context_alloc(1);
60 strlcpy(obj->name, name, sizeof(obj->name)); 52 strlcpy(obj->name, name, sizeof(obj->name));
61 53
62 INIT_LIST_HEAD(&obj->child_list_head); 54 INIT_LIST_HEAD(&obj->child_list_head);
63 spin_lock_init(&obj->child_list_lock);
64
65 INIT_LIST_HEAD(&obj->active_list_head); 55 INIT_LIST_HEAD(&obj->active_list_head);
66 spin_lock_init(&obj->active_list_lock); 56 spin_lock_init(&obj->child_list_lock);
67 57
68 spin_lock_irqsave(&sync_timeline_list_lock, flags); 58 sync_timeline_debug_add(obj);
69 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
70 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
71 59
72 return obj; 60 return obj;
73} 61}
@@ -77,11 +65,8 @@ static void sync_timeline_free(struct kref *kref)
77{ 65{
78 struct sync_timeline *obj = 66 struct sync_timeline *obj =
79 container_of(kref, struct sync_timeline, kref); 67 container_of(kref, struct sync_timeline, kref);
80 unsigned long flags;
81 68
82 spin_lock_irqsave(&sync_timeline_list_lock, flags); 69 sync_timeline_debug_remove(obj);
83 list_del(&obj->sync_timeline_list);
84 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
85 70
86 if (obj->ops->release_obj) 71 if (obj->ops->release_obj)
87 obj->ops->release_obj(obj); 72 obj->ops->release_obj(obj);
@@ -89,6 +74,16 @@ static void sync_timeline_free(struct kref *kref)
89 kfree(obj); 74 kfree(obj);
90} 75}
91 76
77static void sync_timeline_get(struct sync_timeline *obj)
78{
79 kref_get(&obj->kref);
80}
81
82static void sync_timeline_put(struct sync_timeline *obj)
83{
84 kref_put(&obj->kref, sync_timeline_free);
85}
86
92void sync_timeline_destroy(struct sync_timeline *obj) 87void sync_timeline_destroy(struct sync_timeline *obj)
93{ 88{
94 obj->destroyed = true; 89 obj->destroyed = true;
@@ -102,75 +97,33 @@ void sync_timeline_destroy(struct sync_timeline *obj)
102 * signal any children that their parent is going away. 97 * signal any children that their parent is going away.
103 */ 98 */
104 sync_timeline_signal(obj); 99 sync_timeline_signal(obj);
105 100 sync_timeline_put(obj);
106 kref_put(&obj->kref, sync_timeline_free);
107} 101}
108EXPORT_SYMBOL(sync_timeline_destroy); 102EXPORT_SYMBOL(sync_timeline_destroy);
109 103
110static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
111{
112 unsigned long flags;
113
114 pt->parent = obj;
115
116 spin_lock_irqsave(&obj->child_list_lock, flags);
117 list_add_tail(&pt->child_list, &obj->child_list_head);
118 spin_unlock_irqrestore(&obj->child_list_lock, flags);
119}
120
121static void sync_timeline_remove_pt(struct sync_pt *pt)
122{
123 struct sync_timeline *obj = pt->parent;
124 unsigned long flags;
125
126 spin_lock_irqsave(&obj->active_list_lock, flags);
127 if (!list_empty(&pt->active_list))
128 list_del_init(&pt->active_list);
129 spin_unlock_irqrestore(&obj->active_list_lock, flags);
130
131 spin_lock_irqsave(&obj->child_list_lock, flags);
132 if (!list_empty(&pt->child_list))
133 list_del_init(&pt->child_list);
134
135 spin_unlock_irqrestore(&obj->child_list_lock, flags);
136}
137
138void sync_timeline_signal(struct sync_timeline *obj) 104void sync_timeline_signal(struct sync_timeline *obj)
139{ 105{
140 unsigned long flags; 106 unsigned long flags;
141 LIST_HEAD(signaled_pts); 107 LIST_HEAD(signaled_pts);
142 struct list_head *pos, *n; 108 struct sync_pt *pt, *next;
143 109
144 trace_sync_timeline(obj); 110 trace_sync_timeline(obj);
145 111
146 spin_lock_irqsave(&obj->active_list_lock, flags); 112 spin_lock_irqsave(&obj->child_list_lock, flags);
147
148 list_for_each_safe(pos, n, &obj->active_list_head) {
149 struct sync_pt *pt =
150 container_of(pos, struct sync_pt, active_list);
151 113
152 if (_sync_pt_has_signaled(pt)) { 114 list_for_each_entry_safe(pt, next, &obj->active_list_head,
153 list_del_init(pos); 115 active_list) {
154 list_add(&pt->signaled_list, &signaled_pts); 116 if (fence_is_signaled_locked(&pt->base))
155 kref_get(&pt->fence->kref); 117 list_del(&pt->active_list);
156 }
157 } 118 }
158 119
159 spin_unlock_irqrestore(&obj->active_list_lock, flags); 120 spin_unlock_irqrestore(&obj->child_list_lock, flags);
160
161 list_for_each_safe(pos, n, &signaled_pts) {
162 struct sync_pt *pt =
163 container_of(pos, struct sync_pt, signaled_list);
164
165 list_del_init(pos);
166 sync_fence_signal_pt(pt);
167 kref_put(&pt->fence->kref, sync_fence_free);
168 }
169} 121}
170EXPORT_SYMBOL(sync_timeline_signal); 122EXPORT_SYMBOL(sync_timeline_signal);
171 123
172struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) 124struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
173{ 125{
126 unsigned long flags;
174 struct sync_pt *pt; 127 struct sync_pt *pt;
175 128
176 if (size < sizeof(struct sync_pt)) 129 if (size < sizeof(struct sync_pt))
@@ -180,87 +133,28 @@ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
180 if (pt == NULL) 133 if (pt == NULL)
181 return NULL; 134 return NULL;
182 135
136 spin_lock_irqsave(&obj->child_list_lock, flags);
137 sync_timeline_get(obj);
138 fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
139 obj->context, ++obj->value);
140 list_add_tail(&pt->child_list, &obj->child_list_head);
183 INIT_LIST_HEAD(&pt->active_list); 141 INIT_LIST_HEAD(&pt->active_list);
184 kref_get(&parent->kref); 142 spin_unlock_irqrestore(&obj->child_list_lock, flags);
185 sync_timeline_add_pt(parent, pt);
186
187 return pt; 143 return pt;
188} 144}
189EXPORT_SYMBOL(sync_pt_create); 145EXPORT_SYMBOL(sync_pt_create);
190 146
191void sync_pt_free(struct sync_pt *pt) 147void sync_pt_free(struct sync_pt *pt)
192{ 148{
193 if (pt->parent->ops->free_pt) 149 fence_put(&pt->base);
194 pt->parent->ops->free_pt(pt);
195
196 sync_timeline_remove_pt(pt);
197
198 kref_put(&pt->parent->kref, sync_timeline_free);
199
200 kfree(pt);
201} 150}
202EXPORT_SYMBOL(sync_pt_free); 151EXPORT_SYMBOL(sync_pt_free);
203 152
204/* call with pt->parent->active_list_lock held */ 153static struct sync_fence *sync_fence_alloc(int size, const char *name)
205static int _sync_pt_has_signaled(struct sync_pt *pt)
206{
207 int old_status = pt->status;
208
209 if (!pt->status)
210 pt->status = pt->parent->ops->has_signaled(pt);
211
212 if (!pt->status && pt->parent->destroyed)
213 pt->status = -ENOENT;
214
215 if (pt->status != old_status)
216 pt->timestamp = ktime_get();
217
218 return pt->status;
219}
220
221static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
222{
223 return pt->parent->ops->dup(pt);
224}
225
226/* Adds a sync pt to the active queue. Called when added to a fence */
227static void sync_pt_activate(struct sync_pt *pt)
228{
229 struct sync_timeline *obj = pt->parent;
230 unsigned long flags;
231 int err;
232
233 spin_lock_irqsave(&obj->active_list_lock, flags);
234
235 err = _sync_pt_has_signaled(pt);
236 if (err != 0)
237 goto out;
238
239 list_add_tail(&pt->active_list, &obj->active_list_head);
240
241out:
242 spin_unlock_irqrestore(&obj->active_list_lock, flags);
243}
244
245static int sync_fence_release(struct inode *inode, struct file *file);
246static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
247static long sync_fence_ioctl(struct file *file, unsigned int cmd,
248 unsigned long arg);
249
250
251static const struct file_operations sync_fence_fops = {
252 .release = sync_fence_release,
253 .poll = sync_fence_poll,
254 .unlocked_ioctl = sync_fence_ioctl,
255 .compat_ioctl = sync_fence_ioctl,
256};
257
258static struct sync_fence *sync_fence_alloc(const char *name)
259{ 154{
260 struct sync_fence *fence; 155 struct sync_fence *fence;
261 unsigned long flags;
262 156
263 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); 157 fence = kzalloc(size, GFP_KERNEL);
264 if (fence == NULL) 158 if (fence == NULL)
265 return NULL; 159 return NULL;
266 160
@@ -272,16 +166,8 @@ static struct sync_fence *sync_fence_alloc(const char *name)
272 kref_init(&fence->kref); 166 kref_init(&fence->kref);
273 strlcpy(fence->name, name, sizeof(fence->name)); 167 strlcpy(fence->name, name, sizeof(fence->name));
274 168
275 INIT_LIST_HEAD(&fence->pt_list_head);
276 INIT_LIST_HEAD(&fence->waiter_list_head);
277 spin_lock_init(&fence->waiter_list_lock);
278
279 init_waitqueue_head(&fence->wq); 169 init_waitqueue_head(&fence->wq);
280 170
281 spin_lock_irqsave(&sync_fence_list_lock, flags);
282 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
283 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
284
285 return fence; 171 return fence;
286 172
287err: 173err:
@@ -289,120 +175,42 @@ err:
289 return NULL; 175 return NULL;
290} 176}
291 177
292/* TODO: implement a create which takes more that one sync_pt */ 178static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
293struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
294{ 179{
180 struct sync_fence_cb *check;
295 struct sync_fence *fence; 181 struct sync_fence *fence;
296 182
297 if (pt->fence) 183 check = container_of(cb, struct sync_fence_cb, cb);
298 return NULL; 184 fence = check->fence;
299
300 fence = sync_fence_alloc(name);
301 if (fence == NULL)
302 return NULL;
303 185
304 pt->fence = fence; 186 if (atomic_dec_and_test(&fence->status))
305 list_add(&pt->pt_list, &fence->pt_list_head); 187 wake_up_all(&fence->wq);
306 sync_pt_activate(pt);
307
308 /*
309 * signal the fence in case pt was activated before
310 * sync_pt_activate(pt) was called
311 */
312 sync_fence_signal_pt(pt);
313
314 return fence;
315}
316EXPORT_SYMBOL(sync_fence_create);
317
318static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
319{
320 struct list_head *pos;
321
322 list_for_each(pos, &src->pt_list_head) {
323 struct sync_pt *orig_pt =
324 container_of(pos, struct sync_pt, pt_list);
325 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
326
327 if (new_pt == NULL)
328 return -ENOMEM;
329
330 new_pt->fence = dst;
331 list_add(&new_pt->pt_list, &dst->pt_list_head);
332 }
333
334 return 0;
335} 188}
336 189
337static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) 190/* TODO: implement a create which takes more that one sync_pt */
338{ 191struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
339 struct list_head *src_pos, *dst_pos, *n;
340
341 list_for_each(src_pos, &src->pt_list_head) {
342 struct sync_pt *src_pt =
343 container_of(src_pos, struct sync_pt, pt_list);
344 bool collapsed = false;
345
346 list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
347 struct sync_pt *dst_pt =
348 container_of(dst_pos, struct sync_pt, pt_list);
349 /* collapse two sync_pts on the same timeline
350 * to a single sync_pt that will signal at
351 * the later of the two
352 */
353 if (dst_pt->parent == src_pt->parent) {
354 if (dst_pt->parent->ops->compare(dst_pt, src_pt)
355 == -1) {
356 struct sync_pt *new_pt =
357 sync_pt_dup(src_pt);
358 if (new_pt == NULL)
359 return -ENOMEM;
360
361 new_pt->fence = dst;
362 list_replace(&dst_pt->pt_list,
363 &new_pt->pt_list);
364 sync_pt_free(dst_pt);
365 }
366 collapsed = true;
367 break;
368 }
369 }
370
371 if (!collapsed) {
372 struct sync_pt *new_pt = sync_pt_dup(src_pt);
373
374 if (new_pt == NULL)
375 return -ENOMEM;
376
377 new_pt->fence = dst;
378 list_add(&new_pt->pt_list, &dst->pt_list_head);
379 }
380 }
381
382 return 0;
383}
384
385static void sync_fence_detach_pts(struct sync_fence *fence)
386{ 192{
387 struct list_head *pos, *n; 193 struct sync_fence *fence;
388 194
389 list_for_each_safe(pos, n, &fence->pt_list_head) { 195 fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
390 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 196 if (fence == NULL)
197 return NULL;
391 198
392 sync_timeline_remove_pt(pt); 199 fence->num_fences = 1;
393 } 200 atomic_set(&fence->status, 1);
394}
395 201
396static void sync_fence_free_pts(struct sync_fence *fence) 202 fence_get(&pt->base);
397{ 203 fence->cbs[0].sync_pt = &pt->base;
398 struct list_head *pos, *n; 204 fence->cbs[0].fence = fence;
205 if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
206 fence_check_cb_func))
207 atomic_dec(&fence->status);
399 208
400 list_for_each_safe(pos, n, &fence->pt_list_head) { 209 sync_fence_debug_add(fence);
401 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
402 210
403 sync_pt_free(pt); 211 return fence;
404 }
405} 212}
213EXPORT_SYMBOL(sync_fence_create);
406 214
407struct sync_fence *sync_fence_fdget(int fd) 215struct sync_fence *sync_fence_fdget(int fd)
408{ 216{
@@ -434,197 +242,155 @@ void sync_fence_install(struct sync_fence *fence, int fd)
434} 242}
435EXPORT_SYMBOL(sync_fence_install); 243EXPORT_SYMBOL(sync_fence_install);
436 244
437static int sync_fence_get_status(struct sync_fence *fence) 245static void sync_fence_add_pt(struct sync_fence *fence,
246 int *i, struct fence *pt)
438{ 247{
439 struct list_head *pos; 248 fence->cbs[*i].sync_pt = pt;
440 int status = 1; 249 fence->cbs[*i].fence = fence;
441
442 list_for_each(pos, &fence->pt_list_head) {
443 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
444 int pt_status = pt->status;
445
446 if (pt_status < 0) {
447 status = pt_status;
448 break;
449 } else if (status == 1) {
450 status = pt_status;
451 }
452 }
453 250
454 return status; 251 if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
252 fence_get(pt);
253 (*i)++;
254 }
455} 255}
456 256
457struct sync_fence *sync_fence_merge(const char *name, 257struct sync_fence *sync_fence_merge(const char *name,
458 struct sync_fence *a, struct sync_fence *b) 258 struct sync_fence *a, struct sync_fence *b)
459{ 259{
260 int num_fences = a->num_fences + b->num_fences;
460 struct sync_fence *fence; 261 struct sync_fence *fence;
461 struct list_head *pos; 262 int i, i_a, i_b;
462 int err; 263 unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
463 264
464 fence = sync_fence_alloc(name); 265 fence = sync_fence_alloc(size, name);
465 if (fence == NULL) 266 if (fence == NULL)
466 return NULL; 267 return NULL;
467 268
468 err = sync_fence_copy_pts(fence, a); 269 atomic_set(&fence->status, num_fences);
469 if (err < 0)
470 goto err;
471 270
472 err = sync_fence_merge_pts(fence, b); 271 /*
473 if (err < 0) 272 * Assume sync_fence a and b are both ordered and have no
474 goto err; 273 * duplicates with the same context.
274 *
275 * If a sync_fence can only be created with sync_fence_merge
276 * and sync_fence_create, this is a reasonable assumption.
277 */
278 for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
279 struct fence *pt_a = a->cbs[i_a].sync_pt;
280 struct fence *pt_b = b->cbs[i_b].sync_pt;
281
282 if (pt_a->context < pt_b->context) {
283 sync_fence_add_pt(fence, &i, pt_a);
284
285 i_a++;
286 } else if (pt_a->context > pt_b->context) {
287 sync_fence_add_pt(fence, &i, pt_b);
475 288
476 list_for_each(pos, &fence->pt_list_head) { 289 i_b++;
477 struct sync_pt *pt = 290 } else {
478 container_of(pos, struct sync_pt, pt_list); 291 if (pt_a->seqno - pt_b->seqno <= INT_MAX)
479 sync_pt_activate(pt); 292 sync_fence_add_pt(fence, &i, pt_a);
293 else
294 sync_fence_add_pt(fence, &i, pt_b);
295
296 i_a++;
297 i_b++;
298 }
480 } 299 }
481 300
482 /* 301 for (; i_a < a->num_fences; i_a++)
483 * signal the fence in case one of it's pts were activated before 302 sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
484 * they were activated
485 */
486 sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
487 struct sync_pt,
488 pt_list));
489 303
304 for (; i_b < b->num_fences; i_b++)
305 sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
306
307 if (num_fences > i)
308 atomic_sub(num_fences - i, &fence->status);
309 fence->num_fences = i;
310
311 sync_fence_debug_add(fence);
490 return fence; 312 return fence;
491err:
492 sync_fence_free_pts(fence);
493 kfree(fence);
494 return NULL;
495} 313}
496EXPORT_SYMBOL(sync_fence_merge); 314EXPORT_SYMBOL(sync_fence_merge);
497 315
498static void sync_fence_signal_pt(struct sync_pt *pt) 316int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
317 int wake_flags, void *key)
499{ 318{
500 LIST_HEAD(signaled_waiters); 319 struct sync_fence_waiter *wait;
501 struct sync_fence *fence = pt->fence;
502 struct list_head *pos;
503 struct list_head *n;
504 unsigned long flags;
505 int status;
506
507 status = sync_fence_get_status(fence);
508
509 spin_lock_irqsave(&fence->waiter_list_lock, flags);
510 /*
511 * this should protect against two threads racing on the signaled
512 * false -> true transition
513 */
514 if (status && !fence->status) {
515 list_for_each_safe(pos, n, &fence->waiter_list_head)
516 list_move(pos, &signaled_waiters);
517
518 fence->status = status;
519 } else {
520 status = 0;
521 }
522 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
523 320
524 if (status) { 321 wait = container_of(curr, struct sync_fence_waiter, work);
525 list_for_each_safe(pos, n, &signaled_waiters) { 322 list_del_init(&wait->work.task_list);
526 struct sync_fence_waiter *waiter =
527 container_of(pos, struct sync_fence_waiter,
528 waiter_list);
529 323
530 list_del(pos); 324 wait->callback(wait->work.private, wait);
531 waiter->callback(fence, waiter); 325 return 1;
532 }
533 wake_up(&fence->wq);
534 }
535} 326}
536 327
537int sync_fence_wait_async(struct sync_fence *fence, 328int sync_fence_wait_async(struct sync_fence *fence,
538 struct sync_fence_waiter *waiter) 329 struct sync_fence_waiter *waiter)
539{ 330{
331 int err = atomic_read(&fence->status);
540 unsigned long flags; 332 unsigned long flags;
541 int err = 0;
542 333
543 spin_lock_irqsave(&fence->waiter_list_lock, flags); 334 if (err < 0)
335 return err;
544 336
545 if (fence->status) { 337 if (!err)
546 err = fence->status; 338 return 1;
547 goto out;
548 }
549 339
550 list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); 340 init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
551out: 341 waiter->work.private = fence;
552 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
553 342
554 return err; 343 spin_lock_irqsave(&fence->wq.lock, flags);
344 err = atomic_read(&fence->status);
345 if (err > 0)
346 __add_wait_queue_tail(&fence->wq, &waiter->work);
347 spin_unlock_irqrestore(&fence->wq.lock, flags);
348
349 if (err < 0)
350 return err;
351
352 return !err;
555} 353}
556EXPORT_SYMBOL(sync_fence_wait_async); 354EXPORT_SYMBOL(sync_fence_wait_async);
557 355
558int sync_fence_cancel_async(struct sync_fence *fence, 356int sync_fence_cancel_async(struct sync_fence *fence,
559 struct sync_fence_waiter *waiter) 357 struct sync_fence_waiter *waiter)
560{ 358{
561 struct list_head *pos;
562 struct list_head *n;
563 unsigned long flags; 359 unsigned long flags;
564 int ret = -ENOENT; 360 int ret = 0;
565 361
566 spin_lock_irqsave(&fence->waiter_list_lock, flags); 362 spin_lock_irqsave(&fence->wq.lock, flags);
567 /* 363 if (!list_empty(&waiter->work.task_list))
568 * Make sure waiter is still in waiter_list because it is possible for 364 list_del_init(&waiter->work.task_list);
569 * the waiter to be removed from the list while the callback is still 365 else
570 * pending. 366 ret = -ENOENT;
571 */ 367 spin_unlock_irqrestore(&fence->wq.lock, flags);
572 list_for_each_safe(pos, n, &fence->waiter_list_head) {
573 struct sync_fence_waiter *list_waiter =
574 container_of(pos, struct sync_fence_waiter,
575 waiter_list);
576 if (list_waiter == waiter) {
577 list_del(pos);
578 ret = 0;
579 break;
580 }
581 }
582 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
583 return ret; 368 return ret;
584} 369}
585EXPORT_SYMBOL(sync_fence_cancel_async); 370EXPORT_SYMBOL(sync_fence_cancel_async);
586 371
587static bool sync_fence_check(struct sync_fence *fence)
588{
589 /*
590 * Make sure that reads to fence->status are ordered with the
591 * wait queue event triggering
592 */
593 smp_rmb();
594 return fence->status != 0;
595}
596
597int sync_fence_wait(struct sync_fence *fence, long timeout) 372int sync_fence_wait(struct sync_fence *fence, long timeout)
598{ 373{
599 int err = 0; 374 long ret;
600 struct sync_pt *pt; 375 int i;
601
602 trace_sync_wait(fence, 1);
603 list_for_each_entry(pt, &fence->pt_list_head, pt_list)
604 trace_sync_pt(pt);
605 376
606 if (timeout > 0) { 377 if (timeout < 0)
378 timeout = MAX_SCHEDULE_TIMEOUT;
379 else
607 timeout = msecs_to_jiffies(timeout); 380 timeout = msecs_to_jiffies(timeout);
608 err = wait_event_interruptible_timeout(fence->wq,
609 sync_fence_check(fence),
610 timeout);
611 } else if (timeout < 0) {
612 err = wait_event_interruptible(fence->wq,
613 sync_fence_check(fence));
614 }
615 trace_sync_wait(fence, 0);
616 381
617 if (err < 0) 382 trace_sync_wait(fence, 1);
618 return err; 383 for (i = 0; i < fence->num_fences; ++i)
619 384 trace_sync_pt(fence->cbs[i].sync_pt);
620 if (fence->status < 0) { 385 ret = wait_event_interruptible_timeout(fence->wq,
621 pr_info("fence error %d on [%p]\n", fence->status, fence); 386 atomic_read(&fence->status) <= 0,
622 sync_dump(); 387 timeout);
623 return fence->status; 388 trace_sync_wait(fence, 0);
624 }
625 389
626 if (fence->status == 0) { 390 if (ret < 0)
627 if (timeout > 0) { 391 return ret;
392 else if (ret == 0) {
393 if (timeout) {
628 pr_info("fence timeout on [%p] after %dms\n", fence, 394 pr_info("fence timeout on [%p] after %dms\n", fence,
629 jiffies_to_msecs(timeout)); 395 jiffies_to_msecs(timeout));
630 sync_dump(); 396 sync_dump();
@@ -632,15 +398,136 @@ int sync_fence_wait(struct sync_fence *fence, long timeout)
632 return -ETIME; 398 return -ETIME;
633 } 399 }
634 400
635 return 0; 401 ret = atomic_read(&fence->status);
402 if (ret) {
403 pr_info("fence error %ld on [%p]\n", ret, fence);
404 sync_dump();
405 }
406 return ret;
636} 407}
637EXPORT_SYMBOL(sync_fence_wait); 408EXPORT_SYMBOL(sync_fence_wait);
638 409
410static const char *android_fence_get_driver_name(struct fence *fence)
411{
412 struct sync_pt *pt = container_of(fence, struct sync_pt, base);
413 struct sync_timeline *parent = sync_pt_parent(pt);
414
415 return parent->ops->driver_name;
416}
417
418static const char *android_fence_get_timeline_name(struct fence *fence)
419{
420 struct sync_pt *pt = container_of(fence, struct sync_pt, base);
421 struct sync_timeline *parent = sync_pt_parent(pt);
422
423 return parent->name;
424}
425
426static void android_fence_release(struct fence *fence)
427{
428 struct sync_pt *pt = container_of(fence, struct sync_pt, base);
429 struct sync_timeline *parent = sync_pt_parent(pt);
430 unsigned long flags;
431
432 spin_lock_irqsave(fence->lock, flags);
433 list_del(&pt->child_list);
434 if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
435 list_del(&pt->active_list);
436 spin_unlock_irqrestore(fence->lock, flags);
437
438 if (parent->ops->free_pt)
439 parent->ops->free_pt(pt);
440
441 sync_timeline_put(parent);
442 fence_free(&pt->base);
443}
444
445static bool android_fence_signaled(struct fence *fence)
446{
447 struct sync_pt *pt = container_of(fence, struct sync_pt, base);
448 struct sync_timeline *parent = sync_pt_parent(pt);
449 int ret;
450
451 ret = parent->ops->has_signaled(pt);
452 if (ret < 0)
453 fence->status = ret;
454 return ret;
455}
456
457static bool android_fence_enable_signaling(struct fence *fence)
458{
459 struct sync_pt *pt = container_of(fence, struct sync_pt, base);
460 struct sync_timeline *parent = sync_pt_parent(pt);
461
462 if (android_fence_signaled(fence))
463 return false;
464
465 list_add_tail(&pt->active_list, &parent->active_list_head);
466 return true;
467}
468
469static int android_fence_fill_driver_data(struct fence *fence,
470 void *data, int size)
471{
472 struct sync_pt *pt = container_of(fence, struct sync_pt, base);
473 struct sync_timeline *parent = sync_pt_parent(pt);
474
475 if (!parent->ops->fill_driver_data)
476 return 0;
477 return parent->ops->fill_driver_data(pt, data, size);
478}
479
480static void android_fence_value_str(struct fence *fence,
481 char *str, int size)
482{
483 struct sync_pt *pt = container_of(fence, struct sync_pt, base);
484 struct sync_timeline *parent = sync_pt_parent(pt);
485
486 if (!parent->ops->pt_value_str) {
487 if (size)
488 *str = 0;
489 return;
490 }
491 parent->ops->pt_value_str(pt, str, size);
492}
493
494static void android_fence_timeline_value_str(struct fence *fence,
495 char *str, int size)
496{
497 struct sync_pt *pt = container_of(fence, struct sync_pt, base);
498 struct sync_timeline *parent = sync_pt_parent(pt);
499
500 if (!parent->ops->timeline_value_str) {
501 if (size)
502 *str = 0;
503 return;
504 }
505 parent->ops->timeline_value_str(parent, str, size);
506}
507
508static const struct fence_ops android_fence_ops = {
509 .get_driver_name = android_fence_get_driver_name,
510 .get_timeline_name = android_fence_get_timeline_name,
511 .enable_signaling = android_fence_enable_signaling,
512 .signaled = android_fence_signaled,
513 .wait = fence_default_wait,
514 .release = android_fence_release,
515 .fill_driver_data = android_fence_fill_driver_data,
516 .fence_value_str = android_fence_value_str,
517 .timeline_value_str = android_fence_timeline_value_str,
518};
519
639static void sync_fence_free(struct kref *kref) 520static void sync_fence_free(struct kref *kref)
640{ 521{
641 struct sync_fence *fence = container_of(kref, struct sync_fence, kref); 522 struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
523 int i, status = atomic_read(&fence->status);
642 524
643 sync_fence_free_pts(fence); 525 for (i = 0; i < fence->num_fences; ++i) {
526 if (status)
527 fence_remove_callback(fence->cbs[i].sync_pt,
528 &fence->cbs[i].cb);
529 fence_put(fence->cbs[i].sync_pt);
530 }
644 531
645 kfree(fence); 532 kfree(fence);
646} 533}
@@ -648,44 +535,25 @@ static void sync_fence_free(struct kref *kref)
648static int sync_fence_release(struct inode *inode, struct file *file) 535static int sync_fence_release(struct inode *inode, struct file *file)
649{ 536{
650 struct sync_fence *fence = file->private_data; 537 struct sync_fence *fence = file->private_data;
651 unsigned long flags;
652
653 /*
654 * We need to remove all ways to access this fence before droping
655 * our ref.
656 *
657 * start with its membership in the global fence list
658 */
659 spin_lock_irqsave(&sync_fence_list_lock, flags);
660 list_del(&fence->sync_fence_list);
661 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
662 538
663 /* 539 sync_fence_debug_remove(fence);
664 * remove its pts from their parents so that sync_timeline_signal()
665 * can't reference the fence.
666 */
667 sync_fence_detach_pts(fence);
668 540
669 kref_put(&fence->kref, sync_fence_free); 541 kref_put(&fence->kref, sync_fence_free);
670
671 return 0; 542 return 0;
672} 543}
673 544
674static unsigned int sync_fence_poll(struct file *file, poll_table *wait) 545static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
675{ 546{
676 struct sync_fence *fence = file->private_data; 547 struct sync_fence *fence = file->private_data;
548 int status;
677 549
678 poll_wait(file, &fence->wq, wait); 550 poll_wait(file, &fence->wq, wait);
679 551
680 /* 552 status = atomic_read(&fence->status);
681 * Make sure that reads to fence->status are ordered with the
682 * wait queue event triggering
683 */
684 smp_rmb();
685 553
686 if (fence->status == 1) 554 if (!status)
687 return POLLIN; 555 return POLLIN;
688 else if (fence->status < 0) 556 else if (status < 0)
689 return POLLERR; 557 return POLLERR;
690 else 558 else
691 return 0; 559 return 0;
@@ -750,7 +618,7 @@ err_put_fd:
750 return err; 618 return err;
751} 619}
752 620
753static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) 621static int sync_fill_pt_info(struct fence *fence, void *data, int size)
754{ 622{
755 struct sync_pt_info *info = data; 623 struct sync_pt_info *info = data;
756 int ret; 624 int ret;
@@ -760,20 +628,24 @@ static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
760 628
761 info->len = sizeof(struct sync_pt_info); 629 info->len = sizeof(struct sync_pt_info);
762 630
763 if (pt->parent->ops->fill_driver_data) { 631 if (fence->ops->fill_driver_data) {
764 ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, 632 ret = fence->ops->fill_driver_data(fence, info->driver_data,
765 size - sizeof(*info)); 633 size - sizeof(*info));
766 if (ret < 0) 634 if (ret < 0)
767 return ret; 635 return ret;
768 636
769 info->len += ret; 637 info->len += ret;
770 } 638 }
771 639
772 strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); 640 strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
773 strlcpy(info->driver_name, pt->parent->ops->driver_name, 641 sizeof(info->obj_name));
642 strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
774 sizeof(info->driver_name)); 643 sizeof(info->driver_name));
775 info->status = pt->status; 644 if (fence_is_signaled(fence))
776 info->timestamp_ns = ktime_to_ns(pt->timestamp); 645 info->status = fence->status >= 0 ? 1 : fence->status;
646 else
647 info->status = 0;
648 info->timestamp_ns = ktime_to_ns(fence->timestamp);
777 649
778 return info->len; 650 return info->len;
779} 651}
@@ -782,10 +654,9 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
782 unsigned long arg) 654 unsigned long arg)
783{ 655{
784 struct sync_fence_info_data *data; 656 struct sync_fence_info_data *data;
785 struct list_head *pos;
786 __u32 size; 657 __u32 size;
787 __u32 len = 0; 658 __u32 len = 0;
788 int ret; 659 int ret, i;
789 660
790 if (copy_from_user(&size, (void __user *)arg, sizeof(size))) 661 if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
791 return -EFAULT; 662 return -EFAULT;
@@ -801,12 +672,14 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
801 return -ENOMEM; 672 return -ENOMEM;
802 673
803 strlcpy(data->name, fence->name, sizeof(data->name)); 674 strlcpy(data->name, fence->name, sizeof(data->name));
804 data->status = fence->status; 675 data->status = atomic_read(&fence->status);
676 if (data->status >= 0)
677 data->status = !data->status;
678
805 len = sizeof(struct sync_fence_info_data); 679 len = sizeof(struct sync_fence_info_data);
806 680
807 list_for_each(pos, &fence->pt_list_head) { 681 for (i = 0; i < fence->num_fences; ++i) {
808 struct sync_pt *pt = 682 struct fence *pt = fence->cbs[i].sync_pt;
809 container_of(pos, struct sync_pt, pt_list);
810 683
811 ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); 684 ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
812 685
@@ -833,7 +706,6 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd,
833 unsigned long arg) 706 unsigned long arg)
834{ 707{
835 struct sync_fence *fence = file->private_data; 708 struct sync_fence *fence = file->private_data;
836
837 switch (cmd) { 709 switch (cmd) {
838 case SYNC_IOC_WAIT: 710 case SYNC_IOC_WAIT:
839 return sync_fence_ioctl_wait(fence, arg); 711 return sync_fence_ioctl_wait(fence, arg);
@@ -849,181 +721,10 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd,
849 } 721 }
850} 722}
851 723
852#ifdef CONFIG_DEBUG_FS 724static const struct file_operations sync_fence_fops = {
853static const char *sync_status_str(int status) 725 .release = sync_fence_release,
854{ 726 .poll = sync_fence_poll,
855 if (status > 0) 727 .unlocked_ioctl = sync_fence_ioctl,
856 return "signaled"; 728 .compat_ioctl = sync_fence_ioctl,
857 else if (status == 0)
858 return "active";
859 else
860 return "error";
861}
862
863static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
864{
865 int status = pt->status;
866
867 seq_printf(s, " %s%spt %s",
868 fence ? pt->parent->name : "",
869 fence ? "_" : "",
870 sync_status_str(status));
871 if (pt->status) {
872 struct timeval tv = ktime_to_timeval(pt->timestamp);
873
874 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
875 }
876
877 if (pt->parent->ops->timeline_value_str &&
878 pt->parent->ops->pt_value_str) {
879 char value[64];
880
881 pt->parent->ops->pt_value_str(pt, value, sizeof(value));
882 seq_printf(s, ": %s", value);
883 if (fence) {
884 pt->parent->ops->timeline_value_str(pt->parent, value,
885 sizeof(value));
886 seq_printf(s, " / %s", value);
887 }
888 } else if (pt->parent->ops->print_pt) {
889 seq_puts(s, ": ");
890 pt->parent->ops->print_pt(s, pt);
891 }
892
893 seq_puts(s, "\n");
894}
895
896static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
897{
898 struct list_head *pos;
899 unsigned long flags;
900
901 seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
902
903 if (obj->ops->timeline_value_str) {
904 char value[64];
905
906 obj->ops->timeline_value_str(obj, value, sizeof(value));
907 seq_printf(s, ": %s", value);
908 } else if (obj->ops->print_obj) {
909 seq_puts(s, ": ");
910 obj->ops->print_obj(s, obj);
911 }
912
913 seq_puts(s, "\n");
914
915 spin_lock_irqsave(&obj->child_list_lock, flags);
916 list_for_each(pos, &obj->child_list_head) {
917 struct sync_pt *pt =
918 container_of(pos, struct sync_pt, child_list);
919 sync_print_pt(s, pt, false);
920 }
921 spin_unlock_irqrestore(&obj->child_list_lock, flags);
922}
923
924static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
925{
926 struct list_head *pos;
927 unsigned long flags;
928
929 seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
930 sync_status_str(fence->status));
931
932 list_for_each(pos, &fence->pt_list_head) {
933 struct sync_pt *pt =
934 container_of(pos, struct sync_pt, pt_list);
935 sync_print_pt(s, pt, true);
936 }
937
938 spin_lock_irqsave(&fence->waiter_list_lock, flags);
939 list_for_each(pos, &fence->waiter_list_head) {
940 struct sync_fence_waiter *waiter =
941 container_of(pos, struct sync_fence_waiter,
942 waiter_list);
943
944 seq_printf(s, "waiter %pF\n", waiter->callback);
945 }
946 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
947}
948
949static int sync_debugfs_show(struct seq_file *s, void *unused)
950{
951 unsigned long flags;
952 struct list_head *pos;
953
954 seq_puts(s, "objs:\n--------------\n");
955
956 spin_lock_irqsave(&sync_timeline_list_lock, flags);
957 list_for_each(pos, &sync_timeline_list_head) {
958 struct sync_timeline *obj =
959 container_of(pos, struct sync_timeline,
960 sync_timeline_list);
961
962 sync_print_obj(s, obj);
963 seq_puts(s, "\n");
964 }
965 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
966
967 seq_puts(s, "fences:\n--------------\n");
968
969 spin_lock_irqsave(&sync_fence_list_lock, flags);
970 list_for_each(pos, &sync_fence_list_head) {
971 struct sync_fence *fence =
972 container_of(pos, struct sync_fence, sync_fence_list);
973
974 sync_print_fence(s, fence);
975 seq_puts(s, "\n");
976 }
977 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
978 return 0;
979}
980
981static int sync_debugfs_open(struct inode *inode, struct file *file)
982{
983 return single_open(file, sync_debugfs_show, inode->i_private);
984}
985
986static const struct file_operations sync_debugfs_fops = {
987 .open = sync_debugfs_open,
988 .read = seq_read,
989 .llseek = seq_lseek,
990 .release = single_release,
991}; 729};
992 730
993static __init int sync_debugfs_init(void)
994{
995 debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
996 return 0;
997}
998late_initcall(sync_debugfs_init);
999
1000#define DUMP_CHUNK 256
1001static char sync_dump_buf[64 * 1024];
1002static void sync_dump(void)
1003{
1004 struct seq_file s = {
1005 .buf = sync_dump_buf,
1006 .size = sizeof(sync_dump_buf) - 1,
1007 };
1008 int i;
1009
1010 sync_debugfs_show(&s, NULL);
1011
1012 for (i = 0; i < s.count; i += DUMP_CHUNK) {
1013 if ((s.count - i) > DUMP_CHUNK) {
1014 char c = s.buf[i + DUMP_CHUNK];
1015
1016 s.buf[i + DUMP_CHUNK] = 0;
1017 pr_cont("%s", s.buf + i);
1018 s.buf[i + DUMP_CHUNK] = c;
1019 } else {
1020 s.buf[s.count] = 0;
1021 pr_cont("%s", s.buf + i);
1022 }
1023 }
1024}
1025#else
1026static void sync_dump(void)
1027{
1028}
1029#endif
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index eaf57cccf626..66b0f431f63e 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -19,6 +19,7 @@
19#include <linux/list.h> 19#include <linux/list.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/wait.h> 21#include <linux/wait.h>
22#include <linux/fence.h>
22 23
23#include "uapi/sync.h" 24#include "uapi/sync.h"
24 25
@@ -40,8 +41,6 @@ struct sync_fence;
40 * -1 if a will signal before b 41 * -1 if a will signal before b
41 * @free_pt: called before sync_pt is freed 42 * @free_pt: called before sync_pt is freed
42 * @release_obj: called before sync_timeline is freed 43 * @release_obj: called before sync_timeline is freed
43 * @print_obj: deprecated
44 * @print_pt: deprecated
45 * @fill_driver_data: write implementation specific driver data to data. 44 * @fill_driver_data: write implementation specific driver data to data.
46 * should return an error if there is not enough room 45 * should return an error if there is not enough room
47 * as specified by size. This information is returned 46 * as specified by size. This information is returned
@@ -67,13 +66,6 @@ struct sync_timeline_ops {
67 /* optional */ 66 /* optional */
68 void (*release_obj)(struct sync_timeline *sync_timeline); 67 void (*release_obj)(struct sync_timeline *sync_timeline);
69 68
70 /* deprecated */
71 void (*print_obj)(struct seq_file *s,
72 struct sync_timeline *sync_timeline);
73
74 /* deprecated */
75 void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
76
77 /* optional */ 69 /* optional */
78 int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); 70 int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
79 71
@@ -104,19 +96,21 @@ struct sync_timeline {
104 96
105 /* protected by child_list_lock */ 97 /* protected by child_list_lock */
106 bool destroyed; 98 bool destroyed;
99 int context, value;
107 100
108 struct list_head child_list_head; 101 struct list_head child_list_head;
109 spinlock_t child_list_lock; 102 spinlock_t child_list_lock;
110 103
111 struct list_head active_list_head; 104 struct list_head active_list_head;
112 spinlock_t active_list_lock;
113 105
106#ifdef CONFIG_DEBUG_FS
114 struct list_head sync_timeline_list; 107 struct list_head sync_timeline_list;
108#endif
115}; 109};
116 110
117/** 111/**
118 * struct sync_pt - sync point 112 * struct sync_pt - sync point
119 * @parent: sync_timeline to which this sync_pt belongs 113 * @fence: base fence class
120 * @child_list: membership in sync_timeline.child_list_head 114 * @child_list: membership in sync_timeline.child_list_head
121 * @active_list: membership in sync_timeline.active_list_head 115 * @active_list: membership in sync_timeline.active_list_head
122 * @signaled_list: membership in temporary signaled_list on stack 116 * @signaled_list: membership in temporary signaled_list on stack
@@ -127,19 +121,22 @@ struct sync_timeline {
127 * signaled or error. 121 * signaled or error.
128 */ 122 */
129struct sync_pt { 123struct sync_pt {
130 struct sync_timeline *parent; 124 struct fence base;
131 struct list_head child_list;
132 125
126 struct list_head child_list;
133 struct list_head active_list; 127 struct list_head active_list;
134 struct list_head signaled_list; 128};
135
136 struct sync_fence *fence;
137 struct list_head pt_list;
138 129
139 /* protected by parent->active_list_lock */ 130static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
140 int status; 131{
132 return container_of(pt->base.lock, struct sync_timeline,
133 child_list_lock);
134}
141 135
142 ktime_t timestamp; 136struct sync_fence_cb {
137 struct fence_cb cb;
138 struct fence *sync_pt;
139 struct sync_fence *fence;
143}; 140};
144 141
145/** 142/**
@@ -149,9 +146,7 @@ struct sync_pt {
149 * @name: name of sync_fence. Useful for debugging 146 * @name: name of sync_fence. Useful for debugging
150 * @pt_list_head: list of sync_pts in the fence. immutable once fence 147 * @pt_list_head: list of sync_pts in the fence. immutable once fence
151 * is created 148 * is created
152 * @waiter_list_head: list of asynchronous waiters on this fence 149 * @status: 0: signaled, >0:active, <0: error
153 * @waiter_list_lock: lock protecting @waiter_list_head and @status
154 * @status: 1: signaled, 0:active, <0: error
155 * 150 *
156 * @wq: wait queue for fence signaling 151 * @wq: wait queue for fence signaling
157 * @sync_fence_list: membership in global fence list 152 * @sync_fence_list: membership in global fence list
@@ -160,17 +155,15 @@ struct sync_fence {
160 struct file *file; 155 struct file *file;
161 struct kref kref; 156 struct kref kref;
162 char name[32]; 157 char name[32];
163 158#ifdef CONFIG_DEBUG_FS
164 /* this list is immutable once the fence is created */ 159 struct list_head sync_fence_list;
165 struct list_head pt_list_head; 160#endif
166 161 int num_fences;
167 struct list_head waiter_list_head;
168 spinlock_t waiter_list_lock; /* also protects status */
169 int status;
170 162
171 wait_queue_head_t wq; 163 wait_queue_head_t wq;
164 atomic_t status;
172 165
173 struct list_head sync_fence_list; 166 struct sync_fence_cb cbs[];
174}; 167};
175 168
176struct sync_fence_waiter; 169struct sync_fence_waiter;
@@ -184,14 +177,14 @@ typedef void (*sync_callback_t)(struct sync_fence *fence,
184 * @callback_data: pointer to pass to @callback 177 * @callback_data: pointer to pass to @callback
185 */ 178 */
186struct sync_fence_waiter { 179struct sync_fence_waiter {
187 struct list_head waiter_list; 180 wait_queue_t work;
188 181 sync_callback_t callback;
189 sync_callback_t callback;
190}; 182};
191 183
192static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, 184static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
193 sync_callback_t callback) 185 sync_callback_t callback)
194{ 186{
187 INIT_LIST_HEAD(&waiter->work.task_list);
195 waiter->callback = callback; 188 waiter->callback = callback;
196} 189}
197 190
@@ -341,4 +334,22 @@ int sync_fence_cancel_async(struct sync_fence *fence,
341 */ 334 */
342int sync_fence_wait(struct sync_fence *fence, long timeout); 335int sync_fence_wait(struct sync_fence *fence, long timeout);
343 336
337#ifdef CONFIG_DEBUG_FS
338
339extern void sync_timeline_debug_add(struct sync_timeline *obj);
340extern void sync_timeline_debug_remove(struct sync_timeline *obj);
341extern void sync_fence_debug_add(struct sync_fence *fence);
342extern void sync_fence_debug_remove(struct sync_fence *fence);
343extern void sync_dump(void);
344
345#else
346# define sync_timeline_debug_add(obj)
347# define sync_timeline_debug_remove(obj)
348# define sync_fence_debug_add(fence)
349# define sync_fence_debug_remove(fence)
350# define sync_dump()
351#endif
352int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
353 int wake_flags, void *key);
354
344#endif /* _LINUX_SYNC_H */ 355#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
new file mode 100644
index 000000000000..257fc91bf02b
--- /dev/null
+++ b/drivers/staging/android/sync_debug.c
@@ -0,0 +1,252 @@
1/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/debugfs.h>
18#include <linux/export.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/kernel.h>
22#include <linux/poll.h>
23#include <linux/sched.h>
24#include <linux/seq_file.h>
25#include <linux/slab.h>
26#include <linux/uaccess.h>
27#include <linux/anon_inodes.h>
28#include "sync.h"
29
30#ifdef CONFIG_DEBUG_FS
31
32static LIST_HEAD(sync_timeline_list_head);
33static DEFINE_SPINLOCK(sync_timeline_list_lock);
34static LIST_HEAD(sync_fence_list_head);
35static DEFINE_SPINLOCK(sync_fence_list_lock);
36
37void sync_timeline_debug_add(struct sync_timeline *obj)
38{
39 unsigned long flags;
40
41 spin_lock_irqsave(&sync_timeline_list_lock, flags);
42 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
43 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
44}
45
46void sync_timeline_debug_remove(struct sync_timeline *obj)
47{
48 unsigned long flags;
49
50 spin_lock_irqsave(&sync_timeline_list_lock, flags);
51 list_del(&obj->sync_timeline_list);
52 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
53}
54
55void sync_fence_debug_add(struct sync_fence *fence)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&sync_fence_list_lock, flags);
60 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
61 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
62}
63
64void sync_fence_debug_remove(struct sync_fence *fence)
65{
66 unsigned long flags;
67
68 spin_lock_irqsave(&sync_fence_list_lock, flags);
69 list_del(&fence->sync_fence_list);
70 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
71}
72
73static const char *sync_status_str(int status)
74{
75 if (status == 0)
76 return "signaled";
77
78 if (status > 0)
79 return "active";
80
81 return "error";
82}
83
84static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
85{
86 int status = 1;
87 struct sync_timeline *parent = sync_pt_parent(pt);
88
89 if (fence_is_signaled_locked(&pt->base))
90 status = pt->base.status;
91
92 seq_printf(s, " %s%spt %s",
93 fence ? parent->name : "",
94 fence ? "_" : "",
95 sync_status_str(status));
96
97 if (status <= 0) {
98 struct timeval tv = ktime_to_timeval(pt->base.timestamp);
99
100 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
101 }
102
103 if (parent->ops->timeline_value_str &&
104 parent->ops->pt_value_str) {
105 char value[64];
106
107 parent->ops->pt_value_str(pt, value, sizeof(value));
108 seq_printf(s, ": %s", value);
109 if (fence) {
110 parent->ops->timeline_value_str(parent, value,
111 sizeof(value));
112 seq_printf(s, " / %s", value);
113 }
114 }
115
116 seq_puts(s, "\n");
117}
118
119static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
120{
121 struct list_head *pos;
122 unsigned long flags;
123
124 seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
125
126 if (obj->ops->timeline_value_str) {
127 char value[64];
128
129 obj->ops->timeline_value_str(obj, value, sizeof(value));
130 seq_printf(s, ": %s", value);
131 }
132
133 seq_puts(s, "\n");
134
135 spin_lock_irqsave(&obj->child_list_lock, flags);
136 list_for_each(pos, &obj->child_list_head) {
137 struct sync_pt *pt =
138 container_of(pos, struct sync_pt, child_list);
139 sync_print_pt(s, pt, false);
140 }
141 spin_unlock_irqrestore(&obj->child_list_lock, flags);
142}
143
144static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
145{
146 wait_queue_t *pos;
147 unsigned long flags;
148 int i;
149
150 seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
151 sync_status_str(atomic_read(&fence->status)));
152
153 for (i = 0; i < fence->num_fences; ++i) {
154 struct sync_pt *pt =
155 container_of(fence->cbs[i].sync_pt,
156 struct sync_pt, base);
157
158 sync_print_pt(s, pt, true);
159 }
160
161 spin_lock_irqsave(&fence->wq.lock, flags);
162 list_for_each_entry(pos, &fence->wq.task_list, task_list) {
163 struct sync_fence_waiter *waiter;
164
165 if (pos->func != &sync_fence_wake_up_wq)
166 continue;
167
168 waiter = container_of(pos, struct sync_fence_waiter, work);
169
170 seq_printf(s, "waiter %pF\n", waiter->callback);
171 }
172 spin_unlock_irqrestore(&fence->wq.lock, flags);
173}
174
175static int sync_debugfs_show(struct seq_file *s, void *unused)
176{
177 unsigned long flags;
178 struct list_head *pos;
179
180 seq_puts(s, "objs:\n--------------\n");
181
182 spin_lock_irqsave(&sync_timeline_list_lock, flags);
183 list_for_each(pos, &sync_timeline_list_head) {
184 struct sync_timeline *obj =
185 container_of(pos, struct sync_timeline,
186 sync_timeline_list);
187
188 sync_print_obj(s, obj);
189 seq_puts(s, "\n");
190 }
191 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
192
193 seq_puts(s, "fences:\n--------------\n");
194
195 spin_lock_irqsave(&sync_fence_list_lock, flags);
196 list_for_each(pos, &sync_fence_list_head) {
197 struct sync_fence *fence =
198 container_of(pos, struct sync_fence, sync_fence_list);
199
200 sync_print_fence(s, fence);
201 seq_puts(s, "\n");
202 }
203 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
204 return 0;
205}
206
207static int sync_debugfs_open(struct inode *inode, struct file *file)
208{
209 return single_open(file, sync_debugfs_show, inode->i_private);
210}
211
212static const struct file_operations sync_debugfs_fops = {
213 .open = sync_debugfs_open,
214 .read = seq_read,
215 .llseek = seq_lseek,
216 .release = single_release,
217};
218
219static __init int sync_debugfs_init(void)
220{
221 debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
222 return 0;
223}
224late_initcall(sync_debugfs_init);
225
226#define DUMP_CHUNK 256
227static char sync_dump_buf[64 * 1024];
228void sync_dump(void)
229{
230 struct seq_file s = {
231 .buf = sync_dump_buf,
232 .size = sizeof(sync_dump_buf) - 1,
233 };
234 int i;
235
236 sync_debugfs_show(&s, NULL);
237
238 for (i = 0; i < s.count; i += DUMP_CHUNK) {
239 if ((s.count - i) > DUMP_CHUNK) {
240 char c = s.buf[i + DUMP_CHUNK];
241
242 s.buf[i + DUMP_CHUNK] = 0;
243 pr_cont("%s", s.buf + i);
244 s.buf[i + DUMP_CHUNK] = c;
245 } else {
246 s.buf[s.count] = 0;
247 pr_cont("%s", s.buf + i);
248 }
249 }
250}
251
252#endif
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
index 95462359ba57..77edb977a7bf 100644
--- a/drivers/staging/android/trace/sync.h
+++ b/drivers/staging/android/trace/sync.h
@@ -45,7 +45,7 @@ TRACE_EVENT(sync_wait,
45 45
46 TP_fast_assign( 46 TP_fast_assign(
47 __assign_str(name, fence->name); 47 __assign_str(name, fence->name);
48 __entry->status = fence->status; 48 __entry->status = atomic_read(&fence->status);
49 __entry->begin = begin; 49 __entry->begin = begin;
50 ), 50 ),
51 51
@@ -54,19 +54,19 @@ TRACE_EVENT(sync_wait,
54); 54);
55 55
56TRACE_EVENT(sync_pt, 56TRACE_EVENT(sync_pt,
57 TP_PROTO(struct sync_pt *pt), 57 TP_PROTO(struct fence *pt),
58 58
59 TP_ARGS(pt), 59 TP_ARGS(pt),
60 60
61 TP_STRUCT__entry( 61 TP_STRUCT__entry(
62 __string(timeline, pt->parent->name) 62 __string(timeline, pt->ops->get_timeline_name(pt))
63 __array(char, value, 32) 63 __array(char, value, 32)
64 ), 64 ),
65 65
66 TP_fast_assign( 66 TP_fast_assign(
67 __assign_str(timeline, pt->parent->name); 67 __assign_str(timeline, pt->ops->get_timeline_name(pt));
68 if (pt->parent->ops->pt_value_str) { 68 if (pt->ops->fence_value_str) {
69 pt->parent->ops->pt_value_str(pt, __entry->value, 69 pt->ops->fence_value_str(pt, __entry->value,
70 sizeof(__entry->value)); 70 sizeof(__entry->value));
71 } else { 71 } else {
72 __entry->value[0] = '\0'; 72 __entry->value[0] = '\0';
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 63146295153b..76c08c2beb2f 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -451,7 +451,7 @@ static ssize_t read_file_bool(struct file *file, char __user *user_buf,
451{ 451{
452 char buf[3]; 452 char buf[3];
453 u32 *val = file->private_data; 453 u32 *val = file->private_data;
454 454
455 if (*val) 455 if (*val)
456 buf[0] = 'Y'; 456 buf[0] = 'Y';
457 else 457 else
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 8c41b52da358..1e3b99d3db0d 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -66,7 +66,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev
66 break; 66 break;
67 } 67 }
68 } 68 }
69 return inode; 69 return inode;
70} 70}
71 71
72/* SMP-safe */ 72/* SMP-safe */
@@ -317,7 +317,7 @@ static struct dentry *__create_file(const char *name, umode_t mode,
317 goto exit; 317 goto exit;
318 318
319 /* If the parent is not specified, we create it in the root. 319 /* If the parent is not specified, we create it in the root.
320 * We need the root dentry to do this, which is in the super 320 * We need the root dentry to do this, which is in the super
321 * block. A pointer to that is in the struct vfsmount that we 321 * block. A pointer to that is in the struct vfsmount that we
322 * have around. 322 * have around.
323 */ 323 */
@@ -330,7 +330,7 @@ static struct dentry *__create_file(const char *name, umode_t mode,
330 switch (mode & S_IFMT) { 330 switch (mode & S_IFMT) {
331 case S_IFDIR: 331 case S_IFDIR:
332 error = debugfs_mkdir(parent->d_inode, dentry, mode); 332 error = debugfs_mkdir(parent->d_inode, dentry, mode);
333 333
334 break; 334 break;
335 case S_IFLNK: 335 case S_IFLNK:
336 error = debugfs_link(parent->d_inode, dentry, mode, 336 error = debugfs_link(parent->d_inode, dentry, mode,
@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
534 */ 534 */
535void debugfs_remove_recursive(struct dentry *dentry) 535void debugfs_remove_recursive(struct dentry *dentry)
536{ 536{
537 struct dentry *child, *next, *parent; 537 struct dentry *child, *parent;
538 538
539 if (IS_ERR_OR_NULL(dentry)) 539 if (IS_ERR_OR_NULL(dentry))
540 return; 540 return;
@@ -546,30 +546,49 @@ void debugfs_remove_recursive(struct dentry *dentry)
546 parent = dentry; 546 parent = dentry;
547 down: 547 down:
548 mutex_lock(&parent->d_inode->i_mutex); 548 mutex_lock(&parent->d_inode->i_mutex);
549 list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { 549 loop:
550 /*
551 * The parent->d_subdirs is protected by the d_lock. Outside that
552 * lock, the child can be unlinked and set to be freed which can
553 * use the d_u.d_child as the rcu head and corrupt this list.
554 */
555 spin_lock(&parent->d_lock);
556 list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
550 if (!debugfs_positive(child)) 557 if (!debugfs_positive(child))
551 continue; 558 continue;
552 559
553 /* perhaps simple_empty(child) makes more sense */ 560 /* perhaps simple_empty(child) makes more sense */
554 if (!list_empty(&child->d_subdirs)) { 561 if (!list_empty(&child->d_subdirs)) {
562 spin_unlock(&parent->d_lock);
555 mutex_unlock(&parent->d_inode->i_mutex); 563 mutex_unlock(&parent->d_inode->i_mutex);
556 parent = child; 564 parent = child;
557 goto down; 565 goto down;
558 } 566 }
559 up: 567
568 spin_unlock(&parent->d_lock);
569
560 if (!__debugfs_remove(child, parent)) 570 if (!__debugfs_remove(child, parent))
561 simple_release_fs(&debugfs_mount, &debugfs_mount_count); 571 simple_release_fs(&debugfs_mount, &debugfs_mount_count);
572
573 /*
574 * The parent->d_lock protects agaist child from unlinking
575 * from d_subdirs. When releasing the parent->d_lock we can
576 * no longer trust that the next pointer is valid.
577 * Restart the loop. We'll skip this one with the
578 * debugfs_positive() check.
579 */
580 goto loop;
562 } 581 }
582 spin_unlock(&parent->d_lock);
563 583
564 mutex_unlock(&parent->d_inode->i_mutex); 584 mutex_unlock(&parent->d_inode->i_mutex);
565 child = parent; 585 child = parent;
566 parent = parent->d_parent; 586 parent = parent->d_parent;
567 mutex_lock(&parent->d_inode->i_mutex); 587 mutex_lock(&parent->d_inode->i_mutex);
568 588
569 if (child != dentry) { 589 if (child != dentry)
570 next = list_next_entry(child, d_u.d_child); 590 /* go up */
571 goto up; 591 goto loop;
572 }
573 592
574 if (!__debugfs_remove(child, parent)) 593 if (!__debugfs_remove(child, parent))
575 simple_release_fs(&debugfs_mount, &debugfs_mount_count); 594 simple_release_fs(&debugfs_mount, &debugfs_mount_count);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index d895b4b7b661..4429d6d9217f 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -896,7 +896,7 @@ const struct file_operations kernfs_file_fops = {
896 * @ops: kernfs operations for the file 896 * @ops: kernfs operations for the file
897 * @priv: private data for the file 897 * @priv: private data for the file
898 * @ns: optional namespace tag of the file 898 * @ns: optional namespace tag of the file
899 * @static_name: don't copy file name 899 * @name_is_static: don't copy file name
900 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep 900 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
901 * 901 *
902 * Returns the created node on success, ERR_PTR() value on error. 902 * Returns the created node on success, ERR_PTR() value on error.
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 8af71a8e2c00..e41f17ea1f13 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -83,6 +83,7 @@ struct drm_device;
83 83
84struct device_node; 84struct device_node;
85struct videomode; 85struct videomode;
86struct reservation_object;
86 87
87#include <drm/drm_os_linux.h> 88#include <drm/drm_os_linux.h>
88#include <drm/drm_hashtab.h> 89#include <drm/drm_hashtab.h>
@@ -923,6 +924,8 @@ struct drm_driver {
923 /* low-level interface used by drm_gem_prime_{import,export} */ 924 /* low-level interface used by drm_gem_prime_{import,export} */
924 int (*gem_prime_pin)(struct drm_gem_object *obj); 925 int (*gem_prime_pin)(struct drm_gem_object *obj);
925 void (*gem_prime_unpin)(struct drm_gem_object *obj); 926 void (*gem_prime_unpin)(struct drm_gem_object *obj);
927 struct reservation_object * (*gem_prime_res_obj)(
928 struct drm_gem_object *obj);
926 struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); 929 struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
927 struct drm_gem_object *(*gem_prime_import_sg_table)( 930 struct drm_gem_object *(*gem_prime_import_sg_table)(
928 struct drm_device *dev, size_t size, 931 struct drm_device *dev, size_t size,
diff --git a/include/linux/component.h b/include/linux/component.h
index 68870182ca1e..c00dcc302611 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -29,4 +29,11 @@ void component_master_del(struct device *,
29int component_master_add_child(struct master *master, 29int component_master_add_child(struct master *master,
30 int (*compare)(struct device *, void *), void *compare_data); 30 int (*compare)(struct device *, void *), void *compare_data);
31 31
32struct component_match;
33
34int component_master_add_with_match(struct device *,
35 const struct component_master_ops *, struct component_match *);
36void component_match_add(struct device *, struct component_match **,
37 int (*compare)(struct device *, void *), void *compare_data);
38
32#endif 39#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index af424acd393d..921fa0a74df6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -631,8 +631,6 @@ extern unsigned long devm_get_free_pages(struct device *dev,
631extern void devm_free_pages(struct device *dev, unsigned long addr); 631extern void devm_free_pages(struct device *dev, unsigned long addr);
632 632
633void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); 633void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
634void __iomem *devm_request_and_ioremap(struct device *dev,
635 struct resource *res);
636 634
637/* allows to add/remove a custom action to devres stack */ 635/* allows to add/remove a custom action to devres stack */
638int devm_add_action(struct device *dev, void (*action)(void *), void *data); 636int devm_add_action(struct device *dev, void (*action)(void *), void *data);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f886985a28b2..694e1fe1c4b4 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -30,6 +30,8 @@
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/fence.h>
34#include <linux/wait.h>
33 35
34struct device; 36struct device;
35struct dma_buf; 37struct dma_buf;
@@ -115,6 +117,7 @@ struct dma_buf_ops {
115 * @exp_name: name of the exporter; useful for debugging. 117 * @exp_name: name of the exporter; useful for debugging.
116 * @list_node: node for dma_buf accounting and debugging. 118 * @list_node: node for dma_buf accounting and debugging.
117 * @priv: exporter specific private data for this buffer object. 119 * @priv: exporter specific private data for this buffer object.
120 * @resv: reservation object linked to this dma-buf
118 */ 121 */
119struct dma_buf { 122struct dma_buf {
120 size_t size; 123 size_t size;
@@ -128,6 +131,17 @@ struct dma_buf {
128 const char *exp_name; 131 const char *exp_name;
129 struct list_head list_node; 132 struct list_head list_node;
130 void *priv; 133 void *priv;
134 struct reservation_object *resv;
135
136 /* poll support */
137 wait_queue_head_t poll;
138
139 struct dma_buf_poll_cb_t {
140 struct fence_cb cb;
141 wait_queue_head_t *poll;
142
143 unsigned long active;
144 } cb_excl, cb_shared;
131}; 145};
132 146
133/** 147/**
@@ -168,10 +182,11 @@ void dma_buf_detach(struct dma_buf *dmabuf,
168 struct dma_buf_attachment *dmabuf_attach); 182 struct dma_buf_attachment *dmabuf_attach);
169 183
170struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, 184struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
171 size_t size, int flags, const char *); 185 size_t size, int flags, const char *,
186 struct reservation_object *);
172 187
173#define dma_buf_export(priv, ops, size, flags) \ 188#define dma_buf_export(priv, ops, size, flags, resv) \
174 dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME) 189 dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
175 190
176int dma_buf_fd(struct dma_buf *dmabuf, int flags); 191int dma_buf_fd(struct dma_buf *dmabuf, int flags);
177struct dma_buf *dma_buf_get(int fd); 192struct dma_buf *dma_buf_get(int fd);
diff --git a/include/linux/fence.h b/include/linux/fence.h
new file mode 100644
index 000000000000..d174585b874b
--- /dev/null
+++ b/include/linux/fence.h
@@ -0,0 +1,360 @@
1/*
2 * Fence mechanism for dma-buf to allow for asynchronous dma access
3 *
4 * Copyright (C) 2012 Canonical Ltd
5 * Copyright (C) 2012 Texas Instruments
6 *
7 * Authors:
8 * Rob Clark <robdclark@gmail.com>
9 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published by
13 * the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 */
20
21#ifndef __LINUX_FENCE_H
22#define __LINUX_FENCE_H
23
24#include <linux/err.h>
25#include <linux/wait.h>
26#include <linux/list.h>
27#include <linux/bitops.h>
28#include <linux/kref.h>
29#include <linux/sched.h>
30#include <linux/printk.h>
31#include <linux/rcupdate.h>
32
33struct fence;
34struct fence_ops;
35struct fence_cb;
36
37/**
38 * struct fence - software synchronization primitive
39 * @refcount: refcount for this fence
40 * @ops: fence_ops associated with this fence
41 * @rcu: used for releasing fence with kfree_rcu
42 * @cb_list: list of all callbacks to call
43 * @lock: spin_lock_irqsave used for locking
44 * @context: execution context this fence belongs to, returned by
45 * fence_context_alloc()
46 * @seqno: the sequence number of this fence inside the execution context,
47 * can be compared to decide which fence would be signaled later.
48 * @flags: A mask of FENCE_FLAG_* defined below
49 * @timestamp: Timestamp when the fence was signaled.
50 * @status: Optional, only valid if < 0, must be set before calling
51 * fence_signal, indicates that the fence has completed with an error.
52 *
53 * the flags member must be manipulated and read using the appropriate
54 * atomic ops (bit_*), so taking the spinlock will not be needed most
55 * of the time.
56 *
57 * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
58 * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
59 * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
60 * implementer of the fence for its own purposes. Can be used in different
61 * ways by different fence implementers, so do not rely on this.
62 *
63 * *) Since atomic bitops are used, this is not guaranteed to be the case.
64 * Particularly, if the bit was set, but fence_signal was called right
65 * before this bit was set, it would have been able to set the
66 * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
67 * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
68 * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
69 * after fence_signal was called, any enable_signaling call will have either
70 * been completed, or never called at all.
71 */
72struct fence {
73 struct kref refcount;
74 const struct fence_ops *ops;
75 struct rcu_head rcu;
76 struct list_head cb_list;
77 spinlock_t *lock;
78 unsigned context, seqno;
79 unsigned long flags;
80 ktime_t timestamp;
81 int status;
82};
83
84enum fence_flag_bits {
85 FENCE_FLAG_SIGNALED_BIT,
86 FENCE_FLAG_ENABLE_SIGNAL_BIT,
87 FENCE_FLAG_USER_BITS, /* must always be last member */
88};
89
90typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
91
92/**
93 * struct fence_cb - callback for fence_add_callback
94 * @node: used by fence_add_callback to append this struct to fence::cb_list
95 * @func: fence_func_t to call
96 *
97 * This struct will be initialized by fence_add_callback, additional
98 * data can be passed along by embedding fence_cb in another struct.
99 */
100struct fence_cb {
101 struct list_head node;
102 fence_func_t func;
103};
104
105/**
106 * struct fence_ops - operations implemented for fence
107 * @get_driver_name: returns the driver name.
108 * @get_timeline_name: return the name of the context this fence belongs to.
109 * @enable_signaling: enable software signaling of fence.
110 * @signaled: [optional] peek whether the fence is signaled, can be null.
111 * @wait: custom wait implementation, or fence_default_wait.
112 * @release: [optional] called on destruction of fence, can be null
113 * @fill_driver_data: [optional] callback to fill in free-form debug info
114 * Returns amount of bytes filled, or -errno.
115 * @fence_value_str: [optional] fills in the value of the fence as a string
116 * @timeline_value_str: [optional] fills in the current value of the timeline
117 * as a string
118 *
119 * Notes on enable_signaling:
120 * For fence implementations that have the capability for hw->hw
121 * signaling, they can implement this op to enable the necessary
122 * irqs, or insert commands into cmdstream, etc. This is called
123 * in the first wait() or add_callback() path to let the fence
124 * implementation know that there is another driver waiting on
125 * the signal (ie. hw->sw case).
126 *
127 * This function can be called called from atomic context, but not
128 * from irq context, so normal spinlocks can be used.
129 *
130 * A return value of false indicates the fence already passed,
131 * or some failure occured that made it impossible to enable
132 * signaling. True indicates succesful enabling.
133 *
134 * fence->status may be set in enable_signaling, but only when false is
135 * returned.
136 *
137 * Calling fence_signal before enable_signaling is called allows
138 * for a tiny race window in which enable_signaling is called during,
139 * before, or after fence_signal. To fight this, it is recommended
140 * that before enable_signaling returns true an extra reference is
141 * taken on the fence, to be released when the fence is signaled.
142 * This will mean fence_signal will still be called twice, but
143 * the second time will be a noop since it was already signaled.
144 *
145 * Notes on signaled:
146 * May set fence->status if returning true.
147 *
148 * Notes on wait:
149 * Must not be NULL, set to fence_default_wait for default implementation.
150 * the fence_default_wait implementation should work for any fence, as long
151 * as enable_signaling works correctly.
152 *
153 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
154 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
155 * timed out. Can also return other error values on custom implementations,
156 * which should be treated as if the fence is signaled. For example a hardware
157 * lockup could be reported like that.
158 *
159 * Notes on release:
160 * Can be NULL, this function allows additional commands to run on
161 * destruction of the fence. Can be called from irq context.
162 * If pointer is set to NULL, kfree will get called instead.
163 */
164
165struct fence_ops {
166 const char * (*get_driver_name)(struct fence *fence);
167 const char * (*get_timeline_name)(struct fence *fence);
168 bool (*enable_signaling)(struct fence *fence);
169 bool (*signaled)(struct fence *fence);
170 signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
171 void (*release)(struct fence *fence);
172
173 int (*fill_driver_data)(struct fence *fence, void *data, int size);
174 void (*fence_value_str)(struct fence *fence, char *str, int size);
175 void (*timeline_value_str)(struct fence *fence, char *str, int size);
176};
177
178void fence_init(struct fence *fence, const struct fence_ops *ops,
179 spinlock_t *lock, unsigned context, unsigned seqno);
180
181void fence_release(struct kref *kref);
182void fence_free(struct fence *fence);
183
184/**
185 * fence_get - increases refcount of the fence
186 * @fence: [in] fence to increase refcount of
187 *
188 * Returns the same fence, with refcount increased by 1.
189 */
190static inline struct fence *fence_get(struct fence *fence)
191{
192 if (fence)
193 kref_get(&fence->refcount);
194 return fence;
195}
196
197/**
198 * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
199 * @fence: [in] fence to increase refcount of
200 *
201 * Function returns NULL if no refcount could be obtained, or the fence.
202 */
203static inline struct fence *fence_get_rcu(struct fence *fence)
204{
205 if (kref_get_unless_zero(&fence->refcount))
206 return fence;
207 else
208 return NULL;
209}
210
211/**
212 * fence_put - decreases refcount of the fence
213 * @fence: [in] fence to reduce refcount of
214 */
215static inline void fence_put(struct fence *fence)
216{
217 if (fence)
218 kref_put(&fence->refcount, fence_release);
219}
220
221int fence_signal(struct fence *fence);
222int fence_signal_locked(struct fence *fence);
223signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
224int fence_add_callback(struct fence *fence, struct fence_cb *cb,
225 fence_func_t func);
226bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
227void fence_enable_sw_signaling(struct fence *fence);
228
229/**
230 * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
231 * @fence: [in] the fence to check
232 *
233 * Returns true if the fence was already signaled, false if not. Since this
234 * function doesn't enable signaling, it is not guaranteed to ever return
235 * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
236 * haven't been called before.
237 *
238 * This function requires fence->lock to be held.
239 */
240static inline bool
241fence_is_signaled_locked(struct fence *fence)
242{
243 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
244 return true;
245
246 if (fence->ops->signaled && fence->ops->signaled(fence)) {
247 fence_signal_locked(fence);
248 return true;
249 }
250
251 return false;
252}
253
254/**
255 * fence_is_signaled - Return an indication if the fence is signaled yet.
256 * @fence: [in] the fence to check
257 *
258 * Returns true if the fence was already signaled, false if not. Since this
259 * function doesn't enable signaling, it is not guaranteed to ever return
260 * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
261 * haven't been called before.
262 *
263 * It's recommended for seqno fences to call fence_signal when the
264 * operation is complete, it makes it possible to prevent issues from
265 * wraparound between time of issue and time of use by checking the return
266 * value of this function before calling hardware-specific wait instructions.
267 */
268static inline bool
269fence_is_signaled(struct fence *fence)
270{
271 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
272 return true;
273
274 if (fence->ops->signaled && fence->ops->signaled(fence)) {
275 fence_signal(fence);
276 return true;
277 }
278
279 return false;
280}
281
282/**
283 * fence_later - return the chronologically later fence
284 * @f1: [in] the first fence from the same context
285 * @f2: [in] the second fence from the same context
286 *
287 * Returns NULL if both fences are signaled, otherwise the fence that would be
288 * signaled last. Both fences must be from the same context, since a seqno is
289 * not re-used across contexts.
290 */
291static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
292{
293 if (WARN_ON(f1->context != f2->context))
294 return NULL;
295
296 /*
297 * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
298 * set if enable_signaling wasn't called, and enabling that here is
299 * overkill.
300 */
301 if (f2->seqno - f1->seqno <= INT_MAX)
302 return fence_is_signaled(f2) ? NULL : f2;
303 else
304 return fence_is_signaled(f1) ? NULL : f1;
305}
306
307signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
308
309
310/**
311 * fence_wait - sleep until the fence gets signaled
312 * @fence: [in] the fence to wait on
313 * @intr: [in] if true, do an interruptible wait
314 *
315 * This function will return -ERESTARTSYS if interrupted by a signal,
316 * or 0 if the fence was signaled. Other error values may be
317 * returned on custom implementations.
318 *
319 * Performs a synchronous wait on this fence. It is assumed the caller
320 * directly or indirectly holds a reference to the fence, otherwise the
321 * fence might be freed before return, resulting in undefined behavior.
322 */
323static inline signed long fence_wait(struct fence *fence, bool intr)
324{
325 signed long ret;
326
327 /* Since fence_wait_timeout cannot timeout with
328 * MAX_SCHEDULE_TIMEOUT, only valid return values are
329 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
330 */
331 ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
332
333 return ret < 0 ? ret : 0;
334}
335
336unsigned fence_context_alloc(unsigned num);
337
338#define FENCE_TRACE(f, fmt, args...) \
339 do { \
340 struct fence *__ff = (f); \
341 if (config_enabled(CONFIG_FENCE_TRACE)) \
342 pr_info("f %u#%u: " fmt, \
343 __ff->context, __ff->seqno, ##args); \
344 } while (0)
345
346#define FENCE_WARN(f, fmt, args...) \
347 do { \
348 struct fence *__ff = (f); \
349 pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
350 ##args); \
351 } while (0)
352
353#define FENCE_ERR(f, fmt, args...) \
354 do { \
355 struct fence *__ff = (f); \
356 pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
357 ##args); \
358 } while (0)
359
360#endif /* __LINUX_FENCE_H */
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 59529330efd6..5c41c5e75b5c 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -45,6 +45,8 @@ int request_firmware_nowait(
45 struct module *module, bool uevent, 45 struct module *module, bool uevent,
46 const char *name, struct device *device, gfp_t gfp, void *context, 46 const char *name, struct device *device, gfp_t gfp, void *context,
47 void (*cont)(const struct firmware *fw, void *context)); 47 void (*cont)(const struct firmware *fw, void *context));
48int request_firmware_direct(const struct firmware **fw, const char *name,
49 struct device *device);
48 50
49void release_firmware(const struct firmware *fw); 51void release_firmware(const struct firmware *fw);
50#else 52#else
@@ -66,13 +68,12 @@ static inline void release_firmware(const struct firmware *fw)
66{ 68{
67} 69}
68 70
69#endif 71static inline int request_firmware_direct(const struct firmware **fw,
72 const char *name,
73 struct device *device)
74{
75 return -EINVAL;
76}
70 77
71#ifdef CONFIG_FW_LOADER_USER_HELPER
72int request_firmware_direct(const struct firmware **fw, const char *name,
73 struct device *device);
74#else
75#define request_firmware_direct request_firmware
76#endif 78#endif
77
78#endif 79#endif
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 16f6654082dd..153d303af7eb 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -28,6 +28,7 @@ struct platform_device {
28 struct resource *resource; 28 struct resource *resource;
29 29
30 const struct platform_device_id *id_entry; 30 const struct platform_device_id *id_entry;
31 char *driver_override; /* Driver name to force a match */
31 32
32 /* MFD cell pointer */ 33 /* MFD cell pointer */
33 struct mfd_cell *mfd_cell; 34 struct mfd_cell *mfd_cell;
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 813dae960ebd..5a0b64cf68b4 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -6,7 +6,7 @@
6 * Copyright (C) 2012 Texas Instruments 6 * Copyright (C) 2012 Texas Instruments
7 * 7 *
8 * Authors: 8 * Authors:
9 * Rob Clark <rob.clark@linaro.org> 9 * Rob Clark <robdclark@gmail.com>
10 * Maarten Lankhorst <maarten.lankhorst@canonical.com> 10 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
11 * Thomas Hellstrom <thellstrom-at-vmware-dot-com> 11 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
12 * 12 *
@@ -40,23 +40,103 @@
40#define _LINUX_RESERVATION_H 40#define _LINUX_RESERVATION_H
41 41
42#include <linux/ww_mutex.h> 42#include <linux/ww_mutex.h>
43#include <linux/fence.h>
44#include <linux/slab.h>
45#include <linux/seqlock.h>
46#include <linux/rcupdate.h>
43 47
44extern struct ww_class reservation_ww_class; 48extern struct ww_class reservation_ww_class;
49extern struct lock_class_key reservation_seqcount_class;
50extern const char reservation_seqcount_string[];
51
52struct reservation_object_list {
53 struct rcu_head rcu;
54 u32 shared_count, shared_max;
55 struct fence __rcu *shared[];
56};
45 57
46struct reservation_object { 58struct reservation_object {
47 struct ww_mutex lock; 59 struct ww_mutex lock;
60 seqcount_t seq;
61
62 struct fence __rcu *fence_excl;
63 struct reservation_object_list __rcu *fence;
64 struct reservation_object_list *staged;
48}; 65};
49 66
67#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
68#define reservation_object_assert_held(obj) \
69 lockdep_assert_held(&(obj)->lock.base)
70
50static inline void 71static inline void
51reservation_object_init(struct reservation_object *obj) 72reservation_object_init(struct reservation_object *obj)
52{ 73{
53 ww_mutex_init(&obj->lock, &reservation_ww_class); 74 ww_mutex_init(&obj->lock, &reservation_ww_class);
75
76 __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
77 RCU_INIT_POINTER(obj->fence, NULL);
78 RCU_INIT_POINTER(obj->fence_excl, NULL);
79 obj->staged = NULL;
54} 80}
55 81
56static inline void 82static inline void
57reservation_object_fini(struct reservation_object *obj) 83reservation_object_fini(struct reservation_object *obj)
58{ 84{
85 int i;
86 struct reservation_object_list *fobj;
87 struct fence *excl;
88
89 /*
90 * This object should be dead and all references must have
91 * been released to it, so no need to be protected with rcu.
92 */
93 excl = rcu_dereference_protected(obj->fence_excl, 1);
94 if (excl)
95 fence_put(excl);
96
97 fobj = rcu_dereference_protected(obj->fence, 1);
98 if (fobj) {
99 for (i = 0; i < fobj->shared_count; ++i)
100 fence_put(rcu_dereference_protected(fobj->shared[i], 1));
101
102 kfree(fobj);
103 }
104 kfree(obj->staged);
105
59 ww_mutex_destroy(&obj->lock); 106 ww_mutex_destroy(&obj->lock);
60} 107}
61 108
109static inline struct reservation_object_list *
110reservation_object_get_list(struct reservation_object *obj)
111{
112 return rcu_dereference_protected(obj->fence,
113 reservation_object_held(obj));
114}
115
116static inline struct fence *
117reservation_object_get_excl(struct reservation_object *obj)
118{
119 return rcu_dereference_protected(obj->fence_excl,
120 reservation_object_held(obj));
121}
122
123int reservation_object_reserve_shared(struct reservation_object *obj);
124void reservation_object_add_shared_fence(struct reservation_object *obj,
125 struct fence *fence);
126
127void reservation_object_add_excl_fence(struct reservation_object *obj,
128 struct fence *fence);
129
130int reservation_object_get_fences_rcu(struct reservation_object *obj,
131 struct fence **pfence_excl,
132 unsigned *pshared_count,
133 struct fence ***pshared);
134
135long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
136 bool wait_all, bool intr,
137 unsigned long timeout);
138
139bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
140 bool test_all);
141
62#endif /* _LINUX_RESERVATION_H */ 142#endif /* _LINUX_RESERVATION_H */
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
new file mode 100644
index 000000000000..3d6003de4b0d
--- /dev/null
+++ b/include/linux/seqno-fence.h
@@ -0,0 +1,116 @@
1/*
2 * seqno-fence, using a dma-buf to synchronize fencing
3 *
4 * Copyright (C) 2012 Texas Instruments
5 * Copyright (C) 2012 Canonical Ltd
6 * Authors:
7 * Rob Clark <robdclark@gmail.com>
8 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 */
19
20#ifndef __LINUX_SEQNO_FENCE_H
21#define __LINUX_SEQNO_FENCE_H
22
23#include <linux/fence.h>
24#include <linux/dma-buf.h>
25
26enum seqno_fence_condition {
27 SEQNO_FENCE_WAIT_GEQUAL,
28 SEQNO_FENCE_WAIT_NONZERO
29};
30
31struct seqno_fence {
32 struct fence base;
33
34 const struct fence_ops *ops;
35 struct dma_buf *sync_buf;
36 uint32_t seqno_ofs;
37 enum seqno_fence_condition condition;
38};
39
40extern const struct fence_ops seqno_fence_ops;
41
42/**
43 * to_seqno_fence - cast a fence to a seqno_fence
44 * @fence: fence to cast to a seqno_fence
45 *
46 * Returns NULL if the fence is not a seqno_fence,
47 * or the seqno_fence otherwise.
48 */
49static inline struct seqno_fence *
50to_seqno_fence(struct fence *fence)
51{
52 if (fence->ops != &seqno_fence_ops)
53 return NULL;
54 return container_of(fence, struct seqno_fence, base);
55}
56
57/**
58 * seqno_fence_init - initialize a seqno fence
59 * @fence: seqno_fence to initialize
60 * @lock: pointer to spinlock to use for fence
61 * @sync_buf: buffer containing the memory location to signal on
62 * @context: the execution context this fence is a part of
63 * @seqno_ofs: the offset within @sync_buf
64 * @seqno: the sequence # to signal on
65 * @ops: the fence_ops for operations on this seqno fence
66 *
67 * This function initializes a struct seqno_fence with passed parameters,
68 * and takes a reference on sync_buf which is released on fence destruction.
69 *
70 * A seqno_fence is a dma_fence which can complete in software when
71 * enable_signaling is called, but it also completes when
72 * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
73 *
74 * The seqno_fence will take a refcount on the sync_buf until it's
75 * destroyed, but actual lifetime of sync_buf may be longer if one of the
76 * callers take a reference to it.
77 *
78 * Certain hardware have instructions to insert this type of wait condition
79 * in the command stream, so no intervention from software would be needed.
80 * This type of fence can be destroyed before completed, however a reference
81 * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
82 * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
83 * device's vm can be expensive.
84 *
85 * It is recommended for creators of seqno_fence to call fence_signal
86 * before destruction. This will prevent possible issues from wraparound at
87 * time of issue vs time of check, since users can check fence_is_signaled
88 * before submitting instructions for the hardware to wait on the fence.
89 * However, when ops.enable_signaling is not called, it doesn't have to be
90 * done as soon as possible, just before there's any real danger of seqno
91 * wraparound.
92 */
93static inline void
94seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
95 struct dma_buf *sync_buf, uint32_t context,
96 uint32_t seqno_ofs, uint32_t seqno,
97 enum seqno_fence_condition cond,
98 const struct fence_ops *ops)
99{
100 BUG_ON(!fence || !sync_buf || !ops);
101 BUG_ON(!ops->wait || !ops->enable_signaling ||
102 !ops->get_driver_name || !ops->get_timeline_name);
103
104 /*
105 * ops is used in fence_init for get_driver_name, so needs to be
106 * initialized first
107 */
108 fence->ops = ops;
109 fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
110 get_dma_buf(sync_buf);
111 fence->sync_buf = sync_buf;
112 fence->seqno_ofs = seqno_ofs;
113 fence->condition = cond;
114}
115
116#endif /* __LINUX_SEQNO_FENCE_H */
diff --git a/include/trace/events/fence.h b/include/trace/events/fence.h
new file mode 100644
index 000000000000..98feb1b82896
--- /dev/null
+++ b/include/trace/events/fence.h
@@ -0,0 +1,128 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM fence
3
4#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_FENCE_H
6
7#include <linux/tracepoint.h>
8
9struct fence;
10
11TRACE_EVENT(fence_annotate_wait_on,
12
13 /* fence: the fence waiting on f1, f1: the fence to be waited on. */
14 TP_PROTO(struct fence *fence, struct fence *f1),
15
16 TP_ARGS(fence, f1),
17
18 TP_STRUCT__entry(
19 __string(driver, fence->ops->get_driver_name(fence))
 20 __string(timeline, fence->ops->get_timeline_name(fence))
21 __field(unsigned int, context)
22 __field(unsigned int, seqno)
23
24 __string(waiting_driver, f1->ops->get_driver_name(f1))
25 __string(waiting_timeline, f1->ops->get_timeline_name(f1))
26 __field(unsigned int, waiting_context)
27 __field(unsigned int, waiting_seqno)
28 ),
29
30 TP_fast_assign(
31 __assign_str(driver, fence->ops->get_driver_name(fence))
32 __assign_str(timeline, fence->ops->get_timeline_name(fence))
33 __entry->context = fence->context;
34 __entry->seqno = fence->seqno;
35
36 __assign_str(waiting_driver, f1->ops->get_driver_name(f1))
37 __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
38 __entry->waiting_context = f1->context;
39 __entry->waiting_seqno = f1->seqno;
40
41 ),
42
43 TP_printk("driver=%s timeline=%s context=%u seqno=%u " \
44 "waits on driver=%s timeline=%s context=%u seqno=%u",
45 __get_str(driver), __get_str(timeline), __entry->context,
46 __entry->seqno,
47 __get_str(waiting_driver), __get_str(waiting_timeline),
48 __entry->waiting_context, __entry->waiting_seqno)
49);
50
51DECLARE_EVENT_CLASS(fence,
52
53 TP_PROTO(struct fence *fence),
54
55 TP_ARGS(fence),
56
57 TP_STRUCT__entry(
58 __string(driver, fence->ops->get_driver_name(fence))
59 __string(timeline, fence->ops->get_timeline_name(fence))
60 __field(unsigned int, context)
61 __field(unsigned int, seqno)
62 ),
63
64 TP_fast_assign(
65 __assign_str(driver, fence->ops->get_driver_name(fence))
66 __assign_str(timeline, fence->ops->get_timeline_name(fence))
67 __entry->context = fence->context;
68 __entry->seqno = fence->seqno;
69 ),
70
71 TP_printk("driver=%s timeline=%s context=%u seqno=%u",
72 __get_str(driver), __get_str(timeline), __entry->context,
73 __entry->seqno)
74);
75
76DEFINE_EVENT(fence, fence_emit,
77
78 TP_PROTO(struct fence *fence),
79
80 TP_ARGS(fence)
81);
82
83DEFINE_EVENT(fence, fence_init,
84
85 TP_PROTO(struct fence *fence),
86
87 TP_ARGS(fence)
88);
89
90DEFINE_EVENT(fence, fence_destroy,
91
92 TP_PROTO(struct fence *fence),
93
94 TP_ARGS(fence)
95);
96
97DEFINE_EVENT(fence, fence_enable_signal,
98
99 TP_PROTO(struct fence *fence),
100
101 TP_ARGS(fence)
102);
103
104DEFINE_EVENT(fence, fence_signaled,
105
106 TP_PROTO(struct fence *fence),
107
108 TP_ARGS(fence)
109);
110
111DEFINE_EVENT(fence, fence_wait_start,
112
113 TP_PROTO(struct fence *fence),
114
115 TP_ARGS(fence)
116);
117
118DEFINE_EVENT(fence, fence_wait_end,
119
120 TP_PROTO(struct fence *fence),
121
122 TP_ARGS(fence)
123);
124
125#endif /* _TRACE_FENCE_H */
126
127/* This part must be outside protection */
128#include <trace/define_trace.h>
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 901096d31c66..f8f45ec0ed46 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1635,6 +1635,19 @@ config TEST_BPF
1635 1635
1636 If unsure, say N. 1636 If unsure, say N.
1637 1637
1638config TEST_FIRMWARE
1639 tristate "Test firmware loading via userspace interface"
1640 default n
1641 depends on FW_LOADER
1642 help
1643 This builds the "test_firmware" module that creates a userspace
1644 interface for testing firmware loading. This can be used to
1645 control the triggering of firmware loading without needing an
1646 actual firmware-using device. The contents can be rechecked by
1647 userspace.
1648
1649 If unsure, say N.
1650
1638source "samples/Kconfig" 1651source "samples/Kconfig"
1639 1652
1640source "lib/Kconfig.kgdb" 1653source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index ba967a19edba..230b4b1456d6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
34obj-$(CONFIG_TEST_MODULE) += test_module.o 34obj-$(CONFIG_TEST_MODULE) += test_module.o
35obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o 35obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
36obj-$(CONFIG_TEST_BPF) += test_bpf.o 36obj-$(CONFIG_TEST_BPF) += test_bpf.o
37obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
37 38
38ifeq ($(CONFIG_DEBUG_KOBJECT),y) 39ifeq ($(CONFIG_DEBUG_KOBJECT),y)
39CFLAGS_kobject.o += -DDEBUG 40CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/devres.c b/lib/devres.c
index f562bf6ff71d..6a4aee8a3a7e 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -142,34 +142,6 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
142} 142}
143EXPORT_SYMBOL(devm_ioremap_resource); 143EXPORT_SYMBOL(devm_ioremap_resource);
144 144
145/**
146 * devm_request_and_ioremap() - Check, request region, and ioremap resource
147 * @dev: Generic device to handle the resource for
148 * @res: resource to be handled
149 *
150 * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
151 * everything is undone on driver detach. Checks arguments, so you can feed
152 * it the result from e.g. platform_get_resource() directly. Returns the
153 * remapped pointer or NULL on error. Usage example:
154 *
155 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
156 * base = devm_request_and_ioremap(&pdev->dev, res);
157 * if (!base)
158 * return -EADDRNOTAVAIL;
159 */
160void __iomem *devm_request_and_ioremap(struct device *dev,
161 struct resource *res)
162{
163 void __iomem *dest_ptr;
164
165 dest_ptr = devm_ioremap_resource(dev, res);
166 if (IS_ERR(dest_ptr))
167 return NULL;
168
169 return dest_ptr;
170}
171EXPORT_SYMBOL(devm_request_and_ioremap);
172
173#ifdef CONFIG_HAS_IOPORT_MAP 145#ifdef CONFIG_HAS_IOPORT_MAP
174/* 146/*
175 * Generic iomap devres 147 * Generic iomap devres
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
new file mode 100644
index 000000000000..86374c1c49a4
--- /dev/null
+++ b/lib/test_firmware.c
@@ -0,0 +1,117 @@
1/*
2 * This module provides an interface to trigger and test firmware loading.
3 *
4 * It is designed to be used for basic evaluation of the firmware loading
5 * subsystem (for example when validating firmware verification). It lacks
6 * any extra dependencies, and will not normally be loaded by the system
7 * unless explicitly requested by name.
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/printk.h>
15#include <linux/firmware.h>
16#include <linux/device.h>
17#include <linux/fs.h>
18#include <linux/miscdevice.h>
19#include <linux/slab.h>
20#include <linux/uaccess.h>
21
22static DEFINE_MUTEX(test_fw_mutex);
23static const struct firmware *test_firmware;
24
25static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
26 size_t size, loff_t *offset)
27{
28 ssize_t rc = 0;
29
30 mutex_lock(&test_fw_mutex);
31 if (test_firmware)
32 rc = simple_read_from_buffer(buf, size, offset,
33 test_firmware->data,
34 test_firmware->size);
35 mutex_unlock(&test_fw_mutex);
36 return rc;
37}
38
39static const struct file_operations test_fw_fops = {
40 .owner = THIS_MODULE,
41 .read = test_fw_misc_read,
42};
43
44static struct miscdevice test_fw_misc_device = {
45 .minor = MISC_DYNAMIC_MINOR,
46 .name = "test_firmware",
47 .fops = &test_fw_fops,
48};
49
50static ssize_t trigger_request_store(struct device *dev,
51 struct device_attribute *attr,
52 const char *buf, size_t count)
53{
54 int rc;
55 char *name;
56
57 name = kzalloc(count + 1, GFP_KERNEL);
58 if (!name)
59 return -ENOSPC;
60 memcpy(name, buf, count);
61
62 pr_info("loading '%s'\n", name);
63
64 mutex_lock(&test_fw_mutex);
65 release_firmware(test_firmware);
66 test_firmware = NULL;
67 rc = request_firmware(&test_firmware, name, dev);
68 if (rc)
69 pr_info("load of '%s' failed: %d\n", name, rc);
70 pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
71 mutex_unlock(&test_fw_mutex);
72
73 kfree(name);
74
75 return count;
76}
77static DEVICE_ATTR_WO(trigger_request);
78
79static int __init test_firmware_init(void)
80{
81 int rc;
82
83 rc = misc_register(&test_fw_misc_device);
84 if (rc) {
85 pr_err("could not register misc device: %d\n", rc);
86 return rc;
87 }
88 rc = device_create_file(test_fw_misc_device.this_device,
89 &dev_attr_trigger_request);
90 if (rc) {
91 pr_err("could not create sysfs interface: %d\n", rc);
92 goto dereg;
93 }
94
95 pr_warn("interface ready\n");
96
97 return 0;
98dereg:
99 misc_deregister(&test_fw_misc_device);
100 return rc;
101}
102
103module_init(test_firmware_init);
104
105static void __exit test_firmware_exit(void)
106{
107 release_firmware(test_firmware);
108 device_remove_file(test_fw_misc_device.this_device,
109 &dev_attr_trigger_request);
110 misc_deregister(&test_fw_misc_device);
111 pr_warn("removed interface\n");
112}
113
114module_exit(test_firmware_exit);
115
116MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
117MODULE_LICENSE("GPL");
diff --git a/scripts/coccinelle/api/devm_ioremap_resource.cocci b/scripts/coccinelle/api/devm_ioremap_resource.cocci
deleted file mode 100644
index 495daa3dbf77..000000000000
--- a/scripts/coccinelle/api/devm_ioremap_resource.cocci
+++ /dev/null
@@ -1,90 +0,0 @@
1virtual patch
2virtual report
3
4@depends on patch@
5expression base, dev, res;
6@@
7
8-base = devm_request_and_ioremap(dev, res);
9+base = devm_ioremap_resource(dev, res);
10 ...
11 if (
12-base == NULL
13+IS_ERR(base)
14 || ...) {
15<...
16- return ...;
17+ return PTR_ERR(base);
18...>
19 }
20
21@depends on patch@
22expression e, E, ret;
23identifier l;
24@@
25
26 e = devm_ioremap_resource(...);
27 ...
28 if (IS_ERR(e) || ...) {
29 ... when any
30- ret = E;
31+ ret = PTR_ERR(e);
32 ...
33(
34 return ret;
35|
36 goto l;
37)
38 }
39
40@depends on patch@
41expression e;
42@@
43
44 e = devm_ioremap_resource(...);
45 ...
46 if (IS_ERR(e) || ...) {
47 ...
48- \(dev_dbg\|dev_err\|pr_debug\|pr_err\|DRM_ERROR\)(...);
49 ...
50 }
51
52@depends on patch@
53expression e;
54identifier l;
55@@
56
57 e = devm_ioremap_resource(...);
58 ...
59 if (IS_ERR(e) || ...)
60-{
61(
62 return ...;
63|
64 goto l;
65)
66-}
67
68@r depends on report@
69expression e;
70identifier l;
71position p1;
72@@
73
74*e = devm_request_and_ioremap@p1(...);
75 ...
76 if (e == NULL || ...) {
77 ...
78(
79 return ...;
80|
81 goto l;
82)
83 }
84
85@script:python depends on r@
86p1 << r.p1;
87@@
88
89msg = "ERROR: deprecated devm_request_and_ioremap() API used on line %s" % (p1[0].line)
90coccilib.report.print_report(p1[0], msg)
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 4c2aa357e12f..d10f95ce2ea4 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -11,6 +11,7 @@ TARGETS += vm
11TARGETS += powerpc 11TARGETS += powerpc
12TARGETS += user 12TARGETS += user
13TARGETS += sysctl 13TARGETS += sysctl
14TARGETS += firmware
14 15
15TARGETS_HOTPLUG = cpu-hotplug 16TARGETS_HOTPLUG = cpu-hotplug
16TARGETS_HOTPLUG += memory-hotplug 17TARGETS_HOTPLUG += memory-hotplug
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
new file mode 100644
index 000000000000..e23cce0bbc3a
--- /dev/null
+++ b/tools/testing/selftests/firmware/Makefile
@@ -0,0 +1,27 @@
1# Makefile for firmware loading selftests
2
3# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
4all:
5
6fw_filesystem:
7 @if /bin/sh ./fw_filesystem.sh ; then \
8 echo "fw_filesystem: ok"; \
9 else \
10 echo "fw_filesystem: [FAIL]"; \
11 exit 1; \
12 fi
13
14fw_userhelper:
15 @if /bin/sh ./fw_userhelper.sh ; then \
16 echo "fw_userhelper: ok"; \
17 else \
18 echo "fw_userhelper: [FAIL]"; \
19 exit 1; \
20 fi
21
22run_tests: all fw_filesystem fw_userhelper
23
24# Nothing to clean up.
25clean:
26
27.PHONY: all clean run_tests fw_filesystem fw_userhelper
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
new file mode 100644
index 000000000000..3fc6c10c2479
--- /dev/null
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -0,0 +1,62 @@
1#!/bin/sh
2# This validates that the kernel will load firmware out of its list of
3# firmware locations on disk. Since the user helper does similar work,
4# we reset the custom load directory to a location the user helper doesn't
5# know so we can be sure we're not accidentally testing the user helper.
6set -e
7
8modprobe test_firmware
9
10DIR=/sys/devices/virtual/misc/test_firmware
11
12OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
13OLD_FWPATH=$(cat /sys/module/firmware_class/parameters/path)
14
15FWPATH=$(mktemp -d)
16FW="$FWPATH/test-firmware.bin"
17
18test_finish()
19{
20 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
 21 echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
22 rm -f "$FW"
23 rmdir "$FWPATH"
24}
25
26trap "test_finish" EXIT
27
28# Turn down the timeout so failures don't take so long.
29echo 1 >/sys/class/firmware/timeout
30# Set the kernel search path.
31echo -n "$FWPATH" >/sys/module/firmware_class/parameters/path
32
33# This is an unlikely real-world firmware content. :)
34echo "ABCD0123" >"$FW"
35
36NAME=$(basename "$FW")
37
38# Request a firmware that doesn't exist, it should fail.
39echo -n "nope-$NAME" >"$DIR"/trigger_request
40if diff -q "$FW" /dev/test_firmware >/dev/null ; then
41 echo "$0: firmware was not expected to match" >&2
42 exit 1
43else
44 echo "$0: timeout works"
45fi
46
47# This should succeed via kernel load or will fail after 1 second after
48# being handed over to the user helper, which won't find the fw either.
49if ! echo -n "$NAME" >"$DIR"/trigger_request ; then
50 echo "$0: could not trigger request" >&2
51 exit 1
52fi
53
54# Verify the contents are what we expect.
55if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
56 echo "$0: firmware was not loaded" >&2
57 exit 1
58else
59 echo "$0: filesystem loading works"
60fi
61
62exit 0
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
new file mode 100644
index 000000000000..6efbade12139
--- /dev/null
+++ b/tools/testing/selftests/firmware/fw_userhelper.sh
@@ -0,0 +1,89 @@
1#!/bin/sh
2# This validates that the kernel will fall back to using the user helper
3# to load firmware it can't find on disk itself. We must request a firmware
4# that the kernel won't find, and any installed helper (e.g. udev) also
 5# won't find so that we can do the load ourselves manually.
6set -e
7
8modprobe test_firmware
9
10DIR=/sys/devices/virtual/misc/test_firmware
11
12OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
13
14FWPATH=$(mktemp -d)
15FW="$FWPATH/test-firmware.bin"
16
17test_finish()
18{
19 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
20 rm -f "$FW"
21 rmdir "$FWPATH"
22}
23
24load_fw()
25{
26 local name="$1"
27 local file="$2"
28
29 # This will block until our load (below) has finished.
30 echo -n "$name" >"$DIR"/trigger_request &
31
32 # Give kernel a chance to react.
33 local timeout=10
34 while [ ! -e "$DIR"/"$name"/loading ]; do
35 sleep 0.1
36 timeout=$(( $timeout - 1 ))
37 if [ "$timeout" -eq 0 ]; then
38 echo "$0: firmware interface never appeared" >&2
39 exit 1
40 fi
41 done
42
43 echo 1 >"$DIR"/"$name"/loading
44 cat "$file" >"$DIR"/"$name"/data
45 echo 0 >"$DIR"/"$name"/loading
46
47 # Wait for request to finish.
48 wait
49}
50
51trap "test_finish" EXIT
52
53# This is an unlikely real-world firmware content. :)
54echo "ABCD0123" >"$FW"
55NAME=$(basename "$FW")
56
57# Test failure when doing nothing (timeout works).
58echo 1 >/sys/class/firmware/timeout
59echo -n "$NAME" >"$DIR"/trigger_request
60if diff -q "$FW" /dev/test_firmware >/dev/null ; then
61 echo "$0: firmware was not expected to match" >&2
62 exit 1
63else
64 echo "$0: timeout works"
65fi
66
67# Put timeout high enough for us to do work but not so long that failures
68# slow down this test too much.
69echo 4 >/sys/class/firmware/timeout
70
71# Load this script instead of the desired firmware.
72load_fw "$NAME" "$0"
73if diff -q "$FW" /dev/test_firmware >/dev/null ; then
74 echo "$0: firmware was not expected to match" >&2
75 exit 1
76else
77 echo "$0: firmware comparison works"
78fi
79
80# Do a proper load, which should work correctly.
81load_fw "$NAME" "$FW"
82if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
83 echo "$0: firmware was not loaded" >&2
84 exit 1
85else
86 echo "$0: user helper firmware loading works"
87fi
88
89exit 0