-rw-r--r--  Documentation/ABI/testing/sysfs-driver-samsung-laptop | 19
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-asus-wmi | 31
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-eeepc-wmi | 10
-rw-r--r--  Documentation/laptops/sony-laptop.txt | 37
-rw-r--r--  MAINTAINERS | 30
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/mach-exynos4/Kconfig | 7
-rw-r--r--  arch/arm/mach-exynos4/Makefile | 2
-rw-r--r--  arch/arm/mach-exynos4/include/mach/debug-macro.S | 4
-rw-r--r--  arch/arm/mach-exynos4/mach-smdkc210.c | 2
-rw-r--r--  arch/arm/mach-exynos4/mach-smdkv310.c | 2
-rw-r--r--  arch/arm/mach-ns9xxx/irq.c | 58
-rw-r--r--  arch/arm/mach-s5p64x0/cpu.c | 2
-rw-r--r--  arch/arm/mach-s5pv210/include/mach/irqs.h | 6
-rw-r--r--  arch/arm/mach-s5pv210/mach-smdkv210.c | 1
-rw-r--r--  arch/arm/mach-ux500/Kconfig | 1
-rw-r--r--  arch/arm/mach-ux500/board-mop500-regulators.c | 201
-rw-r--r--  arch/arm/mach-ux500/board-mop500-regulators.h | 1
-rw-r--r--  arch/arm/mach-ux500/board-mop500.c | 49
-rw-r--r--  arch/arm/mach-ux500/board-mop500.h | 4
-rw-r--r--  arch/arm/mach-ux500/include/mach/irqs-board-mop500.h | 15
-rw-r--r--  arch/arm/plat-s5p/cpu.c | 8
-rw-r--r--  arch/arm/plat-samsung/init.c | 2
-rw-r--r--  arch/cris/Kconfig | 3
-rw-r--r--  arch/cris/arch-v10/drivers/pcf8563.c | 2
-rw-r--r--  arch/cris/arch-v10/kernel/signal.c | 2
-rw-r--r--  arch/cris/arch-v32/drivers/Makefile | 1
-rw-r--r--  arch/cris/arch-v32/drivers/pcf8563.c | 377
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 50
-rw-r--r--  arch/score/Kconfig | 2
-rw-r--r--  arch/score/include/asm/irqflags.h | 2
-rw-r--r--  arch/score/kernel/irq.c | 53
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 5
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 14
-rw-r--r--  drivers/acpi/video.c | 6
-rw-r--r--  drivers/gpio/Kconfig | 5
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/ab8500-gpio.c | 522
-rw-r--r--  drivers/memstick/host/r592.c | 6
-rw-r--r--  drivers/mfd/ab8500-core.c | 14
-rw-r--r--  drivers/mfd/ab8500-i2c.c | 2
-rw-r--r--  drivers/platform/x86/Kconfig | 90
-rw-r--r--  drivers/platform/x86/Makefile | 9
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 127
-rw-r--r--  drivers/platform/x86/asus-laptop.c | 182
-rw-r--r--  drivers/platform/x86/asus-nb-wmi.c | 98
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 1656
-rw-r--r--  drivers/platform/x86/asus-wmi.h | 58
-rw-r--r--  drivers/platform/x86/compal-laptop.c | 8
-rw-r--r--  drivers/platform/x86/dell-wmi-aio.c | 171
-rw-r--r--  drivers/platform/x86/eeepc-laptop.c | 2
-rw-r--r--  drivers/platform/x86/eeepc-wmi.c | 912
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 312
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 2
-rw-r--r--  drivers/platform/x86/intel_ips.c | 2
-rw-r--r--  drivers/platform/x86/intel_mid_powerbtn.c | 148
-rw-r--r--  drivers/platform/x86/intel_mid_thermal.c | 576
-rw-r--r--  drivers/platform/x86/intel_rar_register.c | 2
-rw-r--r--  drivers/platform/x86/intel_scu_ipc.c | 2
-rw-r--r--  drivers/platform/x86/msi-laptop.c | 95
-rw-r--r--  drivers/platform/x86/samsung-laptop.c | 832
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 501
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 4
-rw-r--r--  drivers/platform/x86/xo15-ebook.c | 180
-rw-r--r--  fs/btrfs/btrfs_inode.h | 3
-rw-r--r--  fs/btrfs/compression.c | 17
-rw-r--r--  fs/btrfs/ctree.c | 159
-rw-r--r--  fs/btrfs/ctree.h | 19
-rw-r--r--  fs/btrfs/delayed-ref.c | 6
-rw-r--r--  fs/btrfs/dir-item.c | 45
-rw-r--r--  fs/btrfs/disk-io.c | 132
-rw-r--r--  fs/btrfs/extent-tree.c | 229
-rw-r--r--  fs/btrfs/extent_io.c | 3
-rw-r--r--  fs/btrfs/extent_io.h | 1
-rw-r--r--  fs/btrfs/file-item.c | 5
-rw-r--r--  fs/btrfs/file.c | 388
-rw-r--r--  fs/btrfs/free-space-cache.c | 510
-rw-r--r--  fs/btrfs/free-space-cache.h | 2
-rw-r--r--  fs/btrfs/inode-map.c | 3
-rw-r--r--  fs/btrfs/inode.c | 366
-rw-r--r--  fs/btrfs/ioctl.c | 100
-rw-r--r--  fs/btrfs/ordered-data.c | 8
-rw-r--r--  fs/btrfs/relocation.c | 8
-rw-r--r--  fs/btrfs/root-tree.c | 6
-rw-r--r--  fs/btrfs/super.c | 5
-rw-r--r--  fs/btrfs/transaction.c | 14
-rw-r--r--  fs/btrfs/tree-log.c | 57
-rw-r--r--  fs/btrfs/volumes.c | 164
-rw-r--r--  fs/btrfs/volumes.h | 12
-rw-r--r--  fs/btrfs/xattr.c | 2
-rw-r--r--  fs/ecryptfs/crypto.c | 2
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 30
-rw-r--r--  fs/ecryptfs/file.c | 9
-rw-r--r--  fs/ecryptfs/inode.c | 24
-rw-r--r--  fs/ecryptfs/keystore.c | 272
-rw-r--r--  fs/ecryptfs/main.c | 10
-rw-r--r--  fs/ecryptfs/mmap.c | 61
-rw-r--r--  fs/ecryptfs/read_write.c | 12
-rw-r--r--  fs/ecryptfs/super.c | 3
-rw-r--r--  fs/ocfs2/acl.c | 1
-rw-r--r--  fs/ocfs2/alloc.c | 214
-rw-r--r--  fs/ocfs2/aops.c | 82
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 49
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 3
-rw-r--r--  fs/ocfs2/cluster/masklog.c | 20
-rw-r--r--  fs/ocfs2/cluster/masklog.h | 105
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 10
-rw-r--r--  fs/ocfs2/dcache.c | 45
-rw-r--r--  fs/ocfs2/dir.c | 121
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 6
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 36
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 6
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 9
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 4
-rw-r--r--  fs/ocfs2/dlmglue.c | 246
-rw-r--r--  fs/ocfs2/export.c | 47
-rw-r--r--  fs/ocfs2/extent_map.c | 10
-rw-r--r--  fs/ocfs2/file.c | 220
-rw-r--r--  fs/ocfs2/heartbeat.c | 4
-rw-r--r--  fs/ocfs2/inode.c | 134
-rw-r--r--  fs/ocfs2/ioctl.c | 41
-rw-r--r--  fs/ocfs2/journal.c | 168
-rw-r--r--  fs/ocfs2/localalloc.c | 109
-rw-r--r--  fs/ocfs2/locks.c | 1
-rw-r--r--  fs/ocfs2/mmap.c | 7
-rw-r--r--  fs/ocfs2/namei.c | 175
-rw-r--r--  fs/ocfs2/ocfs2.h | 23
-rw-r--r--  fs/ocfs2/ocfs2_trace.h | 2739
-rw-r--r--  fs/ocfs2/quota_global.c | 45
-rw-r--r--  fs/ocfs2/quota_local.c | 16
-rw-r--r--  fs/ocfs2/refcounttree.c | 158
-rw-r--r--  fs/ocfs2/reservations.c | 57
-rw-r--r--  fs/ocfs2/resize.c | 23
-rw-r--r--  fs/ocfs2/slot_map.c | 16
-rw-r--r--  fs/ocfs2/suballoc.c | 189
-rw-r--r--  fs/ocfs2/super.c | 89
-rw-r--r--  fs/ocfs2/symlink.c | 14
-rw-r--r--  fs/ocfs2/sysfile.c | 1
-rw-r--r--  fs/ocfs2/uptodate.c | 73
-rw-r--r--  fs/ocfs2/xattr.c | 155
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 344
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 40
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 36
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 30
-rw-r--r--  fs/xfs/xfs_inode.c | 2
-rw-r--r--  fs/xfs/xfs_inode_item.c | 6
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 3
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 3
-rw-r--r--  fs/xfs/xfs_vnodeops.h | 1
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/input.h | 7
-rw-r--r--  include/linux/irq.h | 59
-rw-r--r--  include/linux/mfd/ab8500.h | 41
-rw-r--r--  include/linux/mfd/ab8500/gpio.h | 21
-rw-r--r--  include/linux/sonypi.h | 1
-rw-r--r--  include/trace/events/btrfs.h | 667
-rw-r--r--  kernel/irq/Kconfig | 4
-rw-r--r--  kernel/irq/chip.c | 154
-rw-r--r--  kernel/irq/debug.h | 10
-rw-r--r--  kernel/irq/handle.c | 16
-rw-r--r--  kernel/irq/internals.h | 6
-rw-r--r--  kernel/irq/irqdesc.c | 3
-rw-r--r--  kernel/irq/manage.c | 92
-rw-r--r--  kernel/irq/migration.c | 5
-rw-r--r--  kernel/irq/spurious.c | 10
-rw-r--r--  kernel/signal.c | 4
170 files changed, 12419 insertions, 4856 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-samsung-laptop b/Documentation/ABI/testing/sysfs-driver-samsung-laptop
new file mode 100644
index 000000000000..0a810231aad4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-samsung-laptop
@@ -0,0 +1,19 @@
+What:		/sys/devices/platform/samsung/performance_level
+Date:		January 1, 2010
+KernelVersion:	2.6.33
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:	Some Samsung laptops have different "performance levels"
+		that are can be modified by a function key, and by this
+		sysfs file.  These values don't always make a whole lot
+		of sense, but some users like to modify them to keep
+		their fans quiet at all costs.  Reading from this file
+		will show the current performance level.  Writing to the
+		file can change this value.
+			Valid options:
+				"silent"
+				"normal"
+				"overclock"
+		Note that not all laptops support all of these options.
+		Specifically, not all support the "overclock" option,
+		and it's still unknown if this value even changes
+		anything, other than making the user feel a bit better.
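
Illustration (not part of the patch): from user space the attribute above is an
ordinary sysfs file, so querying and switching the level is a plain read and a
write. A minimal C sketch, assuming the driver is loaded and the path is
exactly as documented:

    /* sketch: query and set the Samsung performance level via sysfs */
    #include <stdio.h>

    #define PERF_ATTR "/sys/devices/platform/samsung/performance_level"

    int main(void)
    {
        char level[32] = "";
        FILE *f = fopen(PERF_ATTR, "r");

        if (!f) {
            perror(PERF_ATTR);  /* driver missing or unsupported model */
            return 1;
        }
        if (fgets(level, sizeof(level), f))
            printf("current level: %s", level);
        fclose(f);

        f = fopen(PERF_ATTR, "w");
        if (!f) {
            perror(PERF_ATTR);
            return 1;
        }
        fputs("silent\n", f);  /* valid values: "silent", "normal", "overclock" */
        fclose(f);
        return 0;
    }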
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi
new file mode 100644
index 000000000000..2e7df91620de
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi
@@ -0,0 +1,31 @@
+What:		/sys/devices/platform/<platform>/cpufv
+Date:		Oct 2010
+KernelVersion:	2.6.37
+Contact:	"Corentin Chary" <corentincj@iksaif.net>
+Description:
+		Change CPU clock configuration (write-only).
+		There are three available clock configuration:
+		    * 0 -> Super Performance Mode
+		    * 1 -> High Performance Mode
+		    * 2 -> Power Saving Mode
+
+What:		/sys/devices/platform/<platform>/camera
+Date:		Jan 2010
+KernelVersion:	2.6.39
+Contact:	"Corentin Chary" <corentincj@iksaif.net>
+Description:
+		Control the camera. 1 means on, 0 means off.
+
+What:		/sys/devices/platform/<platform>/cardr
+Date:		Jan 2010
+KernelVersion:	2.6.39
+Contact:	"Corentin Chary" <corentincj@iksaif.net>
+Description:
+		Control the card reader. 1 means on, 0 means off.
+
+What:		/sys/devices/platform/<platform>/touchpad
+Date:		Jan 2010
+KernelVersion:	2.6.39
+Contact:	"Corentin Chary" <corentincj@iksaif.net>
+Description:
+		Control the card touchpad. 1 means on, 0 means off.
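
Illustration (not part of the patch): each attribute above takes a plain string
write. The sketch below assumes the platform device is named eeepc-wmi (the
name depends on which driver binds, so substitute whatever shows up under
/sys/devices/platform on the machine):

    /* sketch: set asus-wmi sysfs attributes, e.g. cpufv and touchpad */
    #include <stdio.h>

    static int write_attr(const char *path, const char *value)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fputs(value, f);
        return fclose(f);
    }

    int main(void)
    {
        /* 0 = Super Performance, 1 = High Performance, 2 = Power Saving */
        write_attr("/sys/devices/platform/eeepc-wmi/cpufv", "1");
        /* 1 means on, 0 means off */
        write_attr("/sys/devices/platform/eeepc-wmi/touchpad", "1");
        return 0;
    }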
diff --git a/Documentation/ABI/testing/sysfs-platform-eeepc-wmi b/Documentation/ABI/testing/sysfs-platform-eeepc-wmi
deleted file mode 100644
index e4b5fef5fadd..000000000000
--- a/Documentation/ABI/testing/sysfs-platform-eeepc-wmi
+++ /dev/null
@@ -1,10 +0,0 @@
-What:		/sys/devices/platform/eeepc-wmi/cpufv
-Date:		Oct 2010
-KernelVersion:	2.6.37
-Contact:	"Corentin Chary" <corentincj@iksaif.net>
-Description:
-		Change CPU clock configuration (write-only).
-		There are three available clock configuration:
-		    * 0 -> Super Performance Mode
-		    * 1 -> High Performance Mode
-		    * 2 -> Power Saving Mode
diff --git a/Documentation/laptops/sony-laptop.txt b/Documentation/laptops/sony-laptop.txt
index 23ce7d350d1a..2bd4e82e5d9f 100644
--- a/Documentation/laptops/sony-laptop.txt
+++ b/Documentation/laptops/sony-laptop.txt
@@ -14,7 +14,8 @@ Some models report hotkeys through the SNC or SPIC devices, such events are
 reported both through the ACPI subsystem as acpi events and through the INPUT
 subsystem. See the logs of acpid or /proc/acpi/event and
 /proc/bus/input/devices to find out what those events are and which input
-devices are created by the driver.
+devices are created by the driver. Additionally, loading the driver with the
+debug option will report all events in the kernel log.
 
 Backlight control:
 ------------------
@@ -64,6 +65,16 @@ powers off the sound card,
   # echo "1" > /sys/devices/platform/sony-laptop/audiopower
 powers on the sound card.
 
+
+RFkill control:
+---------------
+More recent Vaio models expose a consistent set of ACPI methods to
+control radio frequency emitting devices. If you are a lucky owner of
+such a laptop you will find the necessary rfkill devices under
+/sys/class/rfkill. Check those starting with sony-* in
+  # grep . /sys/class/rfkill/*/{state,name}
+
+
 Development:
 ------------
 
@@ -75,8 +86,21 @@ pass the option 'debug=1'.
 REPEAT: DON'T DO THIS IF YOU DON'T LIKE RISKY BUSINESS.
 
 In your kernel logs you will find the list of all ACPI methods
-the SNC device has on your laptop. You can see the GCDP/GCDP methods
-used to pwer on/off the CD drive, but there are others.
+the SNC device has on your laptop.
+
+* For new models you will see a long list of meaningless method names,
+reading the DSDT table source should reveal that:
+(1) the SNC device uses an internal capability lookup table
+(2) SN00 is used to find values in the lookup table
+(3) SN06 and SN07 are used to call into the real methods based on
+    offsets you can obtain iterating the table using SN00
+(4) SN02 used to enable events.
+Some values in the capability lookup table are more or less known, see
+the code for all sony_call_snc_handle calls, others are more obscure.
+
+* For old models you can see the GCDP/GCDP methods used to pwer on/off
+the CD drive, but there are others and they are usually different from
+model to model.
 
 I HAVE NO IDEA WHAT THOSE METHODS DO.
 
@@ -108,9 +132,8 @@ Bugs/Limitations:
   laptop, including permanent damage.
 
 * The sony-laptop and sonypi drivers do not interact at all. In the
-  future, sonypi could use sony-laptop to do (part of) its business.
+  future, sonypi will be removed and replaced by sony-laptop.
 
 * spicctrl, which is the userspace tool used to communicate with the
-  sonypi driver (through /dev/sonypi) does not try to use the
-  sony-laptop driver. In the future, spicctrl could try sonypi first,
-  and if it isn't present, try sony-laptop instead.
+  sonypi driver (through /dev/sonypi) is deprecated as well since all
+  its features are now available under the sysfs tree via sony-laptop.
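
Illustration (not part of the patch): the rfkill check suggested in the new
RFkill section can also be done programmatically by walking /sys/class/rfkill
and keeping only the entries whose name begins with "sony". A user-space C
sketch:

    /* sketch: list sony-* rfkill switches and their state */
    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        DIR *d = opendir("/sys/class/rfkill");
        struct dirent *e;

        if (!d) {
            perror("/sys/class/rfkill");
            return 1;
        }
        while ((e = readdir(d)) != NULL) {
            char path[288], name[64] = "", state[16] = "";
            FILE *f;

            if (e->d_name[0] == '.')
                continue;
            snprintf(path, sizeof(path), "/sys/class/rfkill/%s/name", e->d_name);
            f = fopen(path, "r");
            if (!f)
                continue;
            if (fgets(name, sizeof(name), f))
                name[strcspn(name, "\n")] = '\0';
            fclose(f);
            if (strncmp(name, "sony", 4) != 0)
                continue;  /* only the switches owned by sony-laptop */
            snprintf(path, sizeof(path), "/sys/class/rfkill/%s/state", e->d_name);
            f = fopen(path, "r");
            if (f) {
                if (fgets(state, sizeof(state), f))
                    state[strcspn(state, "\n")] = '\0';
                fclose(f);
            }
            printf("%s: %s -> state %s\n", e->d_name, name, state);
        }
        closedir(d);
        return 0;
    }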
diff --git a/MAINTAINERS b/MAINTAINERS
index 8aa1cacddbcc..6b4b9cdec370 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1157,14 +1157,14 @@ S:	Maintained
 F:	Documentation/hwmon/asc7621
 F:	drivers/hwmon/asc7621.c
 
-ASUS ACPI EXTRAS DRIVER
+ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
 M:	Corentin Chary <corentincj@iksaif.net>
-M:	Karol Kozimor <sziwan@users.sourceforge.net>
 L:	acpi4asus-user@lists.sourceforge.net
 L:	platform-driver-x86@vger.kernel.org
 W:	http://acpi4asus.sf.net
 S:	Maintained
-F:	drivers/platform/x86/asus_acpi.c
+F:	drivers/platform/x86/asus*.c
+F:	drivers/platform/x86/eeepc*.c
 
 ASUS ASB100 HARDWARE MONITOR DRIVER
 M:	"Mark M. Hoffman" <mhoffman@lightlink.com>
@@ -1172,14 +1172,6 @@ L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	drivers/hwmon/asb100.c
 
-ASUS LAPTOP EXTRAS DRIVER
-M:	Corentin Chary <corentincj@iksaif.net>
-L:	acpi4asus-user@lists.sourceforge.net
-L:	platform-driver-x86@vger.kernel.org
-W:	http://acpi4asus.sf.net
-S:	Maintained
-F:	drivers/platform/x86/asus-laptop.c
-
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 M:	Dan Williams <dan.j.williams@intel.com>
 W:	http://sourceforge.net/projects/xscaleiop
@@ -2414,22 +2406,6 @@ T:	git git://git.alsa-project.org/alsa-kernel.git
 S:	Maintained
 F:	sound/usb/misc/ua101.c
 
-EEEPC LAPTOP EXTRAS DRIVER
-M:	Corentin Chary <corentincj@iksaif.net>
-L:	acpi4asus-user@lists.sourceforge.net
-L:	platform-driver-x86@vger.kernel.org
-W:	http://acpi4asus.sf.net
-S:	Maintained
-F:	drivers/platform/x86/eeepc-laptop.c
-
-EEEPC WMI EXTRAS DRIVER
-M:	Corentin Chary <corentincj@iksaif.net>
-L:	acpi4asus-user@lists.sourceforge.net
-L:	platform-driver-x86@vger.kernel.org
-W:	http://acpi4asus.sf.net
-S:	Maintained
-F:	drivers/platform/x86/eeepc-wmi.c
-
 EFIFB FRAMEBUFFER DRIVER
 L:	linux-fbdev@vger.kernel.org
 M:	Peter Jones <pjones@redhat.com>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 93d595a7477a..efe06e004714 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2009,6 +2009,7 @@ menu "Power management options"
2009source "kernel/power/Kconfig" 2009source "kernel/power/Kconfig"
2010 2010
2011config ARCH_SUSPEND_POSSIBLE 2011config ARCH_SUSPEND_POSSIBLE
2012 depends on !ARCH_S5P64X0 && !ARCH_S5P6442
2012 def_bool y 2013 def_bool y
2013 2014
2014endmenu 2015endmenu
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index a021b5240bba..e849f67be47d 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -20,6 +20,11 @@ config EXYNOS4_MCT
20 help 20 help
21 Use MCT (Multi Core Timer) as kernel timers 21 Use MCT (Multi Core Timer) as kernel timers
22 22
23config EXYNOS4_DEV_AHCI
24 bool
25 help
26 Compile in platform device definitions for AHCI
27
23config EXYNOS4_DEV_PD 28config EXYNOS4_DEV_PD
24 bool 29 bool
25 help 30 help
@@ -134,9 +139,9 @@ config MACH_ARMLEX4210
134 select S3C_DEV_HSMMC 139 select S3C_DEV_HSMMC
135 select S3C_DEV_HSMMC2 140 select S3C_DEV_HSMMC2
136 select S3C_DEV_HSMMC3 141 select S3C_DEV_HSMMC3
142 select EXYNOS4_DEV_AHCI
137 select EXYNOS4_DEV_SYSMMU 143 select EXYNOS4_DEV_SYSMMU
138 select EXYNOS4_SETUP_SDHCI 144 select EXYNOS4_SETUP_SDHCI
139 select SATA_AHCI_PLATFORM
140 help 145 help
141 Machine support for Samsung ARMLEX4210 based on EXYNOS4210 146 Machine support for Samsung ARMLEX4210 based on EXYNOS4210
142 147
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
index b8f0e7d82d7e..9be104f63c0b 100644
--- a/arch/arm/mach-exynos4/Makefile
+++ b/arch/arm/mach-exynos4/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_MACH_NURI) += mach-nuri.o
39# device support 39# device support
40 40
41obj-y += dev-audio.o 41obj-y += dev-audio.o
42obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
42obj-$(CONFIG_EXYNOS4_DEV_PD) += dev-pd.o 43obj-$(CONFIG_EXYNOS4_DEV_PD) += dev-pd.o
43obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o 44obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o
44 45
@@ -53,4 +54,3 @@ obj-$(CONFIG_EXYNOS4_SETUP_I2C7) += setup-i2c7.o
53obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o 54obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o
54obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o 55obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o
55obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o 56obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
56obj-$(CONFIG_SATA_AHCI_PLATFORM) += dev-ahci.o
diff --git a/arch/arm/mach-exynos4/include/mach/debug-macro.S b/arch/arm/mach-exynos4/include/mach/debug-macro.S
index 58bbd049a6c4..a442ef861167 100644
--- a/arch/arm/mach-exynos4/include/mach/debug-macro.S
+++ b/arch/arm/mach-exynos4/include/mach/debug-macro.S
@@ -21,8 +21,8 @@
21 */ 21 */
22 22
23 .macro addruart, rp, rv 23 .macro addruart, rp, rv
24 ldreq \rp, = S3C_PA_UART 24 ldr \rp, = S3C_PA_UART
25 ldrne \rv, = S3C_VA_UART 25 ldr \rv, = S3C_VA_UART
26#if CONFIG_DEBUG_S3C_UART != 0 26#if CONFIG_DEBUG_S3C_UART != 0
27 add \rp, \rp, #(0x10000 * CONFIG_DEBUG_S3C_UART) 27 add \rp, \rp, #(0x10000 * CONFIG_DEBUG_S3C_UART)
28 add \rv, \rv, #(0x10000 * CONFIG_DEBUG_S3C_UART) 28 add \rv, \rv, #(0x10000 * CONFIG_DEBUG_S3C_UART)
diff --git a/arch/arm/mach-exynos4/mach-smdkc210.c b/arch/arm/mach-exynos4/mach-smdkc210.c
index 25a256818122..e645f7a955f0 100644
--- a/arch/arm/mach-exynos4/mach-smdkc210.c
+++ b/arch/arm/mach-exynos4/mach-smdkc210.c
@@ -125,7 +125,7 @@ static struct resource smdkc210_smsc911x_resources[] = {
125}; 125};
126 126
127static struct smsc911x_platform_config smsc9215_config = { 127static struct smsc911x_platform_config smsc9215_config = {
128 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, 128 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
129 .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, 129 .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
130 .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY, 130 .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY,
131 .phy_interface = PHY_INTERFACE_MODE_MII, 131 .phy_interface = PHY_INTERFACE_MODE_MII,
diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c
index 88e0275143be..152676471b67 100644
--- a/arch/arm/mach-exynos4/mach-smdkv310.c
+++ b/arch/arm/mach-exynos4/mach-smdkv310.c
@@ -127,7 +127,7 @@ static struct resource smdkv310_smsc911x_resources[] = {
127}; 127};
128 128
129static struct smsc911x_platform_config smsc9215_config = { 129static struct smsc911x_platform_config smsc9215_config = {
130 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, 130 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
131 .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, 131 .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
132 .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY, 132 .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY,
133 .phy_interface = PHY_INTERFACE_MODE_MII, 133 .phy_interface = PHY_INTERFACE_MODE_MII,
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index 389fa5c669de..bf0fd48cbd80 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -31,17 +31,11 @@ static void ns9xxx_mask_irq(struct irq_data *d)
31 __raw_writel(ic, SYS_IC(prio / 4)); 31 __raw_writel(ic, SYS_IC(prio / 4));
32} 32}
33 33
34static void ns9xxx_ack_irq(struct irq_data *d) 34static void ns9xxx_eoi_irq(struct irq_data *d)
35{ 35{
36 __raw_writel(0, SYS_ISRADDR); 36 __raw_writel(0, SYS_ISRADDR);
37} 37}
38 38
39static void ns9xxx_maskack_irq(struct irq_data *d)
40{
41 ns9xxx_mask_irq(d);
42 ns9xxx_ack_irq(d);
43}
44
45static void ns9xxx_unmask_irq(struct irq_data *d) 39static void ns9xxx_unmask_irq(struct irq_data *d)
46{ 40{
47 /* XXX: better use cpp symbols */ 41 /* XXX: better use cpp symbols */
@@ -52,56 +46,11 @@ static void ns9xxx_unmask_irq(struct irq_data *d)
52} 46}
53 47
54static struct irq_chip ns9xxx_chip = { 48static struct irq_chip ns9xxx_chip = {
55 .irq_ack = ns9xxx_ack_irq, 49 .irq_eoi = ns9xxx_eoi_irq,
56 .irq_mask = ns9xxx_mask_irq, 50 .irq_mask = ns9xxx_mask_irq,
57 .irq_mask_ack = ns9xxx_maskack_irq,
58 .irq_unmask = ns9xxx_unmask_irq, 51 .irq_unmask = ns9xxx_unmask_irq,
59}; 52};
60 53
61#if 0
62#define handle_irq handle_level_irq
63#else
64static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
65{
66 struct irqaction *action;
67 irqreturn_t action_ret;
68
69 raw_spin_lock(&desc->lock);
70
71 BUG_ON(desc->status & IRQ_INPROGRESS);
72
73 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
74 kstat_incr_irqs_this_cpu(irq, desc);
75
76 action = desc->action;
77 if (unlikely(!action || (desc->status & IRQ_DISABLED)))
78 goto out_mask;
79
80 desc->status |= IRQ_INPROGRESS;
81 raw_spin_unlock(&desc->lock);
82
83 action_ret = handle_IRQ_event(irq, action);
84
85 /* XXX: There is no direct way to access noirqdebug, so check
86 * unconditionally for spurious irqs...
87 * Maybe this function should go to kernel/irq/chip.c? */
88 note_interrupt(irq, desc, action_ret);
89
90 raw_spin_lock(&desc->lock);
91 desc->status &= ~IRQ_INPROGRESS;
92
93 if (desc->status & IRQ_DISABLED)
94out_mask:
95 desc->irq_data.chip->irq_mask(&desc->irq_data);
96
97 /* ack unconditionally to unmask lower prio irqs */
98 desc->irq_data.chip->irq_ack(&desc->irq_data);
99
100 raw_spin_unlock(&desc->lock);
101}
102#define handle_irq handle_prio_irq
103#endif
104
105void __init ns9xxx_init_irq(void) 54void __init ns9xxx_init_irq(void)
106{ 55{
107 int i; 56 int i;
@@ -119,7 +68,8 @@ void __init ns9xxx_init_irq(void)
119 68
120 for (i = 0; i <= 31; ++i) { 69 for (i = 0; i <= 31; ++i) {
121 set_irq_chip(i, &ns9xxx_chip); 70 set_irq_chip(i, &ns9xxx_chip);
122 set_irq_handler(i, handle_irq); 71 set_irq_handler(i, handle_fasteoi_irq);
123 set_irq_flags(i, IRQF_VALID); 72 set_irq_flags(i, IRQF_VALID);
73 irq_set_status_flags(i, IRQ_LEVEL);
124 } 74 }
125} 75}
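
For reference, once the hunks above are applied the interesting part of this
file reduces to the stock fasteoi pattern: the chip only supplies mask, unmask
and eoi callbacks and the generic flow handler does the rest. The sketch below
re-assembles that end state from the hunks; the include list is a guess and the
priority setup at the top of ns9xxx_init_irq() is omitted.

    /* sketch: ns9xxx converted to the generic fasteoi flow */
    #include <linux/irq.h>
    #include <asm/mach/irq.h>       /* set_irq_flags() on ARM */

    static struct irq_chip ns9xxx_chip = {
        .irq_eoi    = ns9xxx_eoi_irq,   /* writes 0 to SYS_ISRADDR */
        .irq_mask   = ns9xxx_mask_irq,
        .irq_unmask = ns9xxx_unmask_irq,
    };

    void __init ns9xxx_init_irq(void)
    {
        int i;

        /* ... priority mapping as before ... */
        for (i = 0; i <= 31; ++i) {
            set_irq_chip(i, &ns9xxx_chip);
            set_irq_handler(i, handle_fasteoi_irq); /* was handle_prio_irq */
            set_irq_flags(i, IRQF_VALID);
            irq_set_status_flags(i, IRQ_LEVEL);
        }
    }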
diff --git a/arch/arm/mach-s5p64x0/cpu.c b/arch/arm/mach-s5p64x0/cpu.c
index b8d02eb4cf30..a5c00952ea35 100644
--- a/arch/arm/mach-s5p64x0/cpu.c
+++ b/arch/arm/mach-s5p64x0/cpu.c
@@ -119,7 +119,7 @@ void __init s5p6450_map_io(void)
119 s3c_adc_setname("s3c64xx-adc"); 119 s3c_adc_setname("s3c64xx-adc");
120 120
121 iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc)); 121 iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
122 iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6440_iodesc)); 122 iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc));
123} 123}
124 124
125/* 125/*
diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h
index 26710b35ef87..b9f9ec33384d 100644
--- a/arch/arm/mach-s5pv210/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv210/include/mach/irqs.h
@@ -99,9 +99,9 @@
99#define IRQ_TC IRQ_PENDN 99#define IRQ_TC IRQ_PENDN
100#define IRQ_KEYPAD S5P_IRQ_VIC2(25) 100#define IRQ_KEYPAD S5P_IRQ_VIC2(25)
101#define IRQ_CG S5P_IRQ_VIC2(26) 101#define IRQ_CG S5P_IRQ_VIC2(26)
102#define IRQ_SEC S5P_IRQ_VIC2(27) 102#define IRQ_SSS_INT S5P_IRQ_VIC2(27)
103#define IRQ_SECRX S5P_IRQ_VIC2(28) 103#define IRQ_SSS_HASH S5P_IRQ_VIC2(28)
104#define IRQ_SECTX S5P_IRQ_VIC2(29) 104#define IRQ_PCM2 S5P_IRQ_VIC2(29)
105#define IRQ_SDMIRQ S5P_IRQ_VIC2(30) 105#define IRQ_SDMIRQ S5P_IRQ_VIC2(30)
106#define IRQ_SDMFIQ S5P_IRQ_VIC2(31) 106#define IRQ_SDMFIQ S5P_IRQ_VIC2(31)
107 107
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index bc08ac42e7cc..c6a9e86c2d5c 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -44,7 +44,6 @@
44#include <plat/keypad.h> 44#include <plat/keypad.h>
45#include <plat/pm.h> 45#include <plat/pm.h>
46#include <plat/fb.h> 46#include <plat/fb.h>
47#include <plat/gpio-cfg.h>
48#include <plat/s5p-time.h> 47#include <plat/s5p-time.h>
49 48
50/* Following are default values for UCON, ULCON and UFCON UART registers */ 49/* Following are default values for UCON, ULCON and UFCON UART registers */
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 203b986280f5..58626013aa32 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -23,6 +23,7 @@ menu "Ux500 target platform"
23config MACH_U8500 23config MACH_U8500
24 bool "U8500 Development platform" 24 bool "U8500 Development platform"
25 depends on UX500_SOC_DB8500 25 depends on UX500_SOC_DB8500
26 select TPS6105X
26 help 27 help
27 Include support for the mop500 development platform. 28 Include support for the mop500 development platform.
28 29
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 875c91b2f8a4..9ed0f90cfe23 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -13,6 +13,30 @@
13#include <linux/regulator/ab8500.h> 13#include <linux/regulator/ab8500.h>
14#include "board-mop500-regulators.h" 14#include "board-mop500-regulators.h"
15 15
16/*
17 * TPS61052 regulator
18 */
19static struct regulator_consumer_supply tps61052_vaudio_consumers[] = {
20 /*
21 * Boost converter supply to raise voltage on audio speaker, this
22 * is actually connected to three pins, VInVhfL (left amplifier)
23 * VInVhfR (right amplifier) and VIntDClassInt - all three must
24 * be connected to the same voltage.
25 */
26 REGULATOR_SUPPLY("vintdclassint", "ab8500-codec.0"),
27};
28
29struct regulator_init_data tps61052_regulator = {
30 .constraints = {
31 .name = "vaudio-hf",
32 .min_uV = 4500000,
33 .max_uV = 4500000,
34 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
35 },
36 .num_consumer_supplies = ARRAY_SIZE(tps61052_vaudio_consumers),
37 .consumer_supplies = tps61052_vaudio_consumers,
38};
39
16static struct regulator_consumer_supply ab8500_vaux1_consumers[] = { 40static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
17 /* External displays, connector on board 2v5 power supply */ 41 /* External displays, connector on board 2v5 power supply */
18 REGULATOR_SUPPLY("vaux12v5", "mcde.0"), 42 REGULATOR_SUPPLY("vaux12v5", "mcde.0"),
@@ -62,6 +86,182 @@ static struct regulator_consumer_supply ab8500_vana_consumers[] = {
62 REGULATOR_SUPPLY("vsmps2", "mcde.0"), 86 REGULATOR_SUPPLY("vsmps2", "mcde.0"),
63}; 87};
64 88
89/* ab8500 regulator register initialization */
90struct ab8500_regulator_reg_init
91ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
92 /*
93 * VanaRequestCtrl = HP/LP depending on VxRequest
94 * VextSupply1RequestCtrl = HP/LP depending on VxRequest
95 */
96 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00),
97 /*
98 * VextSupply2RequestCtrl = HP/LP depending on VxRequest
99 * VextSupply3RequestCtrl = HP/LP depending on VxRequest
100 * Vaux1RequestCtrl = HP/LP depending on VxRequest
101 * Vaux2RequestCtrl = HP/LP depending on VxRequest
102 */
103 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00),
104 /*
105 * Vaux3RequestCtrl = HP/LP depending on VxRequest
106 * SwHPReq = Control through SWValid disabled
107 */
108 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00),
109 /*
110 * VanaSysClkReq1HPValid = disabled
111 * Vaux1SysClkReq1HPValid = disabled
112 * Vaux2SysClkReq1HPValid = disabled
113 * Vaux3SysClkReq1HPValid = disabled
114 */
115 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00),
116 /*
117 * VextSupply1SysClkReq1HPValid = disabled
118 * VextSupply2SysClkReq1HPValid = disabled
119 * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
120 */
121 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40),
122 /*
123 * VanaHwHPReq1Valid = disabled
124 * Vaux1HwHPreq1Valid = disabled
125 * Vaux2HwHPReq1Valid = disabled
126 * Vaux3HwHPReqValid = disabled
127 */
128 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00),
129 /*
130 * VextSupply1HwHPReq1Valid = disabled
131 * VextSupply2HwHPReq1Valid = disabled
132 * VextSupply3HwHPReq1Valid = disabled
133 */
134 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00),
135 /*
136 * VanaHwHPReq2Valid = disabled
137 * Vaux1HwHPReq2Valid = disabled
138 * Vaux2HwHPReq2Valid = disabled
139 * Vaux3HwHPReq2Valid = disabled
140 */
141 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00),
142 /*
143 * VextSupply1HwHPReq2Valid = disabled
144 * VextSupply2HwHPReq2Valid = disabled
145 * VextSupply3HwHPReq2Valid = HWReq2 controlled
146 */
147 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04),
148 /*
149 * VanaSwHPReqValid = disabled
150 * Vaux1SwHPReqValid = disabled
151 */
152 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00),
153 /*
154 * Vaux2SwHPReqValid = disabled
155 * Vaux3SwHPReqValid = disabled
156 * VextSupply1SwHPReqValid = disabled
157 * VextSupply2SwHPReqValid = disabled
158 * VextSupply3SwHPReqValid = disabled
159 */
160 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00),
161 /*
162 * SysClkReq2Valid1 = SysClkReq2 controlled
163 * SysClkReq3Valid1 = disabled
164 * SysClkReq4Valid1 = SysClkReq4 controlled
165 * SysClkReq5Valid1 = disabled
166 * SysClkReq6Valid1 = SysClkReq6 controlled
167 * SysClkReq7Valid1 = disabled
168 * SysClkReq8Valid1 = disabled
169 */
170 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a),
171 /*
172 * SysClkReq2Valid2 = disabled
173 * SysClkReq3Valid2 = disabled
174 * SysClkReq4Valid2 = disabled
175 * SysClkReq5Valid2 = disabled
176 * SysClkReq6Valid2 = SysClkReq6 controlled
177 * SysClkReq7Valid2 = disabled
178 * SysClkReq8Valid2 = disabled
179 */
180 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20),
181 /*
182 * VTVoutEna = disabled
183 * Vintcore12Ena = disabled
184 * Vintcore12Sel = 1.25 V
185 * Vintcore12LP = inactive (HP)
186 * VTVoutLP = inactive (HP)
187 */
188 INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10),
189 /*
190 * VaudioEna = disabled
191 * VdmicEna = disabled
192 * Vamic1Ena = disabled
193 * Vamic2Ena = disabled
194 */
195 INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00),
196 /*
197 * Vamic1_dzout = high-Z when Vamic1 is disabled
198 * Vamic2_dzout = high-Z when Vamic2 is disabled
199 */
200 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00),
201 /*
202 * VPll = Hw controlled
203 * VanaRegu = force off
204 */
205 INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02),
206 /*
207 * VrefDDREna = disabled
208 * VrefDDRSleepMode = inactive (no pulldown)
209 */
210 INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00),
211 /*
212 * VextSupply1Regu = HW control
213 * VextSupply2Regu = HW control
214 * VextSupply3Regu = HW control
215 * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
216 * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
217 */
218 INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a),
219 /*
220 * Vaux1Regu = force HP
221 * Vaux2Regu = force off
222 */
223 INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01),
224 /*
225 * Vaux3regu = force off
226 */
227 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00),
228 /*
229 * Vsmps1 = 1.15V
230 */
231 INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24),
232 /*
233 * Vaux1Sel = 2.5 V
234 */
235 INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08),
236 /*
237 * Vaux2Sel = 2.9 V
238 */
239 INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d),
240 /*
241 * Vaux3Sel = 2.91 V
242 */
243 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07),
244 /*
245 * VextSupply12LP = disabled (no LP)
246 */
247 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00),
248 /*
249 * Vaux1Disch = short discharge time
250 * Vaux2Disch = short discharge time
251 * Vaux3Disch = short discharge time
252 * Vintcore12Disch = short discharge time
253 * VTVoutDisch = short discharge time
254 * VaudioDisch = short discharge time
255 */
256 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00),
257 /*
258 * VanaDisch = short discharge time
259 * VdmicPullDownEna = pulldown disabled when Vdmic is disabled
260 * VdmicDisch = short discharge time
261 */
262 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00),
263};
264
65/* AB8500 regulators */ 265/* AB8500 regulators */
66struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { 266struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
67 /* supplies to the display/camera */ 267 /* supplies to the display/camera */
@@ -72,6 +272,7 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
72 .max_uV = 2900000, 272 .max_uV = 2900000,
73 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | 273 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
74 REGULATOR_CHANGE_STATUS, 274 REGULATOR_CHANGE_STATUS,
275 .boot_on = 1, /* must be on for display */
75 }, 276 },
76 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), 277 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
77 .consumer_supplies = ab8500_vaux1_consumers, 278 .consumer_supplies = ab8500_vaux1_consumers,
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
index f979b892e4fa..94992158d962 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.h
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -17,5 +17,6 @@
17extern struct ab8500_regulator_reg_init 17extern struct ab8500_regulator_reg_init
18ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS]; 18ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS];
19extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS]; 19extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
20extern struct regulator_init_data tps61052_regulator;
20 21
21#endif 22#endif
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index d0076453d7ff..dc8746d7826e 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -22,6 +22,8 @@
22#include <linux/mfd/ab8500.h> 22#include <linux/mfd/ab8500.h>
23#include <linux/regulator/ab8500.h> 23#include <linux/regulator/ab8500.h>
24#include <linux/mfd/tc3589x.h> 24#include <linux/mfd/tc3589x.h>
25#include <linux/mfd/tps6105x.h>
26#include <linux/mfd/ab8500/gpio.h>
25#include <linux/leds-lp5521.h> 27#include <linux/leds-lp5521.h>
26#include <linux/input.h> 28#include <linux/input.h>
27#include <linux/gpio_keys.h> 29#include <linux/gpio_keys.h>
@@ -42,10 +44,35 @@
42#include "board-mop500.h" 44#include "board-mop500.h"
43#include "board-mop500-regulators.h" 45#include "board-mop500-regulators.h"
44 46
47static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
48 .gpio_base = MOP500_AB8500_GPIO(0),
49 .irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE,
50 /* config_reg is the initial configuration of ab8500 pins.
51 * The pins can be configured as GPIO or alt functions based
52 * on value present in GpioSel1 to GpioSel6 and AlternatFunction
53 * register. This is the array of 7 configuration settings.
54 * One has to compile time decide these settings. Below is the
55 * explaination of these setting
56 * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO
57 * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO
58 * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO
59 * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO
60 * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO
61 * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO
62 * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured
63 * as GPIO then this register selectes the alternate fucntions
64 */
65 .config_reg = {0x00, 0x1E, 0x80, 0x01,
66 0x7A, 0x00, 0x00},
67};
68
45static struct ab8500_platform_data ab8500_platdata = { 69static struct ab8500_platform_data ab8500_platdata = {
46 .irq_base = MOP500_AB8500_IRQ_BASE, 70 .irq_base = MOP500_AB8500_IRQ_BASE,
71 .regulator_reg_init = ab8500_regulator_reg_init,
72 .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init),
47 .regulator = ab8500_regulators, 73 .regulator = ab8500_regulators,
48 .num_regulator = ARRAY_SIZE(ab8500_regulators), 74 .num_regulator = ARRAY_SIZE(ab8500_regulators),
75 .gpio = &ab8500_gpio_pdata,
49}; 76};
50 77
51static struct resource ab8500_resources[] = { 78static struct resource ab8500_resources[] = {
@@ -67,6 +94,15 @@ struct platform_device ab8500_device = {
67}; 94};
68 95
69/* 96/*
97 * TPS61052
98 */
99
100static struct tps6105x_platform_data mop500_tps61052_data = {
101 .mode = TPS6105X_MODE_VOLTAGE,
102 .regulator_data = &tps61052_regulator,
103};
104
105/*
70 * TC35892 106 * TC35892
71 */ 107 */
72 108
@@ -136,7 +172,7 @@ static struct lp5521_platform_data __initdata lp5521_sec_data = {
136 .clock_mode = LP5521_CLOCK_EXT, 172 .clock_mode = LP5521_CLOCK_EXT,
137}; 173};
138 174
139static struct i2c_board_info mop500_i2c0_devices[] = { 175static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
140 { 176 {
141 I2C_BOARD_INFO("tc3589x", 0x42), 177 I2C_BOARD_INFO("tc3589x", 0x42),
142 .irq = NOMADIK_GPIO_TO_IRQ(217), 178 .irq = NOMADIK_GPIO_TO_IRQ(217),
@@ -144,6 +180,14 @@ static struct i2c_board_info mop500_i2c0_devices[] = {
144 }, 180 },
145}; 181};
146 182
183/* I2C0 devices only available prior to HREFv60 */
184static struct i2c_board_info __initdata mop500_i2c0_old_devices[] = {
185 {
186 I2C_BOARD_INFO("tps61052", 0x33),
187 .platform_data = &mop500_tps61052_data,
188 },
189};
190
147static struct i2c_board_info __initdata mop500_i2c2_devices[] = { 191static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
148 { 192 {
149 /* lp5521 LED driver, 1st device */ 193 /* lp5521 LED driver, 1st device */
@@ -406,6 +450,9 @@ static void __init mop500_init_machine(void)
406 450
407 i2c_register_board_info(0, mop500_i2c0_devices, 451 i2c_register_board_info(0, mop500_i2c0_devices,
408 ARRAY_SIZE(mop500_i2c0_devices)); 452 ARRAY_SIZE(mop500_i2c0_devices));
453 if (!machine_is_hrefv60())
454 i2c_register_board_info(0, mop500_i2c0_old_devices,
455 ARRAY_SIZE(mop500_i2c0_old_devices));
409 i2c_register_board_info(2, mop500_i2c2_devices, 456 i2c_register_board_info(2, mop500_i2c2_devices,
410 ARRAY_SIZE(mop500_i2c2_devices)); 457 ARRAY_SIZE(mop500_i2c2_devices));
411} 458}
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index 56722f4be71b..03a31cc9b084 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -27,6 +27,10 @@
27#define GPIO_BU21013_CS MOP500_EGPIO(13) 27#define GPIO_BU21013_CS MOP500_EGPIO(13)
28#define GPIO_SDMMC_EN MOP500_EGPIO(17) 28#define GPIO_SDMMC_EN MOP500_EGPIO(17)
29#define GPIO_SDMMC_1V8_3V_SEL MOP500_EGPIO(18) 29#define GPIO_SDMMC_1V8_3V_SEL MOP500_EGPIO(18)
30#define MOP500_EGPIO_END MOP500_EGPIO(24)
31
32/* GPIOs on the AB8500 mixed-signals circuit */
33#define MOP500_AB8500_GPIO(x) (MOP500_EGPIO_END + (x))
30 34
31struct i2c_board_info; 35struct i2c_board_info;
32 36
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
index 7cdeb2af0ebb..97ef55f84934 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
@@ -35,9 +35,20 @@
35#define MOP500_STMPE1601_IRQBASE MOP500_EGPIO_IRQ_END 35#define MOP500_STMPE1601_IRQBASE MOP500_EGPIO_IRQ_END
36#define MOP500_STMPE1601_IRQ(x) (MOP500_STMPE1601_IRQBASE + (x)) 36#define MOP500_STMPE1601_IRQ(x) (MOP500_STMPE1601_IRQBASE + (x))
37 37
38#define MOP500_NR_IRQS MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS) 38#define MOP500_STMPE1601_IRQ_END \
39 MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS)
39 40
40#define MOP500_IRQ_END MOP500_NR_IRQS 41/* AB8500 virtual gpio IRQ */
42#define AB8500_VIR_GPIO_NR_IRQS 16
43
44#define MOP500_AB8500_VIR_GPIO_IRQ_BASE \
45 MOP500_STMPE1601_IRQ_END
46#define MOP500_AB8500_VIR_GPIO_IRQ_END \
47 (MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS)
48
49#define MOP500_NR_IRQS MOP500_AB8500_VIR_GPIO_IRQ_END
50
51#define MOP500_IRQ_END MOP500_NR_IRQS
41 52
42#if MOP500_IRQ_END > IRQ_BOARD_END 53#if MOP500_IRQ_END > IRQ_BOARD_END
43#undef IRQ_BOARD_END 54#undef IRQ_BOARD_END
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
index c3bfe9b13acf..5cf5e721e6ca 100644
--- a/arch/arm/plat-s5p/cpu.c
+++ b/arch/arm/plat-s5p/cpu.c
@@ -39,7 +39,7 @@ static const char name_exynos4210[] = "EXYNOS4210";
39static struct cpu_table cpu_ids[] __initdata = { 39static struct cpu_table cpu_ids[] __initdata = {
40 { 40 {
41 .idcode = 0x56440100, 41 .idcode = 0x56440100,
42 .idmask = 0xffffff00, 42 .idmask = 0xfffff000,
43 .map_io = s5p6440_map_io, 43 .map_io = s5p6440_map_io,
44 .init_clocks = s5p6440_init_clocks, 44 .init_clocks = s5p6440_init_clocks,
45 .init_uarts = s5p6440_init_uarts, 45 .init_uarts = s5p6440_init_uarts,
@@ -47,7 +47,7 @@ static struct cpu_table cpu_ids[] __initdata = {
47 .name = name_s5p6440, 47 .name = name_s5p6440,
48 }, { 48 }, {
49 .idcode = 0x36442000, 49 .idcode = 0x36442000,
50 .idmask = 0xffffff00, 50 .idmask = 0xfffff000,
51 .map_io = s5p6442_map_io, 51 .map_io = s5p6442_map_io,
52 .init_clocks = s5p6442_init_clocks, 52 .init_clocks = s5p6442_init_clocks,
53 .init_uarts = s5p6442_init_uarts, 53 .init_uarts = s5p6442_init_uarts,
@@ -55,7 +55,7 @@ static struct cpu_table cpu_ids[] __initdata = {
55 .name = name_s5p6442, 55 .name = name_s5p6442,
56 }, { 56 }, {
57 .idcode = 0x36450000, 57 .idcode = 0x36450000,
58 .idmask = 0xffffff00, 58 .idmask = 0xfffff000,
59 .map_io = s5p6450_map_io, 59 .map_io = s5p6450_map_io,
60 .init_clocks = s5p6450_init_clocks, 60 .init_clocks = s5p6450_init_clocks,
61 .init_uarts = s5p6450_init_uarts, 61 .init_uarts = s5p6450_init_uarts,
@@ -79,7 +79,7 @@ static struct cpu_table cpu_ids[] __initdata = {
79 .name = name_s5pv210, 79 .name = name_s5pv210,
80 }, { 80 }, {
81 .idcode = 0x43210000, 81 .idcode = 0x43210000,
82 .idmask = 0xfffff000, 82 .idmask = 0xfffe0000,
83 .map_io = exynos4_map_io, 83 .map_io = exynos4_map_io,
84 .init_clocks = exynos4_init_clocks, 84 .init_clocks = exynos4_init_clocks,
85 .init_uarts = exynos4_init_uarts, 85 .init_uarts = exynos4_init_uarts,
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 6790edfaca6f..79d10fca9090 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -36,7 +36,7 @@ static struct cpu_table * __init s3c_lookup_cpu(unsigned long idcode,
 						unsigned int count)
 {
 	for (; count != 0; count--, tab++) {
-		if ((idcode & tab->idmask) == tab->idcode)
+		if ((idcode & tab->idmask) == (tab->idcode & tab->idmask))
 			return tab;
 	}
 
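
Worked example (not part of the patch): with the EXYNOS4210 entry from the
plat-s5p hunk above (idcode 0x43210000, idmask widened to 0xfffe0000), masking
only the probed ID gives 0x43200000, which can never equal the unmasked table
idcode, so the old comparison always misses. Masking both sides restores the
match:

    /* sketch: why both sides of the ID comparison must be masked */
    #include <stdio.h>

    int main(void)
    {
        unsigned long chip_id = 0x43210000; /* ID read from the SoC */
        unsigned long idcode  = 0x43210000; /* table entry, plat-s5p/cpu.c */
        unsigned long idmask  = 0xfffe0000; /* widened mask from this series */

        /* old test: 0x43200000 != 0x43210000 -> lookup fails */
        printf("old: %s\n",
               (chip_id & idmask) == idcode ? "match" : "no match");

        /* new test: 0x43200000 == 0x43200000 -> lookup succeeds */
        printf("new: %s\n",
               (chip_id & idmask) == (idcode & idmask) ? "match" : "no match");
        return 0;
    }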
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 04a7fc5eaf46..617925ddd142 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -296,8 +296,7 @@ config ETRAX_RTC
296choice 296choice
297 prompt "RTC chip" 297 prompt "RTC chip"
298 depends on ETRAX_RTC 298 depends on ETRAX_RTC
299 default ETRAX_PCF8563 if ETRAX_ARCH_V32 299 default ETRAX_DS1302
300 default ETRAX_DS1302 if ETRAX_ARCH_V10
301 300
302config ETRAX_DS1302 301config ETRAX_DS1302
303 depends on ETRAX_ARCH_V10 302 depends on ETRAX_ARCH_V10
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
index ea69faba9b62..1391b731ad1c 100644
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ b/arch/cris/arch-v10/drivers/pcf8563.c
@@ -345,7 +345,7 @@ static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned
 	int ret;
 
 	mutex_lock(&pcf8563_mutex);
-	return pcf8563_ioctl(filp, cmd, arg);
+	ret = pcf8563_ioctl(filp, cmd, arg);
 	mutex_unlock(&pcf8563_mutex);
 
 	return ret;
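
The bug here is returning while pcf8563_mutex is still held: the early return
skips mutex_unlock(), so the mutex is never released and the next ioctl blocks
forever. (The arch-v32 copy of the driver, deleted later in this series, still
carries the same pattern.) A standalone sketch of the corrected wrapper shape:

    /* sketch: serialize an ioctl handler without leaking the mutex;
     * pcf8563_ioctl() is the driver's existing helper */
    #include <linux/fs.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(pcf8563_mutex);

    static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                       unsigned long arg)
    {
        int ret;

        mutex_lock(&pcf8563_mutex);
        ret = pcf8563_ioctl(filp, cmd, arg);  /* do the work while serialized */
        mutex_unlock(&pcf8563_mutex);         /* always released, even on error */

        return ret;
    }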
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c
index b6be705c2a3e..e78fe49a9849 100644
--- a/arch/cris/arch-v10/kernel/signal.c
+++ b/arch/cris/arch-v10/kernel/signal.c
@@ -537,7 +537,7 @@ void do_signal(int canrestart, struct pt_regs *regs)
537 RESTART_CRIS_SYS(regs); 537 RESTART_CRIS_SYS(regs);
538 } 538 }
539 if (regs->r10 == -ERESTART_RESTARTBLOCK) { 539 if (regs->r10 == -ERESTART_RESTARTBLOCK) {
540 regs->r10 = __NR_restart_syscall; 540 regs->r9 = __NR_restart_syscall;
541 regs->irp -= 2; 541 regs->irp -= 2;
542 } 542 }
543 } 543 }
diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile
index e8c02437edaf..39aa3c117a86 100644
--- a/arch/cris/arch-v32/drivers/Makefile
+++ b/arch/cris/arch-v32/drivers/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_ETRAX_AXISFLASHMAP) += axisflashmap.o
7obj-$(CONFIG_ETRAXFS) += mach-fs/ 7obj-$(CONFIG_ETRAXFS) += mach-fs/
8obj-$(CONFIG_CRIS_MACH_ARTPEC3) += mach-a3/ 8obj-$(CONFIG_CRIS_MACH_ARTPEC3) += mach-a3/
9obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o 9obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o
10obj-$(CONFIG_ETRAX_PCF8563) += pcf8563.o
11obj-$(CONFIG_ETRAX_I2C) += i2c.o 10obj-$(CONFIG_ETRAX_I2C) += i2c.o
12obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o 11obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
13obj-$(CONFIG_PCI) += pci/ 12obj-$(CONFIG_PCI) += pci/
diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c
deleted file mode 100644
index b6e4fc0aad42..000000000000
--- a/arch/cris/arch-v32/drivers/pcf8563.c
+++ /dev/null
@@ -1,377 +0,0 @@
1/*
2 * PCF8563 RTC
3 *
4 * From Phillips' datasheet:
5 *
6 * The PCF8563 is a CMOS real-time clock/calendar optimized for low power
7 * consumption. A programmable clock output, interrupt output and voltage
8 * low detector are also provided. All address and data are transferred
9 * serially via two-line bidirectional I2C-bus. Maximum bus speed is
10 * 400 kbits/s. The built-in word address register is incremented
11 * automatically after each written or read byte.
12 *
13 * Copyright (c) 2002-2007, Axis Communications AB
14 * All rights reserved.
15 *
16 * Author: Tobias Anderberg <tobiasa@axis.com>.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/sched.h>
24#include <linux/init.h>
25#include <linux/fs.h>
26#include <linux/ioctl.h>
27#include <linux/delay.h>
28#include <linux/bcd.h>
29#include <linux/mutex.h>
30
31#include <asm/uaccess.h>
32#include <asm/system.h>
33#include <asm/io.h>
34#include <asm/rtc.h>
35
36#include "i2c.h"
37
38#define PCF8563_MAJOR 121 /* Local major number. */
39#define DEVICE_NAME "rtc" /* Name which is registered in /proc/devices. */
40#define PCF8563_NAME "PCF8563"
41#define DRIVER_VERSION "$Revision: 1.17 $"
42
43/* Two simple wrapper macros, saves a few keystrokes. */
44#define rtc_read(x) i2c_readreg(RTC_I2C_READ, x)
45#define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y)
46
47static DEFINE_MUTEX(pcf8563_mutex);
48static DEFINE_MUTEX(rtc_lock); /* Protect state etc */
49
50static const unsigned char days_in_month[] =
51 { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
52
53static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
54
55/* Cache VL bit value read at driver init since writing the RTC_SECOND
56 * register clears the VL status.
57 */
58static int voltage_low;
59
60static const struct file_operations pcf8563_fops = {
61 .owner = THIS_MODULE,
62 .unlocked_ioctl = pcf8563_unlocked_ioctl,
63 .llseek = noop_llseek,
64};
65
66unsigned char
67pcf8563_readreg(int reg)
68{
69 unsigned char res = rtc_read(reg);
70
71 /* The PCF8563 does not return 0 for unimplemented bits. */
72 switch (reg) {
73 case RTC_SECONDS:
74 case RTC_MINUTES:
75 res &= 0x7F;
76 break;
77 case RTC_HOURS:
78 case RTC_DAY_OF_MONTH:
79 res &= 0x3F;
80 break;
81 case RTC_WEEKDAY:
82 res &= 0x07;
83 break;
84 case RTC_MONTH:
85 res &= 0x1F;
86 break;
87 case RTC_CONTROL1:
88 res &= 0xA8;
89 break;
90 case RTC_CONTROL2:
91 res &= 0x1F;
92 break;
93 case RTC_CLOCKOUT_FREQ:
94 case RTC_TIMER_CONTROL:
95 res &= 0x83;
96 break;
97 }
98 return res;
99}
100
101void
102pcf8563_writereg(int reg, unsigned char val)
103{
104 rtc_write(reg, val);
105}
106
107void
108get_rtc_time(struct rtc_time *tm)
109{
110 tm->tm_sec = rtc_read(RTC_SECONDS);
111 tm->tm_min = rtc_read(RTC_MINUTES);
112 tm->tm_hour = rtc_read(RTC_HOURS);
113 tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH);
114 tm->tm_wday = rtc_read(RTC_WEEKDAY);
115 tm->tm_mon = rtc_read(RTC_MONTH);
116 tm->tm_year = rtc_read(RTC_YEAR);
117
118 if (tm->tm_sec & 0x80) {
119 printk(KERN_ERR "%s: RTC Voltage Low - reliable date/time "
120 "information is no longer guaranteed!\n", PCF8563_NAME);
121 }
122
123 tm->tm_year = bcd2bin(tm->tm_year) +
124 ((tm->tm_mon & 0x80) ? 100 : 0);
125 tm->tm_sec &= 0x7F;
126 tm->tm_min &= 0x7F;
127 tm->tm_hour &= 0x3F;
128 tm->tm_mday &= 0x3F;
129 tm->tm_wday &= 0x07; /* Not coded in BCD. */
130 tm->tm_mon &= 0x1F;
131
132 tm->tm_sec = bcd2bin(tm->tm_sec);
133 tm->tm_min = bcd2bin(tm->tm_min);
134 tm->tm_hour = bcd2bin(tm->tm_hour);
135 tm->tm_mday = bcd2bin(tm->tm_mday);
136 tm->tm_mon = bcd2bin(tm->tm_mon);
137 tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
138}
139
140int __init
141pcf8563_init(void)
142{
143 static int res;
144 static int first = 1;
145
146 if (!first)
147 return res;
148 first = 0;
149
150 /* Initiate the i2c protocol. */
151 res = i2c_init();
152 if (res < 0) {
153 printk(KERN_CRIT "pcf8563_init: Failed to init i2c.\n");
154 return res;
155 }
156
157 /*
158 * First of all we need to reset the chip. This is done by
159 * clearing control1, control2 and clk freq and resetting
160 * all alarms.
161 */
162 if (rtc_write(RTC_CONTROL1, 0x00) < 0)
163 goto err;
164
165 if (rtc_write(RTC_CONTROL2, 0x00) < 0)
166 goto err;
167
168 if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0)
169 goto err;
170
171 if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0)
172 goto err;
173
174 /* Reset the alarms. */
175 if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0)
176 goto err;
177
178 if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0)
179 goto err;
180
181 if (rtc_write(RTC_DAY_ALARM, 0x80) < 0)
182 goto err;
183
184 if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0)
185 goto err;
186
187 /* Check for low voltage, and warn about it. */
188 if (rtc_read(RTC_SECONDS) & 0x80) {
189 voltage_low = 1;
190 printk(KERN_WARNING "%s: RTC Voltage Low - reliable "
191 "date/time information is no longer guaranteed!\n",
192 PCF8563_NAME);
193 }
194
195 return res;
196
197err:
198 printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME);
199 res = -1;
200 return res;
201}
202
203void __exit
204pcf8563_exit(void)
205{
206 unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME);
207}
208
209/*
210 * ioctl calls for this driver. Why return -ENOTTY upon error? Because
211 * POSIX says so!
212 */
213static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
214{
215 /* Some sanity checks. */
216 if (_IOC_TYPE(cmd) != RTC_MAGIC)
217 return -ENOTTY;
218
219 if (_IOC_NR(cmd) > RTC_MAX_IOCTL)
220 return -ENOTTY;
221
222 switch (cmd) {
223 case RTC_RD_TIME:
224 {
225 struct rtc_time tm;
226
227 mutex_lock(&rtc_lock);
228 memset(&tm, 0, sizeof tm);
229 get_rtc_time(&tm);
230
231 if (copy_to_user((struct rtc_time *) arg, &tm,
232 sizeof tm)) {
233 mutex_unlock(&rtc_lock);
234 return -EFAULT;
235 }
236
237 mutex_unlock(&rtc_lock);
238
239 return 0;
240 }
241 case RTC_SET_TIME:
242 {
243 int leap;
244 int year;
245 int century;
246 struct rtc_time tm;
247
248 memset(&tm, 0, sizeof tm);
249 if (!capable(CAP_SYS_TIME))
250 return -EPERM;
251
252 if (copy_from_user(&tm, (struct rtc_time *) arg,
253 sizeof tm))
254 return -EFAULT;
255
256 /* Convert from struct tm to struct rtc_time. */
257 tm.tm_year += 1900;
258 tm.tm_mon += 1;
259
260 /*
261 * Check if tm.tm_year is a leap year. A year is a leap
262 * year if it is divisible by 4 but not 100, except
263 * that years divisible by 400 _are_ leap years.
264 */
265 year = tm.tm_year;
266 leap = (tm.tm_mon == 2) &&
267 ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);
268
269 /* Perform some sanity checks. */
270 if ((tm.tm_year < 1970) ||
271 (tm.tm_mon > 12) ||
272 (tm.tm_mday == 0) ||
273 (tm.tm_mday > days_in_month[tm.tm_mon] + leap) ||
274 (tm.tm_wday >= 7) ||
275 (tm.tm_hour >= 24) ||
276 (tm.tm_min >= 60) ||
277 (tm.tm_sec >= 60))
278 return -EINVAL;
279
280 century = (tm.tm_year >= 2000) ? 0x80 : 0;
281 tm.tm_year = tm.tm_year % 100;
282
283 tm.tm_year = bin2bcd(tm.tm_year);
284 tm.tm_mon = bin2bcd(tm.tm_mon);
285 tm.tm_mday = bin2bcd(tm.tm_mday);
286 tm.tm_hour = bin2bcd(tm.tm_hour);
287 tm.tm_min = bin2bcd(tm.tm_min);
288 tm.tm_sec = bin2bcd(tm.tm_sec);
289 tm.tm_mon |= century;
290
291 mutex_lock(&rtc_lock);
292
293 rtc_write(RTC_YEAR, tm.tm_year);
294 rtc_write(RTC_MONTH, tm.tm_mon);
295 rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */
296 rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday);
297 rtc_write(RTC_HOURS, tm.tm_hour);
298 rtc_write(RTC_MINUTES, tm.tm_min);
299 rtc_write(RTC_SECONDS, tm.tm_sec);
300
301 mutex_unlock(&rtc_lock);
302
303 return 0;
304 }
305 case RTC_VL_READ:
306 if (voltage_low)
307 printk(KERN_ERR "%s: RTC Voltage Low - "
308 "reliable date/time information is no "
309 "longer guaranteed!\n", PCF8563_NAME);
310
311 if (copy_to_user((int *) arg, &voltage_low, sizeof(int)))
312 return -EFAULT;
313 return 0;
314
315 case RTC_VL_CLR:
316 {
317 /* Clear the VL bit in the seconds register in case
318 * the time has not been set already (which would
319 * have cleared it). This does not really matter
320 * because of the cached voltage_low value but do it
321 * anyway for consistency. */
322
323 int ret = rtc_read(RTC_SECONDS);
324
325 rtc_write(RTC_SECONDS, (ret & 0x7F));
326
327 /* Clear the cached value. */
328 voltage_low = 0;
329
330 return 0;
331 }
332 default:
333 return -ENOTTY;
334 }
335
336 return 0;
337}
338
339static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
340{
341 int ret;
342
343 mutex_lock(&pcf8563_mutex);
344 return pcf8563_ioctl(filp, cmd, arg);
345 mutex_unlock(&pcf8563_mutex);
346
347 return ret;
348}
349
350static int __init pcf8563_register(void)
351{
352 if (pcf8563_init() < 0) {
353 printk(KERN_INFO "%s: Unable to initialize Real-Time Clock "
354 "Driver, %s\n", PCF8563_NAME, DRIVER_VERSION);
355 return -1;
356 }
357
358 if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) {
359 printk(KERN_INFO "%s: Unable to get major numer %d for RTC "
360 "device.\n", PCF8563_NAME, PCF8563_MAJOR);
361 return -1;
362 }
363
364 printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME,
365 DRIVER_VERSION);
366
367 /* Check for low voltage, and warn about it. */
368 if (voltage_low) {
369 printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time "
370 "information is no longer guaranteed!\n", PCF8563_NAME);
371 }
372
373 return 0;
374}
375
376module_init(pcf8563_register);
377module_exit(pcf8563_exit);
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 48cd7d2e1b75..81239ebed83f 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -9,6 +9,7 @@ config PPC_CELL_COMMON
9 select PPC_INDIRECT_IO 9 select PPC_INDIRECT_IO
10 select PPC_NATIVE 10 select PPC_NATIVE
11 select PPC_RTAS 11 select PPC_RTAS
12 select IRQ_EDGE_EOI_HANDLER
12 13
13config PPC_CELL_NATIVE 14config PPC_CELL_NATIVE
14 bool 15 bool
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 624d26e72f1d..ec9fc7d82068 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -235,54 +235,6 @@ static int iic_host_match(struct irq_host *h, struct device_node *node)
235 "IBM,CBEA-Internal-Interrupt-Controller"); 235 "IBM,CBEA-Internal-Interrupt-Controller");
236} 236}
237 237
238extern int noirqdebug;
239
240static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
241{
242 struct irq_chip *chip = get_irq_desc_chip(desc);
243
244 raw_spin_lock(&desc->lock);
245
246 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
247
248 /*
249 * If we're currently running this IRQ, or its disabled,
250 * we shouldn't process the IRQ. Mark it pending, handle
251 * the necessary masking and go out
252 */
253 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
254 !desc->action)) {
255 desc->status |= IRQ_PENDING;
256 goto out_eoi;
257 }
258
259 kstat_incr_irqs_this_cpu(irq, desc);
260
261 /* Mark the IRQ currently in progress.*/
262 desc->status |= IRQ_INPROGRESS;
263
264 do {
265 struct irqaction *action = desc->action;
266 irqreturn_t action_ret;
267
268 if (unlikely(!action))
269 goto out_eoi;
270
271 desc->status &= ~IRQ_PENDING;
272 raw_spin_unlock(&desc->lock);
273 action_ret = handle_IRQ_event(irq, action);
274 if (!noirqdebug)
275 note_interrupt(irq, desc, action_ret);
276 raw_spin_lock(&desc->lock);
277
278 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
279
280 desc->status &= ~IRQ_INPROGRESS;
281out_eoi:
282 chip->irq_eoi(&desc->irq_data);
283 raw_spin_unlock(&desc->lock);
284}
285
286static int iic_host_map(struct irq_host *h, unsigned int virq, 238static int iic_host_map(struct irq_host *h, unsigned int virq,
287 irq_hw_number_t hw) 239 irq_hw_number_t hw)
288{ 240{
@@ -295,7 +247,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
295 handle_iic_irq); 247 handle_iic_irq);
296 break; 248 break;
297 default: 249 default:
298 set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq); 250 set_irq_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
299 } 251 }
300 return 0; 252 return 0;
301} 253}
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 27b2295f41f3..4278bbc032ce 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -3,6 +3,8 @@ menu "Machine selection"
3config SCORE 3config SCORE
4 def_bool y 4 def_bool y
5 select HAVE_GENERIC_HARDIRQS 5 select HAVE_GENERIC_HARDIRQS
6 select GENERIC_HARDIRQS_NO_DEPRECATED
7 select GENERIC_IRQ_SHOW
6 8
7choice 9choice
8 prompt "System type" 10 prompt "System type"
diff --git a/arch/score/include/asm/irqflags.h b/arch/score/include/asm/irqflags.h
index 5c7563891e28..37c6ac9dd6e8 100644
--- a/arch/score/include/asm/irqflags.h
+++ b/arch/score/include/asm/irqflags.h
@@ -29,7 +29,7 @@ static inline unsigned long arch_local_save_flags(void)
29 29
30static inline unsigned long arch_local_irq_save(void) 30static inline unsigned long arch_local_irq_save(void)
31{ 31{
32 unsigned long flags 32 unsigned long flags;
33 33
34 asm volatile( 34 asm volatile(
35 " mfcr r8, cr0 \n" 35 " mfcr r8, cr0 \n"
diff --git a/arch/score/kernel/irq.c b/arch/score/kernel/irq.c
index 47647dde09ca..d4196732c65e 100644
--- a/arch/score/kernel/irq.c
+++ b/arch/score/kernel/irq.c
@@ -52,9 +52,9 @@ asmlinkage void do_IRQ(int irq)
52 irq_exit(); 52 irq_exit();
53} 53}
54 54
55static void score_mask(unsigned int irq_nr) 55static void score_mask(struct irq_data *d)
56{ 56{
57 unsigned int irq_source = 63 - irq_nr; 57 unsigned int irq_source = 63 - d->irq;
58 58
59 if (irq_source < 32) 59 if (irq_source < 32)
60 __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) | \ 60 __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) | \
@@ -64,9 +64,9 @@ static void score_mask(unsigned int irq_nr)
64 (1 << (irq_source - 32))), SCORE_PIC + INT_MASKH); 64 (1 << (irq_source - 32))), SCORE_PIC + INT_MASKH);
65} 65}
66 66
67static void score_unmask(unsigned int irq_nr) 67static void score_unmask(struct irq_data *d)
68{ 68{
69 unsigned int irq_source = 63 - irq_nr; 69 unsigned int irq_source = 63 - d->irq;
70 70
71 if (irq_source < 32) 71 if (irq_source < 32)
72 __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) & \ 72 __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) & \
@@ -78,9 +78,9 @@ static void score_unmask(unsigned int irq_nr)
78 78
79struct irq_chip score_irq_chip = { 79struct irq_chip score_irq_chip = {
80 .name = "Score7-level", 80 .name = "Score7-level",
81 .mask = score_mask, 81 .irq_mask = score_mask,
82 .mask_ack = score_mask, 82 .irq_mask_ack = score_mask,
83 .unmask = score_unmask, 83 .irq_unmask = score_unmask,
84}; 84};
85 85
86/* 86/*
@@ -92,7 +92,7 @@ void __init init_IRQ(void)
92 unsigned long target_addr; 92 unsigned long target_addr;
93 93
94 for (index = 0; index < NR_IRQS; ++index) 94 for (index = 0; index < NR_IRQS; ++index)
95 set_irq_chip_and_handler(index, &score_irq_chip, 95 irq_set_chip_and_handler(index, &score_irq_chip,
96 handle_level_irq); 96 handle_level_irq);
97 97
98 for (target_addr = IRQ_VECTOR_BASE_ADDR; 98 for (target_addr = IRQ_VECTOR_BASE_ADDR;
@@ -109,40 +109,3 @@ void __init init_IRQ(void)
109 : : "r" (EXCEPTION_VECTOR_BASE_ADDR | \ 109 : : "r" (EXCEPTION_VECTOR_BASE_ADDR | \
110 VECTOR_ADDRESS_OFFSET_MODE16)); 110 VECTOR_ADDRESS_OFFSET_MODE16));
111} 111}
112
113/*
114 * Generic, controller-independent functions:
115 */
116int show_interrupts(struct seq_file *p, void *v)
117{
118 int i = *(loff_t *)v, cpu;
119 struct irqaction *action;
120 unsigned long flags;
121
122 if (i == 0) {
123 seq_puts(p, " ");
124 for_each_online_cpu(cpu)
125 seq_printf(p, "CPU%d ", cpu);
126 seq_putc(p, '\n');
127 }
128
129 if (i < NR_IRQS) {
130 spin_lock_irqsave(&irq_desc[i].lock, flags);
131 action = irq_desc[i].action;
132 if (!action)
133 goto unlock;
134
135 seq_printf(p, "%3d: ", i);
136 seq_printf(p, "%10u ", kstat_irqs(i));
137 seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-");
138 seq_printf(p, " %s", action->name);
139 for (action = action->next; action; action = action->next)
140 seq_printf(p, ", %s", action->name);
141
142 seq_putc(p, '\n');
143unlock:
144 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
145 }
146
147 return 0;
148}
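
The score hunk above is part of the generic-irq conversion: chip callbacks now receive a struct irq_data pointer instead of a bare IRQ number, the struct irq_chip members gain an irq_ prefix, and the hand-rolled show_interrupts() is dropped in favour of the generic implementation selected via GENERIC_IRQ_SHOW in the Kconfig hunk. A minimal stand-alone mock of the signature change (the struct below is a toy stand-in, not the real <linux/irq.h> definition):

#include <stdio.h>

/* Toy stand-in for the kernel's struct irq_data, only to show the shape
 * of the old and new callback signatures. */
struct irq_data {
	unsigned int irq;
};

/* Old style: the callback is handed the bare IRQ number. */
static void old_style_mask(unsigned int irq_nr)
{
	printf("mask irq %u\n", irq_nr);
}

/* New style: the callback digs the number out of irq_data, just as
 * score_mask() now does with d->irq. */
static void new_style_mask(struct irq_data *d)
{
	printf("mask irq %u\n", d->irq);
}

int main(void)
{
	struct irq_data d = { .irq = 63 };

	old_style_mask(63);
	new_style_mask(&d);
	return 0;
}

Functionally nothing changes in the driver itself; the IRQ number is simply read from d->irq instead of being passed directly.
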
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index adcf794b22e2..be6d9e365a80 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt:
1612 movdqa SHUF_MASK(%rip), %xmm10 1612 movdqa SHUF_MASK(%rip), %xmm10
1613 PSHUFB_XMM %xmm10, %xmm0 1613 PSHUFB_XMM %xmm10, %xmm0
1614 1614
1615
1615 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) 1616 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
1616 sub $16, %r11 1617 sub $16, %r11
1617 add %r13, %r11 1618 add %r13, %r11
@@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt:
1634 # GHASH computation for the last <16 byte block 1635 # GHASH computation for the last <16 byte block
1635 sub %r13, %r11 1636 sub %r13, %r11
1636 add $16, %r11 1637 add $16, %r11
1637 PSHUFB_XMM %xmm10, %xmm1 1638
1639 movdqa SHUF_MASK(%rip), %xmm10
1640 PSHUFB_XMM %xmm10, %xmm0
1638 1641
1639 # shuffle xmm0 back to output as ciphertext 1642 # shuffle xmm0 back to output as ciphertext
1640 1643
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index e0e6340c8dad..2577613fb32b 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tfm *tfm)
828 struct cryptd_aead *cryptd_tfm; 828 struct cryptd_aead *cryptd_tfm;
829 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) 829 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
830 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); 830 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
831 struct crypto_aead *cryptd_child;
832 struct aesni_rfc4106_gcm_ctx *child_ctx;
831 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); 833 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
832 if (IS_ERR(cryptd_tfm)) 834 if (IS_ERR(cryptd_tfm))
833 return PTR_ERR(cryptd_tfm); 835 return PTR_ERR(cryptd_tfm);
836
837 cryptd_child = cryptd_aead_child(cryptd_tfm);
838 child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
839 memcpy(child_ctx, ctx, sizeof(*ctx));
834 ctx->cryptd_tfm = cryptd_tfm; 840 ctx->cryptd_tfm = cryptd_tfm;
835 tfm->crt_aead.reqsize = sizeof(struct aead_request) 841 tfm->crt_aead.reqsize = sizeof(struct aead_request)
836 + crypto_aead_reqsize(&cryptd_tfm->base); 842 + crypto_aead_reqsize(&cryptd_tfm->base);
@@ -923,6 +929,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
923 int ret = 0; 929 int ret = 0;
924 struct crypto_tfm *tfm = crypto_aead_tfm(parent); 930 struct crypto_tfm *tfm = crypto_aead_tfm(parent);
925 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); 931 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
932 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
933 struct aesni_rfc4106_gcm_ctx *child_ctx =
934 aesni_rfc4106_gcm_ctx_get(cryptd_child);
926 u8 *new_key_mem = NULL; 935 u8 *new_key_mem = NULL;
927 936
928 if (key_len < 4) { 937 if (key_len < 4) {
@@ -966,6 +975,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
966 goto exit; 975 goto exit;
967 } 976 }
968 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); 977 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
978 memcpy(child_ctx, ctx, sizeof(*ctx));
969exit: 979exit:
970 kfree(new_key_mem); 980 kfree(new_key_mem);
971 return ret; 981 return ret;
@@ -997,7 +1007,6 @@ static int rfc4106_encrypt(struct aead_request *req)
997 int ret; 1007 int ret;
998 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1008 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
999 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); 1009 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1000 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1001 1010
1002 if (!irq_fpu_usable()) { 1011 if (!irq_fpu_usable()) {
1003 struct aead_request *cryptd_req = 1012 struct aead_request *cryptd_req =
@@ -1006,6 +1015,7 @@ static int rfc4106_encrypt(struct aead_request *req)
1006 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); 1015 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1007 return crypto_aead_encrypt(cryptd_req); 1016 return crypto_aead_encrypt(cryptd_req);
1008 } else { 1017 } else {
1018 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1009 kernel_fpu_begin(); 1019 kernel_fpu_begin();
1010 ret = cryptd_child->base.crt_aead.encrypt(req); 1020 ret = cryptd_child->base.crt_aead.encrypt(req);
1011 kernel_fpu_end(); 1021 kernel_fpu_end();
@@ -1018,7 +1028,6 @@ static int rfc4106_decrypt(struct aead_request *req)
1018 int ret; 1028 int ret;
1019 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1029 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1020 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); 1030 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1021 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1022 1031
1023 if (!irq_fpu_usable()) { 1032 if (!irq_fpu_usable()) {
1024 struct aead_request *cryptd_req = 1033 struct aead_request *cryptd_req =
@@ -1027,6 +1036,7 @@ static int rfc4106_decrypt(struct aead_request *req)
1027 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); 1036 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1028 return crypto_aead_decrypt(cryptd_req); 1037 return crypto_aead_decrypt(cryptd_req);
1029 } else { 1038 } else {
1039 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1030 kernel_fpu_begin(); 1040 kernel_fpu_begin();
1031 ret = cryptd_child->base.crt_aead.decrypt(req); 1041 ret = cryptd_child->base.crt_aead.decrypt(req);
1032 kernel_fpu_end(); 1042 kernel_fpu_end();
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a18e497f1c3c..31e9e10f657e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -824,11 +824,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
824 device->backlight->props.brightness = 824 device->backlight->props.brightness =
825 acpi_video_get_brightness(device->backlight); 825 acpi_video_get_brightness(device->backlight);
826 826
827 result = sysfs_create_link(&device->backlight->dev.kobj,
828 &device->dev->dev.kobj, "device");
829 if (result)
830 printk(KERN_ERR PREFIX "Create sysfs link\n");
831
832 device->cooling_dev = thermal_cooling_device_register("LCD", 827 device->cooling_dev = thermal_cooling_device_register("LCD",
833 device->dev, &video_cooling_ops); 828 device->dev, &video_cooling_ops);
834 if (IS_ERR(device->cooling_dev)) { 829 if (IS_ERR(device->cooling_dev)) {
@@ -1381,7 +1376,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
1381 "Cant remove video notify handler\n"); 1376 "Cant remove video notify handler\n");
1382 } 1377 }
1383 if (device->backlight) { 1378 if (device->backlight) {
1384 sysfs_remove_link(&device->backlight->dev.kobj, "device");
1385 backlight_device_unregister(device->backlight); 1379 backlight_device_unregister(device->backlight);
1386 device->backlight = NULL; 1380 device->backlight = NULL;
1387 } 1381 }
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d8d0cda2641d..d3743204a7e9 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -414,4 +414,9 @@ config GPIO_JANZ_TTL
414 This driver provides support for driving the pins in output 414 This driver provides support for driving the pins in output
415 mode only. Input mode is not supported. 415 mode only. Input mode is not supported.
416 416
417config AB8500_GPIO
418 bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
419 depends on AB8500_CORE
420 help
421 Select this to enable the AB8500 IC GPIO driver
417endif 422endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 3351cf87b0ed..becef5954356 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
42obj-$(CONFIG_GPIO_SX150X) += sx150x.o 42obj-$(CONFIG_GPIO_SX150X) += sx150x.o
43obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o 43obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
44obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o 44obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
45obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o
diff --git a/drivers/gpio/ab8500-gpio.c b/drivers/gpio/ab8500-gpio.c
new file mode 100644
index 000000000000..e7b834d054b7
--- /dev/null
+++ b/drivers/gpio/ab8500-gpio.c
@@ -0,0 +1,522 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2011
3 *
4 * Author: BIBEK BASU <bibek.basu@stericsson.com>
5 * License terms: GNU General Public License (GPL) version 2
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/err.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19#include <linux/gpio.h>
20#include <linux/irq.h>
21#include <linux/interrupt.h>
22#include <linux/mfd/ab8500.h>
23#include <linux/mfd/abx500.h>
24#include <linux/mfd/ab8500/gpio.h>
25
26/*
27 * GPIO registers offset
28 * Bank: 0x10
29 */
30#define AB8500_GPIO_SEL1_REG 0x00
31#define AB8500_GPIO_SEL2_REG 0x01
32#define AB8500_GPIO_SEL3_REG 0x02
33#define AB8500_GPIO_SEL4_REG 0x03
34#define AB8500_GPIO_SEL5_REG 0x04
35#define AB8500_GPIO_SEL6_REG 0x05
36
37#define AB8500_GPIO_DIR1_REG 0x10
38#define AB8500_GPIO_DIR2_REG 0x11
39#define AB8500_GPIO_DIR3_REG 0x12
40#define AB8500_GPIO_DIR4_REG 0x13
41#define AB8500_GPIO_DIR5_REG 0x14
42#define AB8500_GPIO_DIR6_REG 0x15
43
44#define AB8500_GPIO_OUT1_REG 0x20
45#define AB8500_GPIO_OUT2_REG 0x21
46#define AB8500_GPIO_OUT3_REG 0x22
47#define AB8500_GPIO_OUT4_REG 0x23
48#define AB8500_GPIO_OUT5_REG 0x24
49#define AB8500_GPIO_OUT6_REG 0x25
50
51#define AB8500_GPIO_PUD1_REG 0x30
52#define AB8500_GPIO_PUD2_REG 0x31
53#define AB8500_GPIO_PUD3_REG 0x32
54#define AB8500_GPIO_PUD4_REG 0x33
55#define AB8500_GPIO_PUD5_REG 0x34
56#define AB8500_GPIO_PUD6_REG 0x35
57
58#define AB8500_GPIO_IN1_REG 0x40
59#define AB8500_GPIO_IN2_REG 0x41
60#define AB8500_GPIO_IN3_REG 0x42
61#define AB8500_GPIO_IN4_REG 0x43
62#define AB8500_GPIO_IN5_REG 0x44
63#define AB8500_GPIO_IN6_REG 0x45
64#define AB8500_GPIO_ALTFUN_REG 0x45
65#define ALTFUN_REG_INDEX 6
66#define AB8500_NUM_GPIO 42
67#define AB8500_NUM_VIR_GPIO_IRQ 16
68
69enum ab8500_gpio_action {
70 NONE,
71 STARTUP,
72 SHUTDOWN,
73 MASK,
74 UNMASK
75};
76
77struct ab8500_gpio {
78 struct gpio_chip chip;
79 struct ab8500 *parent;
80 struct device *dev;
81 struct mutex lock;
82 u32 irq_base;
83 enum ab8500_gpio_action irq_action;
84 u16 rising;
85 u16 falling;
86};
87/**
88 * to_ab8500_gpio() - get the pointer to ab8500_gpio
89 * @chip: Member of the structure ab8500_gpio
90 */
91static inline struct ab8500_gpio *to_ab8500_gpio(struct gpio_chip *chip)
92{
93 return container_of(chip, struct ab8500_gpio, chip);
94}
95
96static int ab8500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
97 unsigned offset, int val)
98{
99 struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
100 u8 pos = offset % 8;
101 int ret;
102
103 reg = reg + (offset / 8);
104 ret = abx500_mask_and_set_register_interruptible(ab8500_gpio->dev,
105 AB8500_MISC, reg, 1 << pos, val << pos);
106 if (ret < 0)
107 dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
108 return ret;
109}
110/**
111 * ab8500_gpio_get() - Get the particular GPIO value
112 * @chip: Gpio device
113 * @offset: GPIO number to read
114 */
115static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
116{
117 struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
118 u8 mask = 1 << (offset % 8);
119 u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
120 int ret;
121 u8 data;
122 ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
123 reg, &data);
124 if (ret < 0) {
125 dev_err(ab8500_gpio->dev, "%s read failed\n", __func__);
126 return ret;
127 }
128 return (data & mask) >> (offset % 8);
129}
130
131static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
132{
133 struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
134 int ret;
135 /* Write the data */
136 ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
137 if (ret < 0)
138 dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
139}
140
141static int ab8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
142 int val)
143{
144 int ret;
145 /* set direction as output */
146 ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
147 if (ret < 0)
148 return ret;
149 /* disable pull down */
150 ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
151 if (ret < 0)
152 return ret;
153 /* set the output as 1 or 0 */
154 return ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
155
156}
157
158static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
159{
160 /* set the register as input */
161 return ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
162}
163
164static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
165{
166 /*
167 * Only some GPIOs are interrupt capable, and they are
168 * organized in discontiguous clusters:
169 *
170 * GPIO6 to GPIO13
171 * GPIO24 and GPIO25
172 * GPIO36 to GPIO41
173 */
174 static struct ab8500_gpio_irq_cluster {
175 int start;
176 int end;
177 } clusters[] = {
178 {.start = 6, .end = 13},
179 {.start = 24, .end = 25},
180 {.start = 36, .end = 41},
181 };
182 struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
183 int base = ab8500_gpio->irq_base;
184 int i;
185
186 for (i = 0; i < ARRAY_SIZE(clusters); i++) {
187 struct ab8500_gpio_irq_cluster *cluster = &clusters[i];
188
189 if (offset >= cluster->start && offset <= cluster->end)
190 return base + offset - cluster->start;
191
192 /* Advance by the number of gpios in this cluster */
193 base += cluster->end - cluster->start + 1;
194 }
195
196 return -EINVAL;
197}
198
199static struct gpio_chip ab8500gpio_chip = {
200 .label = "ab8500_gpio",
201 .owner = THIS_MODULE,
202 .direction_input = ab8500_gpio_direction_input,
203 .get = ab8500_gpio_get,
204 .direction_output = ab8500_gpio_direction_output,
205 .set = ab8500_gpio_set,
206 .to_irq = ab8500_gpio_to_irq,
207};
208
209static unsigned int irq_to_rising(unsigned int irq)
210{
211 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
212 int offset = irq - ab8500_gpio->irq_base;
213 int new_irq = offset + AB8500_INT_GPIO6R
214 + ab8500_gpio->parent->irq_base;
215 return new_irq;
216}
217
218static unsigned int irq_to_falling(unsigned int irq)
219{
220 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
221 int offset = irq - ab8500_gpio->irq_base;
222 int new_irq = offset + AB8500_INT_GPIO6F
223 + ab8500_gpio->parent->irq_base;
224 return new_irq;
225
226}
227
228static unsigned int rising_to_irq(unsigned int irq, void *dev)
229{
230 struct ab8500_gpio *ab8500_gpio = dev;
231 int offset = irq - AB8500_INT_GPIO6R
232 - ab8500_gpio->parent->irq_base;
233 int new_irq = offset + ab8500_gpio->irq_base;
234 return new_irq;
235}
236
237static unsigned int falling_to_irq(unsigned int irq, void *dev)
238{
239 struct ab8500_gpio *ab8500_gpio = dev;
240 int offset = irq - AB8500_INT_GPIO6F
241 - ab8500_gpio->parent->irq_base;
242 int new_irq = offset + ab8500_gpio->irq_base;
243 return new_irq;
244
245}
246
247/*
248 * IRQ handler
249 */
250
251static irqreturn_t handle_rising(int irq, void *dev)
252{
253
254 handle_nested_irq(rising_to_irq(irq, dev));
255 return IRQ_HANDLED;
256}
257
258static irqreturn_t handle_falling(int irq, void *dev)
259{
260
261 handle_nested_irq(falling_to_irq(irq, dev));
262 return IRQ_HANDLED;
263}
264
265static void ab8500_gpio_irq_lock(unsigned int irq)
266{
267 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
268 mutex_lock(&ab8500_gpio->lock);
269}
270
271static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
272{
273 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
274 int offset = irq - ab8500_gpio->irq_base;
275 bool rising = ab8500_gpio->rising & BIT(offset);
276 bool falling = ab8500_gpio->falling & BIT(offset);
277 int ret;
278
279 switch (ab8500_gpio->irq_action) {
280 case STARTUP:
281 if (rising)
282 ret = request_threaded_irq(irq_to_rising(irq),
283 NULL, handle_rising,
284 IRQF_TRIGGER_RISING,
285 "ab8500-gpio-r", ab8500_gpio);
286 if (falling)
287 ret = request_threaded_irq(irq_to_falling(irq),
288 NULL, handle_falling,
289 IRQF_TRIGGER_FALLING,
290 "ab8500-gpio-f", ab8500_gpio);
291 break;
292 case SHUTDOWN:
293 if (rising)
294 free_irq(irq_to_rising(irq), ab8500_gpio);
295 if (falling)
296 free_irq(irq_to_falling(irq), ab8500_gpio);
297 break;
298 case MASK:
299 if (rising)
300 disable_irq(irq_to_rising(irq));
301 if (falling)
302 disable_irq(irq_to_falling(irq));
303 break;
304 case UNMASK:
305 if (rising)
306 enable_irq(irq_to_rising(irq));
307 if (falling)
308 enable_irq(irq_to_falling(irq));
309 break;
310 case NONE:
311 break;
312 }
313 ab8500_gpio->irq_action = NONE;
314 ab8500_gpio->rising &= ~(BIT(offset));
315 ab8500_gpio->falling &= ~(BIT(offset));
316 mutex_unlock(&ab8500_gpio->lock);
317}
318
319
320static void ab8500_gpio_irq_mask(unsigned int irq)
321{
322 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
323 ab8500_gpio->irq_action = MASK;
324}
325
326static void ab8500_gpio_irq_unmask(unsigned int irq)
327{
328 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
329 ab8500_gpio->irq_action = UNMASK;
330}
331
332static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
333{
334 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
335 int offset = irq - ab8500_gpio->irq_base;
336
337 if (type == IRQ_TYPE_EDGE_BOTH) {
338 ab8500_gpio->rising = BIT(offset);
339 ab8500_gpio->falling = BIT(offset);
340 } else if (type == IRQ_TYPE_EDGE_RISING) {
341 ab8500_gpio->rising = BIT(offset);
342 } else {
343 ab8500_gpio->falling = BIT(offset);
344 }
345 return 0;
346}
347
348unsigned int ab8500_gpio_irq_startup(unsigned int irq)
349{
350 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
351 ab8500_gpio->irq_action = STARTUP;
352 return 0;
353}
354
355void ab8500_gpio_irq_shutdown(unsigned int irq)
356{
357 struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
358 ab8500_gpio->irq_action = SHUTDOWN;
359}
360
361static struct irq_chip ab8500_gpio_irq_chip = {
362 .name = "ab8500-gpio",
363 .startup = ab8500_gpio_irq_startup,
364 .shutdown = ab8500_gpio_irq_shutdown,
365 .bus_lock = ab8500_gpio_irq_lock,
366 .bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
367 .mask = ab8500_gpio_irq_mask,
368 .unmask = ab8500_gpio_irq_unmask,
369 .set_type = ab8500_gpio_irq_set_type,
370};
371
372static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
373{
374 u32 base = ab8500_gpio->irq_base;
375 int irq;
376
377 for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
378 set_irq_chip_data(irq, ab8500_gpio);
379 set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
380 handle_simple_irq);
381 set_irq_nested_thread(irq, 1);
382#ifdef CONFIG_ARM
383 set_irq_flags(irq, IRQF_VALID);
384#else
385 set_irq_noprobe(irq);
386#endif
387 }
388
389 return 0;
390}
391
392static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
393{
394 int base = ab8500_gpio->irq_base;
395 int irq;
396
397 for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ; irq++) {
398#ifdef CONFIG_ARM
399 set_irq_flags(irq, 0);
400#endif
401 set_irq_chip_and_handler(irq, NULL, NULL);
402 set_irq_chip_data(irq, NULL);
403 }
404}
405
406static int __devinit ab8500_gpio_probe(struct platform_device *pdev)
407{
408 struct ab8500_platform_data *ab8500_pdata =
409 dev_get_platdata(pdev->dev.parent);
410 struct ab8500_gpio_platform_data *pdata;
411 struct ab8500_gpio *ab8500_gpio;
412 int ret;
413 int i;
414
415 pdata = ab8500_pdata->gpio;
416 if (!pdata) {
417 dev_err(&pdev->dev, "gpio platform data missing\n");
418 return -ENODEV;
419 }
420
421 ab8500_gpio = kzalloc(sizeof(struct ab8500_gpio), GFP_KERNEL);
422 if (ab8500_gpio == NULL) {
423 dev_err(&pdev->dev, "failed to allocate memory\n");
424 return -ENOMEM;
425 }
426 ab8500_gpio->dev = &pdev->dev;
427 ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent);
428 ab8500_gpio->chip = ab8500gpio_chip;
429 ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO;
430 ab8500_gpio->chip.dev = &pdev->dev;
431 ab8500_gpio->chip.base = pdata->gpio_base;
432 ab8500_gpio->irq_base = pdata->irq_base;
433 /* initialize the lock */
434 mutex_init(&ab8500_gpio->lock);
435 /*
436 * The AB8500 core will handle and clear the IRQ.
437 * Configure the GPIOs based on the config-reg values,
438 * which select whether each pin is used as a
439 * GPIO or as an alternate function.
440 */
441 for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) {
442 ret = abx500_set_register_interruptible(ab8500_gpio->dev,
443 AB8500_MISC, i,
444 pdata->config_reg[i]);
445 if (ret < 0)
446 goto out_free;
447 }
448 ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
449 AB8500_GPIO_ALTFUN_REG,
450 pdata->config_reg[ALTFUN_REG_INDEX]);
451 if (ret < 0)
452 goto out_free;
453
454 ret = ab8500_gpio_irq_init(ab8500_gpio);
455 if (ret)
456 goto out_free;
457 ret = gpiochip_add(&ab8500_gpio->chip);
458 if (ret) {
459 dev_err(&pdev->dev, "unable to add gpiochip: %d\n",
460 ret);
461 goto out_rem_irq;
462 }
463 platform_set_drvdata(pdev, ab8500_gpio);
464 return 0;
465
466out_rem_irq:
467 ab8500_gpio_irq_remove(ab8500_gpio);
468out_free:
469 mutex_destroy(&ab8500_gpio->lock);
470 kfree(ab8500_gpio);
471 return ret;
472}
473
474/*
475 * ab8500_gpio_remove() - remove Ab8500-gpio driver
476 * @pdev : Platform device registered
477 */
478static int __devexit ab8500_gpio_remove(struct platform_device *pdev)
479{
480 struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev);
481 int ret;
482
483 ret = gpiochip_remove(&ab8500_gpio->chip);
484 if (ret < 0) {
485 dev_err(ab8500_gpio->dev, "unable to remove gpiochip: %d\n",
486 ret);
487 return ret;
488 }
489
490 platform_set_drvdata(pdev, NULL);
491 mutex_destroy(&ab8500_gpio->lock);
492 kfree(ab8500_gpio);
493
494 return 0;
495}
496
497static struct platform_driver ab8500_gpio_driver = {
498 .driver = {
499 .name = "ab8500-gpio",
500 .owner = THIS_MODULE,
501 },
502 .probe = ab8500_gpio_probe,
503 .remove = __devexit_p(ab8500_gpio_remove),
504};
505
506static int __init ab8500_gpio_init(void)
507{
508 return platform_driver_register(&ab8500_gpio_driver);
509}
510arch_initcall(ab8500_gpio_init);
511
512static void __exit ab8500_gpio_exit(void)
513{
514 platform_driver_unregister(&ab8500_gpio_driver);
515}
516module_exit(ab8500_gpio_exit);
517
518MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
519MODULE_DESCRIPTION("Driver allowing the unused AB8500 pins"
520 " to be used as GPIOs");
521MODULE_ALIAS("platform:ab8500-gpio");
522MODULE_LICENSE("GPL v2");
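
For reference, the cluster walk in ab8500_gpio_to_irq() above packs the 16 interrupt-capable pins (GPIO6-13, GPIO24-25, GPIO36-41) onto consecutive virtual IRQs starting at irq_base. A stand-alone sketch of the same arithmetic, using an arbitrary example base of 200:

#include <stdio.h>

/* Stand-alone copy of the cluster walk in ab8500_gpio_to_irq(); the base
 * value is an example, not a real platform number. */
struct cluster { int start, end; };

static int to_irq(int base, int offset)
{
	static const struct cluster clusters[] = {
		{ 6, 13 }, { 24, 25 }, { 36, 41 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(clusters) / sizeof(clusters[0]); i++) {
		if (offset >= clusters[i].start && offset <= clusters[i].end)
			return base + offset - clusters[i].start;
		/* advance by the number of gpios in this cluster */
		base += clusters[i].end - clusters[i].start + 1;
	}
	return -1;	/* not interrupt capable (-EINVAL in the driver) */
}

int main(void)
{
	int base = 200;	/* example irq_base */

	printf("GPIO6  -> %d\n", to_irq(base, 6));	/* 200 */
	printf("GPIO13 -> %d\n", to_irq(base, 13));	/* 207 */
	printf("GPIO24 -> %d\n", to_irq(base, 24));	/* 208 */
	printf("GPIO36 -> %d\n", to_irq(base, 36));	/* 210 */
	printf("GPIO41 -> %d\n", to_irq(base, 41));	/* 215 */
	printf("GPIO5  -> %d\n", to_irq(base, 5));	/* -1 */
	return 0;
}

So the three clusters land on 200-207, 208-209 and 210-215: exactly AB8500_NUM_VIR_GPIO_IRQ consecutive numbers.
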
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 767406c95291..700d420a59ac 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -23,7 +23,7 @@
23#include <linux/swab.h> 23#include <linux/swab.h>
24#include "r592.h" 24#include "r592.h"
25 25
26static int enable_dma = 1; 26static int r592_enable_dma = 1;
27static int debug; 27static int debug;
28 28
29static const char *tpc_names[] = { 29static const char *tpc_names[] = {
@@ -267,7 +267,7 @@ static void r592_stop_dma(struct r592_device *dev, int error)
267/* Test if hardware supports DMA */ 267/* Test if hardware supports DMA */
268static void r592_check_dma(struct r592_device *dev) 268static void r592_check_dma(struct r592_device *dev)
269{ 269{
270 dev->dma_capable = enable_dma && 270 dev->dma_capable = r592_enable_dma &&
271 (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) & 271 (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) &
272 R592_FIFO_DMA_SETTINGS_CAP); 272 R592_FIFO_DMA_SETTINGS_CAP);
273} 273}
@@ -898,7 +898,7 @@ static void __exit r592_module_exit(void)
898module_init(r592_module_init); 898module_init(r592_module_init);
899module_exit(r592_module_exit); 899module_exit(r592_module_exit);
900 900
901module_param(enable_dma, bool, S_IRUGO); 901module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO);
902MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); 902MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)");
903module_param(debug, int, S_IRUGO | S_IWUSR); 903module_param(debug, int, S_IRUGO | S_IWUSR);
904MODULE_PARM_DESC(debug, "Debug level (0-3)"); 904MODULE_PARM_DESC(debug, "Debug level (0-3)");
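
The r592 change above renames only the C variable; module_param_named() keeps the user-visible parameter name, so "modprobe r592 enable_dma=0" behaves exactly as before (the prefix apparently avoids a clash with a global enable_dma symbol elsewhere in the tree). A hypothetical minimal module using the same pattern; the "example" names are made up for illustration:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Driver-prefixed variable name, unchanged "enable_dma" parameter name. */
static bool example_enable_dma = true;
module_param_named(enable_dma, example_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)");

static int __init example_init(void)
{
	pr_info("example: enable_dma=%d\n", example_enable_dma);
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
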
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 62e33e2258d4..67d01c938284 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -362,6 +362,15 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
362 } 362 }
363} 363}
364 364
365static struct resource ab8500_gpio_resources[] = {
366 {
367 .name = "GPIO_INT6",
368 .start = AB8500_INT_GPIO6R,
369 .end = AB8500_INT_GPIO41F,
370 .flags = IORESOURCE_IRQ,
371 }
372};
373
365static struct resource ab8500_gpadc_resources[] = { 374static struct resource ab8500_gpadc_resources[] = {
366 { 375 {
367 .name = "HW_CONV_END", 376 .name = "HW_CONV_END",
@@ -596,6 +605,11 @@ static struct mfd_cell ab8500_devs[] = {
596 .name = "ab8500-regulator", 605 .name = "ab8500-regulator",
597 }, 606 },
598 { 607 {
608 .name = "ab8500-gpio",
609 .num_resources = ARRAY_SIZE(ab8500_gpio_resources),
610 .resources = ab8500_gpio_resources,
611 },
612 {
599 .name = "ab8500-gpadc", 613 .name = "ab8500-gpadc",
600 .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), 614 .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
601 .resources = ab8500_gpadc_resources, 615 .resources = ab8500_gpadc_resources,
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index 6820327adf4a..821e6b86afd2 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -97,7 +97,7 @@ static void __exit ab8500_i2c_exit(void)
97{ 97{
98 platform_driver_unregister(&ab8500_i2c_driver); 98 platform_driver_unregister(&ab8500_i2c_driver);
99} 99}
100subsys_initcall(ab8500_i2c_init); 100arch_initcall(ab8500_i2c_init);
101module_exit(ab8500_i2c_exit); 101module_exit(ab8500_i2c_exit);
102 102
103MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com"); 103MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 222dfb737b11..2ee442c2a5db 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -101,6 +101,19 @@ config DELL_WMI
101 To compile this driver as a module, choose M here: the module will 101 To compile this driver as a module, choose M here: the module will
102 be called dell-wmi. 102 be called dell-wmi.
103 103
104config DELL_WMI_AIO
105 tristate "WMI Hotkeys for Dell All-In-One series"
106 depends on ACPI_WMI
107 depends on INPUT
108 select INPUT_SPARSEKMAP
109 ---help---
110 Say Y here if you want to support WMI-based hotkeys on Dell
111 All-In-One machines.
112
113 To compile this driver as a module, choose M here: the module will
114 be called dell-wmi.
115
116
104config FUJITSU_LAPTOP 117config FUJITSU_LAPTOP
105 tristate "Fujitsu Laptop Extras" 118 tristate "Fujitsu Laptop Extras"
106 depends on ACPI 119 depends on ACPI
@@ -438,23 +451,53 @@ config EEEPC_LAPTOP
438 Bluetooth, backlight and allows powering on/off some other 451 Bluetooth, backlight and allows powering on/off some other
439 devices. 452 devices.
440 453
441 If you have an Eee PC laptop, say Y or M here. 454 If you have an Eee PC laptop, say Y or M here. If this driver
455 doesn't work on your Eee PC, try eeepc-wmi instead.
442 456
443config EEEPC_WMI 457config ASUS_WMI
444 tristate "Eee PC WMI Hotkey Driver (EXPERIMENTAL)" 458 tristate "ASUS WMI Driver (EXPERIMENTAL)"
445 depends on ACPI_WMI 459 depends on ACPI_WMI
446 depends on INPUT 460 depends on INPUT
461 depends on HWMON
447 depends on EXPERIMENTAL 462 depends on EXPERIMENTAL
448 depends on BACKLIGHT_CLASS_DEVICE 463 depends on BACKLIGHT_CLASS_DEVICE
449 depends on RFKILL || RFKILL = n 464 depends on RFKILL || RFKILL = n
465 depends on HOTPLUG_PCI
450 select INPUT_SPARSEKMAP 466 select INPUT_SPARSEKMAP
451 select LEDS_CLASS 467 select LEDS_CLASS
452 select NEW_LEDS 468 select NEW_LEDS
453 ---help--- 469 ---help---
454 Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. 470 Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
471 Asus Notebooks).
455 472
456 To compile this driver as a module, choose M here: the module will 473 To compile this driver as a module, choose M here: the module will
457 be called eeepc-wmi. 474 be called asus-wmi.
475
476config ASUS_NB_WMI
477 tristate "Asus Notebook WMI Driver (EXPERIMENTAL)"
478 depends on ASUS_WMI
479 ---help---
480 This is a driver for newer Asus notebooks. It adds extra features
481 like wireless radio and bluetooth control, leds, hotkeys, backlight...
482
483 For more information, see
484 <file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
485
486 If you have an ACPI-WMI compatible Asus Notebook, say Y or M
487 here.
488
489config EEEPC_WMI
490 tristate "Eee PC WMI Driver (EXPERIMENTAL)"
491 depends on ASUS_WMI
492 ---help---
493 This is a driver for newer Eee PC laptops. It adds extra features
494 like wireless radio and bluetooth control, leds, hotkeys, backlight...
495
496 For more information, see
497 <file:Documentation/ABI/testing/sysfs-platform-asus-wmi>
498
499 If you have an ACPI-WMI compatible Eee PC laptop (>= 1000), say Y or M
500 here.
458 501
459config ACPI_WMI 502config ACPI_WMI
460 tristate "WMI" 503 tristate "WMI"
@@ -616,6 +659,21 @@ config GPIO_INTEL_PMIC
616 Say Y here to support GPIO via the SCU IPC interface 659 Say Y here to support GPIO via the SCU IPC interface
617 on Intel MID platforms. 660 on Intel MID platforms.
618 661
662config INTEL_MID_POWER_BUTTON
663 tristate "power button driver for Intel MID platforms"
664 depends on INTEL_SCU_IPC && INPUT
665 help
666 This driver handles the power button on the Intel MID platforms.
667
668 If unsure, say N.
669
670config INTEL_MFLD_THERMAL
671 tristate "Thermal driver for Intel Medfield platform"
672 depends on INTEL_SCU_IPC && THERMAL
673 help
674 Say Y here to enable thermal driver support for the Intel Medfield
675 platform.
676
619config RAR_REGISTER 677config RAR_REGISTER
620 bool "Restricted Access Region Register Driver" 678 bool "Restricted Access Region Register Driver"
621 depends on PCI && X86_MRST 679 depends on PCI && X86_MRST
@@ -672,4 +730,26 @@ config XO1_RFKILL
672 Support for enabling/disabling the WLAN interface on the OLPC XO-1 730 Support for enabling/disabling the WLAN interface on the OLPC XO-1
673 laptop. 731 laptop.
674 732
733config XO15_EBOOK
734 tristate "OLPC XO-1.5 ebook switch"
735 depends on ACPI && INPUT
736 ---help---
737 Support for the ebook switch on the OLPC XO-1.5 laptop.
738
739 This switch is triggered as the screen is rotated and folded down to
740 convert the device into ebook form.
741
742config SAMSUNG_LAPTOP
743 tristate "Samsung Laptop driver"
744 depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86
745 ---help---
746 This module implements a driver for a wide range of different
747 Samsung laptops. It offers control over the different
748 function keys, wireless LED, LCD backlight level, and
749 sometimes provides a "performance_control" sysfs file to allow
750 the performance level of the laptop to be changed.
751
752 To compile this driver as a module, choose M here: the module
753 will be called samsung-laptop.
754
675endif # X86_PLATFORM_DEVICES 755endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 299aefb3e74c..029e8861d086 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -3,6 +3,8 @@
3# x86 Platform-Specific Drivers 3# x86 Platform-Specific Drivers
4# 4#
5obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o 5obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
6obj-$(CONFIG_ASUS_WMI) += asus-wmi.o
7obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
6obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o 8obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
7obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o 9obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o 10obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
@@ -10,6 +12,7 @@ obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
10obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o 12obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
11obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o 13obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
12obj-$(CONFIG_DELL_WMI) += dell-wmi.o 14obj-$(CONFIG_DELL_WMI) += dell-wmi.o
15obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
13obj-$(CONFIG_ACER_WMI) += acer-wmi.o 16obj-$(CONFIG_ACER_WMI) += acer-wmi.o
14obj-$(CONFIG_ACERHDF) += acerhdf.o 17obj-$(CONFIG_ACERHDF) += acerhdf.o
15obj-$(CONFIG_HP_ACCEL) += hp_accel.o 18obj-$(CONFIG_HP_ACCEL) += hp_accel.o
@@ -29,9 +32,13 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
29obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o 32obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
30obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o 33obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
31obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o 34obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
32obj-$(CONFIG_INTEL_SCU_IPC_UTIL)+= intel_scu_ipcutil.o 35obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
36obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
33obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o 37obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o
34obj-$(CONFIG_INTEL_IPS) += intel_ips.o 38obj-$(CONFIG_INTEL_IPS) += intel_ips.o
35obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o 39obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o
36obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o 40obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
41obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
37obj-$(CONFIG_IBM_RTL) += ibm_rtl.o 42obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
43obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
44obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c9784705f6ac..5ea6c3477d17 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -22,6 +22,8 @@
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
25#include <linux/kernel.h> 27#include <linux/kernel.h>
26#include <linux/module.h> 28#include <linux/module.h>
27#include <linux/init.h> 29#include <linux/init.h>
@@ -46,12 +48,6 @@ MODULE_AUTHOR("Carlos Corbacho");
46MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); 48MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
47MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
48 50
49#define ACER_LOGPREFIX "acer-wmi: "
50#define ACER_ERR KERN_ERR ACER_LOGPREFIX
51#define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX
52#define ACER_INFO KERN_INFO ACER_LOGPREFIX
53#define ACER_WARNING KERN_WARNING ACER_LOGPREFIX
54
55/* 51/*
56 * Magic Number 52 * Magic Number
57 * Meaning is unknown - this number is required for writing to ACPI for AMW0 53 * Meaning is unknown - this number is required for writing to ACPI for AMW0
@@ -84,7 +80,7 @@ MODULE_LICENSE("GPL");
84#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" 80#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
85#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" 81#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
86#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" 82#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
87#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" 83#define WMID_GUID2 "95764E09-FB56-4E83-B31A-37761F60994A"
88#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" 84#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
89 85
90/* 86/*
@@ -93,7 +89,7 @@ MODULE_LICENSE("GPL");
93#define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" 89#define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
94 90
95MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); 91MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
96MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"); 92MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3");
97MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); 93MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
98 94
99enum acer_wmi_event_ids { 95enum acer_wmi_event_ids {
@@ -108,7 +104,7 @@ static const struct key_entry acer_wmi_keymap[] = {
108 {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ 104 {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
109 {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ 105 {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
110 {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ 106 {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
111 {KE_KEY, 0x82, {KEY_F22} }, /* Touch Pad On/Off */ 107 {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
112 {KE_END, 0} 108 {KE_END, 0}
113}; 109};
114 110
@@ -221,6 +217,7 @@ struct acer_debug {
221static struct rfkill *wireless_rfkill; 217static struct rfkill *wireless_rfkill;
222static struct rfkill *bluetooth_rfkill; 218static struct rfkill *bluetooth_rfkill;
223static struct rfkill *threeg_rfkill; 219static struct rfkill *threeg_rfkill;
220static bool rfkill_inited;
224 221
225/* Each low-level interface must define at least some of the following */ 222/* Each low-level interface must define at least some of the following */
226struct wmi_interface { 223struct wmi_interface {
@@ -845,7 +842,7 @@ static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy)
845 has_type_aa = true; 842 has_type_aa = true;
846 type_aa = (struct hotkey_function_type_aa *) header; 843 type_aa = (struct hotkey_function_type_aa *) header;
847 844
848 printk(ACER_INFO "Function bitmap for Communication Button: 0x%x\n", 845 pr_info("Function bitmap for Communication Button: 0x%x\n",
849 type_aa->commun_func_bitmap); 846 type_aa->commun_func_bitmap);
850 847
851 if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS) 848 if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS)
@@ -991,6 +988,7 @@ static int __devinit acer_led_init(struct device *dev)
991 988
992static void acer_led_exit(void) 989static void acer_led_exit(void)
993{ 990{
991 set_u32(LED_OFF, ACER_CAP_MAILLED);
994 led_classdev_unregister(&mail_led); 992 led_classdev_unregister(&mail_led);
995} 993}
996 994
@@ -1036,7 +1034,7 @@ static int __devinit acer_backlight_init(struct device *dev)
1036 bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, 1034 bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops,
1037 &props); 1035 &props);
1038 if (IS_ERR(bd)) { 1036 if (IS_ERR(bd)) {
1039 printk(ACER_ERR "Could not register Acer backlight device\n"); 1037 pr_err("Could not register Acer backlight device\n");
1040 acer_backlight_device = NULL; 1038 acer_backlight_device = NULL;
1041 return PTR_ERR(bd); 1039 return PTR_ERR(bd);
1042 } 1040 }
@@ -1083,8 +1081,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
1083 return AE_ERROR; 1081 return AE_ERROR;
1084 } 1082 }
1085 if (obj->buffer.length != 8) { 1083 if (obj->buffer.length != 8) {
1086 printk(ACER_WARNING "Unknown buffer length %d\n", 1084 pr_warning("Unknown buffer length %d\n", obj->buffer.length);
1087 obj->buffer.length);
1088 kfree(obj); 1085 kfree(obj);
1089 return AE_ERROR; 1086 return AE_ERROR;
1090 } 1087 }
@@ -1093,7 +1090,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
1093 kfree(obj); 1090 kfree(obj);
1094 1091
1095 if (return_value.error_code || return_value.ec_return_value) 1092 if (return_value.error_code || return_value.ec_return_value)
1096 printk(ACER_WARNING "Get Device Status failed: " 1093 pr_warning("Get Device Status failed: "
1097 "0x%x - 0x%x\n", return_value.error_code, 1094 "0x%x - 0x%x\n", return_value.error_code,
1098 return_value.ec_return_value); 1095 return_value.ec_return_value);
1099 else 1096 else
@@ -1161,9 +1158,13 @@ static int acer_rfkill_set(void *data, bool blocked)
1161{ 1158{
1162 acpi_status status; 1159 acpi_status status;
1163 u32 cap = (unsigned long)data; 1160 u32 cap = (unsigned long)data;
1164 status = set_u32(!blocked, cap); 1161
1165 if (ACPI_FAILURE(status)) 1162 if (rfkill_inited) {
1166 return -ENODEV; 1163 status = set_u32(!blocked, cap);
1164 if (ACPI_FAILURE(status))
1165 return -ENODEV;
1166 }
1167
1167 return 0; 1168 return 0;
1168} 1169}
1169 1170
@@ -1187,14 +1188,16 @@ static struct rfkill *acer_rfkill_register(struct device *dev,
1187 return ERR_PTR(-ENOMEM); 1188 return ERR_PTR(-ENOMEM);
1188 1189
1189 status = get_device_status(&state, cap); 1190 status = get_device_status(&state, cap);
1190 if (ACPI_SUCCESS(status))
1191 rfkill_init_sw_state(rfkill_dev, !state);
1192 1191
1193 err = rfkill_register(rfkill_dev); 1192 err = rfkill_register(rfkill_dev);
1194 if (err) { 1193 if (err) {
1195 rfkill_destroy(rfkill_dev); 1194 rfkill_destroy(rfkill_dev);
1196 return ERR_PTR(err); 1195 return ERR_PTR(err);
1197 } 1196 }
1197
1198 if (ACPI_SUCCESS(status))
1199 rfkill_set_sw_state(rfkill_dev, !state);
1200
1198 return rfkill_dev; 1201 return rfkill_dev;
1199} 1202}
1200 1203
@@ -1229,14 +1232,19 @@ static int acer_rfkill_init(struct device *dev)
1229 } 1232 }
1230 } 1233 }
1231 1234
1232 schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); 1235 rfkill_inited = true;
1236
1237 if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID))
1238 schedule_delayed_work(&acer_rfkill_work,
1239 round_jiffies_relative(HZ));
1233 1240
1234 return 0; 1241 return 0;
1235} 1242}
1236 1243
1237static void acer_rfkill_exit(void) 1244static void acer_rfkill_exit(void)
1238{ 1245{
1239 cancel_delayed_work_sync(&acer_rfkill_work); 1246 if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID))
1247 cancel_delayed_work_sync(&acer_rfkill_work);
1240 1248
1241 rfkill_unregister(wireless_rfkill); 1249 rfkill_unregister(wireless_rfkill);
1242 rfkill_destroy(wireless_rfkill); 1250 rfkill_destroy(wireless_rfkill);
@@ -1309,7 +1317,7 @@ static void acer_wmi_notify(u32 value, void *context)
1309 1317
1310 status = wmi_get_event_data(value, &response); 1318 status = wmi_get_event_data(value, &response);
1311 if (status != AE_OK) { 1319 if (status != AE_OK) {
1312 printk(ACER_WARNING "bad event status 0x%x\n", status); 1320 pr_warning("bad event status 0x%x\n", status);
1313 return; 1321 return;
1314 } 1322 }
1315 1323
@@ -1318,14 +1326,12 @@ static void acer_wmi_notify(u32 value, void *context)
1318 if (!obj) 1326 if (!obj)
1319 return; 1327 return;
1320 if (obj->type != ACPI_TYPE_BUFFER) { 1328 if (obj->type != ACPI_TYPE_BUFFER) {
1321 printk(ACER_WARNING "Unknown response received %d\n", 1329 pr_warning("Unknown response received %d\n", obj->type);
1322 obj->type);
1323 kfree(obj); 1330 kfree(obj);
1324 return; 1331 return;
1325 } 1332 }
1326 if (obj->buffer.length != 8) { 1333 if (obj->buffer.length != 8) {
1327 printk(ACER_WARNING "Unknown buffer length %d\n", 1334 pr_warning("Unknown buffer length %d\n", obj->buffer.length);
1328 obj->buffer.length);
1329 kfree(obj); 1335 kfree(obj);
1330 return; 1336 return;
1331 } 1337 }
@@ -1335,13 +1341,26 @@ static void acer_wmi_notify(u32 value, void *context)
1335 1341
1336 switch (return_value.function) { 1342 switch (return_value.function) {
1337 case WMID_HOTKEY_EVENT: 1343 case WMID_HOTKEY_EVENT:
1344 if (return_value.device_state) {
1345 u16 device_state = return_value.device_state;
1346 pr_debug("device states: 0x%x\n", device_state);
1347 if (has_cap(ACER_CAP_WIRELESS))
1348 rfkill_set_sw_state(wireless_rfkill,
1349 !(device_state & ACER_WMID3_GDS_WIRELESS));
1350 if (has_cap(ACER_CAP_BLUETOOTH))
1351 rfkill_set_sw_state(bluetooth_rfkill,
1352 !(device_state & ACER_WMID3_GDS_BLUETOOTH));
1353 if (has_cap(ACER_CAP_THREEG))
1354 rfkill_set_sw_state(threeg_rfkill,
1355 !(device_state & ACER_WMID3_GDS_THREEG));
1356 }
1338 if (!sparse_keymap_report_event(acer_wmi_input_dev, 1357 if (!sparse_keymap_report_event(acer_wmi_input_dev,
1339 return_value.key_num, 1, true)) 1358 return_value.key_num, 1, true))
1340 printk(ACER_WARNING "Unknown key number - 0x%x\n", 1359 pr_warning("Unknown key number - 0x%x\n",
1341 return_value.key_num); 1360 return_value.key_num);
1342 break; 1361 break;
1343 default: 1362 default:
1344 printk(ACER_WARNING "Unknown function number - %d - %d\n", 1363 pr_warning("Unknown function number - %d - %d\n",
1345 return_value.function, return_value.key_num); 1364 return_value.function, return_value.key_num);
1346 break; 1365 break;
1347 } 1366 }
@@ -1370,8 +1389,7 @@ wmid3_set_lm_mode(struct lm_input_params *params,
1370 return AE_ERROR; 1389 return AE_ERROR;
1371 } 1390 }
1372 if (obj->buffer.length != 4) { 1391 if (obj->buffer.length != 4) {
1373 printk(ACER_WARNING "Unknown buffer length %d\n", 1392 pr_warning("Unknown buffer length %d\n", obj->buffer.length);
1374 obj->buffer.length);
1375 kfree(obj); 1393 kfree(obj);
1376 return AE_ERROR; 1394 return AE_ERROR;
1377 } 1395 }
@@ -1396,11 +1414,11 @@ static int acer_wmi_enable_ec_raw(void)
1396 status = wmid3_set_lm_mode(&params, &return_value); 1414 status = wmid3_set_lm_mode(&params, &return_value);
1397 1415
1398 if (return_value.error_code || return_value.ec_return_value) 1416 if (return_value.error_code || return_value.ec_return_value)
1399 printk(ACER_WARNING "Enabling EC raw mode failed: " 1417 pr_warning("Enabling EC raw mode failed: "
1400 "0x%x - 0x%x\n", return_value.error_code, 1418 "0x%x - 0x%x\n", return_value.error_code,
1401 return_value.ec_return_value); 1419 return_value.ec_return_value);
1402 else 1420 else
1403 printk(ACER_INFO "Enabled EC raw mode"); 1421 pr_info("Enabled EC raw mode");
1404 1422
1405 return status; 1423 return status;
1406} 1424}
@@ -1419,7 +1437,7 @@ static int acer_wmi_enable_lm(void)
1419 status = wmid3_set_lm_mode(&params, &return_value); 1437 status = wmid3_set_lm_mode(&params, &return_value);
1420 1438
1421 if (return_value.error_code || return_value.ec_return_value) 1439 if (return_value.error_code || return_value.ec_return_value)
1422 printk(ACER_WARNING "Enabling Launch Manager failed: " 1440 pr_warning("Enabling Launch Manager failed: "
1423 "0x%x - 0x%x\n", return_value.error_code, 1441 "0x%x - 0x%x\n", return_value.error_code,
1424 return_value.ec_return_value); 1442 return_value.ec_return_value);
1425 1443
@@ -1553,6 +1571,7 @@ pm_message_t state)
1553 1571
1554 if (has_cap(ACER_CAP_MAILLED)) { 1572 if (has_cap(ACER_CAP_MAILLED)) {
1555 get_u32(&value, ACER_CAP_MAILLED); 1573 get_u32(&value, ACER_CAP_MAILLED);
1574 set_u32(LED_OFF, ACER_CAP_MAILLED);
1556 data->mailled = value; 1575 data->mailled = value;
1557 } 1576 }
1558 1577
@@ -1580,6 +1599,17 @@ static int acer_platform_resume(struct platform_device *device)
1580 return 0; 1599 return 0;
1581} 1600}
1582 1601
1602static void acer_platform_shutdown(struct platform_device *device)
1603{
1604 struct acer_data *data = &interface->data;
1605
1606 if (!data)
1607 return;
1608
1609 if (has_cap(ACER_CAP_MAILLED))
1610 set_u32(LED_OFF, ACER_CAP_MAILLED);
1611}
1612
1583static struct platform_driver acer_platform_driver = { 1613static struct platform_driver acer_platform_driver = {
1584 .driver = { 1614 .driver = {
1585 .name = "acer-wmi", 1615 .name = "acer-wmi",
@@ -1589,6 +1619,7 @@ static struct platform_driver acer_platform_driver = {
1589 .remove = acer_platform_remove, 1619 .remove = acer_platform_remove,
1590 .suspend = acer_platform_suspend, 1620 .suspend = acer_platform_suspend,
1591 .resume = acer_platform_resume, 1621 .resume = acer_platform_resume,
1622 .shutdown = acer_platform_shutdown,
1592}; 1623};
1593 1624
1594static struct platform_device *acer_platform_device; 1625static struct platform_device *acer_platform_device;
@@ -1636,7 +1667,7 @@ static int create_debugfs(void)
1636{ 1667{
1637 interface->debug.root = debugfs_create_dir("acer-wmi", NULL); 1668 interface->debug.root = debugfs_create_dir("acer-wmi", NULL);
1638 if (!interface->debug.root) { 1669 if (!interface->debug.root) {
1639 printk(ACER_ERR "Failed to create debugfs directory"); 1670 pr_err("Failed to create debugfs directory");
1640 return -ENOMEM; 1671 return -ENOMEM;
1641 } 1672 }
1642 1673
@@ -1657,11 +1688,10 @@ static int __init acer_wmi_init(void)
1657{ 1688{
1658 int err; 1689 int err;
1659 1690
1660 printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n"); 1691 pr_info("Acer Laptop ACPI-WMI Extras\n");
1661 1692
1662 if (dmi_check_system(acer_blacklist)) { 1693 if (dmi_check_system(acer_blacklist)) {
1663 printk(ACER_INFO "Blacklisted hardware detected - " 1694 pr_info("Blacklisted hardware detected - not loading\n");
1664 "not loading\n");
1665 return -ENODEV; 1695 return -ENODEV;
1666 } 1696 }
1667 1697
@@ -1678,12 +1708,11 @@ static int __init acer_wmi_init(void)
1678 1708
1679 if (wmi_has_guid(WMID_GUID2) && interface) { 1709 if (wmi_has_guid(WMID_GUID2) && interface) {
1680 if (ACPI_FAILURE(WMID_set_capabilities())) { 1710 if (ACPI_FAILURE(WMID_set_capabilities())) {
1681 printk(ACER_ERR "Unable to detect available WMID " 1711 pr_err("Unable to detect available WMID devices\n");
1682 "devices\n");
1683 return -ENODEV; 1712 return -ENODEV;
1684 } 1713 }
1685 } else if (!wmi_has_guid(WMID_GUID2) && interface) { 1714 } else if (!wmi_has_guid(WMID_GUID2) && interface) {
1686 printk(ACER_ERR "No WMID device detection method found\n"); 1715 pr_err("No WMID device detection method found\n");
1687 return -ENODEV; 1716 return -ENODEV;
1688 } 1717 }
1689 1718
@@ -1691,8 +1720,7 @@ static int __init acer_wmi_init(void)
1691 interface = &AMW0_interface; 1720 interface = &AMW0_interface;
1692 1721
1693 if (ACPI_FAILURE(AMW0_set_capabilities())) { 1722 if (ACPI_FAILURE(AMW0_set_capabilities())) {
1694 printk(ACER_ERR "Unable to detect available AMW0 " 1723 pr_err("Unable to detect available AMW0 devices\n");
1695 "devices\n");
1696 return -ENODEV; 1724 return -ENODEV;
1697 } 1725 }
1698 } 1726 }
@@ -1701,8 +1729,7 @@ static int __init acer_wmi_init(void)
1701 AMW0_find_mailled(); 1729 AMW0_find_mailled();
1702 1730
1703 if (!interface) { 1731 if (!interface) {
1704 printk(ACER_INFO "No or unsupported WMI interface, unable to " 1732 pr_err("No or unsupported WMI interface, unable to load\n");
1705 "load\n");
1706 return -ENODEV; 1733 return -ENODEV;
1707 } 1734 }
1708 1735
@@ -1710,22 +1737,22 @@ static int __init acer_wmi_init(void)
1710 1737
1711 if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) { 1738 if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
1712 interface->capability &= ~ACER_CAP_BRIGHTNESS; 1739 interface->capability &= ~ACER_CAP_BRIGHTNESS;
1713 printk(ACER_INFO "Brightness must be controlled by " 1740 pr_info("Brightness must be controlled by "
1714 "generic video driver\n"); 1741 "generic video driver\n");
1715 } 1742 }
1716 1743
1717 if (wmi_has_guid(WMID_GUID3)) { 1744 if (wmi_has_guid(WMID_GUID3)) {
1718 if (ec_raw_mode) { 1745 if (ec_raw_mode) {
1719 if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { 1746 if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) {
1720 printk(ACER_ERR "Cannot enable EC raw mode\n"); 1747 pr_err("Cannot enable EC raw mode\n");
1721 return -ENODEV; 1748 return -ENODEV;
1722 } 1749 }
1723 } else if (ACPI_FAILURE(acer_wmi_enable_lm())) { 1750 } else if (ACPI_FAILURE(acer_wmi_enable_lm())) {
1724 printk(ACER_ERR "Cannot enable Launch Manager mode\n"); 1751 pr_err("Cannot enable Launch Manager mode\n");
1725 return -ENODEV; 1752 return -ENODEV;
1726 } 1753 }
1727 } else if (ec_raw_mode) { 1754 } else if (ec_raw_mode) {
1728 printk(ACER_INFO "No WMID EC raw mode enable method\n"); 1755 pr_info("No WMID EC raw mode enable method\n");
1729 } 1756 }
1730 1757
1731 if (wmi_has_guid(ACERWMID_EVENT_GUID)) { 1758 if (wmi_has_guid(ACERWMID_EVENT_GUID)) {
@@ -1736,7 +1763,7 @@ static int __init acer_wmi_init(void)
1736 1763
1737 err = platform_driver_register(&acer_platform_driver); 1764 err = platform_driver_register(&acer_platform_driver);
1738 if (err) { 1765 if (err) {
1739 printk(ACER_ERR "Unable to register platform driver.\n"); 1766 pr_err("Unable to register platform driver.\n");
1740 goto error_platform_register; 1767 goto error_platform_register;
1741 } 1768 }
1742 1769
@@ -1791,7 +1818,7 @@ static void __exit acer_wmi_exit(void)
1791 platform_device_unregister(acer_platform_device); 1818 platform_device_unregister(acer_platform_device);
1792 platform_driver_unregister(&acer_platform_driver); 1819 platform_driver_unregister(&acer_platform_driver);
1793 1820
1794 printk(ACER_INFO "Acer Laptop WMI Extras unloaded\n"); 1821 pr_info("Acer Laptop WMI Extras unloaded\n");
1795 return; 1822 return;
1796} 1823}
1797 1824
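The acer-wmi hunks above add an acer_platform_shutdown() handler and wire it in as the platform driver's .shutdown callback, so the mail LED is switched off on poweroff and reboot just as it already is on suspend. For reference, a minimal sketch of that platform-driver pattern; the example_* names below are hypothetical placeholders, not symbols from this patch, and example_led_off() stands in for the firmware call (set_u32(LED_OFF, ACER_CAP_MAILLED) in the hunk above).

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical stand-in for the firmware/EC call that turns the LED off. */
static void example_led_off(void)
{
}

static int example_suspend(struct platform_device *pdev, pm_message_t state)
{
	/* save any state to restore on resume, then quiesce the LED */
	example_led_off();
	return 0;
}

static void example_shutdown(struct platform_device *pdev)
{
	/* poweroff/reboot path: same quiescing as the suspend path */
	example_led_off();
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-led",
	},
	.suspend  = example_suspend,
	.shutdown = example_shutdown,
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Unlike .suspend, the .shutdown callback is also invoked on reboot and poweroff, which is why the LED quiescing has to exist in both paths.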
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 5a6f7d7575d6..c53b3ff7978a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -29,7 +29,7 @@
29 * John Belmonte - ACPI code for Toshiba laptop was a good starting point. 29 * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
30 * Eric Burghard - LED display support for W1N 30 * Eric Burghard - LED display support for W1N
31 * Josh Green - Light Sens support 31 * Josh Green - Light Sens support
32 * Thomas Tuttle - His first patch for led support was very helpfull 32 * Thomas Tuttle - His first patch for led support was very helpful
33 * Sam Lin - GPS support 33 * Sam Lin - GPS support
34 */ 34 */
35 35
@@ -50,6 +50,7 @@
50#include <linux/input/sparse-keymap.h> 50#include <linux/input/sparse-keymap.h>
51#include <linux/rfkill.h> 51#include <linux/rfkill.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/dmi.h>
53#include <acpi/acpi_drivers.h> 54#include <acpi/acpi_drivers.h>
54#include <acpi/acpi_bus.h> 55#include <acpi/acpi_bus.h>
55 56
@@ -157,46 +158,9 @@ MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot "
157#define METHOD_BRIGHTNESS_SET "SPLV" 158#define METHOD_BRIGHTNESS_SET "SPLV"
158#define METHOD_BRIGHTNESS_GET "GPLV" 159#define METHOD_BRIGHTNESS_GET "GPLV"
159 160
160/* Backlight */
161static acpi_handle lcd_switch_handle;
162static char *lcd_switch_paths[] = {
163 "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
164 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
165 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
166 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
167 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
168 "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
169 "\\_SB.PCI0.PX40.Q10", /* S1x */
170 "\\Q10"}; /* A2x, L2D, L3D, M2E */
171
172/* Display */ 161/* Display */
173#define METHOD_SWITCH_DISPLAY "SDSP" 162#define METHOD_SWITCH_DISPLAY "SDSP"
174 163
175static acpi_handle display_get_handle;
176static char *display_get_paths[] = {
177 /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
178 "\\_SB.PCI0.P0P1.VGA.GETD",
179 /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
180 "\\_SB.PCI0.P0P2.VGA.GETD",
181 /* A6V A6Q */
182 "\\_SB.PCI0.P0P3.VGA.GETD",
183 /* A6T, A6M */
184 "\\_SB.PCI0.P0PA.VGA.GETD",
185 /* L3C */
186 "\\_SB.PCI0.PCI1.VGAC.NMAP",
187 /* Z96F */
188 "\\_SB.PCI0.VGA.GETD",
189 /* A2D */
190 "\\ACTD",
191 /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
192 "\\ADVG",
193 /* P30 */
194 "\\DNXT",
195 /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
196 "\\INFB",
197 /* A3F A6F A3N A3L M6N W3N W6A */
198 "\\SSTE"};
199
200#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */ 164#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */
201#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */ 165#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */
202 166
@@ -246,7 +210,6 @@ struct asus_laptop {
246 210
247 int wireless_status; 211 int wireless_status;
248 bool have_rsts; 212 bool have_rsts;
249 int lcd_state;
250 213
251 struct rfkill *gps_rfkill; 214 struct rfkill *gps_rfkill;
252 215
@@ -559,48 +522,6 @@ error:
559/* 522/*
560 * Backlight device 523 * Backlight device
561 */ 524 */
562static int asus_lcd_status(struct asus_laptop *asus)
563{
564 return asus->lcd_state;
565}
566
567static int asus_lcd_set(struct asus_laptop *asus, int value)
568{
569 int lcd = 0;
570 acpi_status status = 0;
571
572 lcd = !!value;
573
574 if (lcd == asus_lcd_status(asus))
575 return 0;
576
577 if (!lcd_switch_handle)
578 return -ENODEV;
579
580 status = acpi_evaluate_object(lcd_switch_handle,
581 NULL, NULL, NULL);
582
583 if (ACPI_FAILURE(status)) {
584 pr_warning("Error switching LCD\n");
585 return -ENODEV;
586 }
587
588 asus->lcd_state = lcd;
589 return 0;
590}
591
592static void lcd_blank(struct asus_laptop *asus, int blank)
593{
594 struct backlight_device *bd = asus->backlight_device;
595
596 asus->lcd_state = (blank == FB_BLANK_UNBLANK);
597
598 if (bd) {
599 bd->props.power = blank;
600 backlight_update_status(bd);
601 }
602}
603
604static int asus_read_brightness(struct backlight_device *bd) 525static int asus_read_brightness(struct backlight_device *bd)
605{ 526{
606 struct asus_laptop *asus = bl_get_data(bd); 527 struct asus_laptop *asus = bl_get_data(bd);
@@ -628,16 +549,9 @@ static int asus_set_brightness(struct backlight_device *bd, int value)
628 549
629static int update_bl_status(struct backlight_device *bd) 550static int update_bl_status(struct backlight_device *bd)
630{ 551{
631 struct asus_laptop *asus = bl_get_data(bd);
632 int rv;
633 int value = bd->props.brightness; 552 int value = bd->props.brightness;
634 553
635 rv = asus_set_brightness(bd, value); 554 return asus_set_brightness(bd, value);
636 if (rv)
637 return rv;
638
639 value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
640 return asus_lcd_set(asus, value);
641} 555}
642 556
643static const struct backlight_ops asusbl_ops = { 557static const struct backlight_ops asusbl_ops = {
@@ -661,8 +575,7 @@ static int asus_backlight_init(struct asus_laptop *asus)
661 struct backlight_properties props; 575 struct backlight_properties props;
662 576
663 if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) || 577 if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) ||
664 acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) || 578 acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL))
665 !lcd_switch_handle)
666 return 0; 579 return 0;
667 580
668 memset(&props, 0, sizeof(struct backlight_properties)); 581 memset(&props, 0, sizeof(struct backlight_properties));
@@ -971,41 +884,6 @@ static void asus_set_display(struct asus_laptop *asus, int value)
971 return; 884 return;
972} 885}
973 886
974static int read_display(struct asus_laptop *asus)
975{
976 unsigned long long value = 0;
977 acpi_status rv = AE_OK;
978
979 /*
980 * In most of the case, we know how to set the display, but sometime
981 * we can't read it
982 */
983 if (display_get_handle) {
984 rv = acpi_evaluate_integer(display_get_handle, NULL,
985 NULL, &value);
986 if (ACPI_FAILURE(rv))
987 pr_warning("Error reading display status\n");
988 }
989
990 value &= 0x0F; /* needed for some models, shouldn't hurt others */
991
992 return value;
993}
994
995/*
996 * Now, *this* one could be more user-friendly, but so far, no-one has
997 * complained. The significance of bits is the same as in store_disp()
998 */
999static ssize_t show_disp(struct device *dev,
1000 struct device_attribute *attr, char *buf)
1001{
1002 struct asus_laptop *asus = dev_get_drvdata(dev);
1003
1004 if (!display_get_handle)
1005 return -ENODEV;
1006 return sprintf(buf, "%d\n", read_display(asus));
1007}
1008
1009/* 887/*
1010 * Experimental support for display switching. As of now: 1 should activate 888 * Experimental support for display switching. As of now: 1 should activate
1011 * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI. 889 * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI.
@@ -1247,15 +1125,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
1247 struct asus_laptop *asus = acpi_driver_data(device); 1125 struct asus_laptop *asus = acpi_driver_data(device);
1248 u16 count; 1126 u16 count;
1249 1127
1250 /*
1251 * We need to tell the backlight device when the backlight power is
1252 * switched
1253 */
1254 if (event == ATKD_LCD_ON)
1255 lcd_blank(asus, FB_BLANK_UNBLANK);
1256 else if (event == ATKD_LCD_OFF)
1257 lcd_blank(asus, FB_BLANK_POWERDOWN);
1258
1259 /* TODO Find a better way to handle events count. */ 1128 /* TODO Find a better way to handle events count. */
1260 count = asus->event_count[event % 128]++; 1129 count = asus->event_count[event % 128]++;
1261 acpi_bus_generate_proc_event(asus->device, event, count); 1130 acpi_bus_generate_proc_event(asus->device, event, count);
@@ -1282,7 +1151,7 @@ static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR,
1282 show_bluetooth, store_bluetooth); 1151 show_bluetooth, store_bluetooth);
1283static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); 1152static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax);
1284static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); 1153static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan);
1285static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp); 1154static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp);
1286static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); 1155static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
1287static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); 1156static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
1288static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw); 1157static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
@@ -1393,26 +1262,6 @@ static struct platform_driver platform_driver = {
1393 } 1262 }
1394}; 1263};
1395 1264
1396static int asus_handle_init(char *name, acpi_handle * handle,
1397 char **paths, int num_paths)
1398{
1399 int i;
1400 acpi_status status;
1401
1402 for (i = 0; i < num_paths; i++) {
1403 status = acpi_get_handle(NULL, paths[i], handle);
1404 if (ACPI_SUCCESS(status))
1405 return 0;
1406 }
1407
1408 *handle = NULL;
1409 return -ENODEV;
1410}
1411
1412#define ASUS_HANDLE_INIT(object) \
1413 asus_handle_init(#object, &object##_handle, object##_paths, \
1414 ARRAY_SIZE(object##_paths))
1415
1416/* 1265/*
1417 * This function is used to initialize the context with right values. In this 1266 * This function is used to initialize the context with right values. In this
1418 * method, we can make all the detection we want, and modify the asus_laptop 1267 * method, we can make all the detection we want, and modify the asus_laptop
@@ -1498,10 +1347,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
1498 if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) 1347 if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
1499 asus->have_rsts = true; 1348 asus->have_rsts = true;
1500 1349
1501 /* Scheduled for removal */
1502 ASUS_HANDLE_INIT(lcd_switch);
1503 ASUS_HANDLE_INIT(display_get);
1504
1505 kfree(model); 1350 kfree(model);
1506 1351
1507 return AE_OK; 1352 return AE_OK;
@@ -1553,10 +1398,23 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus)
1553 asus_als_level(asus, asus->light_level); 1398 asus_als_level(asus, asus->light_level);
1554 } 1399 }
1555 1400
1556 asus->lcd_state = 1; /* LCD should be on when the module load */
1557 return result; 1401 return result;
1558} 1402}
1559 1403
1404static void __devinit asus_dmi_check(void)
1405{
1406 const char *model;
1407
1408 model = dmi_get_system_info(DMI_PRODUCT_NAME);
1409 if (!model)
1410 return;
1411
1412 /* On L1400B, WLED controls the sound card, don't mess with it ... */
1413 if (strncmp(model, "L1400B", 6) == 0) {
1414 wlan_status = -1;
1415 }
1416}
1417
1560static bool asus_device_present; 1418static bool asus_device_present;
1561 1419
1562static int __devinit asus_acpi_add(struct acpi_device *device) 1420static int __devinit asus_acpi_add(struct acpi_device *device)
@@ -1575,6 +1433,8 @@ static int __devinit asus_acpi_add(struct acpi_device *device)
1575 device->driver_data = asus; 1433 device->driver_data = asus;
1576 asus->device = device; 1434 asus->device = device;
1577 1435
1436 asus_dmi_check();
1437
1578 result = asus_acpi_init(asus); 1438 result = asus_acpi_init(asus);
1579 if (result) 1439 if (result)
1580 goto fail_platform; 1440 goto fail_platform;
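The asus-laptop.c change above adds asus_dmi_check(), which reads the DMI product name with dmi_get_system_info() and disables WLED handling on the L1400B, where that control actually toggles the sound card. An equivalent, table-driven way to express the same quirk is dmi_check_system(); the sketch below is a hypothetical alternative, not what the patch uses, and the example_* names are placeholders.

#include <linux/init.h>
#include <linux/dmi.h>

/* Hypothetical callback, run when the matching DMI entry is found. */
static int __init example_l1400b_quirk(const struct dmi_system_id *id)
{
	/* e.g. set wlan_status = -1, as asus_dmi_check() does above */
	return 1;
}

static const struct dmi_system_id example_quirks[] __initconst = {
	{
		.callback = example_l1400b_quirk,
		.ident = "ASUS L1400B",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "L1400B"),
		},
	},
	{ }	/* terminating entry */
};

/* Called once, early in probe/init, e.g. before asus_acpi_init(): */
static void __init example_check_quirks(void)
{
	dmi_check_system(example_quirks);
}

The table form scales better once more than one model needs a quirk, which is presumably why most platform drivers prefer it; the strncmp() approach in the patch is simply the smallest change for a single model.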
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
new file mode 100644
index 000000000000..0580d99b0798
--- /dev/null
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -0,0 +1,98 @@
1/*
2 * Asus Notebooks WMI hotkey driver
3 *
4 * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/input.h>
27#include <linux/input/sparse-keymap.h>
28
29#include "asus-wmi.h"
30
31#define ASUS_NB_WMI_FILE "asus-nb-wmi"
32
33MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>");
34MODULE_DESCRIPTION("Asus Notebooks WMI Hotkey Driver");
35MODULE_LICENSE("GPL");
36
37#define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C"
38
39MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID);
40
41static const struct key_entry asus_nb_wmi_keymap[] = {
42 { KE_KEY, 0x30, { KEY_VOLUMEUP } },
43 { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
44 { KE_KEY, 0x32, { KEY_MUTE } },
45 { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
46 { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
47 { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
48 { KE_KEY, 0x41, { KEY_NEXTSONG } },
49 { KE_KEY, 0x43, { KEY_STOPCD } },
50 { KE_KEY, 0x45, { KEY_PLAYPAUSE } },
51 { KE_KEY, 0x4c, { KEY_MEDIA } },
52 { KE_KEY, 0x50, { KEY_EMAIL } },
53 { KE_KEY, 0x51, { KEY_WWW } },
54 { KE_KEY, 0x55, { KEY_CALC } },
55 { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */
56 { KE_KEY, 0x5D, { KEY_WLAN } },
57 { KE_KEY, 0x5E, { KEY_WLAN } },
58 { KE_KEY, 0x5F, { KEY_WLAN } },
59 { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
60 { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
61 { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
62 { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
63 { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
64 { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
65 { KE_KEY, 0x7D, { KEY_BLUETOOTH } },
66 { KE_KEY, 0x82, { KEY_CAMERA } },
67 { KE_KEY, 0x88, { KEY_RFKILL } },
68 { KE_KEY, 0x8A, { KEY_PROG1 } },
69 { KE_KEY, 0x95, { KEY_MEDIA } },
70 { KE_KEY, 0x99, { KEY_PHONE } },
71 { KE_KEY, 0xb5, { KEY_CALC } },
72 { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
73 { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
74 { KE_END, 0},
75};
76
77static struct asus_wmi_driver asus_nb_wmi_driver = {
78 .name = ASUS_NB_WMI_FILE,
79 .owner = THIS_MODULE,
80 .event_guid = ASUS_NB_WMI_EVENT_GUID,
81 .keymap = asus_nb_wmi_keymap,
82 .input_name = "Asus WMI hotkeys",
83 .input_phys = ASUS_NB_WMI_FILE "/input0",
84};
85
86
87static int __init asus_nb_wmi_init(void)
88{
89 return asus_wmi_register_driver(&asus_nb_wmi_driver);
90}
91
92static void __exit asus_nb_wmi_exit(void)
93{
94 asus_wmi_unregister_driver(&asus_nb_wmi_driver);
95}
96
97module_init(asus_nb_wmi_init);
98module_exit(asus_nb_wmi_exit);
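asus-nb-wmi.c is deliberately thin: a sparse keymap, an event GUID and a struct asus_wmi_driver handed to asus_wmi_register_driver(); everything else lives in the shared asus-wmi core added below. A hypothetical second front-end would look much the same. In the sketch below the GUID and key code are placeholders, not real firmware values; only the asus-wmi registration calls and the struct fields are taken from the file above.

/* Hypothetical additional front-end built on the same asus-wmi core. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

#include "asus-wmi.h"

#define EXAMPLE_WMI_EVENT_GUID "00000000-0000-0000-0000-000000000000"

static const struct key_entry example_keymap[] = {
	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },	/* code borrowed from the table above */
	{ KE_END, 0 },
};

static struct asus_wmi_driver example_wmi_driver = {
	.name       = "example-wmi",
	.owner      = THIS_MODULE,
	.event_guid = EXAMPLE_WMI_EVENT_GUID,
	.keymap     = example_keymap,
	.input_name = "Example WMI hotkeys",
	.input_phys = "example-wmi/input0",
};

static int __init example_wmi_init(void)
{
	return asus_wmi_register_driver(&example_wmi_driver);
}

static void __exit example_wmi_exit(void)
{
	asus_wmi_unregister_driver(&example_wmi_driver);
}

module_init(example_wmi_init);
module_exit(example_wmi_exit);
MODULE_LICENSE("GPL");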
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
new file mode 100644
index 000000000000..efc776cb0c66
--- /dev/null
+++ b/drivers/platform/x86/asus-wmi.c
@@ -0,0 +1,1656 @@
1/*
2 * Asus PC WMI hotkey driver
3 *
4 * Copyright(C) 2010 Intel Corporation.
5 * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com>
6 *
7 * Portions based on wistron_btns.c:
8 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
9 * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
10 * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/types.h>
33#include <linux/slab.h>
34#include <linux/input.h>
35#include <linux/input/sparse-keymap.h>
36#include <linux/fb.h>
37#include <linux/backlight.h>
38#include <linux/leds.h>
39#include <linux/rfkill.h>
40#include <linux/pci.h>
41#include <linux/pci_hotplug.h>
42#include <linux/hwmon.h>
43#include <linux/hwmon-sysfs.h>
44#include <linux/debugfs.h>
45#include <linux/seq_file.h>
46#include <linux/platform_device.h>
47#include <acpi/acpi_bus.h>
48#include <acpi/acpi_drivers.h>
49
50#include "asus-wmi.h"
51
52MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>, "
53 "Yong Wang <yong.y.wang@intel.com>");
54MODULE_DESCRIPTION("Asus Generic WMI Driver");
55MODULE_LICENSE("GPL");
56
57#define to_platform_driver(drv) \
58 (container_of((drv), struct platform_driver, driver))
59
60#define to_asus_wmi_driver(pdrv) \
61 (container_of((pdrv), struct asus_wmi_driver, platform_driver))
62
63#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
64
65#define NOTIFY_BRNUP_MIN 0x11
66#define NOTIFY_BRNUP_MAX 0x1f
67#define NOTIFY_BRNDOWN_MIN 0x20
68#define NOTIFY_BRNDOWN_MAX 0x2e
69
70/* WMI Methods */
71#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
72#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */
73#define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */
74#define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */
75#define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */
76#define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */
77#define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */
78#define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */
79#define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */
80#define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */
81#define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */
82#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */
83#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/
84#define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */
85#define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */
86#define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */
87#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */
88#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */
89#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */
90
91#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE
92
93/* Wireless */
94#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
95#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
96#define ASUS_WMI_DEVID_WLAN 0x00010011
97#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
98#define ASUS_WMI_DEVID_GPS 0x00010015
99#define ASUS_WMI_DEVID_WIMAX 0x00010017
100#define ASUS_WMI_DEVID_WWAN3G 0x00010019
101#define ASUS_WMI_DEVID_UWB 0x00010021
102
103/* Leds */
104/* 0x000200XX and 0x000400XX */
105
106/* Backlight and Brightness */
107#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
108#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
109#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
110#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
111
112/* Misc */
113#define ASUS_WMI_DEVID_CAMERA 0x00060013
114
115/* Storage */
116#define ASUS_WMI_DEVID_CARDREADER 0x00080013
117
118/* Input */
119#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
120#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
121
122/* Fan, Thermal */
123#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
124#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012
125
126/* Power */
127#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
128
129/* DSTS masks */
130#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
131#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
132#define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000
133#define ASUS_WMI_DSTS_USER_BIT 0x00020000
134#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000
135#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
136#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
137
138struct bios_args {
139 u32 arg0;
140 u32 arg1;
141} __packed;
142
143/*
144 * <platform>/ - debugfs root directory
145 * dev_id - current dev_id
146 * ctrl_param - current ctrl_param
147 * method_id - current method_id
148 * devs - call DEVS(dev_id, ctrl_param) and print result
149 * dsts - call DSTS(dev_id) and print result
150 * call - call method_id(dev_id, ctrl_param) and print result
151 */
152struct asus_wmi_debug {
153 struct dentry *root;
154 u32 method_id;
155 u32 dev_id;
156 u32 ctrl_param;
157};
158
159struct asus_rfkill {
160 struct asus_wmi *asus;
161 struct rfkill *rfkill;
162 u32 dev_id;
163};
164
165struct asus_wmi {
166 int dsts_id;
167 int spec;
168 int sfun;
169
170 struct input_dev *inputdev;
171 struct backlight_device *backlight_device;
172 struct device *hwmon_device;
173 struct platform_device *platform_device;
174
175 struct led_classdev tpd_led;
176 int tpd_led_wk;
177 struct workqueue_struct *led_workqueue;
178 struct work_struct tpd_led_work;
179
180 struct asus_rfkill wlan;
181 struct asus_rfkill bluetooth;
182 struct asus_rfkill wimax;
183 struct asus_rfkill wwan3g;
184
185 struct hotplug_slot *hotplug_slot;
186 struct mutex hotplug_lock;
187 struct mutex wmi_lock;
188 struct workqueue_struct *hotplug_workqueue;
189 struct work_struct hotplug_work;
190
191 struct asus_wmi_debug debug;
192
193 struct asus_wmi_driver *driver;
194};
195
196static int asus_wmi_input_init(struct asus_wmi *asus)
197{
198 int err;
199
200 asus->inputdev = input_allocate_device();
201 if (!asus->inputdev)
202 return -ENOMEM;
203
204 asus->inputdev->name = asus->driver->input_name;
205 asus->inputdev->phys = asus->driver->input_phys;
206 asus->inputdev->id.bustype = BUS_HOST;
207 asus->inputdev->dev.parent = &asus->platform_device->dev;
208
209 err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
210 if (err)
211 goto err_free_dev;
212
213 err = input_register_device(asus->inputdev);
214 if (err)
215 goto err_free_keymap;
216
217 return 0;
218
219err_free_keymap:
220 sparse_keymap_free(asus->inputdev);
221err_free_dev:
222 input_free_device(asus->inputdev);
223 return err;
224}
225
226static void asus_wmi_input_exit(struct asus_wmi *asus)
227{
228 if (asus->inputdev) {
229 sparse_keymap_free(asus->inputdev);
230 input_unregister_device(asus->inputdev);
231 }
232
233 asus->inputdev = NULL;
234}
235
236static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
237 u32 *retval)
238{
239 struct bios_args args = {
240 .arg0 = arg0,
241 .arg1 = arg1,
242 };
243 struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
244 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
245 acpi_status status;
246 union acpi_object *obj;
247 u32 tmp;
248
249 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, method_id,
250 &input, &output);
251
252 if (ACPI_FAILURE(status))
253 goto exit;
254
255 obj = (union acpi_object *)output.pointer;
256 if (obj && obj->type == ACPI_TYPE_INTEGER)
257 tmp = (u32) obj->integer.value;
258 else
259 tmp = 0;
260
261 if (retval)
262 *retval = tmp;
263
264 kfree(obj);
265
266exit:
267 if (ACPI_FAILURE(status))
268 return -EIO;
269
270 if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
271 return -ENODEV;
272
273 return 0;
274}
275
276static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
277{
278 return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
279}
280
281static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
282 u32 *retval)
283{
284 return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id,
285 ctrl_param, retval);
286}
287
288/* Helper for special devices with magic return codes */
289static int asus_wmi_get_devstate_bits(struct asus_wmi *asus,
290 u32 dev_id, u32 mask)
291{
292 u32 retval = 0;
293 int err;
294
295 err = asus_wmi_get_devstate(asus, dev_id, &retval);
296
297 if (err < 0)
298 return err;
299
300 if (!(retval & ASUS_WMI_DSTS_PRESENCE_BIT))
301 return -ENODEV;
302
303 if (mask == ASUS_WMI_DSTS_STATUS_BIT) {
304 if (retval & ASUS_WMI_DSTS_UNKNOWN_BIT)
305 return -ENODEV;
306 }
307
308 return retval & mask;
309}
310
311static int asus_wmi_get_devstate_simple(struct asus_wmi *asus, u32 dev_id)
312{
313 return asus_wmi_get_devstate_bits(asus, dev_id,
314 ASUS_WMI_DSTS_STATUS_BIT);
315}
316
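The DSTS helpers above return the raw status word and then reduce it with the ASUS_WMI_DSTS_* masks defined earlier: the presence bit (bit 16) says whether the device exists at all, the status bit (bit 0) whether it is enabled, the "unknown" bit (bit 1) flags an indeterminate state, and the low byte carries the brightness level for the backlight device. A throwaway decoder, hypothetical and not part of the patch, just to spell the layout out:

/* Hypothetical helper, dropped into this file, decoding a raw DSTS word. */
static void example_decode_dsts(u32 dsts)
{
	bool present = dsts & ASUS_WMI_DSTS_PRESENCE_BIT;	/* bit 16 */
	bool enabled = dsts & ASUS_WMI_DSTS_STATUS_BIT;		/* bit 0  */
	bool unknown = dsts & ASUS_WMI_DSTS_UNKNOWN_BIT;	/* bit 1  */
	u32 level = dsts & ASUS_WMI_DSTS_BRIGHTNESS_MASK;	/* low byte */

	pr_debug("present=%d enabled=%d unknown=%d level=%u\n",
		 present, enabled, unknown, level);
}

asus_wmi_get_devstate_simple() collapses exactly this: it returns -ENODEV when the presence bit is clear or the unknown bit is set, and 0 or 1 (the status bit) otherwise.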
317/*
318 * LEDs
319 */
320/*
321 * These functions actually update the LEDs, and are called from a
322 * workqueue. By doing this as separate work rather than when the LED
323 * subsystem asks, we avoid messing with the Asus ACPI stuff during a
324 * potentially bad time, such as a timer interrupt.
325 */
326static void tpd_led_update(struct work_struct *work)
327{
328 int ctrl_param;
329 struct asus_wmi *asus;
330
331 asus = container_of(work, struct asus_wmi, tpd_led_work);
332
333 ctrl_param = asus->tpd_led_wk;
334 asus_wmi_set_devstate(ASUS_WMI_DEVID_TOUCHPAD_LED, ctrl_param, NULL);
335}
336
337static void tpd_led_set(struct led_classdev *led_cdev,
338 enum led_brightness value)
339{
340 struct asus_wmi *asus;
341
342 asus = container_of(led_cdev, struct asus_wmi, tpd_led);
343
344 asus->tpd_led_wk = !!value;
345 queue_work(asus->led_workqueue, &asus->tpd_led_work);
346}
347
348static int read_tpd_led_state(struct asus_wmi *asus)
349{
350 return asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_TOUCHPAD_LED);
351}
352
353static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
354{
355 struct asus_wmi *asus;
356
357 asus = container_of(led_cdev, struct asus_wmi, tpd_led);
358
359 return read_tpd_led_state(asus);
360}
361
362static int asus_wmi_led_init(struct asus_wmi *asus)
363{
364 int rv;
365
366 if (read_tpd_led_state(asus) < 0)
367 return 0;
368
369 asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
370 if (!asus->led_workqueue)
371 return -ENOMEM;
372 INIT_WORK(&asus->tpd_led_work, tpd_led_update);
373
374 asus->tpd_led.name = "asus::touchpad";
375 asus->tpd_led.brightness_set = tpd_led_set;
376 asus->tpd_led.brightness_get = tpd_led_get;
377 asus->tpd_led.max_brightness = 1;
378
379 rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led);
380 if (rv) {
381 destroy_workqueue(asus->led_workqueue);
382 return rv;
383 }
384
385 return 0;
386}
387
388static void asus_wmi_led_exit(struct asus_wmi *asus)
389{
390 if (asus->tpd_led.dev)
391 led_classdev_unregister(&asus->tpd_led);
392 if (asus->led_workqueue)
393 destroy_workqueue(asus->led_workqueue);
394}
395
396/*
397 * PCI hotplug (for wlan rfkill)
398 */
399static bool asus_wlan_rfkill_blocked(struct asus_wmi *asus)
400{
401 int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
402
403 if (result < 0)
404 return false;
405 return !result;
406}
407
408static void asus_rfkill_hotplug(struct asus_wmi *asus)
409{
410 struct pci_dev *dev;
411 struct pci_bus *bus;
412 bool blocked;
413 bool absent;
414 u32 l;
415
416 mutex_lock(&asus->wmi_lock);
417 blocked = asus_wlan_rfkill_blocked(asus);
418 mutex_unlock(&asus->wmi_lock);
419
420 mutex_lock(&asus->hotplug_lock);
421
422 if (asus->wlan.rfkill)
423 rfkill_set_sw_state(asus->wlan.rfkill, blocked);
424
425 if (asus->hotplug_slot) {
426 bus = pci_find_bus(0, 1);
427 if (!bus) {
428 pr_warning("Unable to find PCI bus 1?\n");
429 goto out_unlock;
430 }
431
432 if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
433 pr_err("Unable to read PCI config space?\n");
434 goto out_unlock;
435 }
436 absent = (l == 0xffffffff);
437
438 if (blocked != absent) {
439 pr_warning("BIOS says wireless lan is %s, "
440 "but the pci device is %s\n",
441 blocked ? "blocked" : "unblocked",
442 absent ? "absent" : "present");
443 pr_warning("skipped wireless hotplug as probably "
444 "inappropriate for this model\n");
445 goto out_unlock;
446 }
447
448 if (!blocked) {
449 dev = pci_get_slot(bus, 0);
450 if (dev) {
451 /* Device already present */
452 pci_dev_put(dev);
453 goto out_unlock;
454 }
455 dev = pci_scan_single_device(bus, 0);
456 if (dev) {
457 pci_bus_assign_resources(bus);
458 if (pci_bus_add_device(dev))
459 pr_err("Unable to hotplug wifi\n");
460 }
461 } else {
462 dev = pci_get_slot(bus, 0);
463 if (dev) {
464 pci_remove_bus_device(dev);
465 pci_dev_put(dev);
466 }
467 }
468 }
469
470out_unlock:
471 mutex_unlock(&asus->hotplug_lock);
472}
473
474static void asus_rfkill_notify(acpi_handle handle, u32 event, void *data)
475{
476 struct asus_wmi *asus = data;
477
478 if (event != ACPI_NOTIFY_BUS_CHECK)
479 return;
480
481 /*
482 * We can't call asus_rfkill_hotplug directly because most
483 * of the time WMBC is still being executed and is not reentrant.
484 * There is currently no way to tell ACPICA that we want this
485 * method to be serialized, so we schedule an asus_rfkill_hotplug
486 * call later, in a safer context.
487 */
488 queue_work(asus->hotplug_workqueue, &asus->hotplug_work);
489}
490
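As the comment in asus_rfkill_notify() explains, the hotplug work cannot run synchronously from the notify handler, so it is queued on a dedicated workqueue (hotplug_workqueue, created further down in asus_setup_pci_hotplug()). The generic shape of that deferral pattern, sketched with hypothetical example_* names rather than the driver's own symbols:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical per-device state; mirrors hotplug_workqueue/hotplug_work. */
struct example_dev {
	struct workqueue_struct *wq;
	struct work_struct slow_work;
};

static void example_slow_work(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       slow_work);
	/* safe here to call non-reentrant or sleeping operations */
	(void)dev;
}

/* Called from a context in which we must not block or re-enter firmware. */
static void example_notify(void *data)
{
	struct example_dev *dev = data;

	queue_work(dev->wq, &dev->slow_work);
}

static int example_init(struct example_dev *dev)
{
	dev->wq = create_singlethread_workqueue("example_wq");
	if (!dev->wq)
		return -ENOMEM;
	INIT_WORK(&dev->slow_work, example_slow_work);
	return 0;
}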
491static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
492{
493 acpi_status status;
494 acpi_handle handle;
495
496 status = acpi_get_handle(NULL, node, &handle);
497
498 if (ACPI_SUCCESS(status)) {
499 status = acpi_install_notify_handler(handle,
500 ACPI_SYSTEM_NOTIFY,
501 asus_rfkill_notify, asus);
502 if (ACPI_FAILURE(status))
503 pr_warning("Failed to register notify on %s\n", node);
504 } else
505 return -ENODEV;
506
507 return 0;
508}
509
510static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
511{
512 acpi_status status = AE_OK;
513 acpi_handle handle;
514
515 status = acpi_get_handle(NULL, node, &handle);
516
517 if (ACPI_SUCCESS(status)) {
518 status = acpi_remove_notify_handler(handle,
519 ACPI_SYSTEM_NOTIFY,
520 asus_rfkill_notify);
521 if (ACPI_FAILURE(status))
522 pr_err("Error removing rfkill notify handler %s\n",
523 node);
524 }
525}
526
527static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
528 u8 *value)
529{
530 struct asus_wmi *asus = hotplug_slot->private;
531 int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
532
533 if (result < 0)
534 return result;
535
536 *value = !!result;
537 return 0;
538}
539
540static void asus_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
541{
542 kfree(hotplug_slot->info);
543 kfree(hotplug_slot);
544}
545
546static struct hotplug_slot_ops asus_hotplug_slot_ops = {
547 .owner = THIS_MODULE,
548 .get_adapter_status = asus_get_adapter_status,
549 .get_power_status = asus_get_adapter_status,
550};
551
552static void asus_hotplug_work(struct work_struct *work)
553{
554 struct asus_wmi *asus;
555
556 asus = container_of(work, struct asus_wmi, hotplug_work);
557 asus_rfkill_hotplug(asus);
558}
559
560static int asus_setup_pci_hotplug(struct asus_wmi *asus)
561{
562 int ret = -ENOMEM;
563 struct pci_bus *bus = pci_find_bus(0, 1);
564
565 if (!bus) {
566 pr_err("Unable to find wifi PCI bus\n");
567 return -ENODEV;
568 }
569
570 asus->hotplug_workqueue =
571 create_singlethread_workqueue("hotplug_workqueue");
572 if (!asus->hotplug_workqueue)
573 goto error_workqueue;
574
575 INIT_WORK(&asus->hotplug_work, asus_hotplug_work);
576
577 asus->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
578 if (!asus->hotplug_slot)
579 goto error_slot;
580
581 asus->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
582 GFP_KERNEL);
583 if (!asus->hotplug_slot->info)
584 goto error_info;
585
586 asus->hotplug_slot->private = asus;
587 asus->hotplug_slot->release = &asus_cleanup_pci_hotplug;
588 asus->hotplug_slot->ops = &asus_hotplug_slot_ops;
589 asus_get_adapter_status(asus->hotplug_slot,
590 &asus->hotplug_slot->info->adapter_status);
591
592 ret = pci_hp_register(asus->hotplug_slot, bus, 0, "asus-wifi");
593 if (ret) {
594 pr_err("Unable to register hotplug slot - %d\n", ret);
595 goto error_register;
596 }
597
598 return 0;
599
600error_register:
601 kfree(asus->hotplug_slot->info);
602error_info:
603 kfree(asus->hotplug_slot);
604 asus->hotplug_slot = NULL;
605error_slot:
606 destroy_workqueue(asus->hotplug_workqueue);
607error_workqueue:
608 return ret;
609}
610
611/*
612 * Rfkill devices
613 */
614static int asus_rfkill_set(void *data, bool blocked)
615{
616 struct asus_rfkill *priv = data;
617 u32 ctrl_param = !blocked;
618
619 return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL);
620}
621
622static void asus_rfkill_query(struct rfkill *rfkill, void *data)
623{
624 struct asus_rfkill *priv = data;
625 int result;
626
627 result = asus_wmi_get_devstate_simple(priv->asus, priv->dev_id);
628
629 if (result < 0)
630 return;
631
632 rfkill_set_sw_state(priv->rfkill, !result);
633}
634
635static int asus_rfkill_wlan_set(void *data, bool blocked)
636{
637 struct asus_rfkill *priv = data;
638 struct asus_wmi *asus = priv->asus;
639 int ret;
640
641 /*
642 * This handler is enabled only if hotplug is enabled.
643 * In this case, the asus_wmi_set_devstate() will
644 * trigger a wmi notification and we need to wait for
645 * this call to finish before being able to call
646 * any other wmi method.
647 */
648 mutex_lock(&asus->wmi_lock);
649 ret = asus_rfkill_set(data, blocked);
650 mutex_unlock(&asus->wmi_lock);
651 return ret;
652}
653
654static const struct rfkill_ops asus_rfkill_wlan_ops = {
655 .set_block = asus_rfkill_wlan_set,
656 .query = asus_rfkill_query,
657};
658
659static const struct rfkill_ops asus_rfkill_ops = {
660 .set_block = asus_rfkill_set,
661 .query = asus_rfkill_query,
662};
663
664static int asus_new_rfkill(struct asus_wmi *asus,
665 struct asus_rfkill *arfkill,
666 const char *name, enum rfkill_type type, int dev_id)
667{
668 int result = asus_wmi_get_devstate_simple(asus, dev_id);
669 struct rfkill **rfkill = &arfkill->rfkill;
670
671 if (result < 0)
672 return result;
673
674 arfkill->dev_id = dev_id;
675 arfkill->asus = asus;
676
677 if (dev_id == ASUS_WMI_DEVID_WLAN && asus->driver->hotplug_wireless)
678 *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type,
679 &asus_rfkill_wlan_ops, arfkill);
680 else
681 *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type,
682 &asus_rfkill_ops, arfkill);
683
684 if (!*rfkill)
685 return -EINVAL;
686
687 rfkill_init_sw_state(*rfkill, !result);
688 result = rfkill_register(*rfkill);
689 if (result) {
690 rfkill_destroy(*rfkill);
691 *rfkill = NULL;
692 return result;
693 }
694 return 0;
695}
696
697static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
698{
699 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
700 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
701 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
702 if (asus->wlan.rfkill) {
703 rfkill_unregister(asus->wlan.rfkill);
704 rfkill_destroy(asus->wlan.rfkill);
705 asus->wlan.rfkill = NULL;
706 }
707 /*
708 * Refresh pci hotplug in case the rfkill state was changed after
709 * asus_unregister_rfkill_notifier()
710 */
711 asus_rfkill_hotplug(asus);
712 if (asus->hotplug_slot)
713 pci_hp_deregister(asus->hotplug_slot);
714 if (asus->hotplug_workqueue)
715 destroy_workqueue(asus->hotplug_workqueue);
716
717 if (asus->bluetooth.rfkill) {
718 rfkill_unregister(asus->bluetooth.rfkill);
719 rfkill_destroy(asus->bluetooth.rfkill);
720 asus->bluetooth.rfkill = NULL;
721 }
722 if (asus->wimax.rfkill) {
723 rfkill_unregister(asus->wimax.rfkill);
724 rfkill_destroy(asus->wimax.rfkill);
725 asus->wimax.rfkill = NULL;
726 }
727 if (asus->wwan3g.rfkill) {
728 rfkill_unregister(asus->wwan3g.rfkill);
729 rfkill_destroy(asus->wwan3g.rfkill);
730 asus->wwan3g.rfkill = NULL;
731 }
732}
733
734static int asus_wmi_rfkill_init(struct asus_wmi *asus)
735{
736 int result = 0;
737
738 mutex_init(&asus->hotplug_lock);
739 mutex_init(&asus->wmi_lock);
740
741 result = asus_new_rfkill(asus, &asus->wlan, "asus-wlan",
742 RFKILL_TYPE_WLAN, ASUS_WMI_DEVID_WLAN);
743
744 if (result && result != -ENODEV)
745 goto exit;
746
747 result = asus_new_rfkill(asus, &asus->bluetooth,
748 "asus-bluetooth", RFKILL_TYPE_BLUETOOTH,
749 ASUS_WMI_DEVID_BLUETOOTH);
750
751 if (result && result != -ENODEV)
752 goto exit;
753
754 result = asus_new_rfkill(asus, &asus->wimax, "asus-wimax",
755 RFKILL_TYPE_WIMAX, ASUS_WMI_DEVID_WIMAX);
756
757 if (result && result != -ENODEV)
758 goto exit;
759
760 result = asus_new_rfkill(asus, &asus->wwan3g, "asus-wwan3g",
761 RFKILL_TYPE_WWAN, ASUS_WMI_DEVID_WWAN3G);
762
763 if (result && result != -ENODEV)
764 goto exit;
765
766 if (!asus->driver->hotplug_wireless)
767 goto exit;
768
769 result = asus_setup_pci_hotplug(asus);
770 /*
771 * If we get -EBUSY then something else is handling the PCI hotplug -
772 * don't fail in this case
773 */
774 if (result == -EBUSY)
775 result = 0;
776
777 asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
778 asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
779 asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
780 /*
781 * Refresh pci hotplug in case the rfkill state was changed during
782 * setup.
783 */
784 asus_rfkill_hotplug(asus);
785
786exit:
787 if (result && result != -ENODEV)
788 asus_wmi_rfkill_exit(asus);
789
790 if (result == -ENODEV)
791 result = 0;
792
793 return result;
794}
795
796/*
797 * Hwmon device
798 */
799static ssize_t asus_hwmon_pwm1(struct device *dev,
800 struct device_attribute *attr,
801 char *buf)
802{
803 struct asus_wmi *asus = dev_get_drvdata(dev);
804 u32 value;
805 int err;
806
807 err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
808
809 if (err < 0)
810 return err;
811
812 value &= 0xFF;	/* the low byte holds the fan speed setting */
813
814 if (value == 1) /* Low Speed */
815 value = 85;
816 else if (value == 2)
817 value = 170;
818 else if (value == 3)
819 value = 255;
820 else if (value != 0) {
821 pr_err("Unknown fan speed %#x", value);
822 value = -1;
823 }
824
825 return sprintf(buf, "%d\n", value);
826}
827
828static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
829
830static ssize_t
831show_name(struct device *dev, struct device_attribute *attr, char *buf)
832{
833 return sprintf(buf, "asus\n");
834}
835static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
836
837static struct attribute *hwmon_attributes[] = {
838 &sensor_dev_attr_pwm1.dev_attr.attr,
839 &sensor_dev_attr_name.dev_attr.attr,
840 NULL
841};
842
843static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
844 struct attribute *attr, int idx)
845{
846 struct device *dev = container_of(kobj, struct device, kobj);
847 struct platform_device *pdev = to_platform_device(dev->parent);
848 struct asus_wmi *asus = platform_get_drvdata(pdev);
849 bool ok = true;
850 int dev_id = -1;
851 u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
852
853 if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
854 dev_id = ASUS_WMI_DEVID_FAN_CTRL;
855
856 if (dev_id != -1) {
857 int err = asus_wmi_get_devstate(asus, dev_id, &value);
858
859 if (err < 0)
860 return err;
861 }
862
863 if (dev_id == ASUS_WMI_DEVID_FAN_CTRL) {
864 /*
865 * We need to find a better way, probably using sfun,
866 * bits or spec ...
867 * Currently we disable it if:
868 * - ASUS_WMI_UNSUPPORTED_METHOD is returned
869 * - reserved bits are non-zero
870 * - sfun and presence bit are not set
871 */
872 if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
873 || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)))
874 ok = false;
875 }
876
877 return ok ? attr->mode : 0;
878}
879
880static struct attribute_group hwmon_attribute_group = {
881 .is_visible = asus_hwmon_sysfs_is_visible,
882 .attrs = hwmon_attributes
883};
884
885static void asus_wmi_hwmon_exit(struct asus_wmi *asus)
886{
887 struct device *hwmon;
888
889 hwmon = asus->hwmon_device;
890 if (!hwmon)
891 return;
892 sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group);
893 hwmon_device_unregister(hwmon);
894 asus->hwmon_device = NULL;
895}
896
897static int asus_wmi_hwmon_init(struct asus_wmi *asus)
898{
899 struct device *hwmon;
900 int result;
901
902 hwmon = hwmon_device_register(&asus->platform_device->dev);
903 if (IS_ERR(hwmon)) {
904 pr_err("Could not register asus hwmon device\n");
905 return PTR_ERR(hwmon);
906 }
907 asus->hwmon_device = hwmon;
908 result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
909 if (result)
910 asus_wmi_hwmon_exit(asus);
911 return result;
912}
913
914/*
915 * Backlight
916 */
917static int read_backlight_power(struct asus_wmi *asus)
918{
919 int ret = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_BACKLIGHT);
920
921 if (ret < 0)
922 return ret;
923
924 return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
925}
926
927static int read_brightness_max(struct asus_wmi *asus)
928{
929 u32 retval;
930 int err;
931
932 err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval);
933
934 if (err < 0)
935 return err;
936
937 retval = retval & ASUS_WMI_DSTS_MAX_BRIGTH_MASK;
938 retval >>= 8;
939
940 if (!retval)
941 return -ENODEV;
942
943 return retval;
944}
945
946static int read_brightness(struct backlight_device *bd)
947{
948 struct asus_wmi *asus = bl_get_data(bd);
949 u32 retval;
950 int err;
951
952 err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval);
953
954 if (err < 0)
955 return err;
956
957 return retval & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
958}
959
960static int update_bl_status(struct backlight_device *bd)
961{
962 struct asus_wmi *asus = bl_get_data(bd);
963 u32 ctrl_param;
964 int power, err;
965
966 ctrl_param = bd->props.brightness;
967
968 err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BRIGHTNESS,
969 ctrl_param, NULL);
970
971 if (err < 0)
972 return err;
973
974 power = read_backlight_power(asus);
975 if (power != -ENODEV && bd->props.power != power) {
976 ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK);
977 err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT,
978 ctrl_param, NULL);
979 }
980 return err;
981}
982
983static const struct backlight_ops asus_wmi_bl_ops = {
984 .get_brightness = read_brightness,
985 .update_status = update_bl_status,
986};
987
988static int asus_wmi_backlight_notify(struct asus_wmi *asus, int code)
989{
990 struct backlight_device *bd = asus->backlight_device;
991 int old = bd->props.brightness;
992 int new = old;
993
994 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
995 new = code - NOTIFY_BRNUP_MIN + 1;
996 else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
997 new = code - NOTIFY_BRNDOWN_MIN;
998
999 bd->props.brightness = new;
1000 backlight_update_status(bd);
1001 backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
1002
1003 return old;
1004}
1005
1006static int asus_wmi_backlight_init(struct asus_wmi *asus)
1007{
1008 struct backlight_device *bd;
1009 struct backlight_properties props;
1010 int max;
1011 int power;
1012
1013 max = read_brightness_max(asus);
1014
1015 if (max == -ENODEV)
1016 max = 0;
1017 else if (max < 0)
1018 return max;
1019
1020 power = read_backlight_power(asus);
1021
1022 if (power == -ENODEV)
1023 power = FB_BLANK_UNBLANK;
1024 else if (power < 0)
1025 return power;
1026
1027 memset(&props, 0, sizeof(struct backlight_properties));
1028 props.max_brightness = max;
1029 bd = backlight_device_register(asus->driver->name,
1030 &asus->platform_device->dev, asus,
1031 &asus_wmi_bl_ops, &props);
1032 if (IS_ERR(bd)) {
1033 pr_err("Could not register backlight device\n");
1034 return PTR_ERR(bd);
1035 }
1036
1037 asus->backlight_device = bd;
1038
1039 bd->props.brightness = read_brightness(bd);
1040 bd->props.power = power;
1041 backlight_update_status(bd);
1042
1043 return 0;
1044}
1045
1046static void asus_wmi_backlight_exit(struct asus_wmi *asus)
1047{
1048 if (asus->backlight_device)
1049 backlight_device_unregister(asus->backlight_device);
1050
1051 asus->backlight_device = NULL;
1052}
1053
1054static void asus_wmi_notify(u32 value, void *context)
1055{
1056 struct asus_wmi *asus = context;
1057 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
1058 union acpi_object *obj;
1059 acpi_status status;
1060 int code;
1061 int orig_code;
1062
1063 status = wmi_get_event_data(value, &response);
1064 if (status != AE_OK) {
1065 pr_err("bad event status 0x%x\n", status);
1066 return;
1067 }
1068
1069 obj = (union acpi_object *)response.pointer;
1070
1071 if (!obj || obj->type != ACPI_TYPE_INTEGER)
1072 goto exit;
1073
1074 code = obj->integer.value;
1075 orig_code = code;
1076
1077 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
1078 code = NOTIFY_BRNUP_MIN;
1079 else if (code >= NOTIFY_BRNDOWN_MIN &&
1080 code <= NOTIFY_BRNDOWN_MAX)
1081 code = NOTIFY_BRNDOWN_MIN;
1082
1083 if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
1084 if (!acpi_video_backlight_support())
1085 asus_wmi_backlight_notify(asus, orig_code);
1086 } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true))
1087 pr_info("Unknown key %x pressed\n", code);
1088
1089exit:
1090 kfree(obj);
1091}
1092
1093/*
1094 * Sys helpers
1095 */
1096static int parse_arg(const char *buf, unsigned long count, int *val)
1097{
1098 if (!count)
1099 return 0;
1100 if (sscanf(buf, "%i", val) != 1)
1101 return -EINVAL;
1102 return count;
1103}
1104
1105static ssize_t store_sys_wmi(struct asus_wmi *asus, int devid,
1106 const char *buf, size_t count)
1107{
1108 u32 retval;
1109 int rv, err, value;
1110
1111 value = asus_wmi_get_devstate_simple(asus, devid);
1112 if (value == -ENODEV) /* Check device presence */
1113 return value;
1114
1115 rv = parse_arg(buf, count, &value);
1116 err = asus_wmi_set_devstate(devid, value, &retval);
1117
1118 if (err < 0)
1119 return err;
1120
1121 return rv;
1122}
1123
1124static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf)
1125{
1126 int value = asus_wmi_get_devstate_simple(asus, devid);
1127
1128 if (value < 0)
1129 return value;
1130
1131 return sprintf(buf, "%d\n", value);
1132}
1133
1134#define ASUS_WMI_CREATE_DEVICE_ATTR(_name, _mode, _cm) \
1135 static ssize_t show_##_name(struct device *dev, \
1136 struct device_attribute *attr, \
1137 char *buf) \
1138 { \
1139 struct asus_wmi *asus = dev_get_drvdata(dev); \
1140 \
1141 return show_sys_wmi(asus, _cm, buf); \
1142 } \
1143 static ssize_t store_##_name(struct device *dev, \
1144 struct device_attribute *attr, \
1145 const char *buf, size_t count) \
1146 { \
1147 struct asus_wmi *asus = dev_get_drvdata(dev); \
1148 \
1149 return store_sys_wmi(asus, _cm, buf, count); \
1150 } \
1151 static struct device_attribute dev_attr_##_name = { \
1152 .attr = { \
1153 .name = __stringify(_name), \
1154 .mode = _mode }, \
1155 .show = show_##_name, \
1156 .store = store_##_name, \
1157 }
1158
1159ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
1160ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
1161ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
1162
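ASUS_WMI_CREATE_DEVICE_ATTR() stamps out a show/store pair plus a struct device_attribute for each simple on/off device. Expanded by hand for the camera case above, it comes out to roughly the following (whitespace aside, this is a mechanical expansion of the macro, shown only as a reading aid):

static ssize_t show_camera(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct asus_wmi *asus = dev_get_drvdata(dev);

	return show_sys_wmi(asus, ASUS_WMI_DEVID_CAMERA, buf);
}

static ssize_t store_camera(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct asus_wmi *asus = dev_get_drvdata(dev);

	return store_sys_wmi(asus, ASUS_WMI_DEVID_CAMERA, buf, count);
}

static struct device_attribute dev_attr_camera = {
	.attr = { .name = "camera", .mode = 0644 },
	.show = show_camera,
	.store = store_camera,
};

The touchpad and cardr attributes expand the same way with their respective ASUS_WMI_DEVID_* ids.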
1163static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
1164 const char *buf, size_t count)
1165{
1166 int value;
1167
1168 if (!count || sscanf(buf, "%i", &value) != 1)
1169 return -EINVAL;
1170 if (value < 0 || value > 2)
1171 return -EINVAL;
1172
1173 return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
1174}
1175
1176static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
1177
1178static struct attribute *platform_attributes[] = {
1179 &dev_attr_cpufv.attr,
1180 &dev_attr_camera.attr,
1181 &dev_attr_cardr.attr,
1182 &dev_attr_touchpad.attr,
1183 NULL
1184};
1185
1186static mode_t asus_sysfs_is_visible(struct kobject *kobj,
1187 struct attribute *attr, int idx)
1188{
1189 struct device *dev = container_of(kobj, struct device, kobj);
1190 struct platform_device *pdev = to_platform_device(dev);
1191 struct asus_wmi *asus = platform_get_drvdata(pdev);
1192 bool ok = true;
1193 int devid = -1;
1194
1195 if (attr == &dev_attr_camera.attr)
1196 devid = ASUS_WMI_DEVID_CAMERA;
1197 else if (attr == &dev_attr_cardr.attr)
1198 devid = ASUS_WMI_DEVID_CARDREADER;
1199 else if (attr == &dev_attr_touchpad.attr)
1200 devid = ASUS_WMI_DEVID_TOUCHPAD;
1201
1202 if (devid != -1)
1203 ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
1204
1205 return ok ? attr->mode : 0;
1206}
1207
1208static struct attribute_group platform_attribute_group = {
1209 .is_visible = asus_sysfs_is_visible,
1210 .attrs = platform_attributes
1211};
1212
1213static void asus_wmi_sysfs_exit(struct platform_device *device)
1214{
1215 sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
1216}
1217
1218static int asus_wmi_sysfs_init(struct platform_device *device)
1219{
1220 return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
1221}
1222
1223/*
1224 * Platform device
1225 */
1226static int __init asus_wmi_platform_init(struct asus_wmi *asus)
1227{
1228 int rv;
1229
1230 /* INIT enables hotkeys on some models */
1231 if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_INIT, 0, 0, &rv))
1232 pr_info("Initialization: %#x", rv);
1233
1234 /* We don't know yet what to do with this version... */
1235 if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
1236 pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF);
1237 asus->spec = rv;
1238 }
1239
1240 /*
1241 * The SFUN method probably allows the original driver to get the list
1242 * of features supported by a given model. For now, the 0x0100 or 0x0800
1243 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
1244 * The significance of the other bits is yet to be found.
1245 */
1246 if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SFUN, 0, 0, &rv)) {
1247 pr_info("SFUN value: %#x", rv);
1248 asus->sfun = rv;
1249 }
1250
1251 /*
1252 * Eee PCs and Notebooks seem to have different method_ids for DSTS,
1253 * but it may also be related to the BIOS's SPEC.
1254 * Note, on most Eee PCs, there is no way to check whether a method
1255 * exists or not, while notebooks return 0xFFFFFFFE on failure,
1256 * but once again, SPEC can probably be used for that kind of thing.
1257 */
1258 if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
1259 asus->dsts_id = ASUS_WMI_METHODID_DSTS;
1260 else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
1261 asus->dsts_id = ASUS_WMI_METHODID_DSTS2;
1262
1263 if (!asus->dsts_id) {
1264 pr_err("Can't find DSTS");
1265 return -ENODEV;
1266 }
1267
1268 return asus_wmi_sysfs_init(asus->platform_device);
1269}
1270
1271static void asus_wmi_platform_exit(struct asus_wmi *asus)
1272{
1273 asus_wmi_sysfs_exit(asus->platform_device);
1274}
1275
1276/*
1277 * debugfs
1278 */
1279struct asus_wmi_debugfs_node {
1280 struct asus_wmi *asus;
1281 char *name;
1282 int (*show) (struct seq_file *m, void *data);
1283};
1284
1285static int show_dsts(struct seq_file *m, void *data)
1286{
1287 struct asus_wmi *asus = m->private;
1288 int err;
1289 u32 retval = -1;
1290
1291 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
1292
1293 if (err < 0)
1294 return err;
1295
1296 seq_printf(m, "DSTS(%#x) = %#x\n", asus->debug.dev_id, retval);
1297
1298 return 0;
1299}
1300
1301static int show_devs(struct seq_file *m, void *data)
1302{
1303 struct asus_wmi *asus = m->private;
1304 int err;
1305 u32 retval = -1;
1306
1307 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
1308 &retval);
1309
1310 if (err < 0)
1311 return err;
1312
1313 seq_printf(m, "DEVS(%#x, %#x) = %#x\n", asus->debug.dev_id,
1314 asus->debug.ctrl_param, retval);
1315
1316 return 0;
1317}
1318
1319static int show_call(struct seq_file *m, void *data)
1320{
1321 struct asus_wmi *asus = m->private;
1322 struct bios_args args = {
1323 .arg0 = asus->debug.dev_id,
1324 .arg1 = asus->debug.ctrl_param,
1325 };
1326 struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
1327 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
1328 union acpi_object *obj;
1329 acpi_status status;
1330
1331 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
1332 1, asus->debug.method_id,
1333 &input, &output);
1334
1335 if (ACPI_FAILURE(status))
1336 return -EIO;
1337
1338 obj = (union acpi_object *)output.pointer;
1339 if (obj && obj->type == ACPI_TYPE_INTEGER)
1340 seq_printf(m, "%#x(%#x, %#x) = %#x\n", asus->debug.method_id,
1341 asus->debug.dev_id, asus->debug.ctrl_param,
1342 (u32) obj->integer.value);
1343 else
1344 seq_printf(m, "%#x(%#x, %#x) = t:%d\n", asus->debug.method_id,
1345 asus->debug.dev_id, asus->debug.ctrl_param,
1346 obj ? obj->type : -1);
1347
1348 kfree(obj);
1349
1350 return 0;
1351}
1352
1353static struct asus_wmi_debugfs_node asus_wmi_debug_files[] = {
1354 {NULL, "devs", show_devs},
1355 {NULL, "dsts", show_dsts},
1356 {NULL, "call", show_call},
1357};
1358
1359static int asus_wmi_debugfs_open(struct inode *inode, struct file *file)
1360{
1361 struct asus_wmi_debugfs_node *node = inode->i_private;
1362
1363 return single_open(file, node->show, node->asus);
1364}
1365
1366static const struct file_operations asus_wmi_debugfs_io_ops = {
1367 .owner = THIS_MODULE,
1368 .open = asus_wmi_debugfs_open,
1369 .read = seq_read,
1370 .llseek = seq_lseek,
1371 .release = single_release,
1372};
1373
1374static void asus_wmi_debugfs_exit(struct asus_wmi *asus)
1375{
1376 debugfs_remove_recursive(asus->debug.root);
1377}
1378
1379static int asus_wmi_debugfs_init(struct asus_wmi *asus)
1380{
1381 struct dentry *dent;
1382 int i;
1383
1384 asus->debug.root = debugfs_create_dir(asus->driver->name, NULL);
1385 if (!asus->debug.root) {
1386 pr_err("failed to create debugfs directory");
1387 goto error_debugfs;
1388 }
1389
1390 dent = debugfs_create_x32("method_id", S_IRUGO | S_IWUSR,
1391 asus->debug.root, &asus->debug.method_id);
1392 if (!dent)
1393 goto error_debugfs;
1394
1395 dent = debugfs_create_x32("dev_id", S_IRUGO | S_IWUSR,
1396 asus->debug.root, &asus->debug.dev_id);
1397 if (!dent)
1398 goto error_debugfs;
1399
1400 dent = debugfs_create_x32("ctrl_param", S_IRUGO | S_IWUSR,
1401 asus->debug.root, &asus->debug.ctrl_param);
1402 if (!dent)
1403 goto error_debugfs;
1404
1405 for (i = 0; i < ARRAY_SIZE(asus_wmi_debug_files); i++) {
1406 struct asus_wmi_debugfs_node *node = &asus_wmi_debug_files[i];
1407
1408 node->asus = asus;
1409 dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
1410 asus->debug.root, node,
1411 &asus_wmi_debugfs_io_ops);
1412 if (!dent) {
1413 pr_err("failed to create debug file: %s\n", node->name);
1414 goto error_debugfs;
1415 }
1416 }
1417
1418 return 0;
1419
1420error_debugfs:
1421 asus_wmi_debugfs_exit(asus);
1422 return -ENOMEM;
1423}
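The debugfs block above exposes method_id, dev_id and ctrl_param as writable hex attributes, plus devs, dsts and call files that run the corresponding WMI query and print the result. As a rough userspace sketch of how it is meant to be exercised (not part of the patch; the debugfs mount point /sys/kernel/debug and the directory name "eeepc-wmi" are assumptions that depend on which front-end driver registered):

#include <stdio.h>

/* Illustrative only: assumes debugfs mounted at /sys/kernel/debug and a
 * driver registered under the name "eeepc-wmi" (the directory is created
 * from asus->driver->name). */
static int write_hex(const char *path, unsigned int val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "0x%x\n", val);	/* debugfs x32 attributes accept hex */
	return fclose(f);
}

int main(void)
{
	char line[128];
	FILE *f;

	/* Ask for the WLAN state: show_dsts() will run DSTS(0x00010011). */
	write_hex("/sys/kernel/debug/eeepc-wmi/dev_id", 0x00010011);

	f = fopen("/sys/kernel/debug/eeepc-wmi/dsts", "r");
	if (f && fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* line is formatted by show_dsts() */
	if (f)
		fclose(f);
	return 0;
}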
1424
1425/*
1426 * WMI Driver
1427 */
1428static int asus_wmi_add(struct platform_device *pdev)
1429{
1430 struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
1431 struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
1432 struct asus_wmi *asus;
1433 acpi_status status;
1434 int err;
1435
1436 asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);
1437 if (!asus)
1438 return -ENOMEM;
1439
1440 asus->driver = wdrv;
1441 asus->platform_device = pdev;
1442 wdrv->platform_device = pdev;
1443 platform_set_drvdata(asus->platform_device, asus);
1444
1445 if (wdrv->quirks)
1446 wdrv->quirks(asus->driver);
1447
1448 err = asus_wmi_platform_init(asus);
1449 if (err)
1450 goto fail_platform;
1451
1452 err = asus_wmi_input_init(asus);
1453 if (err)
1454 goto fail_input;
1455
1456 err = asus_wmi_hwmon_init(asus);
1457 if (err)
1458 goto fail_hwmon;
1459
1460 err = asus_wmi_led_init(asus);
1461 if (err)
1462 goto fail_leds;
1463
1464 err = asus_wmi_rfkill_init(asus);
1465 if (err)
1466 goto fail_rfkill;
1467
1468 if (!acpi_video_backlight_support()) {
1469 err = asus_wmi_backlight_init(asus);
1470 if (err && err != -ENODEV)
1471 goto fail_backlight;
1472 } else
1473 pr_info("Backlight controlled by ACPI video driver\n");
1474
1475 status = wmi_install_notify_handler(asus->driver->event_guid,
1476 asus_wmi_notify, asus);
1477 if (ACPI_FAILURE(status)) {
1478 pr_err("Unable to register notify handler - %d\n", status);
1479 err = -ENODEV;
1480 goto fail_wmi_handler;
1481 }
1482
1483 err = asus_wmi_debugfs_init(asus);
1484 if (err)
1485 goto fail_debugfs;
1486
1487 return 0;
1488
1489fail_debugfs:
1490 wmi_remove_notify_handler(asus->driver->event_guid);
1491fail_wmi_handler:
1492 asus_wmi_backlight_exit(asus);
1493fail_backlight:
1494 asus_wmi_rfkill_exit(asus);
1495fail_rfkill:
1496 asus_wmi_led_exit(asus);
1497fail_leds:
1498 asus_wmi_hwmon_exit(asus);
1499fail_hwmon:
1500 asus_wmi_input_exit(asus);
1501fail_input:
1502 asus_wmi_platform_exit(asus);
1503fail_platform:
1504 kfree(asus);
1505 return err;
1506}
1507
1508static int asus_wmi_remove(struct platform_device *device)
1509{
1510 struct asus_wmi *asus;
1511
1512 asus = platform_get_drvdata(device);
1513 wmi_remove_notify_handler(asus->driver->event_guid);
1514 asus_wmi_backlight_exit(asus);
1515 asus_wmi_input_exit(asus);
1516 asus_wmi_hwmon_exit(asus);
1517 asus_wmi_led_exit(asus);
1518 asus_wmi_rfkill_exit(asus);
1519 asus_wmi_debugfs_exit(asus);
1520 asus_wmi_platform_exit(asus);
1521
1522 kfree(asus);
1523 return 0;
1524}
1525
1526/*
1527 * Platform driver - hibernate/resume callbacks
1528 */
1529static int asus_hotk_thaw(struct device *device)
1530{
1531 struct asus_wmi *asus = dev_get_drvdata(device);
1532
1533 if (asus->wlan.rfkill) {
1534 bool wlan;
1535
1536 /*
1537		 * Work around a BIOS bug: the ACPI _PTS method turns off the
1538		 * wireless LED during suspend. Normally it is restored on resume,
1539		 * but we should kick it ourselves in case hibernation is aborted.
1540 */
1541 wlan = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
1542 asus_wmi_set_devstate(ASUS_WMI_DEVID_WLAN, wlan, NULL);
1543 }
1544
1545 return 0;
1546}
1547
1548static int asus_hotk_restore(struct device *device)
1549{
1550 struct asus_wmi *asus = dev_get_drvdata(device);
1551 int bl;
1552
1553 /* Refresh both wlan rfkill state and pci hotplug */
1554 if (asus->wlan.rfkill)
1555 asus_rfkill_hotplug(asus);
1556
1557 if (asus->bluetooth.rfkill) {
1558 bl = !asus_wmi_get_devstate_simple(asus,
1559 ASUS_WMI_DEVID_BLUETOOTH);
1560 rfkill_set_sw_state(asus->bluetooth.rfkill, bl);
1561 }
1562 if (asus->wimax.rfkill) {
1563 bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WIMAX);
1564 rfkill_set_sw_state(asus->wimax.rfkill, bl);
1565 }
1566 if (asus->wwan3g.rfkill) {
1567 bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G);
1568 rfkill_set_sw_state(asus->wwan3g.rfkill, bl);
1569 }
1570
1571 return 0;
1572}
1573
1574static const struct dev_pm_ops asus_pm_ops = {
1575 .thaw = asus_hotk_thaw,
1576 .restore = asus_hotk_restore,
1577};
1578
1579static int asus_wmi_probe(struct platform_device *pdev)
1580{
1581 struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
1582 struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
1583 int ret;
1584
1585 if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
1586 pr_warning("Management GUID not found\n");
1587 return -ENODEV;
1588 }
1589
1590 if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
1591 pr_warning("Event GUID not found\n");
1592 return -ENODEV;
1593 }
1594
1595 if (wdrv->probe) {
1596 ret = wdrv->probe(pdev);
1597 if (ret)
1598 return ret;
1599 }
1600
1601 return asus_wmi_add(pdev);
1602}
1603
1604static bool used;
1605
1606int asus_wmi_register_driver(struct asus_wmi_driver *driver)
1607{
1608 struct platform_driver *platform_driver;
1609 struct platform_device *platform_device;
1610
1611 if (used)
1612 return -EBUSY;
1613
1614 platform_driver = &driver->platform_driver;
1615 platform_driver->remove = asus_wmi_remove;
1616 platform_driver->driver.owner = driver->owner;
1617 platform_driver->driver.name = driver->name;
1618 platform_driver->driver.pm = &asus_pm_ops;
1619
1620 platform_device = platform_create_bundle(platform_driver,
1621 asus_wmi_probe,
1622 NULL, 0, NULL, 0);
1623 if (IS_ERR(platform_device))
1624 return PTR_ERR(platform_device);
1625
1626 used = true;
1627 return 0;
1628}
1629EXPORT_SYMBOL_GPL(asus_wmi_register_driver);
1630
1631void asus_wmi_unregister_driver(struct asus_wmi_driver *driver)
1632{
1633 platform_device_unregister(driver->platform_device);
1634 platform_driver_unregister(&driver->platform_driver);
1635 used = false;
1636}
1637EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver);
1638
1639static int __init asus_wmi_init(void)
1640{
1641 if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
1642		pr_info("Asus Management GUID not found\n");
1643 return -ENODEV;
1644 }
1645
1646	pr_info("ASUS WMI generic driver loaded\n");
1647 return 0;
1648}
1649
1650static void __exit asus_wmi_exit(void)
1651{
1652	pr_info("ASUS WMI generic driver unloaded\n");
1653}
1654
1655module_init(asus_wmi_init);
1656module_exit(asus_wmi_exit);
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
new file mode 100644
index 000000000000..c044522c8766
--- /dev/null
+++ b/drivers/platform/x86/asus-wmi.h
@@ -0,0 +1,58 @@
1/*
2 * Asus PC WMI hotkey driver
3 *
4 * Copyright(C) 2010 Intel Corporation.
5 * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com>
6 *
7 * Portions based on wistron_btns.c:
8 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
9 * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
10 * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#ifndef _ASUS_WMI_H_
28#define _ASUS_WMI_H_
29
30#include <linux/platform_device.h>
31
32struct module;
33struct key_entry;
34struct asus_wmi;
35
36struct asus_wmi_driver {
37 bool hotplug_wireless;
38
39 const char *name;
40 struct module *owner;
41
42 const char *event_guid;
43
44 const struct key_entry *keymap;
45 const char *input_name;
46 const char *input_phys;
47
48 int (*probe) (struct platform_device *device);
49 void (*quirks) (struct asus_wmi_driver *driver);
50
51 struct platform_driver platform_driver;
52 struct platform_device *platform_device;
53};
54
55int asus_wmi_register_driver(struct asus_wmi_driver *driver);
56void asus_wmi_unregister_driver(struct asus_wmi_driver *driver);
57
58#endif /* !_ASUS_WMI_H_ */
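This header is the whole contract between asus-wmi and its front-end drivers: a front end fills in struct asus_wmi_driver and calls asus_wmi_register_driver(), exactly as eeepc-wmi is converted to do later in this patch. A minimal sketch of such a front end (the GUID, scancode and names below are placeholders, not real values):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

#include "asus-wmi.h"

/* Placeholder GUID for illustration only; a real driver must use the
 * event GUID exposed by its firmware. */
#define EXAMPLE_WMI_EVENT_GUID "00000000-0000-0000-0000-000000000000"

static const struct key_entry example_wmi_keymap[] = {
	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },	/* placeholder scancode */
	{ KE_END, 0 },
};

static struct asus_wmi_driver example_wmi_driver = {
	.name		= "example-wmi",
	.owner		= THIS_MODULE,
	.event_guid	= EXAMPLE_WMI_EVENT_GUID,
	.keymap		= example_wmi_keymap,
	.input_name	= "Example WMI hotkeys",
	.input_phys	= "example-wmi/input0",
};

static int __init example_wmi_init(void)
{
	/* Creates the platform device/driver pair and probes the machine. */
	return asus_wmi_register_driver(&example_wmi_driver);
}

static void __exit example_wmi_exit(void)
{
	asus_wmi_unregister_driver(&example_wmi_driver);
}

module_init(example_wmi_init);
module_exit(example_wmi_exit);
MODULE_LICENSE("GPL");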
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index eb95878fa583..c16a27641ced 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -201,7 +201,7 @@ static bool extra_features;
201 * into 0x4F and read a few bytes from the output, like so: 201 * into 0x4F and read a few bytes from the output, like so:
202 * u8 writeData = 0x33; 202 * u8 writeData = 0x33;
203 * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0); 203 * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0);
204 * That address is labled "fan1 table information" in the service manual. 204 * That address is labelled "fan1 table information" in the service manual.
205 * It should be clear which value in 'buffer' changes). This seems to be 205 * It should be clear which value in 'buffer' changes). This seems to be
206 * related to fan speed. It isn't a proper 'realtime' fan speed value 206 * related to fan speed. It isn't a proper 'realtime' fan speed value
207 * though, because physically stopping or speeding up the fan doesn't 207 * though, because physically stopping or speeding up the fan doesn't
@@ -275,7 +275,7 @@ static int set_backlight_level(int level)
275 275
276 ec_write(BACKLIGHT_LEVEL_ADDR, level); 276 ec_write(BACKLIGHT_LEVEL_ADDR, level);
277 277
278 return 1; 278 return 0;
279} 279}
280 280
281static int get_backlight_level(void) 281static int get_backlight_level(void)
@@ -763,7 +763,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
763 printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n", 763 printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n",
764 id->ident); 764 id->ident);
765 extra_features = false; 765 extra_features = false;
766 return 0; 766 return 1;
767} 767}
768 768
769static int dmi_check_cb_extra(const struct dmi_system_id *id) 769static int dmi_check_cb_extra(const struct dmi_system_id *id)
@@ -772,7 +772,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
772 "enabling extra features\n", 772 "enabling extra features\n",
773 id->ident); 773 id->ident);
774 extra_features = true; 774 extra_features = true;
775 return 0; 775 return 1;
776} 776}
777 777
778static struct dmi_system_id __initdata compal_dmi_table[] = { 778static struct dmi_system_id __initdata compal_dmi_table[] = {
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
new file mode 100644
index 000000000000..0ed84573ae1f
--- /dev/null
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -0,0 +1,171 @@
1/*
2 * WMI hotkeys support for Dell All-In-One series
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/types.h>
24#include <linux/input.h>
25#include <linux/input/sparse-keymap.h>
26#include <acpi/acpi_drivers.h>
27#include <linux/acpi.h>
28#include <linux/string.h>
29
30MODULE_DESCRIPTION("WMI hotkeys driver for Dell All-In-One series");
31MODULE_LICENSE("GPL");
32
33#define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4"
34#define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8"
35
36static const char *dell_wmi_aio_guids[] = {
37 EVENT_GUID1,
38 EVENT_GUID2,
39 NULL
40};
41
42MODULE_ALIAS("wmi:"EVENT_GUID1);
43MODULE_ALIAS("wmi:"EVENT_GUID2);
44
45static const struct key_entry dell_wmi_aio_keymap[] = {
46 { KE_KEY, 0xc0, { KEY_VOLUMEUP } },
47 { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
48 { KE_END, 0 }
49};
50
51static struct input_dev *dell_wmi_aio_input_dev;
52
53static void dell_wmi_aio_notify(u32 value, void *context)
54{
55 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
56 union acpi_object *obj;
57 acpi_status status;
58
59 status = wmi_get_event_data(value, &response);
60 if (status != AE_OK) {
61 pr_info("bad event status 0x%x\n", status);
62 return;
63 }
64
65 obj = (union acpi_object *)response.pointer;
66 if (obj) {
67 unsigned int scancode;
68
69 switch (obj->type) {
70 case ACPI_TYPE_INTEGER:
71			/* Most All-In-Ones correctly return an integer scancode */
72 scancode = obj->integer.value;
73 sparse_keymap_report_event(dell_wmi_aio_input_dev,
74 scancode, 1, true);
75 break;
76 case ACPI_TYPE_BUFFER:
77 /* Broken machines return the scancode in a buffer */
78 if (obj->buffer.pointer && obj->buffer.length > 0) {
79 scancode = obj->buffer.pointer[0];
80 sparse_keymap_report_event(
81 dell_wmi_aio_input_dev,
82 scancode, 1, true);
83 }
84 break;
85 }
86 }
87 kfree(obj);
88}
89
90static int __init dell_wmi_aio_input_setup(void)
91{
92 int err;
93
94 dell_wmi_aio_input_dev = input_allocate_device();
95
96 if (!dell_wmi_aio_input_dev)
97 return -ENOMEM;
98
99 dell_wmi_aio_input_dev->name = "Dell AIO WMI hotkeys";
100 dell_wmi_aio_input_dev->phys = "wmi/input0";
101 dell_wmi_aio_input_dev->id.bustype = BUS_HOST;
102
103 err = sparse_keymap_setup(dell_wmi_aio_input_dev,
104 dell_wmi_aio_keymap, NULL);
105 if (err) {
106 pr_err("Unable to setup input device keymap\n");
107 goto err_free_dev;
108 }
109 err = input_register_device(dell_wmi_aio_input_dev);
110 if (err) {
111 pr_info("Unable to register input device\n");
112 goto err_free_keymap;
113 }
114 return 0;
115
116err_free_keymap:
117 sparse_keymap_free(dell_wmi_aio_input_dev);
118err_free_dev:
119 input_free_device(dell_wmi_aio_input_dev);
120 return err;
121}
122
123static const char *dell_wmi_aio_find(void)
124{
125 int i;
126
127 for (i = 0; dell_wmi_aio_guids[i] != NULL; i++)
128 if (wmi_has_guid(dell_wmi_aio_guids[i]))
129 return dell_wmi_aio_guids[i];
130
131 return NULL;
132}
133
134static int __init dell_wmi_aio_init(void)
135{
136 int err;
137 const char *guid;
138
139 guid = dell_wmi_aio_find();
140 if (!guid) {
141 pr_warning("No known WMI GUID found\n");
142 return -ENXIO;
143 }
144
145 err = dell_wmi_aio_input_setup();
146 if (err)
147 return err;
148
149 err = wmi_install_notify_handler(guid, dell_wmi_aio_notify, NULL);
150 if (err) {
151 pr_err("Unable to register notify handler - %d\n", err);
152 sparse_keymap_free(dell_wmi_aio_input_dev);
153 input_unregister_device(dell_wmi_aio_input_dev);
154 return err;
155 }
156
157 return 0;
158}
159
160static void __exit dell_wmi_aio_exit(void)
161{
162 const char *guid;
163
164 guid = dell_wmi_aio_find();
165 wmi_remove_notify_handler(guid);
166 sparse_keymap_free(dell_wmi_aio_input_dev);
167 input_unregister_device(dell_wmi_aio_input_dev);
168}
169
170module_init(dell_wmi_aio_init);
171module_exit(dell_wmi_aio_exit);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 6605beac0d0e..5f2dd386152b 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1322,7 +1322,7 @@ static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name)
1322{ 1322{
1323 int dummy; 1323 int dummy;
1324 1324
1325 /* Some BIOSes do not report cm although it is avaliable. 1325 /* Some BIOSes do not report cm although it is available.
1326 Check if cm_getv[cm] works and, if yes, assume cm should be set. */ 1326 Check if cm_getv[cm] works and, if yes, assume cm should be set. */
1327 if (!(eeepc->cm_supported & (1 << cm)) 1327 if (!(eeepc->cm_supported & (1 << cm))
1328 && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) { 1328 && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) {
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 4d38f98aa976..0ddc434fb93b 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -2,7 +2,7 @@
2 * Eee PC WMI hotkey driver 2 * Eee PC WMI hotkey driver
3 * 3 *
4 * Copyright(C) 2010 Intel Corporation. 4 * Copyright(C) 2010 Intel Corporation.
5 * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com> 5 * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com>
6 * 6 *
7 * Portions based on wistron_btns.c: 7 * Portions based on wistron_btns.c:
8 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> 8 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
@@ -29,841 +29,57 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/types.h>
33#include <linux/slab.h>
34#include <linux/input.h> 32#include <linux/input.h>
35#include <linux/input/sparse-keymap.h> 33#include <linux/input/sparse-keymap.h>
36#include <linux/fb.h> 34#include <linux/dmi.h>
37#include <linux/backlight.h>
38#include <linux/leds.h>
39#include <linux/rfkill.h>
40#include <linux/debugfs.h>
41#include <linux/seq_file.h>
42#include <linux/platform_device.h>
43#include <acpi/acpi_bus.h> 35#include <acpi/acpi_bus.h>
44#include <acpi/acpi_drivers.h> 36
37#include "asus-wmi.h"
45 38
46#define EEEPC_WMI_FILE "eeepc-wmi" 39#define EEEPC_WMI_FILE "eeepc-wmi"
47 40
48MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); 41MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>");
49MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); 42MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
50MODULE_LICENSE("GPL"); 43MODULE_LICENSE("GPL");
51 44
52#define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */ 45#define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */
53 46
54#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" 47#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
55#define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
56 48
57MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); 49MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
58MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID);
59
60#define NOTIFY_BRNUP_MIN 0x11
61#define NOTIFY_BRNUP_MAX 0x1f
62#define NOTIFY_BRNDOWN_MIN 0x20
63#define NOTIFY_BRNDOWN_MAX 0x2e
64 50
65#define EEEPC_WMI_METHODID_DEVS 0x53564544 51static bool hotplug_wireless;
66#define EEEPC_WMI_METHODID_DSTS 0x53544344
67#define EEEPC_WMI_METHODID_CFVS 0x53564643
68 52
69#define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012 53module_param(hotplug_wireless, bool, 0444);
70#define EEEPC_WMI_DEVID_TPDLED 0x00100011 54MODULE_PARM_DESC(hotplug_wireless,
71#define EEEPC_WMI_DEVID_WLAN 0x00010011 55 "Enable hotplug for wireless device. "
72#define EEEPC_WMI_DEVID_BLUETOOTH 0x00010013 56 "If your laptop needs that, please report to "
73#define EEEPC_WMI_DEVID_WWAN3G 0x00010019 57 "acpi4asus-user@lists.sourceforge.net.");
74 58
75static const struct key_entry eeepc_wmi_keymap[] = { 59static const struct key_entry eeepc_wmi_keymap[] = {
76 /* Sleep already handled via generic ACPI code */ 60 /* Sleep already handled via generic ACPI code */
77 { KE_KEY, 0x5d, { KEY_WLAN } },
78 { KE_KEY, 0x32, { KEY_MUTE } },
79 { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
80 { KE_KEY, 0x30, { KEY_VOLUMEUP } }, 61 { KE_KEY, 0x30, { KEY_VOLUMEUP } },
81 { KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } }, 62 { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
82 { KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } }, 63 { KE_KEY, 0x32, { KEY_MUTE } },
64 { KE_KEY, 0x5c, { KEY_F15 } }, /* Power Gear key */
65 { KE_KEY, 0x5d, { KEY_WLAN } },
66 { KE_KEY, 0x6b, { KEY_TOUCHPAD_TOGGLE } }, /* Toggle Touchpad */
67 { KE_KEY, 0x82, { KEY_CAMERA } },
68 { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } },
69 { KE_KEY, 0x88, { KEY_WLAN } },
83 { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, 70 { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
84 { KE_KEY, 0x6b, { KEY_F13 } }, /* Disable Touchpad */ 71 { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
85 { KE_KEY, 0xe1, { KEY_F14 } }, 72 { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
86 { KE_KEY, 0xe9, { KEY_DISPLAY_OFF } }, 73 { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
87 { KE_KEY, 0xe0, { KEY_PROG1 } }, 74 { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
88 { KE_KEY, 0x5c, { KEY_F15 } }, 75 { KE_KEY, 0xec, { KEY_CAMERA_UP } },
76 { KE_KEY, 0xed, { KEY_CAMERA_DOWN } },
77 { KE_KEY, 0xee, { KEY_CAMERA_LEFT } },
78 { KE_KEY, 0xef, { KEY_CAMERA_RIGHT } },
89 { KE_END, 0}, 79 { KE_END, 0},
90}; 80};
91 81
92struct bios_args { 82static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
93 u32 dev_id;
94 u32 ctrl_param;
95};
96
97/*
98 * eeepc-wmi/ - debugfs root directory
99 * dev_id - current dev_id
100 * ctrl_param - current ctrl_param
101 * devs - call DEVS(dev_id, ctrl_param) and print result
102 * dsts - call DSTS(dev_id) and print result
103 */
104struct eeepc_wmi_debug {
105 struct dentry *root;
106 u32 dev_id;
107 u32 ctrl_param;
108};
109
110struct eeepc_wmi {
111 struct input_dev *inputdev;
112 struct backlight_device *backlight_device;
113 struct platform_device *platform_device;
114
115 struct led_classdev tpd_led;
116 int tpd_led_wk;
117 struct workqueue_struct *led_workqueue;
118 struct work_struct tpd_led_work;
119
120 struct rfkill *wlan_rfkill;
121 struct rfkill *bluetooth_rfkill;
122 struct rfkill *wwan3g_rfkill;
123
124 struct eeepc_wmi_debug debug;
125};
126
127/* Only used in eeepc_wmi_init() and eeepc_wmi_exit() */
128static struct platform_device *platform_device;
129
130static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc)
131{
132 int err;
133
134 eeepc->inputdev = input_allocate_device();
135 if (!eeepc->inputdev)
136 return -ENOMEM;
137
138 eeepc->inputdev->name = "Eee PC WMI hotkeys";
139 eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0";
140 eeepc->inputdev->id.bustype = BUS_HOST;
141 eeepc->inputdev->dev.parent = &eeepc->platform_device->dev;
142
143 err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL);
144 if (err)
145 goto err_free_dev;
146
147 err = input_register_device(eeepc->inputdev);
148 if (err)
149 goto err_free_keymap;
150
151 return 0;
152
153err_free_keymap:
154 sparse_keymap_free(eeepc->inputdev);
155err_free_dev:
156 input_free_device(eeepc->inputdev);
157 return err;
158}
159
160static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc)
161{
162 if (eeepc->inputdev) {
163 sparse_keymap_free(eeepc->inputdev);
164 input_unregister_device(eeepc->inputdev);
165 }
166
167 eeepc->inputdev = NULL;
168}
169
170static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *retval)
171{
172 struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id };
173 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
174 union acpi_object *obj;
175 acpi_status status;
176 u32 tmp;
177
178 status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
179 1, EEEPC_WMI_METHODID_DSTS, &input, &output);
180
181 if (ACPI_FAILURE(status))
182 return status;
183
184 obj = (union acpi_object *)output.pointer;
185 if (obj && obj->type == ACPI_TYPE_INTEGER)
186 tmp = (u32)obj->integer.value;
187 else
188 tmp = 0;
189
190 if (retval)
191 *retval = tmp;
192
193 kfree(obj);
194
195 return status;
196
197}
198
199static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
200 u32 *retval)
201{
202 struct bios_args args = {
203 .dev_id = dev_id,
204 .ctrl_param = ctrl_param,
205 };
206 struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
207 acpi_status status;
208
209 if (!retval) {
210 status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1,
211 EEEPC_WMI_METHODID_DEVS,
212 &input, NULL);
213 } else {
214 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
215 union acpi_object *obj;
216 u32 tmp;
217
218 status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1,
219 EEEPC_WMI_METHODID_DEVS,
220 &input, &output);
221
222 if (ACPI_FAILURE(status))
223 return status;
224
225 obj = (union acpi_object *)output.pointer;
226 if (obj && obj->type == ACPI_TYPE_INTEGER)
227 tmp = (u32)obj->integer.value;
228 else
229 tmp = 0;
230
231 *retval = tmp;
232
233 kfree(obj);
234 }
235
236 return status;
237}
238
239/*
240 * LEDs
241 */
242/*
243 * These functions actually update the LEDs, and are called from a
244 * workqueue. By doing this as separate work rather than when the LED
245 * subsystem asks, we avoid messing with the Eee PC ACPI stuff during a
246 * potentially bad time, such as a timer interrupt.
247 */
248static void tpd_led_update(struct work_struct *work)
249{
250 int ctrl_param;
251 struct eeepc_wmi *eeepc;
252
253 eeepc = container_of(work, struct eeepc_wmi, tpd_led_work);
254
255 ctrl_param = eeepc->tpd_led_wk;
256 eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_TPDLED, ctrl_param, NULL);
257}
258
259static void tpd_led_set(struct led_classdev *led_cdev,
260 enum led_brightness value)
261{
262 struct eeepc_wmi *eeepc;
263
264 eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led);
265
266 eeepc->tpd_led_wk = !!value;
267 queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
268}
269
270static int read_tpd_state(struct eeepc_wmi *eeepc)
271{
272 u32 retval;
273 acpi_status status;
274
275 status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_TPDLED, &retval);
276
277 if (ACPI_FAILURE(status))
278 return -1;
279 else if (!retval || retval == 0x00060000)
280 /*
281 * if touchpad led is present, DSTS will set some bits,
282 * usually 0x00020000.
283 * 0x00060000 means that the device is not supported
284 */
285 return -ENODEV;
286 else
287 /* Status is stored in the first bit */
288 return retval & 0x1;
289}
290
291static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
292{
293 struct eeepc_wmi *eeepc;
294
295 eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led);
296
297 return read_tpd_state(eeepc);
298}
299
300static int eeepc_wmi_led_init(struct eeepc_wmi *eeepc)
301{
302 int rv;
303
304 if (read_tpd_state(eeepc) < 0)
305 return 0;
306
307 eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue");
308 if (!eeepc->led_workqueue)
309 return -ENOMEM;
310 INIT_WORK(&eeepc->tpd_led_work, tpd_led_update);
311
312 eeepc->tpd_led.name = "eeepc::touchpad";
313 eeepc->tpd_led.brightness_set = tpd_led_set;
314 eeepc->tpd_led.brightness_get = tpd_led_get;
315 eeepc->tpd_led.max_brightness = 1;
316
317 rv = led_classdev_register(&eeepc->platform_device->dev,
318 &eeepc->tpd_led);
319 if (rv) {
320 destroy_workqueue(eeepc->led_workqueue);
321 return rv;
322 }
323
324 return 0;
325}
326
327static void eeepc_wmi_led_exit(struct eeepc_wmi *eeepc)
328{
329 if (eeepc->tpd_led.dev)
330 led_classdev_unregister(&eeepc->tpd_led);
331 if (eeepc->led_workqueue)
332 destroy_workqueue(eeepc->led_workqueue);
333}
334
335/*
336 * Rfkill devices
337 */
338static int eeepc_rfkill_set(void *data, bool blocked)
339{
340 int dev_id = (unsigned long)data;
341 u32 ctrl_param = !blocked;
342
343 return eeepc_wmi_set_devstate(dev_id, ctrl_param, NULL);
344}
345
346static void eeepc_rfkill_query(struct rfkill *rfkill, void *data)
347{
348 int dev_id = (unsigned long)data;
349 u32 retval;
350 acpi_status status;
351
352 status = eeepc_wmi_get_devstate(dev_id, &retval);
353
354 if (ACPI_FAILURE(status))
355 return ;
356
357 rfkill_set_sw_state(rfkill, !(retval & 0x1));
358}
359
360static const struct rfkill_ops eeepc_rfkill_ops = {
361 .set_block = eeepc_rfkill_set,
362 .query = eeepc_rfkill_query,
363};
364
365static int eeepc_new_rfkill(struct eeepc_wmi *eeepc,
366 struct rfkill **rfkill,
367 const char *name,
368 enum rfkill_type type, int dev_id)
369{
370 int result;
371 u32 retval;
372 acpi_status status;
373
374 status = eeepc_wmi_get_devstate(dev_id, &retval);
375
376 if (ACPI_FAILURE(status))
377 return -1;
378
379 /* If the device is present, DSTS will always set some bits
380 * 0x00070000 - 1110000000000000000 - device supported
381 * 0x00060000 - 1100000000000000000 - not supported
382 * 0x00020000 - 0100000000000000000 - device supported
383 * 0x00010000 - 0010000000000000000 - not supported / special mode ?
384 */
385 if (!retval || retval == 0x00060000)
386 return -ENODEV;
387
388 *rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type,
389 &eeepc_rfkill_ops, (void *)(long)dev_id);
390
391 if (!*rfkill)
392 return -EINVAL;
393
394 rfkill_init_sw_state(*rfkill, !(retval & 0x1));
395 result = rfkill_register(*rfkill);
396 if (result) {
397 rfkill_destroy(*rfkill);
398 *rfkill = NULL;
399 return result;
400 }
401 return 0;
402}
403
404static void eeepc_wmi_rfkill_exit(struct eeepc_wmi *eeepc)
405{
406 if (eeepc->wlan_rfkill) {
407 rfkill_unregister(eeepc->wlan_rfkill);
408 rfkill_destroy(eeepc->wlan_rfkill);
409 eeepc->wlan_rfkill = NULL;
410 }
411 if (eeepc->bluetooth_rfkill) {
412 rfkill_unregister(eeepc->bluetooth_rfkill);
413 rfkill_destroy(eeepc->bluetooth_rfkill);
414 eeepc->bluetooth_rfkill = NULL;
415 }
416 if (eeepc->wwan3g_rfkill) {
417 rfkill_unregister(eeepc->wwan3g_rfkill);
418 rfkill_destroy(eeepc->wwan3g_rfkill);
419 eeepc->wwan3g_rfkill = NULL;
420 }
421}
422
423static int eeepc_wmi_rfkill_init(struct eeepc_wmi *eeepc)
424{
425 int result = 0;
426
427 result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill,
428 "eeepc-wlan", RFKILL_TYPE_WLAN,
429 EEEPC_WMI_DEVID_WLAN);
430
431 if (result && result != -ENODEV)
432 goto exit;
433
434 result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill,
435 "eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH,
436 EEEPC_WMI_DEVID_BLUETOOTH);
437
438 if (result && result != -ENODEV)
439 goto exit;
440
441 result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill,
442 "eeepc-wwan3g", RFKILL_TYPE_WWAN,
443 EEEPC_WMI_DEVID_WWAN3G);
444
445 if (result && result != -ENODEV)
446 goto exit;
447
448exit:
449 if (result && result != -ENODEV)
450 eeepc_wmi_rfkill_exit(eeepc);
451
452 if (result == -ENODEV)
453 result = 0;
454
455 return result;
456}
457
458/*
459 * Backlight
460 */
461static int read_brightness(struct backlight_device *bd)
462{
463 u32 retval;
464 acpi_status status;
465
466 status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &retval);
467
468 if (ACPI_FAILURE(status))
469 return -1;
470 else
471 return retval & 0xFF;
472}
473
474static int update_bl_status(struct backlight_device *bd)
475{
476
477 u32 ctrl_param;
478 acpi_status status;
479
480 ctrl_param = bd->props.brightness;
481
482 status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT,
483 ctrl_param, NULL);
484
485 if (ACPI_FAILURE(status))
486 return -1;
487 else
488 return 0;
489}
490
491static const struct backlight_ops eeepc_wmi_bl_ops = {
492 .get_brightness = read_brightness,
493 .update_status = update_bl_status,
494};
495
496static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code)
497{
498 struct backlight_device *bd = eeepc->backlight_device;
499 int old = bd->props.brightness;
500 int new = old;
501
502 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
503 new = code - NOTIFY_BRNUP_MIN + 1;
504 else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
505 new = code - NOTIFY_BRNDOWN_MIN;
506
507 bd->props.brightness = new;
508 backlight_update_status(bd);
509 backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
510
511 return old;
512}
513
514static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc)
515{
516 struct backlight_device *bd;
517 struct backlight_properties props;
518
519 memset(&props, 0, sizeof(struct backlight_properties));
520 props.max_brightness = 15;
521 bd = backlight_device_register(EEEPC_WMI_FILE,
522 &eeepc->platform_device->dev, eeepc,
523 &eeepc_wmi_bl_ops, &props);
524 if (IS_ERR(bd)) {
525 pr_err("Could not register backlight device\n");
526 return PTR_ERR(bd);
527 }
528
529 eeepc->backlight_device = bd;
530
531 bd->props.brightness = read_brightness(bd);
532 bd->props.power = FB_BLANK_UNBLANK;
533 backlight_update_status(bd);
534
535 return 0;
536}
537
538static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc)
539{
540 if (eeepc->backlight_device)
541 backlight_device_unregister(eeepc->backlight_device);
542
543 eeepc->backlight_device = NULL;
544}
545
546static void eeepc_wmi_notify(u32 value, void *context)
547{
548 struct eeepc_wmi *eeepc = context;
549 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
550 union acpi_object *obj;
551 acpi_status status;
552 int code;
553 int orig_code;
554
555 status = wmi_get_event_data(value, &response);
556 if (status != AE_OK) {
557 pr_err("bad event status 0x%x\n", status);
558 return;
559 }
560
561 obj = (union acpi_object *)response.pointer;
562
563 if (obj && obj->type == ACPI_TYPE_INTEGER) {
564 code = obj->integer.value;
565 orig_code = code;
566
567 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
568 code = NOTIFY_BRNUP_MIN;
569 else if (code >= NOTIFY_BRNDOWN_MIN &&
570 code <= NOTIFY_BRNDOWN_MAX)
571 code = NOTIFY_BRNDOWN_MIN;
572
573 if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
574 if (!acpi_video_backlight_support())
575 eeepc_wmi_backlight_notify(eeepc, orig_code);
576 }
577
578 if (!sparse_keymap_report_event(eeepc->inputdev,
579 code, 1, true))
580 pr_info("Unknown key %x pressed\n", code);
581 }
582
583 kfree(obj);
584}
585
586static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
587 const char *buf, size_t count)
588{
589 int value;
590 struct acpi_buffer input = { (acpi_size)sizeof(value), &value };
591 acpi_status status;
592
593 if (!count || sscanf(buf, "%i", &value) != 1)
594 return -EINVAL;
595 if (value < 0 || value > 2)
596 return -EINVAL;
597
598 status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
599 1, EEEPC_WMI_METHODID_CFVS, &input, NULL);
600
601 if (ACPI_FAILURE(status))
602 return -EIO;
603 else
604 return count;
605}
606
607static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
608
609static struct attribute *platform_attributes[] = {
610 &dev_attr_cpufv.attr,
611 NULL
612};
613
614static struct attribute_group platform_attribute_group = {
615 .attrs = platform_attributes
616};
617
618static void eeepc_wmi_sysfs_exit(struct platform_device *device)
619{
620 sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
621}
622
623static int eeepc_wmi_sysfs_init(struct platform_device *device)
624{
625 return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
626}
627
628/*
629 * Platform device
630 */
631static int __init eeepc_wmi_platform_init(struct eeepc_wmi *eeepc)
632{
633 int err;
634
635 eeepc->platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
636 if (!eeepc->platform_device)
637 return -ENOMEM;
638 platform_set_drvdata(eeepc->platform_device, eeepc);
639
640 err = platform_device_add(eeepc->platform_device);
641 if (err)
642 goto fail_platform_device;
643
644 err = eeepc_wmi_sysfs_init(eeepc->platform_device);
645 if (err)
646 goto fail_sysfs;
647 return 0;
648
649fail_sysfs:
650 platform_device_del(eeepc->platform_device);
651fail_platform_device:
652 platform_device_put(eeepc->platform_device);
653 return err;
654}
655
656static void eeepc_wmi_platform_exit(struct eeepc_wmi *eeepc)
657{
658 eeepc_wmi_sysfs_exit(eeepc->platform_device);
659 platform_device_unregister(eeepc->platform_device);
660}
661
662/*
663 * debugfs
664 */
665struct eeepc_wmi_debugfs_node {
666 struct eeepc_wmi *eeepc;
667 char *name;
668 int (*show)(struct seq_file *m, void *data);
669};
670
671static int show_dsts(struct seq_file *m, void *data)
672{
673 struct eeepc_wmi *eeepc = m->private;
674 acpi_status status;
675 u32 retval = -1;
676
677 status = eeepc_wmi_get_devstate(eeepc->debug.dev_id, &retval);
678
679 if (ACPI_FAILURE(status))
680 return -EIO;
681
682 seq_printf(m, "DSTS(%x) = %x\n", eeepc->debug.dev_id, retval);
683
684 return 0;
685}
686
687static int show_devs(struct seq_file *m, void *data)
688{
689 struct eeepc_wmi *eeepc = m->private;
690 acpi_status status;
691 u32 retval = -1;
692
693 status = eeepc_wmi_set_devstate(eeepc->debug.dev_id,
694 eeepc->debug.ctrl_param, &retval);
695 if (ACPI_FAILURE(status))
696 return -EIO;
697
698 seq_printf(m, "DEVS(%x, %x) = %x\n", eeepc->debug.dev_id,
699 eeepc->debug.ctrl_param, retval);
700
701 return 0;
702}
703
704static struct eeepc_wmi_debugfs_node eeepc_wmi_debug_files[] = {
705 { NULL, "devs", show_devs },
706 { NULL, "dsts", show_dsts },
707};
708
709static int eeepc_wmi_debugfs_open(struct inode *inode, struct file *file)
710{
711 struct eeepc_wmi_debugfs_node *node = inode->i_private;
712
713 return single_open(file, node->show, node->eeepc);
714}
715
716static const struct file_operations eeepc_wmi_debugfs_io_ops = {
717 .owner = THIS_MODULE,
718 .open = eeepc_wmi_debugfs_open,
719 .read = seq_read,
720 .llseek = seq_lseek,
721 .release = single_release,
722};
723
724static void eeepc_wmi_debugfs_exit(struct eeepc_wmi *eeepc)
725{
726 debugfs_remove_recursive(eeepc->debug.root);
727}
728
729static int eeepc_wmi_debugfs_init(struct eeepc_wmi *eeepc)
730{
731 struct dentry *dent;
732 int i;
733
734 eeepc->debug.root = debugfs_create_dir(EEEPC_WMI_FILE, NULL);
735 if (!eeepc->debug.root) {
736 pr_err("failed to create debugfs directory");
737 goto error_debugfs;
738 }
739
740 dent = debugfs_create_x32("dev_id", S_IRUGO|S_IWUSR,
741 eeepc->debug.root, &eeepc->debug.dev_id);
742 if (!dent)
743 goto error_debugfs;
744
745 dent = debugfs_create_x32("ctrl_param", S_IRUGO|S_IWUSR,
746 eeepc->debug.root, &eeepc->debug.ctrl_param);
747 if (!dent)
748 goto error_debugfs;
749
750 for (i = 0; i < ARRAY_SIZE(eeepc_wmi_debug_files); i++) {
751 struct eeepc_wmi_debugfs_node *node = &eeepc_wmi_debug_files[i];
752
753 node->eeepc = eeepc;
754 dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
755 eeepc->debug.root, node,
756 &eeepc_wmi_debugfs_io_ops);
757 if (!dent) {
758 pr_err("failed to create debug file: %s\n", node->name);
759 goto error_debugfs;
760 }
761 }
762
763 return 0;
764
765error_debugfs:
766 eeepc_wmi_debugfs_exit(eeepc);
767 return -ENOMEM;
768}
769
770/*
771 * WMI Driver
772 */
773static struct platform_device * __init eeepc_wmi_add(void)
774{
775 struct eeepc_wmi *eeepc;
776 acpi_status status;
777 int err;
778
779 eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
780 if (!eeepc)
781 return ERR_PTR(-ENOMEM);
782
783 /*
784 * Register the platform device first. It is used as a parent for the
785 * sub-devices below.
786 */
787 err = eeepc_wmi_platform_init(eeepc);
788 if (err)
789 goto fail_platform;
790
791 err = eeepc_wmi_input_init(eeepc);
792 if (err)
793 goto fail_input;
794
795 err = eeepc_wmi_led_init(eeepc);
796 if (err)
797 goto fail_leds;
798
799 err = eeepc_wmi_rfkill_init(eeepc);
800 if (err)
801 goto fail_rfkill;
802
803 if (!acpi_video_backlight_support()) {
804 err = eeepc_wmi_backlight_init(eeepc);
805 if (err)
806 goto fail_backlight;
807 } else
808 pr_info("Backlight controlled by ACPI video driver\n");
809
810 status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
811 eeepc_wmi_notify, eeepc);
812 if (ACPI_FAILURE(status)) {
813 pr_err("Unable to register notify handler - %d\n",
814 status);
815 err = -ENODEV;
816 goto fail_wmi_handler;
817 }
818
819 err = eeepc_wmi_debugfs_init(eeepc);
820 if (err)
821 goto fail_debugfs;
822
823 return eeepc->platform_device;
824
825fail_debugfs:
826 wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
827fail_wmi_handler:
828 eeepc_wmi_backlight_exit(eeepc);
829fail_backlight:
830 eeepc_wmi_rfkill_exit(eeepc);
831fail_rfkill:
832 eeepc_wmi_led_exit(eeepc);
833fail_leds:
834 eeepc_wmi_input_exit(eeepc);
835fail_input:
836 eeepc_wmi_platform_exit(eeepc);
837fail_platform:
838 kfree(eeepc);
839 return ERR_PTR(err);
840}
841
842static int eeepc_wmi_remove(struct platform_device *device)
843{
844 struct eeepc_wmi *eeepc;
845
846 eeepc = platform_get_drvdata(device);
847 wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
848 eeepc_wmi_backlight_exit(eeepc);
849 eeepc_wmi_input_exit(eeepc);
850 eeepc_wmi_led_exit(eeepc);
851 eeepc_wmi_rfkill_exit(eeepc);
852 eeepc_wmi_debugfs_exit(eeepc);
853 eeepc_wmi_platform_exit(eeepc);
854
855 kfree(eeepc);
856 return 0;
857}
858
859static struct platform_driver platform_driver = {
860 .driver = {
861 .name = EEEPC_WMI_FILE,
862 .owner = THIS_MODULE,
863 },
864};
865
866static acpi_status __init eeepc_wmi_parse_device(acpi_handle handle, u32 level,
867 void *context, void **retval) 83 void *context, void **retval)
868{ 84{
869 pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID); 85 pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID);
@@ -871,7 +87,7 @@ static acpi_status __init eeepc_wmi_parse_device(acpi_handle handle, u32 level,
871 return AE_CTRL_TERMINATE; 87 return AE_CTRL_TERMINATE;
872} 88}
873 89
874static int __init eeepc_wmi_check_atkd(void) 90static int eeepc_wmi_check_atkd(void)
875{ 91{
876 acpi_status status; 92 acpi_status status;
877 bool found = false; 93 bool found = false;
@@ -884,16 +100,8 @@ static int __init eeepc_wmi_check_atkd(void)
884 return -1; 100 return -1;
885} 101}
886 102
887static int __init eeepc_wmi_init(void) 103static int eeepc_wmi_probe(struct platform_device *pdev)
888{ 104{
889 int err;
890
891 if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) ||
892 !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) {
893 pr_warning("No known WMI GUID found\n");
894 return -ENODEV;
895 }
896
897 if (eeepc_wmi_check_atkd()) { 105 if (eeepc_wmi_check_atkd()) {
898 pr_warning("WMI device present, but legacy ATKD device is also " 106 pr_warning("WMI device present, but legacy ATKD device is also "
899 "present and enabled."); 107 "present and enabled.");
@@ -901,33 +109,59 @@ static int __init eeepc_wmi_init(void)
901 "acpi_osi=\"!Windows 2009\""); 109 "acpi_osi=\"!Windows 2009\"");
902 pr_warning("Can't load eeepc-wmi, use default acpi_osi " 110 pr_warning("Can't load eeepc-wmi, use default acpi_osi "
903 "(preferred) or eeepc-laptop"); 111 "(preferred) or eeepc-laptop");
904 return -ENODEV; 112 return -EBUSY;
905 } 113 }
114 return 0;
115}
906 116
907 platform_device = eeepc_wmi_add(); 117static void eeepc_dmi_check(struct asus_wmi_driver *driver)
908 if (IS_ERR(platform_device)) { 118{
909 err = PTR_ERR(platform_device); 119 const char *model;
910 goto fail_eeepc_wmi; 120
911 } 121 model = dmi_get_system_info(DMI_PRODUCT_NAME);
122 if (!model)
123 return;
912 124
913 err = platform_driver_register(&platform_driver); 125 /*
914 if (err) { 126 * Whitelist for wlan hotplug
915 pr_warning("Unable to register platform driver\n"); 127 *
916 goto fail_platform_driver; 128 * Asus 1000H needs the current hotplug code to handle
129 * Fn+F2 correctly. We may add other Asus models here later, but
130 * it seems that most of the laptops supported by asus-wmi
131 * don't need to be on this list
132 */
133 if (strcmp(model, "1000H") == 0) {
134 driver->hotplug_wireless = true;
135 pr_info("wlan hotplug enabled\n");
917 } 136 }
137}
138
139static void eeepc_wmi_quirks(struct asus_wmi_driver *driver)
140{
141 driver->hotplug_wireless = hotplug_wireless;
142 eeepc_dmi_check(driver);
143}
144
145static struct asus_wmi_driver asus_wmi_driver = {
146 .name = EEEPC_WMI_FILE,
147 .owner = THIS_MODULE,
148 .event_guid = EEEPC_WMI_EVENT_GUID,
149 .keymap = eeepc_wmi_keymap,
150 .input_name = "Eee PC WMI hotkeys",
151 .input_phys = EEEPC_WMI_FILE "/input0",
152 .probe = eeepc_wmi_probe,
153 .quirks = eeepc_wmi_quirks,
154};
918 155
919 return 0;
920 156
921fail_platform_driver: 157static int __init eeepc_wmi_init(void)
922 eeepc_wmi_remove(platform_device); 158{
923fail_eeepc_wmi: 159 return asus_wmi_register_driver(&asus_wmi_driver);
924 return err;
925} 160}
926 161
927static void __exit eeepc_wmi_exit(void) 162static void __exit eeepc_wmi_exit(void)
928{ 163{
929 eeepc_wmi_remove(platform_device); 164 asus_wmi_unregister_driver(&asus_wmi_driver);
930 platform_driver_unregister(&platform_driver);
931} 165}
932 166
933module_init(eeepc_wmi_init); 167module_init(eeepc_wmi_init);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 9e05af9c41cb..1bc4a7539ba9 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -2,6 +2,7 @@
2 * HP WMI hotkeys 2 * HP WMI hotkeys
3 * 3 *
4 * Copyright (C) 2008 Red Hat <mjg@redhat.com> 4 * Copyright (C) 2008 Red Hat <mjg@redhat.com>
5 * Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi>
5 * 6 *
6 * Portions based on wistron_btns.c: 7 * Portions based on wistron_btns.c:
7 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> 8 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
@@ -51,6 +52,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
51#define HPWMI_HARDWARE_QUERY 0x4 52#define HPWMI_HARDWARE_QUERY 0x4
52#define HPWMI_WIRELESS_QUERY 0x5 53#define HPWMI_WIRELESS_QUERY 0x5
53#define HPWMI_HOTKEY_QUERY 0xc 54#define HPWMI_HOTKEY_QUERY 0xc
55#define HPWMI_WIRELESS2_QUERY 0x1b
54 56
55#define PREFIX "HP WMI: " 57#define PREFIX "HP WMI: "
56#define UNIMP "Unimplemented " 58#define UNIMP "Unimplemented "
@@ -86,7 +88,46 @@ struct bios_args {
86struct bios_return { 88struct bios_return {
87 u32 sigpass; 89 u32 sigpass;
88 u32 return_code; 90 u32 return_code;
89 u32 value; 91};
92
93enum hp_return_value {
94 HPWMI_RET_WRONG_SIGNATURE = 0x02,
95 HPWMI_RET_UNKNOWN_COMMAND = 0x03,
96 HPWMI_RET_UNKNOWN_CMDTYPE = 0x04,
97 HPWMI_RET_INVALID_PARAMETERS = 0x05,
98};
99
100enum hp_wireless2_bits {
101 HPWMI_POWER_STATE = 0x01,
102 HPWMI_POWER_SOFT = 0x02,
103 HPWMI_POWER_BIOS = 0x04,
104 HPWMI_POWER_HARD = 0x08,
105};
106
107#define IS_HWBLOCKED(x) ((x & (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) \
108 != (HPWMI_POWER_BIOS | HPWMI_POWER_HARD))
109#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
110
111struct bios_rfkill2_device_state {
112 u8 radio_type;
113 u8 bus_type;
114 u16 vendor_id;
115 u16 product_id;
116 u16 subsys_vendor_id;
117 u16 subsys_product_id;
118 u8 rfkill_id;
119 u8 power;
120 u8 unknown[4];
121};
122
123/* 7 devices fit into the 128 byte buffer */
124#define HPWMI_MAX_RFKILL2_DEVICES 7
125
126struct bios_rfkill2_state {
127 u8 unknown[7];
128 u8 count;
129 u8 pad[8];
130 struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES];
90}; 131};
91 132
92static const struct key_entry hp_wmi_keymap[] = { 133static const struct key_entry hp_wmi_keymap[] = {
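A side note on the new power bits: under the macros added above, a radio is hardware-unblocked only when both HPWMI_POWER_BIOS and HPWMI_POWER_HARD are set, and software-unblocked only when HPWMI_POWER_SOFT is set. A tiny standalone sketch (macros copied from the hunk above, built as plain userspace C) showing the two extremes:

#include <stdio.h>

#define HPWMI_POWER_SOFT 0x02
#define HPWMI_POWER_BIOS 0x04
#define HPWMI_POWER_HARD 0x08

#define IS_HWBLOCKED(x) ((x & (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) \
			 != (HPWMI_POWER_BIOS | HPWMI_POWER_HARD))
#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)

int main(void)
{
	unsigned char on  = HPWMI_POWER_BIOS | HPWMI_POWER_HARD | HPWMI_POWER_SOFT;
	unsigned char off = HPWMI_POWER_BIOS;

	/* Fully enabled: BIOS+HARD set -> not hw-blocked, SOFT set -> not sw-blocked */
	printf("on : hw=%d sw=%d\n", IS_HWBLOCKED(on), IS_SWBLOCKED(on));	/* 0 0 */
	/* HARD and SOFT cleared: reported as both hard- and soft-blocked */
	printf("off: hw=%d sw=%d\n", IS_HWBLOCKED(off), IS_SWBLOCKED(off));	/* 1 1 */
	return 0;
}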
@@ -108,6 +149,15 @@ static struct rfkill *wifi_rfkill;
108static struct rfkill *bluetooth_rfkill; 149static struct rfkill *bluetooth_rfkill;
109static struct rfkill *wwan_rfkill; 150static struct rfkill *wwan_rfkill;
110 151
152struct rfkill2_device {
153 u8 id;
154 int num;
155 struct rfkill *rfkill;
156};
157
158static int rfkill2_count;
159static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
160
111static const struct dev_pm_ops hp_wmi_pm_ops = { 161static const struct dev_pm_ops hp_wmi_pm_ops = {
112 .resume = hp_wmi_resume_handler, 162 .resume = hp_wmi_resume_handler,
113 .restore = hp_wmi_resume_handler, 163 .restore = hp_wmi_resume_handler,
@@ -129,7 +179,8 @@ static struct platform_driver hp_wmi_driver = {
129 * query: The commandtype -> What should be queried 179 * query: The commandtype -> What should be queried
130 * write: The command -> 0 read, 1 write, 3 ODM specific 180 * write: The command -> 0 read, 1 write, 3 ODM specific
131 * buffer: Buffer used as input and/or output 181 * buffer: Buffer used as input and/or output
132 * buffersize: Size of buffer 182 * insize: Size of input buffer
183 * outsize: Size of output buffer
133 * 184 *
134 * returns zero on success 185 * returns zero on success
135 * an HP WMI query specific error code (which is positive) 186 * an HP WMI query specific error code (which is positive)
@@ -140,25 +191,29 @@ static struct platform_driver hp_wmi_driver = {
140 * size. E.g. Battery info query (0x7) is defined to have 1 byte input 191 * size. E.g. Battery info query (0x7) is defined to have 1 byte input
141 * and 128 byte output. The caller would do: 192 * and 128 byte output. The caller would do:
142 * buffer = kzalloc(128, GFP_KERNEL); 193 * buffer = kzalloc(128, GFP_KERNEL);
143 * ret = hp_wmi_perform_query(0x7, 0, buffer, 128) 194 * ret = hp_wmi_perform_query(0x7, 0, buffer, 1, 128)
144 */ 195 */
145static int hp_wmi_perform_query(int query, int write, u32 *buffer, 196static int hp_wmi_perform_query(int query, int write, void *buffer,
146 int buffersize) 197 int insize, int outsize)
147{ 198{
148 struct bios_return bios_return; 199 struct bios_return *bios_return;
149 acpi_status status; 200 int actual_outsize;
150 union acpi_object *obj; 201 union acpi_object *obj;
151 struct bios_args args = { 202 struct bios_args args = {
152 .signature = 0x55434553, 203 .signature = 0x55434553,
153 .command = write ? 0x2 : 0x1, 204 .command = write ? 0x2 : 0x1,
154 .commandtype = query, 205 .commandtype = query,
155 .datasize = buffersize, 206 .datasize = insize,
156 .data = *buffer, 207 .data = 0,
157 }; 208 };
158 struct acpi_buffer input = { sizeof(struct bios_args), &args }; 209 struct acpi_buffer input = { sizeof(struct bios_args), &args };
159 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 210 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
160 211
161 status = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output); 212 if (WARN_ON(insize > sizeof(args.data)))
213 return -EINVAL;
214 memcpy(&args.data, buffer, insize);
215
216 wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output);
162 217
163 obj = output.pointer; 218 obj = output.pointer;
164 219
@@ -169,10 +224,26 @@ static int hp_wmi_perform_query(int query, int write, u32 *buffer,
169 return -EINVAL; 224 return -EINVAL;
170 } 225 }
171 226
172 bios_return = *((struct bios_return *)obj->buffer.pointer); 227 bios_return = (struct bios_return *)obj->buffer.pointer;
173 228
174 memcpy(buffer, &bios_return.value, sizeof(bios_return.value)); 229 if (bios_return->return_code) {
230 if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
231 printk(KERN_WARNING PREFIX "query 0x%x returned "
232 "error 0x%x\n",
233 query, bios_return->return_code);
234 kfree(obj);
235 return bios_return->return_code;
236 }
237
238 if (!outsize) {
239 /* ignore output data */
240 kfree(obj);
241 return 0;
242 }
175 243
244 actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return)));
245 memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize);
246 memset(buffer + actual_outsize, 0, outsize - actual_outsize);
176 kfree(obj); 247 kfree(obj);
177 return 0; 248 return 0;
178} 249}
@@ -181,7 +252,7 @@ static int hp_wmi_display_state(void)
181{ 252{
182 int state = 0; 253 int state = 0;
183 int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, 254 int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
184 sizeof(state)); 255 sizeof(state), sizeof(state));
185 if (ret) 256 if (ret)
186 return -EINVAL; 257 return -EINVAL;
187 return state; 258 return state;
@@ -191,7 +262,7 @@ static int hp_wmi_hddtemp_state(void)
191{ 262{
192 int state = 0; 263 int state = 0;
193 int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, 264 int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
194 sizeof(state)); 265 sizeof(state), sizeof(state));
195 if (ret) 266 if (ret)
196 return -EINVAL; 267 return -EINVAL;
197 return state; 268 return state;
@@ -201,7 +272,7 @@ static int hp_wmi_als_state(void)
201{ 272{
202 int state = 0; 273 int state = 0;
203 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, 274 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
204 sizeof(state)); 275 sizeof(state), sizeof(state));
205 if (ret) 276 if (ret)
206 return -EINVAL; 277 return -EINVAL;
207 return state; 278 return state;
@@ -211,7 +282,7 @@ static int hp_wmi_dock_state(void)
211{ 282{
212 int state = 0; 283 int state = 0;
213 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, 284 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
214 sizeof(state)); 285 sizeof(state), sizeof(state));
215 286
216 if (ret) 287 if (ret)
217 return -EINVAL; 288 return -EINVAL;
@@ -223,7 +294,7 @@ static int hp_wmi_tablet_state(void)
223{ 294{
224 int state = 0; 295 int state = 0;
225 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, 296 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
226 sizeof(state)); 297 sizeof(state), sizeof(state));
227 if (ret) 298 if (ret)
228 return ret; 299 return ret;
229 300
@@ -237,7 +308,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
237 int ret; 308 int ret;
238 309
239 ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 310 ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
240 &query, sizeof(query)); 311 &query, sizeof(query), 0);
241 if (ret) 312 if (ret)
242 return -EINVAL; 313 return -EINVAL;
243 return 0; 314 return 0;
@@ -252,7 +323,8 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
252 int wireless = 0; 323 int wireless = 0;
253 int mask; 324 int mask;
254 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 325 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
255 &wireless, sizeof(wireless)); 326 &wireless, sizeof(wireless),
327 sizeof(wireless));
256 /* TBD: Pass error */ 328 /* TBD: Pass error */
257 329
258 mask = 0x200 << (r * 8); 330 mask = 0x200 << (r * 8);
@@ -268,7 +340,8 @@ static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
268 int wireless = 0; 340 int wireless = 0;
269 int mask; 341 int mask;
270 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 342 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
271 &wireless, sizeof(wireless)); 343 &wireless, sizeof(wireless),
344 sizeof(wireless));
272 /* TBD: Pass error */ 345 /* TBD: Pass error */
273 346
274 mask = 0x800 << (r * 8); 347 mask = 0x800 << (r * 8);
@@ -279,6 +352,51 @@ static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
279 return true; 352 return true;
280} 353}
281 354
355static int hp_wmi_rfkill2_set_block(void *data, bool blocked)
356{
357 int rfkill_id = (int)(long)data;
358 char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked };
359
360 if (hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 1,
361 buffer, sizeof(buffer), 0))
362 return -EINVAL;
363 return 0;
364}
365
366static const struct rfkill_ops hp_wmi_rfkill2_ops = {
367 .set_block = hp_wmi_rfkill2_set_block,
368};
369
370static int hp_wmi_rfkill2_refresh(void)
371{
372 int err, i;
373 struct bios_rfkill2_state state;
374
375 err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state,
376 0, sizeof(state));
377 if (err)
378 return err;
379
380 for (i = 0; i < rfkill2_count; i++) {
381 int num = rfkill2[i].num;
382 struct bios_rfkill2_device_state *devstate;
383 devstate = &state.device[num];
384
385 if (num >= state.count ||
386 devstate->rfkill_id != rfkill2[i].id) {
387 printk(KERN_WARNING PREFIX "power configuration of "
388 "the wireless devices unexpectedly changed\n");
389 continue;
390 }
391
392 rfkill_set_states(rfkill2[i].rfkill,
393 IS_SWBLOCKED(devstate->power),
394 IS_HWBLOCKED(devstate->power));
395 }
396
397 return 0;
398}
399
282static ssize_t show_display(struct device *dev, struct device_attribute *attr, 400static ssize_t show_display(struct device *dev, struct device_attribute *attr,
283 char *buf) 401 char *buf)
284{ 402{
@@ -329,7 +447,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
329{ 447{
330 u32 tmp = simple_strtoul(buf, NULL, 10); 448 u32 tmp = simple_strtoul(buf, NULL, 10);
331 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, 449 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
332 sizeof(tmp)); 450 sizeof(tmp), sizeof(tmp));
333 if (ret) 451 if (ret)
334 return -EINVAL; 452 return -EINVAL;
335 453
@@ -402,6 +520,7 @@ static void hp_wmi_notify(u32 value, void *context)
402 case HPWMI_BEZEL_BUTTON: 520 case HPWMI_BEZEL_BUTTON:
403 ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, 521 ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
404 &key_code, 522 &key_code,
523 sizeof(key_code),
405 sizeof(key_code)); 524 sizeof(key_code));
406 if (ret) 525 if (ret)
407 break; 526 break;
@@ -412,6 +531,11 @@ static void hp_wmi_notify(u32 value, void *context)
412 key_code); 531 key_code);
413 break; 532 break;
414 case HPWMI_WIRELESS: 533 case HPWMI_WIRELESS:
534 if (rfkill2_count) {
535 hp_wmi_rfkill2_refresh();
536 break;
537 }
538
415 if (wifi_rfkill) 539 if (wifi_rfkill)
416 rfkill_set_states(wifi_rfkill, 540 rfkill_set_states(wifi_rfkill,
417 hp_wmi_get_sw_state(HPWMI_WIFI), 541 hp_wmi_get_sw_state(HPWMI_WIFI),
@@ -502,32 +626,16 @@ static void cleanup_sysfs(struct platform_device *device)
502 device_remove_file(&device->dev, &dev_attr_tablet); 626 device_remove_file(&device->dev, &dev_attr_tablet);
503} 627}
504 628
505static int __devinit hp_wmi_bios_setup(struct platform_device *device) 629static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
506{ 630{
507 int err; 631 int err;
508 int wireless = 0; 632 int wireless = 0;
509 633
510 err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless, 634 err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
511 sizeof(wireless)); 635 sizeof(wireless), sizeof(wireless));
512 if (err) 636 if (err)
513 return err; 637 return err;
514 638
515 err = device_create_file(&device->dev, &dev_attr_display);
516 if (err)
517 goto add_sysfs_error;
518 err = device_create_file(&device->dev, &dev_attr_hddtemp);
519 if (err)
520 goto add_sysfs_error;
521 err = device_create_file(&device->dev, &dev_attr_als);
522 if (err)
523 goto add_sysfs_error;
524 err = device_create_file(&device->dev, &dev_attr_dock);
525 if (err)
526 goto add_sysfs_error;
527 err = device_create_file(&device->dev, &dev_attr_tablet);
528 if (err)
529 goto add_sysfs_error;
530
531 if (wireless & 0x1) { 639 if (wireless & 0x1) {
532 wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, 640 wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
533 RFKILL_TYPE_WLAN, 641 RFKILL_TYPE_WLAN,
@@ -573,14 +681,131 @@ static int __devinit hp_wmi_bios_setup(struct platform_device *device)
573 return 0; 681 return 0;
574register_wwan_err: 682register_wwan_err:
575 rfkill_destroy(wwan_rfkill); 683 rfkill_destroy(wwan_rfkill);
684 wwan_rfkill = NULL;
576 if (bluetooth_rfkill) 685 if (bluetooth_rfkill)
577 rfkill_unregister(bluetooth_rfkill); 686 rfkill_unregister(bluetooth_rfkill);
578register_bluetooth_error: 687register_bluetooth_error:
579 rfkill_destroy(bluetooth_rfkill); 688 rfkill_destroy(bluetooth_rfkill);
689 bluetooth_rfkill = NULL;
580 if (wifi_rfkill) 690 if (wifi_rfkill)
581 rfkill_unregister(wifi_rfkill); 691 rfkill_unregister(wifi_rfkill);
582register_wifi_error: 692register_wifi_error:
583 rfkill_destroy(wifi_rfkill); 693 rfkill_destroy(wifi_rfkill);
694 wifi_rfkill = NULL;
695 return err;
696}
697
698static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
699{
700 int err, i;
701 struct bios_rfkill2_state state;
702 err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state,
703 0, sizeof(state));
704 if (err)
705 return err;
706
707 if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
708 printk(KERN_WARNING PREFIX "unable to parse 0x1b query output\n");
709 return -EINVAL;
710 }
711
712 for (i = 0; i < state.count; i++) {
713 struct rfkill *rfkill;
714 enum rfkill_type type;
715 char *name;
716 switch (state.device[i].radio_type) {
717 case HPWMI_WIFI:
718 type = RFKILL_TYPE_WLAN;
719 name = "hp-wifi";
720 break;
721 case HPWMI_BLUETOOTH:
722 type = RFKILL_TYPE_BLUETOOTH;
723 name = "hp-bluetooth";
724 break;
725 case HPWMI_WWAN:
726 type = RFKILL_TYPE_WWAN;
727 name = "hp-wwan";
728 break;
729 default:
730 printk(KERN_WARNING PREFIX "unknown device type 0x%x\n",
731 state.device[i].radio_type);
732 continue;
733 }
734
735 if (!state.device[i].vendor_id) {
736 printk(KERN_WARNING PREFIX "zero device %d while %d "
737 "reported\n", i, state.count);
738 continue;
739 }
740
741 rfkill = rfkill_alloc(name, &device->dev, type,
742 &hp_wmi_rfkill2_ops, (void *)(long)i);
743 if (!rfkill) {
744 err = -ENOMEM;
745 goto fail;
746 }
747
748 rfkill2[rfkill2_count].id = state.device[i].rfkill_id;
749 rfkill2[rfkill2_count].num = i;
750 rfkill2[rfkill2_count].rfkill = rfkill;
751
752 rfkill_init_sw_state(rfkill,
753 IS_SWBLOCKED(state.device[i].power));
754 rfkill_set_hw_state(rfkill,
755 IS_HWBLOCKED(state.device[i].power));
756
757 if (!(state.device[i].power & HPWMI_POWER_BIOS))
758 printk(KERN_INFO PREFIX "device %s blocked by BIOS\n",
759 name);
760
761 err = rfkill_register(rfkill);
762 if (err) {
763 rfkill_destroy(rfkill);
764 goto fail;
765 }
766
767 rfkill2_count++;
768 }
769
770 return 0;
771fail:
772 for (; rfkill2_count > 0; rfkill2_count--) {
773 rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill);
774 rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill);
775 }
776 return err;
777}
778
779static int __devinit hp_wmi_bios_setup(struct platform_device *device)
780{
781 int err;
782
783 /* clear detected rfkill devices */
784 wifi_rfkill = NULL;
785 bluetooth_rfkill = NULL;
786 wwan_rfkill = NULL;
787 rfkill2_count = 0;
788
789 if (hp_wmi_rfkill_setup(device))
790 hp_wmi_rfkill2_setup(device);
791
792 err = device_create_file(&device->dev, &dev_attr_display);
793 if (err)
794 goto add_sysfs_error;
795 err = device_create_file(&device->dev, &dev_attr_hddtemp);
796 if (err)
797 goto add_sysfs_error;
798 err = device_create_file(&device->dev, &dev_attr_als);
799 if (err)
800 goto add_sysfs_error;
801 err = device_create_file(&device->dev, &dev_attr_dock);
802 if (err)
803 goto add_sysfs_error;
804 err = device_create_file(&device->dev, &dev_attr_tablet);
805 if (err)
806 goto add_sysfs_error;
807 return 0;
808
584add_sysfs_error: 809add_sysfs_error:
585 cleanup_sysfs(device); 810 cleanup_sysfs(device);
586 return err; 811 return err;
@@ -588,8 +813,14 @@ add_sysfs_error:
588 813
589static int __exit hp_wmi_bios_remove(struct platform_device *device) 814static int __exit hp_wmi_bios_remove(struct platform_device *device)
590{ 815{
816 int i;
591 cleanup_sysfs(device); 817 cleanup_sysfs(device);
592 818
819 for (i = 0; i < rfkill2_count; i++) {
820 rfkill_unregister(rfkill2[i].rfkill);
821 rfkill_destroy(rfkill2[i].rfkill);
822 }
823
593 if (wifi_rfkill) { 824 if (wifi_rfkill) {
594 rfkill_unregister(wifi_rfkill); 825 rfkill_unregister(wifi_rfkill);
595 rfkill_destroy(wifi_rfkill); 826 rfkill_destroy(wifi_rfkill);
@@ -622,6 +853,9 @@ static int hp_wmi_resume_handler(struct device *device)
622 input_sync(hp_wmi_input_dev); 853 input_sync(hp_wmi_input_dev);
623 } 854 }
624 855
856 if (rfkill2_count)
857 hp_wmi_rfkill2_refresh();
858
625 if (wifi_rfkill) 859 if (wifi_rfkill)
626 rfkill_set_states(wifi_rfkill, 860 rfkill_set_states(wifi_rfkill,
627 hp_wmi_get_sw_state(HPWMI_WIFI), 861 hp_wmi_get_sw_state(HPWMI_WIFI),
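Every hp-wmi call site in the hunks above gains a fifth argument, splitting the old single buffer size into separate input and output sizes for hp_wmi_perform_query(). A minimal sketch of a read-style caller under that assumed (query, write, buffer, insize, outsize) ordering, mirroring the 0x1b call sites; the wrapper name is hypothetical:

	/* Hypothetical helper; assumes the five-argument query signature above */
	static int hp_wmi_read_int(int query, int *val)
	{
		int tmp = 0;
		int ret;

		/* nothing is sent (insize 0); an int-sized reply is expected back */
		ret = hp_wmi_perform_query(query, 0, &tmp, 0, sizeof(tmp));
		if (ret)
			return ret < 0 ? ret : -EINVAL;

		*val = tmp;
		return 0;
	}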
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 114d95247cdf..21b101899bae 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -459,6 +459,8 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
459 if (test_bit(vpc_bit, &vpc1)) { 459 if (test_bit(vpc_bit, &vpc1)) {
460 if (vpc_bit == 9) 460 if (vpc_bit == 9)
461 ideapad_sync_rfk_state(adevice); 461 ideapad_sync_rfk_state(adevice);
462 else if (vpc_bit == 4)
463 read_ec_data(handle, 0x12, &vpc2);
462 else 464 else
463 ideapad_input_report(priv, vpc_bit); 465 ideapad_input_report(priv, vpc_bit);
464 } 466 }
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 1294a39373ba..85c8ad43c0c5 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1111,7 +1111,7 @@ static int ips_monitor(void *data)
1111 last_msecs = jiffies_to_msecs(jiffies); 1111 last_msecs = jiffies_to_msecs(jiffies);
1112 expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); 1112 expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);
1113 1113
1114 __set_current_state(TASK_UNINTERRUPTIBLE); 1114 __set_current_state(TASK_INTERRUPTIBLE);
1115 mod_timer(&timer, expire); 1115 mod_timer(&timer, expire);
1116 schedule(); 1116 schedule();
1117 1117
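The one-line intel_ips fix above puts the monitor thread into an interruptible sleep before arming its sample timer; an uninterruptible sleep here is counted toward the load average even though the thread is only waiting for the next poll. The idiom, sketched with the names used in ips_monitor():

	/* sleep until the sample timer fires and wakes the thread */
	__set_current_state(TASK_INTERRUPTIBLE);
	mod_timer(&timer, jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD));
	schedule();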
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
new file mode 100644
index 000000000000..213e79ba68d5
--- /dev/null
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -0,0 +1,148 @@
1/*
2 * Power button driver for Medfield.
3 *
4 * Copyright (C) 2010 Intel Corp
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/slab.h>
24#include <linux/platform_device.h>
25#include <linux/input.h>
26#include <asm/intel_scu_ipc.h>
27
28#define DRIVER_NAME "msic_power_btn"
29
30#define MSIC_IRQ_STAT 0x02
31 #define MSIC_IRQ_PB (1 << 0)
32#define MSIC_PB_CONFIG 0x3e
33#define MSIC_PB_STATUS 0x3f
34 #define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
35
36struct mfld_pb_priv {
37 struct input_dev *input;
38 unsigned int irq;
39};
40
41static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
42{
43 struct mfld_pb_priv *priv = dev_id;
44 int ret;
45 u8 pbstat;
46
47 ret = intel_scu_ipc_ioread8(MSIC_PB_STATUS, &pbstat);
48 if (ret < 0)
49 return IRQ_HANDLED;
50
51 input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL));
52 input_sync(priv->input);
53
54 return IRQ_HANDLED;
55}
56
57static int __devinit mfld_pb_probe(struct platform_device *pdev)
58{
59 struct mfld_pb_priv *priv;
60 struct input_dev *input;
61 int irq;
62 int error;
63
64 irq = platform_get_irq(pdev, 0);
65 if (irq < 0)
66 return -EINVAL;
67
68 priv = kzalloc(sizeof(struct mfld_pb_priv), GFP_KERNEL);
69 input = input_allocate_device();
70 if (!priv || !input) {
71 error = -ENOMEM;
72 goto err_free_mem;
73 }
74
75 priv->input = input;
76 priv->irq = irq;
77
78 input->name = pdev->name;
79 input->phys = "power-button/input0";
80 input->id.bustype = BUS_HOST;
81 input->dev.parent = &pdev->dev;
82
83 input_set_capability(input, EV_KEY, KEY_POWER);
84
85 error = request_threaded_irq(priv->irq, NULL, mfld_pb_isr,
86 0, DRIVER_NAME, priv);
87 if (error) {
88 dev_err(&pdev->dev,
89 "unable to request irq %d for mfld power button\n",
90 irq);
91 goto err_free_mem;
92 }
93
94 error = input_register_device(input);
95 if (error) {
96 dev_err(&pdev->dev,
97 "unable to register input dev, error %d\n", error);
98 goto err_free_irq;
99 }
100
101 platform_set_drvdata(pdev, priv);
102 return 0;
103
104err_free_irq:
105 free_irq(priv->irq, priv);
106err_free_mem:
107 input_free_device(input);
108 kfree(priv);
109 return error;
110}
111
112static int __devexit mfld_pb_remove(struct platform_device *pdev)
113{
114 struct mfld_pb_priv *priv = platform_get_drvdata(pdev);
115
116 free_irq(priv->irq, priv);
117 input_unregister_device(priv->input);
118 kfree(priv);
119
120 platform_set_drvdata(pdev, NULL);
121 return 0;
122}
123
124static struct platform_driver mfld_pb_driver = {
125 .driver = {
126 .name = DRIVER_NAME,
127 .owner = THIS_MODULE,
128 },
129 .probe = mfld_pb_probe,
130 .remove = __devexit_p(mfld_pb_remove),
131};
132
133static int __init mfld_pb_init(void)
134{
135 return platform_driver_register(&mfld_pb_driver);
136}
137module_init(mfld_pb_init);
138
139static void __exit mfld_pb_exit(void)
140{
141 platform_driver_unregister(&mfld_pb_driver);
142}
143module_exit(mfld_pb_exit);
144
145MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>");
146MODULE_DESCRIPTION("Intel Medfield Power Button Driver");
147MODULE_LICENSE("GPL v2");
148MODULE_ALIAS("platform:" DRIVER_NAME);
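This driver only binds to a platform device named "msic_power_btn" that carries the MSIC power-button IRQ; the Medfield platform code is expected to provide it. A hedged sketch of what that board-side registration could look like; the IRQ number is a placeholder, not taken from the patch:

	/* Hypothetical board-side registration; the IRQ value is made up */
	static struct resource msic_pb_resources[] = {
		{
			.start	= 30,			/* placeholder MSIC IRQ */
			.end	= 30,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device msic_pb_device = {
		.name		= "msic_power_btn",	/* must match DRIVER_NAME */
		.id		= -1,
		.resource	= msic_pb_resources,
		.num_resources	= ARRAY_SIZE(msic_pb_resources),
	};

	/* platform_get_irq(pdev, 0) in the probe above picks this resource up */
	platform_device_register(&msic_pb_device);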
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
new file mode 100644
index 000000000000..6c12db503161
--- /dev/null
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -0,0 +1,576 @@
1/*
2 * intel_mid_thermal.c - Intel MID platform thermal driver
3 *
4 * Copyright (C) 2011 Intel Corporation
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
20 *
21 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 * Author: Durgadoss R <durgadoss.r@intel.com>
23 */
24
25#define pr_fmt(fmt) "intel_mid_thermal: " fmt
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/err.h>
30#include <linux/param.h>
31#include <linux/device.h>
32#include <linux/platform_device.h>
33#include <linux/slab.h>
34#include <linux/pm.h>
35#include <linux/thermal.h>
36
37#include <asm/intel_scu_ipc.h>
38
39/* Number of thermal sensors */
40#define MSIC_THERMAL_SENSORS 4
41
42/* ADC1 - thermal registers */
43#define MSIC_THERM_ADC1CNTL1 0x1C0
44#define MSIC_ADC_ENBL 0x10
45#define MSIC_ADC_START 0x08
46
47#define MSIC_THERM_ADC1CNTL3 0x1C2
48#define MSIC_ADCTHERM_ENBL 0x04
49#define MSIC_ADCRRDATA_ENBL 0x05
50#define MSIC_CHANL_MASK_VAL 0x0F
51
52#define MSIC_STOPBIT_MASK 16
53#define MSIC_ADCTHERM_MASK 4
54#define ADC_CHANLS_MAX 15 /* Number of ADC channels */
55#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
56
57/* ADC channel code values */
58#define SKIN_SENSOR0_CODE 0x08
59#define SKIN_SENSOR1_CODE 0x09
60#define SYS_SENSOR_CODE 0x0A
61#define MSIC_DIE_SENSOR_CODE 0x03
62
63#define SKIN_THERM_SENSOR0 0
64#define SKIN_THERM_SENSOR1 1
65#define SYS_THERM_SENSOR2 2
66#define MSIC_DIE_THERM_SENSOR3 3
67
68/* ADC code range */
69#define ADC_MAX 977
70#define ADC_MIN 162
71#define ADC_VAL0C 887
72#define ADC_VAL20C 720
73#define ADC_VAL40C 508
74#define ADC_VAL60C 315
75
76/* ADC base addresses */
77#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
78#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
79
80/* MSIC die attributes */
81#define MSIC_DIE_ADC_MIN 488
82#define MSIC_DIE_ADC_MAX 1004
83
 84/* This holds the index of the first free ADC channel,
 85 * among the 15 channels
 86 */
87static int channel_index;
88
89struct platform_info {
90 struct platform_device *pdev;
91 struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
92};
93
94struct thermal_device_info {
95 unsigned int chnl_addr;
96 int direct;
97 /* This holds the current temperature in millidegree celsius */
98 long curr_temp;
99};
100
101/**
102 * to_msic_die_temp - converts adc_val to msic_die temperature
103 * @adc_val: ADC value to be converted
104 *
105 * Can sleep
106 */
107static int to_msic_die_temp(uint16_t adc_val)
108{
109 return (368 * (adc_val) / 1000) - 220;
110}
111
112/**
113 * is_valid_adc - checks whether the adc code is within the defined range
114 * @min: minimum value for the sensor
115 * @max: maximum value for the sensor
116 *
117 * Can sleep
118 */
119static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
120{
121 return (adc_val >= min) && (adc_val <= max);
122}
123
124/**
125 * adc_to_temp - converts the ADC code to temperature in C
 126 * @direct: true if this channel is a direct index
127 * @adc_val: the adc_val that needs to be converted
128 * @tp: temperature return value
129 *
 130 * Linear approximation is used to convert the skin adc value into temperature.
 131 * This technique is used to avoid a very long look-up table to get
 132 * the appropriate temp value from the ADC value.
 133 * The adc code vs sensor temp curve is split into five parts
 134 * to achieve a close approximation of the temperature with less than
 135 * 0.5C error
136 */
137static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
138{
139 int temp;
140
141 /* Direct conversion for die temperature */
142 if (direct) {
143 if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
144 *tp = to_msic_die_temp(adc_val) * 1000;
145 return 0;
146 }
147 return -ERANGE;
148 }
149
150 if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
151 return -ERANGE;
152
153 /* Linear approximation for skin temperature */
154 if (adc_val > ADC_VAL0C)
155 temp = 177 - (adc_val/5);
156 else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
157 temp = 111 - (adc_val/8);
158 else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
159 temp = 92 - (adc_val/10);
160 else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
161 temp = 91 - (adc_val/10);
162 else
163 temp = 112 - (adc_val/6);
164
 165	/* Convert the temperature in Celsius to millidegree Celsius */
166 *tp = temp * 1000;
167 return 0;
168}
169
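As a quick sanity check of the piecewise approximation: an ADC code of 720 (ADC_VAL20C) fails the first two comparisons and lands in the 92 - adc/10 branch, giving 92 - 72 = 20, i.e. 20000 millidegrees Celsius, which matches the constant's name. A minimal sketch of a caller, using the helper the same way mid_read_temp() below does:

	unsigned long t;

	/* 720 is ADC_VAL20C; expect t == 20000 (20 C in millidegrees) */
	if (!adc_to_temp(0, 720, &t))
		pr_debug("skin temperature: %lu mC\n", t);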
170/**
171 * mid_read_temp - read sensors for temperature
172 * @temp: holds the current temperature for the sensor after reading
173 *
174 * reads the adc_code from the channel and converts it to real
175 * temperature. The converted value is stored in temp.
176 *
177 * Can sleep
178 */
179static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
180{
181 struct thermal_device_info *td_info = tzd->devdata;
182 uint16_t adc_val, addr;
183 uint8_t data = 0;
184 int ret;
185 unsigned long curr_temp;
186
187
188 addr = td_info->chnl_addr;
189
190 /* Enable the msic for conversion before reading */
191 ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
192 if (ret)
193 return ret;
194
195 /* Re-toggle the RRDATARD bit (temporary workaround) */
196 ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
197 if (ret)
198 return ret;
199
200 /* Read the higher bits of data */
201 ret = intel_scu_ipc_ioread8(addr, &data);
202 if (ret)
203 return ret;
204
 205	/* Shift bits to accommodate the lower two data bits */
206 adc_val = (data << 2);
207 addr++;
208
209 ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */
210 if (ret)
211 return ret;
212
 213	/* Mask off the lower two bits and add them to the higher bits */
214 data &= 03;
215 adc_val += data;
216
217 /* Convert ADC value to temperature */
218 ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
219 if (ret == 0)
220 *temp = td_info->curr_temp = curr_temp;
221 return ret;
222}
223
224/**
225 * configure_adc - enables/disables the ADC for conversion
 226 * @val: zero disables the ADC, non-zero enables the ADC
227 *
228 * Enable/Disable the ADC depending on the argument
229 *
230 * Can sleep
231 */
232static int configure_adc(int val)
233{
234 int ret;
235 uint8_t data;
236
237 ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
238 if (ret)
239 return ret;
240
241 if (val) {
242 /* Enable and start the ADC */
243 data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
244 } else {
245 /* Just stop the ADC */
246 data &= (~MSIC_ADC_START);
247 }
248
249 return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
250}
251
252/**
253 * set_up_therm_channel - enable thermal channel for conversion
254 * @base_addr: index of free msic ADC channel
255 *
 256 * Enable all the sensor channels for conversion
257 *
258 * Can sleep
259 */
260static int set_up_therm_channel(u16 base_addr)
261{
262 int ret;
263
264 /* Enable all the sensor channels */
265 ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
266 if (ret)
267 return ret;
268
269 ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
270 if (ret)
271 return ret;
272
273 ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
274 if (ret)
275 return ret;
276
 277	/* Since this is the last channel, set the stop bit to 1
 278	 * by ORing the DIE_SENSOR_CODE with 0x10 */
279 ret = intel_scu_ipc_iowrite8(base_addr + 3,
280 (MSIC_DIE_SENSOR_CODE | 0x10));
281 if (ret)
282 return ret;
283
284 /* Enable ADC and start it */
285 return configure_adc(1);
286}
287
288/**
289 * reset_stopbit - sets the stop bit to 0 on the given channel
290 * @addr: address of the channel
291 *
292 * Can sleep
293 */
294static int reset_stopbit(uint16_t addr)
295{
296 int ret;
297 uint8_t data;
298 ret = intel_scu_ipc_ioread8(addr, &data);
299 if (ret)
300 return ret;
301 /* Set the stop bit to zero */
302 return intel_scu_ipc_iowrite8(addr, (data & 0xEF));
303}
304
305/**
306 * find_free_channel - finds an empty channel for conversion
307 *
 308 * If the ADC is not enabled then start using the 0th channel
 309 * itself. Otherwise find an empty channel by looking for a
 310 * channel in which the stop bit is set to 1. Returns the index
 311 * of the first free channel on success, or an error code.
312 *
313 * Context: can sleep
314 *
315 * FIXME: Ultimately the channel allocator will move into the intel_scu_ipc
316 * code.
317 */
318static int find_free_channel(void)
319{
320 int ret;
321 int i;
322 uint8_t data;
323
324 /* check whether ADC is enabled */
325 ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
326 if (ret)
327 return ret;
328
329 if ((data & MSIC_ADC_ENBL) == 0)
330 return 0;
331
332 /* ADC is already enabled; Looking for an empty channel */
333 for (i = 0; i < ADC_CHANLS_MAX; i++) {
334 ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
335 if (ret)
336 return ret;
337
338 if (data & MSIC_STOPBIT_MASK) {
339 ret = i;
340 break;
341 }
342 }
343 return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
344}
345
346/**
347 * mid_initialize_adc - initializing the ADC
348 * @dev: our device structure
349 *
350 * Initialize the ADC for reading thermistor values. Can sleep.
351 */
352static int mid_initialize_adc(struct device *dev)
353{
354 u8 data;
355 u16 base_addr;
356 int ret;
357
358 /*
359 * Ensure that adctherm is disabled before we
360 * initialize the ADC
361 */
362 ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
363 if (ret)
364 return ret;
365
366 if (data & MSIC_ADCTHERM_MASK)
367 dev_warn(dev, "ADCTHERM already set");
368
369 /* Index of the first channel in which the stop bit is set */
370 channel_index = find_free_channel();
371 if (channel_index < 0) {
372 dev_err(dev, "No free ADC channels");
373 return channel_index;
374 }
375
376 base_addr = ADC_CHNL_START_ADDR + channel_index;
377
378 if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
379 /* Reset stop bit for channels other than 0 and 12 */
380 ret = reset_stopbit(base_addr);
381 if (ret)
382 return ret;
383
384 /* Index of the first free channel */
385 base_addr++;
386 channel_index++;
387 }
388
389 ret = set_up_therm_channel(base_addr);
390 if (ret) {
391 dev_err(dev, "unable to enable ADC");
392 return ret;
393 }
394 dev_dbg(dev, "ADC initialization successful");
395 return ret;
396}
397
398/**
399 * initialize_sensor - sets default temp and timer ranges
400 * @index: index of the sensor
401 *
402 * Context: can sleep
403 */
404static struct thermal_device_info *initialize_sensor(int index)
405{
406 struct thermal_device_info *td_info =
407 kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
408
409 if (!td_info)
410 return NULL;
411
412 /* Set the base addr of the channel for this sensor */
413 td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
414 /* Sensor 3 is direct conversion */
415 if (index == 3)
416 td_info->direct = 1;
417 return td_info;
418}
419
420/**
421 * mid_thermal_resume - resume routine
422 * @pdev: platform device structure
423 *
424 * mid thermal resume: re-initializes the adc. Can sleep.
425 */
426static int mid_thermal_resume(struct platform_device *pdev)
427{
428 return mid_initialize_adc(&pdev->dev);
429}
430
431/**
432 * mid_thermal_suspend - suspend routine
433 * @pdev: platform device structure
434 *
435 * mid thermal suspend implements the suspend functionality
436 * by stopping the ADC. Can sleep.
437 */
438static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
439{
440 /*
 441	 * This just stops the ADC and does not disable it; a
 442	 * temporary workaround until we have a generic ADC driver.
 443	 * Passing 0 to configure_adc() only clears the start bit.
444 */
445 return configure_adc(0);
446}
447
448/**
449 * read_curr_temp - reads the current temperature and stores in temp
450 * @temp: holds the current temperature value after reading
451 *
452 * Can sleep
453 */
454static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
455{
456 WARN_ON(tzd == NULL);
457 return mid_read_temp(tzd, temp);
458}
459
460/* Can't be const */
461static struct thermal_zone_device_ops tzd_ops = {
462 .get_temp = read_curr_temp,
463};
464
465
466/**
467 * mid_thermal_probe - mfld thermal initialize
468 * @pdev: platform device structure
469 *
470 * mid thermal probe initializes the hardware and registers
471 * all the sensors with the generic thermal framework. Can sleep.
472 */
473static int mid_thermal_probe(struct platform_device *pdev)
474{
475 static char *name[MSIC_THERMAL_SENSORS] = {
476 "skin0", "skin1", "sys", "msicdie"
477 };
478
479 int ret;
480 int i;
481 struct platform_info *pinfo;
482
483 pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
484 if (!pinfo)
485 return -ENOMEM;
486
487 /* Initializing the hardware */
488 ret = mid_initialize_adc(&pdev->dev);
489 if (ret) {
490 dev_err(&pdev->dev, "ADC init failed");
491 kfree(pinfo);
492 return ret;
493 }
494
495 /* Register each sensor with the generic thermal framework*/
496 for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
497 pinfo->tzd[i] = thermal_zone_device_register(name[i],
498 0, initialize_sensor(i),
499 &tzd_ops, 0, 0, 0, 0);
500 if (IS_ERR(pinfo->tzd[i]))
501 goto reg_fail;
502 }
503
504 pinfo->pdev = pdev;
505 platform_set_drvdata(pdev, pinfo);
506 return 0;
507
508reg_fail:
509 ret = PTR_ERR(pinfo->tzd[i]);
510 while (--i >= 0)
511 thermal_zone_device_unregister(pinfo->tzd[i]);
512 configure_adc(0);
513 kfree(pinfo);
514 return ret;
515}
516
517/**
518 * mid_thermal_remove - mfld thermal finalize
519 * @dev: platform device structure
520 *
 521 * MFLD thermal remove unregisters all the sensors from the generic
522 * thermal framework. Can sleep.
523 */
524static int mid_thermal_remove(struct platform_device *pdev)
525{
526 int i;
527 struct platform_info *pinfo = platform_get_drvdata(pdev);
528
529 for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
530 thermal_zone_device_unregister(pinfo->tzd[i]);
531
532 platform_set_drvdata(pdev, NULL);
533
534 /* Stop the ADC */
535 return configure_adc(0);
536}
537
538/*********************************************************************
539 * Driver initialisation and finalization
540 *********************************************************************/
541
542#define DRIVER_NAME "msic_sensor"
543
544static const struct platform_device_id therm_id_table[] = {
545 { DRIVER_NAME, 1 },
546 { }
547};
548
549static struct platform_driver mid_thermal_driver = {
550 .driver = {
551 .name = DRIVER_NAME,
552 .owner = THIS_MODULE,
553 },
554 .probe = mid_thermal_probe,
555 .suspend = mid_thermal_suspend,
556 .resume = mid_thermal_resume,
557 .remove = __devexit_p(mid_thermal_remove),
558 .id_table = therm_id_table,
559};
560
561static int __init mid_thermal_module_init(void)
562{
563 return platform_driver_register(&mid_thermal_driver);
564}
565
566static void __exit mid_thermal_module_exit(void)
567{
568 platform_driver_unregister(&mid_thermal_driver);
569}
570
571module_init(mid_thermal_module_init);
572module_exit(mid_thermal_module_exit);
573
574MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
575MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
576MODULE_LICENSE("GPL");
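The per-sensor data address set up in initialize_sensor() is plain arithmetic over the register map declared at the top of the file: data registers start at ADC_DATA_START_ADDR and step by two per channel. A small sketch of the resulting layout, assuming find_free_channel() returned channel 0:

	int index = 0;	/* 0..3: skin0, skin1, sys, msicdie */
	u16 chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);

	/*
	 * With channel_index == 0: skin0 -> 0x1D4, skin1 -> 0x1D6,
	 * sys -> 0x1D8, msicdie -> 0x1DA.
	 */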
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index 2b11a33325e6..bde47e9080cd 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -485,7 +485,7 @@ EXPORT_SYMBOL(rar_lock);
485 * 485 *
486 * The register_rar function is to used by other device drivers 486 * The register_rar function is to used by other device drivers
487 * to ensure that this driver is ready. As we cannot be sure of 487 * to ensure that this driver is ready. As we cannot be sure of
488 * the compile/execute order of drivers in ther kernel, it is 488 * the compile/execute order of drivers in the kernel, it is
489 * best to give this driver a callback function to call when 489 * best to give this driver a callback function to call when
490 * it is ready to give out addresses. The callback function 490 * it is ready to give out addresses. The callback function
491 * would have those steps that continue the initialization of 491 * would have those steps that continue the initialization of
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index a91d510a798b..940accbe28d3 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -9,7 +9,7 @@
9 * as published by the Free Software Foundation; version 2 9 * as published by the Free Software Foundation; version 2
10 * of the License. 10 * of the License.
11 * 11 *
12 * SCU runing in ARC processor communicates with other entity running in IA 12 * SCU running in ARC processor communicates with other entity running in IA
13 * core through IPC mechanism which in turn messaging between IA core ad SCU. 13 * core through IPC mechanism which in turn messaging between IA core ad SCU.
14 * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and 14 * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and
15 * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with 15 * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 142d38579314..23fb2afda00b 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -51,6 +51,8 @@
51 * laptop as MSI S270. YMMV. 51 * laptop as MSI S270. YMMV.
52 */ 52 */
53 53
54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
55
54#include <linux/module.h> 56#include <linux/module.h>
55#include <linux/kernel.h> 57#include <linux/kernel.h>
56#include <linux/init.h> 58#include <linux/init.h>
@@ -60,6 +62,8 @@
60#include <linux/platform_device.h> 62#include <linux/platform_device.h>
61#include <linux/rfkill.h> 63#include <linux/rfkill.h>
62#include <linux/i8042.h> 64#include <linux/i8042.h>
65#include <linux/input.h>
66#include <linux/input/sparse-keymap.h>
63 67
64#define MSI_DRIVER_VERSION "0.5" 68#define MSI_DRIVER_VERSION "0.5"
65 69
@@ -78,6 +82,9 @@
78#define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d 82#define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d
79#define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0) 83#define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0)
80 84
85#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4
86#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
87
81static int msi_laptop_resume(struct platform_device *device); 88static int msi_laptop_resume(struct platform_device *device);
82 89
83#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f 90#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
@@ -90,6 +97,14 @@ static int auto_brightness;
90module_param(auto_brightness, int, 0); 97module_param(auto_brightness, int, 0);
91MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)"); 98MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)");
92 99
100static const struct key_entry msi_laptop_keymap[] = {
101 {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} }, /* Touch Pad On */
 102 {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} }, /* Touch Pad Off */
103 {KE_END, 0}
104};
105
106static struct input_dev *msi_laptop_input_dev;
107
93static bool old_ec_model; 108static bool old_ec_model;
94static int wlan_s, bluetooth_s, threeg_s; 109static int wlan_s, bluetooth_s, threeg_s;
95static int threeg_exists; 110static int threeg_exists;
@@ -432,8 +447,7 @@ static struct platform_device *msipf_device;
432 447
433static int dmi_check_cb(const struct dmi_system_id *id) 448static int dmi_check_cb(const struct dmi_system_id *id)
434{ 449{
435 printk(KERN_INFO "msi-laptop: Identified laptop model '%s'.\n", 450 pr_info("Identified laptop model '%s'.\n", id->ident);
436 id->ident);
437 return 1; 451 return 1;
438} 452}
439 453
@@ -605,6 +619,21 @@ static void msi_update_rfkill(struct work_struct *ignored)
605} 619}
606static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill); 620static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill);
607 621
622static void msi_send_touchpad_key(struct work_struct *ignored)
623{
624 u8 rdata;
625 int result;
626
627 result = ec_read(MSI_STANDARD_EC_TOUCHPAD_ADDRESS, &rdata);
628 if (result < 0)
629 return;
630
631 sparse_keymap_report_event(msi_laptop_input_dev,
632 (rdata & MSI_STANDARD_EC_TOUCHPAD_MASK) ?
633 KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true);
634}
635static DECLARE_DELAYED_WORK(msi_touchpad_work, msi_send_touchpad_key);
636
608static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, 637static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
609 struct serio *port) 638 struct serio *port)
610{ 639{
@@ -613,12 +642,17 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
613 if (str & 0x20) 642 if (str & 0x20)
614 return false; 643 return false;
615 644
616 /* 0x54 wwan, 0x62 bluetooth, 0x76 wlan*/ 645 /* 0x54 wwan, 0x62 bluetooth, 0x76 wlan, 0xE4 touchpad toggle*/
617 if (unlikely(data == 0xe0)) { 646 if (unlikely(data == 0xe0)) {
618 extended = true; 647 extended = true;
619 return false; 648 return false;
620 } else if (unlikely(extended)) { 649 } else if (unlikely(extended)) {
650 extended = false;
621 switch (data) { 651 switch (data) {
652 case 0xE4:
653 schedule_delayed_work(&msi_touchpad_work,
654 round_jiffies_relative(0.5 * HZ));
655 break;
622 case 0x54: 656 case 0x54:
623 case 0x62: 657 case 0x62:
624 case 0x76: 658 case 0x76:
@@ -626,7 +660,6 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
626 round_jiffies_relative(0.5 * HZ)); 660 round_jiffies_relative(0.5 * HZ));
627 break; 661 break;
628 } 662 }
629 extended = false;
630 } 663 }
631 664
632 return false; 665 return false;
@@ -731,6 +764,42 @@ static int msi_laptop_resume(struct platform_device *device)
731 return 0; 764 return 0;
732} 765}
733 766
767static int __init msi_laptop_input_setup(void)
768{
769 int err;
770
771 msi_laptop_input_dev = input_allocate_device();
772 if (!msi_laptop_input_dev)
773 return -ENOMEM;
774
775 msi_laptop_input_dev->name = "MSI Laptop hotkeys";
776 msi_laptop_input_dev->phys = "msi-laptop/input0";
777 msi_laptop_input_dev->id.bustype = BUS_HOST;
778
779 err = sparse_keymap_setup(msi_laptop_input_dev,
780 msi_laptop_keymap, NULL);
781 if (err)
782 goto err_free_dev;
783
784 err = input_register_device(msi_laptop_input_dev);
785 if (err)
786 goto err_free_keymap;
787
788 return 0;
789
790err_free_keymap:
791 sparse_keymap_free(msi_laptop_input_dev);
792err_free_dev:
793 input_free_device(msi_laptop_input_dev);
794 return err;
795}
796
797static void msi_laptop_input_destroy(void)
798{
799 sparse_keymap_free(msi_laptop_input_dev);
800 input_unregister_device(msi_laptop_input_dev);
801}
802
734static int load_scm_model_init(struct platform_device *sdev) 803static int load_scm_model_init(struct platform_device *sdev)
735{ 804{
736 u8 data; 805 u8 data;
@@ -759,16 +828,23 @@ static int load_scm_model_init(struct platform_device *sdev)
759 if (result < 0) 828 if (result < 0)
760 goto fail_rfkill; 829 goto fail_rfkill;
761 830
831 /* setup input device */
832 result = msi_laptop_input_setup();
833 if (result)
834 goto fail_input;
835
762 result = i8042_install_filter(msi_laptop_i8042_filter); 836 result = i8042_install_filter(msi_laptop_i8042_filter);
763 if (result) { 837 if (result) {
764 printk(KERN_ERR 838 pr_err("Unable to install key filter\n");
765 "msi-laptop: Unable to install key filter\n");
766 goto fail_filter; 839 goto fail_filter;
767 } 840 }
768 841
769 return 0; 842 return 0;
770 843
771fail_filter: 844fail_filter:
845 msi_laptop_input_destroy();
846
847fail_input:
772 rfkill_cleanup(); 848 rfkill_cleanup();
773 849
774fail_rfkill: 850fail_rfkill:
@@ -799,7 +875,7 @@ static int __init msi_init(void)
799 /* Register backlight stuff */ 875 /* Register backlight stuff */
800 876
801 if (acpi_video_backlight_support()) { 877 if (acpi_video_backlight_support()) {
802 printk(KERN_INFO "MSI: Brightness ignored, must be controlled " 878 pr_info("Brightness ignored, must be controlled "
803 "by ACPI video driver\n"); 879 "by ACPI video driver\n");
804 } else { 880 } else {
805 struct backlight_properties props; 881 struct backlight_properties props;
@@ -854,7 +930,7 @@ static int __init msi_init(void)
854 if (auto_brightness != 2) 930 if (auto_brightness != 2)
855 set_auto_brightness(auto_brightness); 931 set_auto_brightness(auto_brightness);
856 932
857 printk(KERN_INFO "msi-laptop: driver "MSI_DRIVER_VERSION" successfully loaded.\n"); 933 pr_info("driver "MSI_DRIVER_VERSION" successfully loaded.\n");
858 934
859 return 0; 935 return 0;
860 936
@@ -886,6 +962,7 @@ static void __exit msi_cleanup(void)
886{ 962{
887 if (load_scm_model) { 963 if (load_scm_model) {
888 i8042_remove_filter(msi_laptop_i8042_filter); 964 i8042_remove_filter(msi_laptop_i8042_filter);
965 msi_laptop_input_destroy();
889 cancel_delayed_work_sync(&msi_rfkill_work); 966 cancel_delayed_work_sync(&msi_rfkill_work);
890 rfkill_cleanup(); 967 rfkill_cleanup();
891 } 968 }
@@ -901,7 +978,7 @@ static void __exit msi_cleanup(void)
901 if (auto_brightness != 2) 978 if (auto_brightness != 2)
902 set_auto_brightness(1); 979 set_auto_brightness(1);
903 980
904 printk(KERN_INFO "msi-laptop: driver unloaded.\n"); 981 pr_info("driver unloaded.\n");
905} 982}
906 983
907module_init(msi_init); 984module_init(msi_init);
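In the msi-laptop keymap added above, the "scancode" column and the reported key are the same KEY_TOUCHPAD_* value, which is why msi_send_touchpad_key() can pass the keycode straight to sparse_keymap_report_event(). The general sparse-keymap pattern, sketched with a hypothetical scancode:

	/* Hypothetical keymap: raw event code 0x01 maps to KEY_TOUCHPAD_TOGGLE */
	static const struct key_entry example_keymap[] = {
		{ KE_KEY, 0x01, { KEY_TOUCHPAD_TOGGLE } },
		{ KE_END, 0 }
	};

	/* after sparse_keymap_setup(input_dev, example_keymap, NULL): */
	sparse_keymap_report_event(input_dev, 0x01, 1, true);	/* press, then autorelease */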
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
new file mode 100644
index 000000000000..de434c6dc2d6
--- /dev/null
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -0,0 +1,832 @@
1/*
2 * Samsung Laptop driver
3 *
4 * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de)
5 * Copyright (C) 2009,2011 Novell Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/delay.h>
18#include <linux/pci.h>
19#include <linux/backlight.h>
20#include <linux/fb.h>
21#include <linux/dmi.h>
22#include <linux/platform_device.h>
23#include <linux/rfkill.h>
24
25/*
26 * This driver is needed because a number of Samsung laptops do not hook
27 * their control settings through ACPI. So we have to poke around in the
 28 * BIOS to control things like brightness values and "special" keys.
29 */
30
31/*
 32 * We have 0 - 8 as valid brightness levels. The specs say that level 0 should
 33 * be reserved by the BIOS (which really doesn't make much sense), so we tell
 34 * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8.
35 */
36#define MAX_BRIGHT 0x07
37
38
39#define SABI_IFACE_MAIN 0x00
40#define SABI_IFACE_SUB 0x02
41#define SABI_IFACE_COMPLETE 0x04
42#define SABI_IFACE_DATA 0x05
43
44/* Structure to get data back to the calling function */
45struct sabi_retval {
46 u8 retval[20];
47};
48
49struct sabi_header_offsets {
50 u8 port;
51 u8 re_mem;
52 u8 iface_func;
53 u8 en_mem;
54 u8 data_offset;
55 u8 data_segment;
56};
57
58struct sabi_commands {
59 /*
60 * Brightness is 0 - 8, as described above.
61 * Value 0 is for the BIOS to use
62 */
63 u8 get_brightness;
64 u8 set_brightness;
65
66 /*
67 * first byte:
68 * 0x00 - wireless is off
69 * 0x01 - wireless is on
70 * second byte:
71 * 0x02 - 3G is off
72 * 0x03 - 3G is on
73 * TODO, verify 3G is correct, that doesn't seem right...
74 */
75 u8 get_wireless_button;
76 u8 set_wireless_button;
77
78 /* 0 is off, 1 is on */
79 u8 get_backlight;
80 u8 set_backlight;
81
82 /*
83 * 0x80 or 0x00 - no action
84 * 0x81 - recovery key pressed
85 */
86 u8 get_recovery_mode;
87 u8 set_recovery_mode;
88
89 /*
90 * on seclinux: 0 is low, 1 is high,
91 * on swsmi: 0 is normal, 1 is silent, 2 is turbo
92 */
93 u8 get_performance_level;
94 u8 set_performance_level;
95
96 /*
97 * Tell the BIOS that Linux is running on this machine.
98 * 81 is on, 80 is off
99 */
100 u8 set_linux;
101};
102
103struct sabi_performance_level {
104 const char *name;
105 u8 value;
106};
107
108struct sabi_config {
109 const char *test_string;
110 u16 main_function;
111 const struct sabi_header_offsets header_offsets;
112 const struct sabi_commands commands;
113 const struct sabi_performance_level performance_levels[4];
114 u8 min_brightness;
115 u8 max_brightness;
116};
117
118static const struct sabi_config sabi_configs[] = {
119 {
120 .test_string = "SECLINUX",
121
122 .main_function = 0x4c49,
123
124 .header_offsets = {
125 .port = 0x00,
126 .re_mem = 0x02,
127 .iface_func = 0x03,
128 .en_mem = 0x04,
129 .data_offset = 0x05,
130 .data_segment = 0x07,
131 },
132
133 .commands = {
134 .get_brightness = 0x00,
135 .set_brightness = 0x01,
136
137 .get_wireless_button = 0x02,
138 .set_wireless_button = 0x03,
139
140 .get_backlight = 0x04,
141 .set_backlight = 0x05,
142
143 .get_recovery_mode = 0x06,
144 .set_recovery_mode = 0x07,
145
146 .get_performance_level = 0x08,
147 .set_performance_level = 0x09,
148
149 .set_linux = 0x0a,
150 },
151
152 .performance_levels = {
153 {
154 .name = "silent",
155 .value = 0,
156 },
157 {
158 .name = "normal",
159 .value = 1,
160 },
161 { },
162 },
163 .min_brightness = 1,
164 .max_brightness = 8,
165 },
166 {
167 .test_string = "SwSmi@",
168
169 .main_function = 0x5843,
170
171 .header_offsets = {
172 .port = 0x00,
173 .re_mem = 0x04,
174 .iface_func = 0x02,
175 .en_mem = 0x03,
176 .data_offset = 0x05,
177 .data_segment = 0x07,
178 },
179
180 .commands = {
181 .get_brightness = 0x10,
182 .set_brightness = 0x11,
183
184 .get_wireless_button = 0x12,
185 .set_wireless_button = 0x13,
186
187 .get_backlight = 0x2d,
188 .set_backlight = 0x2e,
189
190 .get_recovery_mode = 0xff,
191 .set_recovery_mode = 0xff,
192
193 .get_performance_level = 0x31,
194 .set_performance_level = 0x32,
195
196 .set_linux = 0xff,
197 },
198
199 .performance_levels = {
200 {
201 .name = "normal",
202 .value = 0,
203 },
204 {
205 .name = "silent",
206 .value = 1,
207 },
208 {
209 .name = "overclock",
210 .value = 2,
211 },
212 { },
213 },
214 .min_brightness = 0,
215 .max_brightness = 8,
216 },
217 { },
218};
219
220static const struct sabi_config *sabi_config;
221
222static void __iomem *sabi;
223static void __iomem *sabi_iface;
224static void __iomem *f0000_segment;
225static struct backlight_device *backlight_device;
226static struct mutex sabi_mutex;
227static struct platform_device *sdev;
228static struct rfkill *rfk;
229
230static int force;
231module_param(force, bool, 0);
232MODULE_PARM_DESC(force,
 233 "Disable the DMI check and force the driver to load");
234
235static int debug;
236module_param(debug, bool, S_IRUGO | S_IWUSR);
237MODULE_PARM_DESC(debug, "Debug enabled or not");
238
239static int sabi_get_command(u8 command, struct sabi_retval *sretval)
240{
241 int retval = 0;
242 u16 port = readw(sabi + sabi_config->header_offsets.port);
243 u8 complete, iface_data;
244
245 mutex_lock(&sabi_mutex);
246
247 /* enable memory to be able to write to it */
248 outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
249
250 /* write out the command */
251 writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
252 writew(command, sabi_iface + SABI_IFACE_SUB);
253 writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
254 outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
255
256 /* write protect memory to make it safe */
257 outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
258
259 /* see if the command actually succeeded */
260 complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
261 iface_data = readb(sabi_iface + SABI_IFACE_DATA);
262 if (complete != 0xaa || iface_data == 0xff) {
263 pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
264 command, complete, iface_data);
265 retval = -EINVAL;
266 goto exit;
267 }
268 /*
 269 * Save off the data into a structure so the caller can use it.
 270 * Right now we only want the first 4 bytes;
 271 * there are commands that need more, but not the ones we
 272 * currently care about.
273 */
274 sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA);
275 sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1);
276 sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2);
277 sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3);
278
279exit:
280 mutex_unlock(&sabi_mutex);
281 return retval;
282
283}
284
285static int sabi_set_command(u8 command, u8 data)
286{
287 int retval = 0;
288 u16 port = readw(sabi + sabi_config->header_offsets.port);
289 u8 complete, iface_data;
290
291 mutex_lock(&sabi_mutex);
292
293 /* enable memory to be able to write to it */
294 outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
295
296 /* write out the command */
297 writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
298 writew(command, sabi_iface + SABI_IFACE_SUB);
299 writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
300 writeb(data, sabi_iface + SABI_IFACE_DATA);
301 outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
302
303 /* write protect memory to make it safe */
304 outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
305
306 /* see if the command actually succeeded */
307 complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
308 iface_data = readb(sabi_iface + SABI_IFACE_DATA);
309 if (complete != 0xaa || iface_data == 0xff) {
310 pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
311 command, complete, iface_data);
312 retval = -EINVAL;
313 }
314
315 mutex_unlock(&sabi_mutex);
316 return retval;
317}
318
319static void test_backlight(void)
320{
321 struct sabi_retval sretval;
322
323 sabi_get_command(sabi_config->commands.get_backlight, &sretval);
324 printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
325
326 sabi_set_command(sabi_config->commands.set_backlight, 0);
327 printk(KERN_DEBUG "backlight should be off\n");
328
329 sabi_get_command(sabi_config->commands.get_backlight, &sretval);
330 printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
331
332 msleep(1000);
333
334 sabi_set_command(sabi_config->commands.set_backlight, 1);
335 printk(KERN_DEBUG "backlight should be on\n");
336
337 sabi_get_command(sabi_config->commands.get_backlight, &sretval);
338 printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
339}
340
341static void test_wireless(void)
342{
343 struct sabi_retval sretval;
344
345 sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
346 printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
347
348 sabi_set_command(sabi_config->commands.set_wireless_button, 0);
349 printk(KERN_DEBUG "wireless led should be off\n");
350
351 sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
352 printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
353
354 msleep(1000);
355
356 sabi_set_command(sabi_config->commands.set_wireless_button, 1);
357 printk(KERN_DEBUG "wireless led should be on\n");
358
359 sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
360 printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
361}
362
363static u8 read_brightness(void)
364{
365 struct sabi_retval sretval;
366 int user_brightness = 0;
367 int retval;
368
369 retval = sabi_get_command(sabi_config->commands.get_brightness,
370 &sretval);
371 if (!retval) {
372 user_brightness = sretval.retval[0];
373 if (user_brightness != 0)
374 user_brightness -= sabi_config->min_brightness;
375 }
376 return user_brightness;
377}
378
379static void set_brightness(u8 user_brightness)
380{
381 u8 user_level = user_brightness - sabi_config->min_brightness;
382
383 sabi_set_command(sabi_config->commands.set_brightness, user_level);
384}
385
386static int get_brightness(struct backlight_device *bd)
387{
388 return (int)read_brightness();
389}
390
391static int update_status(struct backlight_device *bd)
392{
393 set_brightness(bd->props.brightness);
394
395 if (bd->props.power == FB_BLANK_UNBLANK)
396 sabi_set_command(sabi_config->commands.set_backlight, 1);
397 else
398 sabi_set_command(sabi_config->commands.set_backlight, 0);
399 return 0;
400}
401
402static const struct backlight_ops backlight_ops = {
403 .get_brightness = get_brightness,
404 .update_status = update_status,
405};
406
407static int rfkill_set(void *data, bool blocked)
408{
409 /* Do something with blocked...*/
410 /*
411 * blocked == false is on
412 * blocked == true is off
413 */
414 if (blocked)
415 sabi_set_command(sabi_config->commands.set_wireless_button, 0);
416 else
417 sabi_set_command(sabi_config->commands.set_wireless_button, 1);
418
419 return 0;
420}
421
422static struct rfkill_ops rfkill_ops = {
423 .set_block = rfkill_set,
424};
425
426static int init_wireless(struct platform_device *sdev)
427{
428 int retval;
429
430 rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN,
431 &rfkill_ops, NULL);
432 if (!rfk)
433 return -ENOMEM;
434
435 retval = rfkill_register(rfk);
436 if (retval) {
437 rfkill_destroy(rfk);
438 return -ENODEV;
439 }
440
441 return 0;
442}
443
444static void destroy_wireless(void)
445{
446 rfkill_unregister(rfk);
447 rfkill_destroy(rfk);
448}
449
450static ssize_t get_performance_level(struct device *dev,
451 struct device_attribute *attr, char *buf)
452{
453 struct sabi_retval sretval;
454 int retval;
455 int i;
456
457 /* Read the state */
458 retval = sabi_get_command(sabi_config->commands.get_performance_level,
459 &sretval);
460 if (retval)
461 return retval;
462
463 /* The logic is backwards, yeah, lots of fun... */
464 for (i = 0; sabi_config->performance_levels[i].name; ++i) {
465 if (sretval.retval[0] == sabi_config->performance_levels[i].value)
466 return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name);
467 }
468 return sprintf(buf, "%s\n", "unknown");
469}
470
471static ssize_t set_performance_level(struct device *dev,
472 struct device_attribute *attr, const char *buf,
473 size_t count)
474{
475 if (count >= 1) {
476 int i;
477 for (i = 0; sabi_config->performance_levels[i].name; ++i) {
478 const struct sabi_performance_level *level =
479 &sabi_config->performance_levels[i];
480 if (!strncasecmp(level->name, buf, strlen(level->name))) {
481 sabi_set_command(sabi_config->commands.set_performance_level,
482 level->value);
483 break;
484 }
485 }
486 if (!sabi_config->performance_levels[i].name)
487 return -EINVAL;
488 }
489 return count;
490}
491static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
492 get_performance_level, set_performance_level);
493
494
495static int __init dmi_check_cb(const struct dmi_system_id *id)
496{
497 pr_info("found laptop model '%s'\n",
498 id->ident);
499 return 1;
500}
501
502static struct dmi_system_id __initdata samsung_dmi_table[] = {
503 {
504 .ident = "N128",
505 .matches = {
506 DMI_MATCH(DMI_SYS_VENDOR,
507 "SAMSUNG ELECTRONICS CO., LTD."),
508 DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
509 DMI_MATCH(DMI_BOARD_NAME, "N128"),
510 },
511 .callback = dmi_check_cb,
512 },
513 {
514 .ident = "N130",
515 .matches = {
516 DMI_MATCH(DMI_SYS_VENDOR,
517 "SAMSUNG ELECTRONICS CO., LTD."),
518 DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
519 DMI_MATCH(DMI_BOARD_NAME, "N130"),
520 },
521 .callback = dmi_check_cb,
522 },
523 {
524 .ident = "X125",
525 .matches = {
526 DMI_MATCH(DMI_SYS_VENDOR,
527 "SAMSUNG ELECTRONICS CO., LTD."),
528 DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
529 DMI_MATCH(DMI_BOARD_NAME, "X125"),
530 },
531 .callback = dmi_check_cb,
532 },
533 {
534 .ident = "X120/X170",
535 .matches = {
536 DMI_MATCH(DMI_SYS_VENDOR,
537 "SAMSUNG ELECTRONICS CO., LTD."),
538 DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
539 DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
540 },
541 .callback = dmi_check_cb,
542 },
543 {
544 .ident = "NC10",
545 .matches = {
546 DMI_MATCH(DMI_SYS_VENDOR,
547 "SAMSUNG ELECTRONICS CO., LTD."),
548 DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
549 DMI_MATCH(DMI_BOARD_NAME, "NC10"),
550 },
551 .callback = dmi_check_cb,
552 },
553 {
554 .ident = "NP-Q45",
555 .matches = {
556 DMI_MATCH(DMI_SYS_VENDOR,
557 "SAMSUNG ELECTRONICS CO., LTD."),
558 DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
559 DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
560 },
561 .callback = dmi_check_cb,
562 },
563 {
564 .ident = "X360",
565 .matches = {
566 DMI_MATCH(DMI_SYS_VENDOR,
567 "SAMSUNG ELECTRONICS CO., LTD."),
568 DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
569 DMI_MATCH(DMI_BOARD_NAME, "X360"),
570 },
571 .callback = dmi_check_cb,
572 },
573 {
574 .ident = "R518",
575 .matches = {
576 DMI_MATCH(DMI_SYS_VENDOR,
577 "SAMSUNG ELECTRONICS CO., LTD."),
578 DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
579 DMI_MATCH(DMI_BOARD_NAME, "R518"),
580 },
581 .callback = dmi_check_cb,
582 },
583 {
584 .ident = "R519/R719",
585 .matches = {
586 DMI_MATCH(DMI_SYS_VENDOR,
587 "SAMSUNG ELECTRONICS CO., LTD."),
588 DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
589 DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
590 },
591 .callback = dmi_check_cb,
592 },
593 {
594 .ident = "N150/N210/N220",
595 .matches = {
596 DMI_MATCH(DMI_SYS_VENDOR,
597 "SAMSUNG ELECTRONICS CO., LTD."),
598 DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
599 DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
600 },
601 .callback = dmi_check_cb,
602 },
603 {
604 .ident = "N150P/N210P/N220P",
605 .matches = {
606 DMI_MATCH(DMI_SYS_VENDOR,
607 "SAMSUNG ELECTRONICS CO., LTD."),
608 DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
609 DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
610 },
611 .callback = dmi_check_cb,
612 },
613 {
614 .ident = "R530/R730",
615 .matches = {
616 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
617 DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
618 DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
619 },
620 .callback = dmi_check_cb,
621 },
622 {
623 .ident = "NF110/NF210/NF310",
624 .matches = {
625 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
626 DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
627 DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
628 },
629 .callback = dmi_check_cb,
630 },
631 {
632 .ident = "N145P/N250P/N260P",
633 .matches = {
634 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
635 DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
636 DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
637 },
638 .callback = dmi_check_cb,
639 },
640 {
641 .ident = "R70/R71",
642 .matches = {
643 DMI_MATCH(DMI_SYS_VENDOR,
644 "SAMSUNG ELECTRONICS CO., LTD."),
645 DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
646 DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
647 },
648 .callback = dmi_check_cb,
649 },
650 {
651 .ident = "P460",
652 .matches = {
653 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
654 DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
655 DMI_MATCH(DMI_BOARD_NAME, "P460"),
656 },
657 .callback = dmi_check_cb,
658 },
659 { },
660};
661MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
662
663static int find_signature(void __iomem *memcheck, const char *testStr)
664{
665 int i = 0;
666 int loca;
667
668 for (loca = 0; loca < 0xffff; loca++) {
669 char temp = readb(memcheck + loca);
670
671 if (temp == testStr[i]) {
672 if (i == strlen(testStr)-1)
673 break;
674 ++i;
675 } else {
676 i = 0;
677 }
678 }
679 return loca;
680}
681
682static int __init samsung_init(void)
683{
684 struct backlight_properties props;
685 struct sabi_retval sretval;
686 unsigned int ifaceP;
687 int i;
688 int loca;
689 int retval;
690
691 mutex_init(&sabi_mutex);
692
693 if (!force && !dmi_check_system(samsung_dmi_table))
694 return -ENODEV;
695
696 f0000_segment = ioremap_nocache(0xf0000, 0xffff);
697 if (!f0000_segment) {
698 pr_err("Can't map the segment at 0xf0000\n");
699 return -EINVAL;
700 }
701
702 /* Try to find one of the signatures in memory to find the header */
703 for (i = 0; sabi_configs[i].test_string != 0; ++i) {
704 sabi_config = &sabi_configs[i];
705 loca = find_signature(f0000_segment, sabi_config->test_string);
706 if (loca != 0xffff)
707 break;
708 }
709
710 if (loca == 0xffff) {
711 pr_err("This computer does not support SABI\n");
712 goto error_no_signature;
713 }
714
 715	/* point to the SMI port number */
716 loca += 1;
717 sabi = (f0000_segment + loca);
718
719 if (debug) {
720 printk(KERN_DEBUG "This computer supports SABI==%x\n",
721 loca + 0xf0000 - 6);
722 printk(KERN_DEBUG "SABI header:\n");
723 printk(KERN_DEBUG " SMI Port Number = 0x%04x\n",
724 readw(sabi + sabi_config->header_offsets.port));
725 printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n",
726 readb(sabi + sabi_config->header_offsets.iface_func));
727 printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n",
728 readb(sabi + sabi_config->header_offsets.en_mem));
729 printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n",
730 readb(sabi + sabi_config->header_offsets.re_mem));
731 printk(KERN_DEBUG " SABI data offset = 0x%04x\n",
732 readw(sabi + sabi_config->header_offsets.data_offset));
733 printk(KERN_DEBUG " SABI data segment = 0x%04x\n",
734 readw(sabi + sabi_config->header_offsets.data_segment));
735 }
736
737 /* Get a pointer to the SABI Interface */
738 ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4;
739 ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff;
740 sabi_iface = ioremap_nocache(ifaceP, 16);
741 if (!sabi_iface) {
742 pr_err("Can't remap %x\n", ifaceP);
 743		goto error_no_signature;
744 }
745 if (debug) {
746 printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
747 printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface);
748
749 test_backlight();
750 test_wireless();
751
752 retval = sabi_get_command(sabi_config->commands.get_brightness,
753 &sretval);
754 printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]);
755 }
756
757 /* Turn on "Linux" mode in the BIOS */
758 if (sabi_config->commands.set_linux != 0xff) {
759 retval = sabi_set_command(sabi_config->commands.set_linux,
760 0x81);
761 if (retval) {
762 pr_warn("Linux mode was not set!\n");
763 goto error_no_platform;
764 }
765 }
766
767 /* knock up a platform device to hang stuff off of */
768 sdev = platform_device_register_simple("samsung", -1, NULL, 0);
769 if (IS_ERR(sdev))
770 goto error_no_platform;
771
772 /* create a backlight device to talk to this one */
773 memset(&props, 0, sizeof(struct backlight_properties));
774 props.max_brightness = sabi_config->max_brightness;
775 backlight_device = backlight_device_register("samsung", &sdev->dev,
776 NULL, &backlight_ops,
777 &props);
778 if (IS_ERR(backlight_device))
779 goto error_no_backlight;
780
781 backlight_device->props.brightness = read_brightness();
782 backlight_device->props.power = FB_BLANK_UNBLANK;
783 backlight_update_status(backlight_device);
784
785 retval = init_wireless(sdev);
786 if (retval)
787 goto error_no_rfk;
788
789 retval = device_create_file(&sdev->dev, &dev_attr_performance_level);
790 if (retval)
791 goto error_file_create;
792
 794	return 0;
795
796error_file_create:
797 destroy_wireless();
798
799error_no_rfk:
800 backlight_device_unregister(backlight_device);
801
802error_no_backlight:
803 platform_device_unregister(sdev);
804
805error_no_platform:
806 iounmap(sabi_iface);
807
808error_no_signature:
809 iounmap(f0000_segment);
810 return -EINVAL;
811}
812
813static void __exit samsung_exit(void)
814{
815 /* Turn off "Linux" mode in the BIOS */
816 if (sabi_config->commands.set_linux != 0xff)
817 sabi_set_command(sabi_config->commands.set_linux, 0x80);
818
819 device_remove_file(&sdev->dev, &dev_attr_performance_level);
820 backlight_device_unregister(backlight_device);
821 destroy_wireless();
822 iounmap(sabi_iface);
823 iounmap(f0000_segment);
824 platform_device_unregister(sdev);
825}
826
827module_init(samsung_init);
828module_exit(samsung_exit);
829
830MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
831MODULE_DESCRIPTION("Samsung Backlight driver");
832MODULE_LICENSE("GPL");
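
The module above registers a platform device named "samsung" and a backlight class device of the same name, so its controls surface in sysfs. A minimal user-space sketch follows; the attribute paths are derived from the registrations above, while the "silent" performance_level keyword is only an assumed example (the accepted strings are defined earlier in the file).

/*
 * Illustrative only: read the attributes the samsung platform driver
 * creates and write a performance level back.  Paths follow from
 * platform_device_register_simple("samsung", ...) and
 * backlight_device_register("samsung", ...) above; "silent" is an
 * assumed example value, not a guaranteed keyword.  Run as root.
 */
#include <stdio.h>

static void read_attr(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	FILE *f;

	read_attr("/sys/devices/platform/samsung/performance_level");
	read_attr("/sys/class/backlight/samsung/brightness");
	read_attr("/sys/class/backlight/samsung/max_brightness");

	f = fopen("/sys/devices/platform/samsung/performance_level", "w");
	if (f) {
		fputs("silent\n", f);	/* assumed example value */
		fclose(f);
	}
	return 0;
}
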
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 13d8d63bcca9..e642f5f29504 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -71,8 +71,9 @@
71#endif 71#endif
72 72
73#define DRV_PFX "sony-laptop: " 73#define DRV_PFX "sony-laptop: "
74#define dprintk(msg...) do { \ 74#define dprintk(msg...) do { \
75 if (debug) printk(KERN_WARNING DRV_PFX msg); \ 75 if (debug) \
76 pr_warn(DRV_PFX msg); \
76} while (0) 77} while (0)
77 78
78#define SONY_LAPTOP_DRIVER_VERSION "0.6" 79#define SONY_LAPTOP_DRIVER_VERSION "0.6"
@@ -124,6 +125,19 @@ MODULE_PARM_DESC(minor,
124 "default is -1 (automatic)"); 125 "default is -1 (automatic)");
125#endif 126#endif
126 127
128static int kbd_backlight; /* = 1 */
129module_param(kbd_backlight, int, 0444);
130MODULE_PARM_DESC(kbd_backlight,
131 "set this to 0 to disable keyboard backlight, "
132 "1 to enable it (default: 0)");
133
134static int kbd_backlight_timeout; /* = 0 */
135module_param(kbd_backlight_timeout, int, 0444);
136MODULE_PARM_DESC(kbd_backlight_timeout,
137 "set this to 0 to set the default 10 seconds timeout, "
138 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
139 "(default: 0)");
140
127enum sony_nc_rfkill { 141enum sony_nc_rfkill {
128 SONY_WIFI, 142 SONY_WIFI,
129 SONY_BLUETOOTH, 143 SONY_BLUETOOTH,
@@ -402,7 +416,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
402 error = kfifo_alloc(&sony_laptop_input.fifo, 416 error = kfifo_alloc(&sony_laptop_input.fifo,
403 SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); 417 SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
404 if (error) { 418 if (error) {
405 printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); 419 pr_err(DRV_PFX "kfifo_alloc failed\n");
406 goto err_dec_users; 420 goto err_dec_users;
407 } 421 }
408 422
@@ -591,7 +605,7 @@ struct sony_nc_value {
591 int value; /* current setting */ 605 int value; /* current setting */
592 int valid; /* Has ever been set */ 606 int valid; /* Has ever been set */
593 int debug; /* active only in debug mode ? */ 607 int debug; /* active only in debug mode ? */
594 struct device_attribute devattr; /* sysfs atribute */ 608 struct device_attribute devattr; /* sysfs attribute */
595}; 609};
596 610
597#define SNC_HANDLE_NAMES(_name, _values...) \ 611#define SNC_HANDLE_NAMES(_name, _values...) \
@@ -686,7 +700,7 @@ static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
686 return 0; 700 return 0;
687 } 701 }
688 702
689 printk(KERN_WARNING DRV_PFX "acpi_callreadfunc failed\n"); 703 pr_warn(DRV_PFX "acpi_callreadfunc failed\n");
690 704
691 return -1; 705 return -1;
692} 706}
@@ -712,7 +726,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
712 if (status == AE_OK) { 726 if (status == AE_OK) {
713 if (result != NULL) { 727 if (result != NULL) {
714 if (out_obj.type != ACPI_TYPE_INTEGER) { 728 if (out_obj.type != ACPI_TYPE_INTEGER) {
715 printk(KERN_WARNING DRV_PFX "acpi_evaluate_object bad " 729 pr_warn(DRV_PFX "acpi_evaluate_object bad "
716 "return type\n"); 730 "return type\n");
717 return -1; 731 return -1;
718 } 732 }
@@ -721,34 +735,103 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
721 return 0; 735 return 0;
722 } 736 }
723 737
724 printk(KERN_WARNING DRV_PFX "acpi_evaluate_object failed\n"); 738 pr_warn(DRV_PFX "acpi_evaluate_object failed\n");
725 739
726 return -1; 740 return -1;
727} 741}
728 742
729static int sony_find_snc_handle(int handle) 743struct sony_nc_handles {
744 u16 cap[0x10];
745 struct device_attribute devattr;
746};
747
748static struct sony_nc_handles *handles;
749
750static ssize_t sony_nc_handles_show(struct device *dev,
751 struct device_attribute *attr, char *buffer)
752{
753 ssize_t len = 0;
754 int i;
755
756 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
757 len += snprintf(buffer + len, PAGE_SIZE - len, "0x%.4x ",
758 handles->cap[i]);
759 }
760 len += snprintf(buffer + len, PAGE_SIZE - len, "\n");
761
762 return len;
763}
764
765static int sony_nc_handles_setup(struct platform_device *pd)
730{ 766{
731 int i; 767 int i;
732 int result; 768 int result;
733 769
734 for (i = 0x20; i < 0x30; i++) { 770 handles = kzalloc(sizeof(*handles), GFP_KERNEL);
735 acpi_callsetfunc(sony_nc_acpi_handle, "SN00", i, &result); 771 if (!handles)
736 if (result == handle) 772 return -ENOMEM;
737 return i-0x20; 773
774 sysfs_attr_init(&handles->devattr.attr);
775 handles->devattr.attr.name = "handles";
776 handles->devattr.attr.mode = S_IRUGO;
777 handles->devattr.show = sony_nc_handles_show;
778
779 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
780 if (!acpi_callsetfunc(sony_nc_acpi_handle,
781 "SN00", i + 0x20, &result)) {
782 dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
783 result, i);
784 handles->cap[i] = result;
785 }
786 }
787
788 /* allow reading capabilities via sysfs */
789 if (device_create_file(&pd->dev, &handles->devattr)) {
790 kfree(handles);
791 handles = NULL;
792 return -1;
793 }
794
795 return 0;
796}
797
798static int sony_nc_handles_cleanup(struct platform_device *pd)
799{
800 if (handles) {
801 device_remove_file(&pd->dev, &handles->devattr);
802 kfree(handles);
803 handles = NULL;
738 } 804 }
805 return 0;
806}
739 807
808static int sony_find_snc_handle(int handle)
809{
810 int i;
811 for (i = 0; i < 0x10; i++) {
812 if (handles->cap[i] == handle) {
813 dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
814 handle, i);
815 return i;
816 }
817 }
818 dprintk("handle 0x%.4x not found\n", handle);
740 return -1; 819 return -1;
741} 820}
742 821
743static int sony_call_snc_handle(int handle, int argument, int *result) 822static int sony_call_snc_handle(int handle, int argument, int *result)
744{ 823{
824 int ret = 0;
745 int offset = sony_find_snc_handle(handle); 825 int offset = sony_find_snc_handle(handle);
746 826
747 if (offset < 0) 827 if (offset < 0)
748 return -1; 828 return -1;
749 829
750 return acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, 830 ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument,
751 result); 831 result);
832 dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument,
833 *result);
834 return ret;
752} 835}
753 836
754/* 837/*
@@ -857,11 +940,39 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
857 return value - 1; 940 return value - 1;
858} 941}
859 942
860static struct backlight_device *sony_backlight_device; 943static int sony_nc_get_brightness_ng(struct backlight_device *bd)
944{
945 int result;
946 int *handle = (int *)bl_get_data(bd);
947
948 sony_call_snc_handle(*handle, 0x0200, &result);
949
950 return result & 0xff;
951}
952
953static int sony_nc_update_status_ng(struct backlight_device *bd)
954{
955 int value, result;
956 int *handle = (int *)bl_get_data(bd);
957
958 value = bd->props.brightness;
959 sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result);
960
961 return sony_nc_get_brightness_ng(bd);
962}
963
861static const struct backlight_ops sony_backlight_ops = { 964static const struct backlight_ops sony_backlight_ops = {
965 .options = BL_CORE_SUSPENDRESUME,
862 .update_status = sony_backlight_update_status, 966 .update_status = sony_backlight_update_status,
863 .get_brightness = sony_backlight_get_brightness, 967 .get_brightness = sony_backlight_get_brightness,
864}; 968};
969static const struct backlight_ops sony_backlight_ng_ops = {
970 .options = BL_CORE_SUSPENDRESUME,
971 .update_status = sony_nc_update_status_ng,
972 .get_brightness = sony_nc_get_brightness_ng,
973};
974static int backlight_ng_handle;
975static struct backlight_device *sony_backlight_device;
865 976
866/* 977/*
867 * New SNC-only Vaios event mapping to driver known keys 978 * New SNC-only Vaios event mapping to driver known keys
@@ -972,7 +1083,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
972 } 1083 }
973 1084
974 if (!key_event->data) 1085 if (!key_event->data)
975 printk(KERN_INFO DRV_PFX 1086 pr_info(DRV_PFX
976 "Unknown event: 0x%x 0x%x\n", 1087 "Unknown event: 0x%x 0x%x\n",
977 key_handle, 1088 key_handle,
978 ev); 1089 ev);
@@ -996,7 +1107,7 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
996 struct acpi_device_info *info; 1107 struct acpi_device_info *info;
997 1108
998 if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { 1109 if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) {
999 printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", 1110 pr_warn(DRV_PFX "method: name: %4.4s, args %X\n",
1000 (char *)&info->name, info->param_count); 1111 (char *)&info->name, info->param_count);
1001 1112
1002 kfree(info); 1113 kfree(info);
@@ -1037,7 +1148,7 @@ static int sony_nc_resume(struct acpi_device *device)
1037 ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, 1148 ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
1038 item->value, NULL); 1149 item->value, NULL);
1039 if (ret < 0) { 1150 if (ret < 0) {
1040 printk("%s: %d\n", __func__, ret); 1151 pr_err(DRV_PFX "%s: %d\n", __func__, ret);
1041 break; 1152 break;
1042 } 1153 }
1043 } 1154 }
@@ -1054,11 +1165,6 @@ static int sony_nc_resume(struct acpi_device *device)
1054 sony_nc_function_setup(device); 1165 sony_nc_function_setup(device);
1055 } 1166 }
1056 1167
1057 /* set the last requested brightness level */
1058 if (sony_backlight_device &&
1059 sony_backlight_update_status(sony_backlight_device) < 0)
1060 printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n");
1061
1062 /* re-read rfkill state */ 1168 /* re-read rfkill state */
1063 sony_nc_rfkill_update(); 1169 sony_nc_rfkill_update();
1064 1170
@@ -1206,12 +1312,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device)
1206 1312
1207 device_enum = (union acpi_object *) buffer.pointer; 1313 device_enum = (union acpi_object *) buffer.pointer;
1208 if (!device_enum) { 1314 if (!device_enum) {
1209 pr_err("Invalid SN06 return object\n"); 1315 pr_err(DRV_PFX "No SN06 return object.");
1210 goto out_no_enum; 1316 goto out_no_enum;
1211 } 1317 }
1212 if (device_enum->type != ACPI_TYPE_BUFFER) { 1318 if (device_enum->type != ACPI_TYPE_BUFFER) {
1213 pr_err("Invalid SN06 return object type 0x%.2x\n", 1319 pr_err(DRV_PFX "Invalid SN06 return object 0x%.2x\n",
1214 device_enum->type); 1320 device_enum->type);
1215 goto out_no_enum; 1321 goto out_no_enum;
1216 } 1322 }
1217 1323
@@ -1245,6 +1351,209 @@ out_no_enum:
1245 return; 1351 return;
1246} 1352}
1247 1353
1354/* Keyboard backlight feature */
1355#define KBDBL_HANDLER 0x137
1356#define KBDBL_PRESENT 0xB00
1357#define SET_MODE 0xC00
1358#define SET_TIMEOUT 0xE00
1359
1360struct kbd_backlight {
1361 int mode;
1362 int timeout;
1363 struct device_attribute mode_attr;
1364 struct device_attribute timeout_attr;
1365};
1366
1367static struct kbd_backlight *kbdbl_handle;
1368
1369static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
1370{
1371 int result;
1372
1373 if (value > 1)
1374 return -EINVAL;
1375
1376 if (sony_call_snc_handle(KBDBL_HANDLER,
1377 (value << 0x10) | SET_MODE, &result))
1378 return -EIO;
1379
1380 kbdbl_handle->mode = value;
1381
1382 return 0;
1383}
1384
1385static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
1386 struct device_attribute *attr,
1387 const char *buffer, size_t count)
1388{
1389 int ret = 0;
1390 unsigned long value;
1391
1392 if (count > 31)
1393 return -EINVAL;
1394
1395 if (strict_strtoul(buffer, 10, &value))
1396 return -EINVAL;
1397
1398 ret = __sony_nc_kbd_backlight_mode_set(value);
1399 if (ret < 0)
1400 return ret;
1401
1402 return count;
1403}
1404
1405static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
1406 struct device_attribute *attr, char *buffer)
1407{
1408 ssize_t count = 0;
1409 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode);
1410 return count;
1411}
1412
1413static int __sony_nc_kbd_backlight_timeout_set(u8 value)
1414{
1415 int result;
1416
1417 if (value > 3)
1418 return -EINVAL;
1419
1420 if (sony_call_snc_handle(KBDBL_HANDLER,
1421 (value << 0x10) | SET_TIMEOUT, &result))
1422 return -EIO;
1423
1424 kbdbl_handle->timeout = value;
1425
1426 return 0;
1427}
1428
1429static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
1430 struct device_attribute *attr,
1431 const char *buffer, size_t count)
1432{
1433 int ret = 0;
1434 unsigned long value;
1435
1436 if (count > 31)
1437 return -EINVAL;
1438
1439 if (strict_strtoul(buffer, 10, &value))
1440 return -EINVAL;
1441
1442 ret = __sony_nc_kbd_backlight_timeout_set(value);
1443 if (ret < 0)
1444 return ret;
1445
1446 return count;
1447}
1448
1449static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
1450 struct device_attribute *attr, char *buffer)
1451{
1452 ssize_t count = 0;
1453 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout);
1454 return count;
1455}
1456
1457static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
1458{
1459 int result;
1460
1461	if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
1462 return 0;
1463 if (!(result & 0x02))
1464 return 0;
1465
1466 kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL);
1467 if (!kbdbl_handle)
1468 return -ENOMEM;
1469
1470 sysfs_attr_init(&kbdbl_handle->mode_attr.attr);
1471 kbdbl_handle->mode_attr.attr.name = "kbd_backlight";
1472 kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
1473 kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show;
1474 kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store;
1475
1476 sysfs_attr_init(&kbdbl_handle->timeout_attr.attr);
1477 kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout";
1478 kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
1479 kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
1480 kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
1481
1482 if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr))
1483 goto outkzalloc;
1484
1485 if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr))
1486 goto outmode;
1487
1488 __sony_nc_kbd_backlight_mode_set(kbd_backlight);
1489 __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout);
1490
1491 return 0;
1492
1493outmode:
1494 device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
1495outkzalloc:
1496 kfree(kbdbl_handle);
1497 kbdbl_handle = NULL;
1498 return -1;
1499}
1500
1501static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1502{
1503 if (kbdbl_handle) {
1504 device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
1505 device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
1506 kfree(kbdbl_handle);
1507 }
1508 return 0;
1509}
1510
1511static void sony_nc_backlight_setup(void)
1512{
1513 acpi_handle unused;
1514 int max_brightness = 0;
1515 const struct backlight_ops *ops = NULL;
1516 struct backlight_properties props;
1517
1518 if (sony_find_snc_handle(0x12f) != -1) {
1519 backlight_ng_handle = 0x12f;
1520 ops = &sony_backlight_ng_ops;
1521 max_brightness = 0xff;
1522
1523 } else if (sony_find_snc_handle(0x137) != -1) {
1524 backlight_ng_handle = 0x137;
1525 ops = &sony_backlight_ng_ops;
1526 max_brightness = 0xff;
1527
1528 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
1529 &unused))) {
1530 ops = &sony_backlight_ops;
1531 max_brightness = SONY_MAX_BRIGHTNESS - 1;
1532
1533 } else
1534 return;
1535
1536 memset(&props, 0, sizeof(struct backlight_properties));
1537 props.type = BACKLIGHT_PLATFORM;
1538 props.max_brightness = max_brightness;
1539 sony_backlight_device = backlight_device_register("sony", NULL,
1540 &backlight_ng_handle,
1541 ops, &props);
1542
1543 if (IS_ERR(sony_backlight_device)) {
1544		pr_warn(DRV_PFX "unable to register backlight device\n");
1545 sony_backlight_device = NULL;
1546 } else
1547 sony_backlight_device->props.brightness =
1548 ops->get_brightness(sony_backlight_device);
1549}
1550
1551static void sony_nc_backlight_cleanup(void)
1552{
1553 if (sony_backlight_device)
1554 backlight_device_unregister(sony_backlight_device);
1555}
1556
1248static int sony_nc_add(struct acpi_device *device) 1557static int sony_nc_add(struct acpi_device *device)
1249{ 1558{
1250 acpi_status status; 1559 acpi_status status;
@@ -1252,8 +1561,8 @@ static int sony_nc_add(struct acpi_device *device)
1252 acpi_handle handle; 1561 acpi_handle handle;
1253 struct sony_nc_value *item; 1562 struct sony_nc_value *item;
1254 1563
1255 printk(KERN_INFO DRV_PFX "%s v%s.\n", 1564 pr_info(DRV_PFX "%s v%s.\n", SONY_NC_DRIVER_NAME,
1256 SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); 1565 SONY_LAPTOP_DRIVER_VERSION);
1257 1566
1258 sony_nc_acpi_device = device; 1567 sony_nc_acpi_device = device;
1259 strcpy(acpi_device_class(device), "sony/hotkey"); 1568 strcpy(acpi_device_class(device), "sony/hotkey");
@@ -1269,13 +1578,18 @@ static int sony_nc_add(struct acpi_device *device)
1269 goto outwalk; 1578 goto outwalk;
1270 } 1579 }
1271 1580
1581 result = sony_pf_add();
1582 if (result)
1583 goto outpresent;
1584
1272 if (debug) { 1585 if (debug) {
1273 status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_nc_acpi_handle, 1586 status = acpi_walk_namespace(ACPI_TYPE_METHOD,
1274 1, sony_walk_callback, NULL, NULL, NULL); 1587 sony_nc_acpi_handle, 1, sony_walk_callback,
1588 NULL, NULL, NULL);
1275 if (ACPI_FAILURE(status)) { 1589 if (ACPI_FAILURE(status)) {
1276 printk(KERN_WARNING DRV_PFX "unable to walk acpi resources\n"); 1590 pr_warn(DRV_PFX "unable to walk acpi resources\n");
1277 result = -ENODEV; 1591 result = -ENODEV;
1278 goto outwalk; 1592 goto outpresent;
1279 } 1593 }
1280 } 1594 }
1281 1595
@@ -1288,6 +1602,12 @@ static int sony_nc_add(struct acpi_device *device)
1288 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", 1602 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
1289 &handle))) { 1603 &handle))) {
1290 dprintk("Doing SNC setup\n"); 1604 dprintk("Doing SNC setup\n");
1605 result = sony_nc_handles_setup(sony_pf_device);
1606 if (result)
1607 goto outpresent;
1608 result = sony_nc_kbd_backlight_setup(sony_pf_device);
1609 if (result)
1610 goto outsnc;
1291 sony_nc_function_setup(device); 1611 sony_nc_function_setup(device);
1292 sony_nc_rfkill_setup(device); 1612 sony_nc_rfkill_setup(device);
1293 } 1613 }
@@ -1295,40 +1615,17 @@ static int sony_nc_add(struct acpi_device *device)
1295 /* setup input devices and helper fifo */ 1615 /* setup input devices and helper fifo */
1296 result = sony_laptop_setup_input(device); 1616 result = sony_laptop_setup_input(device);
1297 if (result) { 1617 if (result) {
1298 printk(KERN_ERR DRV_PFX 1618 pr_err(DRV_PFX "Unable to create input devices.\n");
1299 "Unable to create input devices.\n"); 1619 goto outkbdbacklight;
1300 goto outwalk;
1301 } 1620 }
1302 1621
1303 if (acpi_video_backlight_support()) { 1622 if (acpi_video_backlight_support()) {
1304 printk(KERN_INFO DRV_PFX "brightness ignored, must be " 1623 pr_info(DRV_PFX "brightness ignored, must be "
1305 "controlled by ACPI video driver\n"); 1624 "controlled by ACPI video driver\n");
1306 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", 1625 } else {
1307 &handle))) { 1626 sony_nc_backlight_setup();
1308 struct backlight_properties props;
1309 memset(&props, 0, sizeof(struct backlight_properties));
1310 props.type = BACKLIGHT_PLATFORM;
1311 props.max_brightness = SONY_MAX_BRIGHTNESS - 1;
1312 sony_backlight_device = backlight_device_register("sony", NULL,
1313 NULL,
1314 &sony_backlight_ops,
1315 &props);
1316
1317 if (IS_ERR(sony_backlight_device)) {
1318 printk(KERN_WARNING DRV_PFX "unable to register backlight device\n");
1319 sony_backlight_device = NULL;
1320 } else {
1321 sony_backlight_device->props.brightness =
1322 sony_backlight_get_brightness
1323 (sony_backlight_device);
1324 }
1325
1326 } 1627 }
1327 1628
1328 result = sony_pf_add();
1329 if (result)
1330 goto outbacklight;
1331
1332 /* create sony_pf sysfs attributes related to the SNC device */ 1629 /* create sony_pf sysfs attributes related to the SNC device */
1333 for (item = sony_nc_values; item->name; ++item) { 1630 for (item = sony_nc_values; item->name; ++item) {
1334 1631
@@ -1374,14 +1671,19 @@ static int sony_nc_add(struct acpi_device *device)
1374 for (item = sony_nc_values; item->name; ++item) { 1671 for (item = sony_nc_values; item->name; ++item) {
1375 device_remove_file(&sony_pf_device->dev, &item->devattr); 1672 device_remove_file(&sony_pf_device->dev, &item->devattr);
1376 } 1673 }
1377 sony_pf_remove(); 1674 sony_nc_backlight_cleanup();
1378
1379 outbacklight:
1380 if (sony_backlight_device)
1381 backlight_device_unregister(sony_backlight_device);
1382 1675
1383 sony_laptop_remove_input(); 1676 sony_laptop_remove_input();
1384 1677
1678 outkbdbacklight:
1679 sony_nc_kbd_backlight_cleanup(sony_pf_device);
1680
1681 outsnc:
1682 sony_nc_handles_cleanup(sony_pf_device);
1683
1684 outpresent:
1685 sony_pf_remove();
1686
1385 outwalk: 1687 outwalk:
1386 sony_nc_rfkill_cleanup(); 1688 sony_nc_rfkill_cleanup();
1387 return result; 1689 return result;
@@ -1391,8 +1693,7 @@ static int sony_nc_remove(struct acpi_device *device, int type)
1391{ 1693{
1392 struct sony_nc_value *item; 1694 struct sony_nc_value *item;
1393 1695
1394 if (sony_backlight_device) 1696 sony_nc_backlight_cleanup();
1395 backlight_device_unregister(sony_backlight_device);
1396 1697
1397 sony_nc_acpi_device = NULL; 1698 sony_nc_acpi_device = NULL;
1398 1699
@@ -1400,6 +1701,8 @@ static int sony_nc_remove(struct acpi_device *device, int type)
1400 device_remove_file(&sony_pf_device->dev, &item->devattr); 1701 device_remove_file(&sony_pf_device->dev, &item->devattr);
1401 } 1702 }
1402 1703
1704 sony_nc_kbd_backlight_cleanup(sony_pf_device);
1705 sony_nc_handles_cleanup(sony_pf_device);
1403 sony_pf_remove(); 1706 sony_pf_remove();
1404 sony_laptop_remove_input(); 1707 sony_laptop_remove_input();
1405 sony_nc_rfkill_cleanup(); 1708 sony_nc_rfkill_cleanup();
@@ -1438,7 +1741,6 @@ static struct acpi_driver sony_nc_driver = {
1438#define SONYPI_DEVICE_TYPE1 0x00000001 1741#define SONYPI_DEVICE_TYPE1 0x00000001
1439#define SONYPI_DEVICE_TYPE2 0x00000002 1742#define SONYPI_DEVICE_TYPE2 0x00000002
1440#define SONYPI_DEVICE_TYPE3 0x00000004 1743#define SONYPI_DEVICE_TYPE3 0x00000004
1441#define SONYPI_DEVICE_TYPE4 0x00000008
1442 1744
1443#define SONYPI_TYPE1_OFFSET 0x04 1745#define SONYPI_TYPE1_OFFSET 0x04
1444#define SONYPI_TYPE2_OFFSET 0x12 1746#define SONYPI_TYPE2_OFFSET 0x12
@@ -1584,8 +1886,8 @@ static struct sonypi_event sonypi_blueev[] = {
1584 1886
1585/* The set of possible wireless events */ 1887/* The set of possible wireless events */
1586static struct sonypi_event sonypi_wlessev[] = { 1888static struct sonypi_event sonypi_wlessev[] = {
1587 { 0x59, SONYPI_EVENT_WIRELESS_ON }, 1889 { 0x59, SONYPI_EVENT_IGNORE },
1588 { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, 1890 { 0x5a, SONYPI_EVENT_IGNORE },
1589 { 0, 0 } 1891 { 0, 0 }
1590}; 1892};
1591 1893
@@ -1842,7 +2144,7 @@ out:
1842 if (pcidev) 2144 if (pcidev)
1843 pci_dev_put(pcidev); 2145 pci_dev_put(pcidev);
1844 2146
1845 printk(KERN_INFO DRV_PFX "detected Type%d model\n", 2147 pr_info(DRV_PFX "detected Type%d model\n",
1846 dev->model == SONYPI_DEVICE_TYPE1 ? 1 : 2148 dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
1847 dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); 2149 dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
1848} 2150}
@@ -1890,7 +2192,7 @@ static int __sony_pic_camera_ready(void)
1890static int __sony_pic_camera_off(void) 2192static int __sony_pic_camera_off(void)
1891{ 2193{
1892 if (!camera) { 2194 if (!camera) {
1893 printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); 2195 pr_warn(DRV_PFX "camera control not enabled\n");
1894 return -ENODEV; 2196 return -ENODEV;
1895 } 2197 }
1896 2198
@@ -1910,7 +2212,7 @@ static int __sony_pic_camera_on(void)
1910 int i, j, x; 2212 int i, j, x;
1911 2213
1912 if (!camera) { 2214 if (!camera) {
1913 printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); 2215 pr_warn(DRV_PFX "camera control not enabled\n");
1914 return -ENODEV; 2216 return -ENODEV;
1915 } 2217 }
1916 2218
@@ -1933,7 +2235,7 @@ static int __sony_pic_camera_on(void)
1933 } 2235 }
1934 2236
1935 if (j == 0) { 2237 if (j == 0) {
1936 printk(KERN_WARNING DRV_PFX "failed to power on camera\n"); 2238 pr_warn(DRV_PFX "failed to power on camera\n");
1937 return -ENODEV; 2239 return -ENODEV;
1938 } 2240 }
1939 2241
@@ -1989,7 +2291,7 @@ int sony_pic_camera_command(int command, u8 value)
1989 ITERATIONS_SHORT); 2291 ITERATIONS_SHORT);
1990 break; 2292 break;
1991 default: 2293 default:
1992 printk(KERN_ERR DRV_PFX "sony_pic_camera_command invalid: %d\n", 2294 pr_err(DRV_PFX "sony_pic_camera_command invalid: %d\n",
1993 command); 2295 command);
1994 break; 2296 break;
1995 } 2297 }
@@ -2396,7 +2698,7 @@ static int sonypi_compat_init(void)
2396 error = 2698 error =
2397 kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); 2699 kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
2398 if (error) { 2700 if (error) {
2399 printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); 2701 pr_err(DRV_PFX "kfifo_alloc failed\n");
2400 return error; 2702 return error;
2401 } 2703 }
2402 2704
@@ -2406,11 +2708,11 @@ static int sonypi_compat_init(void)
2406 sonypi_misc_device.minor = minor; 2708 sonypi_misc_device.minor = minor;
2407 error = misc_register(&sonypi_misc_device); 2709 error = misc_register(&sonypi_misc_device);
2408 if (error) { 2710 if (error) {
2409 printk(KERN_ERR DRV_PFX "misc_register failed\n"); 2711 pr_err(DRV_PFX "misc_register failed\n");
2410 goto err_free_kfifo; 2712 goto err_free_kfifo;
2411 } 2713 }
2412 if (minor == -1) 2714 if (minor == -1)
2413 printk(KERN_INFO DRV_PFX "device allocated minor is %d\n", 2715 pr_info(DRV_PFX "device allocated minor is %d\n",
2414 sonypi_misc_device.minor); 2716 sonypi_misc_device.minor);
2415 2717
2416 return 0; 2718 return 0;
@@ -2470,8 +2772,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
2470 } 2772 }
2471 for (i = 0; i < p->interrupt_count; i++) { 2773 for (i = 0; i < p->interrupt_count; i++) {
2472 if (!p->interrupts[i]) { 2774 if (!p->interrupts[i]) {
2473 printk(KERN_WARNING DRV_PFX 2775 pr_warn(DRV_PFX "Invalid IRQ %d\n",
2474 "Invalid IRQ %d\n",
2475 p->interrupts[i]); 2776 p->interrupts[i]);
2476 continue; 2777 continue;
2477 } 2778 }
@@ -2510,7 +2811,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
2510 ioport->io2.address_length); 2811 ioport->io2.address_length);
2511 } 2812 }
2512 else { 2813 else {
2513 printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); 2814 pr_err(DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n");
2514 return AE_ERROR; 2815 return AE_ERROR;
2515 } 2816 }
2516 return AE_OK; 2817 return AE_OK;
@@ -2538,7 +2839,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
2538 dprintk("Evaluating _STA\n"); 2839 dprintk("Evaluating _STA\n");
2539 result = acpi_bus_get_status(device); 2840 result = acpi_bus_get_status(device);
2540 if (result) { 2841 if (result) {
2541 printk(KERN_WARNING DRV_PFX "Unable to read status\n"); 2842 pr_warn(DRV_PFX "Unable to read status\n");
2542 goto end; 2843 goto end;
2543 } 2844 }
2544 2845
@@ -2554,8 +2855,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
2554 status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, 2855 status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
2555 sony_pic_read_possible_resource, &spic_dev); 2856 sony_pic_read_possible_resource, &spic_dev);
2556 if (ACPI_FAILURE(status)) { 2857 if (ACPI_FAILURE(status)) {
2557 printk(KERN_WARNING DRV_PFX 2858 pr_warn(DRV_PFX "Failure evaluating %s\n",
2558 "Failure evaluating %s\n",
2559 METHOD_NAME__PRS); 2859 METHOD_NAME__PRS);
2560 result = -ENODEV; 2860 result = -ENODEV;
2561 } 2861 }
@@ -2669,7 +2969,7 @@ static int sony_pic_enable(struct acpi_device *device,
2669 2969
2670 /* check for total failure */ 2970 /* check for total failure */
2671 if (ACPI_FAILURE(status)) { 2971 if (ACPI_FAILURE(status)) {
2672 printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n"); 2972 pr_err(DRV_PFX "Error evaluating _SRS\n");
2673 result = -ENODEV; 2973 result = -ENODEV;
2674 goto end; 2974 goto end;
2675 } 2975 }
@@ -2725,6 +3025,9 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
2725 if (ev == dev->event_types[i].events[j].data) { 3025 if (ev == dev->event_types[i].events[j].data) {
2726 device_event = 3026 device_event =
2727 dev->event_types[i].events[j].event; 3027 dev->event_types[i].events[j].event;
3028 /* some events may require ignoring */
3029 if (!device_event)
3030 return IRQ_HANDLED;
2728 goto found; 3031 goto found;
2729 } 3032 }
2730 } 3033 }
@@ -2744,7 +3047,6 @@ found:
2744 sony_laptop_report_input_event(device_event); 3047 sony_laptop_report_input_event(device_event);
2745 acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event); 3048 acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event);
2746 sonypi_compat_report_event(device_event); 3049 sonypi_compat_report_event(device_event);
2747
2748 return IRQ_HANDLED; 3050 return IRQ_HANDLED;
2749} 3051}
2750 3052
@@ -2759,7 +3061,7 @@ static int sony_pic_remove(struct acpi_device *device, int type)
2759 struct sony_pic_irq *irq, *tmp_irq; 3061 struct sony_pic_irq *irq, *tmp_irq;
2760 3062
2761 if (sony_pic_disable(device)) { 3063 if (sony_pic_disable(device)) {
2762 printk(KERN_ERR DRV_PFX "Couldn't disable device.\n"); 3064 pr_err(DRV_PFX "Couldn't disable device.\n");
2763 return -ENXIO; 3065 return -ENXIO;
2764 } 3066 }
2765 3067
@@ -2799,8 +3101,8 @@ static int sony_pic_add(struct acpi_device *device)
2799 struct sony_pic_ioport *io, *tmp_io; 3101 struct sony_pic_ioport *io, *tmp_io;
2800 struct sony_pic_irq *irq, *tmp_irq; 3102 struct sony_pic_irq *irq, *tmp_irq;
2801 3103
2802 printk(KERN_INFO DRV_PFX "%s v%s.\n", 3104 pr_info(DRV_PFX "%s v%s.\n", SONY_PIC_DRIVER_NAME,
2803 SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); 3105 SONY_LAPTOP_DRIVER_VERSION);
2804 3106
2805 spic_dev.acpi_dev = device; 3107 spic_dev.acpi_dev = device;
2806 strcpy(acpi_device_class(device), "sony/hotkey"); 3108 strcpy(acpi_device_class(device), "sony/hotkey");
@@ -2810,16 +3112,14 @@ static int sony_pic_add(struct acpi_device *device)
2810 /* read _PRS resources */ 3112 /* read _PRS resources */
2811 result = sony_pic_possible_resources(device); 3113 result = sony_pic_possible_resources(device);
2812 if (result) { 3114 if (result) {
2813 printk(KERN_ERR DRV_PFX 3115 pr_err(DRV_PFX "Unable to read possible resources.\n");
2814 "Unable to read possible resources.\n");
2815 goto err_free_resources; 3116 goto err_free_resources;
2816 } 3117 }
2817 3118
2818 /* setup input devices and helper fifo */ 3119 /* setup input devices and helper fifo */
2819 result = sony_laptop_setup_input(device); 3120 result = sony_laptop_setup_input(device);
2820 if (result) { 3121 if (result) {
2821 printk(KERN_ERR DRV_PFX 3122 pr_err(DRV_PFX "Unable to create input devices.\n");
2822 "Unable to create input devices.\n");
2823 goto err_free_resources; 3123 goto err_free_resources;
2824 } 3124 }
2825 3125
@@ -2829,7 +3129,7 @@ static int sony_pic_add(struct acpi_device *device)
2829 /* request io port */ 3129 /* request io port */
2830 list_for_each_entry_reverse(io, &spic_dev.ioports, list) { 3130 list_for_each_entry_reverse(io, &spic_dev.ioports, list) {
2831 if (request_region(io->io1.minimum, io->io1.address_length, 3131 if (request_region(io->io1.minimum, io->io1.address_length,
2832 "Sony Programable I/O Device")) { 3132 "Sony Programmable I/O Device")) {
2833 dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", 3133 dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n",
2834 io->io1.minimum, io->io1.maximum, 3134 io->io1.minimum, io->io1.maximum,
2835 io->io1.address_length); 3135 io->io1.address_length);
@@ -2837,7 +3137,7 @@ static int sony_pic_add(struct acpi_device *device)
2837 if (io->io2.minimum) { 3137 if (io->io2.minimum) {
2838 if (request_region(io->io2.minimum, 3138 if (request_region(io->io2.minimum,
2839 io->io2.address_length, 3139 io->io2.address_length,
2840 "Sony Programable I/O Device")) { 3140 "Sony Programmable I/O Device")) {
2841 dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", 3141 dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n",
2842 io->io2.minimum, io->io2.maximum, 3142 io->io2.minimum, io->io2.maximum,
2843 io->io2.address_length); 3143 io->io2.address_length);
@@ -2860,7 +3160,7 @@ static int sony_pic_add(struct acpi_device *device)
2860 } 3160 }
2861 } 3161 }
2862 if (!spic_dev.cur_ioport) { 3162 if (!spic_dev.cur_ioport) {
2863 printk(KERN_ERR DRV_PFX "Failed to request_region.\n"); 3163 pr_err(DRV_PFX "Failed to request_region.\n");
2864 result = -ENODEV; 3164 result = -ENODEV;
2865 goto err_remove_compat; 3165 goto err_remove_compat;
2866 } 3166 }
@@ -2880,7 +3180,7 @@ static int sony_pic_add(struct acpi_device *device)
2880 } 3180 }
2881 } 3181 }
2882 if (!spic_dev.cur_irq) { 3182 if (!spic_dev.cur_irq) {
2883 printk(KERN_ERR DRV_PFX "Failed to request_irq.\n"); 3183 pr_err(DRV_PFX "Failed to request_irq.\n");
2884 result = -ENODEV; 3184 result = -ENODEV;
2885 goto err_release_region; 3185 goto err_release_region;
2886 } 3186 }
@@ -2888,7 +3188,7 @@ static int sony_pic_add(struct acpi_device *device)
2888 /* set resource status _SRS */ 3188 /* set resource status _SRS */
2889 result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); 3189 result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
2890 if (result) { 3190 if (result) {
2891 printk(KERN_ERR DRV_PFX "Couldn't enable device.\n"); 3191 pr_err(DRV_PFX "Couldn't enable device.\n");
2892 goto err_free_irq; 3192 goto err_free_irq;
2893 } 3193 }
2894 3194
@@ -2997,8 +3297,7 @@ static int __init sony_laptop_init(void)
2997 if (!no_spic && dmi_check_system(sonypi_dmi_table)) { 3297 if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
2998 result = acpi_bus_register_driver(&sony_pic_driver); 3298 result = acpi_bus_register_driver(&sony_pic_driver);
2999 if (result) { 3299 if (result) {
3000 printk(KERN_ERR DRV_PFX 3300 pr_err(DRV_PFX "Unable to register SPIC driver.");
3001 "Unable to register SPIC driver.");
3002 goto out; 3301 goto out;
3003 } 3302 }
3004 spic_drv_registered = 1; 3303 spic_drv_registered = 1;
@@ -3006,7 +3305,7 @@ static int __init sony_laptop_init(void)
3006 3305
3007 result = acpi_bus_register_driver(&sony_nc_driver); 3306 result = acpi_bus_register_driver(&sony_nc_driver);
3008 if (result) { 3307 if (result) {
3009 printk(KERN_ERR DRV_PFX "Unable to register SNC driver."); 3308 pr_err(DRV_PFX "Unable to register SNC driver.");
3010 goto out_unregister_pic; 3309 goto out_unregister_pic;
3011 } 3310 }
3012 3311
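
The sony-laptop changes above add two attributes, kbd_backlight (0/1) and kbd_backlight_timeout (0-3), on the sony platform device. A small user-space sketch for exercising them follows; the /sys/devices/platform/sony-laptop/ prefix is an assumption based on the usual sony_pf device name and should be verified on the target machine.

/*
 * Illustrative only.  Per the handlers added above, kbd_backlight
 * takes 0 (off) or 1 (on); kbd_backlight_timeout takes 0 (10s),
 * 1 (30s), 2 (60s) or 3 (never).  Run as root.
 */
#include <stdio.h>

#define SONY_PF "/sys/devices/platform/sony-laptop/"

static void write_attr(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), SONY_PF "%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return;
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	write_attr("kbd_backlight", "1");		/* enable */
	write_attr("kbd_backlight_timeout", "2");	/* 60 second timeout */
	return 0;
}
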
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 947bdcaa0ce9..a08561f5349e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2407,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2407 * This code is supposed to duplicate the IBM firmware behaviour: 2407 * This code is supposed to duplicate the IBM firmware behaviour:
2408 * - Pressing MUTE issues mute hotkey message, even when already mute 2408 * - Pressing MUTE issues mute hotkey message, even when already mute
2409 * - Pressing Volume up/down issues volume up/down hotkey messages, 2409 * - Pressing Volume up/down issues volume up/down hotkey messages,
2410 * even when already at maximum or minumum volume 2410 * even when already at maximum or minimum volume
2411 * - The act of unmuting issues volume up/down notification, 2411 * - The act of unmuting issues volume up/down notification,
2412 * depending which key was used to unmute 2412 * depending which key was used to unmute
2413 * 2413 *
@@ -2990,7 +2990,7 @@ static void tpacpi_send_radiosw_update(void)
2990 * rfkill input events, or we will race the rfkill core input 2990 * rfkill input events, or we will race the rfkill core input
2991 * handler. 2991 * handler.
2992 * 2992 *
2993 * tpacpi_inputdev_send_mutex works as a syncronization point 2993 * tpacpi_inputdev_send_mutex works as a synchronization point
2994 * for the above. 2994 * for the above.
2995 * 2995 *
2996 * We optimize to avoid numerous calls to hotkey_get_wlsw. 2996 * We optimize to avoid numerous calls to hotkey_get_wlsw.
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
new file mode 100644
index 000000000000..c1372ed9d2e9
--- /dev/null
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -0,0 +1,180 @@
1/*
2 * OLPC XO-1.5 ebook switch driver
3 * (based on generic ACPI button driver)
4 *
5 * Copyright (C) 2009 Paul Fox <pgf@laptop.org>
6 * Copyright (C) 2010 One Laptop per Child
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/types.h>
18#include <linux/input.h>
19#include <acpi/acpi_bus.h>
20#include <acpi/acpi_drivers.h>
21
22#define MODULE_NAME "xo15-ebook"
23#define PREFIX MODULE_NAME ": "
24
25#define XO15_EBOOK_CLASS MODULE_NAME
26#define XO15_EBOOK_TYPE_UNKNOWN 0x00
27#define XO15_EBOOK_NOTIFY_STATUS 0x80
28
29#define XO15_EBOOK_SUBCLASS "ebook"
30#define XO15_EBOOK_HID "XO15EBK"
31#define XO15_EBOOK_DEVICE_NAME "EBook Switch"
32
33ACPI_MODULE_NAME(MODULE_NAME);
34
35MODULE_DESCRIPTION("OLPC XO-1.5 ebook switch driver");
36MODULE_LICENSE("GPL");
37
38static const struct acpi_device_id ebook_device_ids[] = {
39 { XO15_EBOOK_HID, 0 },
40 { "", 0 },
41};
42MODULE_DEVICE_TABLE(acpi, ebook_device_ids);
43
44struct ebook_switch {
45 struct input_dev *input;
46 char phys[32]; /* for input device */
47};
48
49static int ebook_send_state(struct acpi_device *device)
50{
51 struct ebook_switch *button = acpi_driver_data(device);
52 unsigned long long state;
53 acpi_status status;
54
55 status = acpi_evaluate_integer(device->handle, "EBK", NULL, &state);
56 if (ACPI_FAILURE(status))
57 return -EIO;
58
59 /* input layer checks if event is redundant */
60 input_report_switch(button->input, SW_TABLET_MODE, !state);
61 input_sync(button->input);
62 return 0;
63}
64
65static void ebook_switch_notify(struct acpi_device *device, u32 event)
66{
67 switch (event) {
68 case ACPI_FIXED_HARDWARE_EVENT:
69 case XO15_EBOOK_NOTIFY_STATUS:
70 ebook_send_state(device);
71 break;
72 default:
73 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
74 "Unsupported event [0x%x]\n", event));
75 break;
76 }
77}
78
79static int ebook_switch_resume(struct acpi_device *device)
80{
81 return ebook_send_state(device);
82}
83
84static int ebook_switch_add(struct acpi_device *device)
85{
86 struct ebook_switch *button;
87 struct input_dev *input;
88 const char *hid = acpi_device_hid(device);
89 char *name, *class;
90 int error;
91
92 button = kzalloc(sizeof(struct ebook_switch), GFP_KERNEL);
93 if (!button)
94 return -ENOMEM;
95
96 device->driver_data = button;
97
98 button->input = input = input_allocate_device();
99 if (!input) {
100 error = -ENOMEM;
101 goto err_free_button;
102 }
103
104 name = acpi_device_name(device);
105 class = acpi_device_class(device);
106
107 if (strcmp(hid, XO15_EBOOK_HID)) {
108 printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
109 error = -ENODEV;
110 goto err_free_input;
111 }
112
113 strcpy(name, XO15_EBOOK_DEVICE_NAME);
114 sprintf(class, "%s/%s", XO15_EBOOK_CLASS, XO15_EBOOK_SUBCLASS);
115
116 snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid);
117
118 input->name = name;
119 input->phys = button->phys;
120 input->id.bustype = BUS_HOST;
121 input->dev.parent = &device->dev;
122
123 input->evbit[0] = BIT_MASK(EV_SW);
124 set_bit(SW_TABLET_MODE, input->swbit);
125
126 error = input_register_device(input);
127 if (error)
128 goto err_free_input;
129
130 ebook_send_state(device);
131
132 if (device->wakeup.flags.valid) {
133 /* Button's GPE is run-wake GPE */
134 acpi_enable_gpe(device->wakeup.gpe_device,
135 device->wakeup.gpe_number);
136 device_set_wakeup_enable(&device->dev, true);
137 }
138
139 return 0;
140
141 err_free_input:
142 input_free_device(input);
143 err_free_button:
144 kfree(button);
145 return error;
146}
147
148static int ebook_switch_remove(struct acpi_device *device, int type)
149{
150 struct ebook_switch *button = acpi_driver_data(device);
151
152 input_unregister_device(button->input);
153 kfree(button);
154 return 0;
155}
156
157static struct acpi_driver xo15_ebook_driver = {
158 .name = MODULE_NAME,
159 .class = XO15_EBOOK_CLASS,
160 .ids = ebook_device_ids,
161 .ops = {
162 .add = ebook_switch_add,
163 .resume = ebook_switch_resume,
164 .remove = ebook_switch_remove,
165 .notify = ebook_switch_notify,
166 },
167};
168
169static int __init xo15_ebook_init(void)
170{
171 return acpi_bus_register_driver(&xo15_ebook_driver);
172}
173
174static void __exit xo15_ebook_exit(void)
175{
176 acpi_bus_unregister_driver(&xo15_ebook_driver);
177}
178
179module_init(xo15_ebook_init);
180module_exit(xo15_ebook_exit);
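
xo15-ebook.c reports the ebook switch through the input layer as SW_TABLET_MODE on a device named "EBook Switch". A hedged user-space sketch querying the current state with the EVIOCGSW ioctl follows; /dev/input/event5 is only a placeholder and the node must be matched to the device by name on a real system.

/*
 * Illustrative only: read the SW_TABLET_MODE bit the driver above
 * reports.  Pick the event node whose EVIOCGNAME is "EBook Switch";
 * event5 is a placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/input.h>

#define BITS_PER_LONG_U (8 * sizeof(unsigned long))

int main(void)
{
	unsigned long sw[SW_MAX / BITS_PER_LONG_U + 1];
	int fd = open("/dev/input/event5", O_RDONLY);	/* placeholder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(sw, 0, sizeof(sw));
	if (ioctl(fd, EVIOCGSW(sizeof(sw)), sw) < 0) {
		perror("EVIOCGSW");
		close(fd);
		return 1;
	}
	printf("ebook mode: %s\n",
	       (sw[SW_TABLET_MODE / BITS_PER_LONG_U] >>
		(SW_TABLET_MODE % BITS_PER_LONG_U)) & 1 ? "closed/tablet" : "open");
	close(fd);
	return 0;
}
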
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ccc991c542df..57c3bb2884ce 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -136,9 +136,8 @@ struct btrfs_inode {
136 * items we think we'll end up using, and reserved_extents is the number 136 * items we think we'll end up using, and reserved_extents is the number
137 * of extent items we've reserved metadata for. 137 * of extent items we've reserved metadata for.
138 */ 138 */
139 spinlock_t accounting_lock;
140 atomic_t outstanding_extents; 139 atomic_t outstanding_extents;
141 int reserved_extents; 140 atomic_t reserved_extents;
142 141
143 /* 142 /*
144 * ordered_data_close is set by truncate when a file that used 143 * ordered_data_close is set by truncate when a file that used
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 4d2110eafe29..41d1d7c70e29 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -340,6 +340,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
340 340
341 WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); 341 WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
342 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); 342 cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
343 if (!cb)
344 return -ENOMEM;
343 atomic_set(&cb->pending_bios, 0); 345 atomic_set(&cb->pending_bios, 0);
344 cb->errors = 0; 346 cb->errors = 0;
345 cb->inode = inode; 347 cb->inode = inode;
@@ -354,6 +356,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
354 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; 356 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
355 357
356 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); 358 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
359 if(!bio) {
360 kfree(cb);
361 return -ENOMEM;
362 }
357 bio->bi_private = cb; 363 bio->bi_private = cb;
358 bio->bi_end_io = end_compressed_bio_write; 364 bio->bi_end_io = end_compressed_bio_write;
359 atomic_inc(&cb->pending_bios); 365 atomic_inc(&cb->pending_bios);
@@ -657,8 +663,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
657 atomic_inc(&cb->pending_bios); 663 atomic_inc(&cb->pending_bios);
658 664
659 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 665 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
660 btrfs_lookup_bio_sums(root, inode, comp_bio, 666 ret = btrfs_lookup_bio_sums(root, inode,
661 sums); 667 comp_bio, sums);
668 BUG_ON(ret);
662 } 669 }
663 sums += (comp_bio->bi_size + root->sectorsize - 1) / 670 sums += (comp_bio->bi_size + root->sectorsize - 1) /
664 root->sectorsize; 671 root->sectorsize;
@@ -683,8 +690,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
683 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); 690 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
684 BUG_ON(ret); 691 BUG_ON(ret);
685 692
686 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) 693 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
687 btrfs_lookup_bio_sums(root, inode, comp_bio, sums); 694 ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
695 BUG_ON(ret);
696 }
688 697
689 ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); 698 ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
690 BUG_ON(ret); 699 BUG_ON(ret);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b5baff0dccfe..84d7ca1fe0ba 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -147,10 +147,11 @@ noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
147struct extent_buffer *btrfs_root_node(struct btrfs_root *root) 147struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
148{ 148{
149 struct extent_buffer *eb; 149 struct extent_buffer *eb;
150 spin_lock(&root->node_lock); 150
151 eb = root->node; 151 rcu_read_lock();
152 eb = rcu_dereference(root->node);
152 extent_buffer_get(eb); 153 extent_buffer_get(eb);
153 spin_unlock(&root->node_lock); 154 rcu_read_unlock();
154 return eb; 155 return eb;
155} 156}
156 157
@@ -165,14 +166,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
165 while (1) { 166 while (1) {
166 eb = btrfs_root_node(root); 167 eb = btrfs_root_node(root);
167 btrfs_tree_lock(eb); 168 btrfs_tree_lock(eb);
168 169 if (eb == root->node)
169 spin_lock(&root->node_lock);
170 if (eb == root->node) {
171 spin_unlock(&root->node_lock);
172 break; 170 break;
173 }
174 spin_unlock(&root->node_lock);
175
176 btrfs_tree_unlock(eb); 171 btrfs_tree_unlock(eb);
177 free_extent_buffer(eb); 172 free_extent_buffer(eb);
178 } 173 }
@@ -458,10 +453,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
458 else 453 else
459 parent_start = 0; 454 parent_start = 0;
460 455
461 spin_lock(&root->node_lock);
462 root->node = cow;
463 extent_buffer_get(cow); 456 extent_buffer_get(cow);
464 spin_unlock(&root->node_lock); 457 rcu_assign_pointer(root->node, cow);
465 458
466 btrfs_free_tree_block(trans, root, buf, parent_start, 459 btrfs_free_tree_block(trans, root, buf, parent_start,
467 last_ref); 460 last_ref);
@@ -542,6 +535,9 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
542 535
543 ret = __btrfs_cow_block(trans, root, buf, parent, 536 ret = __btrfs_cow_block(trans, root, buf, parent,
544 parent_slot, cow_ret, search_start, 0); 537 parent_slot, cow_ret, search_start, 0);
538
539 trace_btrfs_cow_block(root, buf, *cow_ret);
540
545 return ret; 541 return ret;
546} 542}
547 543
@@ -686,6 +682,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
686 if (!cur) { 682 if (!cur) {
687 cur = read_tree_block(root, blocknr, 683 cur = read_tree_block(root, blocknr,
688 blocksize, gen); 684 blocksize, gen);
685 if (!cur)
686 return -EIO;
689 } else if (!uptodate) { 687 } else if (!uptodate) {
690 btrfs_read_buffer(cur, gen); 688 btrfs_read_buffer(cur, gen);
691 } 689 }
@@ -732,122 +730,6 @@ static inline unsigned int leaf_data_end(struct btrfs_root *root,
732 return btrfs_item_offset_nr(leaf, nr - 1); 730 return btrfs_item_offset_nr(leaf, nr - 1);
733} 731}
734 732
735/*
736 * extra debugging checks to make sure all the items in a key are
737 * well formed and in the proper order
738 */
739static int check_node(struct btrfs_root *root, struct btrfs_path *path,
740 int level)
741{
742 struct extent_buffer *parent = NULL;
743 struct extent_buffer *node = path->nodes[level];
744 struct btrfs_disk_key parent_key;
745 struct btrfs_disk_key node_key;
746 int parent_slot;
747 int slot;
748 struct btrfs_key cpukey;
749 u32 nritems = btrfs_header_nritems(node);
750
751 if (path->nodes[level + 1])
752 parent = path->nodes[level + 1];
753
754 slot = path->slots[level];
755 BUG_ON(nritems == 0);
756 if (parent) {
757 parent_slot = path->slots[level + 1];
758 btrfs_node_key(parent, &parent_key, parent_slot);
759 btrfs_node_key(node, &node_key, 0);
760 BUG_ON(memcmp(&parent_key, &node_key,
761 sizeof(struct btrfs_disk_key)));
762 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
763 btrfs_header_bytenr(node));
764 }
765 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
766 if (slot != 0) {
767 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
768 btrfs_node_key(node, &node_key, slot);
769 BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
770 }
771 if (slot < nritems - 1) {
772 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
773 btrfs_node_key(node, &node_key, slot);
774 BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
775 }
776 return 0;
777}
778
779/*
780 * extra checking to make sure all the items in a leaf are
781 * well formed and in the proper order
782 */
783static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
784 int level)
785{
786 struct extent_buffer *leaf = path->nodes[level];
787 struct extent_buffer *parent = NULL;
788 int parent_slot;
789 struct btrfs_key cpukey;
790 struct btrfs_disk_key parent_key;
791 struct btrfs_disk_key leaf_key;
792 int slot = path->slots[0];
793
794 u32 nritems = btrfs_header_nritems(leaf);
795
796 if (path->nodes[level + 1])
797 parent = path->nodes[level + 1];
798
799 if (nritems == 0)
800 return 0;
801
802 if (parent) {
803 parent_slot = path->slots[level + 1];
804 btrfs_node_key(parent, &parent_key, parent_slot);
805 btrfs_item_key(leaf, &leaf_key, 0);
806
807 BUG_ON(memcmp(&parent_key, &leaf_key,
808 sizeof(struct btrfs_disk_key)));
809 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
810 btrfs_header_bytenr(leaf));
811 }
812 if (slot != 0 && slot < nritems - 1) {
813 btrfs_item_key(leaf, &leaf_key, slot);
814 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
815 if (comp_keys(&leaf_key, &cpukey) <= 0) {
816 btrfs_print_leaf(root, leaf);
817 printk(KERN_CRIT "slot %d offset bad key\n", slot);
818 BUG_ON(1);
819 }
820 if (btrfs_item_offset_nr(leaf, slot - 1) !=
821 btrfs_item_end_nr(leaf, slot)) {
822 btrfs_print_leaf(root, leaf);
823 printk(KERN_CRIT "slot %d offset bad\n", slot);
824 BUG_ON(1);
825 }
826 }
827 if (slot < nritems - 1) {
828 btrfs_item_key(leaf, &leaf_key, slot);
829 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
830 BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
831 if (btrfs_item_offset_nr(leaf, slot) !=
832 btrfs_item_end_nr(leaf, slot + 1)) {
833 btrfs_print_leaf(root, leaf);
834 printk(KERN_CRIT "slot %d offset bad\n", slot);
835 BUG_ON(1);
836 }
837 }
838 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
839 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
840 return 0;
841}
842
843static noinline int check_block(struct btrfs_root *root,
844 struct btrfs_path *path, int level)
845{
846 return 0;
847 if (level == 0)
848 return check_leaf(root, path, level);
849 return check_node(root, path, level);
850}
851 733
852/* 734/*
853 * search for key in the extent_buffer. The items start at offset p, 735 * search for key in the extent_buffer. The items start at offset p,
@@ -1046,9 +928,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1046 goto enospc; 928 goto enospc;
1047 } 929 }
1048 930
1049 spin_lock(&root->node_lock); 931 rcu_assign_pointer(root->node, child);
1050 root->node = child;
1051 spin_unlock(&root->node_lock);
1052 932
1053 add_root_to_dirty_list(root); 933 add_root_to_dirty_list(root);
1054 btrfs_tree_unlock(child); 934 btrfs_tree_unlock(child);
@@ -1188,7 +1068,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1188 } 1068 }
1189 } 1069 }
1190 /* double check we haven't messed things up */ 1070 /* double check we haven't messed things up */
1191 check_block(root, path, level);
1192 if (orig_ptr != 1071 if (orig_ptr !=
1193 btrfs_node_blockptr(path->nodes[level], path->slots[level])) 1072 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1194 BUG(); 1073 BUG();
@@ -1798,12 +1677,6 @@ cow_done:
1798 if (!cow) 1677 if (!cow)
1799 btrfs_unlock_up_safe(p, level + 1); 1678 btrfs_unlock_up_safe(p, level + 1);
1800 1679
1801 ret = check_block(root, p, level);
1802 if (ret) {
1803 ret = -1;
1804 goto done;
1805 }
1806
1807 ret = bin_search(b, key, level, &slot); 1680 ret = bin_search(b, key, level, &slot);
1808 1681
1809 if (level != 0) { 1682 if (level != 0) {
@@ -2130,10 +2003,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2130 2003
2131 btrfs_mark_buffer_dirty(c); 2004 btrfs_mark_buffer_dirty(c);
2132 2005
2133 spin_lock(&root->node_lock);
2134 old = root->node; 2006 old = root->node;
2135 root->node = c; 2007 rcu_assign_pointer(root->node, c);
2136 spin_unlock(&root->node_lock);
2137 2008
2138 /* the super has an extra ref to root->node */ 2009 /* the super has an extra ref to root->node */
2139 free_extent_buffer(old); 2010 free_extent_buffer(old);
@@ -3840,7 +3711,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3840 unsigned long ptr; 3711 unsigned long ptr;
3841 3712
3842 path = btrfs_alloc_path(); 3713 path = btrfs_alloc_path();
3843 BUG_ON(!path); 3714 if (!path)
3715 return -ENOMEM;
3844 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 3716 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3845 if (!ret) { 3717 if (!ret) {
3846 leaf = path->nodes[0]; 3718 leaf = path->nodes[0];
@@ -4217,6 +4089,7 @@ find_next_key:
4217 } 4089 }
4218 btrfs_set_path_blocking(path); 4090 btrfs_set_path_blocking(path);
4219 cur = read_node_slot(root, cur, slot); 4091 cur = read_node_slot(root, cur, slot);
4092 BUG_ON(!cur);
4220 4093
4221 btrfs_tree_lock(cur); 4094 btrfs_tree_lock(cur);
4222 4095
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7f78cc78fdd0..d47ce8307854 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -28,6 +28,7 @@
28#include <linux/wait.h> 28#include <linux/wait.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/kobject.h> 30#include <linux/kobject.h>
31#include <trace/events/btrfs.h>
31#include <asm/kmap_types.h> 32#include <asm/kmap_types.h>
32#include "extent_io.h" 33#include "extent_io.h"
33#include "extent_map.h" 34#include "extent_map.h"
@@ -40,6 +41,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
40extern struct kmem_cache *btrfs_transaction_cachep; 41extern struct kmem_cache *btrfs_transaction_cachep;
41extern struct kmem_cache *btrfs_bit_radix_cachep; 42extern struct kmem_cache *btrfs_bit_radix_cachep;
42extern struct kmem_cache *btrfs_path_cachep; 43extern struct kmem_cache *btrfs_path_cachep;
44extern struct kmem_cache *btrfs_free_space_cachep;
43struct btrfs_ordered_sum; 45struct btrfs_ordered_sum;
44 46
45#define BTRFS_MAGIC "_BHRfS_M" 47#define BTRFS_MAGIC "_BHRfS_M"
@@ -782,9 +784,6 @@ struct btrfs_free_cluster {
782 /* first extent starting offset */ 784 /* first extent starting offset */
783 u64 window_start; 785 u64 window_start;
784 786
785 /* if this cluster simply points at a bitmap in the block group */
786 bool points_to_bitmap;
787
788 struct btrfs_block_group_cache *block_group; 787 struct btrfs_block_group_cache *block_group;
789 /* 788 /*
790 * when a cluster is allocated from a block group, we put the 789 * when a cluster is allocated from a block group, we put the
@@ -1283,6 +1282,7 @@ struct btrfs_root {
1283#define BTRFS_INODE_NODUMP (1 << 8) 1282#define BTRFS_INODE_NODUMP (1 << 8)
1284#define BTRFS_INODE_NOATIME (1 << 9) 1283#define BTRFS_INODE_NOATIME (1 << 9)
1285#define BTRFS_INODE_DIRSYNC (1 << 10) 1284#define BTRFS_INODE_DIRSYNC (1 << 10)
1285#define BTRFS_INODE_COMPRESS (1 << 11)
1286 1286
1287/* some macros to generate set/get funcs for the struct fields. This 1287/* some macros to generate set/get funcs for the struct fields. This
1288 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 1288 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
@@ -2157,6 +2157,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
2157 u64 root_objectid, u64 owner, u64 offset); 2157 u64 root_objectid, u64 owner, u64 offset);
2158 2158
2159int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); 2159int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
2160int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
2161 u64 num_bytes, int reserve, int sinfo);
2160int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 2162int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
2161 struct btrfs_root *root); 2163 struct btrfs_root *root);
2162int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 2164int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
@@ -2227,10 +2229,12 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
2227int btrfs_error_unpin_extent_range(struct btrfs_root *root, 2229int btrfs_error_unpin_extent_range(struct btrfs_root *root,
2228 u64 start, u64 end); 2230 u64 start, u64 end);
2229int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, 2231int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
2230 u64 num_bytes); 2232 u64 num_bytes, u64 *actual_bytes);
2231int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, 2233int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
2232 struct btrfs_root *root, u64 type); 2234 struct btrfs_root *root, u64 type);
2235int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
2233 2236
2237int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
2234/* ctree.c */ 2238/* ctree.c */
2235int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 2239int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
2236 int level, int *slot); 2240 int level, int *slot);
@@ -2392,6 +2396,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
2392 struct btrfs_path *path, u64 dir, 2396 struct btrfs_path *path, u64 dir,
2393 const char *name, u16 name_len, 2397 const char *name, u16 name_len,
2394 int mod); 2398 int mod);
2399int verify_dir_item(struct btrfs_root *root,
2400 struct extent_buffer *leaf,
2401 struct btrfs_dir_item *dir_item);
2395 2402
2396/* orphan.c */ 2403/* orphan.c */
2397int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, 2404int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -2528,7 +2535,7 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans,
2528 struct inode *inode); 2535 struct inode *inode);
2529int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); 2536int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
2530int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); 2537int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
2531void btrfs_orphan_cleanup(struct btrfs_root *root); 2538int btrfs_orphan_cleanup(struct btrfs_root *root);
2532void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans, 2539void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
2533 struct btrfs_pending_snapshot *pending, 2540 struct btrfs_pending_snapshot *pending,
2534 u64 *bytes_to_reserve); 2541 u64 *bytes_to_reserve);
@@ -2536,7 +2543,7 @@ void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
2536 struct btrfs_pending_snapshot *pending); 2543 struct btrfs_pending_snapshot *pending);
2537void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 2544void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2538 struct btrfs_root *root); 2545 struct btrfs_root *root);
2539int btrfs_cont_expand(struct inode *inode, loff_t size); 2546int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
2540int btrfs_invalidate_inodes(struct btrfs_root *root); 2547int btrfs_invalidate_inodes(struct btrfs_root *root);
2541void btrfs_add_delayed_iput(struct inode *inode); 2548void btrfs_add_delayed_iput(struct inode *inode);
2542void btrfs_run_delayed_iputs(struct btrfs_root *root); 2549void btrfs_run_delayed_iputs(struct btrfs_root *root);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index e807b143b857..bce28f653899 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -483,6 +483,8 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
483 INIT_LIST_HEAD(&head_ref->cluster); 483 INIT_LIST_HEAD(&head_ref->cluster);
484 mutex_init(&head_ref->mutex); 484 mutex_init(&head_ref->mutex);
485 485
486 trace_btrfs_delayed_ref_head(ref, head_ref, action);
487
486 existing = tree_insert(&delayed_refs->root, &ref->rb_node); 488 existing = tree_insert(&delayed_refs->root, &ref->rb_node);
487 489
488 if (existing) { 490 if (existing) {
@@ -537,6 +539,8 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
537 } 539 }
538 full_ref->level = level; 540 full_ref->level = level;
539 541
542 trace_btrfs_delayed_tree_ref(ref, full_ref, action);
543
540 existing = tree_insert(&delayed_refs->root, &ref->rb_node); 544 existing = tree_insert(&delayed_refs->root, &ref->rb_node);
541 545
542 if (existing) { 546 if (existing) {
@@ -591,6 +595,8 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
591 full_ref->objectid = owner; 595 full_ref->objectid = owner;
592 full_ref->offset = offset; 596 full_ref->offset = offset;
593 597
598 trace_btrfs_delayed_data_ref(ref, full_ref, action);
599
594 existing = tree_insert(&delayed_refs->root, &ref->rb_node); 600 existing = tree_insert(&delayed_refs->root, &ref->rb_node);
595 601
596 if (existing) { 602 if (existing) {
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index f0cad5ae5be7..c62f02f6ae69 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -151,7 +151,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
151 ret = PTR_ERR(dir_item); 151 ret = PTR_ERR(dir_item);
152 if (ret == -EEXIST) 152 if (ret == -EEXIST)
153 goto second_insert; 153 goto second_insert;
154 goto out; 154 goto out_free;
155 } 155 }
156 156
157 leaf = path->nodes[0]; 157 leaf = path->nodes[0];
@@ -170,7 +170,7 @@ second_insert:
170 /* FIXME, use some real flag for selecting the extra index */ 170 /* FIXME, use some real flag for selecting the extra index */
171 if (root == root->fs_info->tree_root) { 171 if (root == root->fs_info->tree_root) {
172 ret = 0; 172 ret = 0;
173 goto out; 173 goto out_free;
174 } 174 }
175 btrfs_release_path(root, path); 175 btrfs_release_path(root, path);
176 176
@@ -180,7 +180,7 @@ second_insert:
180 name, name_len); 180 name, name_len);
181 if (IS_ERR(dir_item)) { 181 if (IS_ERR(dir_item)) {
182 ret2 = PTR_ERR(dir_item); 182 ret2 = PTR_ERR(dir_item);
183 goto out; 183 goto out_free;
184 } 184 }
185 leaf = path->nodes[0]; 185 leaf = path->nodes[0];
186 btrfs_cpu_key_to_disk(&disk_key, location); 186 btrfs_cpu_key_to_disk(&disk_key, location);
@@ -192,7 +192,9 @@ second_insert:
192 name_ptr = (unsigned long)(dir_item + 1); 192 name_ptr = (unsigned long)(dir_item + 1);
193 write_extent_buffer(leaf, name, name_ptr, name_len); 193 write_extent_buffer(leaf, name, name_ptr, name_len);
194 btrfs_mark_buffer_dirty(leaf); 194 btrfs_mark_buffer_dirty(leaf);
195out: 195
196out_free:
197
196 btrfs_free_path(path); 198 btrfs_free_path(path);
197 if (ret) 199 if (ret)
198 return ret; 200 return ret;
@@ -377,6 +379,9 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
377 379
378 leaf = path->nodes[0]; 380 leaf = path->nodes[0];
379 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 381 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
382 if (verify_dir_item(root, leaf, dir_item))
383 return NULL;
384
380 total_len = btrfs_item_size_nr(leaf, path->slots[0]); 385 total_len = btrfs_item_size_nr(leaf, path->slots[0]);
381 while (cur < total_len) { 386 while (cur < total_len) {
382 this_len = sizeof(*dir_item) + 387 this_len = sizeof(*dir_item) +
@@ -429,3 +434,35 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
429 } 434 }
430 return ret; 435 return ret;
431} 436}
437
438int verify_dir_item(struct btrfs_root *root,
439 struct extent_buffer *leaf,
440 struct btrfs_dir_item *dir_item)
441{
442 u16 namelen = BTRFS_NAME_LEN;
443 u8 type = btrfs_dir_type(leaf, dir_item);
444
445 if (type >= BTRFS_FT_MAX) {
446 printk(KERN_CRIT "btrfs: invalid dir item type: %d\n",
447 (int)type);
448 return 1;
449 }
450
451 if (type == BTRFS_FT_XATTR)
452 namelen = XATTR_NAME_MAX;
453
454 if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
455 printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n",
456 (unsigned)btrfs_dir_name_len(leaf, dir_item));
457 return 1;
458 }
459
460 /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
461 if (btrfs_dir_data_len(leaf, dir_item) > BTRFS_MAX_XATTR_SIZE(root)) {
462 printk(KERN_CRIT "btrfs: invalid dir item data len: %u\n",
463 (unsigned)btrfs_dir_data_len(leaf, dir_item));
464 return 1;
465 }
466
467 return 0;
468}
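verify_dir_item() above is meant to run before any on-disk length from the item is trusted. A hedged sketch of a caller in that spirit — read_one_name() and its buffer handling are made up for the example; only the btrfs accessors it calls are real:

static int read_one_name(struct btrfs_root *root, struct extent_buffer *leaf,
                         struct btrfs_dir_item *di, char *buf, u16 buflen)
{
        u16 len;

        /* reject items whose type or lengths are out of range */
        if (verify_dir_item(root, leaf, di))
                return -EIO;

        len = btrfs_dir_name_len(leaf, di);
        if (len > buflen)
                return -ENAMETOOLONG;

        /* the name bytes follow the dir item header inside the leaf */
        read_extent_buffer(leaf, buf, (unsigned long)(di + 1), len);
        return len;
}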
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 830d261d0e6b..d7a7315bd031 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -29,6 +29,7 @@
29#include <linux/crc32c.h> 29#include <linux/crc32c.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/migrate.h> 31#include <linux/migrate.h>
32#include <asm/unaligned.h>
32#include "compat.h" 33#include "compat.h"
33#include "ctree.h" 34#include "ctree.h"
34#include "disk-io.h" 35#include "disk-io.h"
@@ -198,7 +199,7 @@ u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
198 199
199void btrfs_csum_final(u32 crc, char *result) 200void btrfs_csum_final(u32 crc, char *result)
200{ 201{
201 *(__le32 *)result = ~cpu_to_le32(crc); 202 put_unaligned_le32(~crc, result);
202} 203}
203 204
204/* 205/*
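The btrfs_csum_final() hunk above replaces a direct __le32 store with put_unaligned_le32() from <asm/unaligned.h>, because the result buffer is a plain byte array with no alignment guarantee and a 32-bit store can fault on strict-alignment CPUs. A minimal byte-wise equivalent, just to show what the helper has to achieve — store_le32_unaligned() is a made-up name:

/* store a 32-bit value little-endian one byte at a time; always legal */
static void store_le32_unaligned(unsigned int val, unsigned char *result)
{
        result[0] = val & 0xff;
        result[1] = (val >> 8) & 0xff;
        result[2] = (val >> 16) & 0xff;
        result[3] = (val >> 24) & 0xff;
}

On architectures that handle unaligned accesses in hardware, put_unaligned_le32() is expected to collapse back to a single store.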
@@ -323,6 +324,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
323 int num_copies = 0; 324 int num_copies = 0;
324 int mirror_num = 0; 325 int mirror_num = 0;
325 326
327 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
326 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; 328 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
327 while (1) { 329 while (1) {
328 ret = read_extent_buffer_pages(io_tree, eb, start, 1, 330 ret = read_extent_buffer_pages(io_tree, eb, start, 1,
@@ -331,6 +333,14 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
331 !verify_parent_transid(io_tree, eb, parent_transid)) 333 !verify_parent_transid(io_tree, eb, parent_transid))
332 return ret; 334 return ret;
333 335
336 /*
337 * This buffer's crc is fine, but its contents are corrupted, so
338 * there is no reason to read the other copies; they won't be
339 * any less wrong.
340 */
341 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
342 return ret;
343
334 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, 344 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
335 eb->start, eb->len); 345 eb->start, eb->len);
336 if (num_copies == 1) 346 if (num_copies == 1)
@@ -419,6 +429,73 @@ static int check_tree_block_fsid(struct btrfs_root *root,
419 return ret; 429 return ret;
420} 430}
421 431
432#define CORRUPT(reason, eb, root, slot) \
433 printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
434 "root=%llu, slot=%d\n", reason, \
435 (unsigned long long)btrfs_header_bytenr(eb), \
436 (unsigned long long)root->objectid, slot)
437
438static noinline int check_leaf(struct btrfs_root *root,
439 struct extent_buffer *leaf)
440{
441 struct btrfs_key key;
442 struct btrfs_key leaf_key;
443 u32 nritems = btrfs_header_nritems(leaf);
444 int slot;
445
446 if (nritems == 0)
447 return 0;
448
449 /* Check the 0 item */
450 if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
451 BTRFS_LEAF_DATA_SIZE(root)) {
452 CORRUPT("invalid item offset size pair", leaf, root, 0);
453 return -EIO;
454 }
455
456 /*
457 * Check to make sure each item's keys are in the correct order and their
458 * offsets make sense. We only have to loop through nritems-1 because
459 * we check the current slot against the next slot, which verifies the
460 * next slot's offset+size makes sense and that the current slot's
461 * offset is correct.
462 */
463 for (slot = 0; slot < nritems - 1; slot++) {
464 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
465 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
466
467 /* Make sure the keys are in the right order */
468 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
469 CORRUPT("bad key order", leaf, root, slot);
470 return -EIO;
471 }
472
473 /*
474 * Make sure the offsets and ends are right; remember that the
475 * item data starts at the end of the leaf and grows towards the
476 * front.
477 */
478 if (btrfs_item_offset_nr(leaf, slot) !=
479 btrfs_item_end_nr(leaf, slot + 1)) {
480 CORRUPT("slot offset bad", leaf, root, slot);
481 return -EIO;
482 }
483
484 /*
485 * Check to make sure that we don't point outside of the leaf,
486 * just in case all the items are consistent with each other, but
487 * all point outside of the leaf.
488 */
489 if (btrfs_item_end_nr(leaf, slot) >
490 BTRFS_LEAF_DATA_SIZE(root)) {
491 CORRUPT("slot end outside of leaf", leaf, root, slot);
492 return -EIO;
493 }
494 }
495
496 return 0;
497}
498
422#ifdef CONFIG_DEBUG_LOCK_ALLOC 499#ifdef CONFIG_DEBUG_LOCK_ALLOC
423void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level) 500void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
424{ 501{
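The new check_leaf() above encodes the btrfs leaf layout: item headers grow forward from the start of the leaf while item data is packed backwards from the end, so slot 0's data must end exactly at BTRFS_LEAF_DATA_SIZE and each slot's data must begin where the next slot's data ends. A toy restatement of that invariant with made-up structures (not the real accessors):

struct toy_item {
        unsigned int offset;    /* start of this item's data within the leaf */
        unsigned int size;      /* length of this item's data */
};

static int toy_check_leaf(const struct toy_item *item, int nritems,
                          unsigned int leaf_data_size)
{
        int i;

        if (nritems == 0)
                return 0;

        /* the first item's data must run right up to the end of the leaf */
        if (item[0].offset + item[0].size != leaf_data_size)
                return -1;

        /* each item's data must start exactly where the next item's data ends */
        for (i = 0; i < nritems - 1; i++) {
                if (item[i].offset != item[i + 1].offset + item[i + 1].size)
                        return -1;
        }
        return 0;
}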
@@ -485,8 +562,20 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
485 btrfs_set_buffer_lockdep_class(eb, found_level); 562 btrfs_set_buffer_lockdep_class(eb, found_level);
486 563
487 ret = csum_tree_block(root, eb, 1); 564 ret = csum_tree_block(root, eb, 1);
488 if (ret) 565 if (ret) {
489 ret = -EIO; 566 ret = -EIO;
567 goto err;
568 }
569
570 /*
571 * If this is a leaf block and it is corrupt, set the corrupt bit so
572 * that we don't try and read the other copies of this block, just
573 * return -EIO.
574 */
575 if (found_level == 0 && check_leaf(root, eb)) {
576 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
577 ret = -EIO;
578 }
490 579
491 end = min_t(u64, eb->len, PAGE_CACHE_SIZE); 580 end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
492 end = eb->start + end - 1; 581 end = eb->start + end - 1;
@@ -1159,7 +1248,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1159 root, fs_info, location->objectid); 1248 root, fs_info, location->objectid);
1160 1249
1161 path = btrfs_alloc_path(); 1250 path = btrfs_alloc_path();
1162 BUG_ON(!path); 1251 if (!path) {
1252 kfree(root);
1253 return ERR_PTR(-ENOMEM);
1254 }
1163 ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0); 1255 ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1164 if (ret == 0) { 1256 if (ret == 0) {
1165 l = path->nodes[0]; 1257 l = path->nodes[0];
@@ -1553,6 +1645,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1553 goto fail_bdi; 1645 goto fail_bdi;
1554 } 1646 }
1555 1647
1648 fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS;
1649
1556 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); 1650 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1557 INIT_LIST_HEAD(&fs_info->trans_list); 1651 INIT_LIST_HEAD(&fs_info->trans_list);
1558 INIT_LIST_HEAD(&fs_info->dead_roots); 1652 INIT_LIST_HEAD(&fs_info->dead_roots);
@@ -1683,6 +1777,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1683 1777
1684 btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); 1778 btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
1685 1779
1780 /*
1781 * In the long term, we'll store the compression type in the super
1782 * block, and it'll be used for per file compression control.
1783 */
1784 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
1785
1686 ret = btrfs_parse_options(tree_root, options); 1786 ret = btrfs_parse_options(tree_root, options);
1687 if (ret) { 1787 if (ret) {
1688 err = ret; 1788 err = ret;
@@ -1888,6 +1988,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1888 fs_info->metadata_alloc_profile = (u64)-1; 1988 fs_info->metadata_alloc_profile = (u64)-1;
1889 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; 1989 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1890 1990
1991 ret = btrfs_init_space_info(fs_info);
1992 if (ret) {
1993 printk(KERN_ERR "Failed to initial space info: %d\n", ret);
1994 goto fail_block_groups;
1995 }
1996
1891 ret = btrfs_read_block_groups(extent_root); 1997 ret = btrfs_read_block_groups(extent_root);
1892 if (ret) { 1998 if (ret) {
1893 printk(KERN_ERR "Failed to read block groups: %d\n", ret); 1999 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
@@ -1979,9 +2085,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1979 2085
1980 if (!(sb->s_flags & MS_RDONLY)) { 2086 if (!(sb->s_flags & MS_RDONLY)) {
1981 down_read(&fs_info->cleanup_work_sem); 2087 down_read(&fs_info->cleanup_work_sem);
1982 btrfs_orphan_cleanup(fs_info->fs_root); 2088 err = btrfs_orphan_cleanup(fs_info->fs_root);
1983 btrfs_orphan_cleanup(fs_info->tree_root); 2089 if (!err)
2090 err = btrfs_orphan_cleanup(fs_info->tree_root);
1984 up_read(&fs_info->cleanup_work_sem); 2091 up_read(&fs_info->cleanup_work_sem);
2092 if (err) {
2093 close_ctree(tree_root);
2094 return ERR_PTR(err);
2095 }
1985 } 2096 }
1986 2097
1987 return tree_root; 2098 return tree_root;
@@ -2356,8 +2467,12 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2356 2467
2357 root_objectid = gang[ret - 1]->root_key.objectid + 1; 2468 root_objectid = gang[ret - 1]->root_key.objectid + 1;
2358 for (i = 0; i < ret; i++) { 2469 for (i = 0; i < ret; i++) {
2470 int err;
2471
2359 root_objectid = gang[i]->root_key.objectid; 2472 root_objectid = gang[i]->root_key.objectid;
2360 btrfs_orphan_cleanup(gang[i]); 2473 err = btrfs_orphan_cleanup(gang[i]);
2474 if (err)
2475 return err;
2361 } 2476 }
2362 root_objectid++; 2477 root_objectid++;
2363 } 2478 }
@@ -2868,7 +2983,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
2868 break; 2983 break;
2869 2984
2870 /* opt_discard */ 2985 /* opt_discard */
2871 ret = btrfs_error_discard_extent(root, start, end + 1 - start); 2986 if (btrfs_test_opt(root, DISCARD))
2987 ret = btrfs_error_discard_extent(root, start,
2988 end + 1 - start,
2989 NULL);
2872 2990
2873 clear_extent_dirty(unpin, start, end, GFP_NOFS); 2991 clear_extent_dirty(unpin, start, end, GFP_NOFS);
2874 btrfs_error_unpin_extent_range(root, start, end); 2992 btrfs_error_unpin_extent_range(root, start, end);
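Taken together, the disk-io.c changes set EXTENT_BUFFER_CORRUPT in the read-completion hook when the checksum matches but the leaf fails the sanity checks, and btree_read_extent_buffer_pages() then stops cycling through mirrors, since every copy would carry the same bad contents. A simplified sketch of that control flow — read_one_copy() is a stand-in for the real read-plus-verification path, not an existing function:

static int read_one_copy(struct extent_buffer *eb, int mirror);

static int read_with_mirrors(struct extent_buffer *eb, int num_copies)
{
        int mirror;

        for (mirror = 1; mirror <= num_copies; mirror++) {
                if (read_one_copy(eb, mirror) == 0)
                        return 0;       /* found a good copy */

                /*
                 * csum was fine but the contents failed validation: the
                 * other mirrors are byte-identical, so retrying is pointless.
                 */
                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                        return -EIO;
        }
        return -EIO;
}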
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b3089b5c2df..f619c3cb13b7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -36,8 +36,6 @@
36static int update_block_group(struct btrfs_trans_handle *trans, 36static int update_block_group(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root, 37 struct btrfs_root *root,
38 u64 bytenr, u64 num_bytes, int alloc); 38 u64 bytenr, u64 num_bytes, int alloc);
39static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
40 u64 num_bytes, int reserve, int sinfo);
41static int __btrfs_free_extent(struct btrfs_trans_handle *trans, 39static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
42 struct btrfs_root *root, 40 struct btrfs_root *root,
43 u64 bytenr, u64 num_bytes, u64 parent, 41 u64 bytenr, u64 num_bytes, u64 parent,
@@ -442,7 +440,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
442 * allocate blocks for the tree root we can't do the fast caching since 440 * allocate blocks for the tree root we can't do the fast caching since
443 * we likely hold important locks. 441 * we likely hold important locks.
444 */ 442 */
445 if (!trans->transaction->in_commit && 443 if (trans && (!trans->transaction->in_commit) &&
446 (root && root != root->fs_info->tree_root)) { 444 (root && root != root->fs_info->tree_root)) {
447 spin_lock(&cache->lock); 445 spin_lock(&cache->lock);
448 if (cache->cached != BTRFS_CACHE_NO) { 446 if (cache->cached != BTRFS_CACHE_NO) {
@@ -471,7 +469,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
471 if (load_cache_only) 469 if (load_cache_only)
472 return 0; 470 return 0;
473 471
474 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL); 472 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
475 BUG_ON(!caching_ctl); 473 BUG_ON(!caching_ctl);
476 474
477 INIT_LIST_HEAD(&caching_ctl->list); 475 INIT_LIST_HEAD(&caching_ctl->list);
@@ -1740,39 +1738,45 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
1740 return ret; 1738 return ret;
1741} 1739}
1742 1740
1743static void btrfs_issue_discard(struct block_device *bdev, 1741static int btrfs_issue_discard(struct block_device *bdev,
1744 u64 start, u64 len) 1742 u64 start, u64 len)
1745{ 1743{
1746 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0); 1744 return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1747} 1745}
1748 1746
1749static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, 1747static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1750 u64 num_bytes) 1748 u64 num_bytes, u64 *actual_bytes)
1751{ 1749{
1752 int ret; 1750 int ret;
1753 u64 map_length = num_bytes; 1751 u64 discarded_bytes = 0;
1754 struct btrfs_multi_bio *multi = NULL; 1752 struct btrfs_multi_bio *multi = NULL;
1755 1753
1756 if (!btrfs_test_opt(root, DISCARD))
1757 return 0;
1758 1754
1759 /* Tell the block device(s) that the sectors can be discarded */ 1755 /* Tell the block device(s) that the sectors can be discarded */
1760 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ, 1756 ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1761 bytenr, &map_length, &multi, 0); 1757 bytenr, &num_bytes, &multi, 0);
1762 if (!ret) { 1758 if (!ret) {
1763 struct btrfs_bio_stripe *stripe = multi->stripes; 1759 struct btrfs_bio_stripe *stripe = multi->stripes;
1764 int i; 1760 int i;
1765 1761
1766 if (map_length > num_bytes)
1767 map_length = num_bytes;
1768 1762
1769 for (i = 0; i < multi->num_stripes; i++, stripe++) { 1763 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1770 btrfs_issue_discard(stripe->dev->bdev, 1764 ret = btrfs_issue_discard(stripe->dev->bdev,
1771 stripe->physical, 1765 stripe->physical,
1772 map_length); 1766 stripe->length);
1767 if (!ret)
1768 discarded_bytes += stripe->length;
1769 else if (ret != -EOPNOTSUPP)
1770 break;
1773 } 1771 }
1774 kfree(multi); 1772 kfree(multi);
1775 } 1773 }
1774 if (discarded_bytes && ret == -EOPNOTSUPP)
1775 ret = 0;
1776
1777 if (actual_bytes)
1778 *actual_bytes = discarded_bytes;
1779
1776 1780
1777 return ret; 1781 return ret;
1778} 1782}
@@ -3996,6 +4000,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3996 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; 4000 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3997 u64 to_reserve; 4001 u64 to_reserve;
3998 int nr_extents; 4002 int nr_extents;
4003 int reserved_extents;
3999 int ret; 4004 int ret;
4000 4005
4001 if (btrfs_transaction_in_commit(root->fs_info)) 4006 if (btrfs_transaction_in_commit(root->fs_info))
@@ -4003,25 +4008,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4003 4008
4004 num_bytes = ALIGN(num_bytes, root->sectorsize); 4009 num_bytes = ALIGN(num_bytes, root->sectorsize);
4005 4010
4006 spin_lock(&BTRFS_I(inode)->accounting_lock);
4007 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; 4011 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
4008 if (nr_extents > BTRFS_I(inode)->reserved_extents) { 4012 reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
4009 nr_extents -= BTRFS_I(inode)->reserved_extents; 4013
4014 if (nr_extents > reserved_extents) {
4015 nr_extents -= reserved_extents;
4010 to_reserve = calc_trans_metadata_size(root, nr_extents); 4016 to_reserve = calc_trans_metadata_size(root, nr_extents);
4011 } else { 4017 } else {
4012 nr_extents = 0; 4018 nr_extents = 0;
4013 to_reserve = 0; 4019 to_reserve = 0;
4014 } 4020 }
4015 spin_unlock(&BTRFS_I(inode)->accounting_lock); 4021
4016 to_reserve += calc_csum_metadata_size(inode, num_bytes); 4022 to_reserve += calc_csum_metadata_size(inode, num_bytes);
4017 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); 4023 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
4018 if (ret) 4024 if (ret)
4019 return ret; 4025 return ret;
4020 4026
4021 spin_lock(&BTRFS_I(inode)->accounting_lock); 4027 atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
4022 BTRFS_I(inode)->reserved_extents += nr_extents;
4023 atomic_inc(&BTRFS_I(inode)->outstanding_extents); 4028 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
4024 spin_unlock(&BTRFS_I(inode)->accounting_lock);
4025 4029
4026 block_rsv_add_bytes(block_rsv, to_reserve, 1); 4030 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4027 4031
@@ -4036,20 +4040,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4036 struct btrfs_root *root = BTRFS_I(inode)->root; 4040 struct btrfs_root *root = BTRFS_I(inode)->root;
4037 u64 to_free; 4041 u64 to_free;
4038 int nr_extents; 4042 int nr_extents;
4043 int reserved_extents;
4039 4044
4040 num_bytes = ALIGN(num_bytes, root->sectorsize); 4045 num_bytes = ALIGN(num_bytes, root->sectorsize);
4041 atomic_dec(&BTRFS_I(inode)->outstanding_extents); 4046 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
4042 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); 4047 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
4043 4048
4044 spin_lock(&BTRFS_I(inode)->accounting_lock); 4049 reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
4045 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); 4050 do {
4046 if (nr_extents < BTRFS_I(inode)->reserved_extents) { 4051 int old, new;
4047 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents; 4052
4048 BTRFS_I(inode)->reserved_extents -= nr_extents; 4053 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
4049 } else { 4054 if (nr_extents >= reserved_extents) {
4050 nr_extents = 0; 4055 nr_extents = 0;
4051 } 4056 break;
4052 spin_unlock(&BTRFS_I(inode)->accounting_lock); 4057 }
4058 old = reserved_extents;
4059 nr_extents = reserved_extents - nr_extents;
4060 new = reserved_extents - nr_extents;
4061 old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
4062 reserved_extents, new);
4063 if (likely(old == reserved_extents))
4064 break;
4065 reserved_extents = old;
4066 } while (1);
4053 4067
4054 to_free = calc_csum_metadata_size(inode, num_bytes); 4068 to_free = calc_csum_metadata_size(inode, num_bytes);
4055 if (nr_extents > 0) 4069 if (nr_extents > 0)
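The reservation hunks above drop accounting_lock in favour of atomic reads plus an atomic_cmpxchg() retry loop on reserved_extents. The general shape of that lockless update, as a standalone sketch that assumes the kernel atomic_t API (sub_reserved() itself is made up):

static void sub_reserved(atomic_t *reserved, int to_drop)
{
        int old, cur;

        cur = atomic_read(reserved);
        do {
                old = cur;
                /* returns the value seen in memory; equal to old on success */
                cur = atomic_cmpxchg(reserved, old, old - to_drop);
        } while (cur != old);   /* lost a race: retry against the new value */
}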
@@ -4223,8 +4237,8 @@ int btrfs_pin_extent(struct btrfs_root *root,
4223 * update size of reserved extents. this function may return -EAGAIN 4237 * update size of reserved extents. this function may return -EAGAIN
4224 * if 'reserve' is true or 'sinfo' is false. 4238 * if 'reserve' is true or 'sinfo' is false.
4225 */ 4239 */
4226static int update_reserved_bytes(struct btrfs_block_group_cache *cache, 4240int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4227 u64 num_bytes, int reserve, int sinfo) 4241 u64 num_bytes, int reserve, int sinfo)
4228{ 4242{
4229 int ret = 0; 4243 int ret = 0;
4230 if (sinfo) { 4244 if (sinfo) {
@@ -4363,7 +4377,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4363 if (ret) 4377 if (ret)
4364 break; 4378 break;
4365 4379
4366 ret = btrfs_discard_extent(root, start, end + 1 - start); 4380 if (btrfs_test_opt(root, DISCARD))
4381 ret = btrfs_discard_extent(root, start,
4382 end + 1 - start, NULL);
4367 4383
4368 clear_extent_dirty(unpin, start, end, GFP_NOFS); 4384 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4369 unpin_extent_range(root, start, end); 4385 unpin_extent_range(root, start, end);
@@ -4704,10 +4720,10 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4704 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 4720 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4705 4721
4706 btrfs_add_free_space(cache, buf->start, buf->len); 4722 btrfs_add_free_space(cache, buf->start, buf->len);
4707 ret = update_reserved_bytes(cache, buf->len, 0, 0); 4723 ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
4708 if (ret == -EAGAIN) { 4724 if (ret == -EAGAIN) {
4709 /* block group became read-only */ 4725 /* block group became read-only */
4710 update_reserved_bytes(cache, buf->len, 0, 1); 4726 btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
4711 goto out; 4727 goto out;
4712 } 4728 }
4713 4729
@@ -4744,6 +4760,11 @@ pin:
4744 } 4760 }
4745 } 4761 }
4746out: 4762out:
4763 /*
4764 * We're deleting the buffer; clear the corrupt flag since it doesn't matter
4765 * anymore.
4766 */
4767 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
4747 btrfs_put_block_group(cache); 4768 btrfs_put_block_group(cache);
4748} 4769}
4749 4770
@@ -5191,7 +5212,7 @@ checks:
5191 search_start - offset); 5212 search_start - offset);
5192 BUG_ON(offset > search_start); 5213 BUG_ON(offset > search_start);
5193 5214
5194 ret = update_reserved_bytes(block_group, num_bytes, 1, 5215 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
5195 (data & BTRFS_BLOCK_GROUP_DATA)); 5216 (data & BTRFS_BLOCK_GROUP_DATA));
5196 if (ret == -EAGAIN) { 5217 if (ret == -EAGAIN) {
5197 btrfs_add_free_space(block_group, offset, num_bytes); 5218 btrfs_add_free_space(block_group, offset, num_bytes);
@@ -5397,6 +5418,8 @@ again:
5397 dump_space_info(sinfo, num_bytes, 1); 5418 dump_space_info(sinfo, num_bytes, 1);
5398 } 5419 }
5399 5420
5421 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5422
5400 return ret; 5423 return ret;
5401} 5424}
5402 5425
@@ -5412,12 +5435,15 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5412 return -ENOSPC; 5435 return -ENOSPC;
5413 } 5436 }
5414 5437
5415 ret = btrfs_discard_extent(root, start, len); 5438 if (btrfs_test_opt(root, DISCARD))
5439 ret = btrfs_discard_extent(root, start, len, NULL);
5416 5440
5417 btrfs_add_free_space(cache, start, len); 5441 btrfs_add_free_space(cache, start, len);
5418 update_reserved_bytes(cache, len, 0, 1); 5442 btrfs_update_reserved_bytes(cache, len, 0, 1);
5419 btrfs_put_block_group(cache); 5443 btrfs_put_block_group(cache);
5420 5444
5445 trace_btrfs_reserved_extent_free(root, start, len);
5446
5421 return ret; 5447 return ret;
5422} 5448}
5423 5449
@@ -5444,7 +5470,8 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5444 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); 5470 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5445 5471
5446 path = btrfs_alloc_path(); 5472 path = btrfs_alloc_path();
5447 BUG_ON(!path); 5473 if (!path)
5474 return -ENOMEM;
5448 5475
5449 path->leave_spinning = 1; 5476 path->leave_spinning = 1;
5450 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 5477 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -5614,7 +5641,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5614 put_caching_control(caching_ctl); 5641 put_caching_control(caching_ctl);
5615 } 5642 }
5616 5643
5617 ret = update_reserved_bytes(block_group, ins->offset, 1, 1); 5644 ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
5618 BUG_ON(ret); 5645 BUG_ON(ret);
5619 btrfs_put_block_group(block_group); 5646 btrfs_put_block_group(block_group);
5620 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 5647 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
@@ -6047,6 +6074,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6047 if (reada && level == 1) 6074 if (reada && level == 1)
6048 reada_walk_down(trans, root, wc, path); 6075 reada_walk_down(trans, root, wc, path);
6049 next = read_tree_block(root, bytenr, blocksize, generation); 6076 next = read_tree_block(root, bytenr, blocksize, generation);
6077 if (!next)
6078 return -EIO;
6050 btrfs_tree_lock(next); 6079 btrfs_tree_lock(next);
6051 btrfs_set_lock_blocking(next); 6080 btrfs_set_lock_blocking(next);
6052 } 6081 }
@@ -6438,10 +6467,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6438 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 6467 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6439 6468
6440 path = btrfs_alloc_path(); 6469 path = btrfs_alloc_path();
6441 BUG_ON(!path); 6470 if (!path)
6471 return -ENOMEM;
6442 6472
6443 wc = kzalloc(sizeof(*wc), GFP_NOFS); 6473 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6444 BUG_ON(!wc); 6474 if (!wc) {
6475 btrfs_free_path(path);
6476 return -ENOMEM;
6477 }
6445 6478
6446 btrfs_assert_tree_locked(parent); 6479 btrfs_assert_tree_locked(parent);
6447 parent_level = btrfs_header_level(parent); 6480 parent_level = btrfs_header_level(parent);
@@ -6899,7 +6932,11 @@ static noinline int get_new_locations(struct inode *reloc_inode,
6899 } 6932 }
6900 6933
6901 path = btrfs_alloc_path(); 6934 path = btrfs_alloc_path();
6902 BUG_ON(!path); 6935 if (!path) {
6936 if (exts != *extents)
6937 kfree(exts);
6938 return -ENOMEM;
6939 }
6903 6940
6904 cur_pos = extent_key->objectid - offset; 6941 cur_pos = extent_key->objectid - offset;
6905 last_byte = extent_key->objectid + extent_key->offset; 6942 last_byte = extent_key->objectid + extent_key->offset;
@@ -6941,6 +6978,10 @@ static noinline int get_new_locations(struct inode *reloc_inode,
6941 struct disk_extent *old = exts; 6978 struct disk_extent *old = exts;
6942 max *= 2; 6979 max *= 2;
6943 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); 6980 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
6981 if (!exts) {
6982 ret = -ENOMEM;
6983 goto out;
6984 }
6944 memcpy(exts, old, sizeof(*exts) * nr); 6985 memcpy(exts, old, sizeof(*exts) * nr);
6945 if (old != *extents) 6986 if (old != *extents)
6946 kfree(old); 6987 kfree(old);
@@ -7423,7 +7464,8 @@ static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
7423 int ret; 7464 int ret;
7424 7465
7425 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); 7466 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
7426 BUG_ON(!new_extent); 7467 if (!new_extent)
7468 return -ENOMEM;
7427 7469
7428 ref = btrfs_lookup_leaf_ref(root, leaf->start); 7470 ref = btrfs_lookup_leaf_ref(root, leaf->start);
7429 BUG_ON(!ref); 7471 BUG_ON(!ref);
@@ -7609,7 +7651,8 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
7609 7651
7610 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); 7652 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
7611 BUG_ON(!reloc_root); 7653 BUG_ON(!reloc_root);
7612 btrfs_orphan_cleanup(reloc_root); 7654 ret = btrfs_orphan_cleanup(reloc_root);
7655 BUG_ON(ret);
7613 return 0; 7656 return 0;
7614} 7657}
7615 7658
@@ -7627,7 +7670,8 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
7627 return 0; 7670 return 0;
7628 7671
7629 root_item = kmalloc(sizeof(*root_item), GFP_NOFS); 7672 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
7630 BUG_ON(!root_item); 7673 if (!root_item)
7674 return -ENOMEM;
7631 7675
7632 ret = btrfs_copy_root(trans, root, root->commit_root, 7676 ret = btrfs_copy_root(trans, root, root->commit_root,
7633 &eb, BTRFS_TREE_RELOC_OBJECTID); 7677 &eb, BTRFS_TREE_RELOC_OBJECTID);
@@ -7653,7 +7697,7 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
7653 7697
7654 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, 7698 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
7655 &root_key); 7699 &root_key);
7656 BUG_ON(!reloc_root); 7700 BUG_ON(IS_ERR(reloc_root));
7657 reloc_root->last_trans = trans->transid; 7701 reloc_root->last_trans = trans->transid;
7658 reloc_root->commit_root = NULL; 7702 reloc_root->commit_root = NULL;
7659 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; 7703 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
@@ -7906,6 +7950,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7906 7950
7907 eb = read_tree_block(found_root, block_start, 7951 eb = read_tree_block(found_root, block_start,
7908 block_size, 0); 7952 block_size, 0);
7953 if (!eb) {
7954 ret = -EIO;
7955 goto out;
7956 }
7909 btrfs_tree_lock(eb); 7957 btrfs_tree_lock(eb);
7910 BUG_ON(level != btrfs_header_level(eb)); 7958 BUG_ON(level != btrfs_header_level(eb));
7911 7959
@@ -8621,6 +8669,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8621 BUG_ON(!block_group); 8669 BUG_ON(!block_group);
8622 BUG_ON(!block_group->ro); 8670 BUG_ON(!block_group->ro);
8623 8671
8672 /*
8673 * Free the reserved super bytes from this block group before
8674 * removing it.
8675 */
8676 free_excluded_extents(root, block_group);
8677
8624 memcpy(&key, &block_group->key, sizeof(key)); 8678 memcpy(&key, &block_group->key, sizeof(key));
8625 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP | 8679 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8626 BTRFS_BLOCK_GROUP_RAID1 | 8680 BTRFS_BLOCK_GROUP_RAID1 |
@@ -8724,13 +8778,84 @@ out:
8724 return ret; 8778 return ret;
8725} 8779}
8726 8780
8781int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8782{
8783 struct btrfs_space_info *space_info;
8784 int ret;
8785
8786 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0,
8787 &space_info);
8788 if (ret)
8789 return ret;
8790
8791 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0,
8792 &space_info);
8793 if (ret)
8794 return ret;
8795
8796 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0,
8797 &space_info);
8798 if (ret)
8799 return ret;
8800
8801 return ret;
8802}
8803
8727int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 8804int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8728{ 8805{
8729 return unpin_extent_range(root, start, end); 8806 return unpin_extent_range(root, start, end);
8730} 8807}
8731 8808
8732int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, 8809int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8733 u64 num_bytes) 8810 u64 num_bytes, u64 *actual_bytes)
8811{
8812 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8813}
8814
8815int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8734{ 8816{
8735 return btrfs_discard_extent(root, bytenr, num_bytes); 8817 struct btrfs_fs_info *fs_info = root->fs_info;
8818 struct btrfs_block_group_cache *cache = NULL;
8819 u64 group_trimmed;
8820 u64 start;
8821 u64 end;
8822 u64 trimmed = 0;
8823 int ret = 0;
8824
8825 cache = btrfs_lookup_block_group(fs_info, range->start);
8826
8827 while (cache) {
8828 if (cache->key.objectid >= (range->start + range->len)) {
8829 btrfs_put_block_group(cache);
8830 break;
8831 }
8832
8833 start = max(range->start, cache->key.objectid);
8834 end = min(range->start + range->len,
8835 cache->key.objectid + cache->key.offset);
8836
8837 if (end - start >= range->minlen) {
8838 if (!block_group_cache_done(cache)) {
8839 ret = cache_block_group(cache, NULL, root, 0);
8840 if (!ret)
8841 wait_block_group_cache_done(cache);
8842 }
8843 ret = btrfs_trim_block_group(cache,
8844 &group_trimmed,
8845 start,
8846 end,
8847 range->minlen);
8848
8849 trimmed += group_trimmed;
8850 if (ret) {
8851 btrfs_put_block_group(cache);
8852 break;
8853 }
8854 }
8855
8856 cache = next_block_group(fs_info->tree_root, cache);
8857 }
8858
8859 range->len = trimmed;
8860 return ret;
8736} 8861}
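btrfs_trim_fs() above walks the block groups and fills range->len with the number of bytes actually trimmed. Assuming it is wired up to the generic FITRIM ioctl (the ioctl plumbing is not part of this diff), userspace could drive it roughly like this standalone example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
        struct fstrim_range range;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
                return 1;
        }

        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&range, 0, sizeof(range));
        range.len = (unsigned long long)-1;     /* trim the whole filesystem */
        range.minlen = 0;                       /* no minimum extent length */

        if (ioctl(fd, FITRIM, &range) < 0) {
                perror("FITRIM");
                close(fd);
                return 1;
        }

        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
}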
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b5b92824a271..20ddb28602a8 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2192,6 +2192,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2192 else 2192 else
2193 write_flags = WRITE; 2193 write_flags = WRITE;
2194 2194
2195 trace___extent_writepage(page, inode, wbc);
2196
2195 WARN_ON(!PageLocked(page)); 2197 WARN_ON(!PageLocked(page));
2196 pg_offset = i_size & (PAGE_CACHE_SIZE - 1); 2198 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2197 if (page->index > end_index || 2199 if (page->index > end_index ||
@@ -3690,6 +3692,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3690 "wanted %lu %lu\n", (unsigned long long)eb->start, 3692 "wanted %lu %lu\n", (unsigned long long)eb->start,
3691 eb->len, start, min_len); 3693 eb->len, start, min_len);
3692 WARN_ON(1); 3694 WARN_ON(1);
3695 return -EINVAL;
3693 } 3696 }
3694 3697
3695 p = extent_buffer_page(eb, i); 3698 p = extent_buffer_page(eb, i);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 9318dfefd59c..f62c5442835d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -31,6 +31,7 @@
31#define EXTENT_BUFFER_UPTODATE 0 31#define EXTENT_BUFFER_UPTODATE 0
32#define EXTENT_BUFFER_BLOCKING 1 32#define EXTENT_BUFFER_BLOCKING 1
33#define EXTENT_BUFFER_DIRTY 2 33#define EXTENT_BUFFER_DIRTY 2
34#define EXTENT_BUFFER_CORRUPT 3
34 35
35/* these are flags for extent_clear_unlock_delalloc */ 36/* these are flags for extent_clear_unlock_delalloc */
36#define EXTENT_CLEAR_UNLOCK_PAGE 0x1 37#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 4f19a3e1bf32..a6a9d4e8b491 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -48,7 +48,8 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
48 struct extent_buffer *leaf; 48 struct extent_buffer *leaf;
49 49
50 path = btrfs_alloc_path(); 50 path = btrfs_alloc_path();
51 BUG_ON(!path); 51 if (!path)
52 return -ENOMEM;
52 file_key.objectid = objectid; 53 file_key.objectid = objectid;
53 file_key.offset = pos; 54 file_key.offset = pos;
54 btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); 55 btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
@@ -169,6 +170,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
169 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 170 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
170 171
171 path = btrfs_alloc_path(); 172 path = btrfs_alloc_path();
173 if (!path)
174 return -ENOMEM;
172 if (bio->bi_size > PAGE_CACHE_SIZE * 8) 175 if (bio->bi_size > PAGE_CACHE_SIZE * 8)
173 path->reada = 2; 176 path->reada = 2;
174 177
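Several hunks in this series (btrfs_insert_item, btrfs_insert_file_extent, __btrfs_lookup_bio_sums, and others) converge on the same shape: treat btrfs_alloc_path() failure as -ENOMEM instead of BUG_ON(!path), and free the path on every exit. A hedged sketch of that pattern with a made-up caller — only the btrfs helpers it calls are real:

static int example_lookup(struct btrfs_root *root, struct btrfs_key *key)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;         /* propagate instead of BUG_ON */

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        /* ... inspect path->nodes[0] / path->slots[0] here ... */

        btrfs_free_path(path);          /* released on every return path */
        return ret;
}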
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f447b783bb84..656bc0a892b1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -45,14 +45,14 @@
45 * and be replaced with calls into generic code. 45 * and be replaced with calls into generic code.
46 */ 46 */
47static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, 47static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
48 int write_bytes, 48 size_t write_bytes,
49 struct page **prepared_pages, 49 struct page **prepared_pages,
50 struct iov_iter *i) 50 struct iov_iter *i)
51{ 51{
52 size_t copied = 0; 52 size_t copied = 0;
53 size_t total_copied = 0;
53 int pg = 0; 54 int pg = 0;
54 int offset = pos & (PAGE_CACHE_SIZE - 1); 55 int offset = pos & (PAGE_CACHE_SIZE - 1);
55 int total_copied = 0;
56 56
57 while (write_bytes > 0) { 57 while (write_bytes > 0) {
58 size_t count = min_t(size_t, 58 size_t count = min_t(size_t,
@@ -88,9 +88,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
88 total_copied += copied; 88 total_copied += copied;
89 89
90 /* Return to btrfs_file_aio_write to fault page */ 90 /* Return to btrfs_file_aio_write to fault page */
91 if (unlikely(copied == 0)) { 91 if (unlikely(copied == 0))
92 break; 92 break;
93 }
94 93
95 if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { 94 if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
96 offset += copied; 95 offset += copied;
@@ -109,8 +108,6 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
109{ 108{
110 size_t i; 109 size_t i;
111 for (i = 0; i < num_pages; i++) { 110 for (i = 0; i < num_pages; i++) {
112 if (!pages[i])
113 break;
114 /* page checked is some magic around finding pages that 111 /* page checked is some magic around finding pages that
115 * have been modified without going through btrfs_set_page_dirty 112 * have been modified without going through btrfs_set_page_dirty
116 * clear it here 113 * clear it here
@@ -130,13 +127,12 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
130 * this also makes the decision about creating an inline extent vs 127 * this also makes the decision about creating an inline extent vs
131 * doing real data extents, marking pages dirty and delalloc as required. 128 * doing real data extents, marking pages dirty and delalloc as required.
132 */ 129 */
133static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, 130static noinline int dirty_and_release_pages(struct btrfs_root *root,
134 struct btrfs_root *root, 131 struct file *file,
135 struct file *file, 132 struct page **pages,
136 struct page **pages, 133 size_t num_pages,
137 size_t num_pages, 134 loff_t pos,
138 loff_t pos, 135 size_t write_bytes)
139 size_t write_bytes)
140{ 136{
141 int err = 0; 137 int err = 0;
142 int i; 138 int i;
@@ -154,7 +150,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
154 end_of_last_block = start_pos + num_bytes - 1; 150 end_of_last_block = start_pos + num_bytes - 1;
155 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, 151 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
156 NULL); 152 NULL);
157 BUG_ON(err); 153 if (err)
154 return err;
158 155
159 for (i = 0; i < num_pages; i++) { 156 for (i = 0; i < num_pages; i++) {
160 struct page *p = pages[i]; 157 struct page *p = pages[i];
@@ -162,13 +159,14 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
162 ClearPageChecked(p); 159 ClearPageChecked(p);
163 set_page_dirty(p); 160 set_page_dirty(p);
164 } 161 }
165 if (end_pos > isize) { 162
163 /*
164 * we've only changed i_size in ram, and we haven't updated
165 * the disk i_size. There is no need to log the inode
166 * at this time.
167 */
168 if (end_pos > isize)
166 i_size_write(inode, end_pos); 169 i_size_write(inode, end_pos);
167 /* we've only changed i_size in ram, and we haven't updated
168 * the disk i_size. There is no need to log the inode
169 * at this time.
170 */
171 }
172 return 0; 170 return 0;
173} 171}
174 172
@@ -610,6 +608,8 @@ again:
610 key.offset = split; 608 key.offset = split;
611 609
612 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 610 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
611 if (ret < 0)
612 goto out;
613 if (ret > 0 && path->slots[0] > 0) 613 if (ret > 0 && path->slots[0] > 0)
614 path->slots[0]--; 614 path->slots[0]--;
615 615
@@ -819,12 +819,11 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
819 last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; 819 last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
820 820
821 if (start_pos > inode->i_size) { 821 if (start_pos > inode->i_size) {
822 err = btrfs_cont_expand(inode, start_pos); 822 err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
823 if (err) 823 if (err)
824 return err; 824 return err;
825 } 825 }
826 826
827 memset(pages, 0, num_pages * sizeof(struct page *));
828again: 827again:
829 for (i = 0; i < num_pages; i++) { 828 for (i = 0; i < num_pages; i++) {
830 pages[i] = grab_cache_page(inode->i_mapping, index + i); 829 pages[i] = grab_cache_page(inode->i_mapping, index + i);
@@ -896,156 +895,71 @@ fail:
896 895
897} 896}
898 897
899static ssize_t btrfs_file_aio_write(struct kiocb *iocb, 898static noinline ssize_t __btrfs_buffered_write(struct file *file,
900 const struct iovec *iov, 899 struct iov_iter *i,
901 unsigned long nr_segs, loff_t pos) 900 loff_t pos)
902{ 901{
903 struct file *file = iocb->ki_filp;
904 struct inode *inode = fdentry(file)->d_inode; 902 struct inode *inode = fdentry(file)->d_inode;
905 struct btrfs_root *root = BTRFS_I(inode)->root; 903 struct btrfs_root *root = BTRFS_I(inode)->root;
906 struct page **pages = NULL; 904 struct page **pages = NULL;
907 struct iov_iter i;
908 loff_t *ppos = &iocb->ki_pos;
909 loff_t start_pos;
910 ssize_t num_written = 0;
911 ssize_t err = 0;
912 size_t count;
913 size_t ocount;
914 int ret = 0;
915 int nrptrs;
916 unsigned long first_index; 905 unsigned long first_index;
917 unsigned long last_index; 906 unsigned long last_index;
918 int will_write; 907 size_t num_written = 0;
919 int buffered = 0; 908 int nrptrs;
920 int copied = 0; 909 int ret;
921 int dirty_pages = 0;
922
923 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
924 (file->f_flags & O_DIRECT));
925
926 start_pos = pos;
927
928 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
929
930 mutex_lock(&inode->i_mutex);
931
932 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
933 if (err)
934 goto out;
935 count = ocount;
936
937 current->backing_dev_info = inode->i_mapping->backing_dev_info;
938 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
939 if (err)
940 goto out;
941
942 if (count == 0)
943 goto out;
944
945 err = file_remove_suid(file);
946 if (err)
947 goto out;
948
949 /*
950 * If BTRFS flips readonly due to some impossible error
951 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
952 * although we have opened a file as writable, we have
953 * to stop this write operation to ensure FS consistency.
954 */
955 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
956 err = -EROFS;
957 goto out;
958 }
959
960 file_update_time(file);
961 BTRFS_I(inode)->sequence++;
962
963 if (unlikely(file->f_flags & O_DIRECT)) {
964 num_written = generic_file_direct_write(iocb, iov, &nr_segs,
965 pos, ppos, count,
966 ocount);
967 /*
968 * the generic O_DIRECT will update in-memory i_size after the
969 * DIOs are done. But our endio handlers that update the on
970 * disk i_size never update past the in memory i_size. So we
971 * need one more update here to catch any additions to the
972 * file
973 */
974 if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
975 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
976 mark_inode_dirty(inode);
977 }
978
979 if (num_written < 0) {
980 ret = num_written;
981 num_written = 0;
982 goto out;
983 } else if (num_written == count) {
984 /* pick up pos changes done by the generic code */
985 pos = *ppos;
986 goto out;
987 }
988 /*
989 * We are going to do buffered for the rest of the range, so we
990 * need to make sure to invalidate the buffered pages when we're
991 * done.
992 */
993 buffered = 1;
994 pos += num_written;
995 }
996 910
997 iov_iter_init(&i, iov, nr_segs, count, num_written); 911 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
998 nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
999 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / 912 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
1000 (sizeof(struct page *))); 913 (sizeof(struct page *)));
1001 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 914 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1002 if (!pages) { 915 if (!pages)
1003 ret = -ENOMEM; 916 return -ENOMEM;
1004 goto out;
1005 }
1006
1007 /* generic_write_checks can change our pos */
1008 start_pos = pos;
1009 917
1010 first_index = pos >> PAGE_CACHE_SHIFT; 918 first_index = pos >> PAGE_CACHE_SHIFT;
1011 last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; 919 last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;
1012 920
1013 while (iov_iter_count(&i) > 0) { 921 while (iov_iter_count(i) > 0) {
1014 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 922 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1015 size_t write_bytes = min(iov_iter_count(&i), 923 size_t write_bytes = min(iov_iter_count(i),
1016 nrptrs * (size_t)PAGE_CACHE_SIZE - 924 nrptrs * (size_t)PAGE_CACHE_SIZE -
1017 offset); 925 offset);
1018 size_t num_pages = (write_bytes + offset + 926 size_t num_pages = (write_bytes + offset +
1019 PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 927 PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
928 size_t dirty_pages;
929 size_t copied;
1020 930
1021 WARN_ON(num_pages > nrptrs); 931 WARN_ON(num_pages > nrptrs);
1022 memset(pages, 0, sizeof(struct page *) * nrptrs);
1023 932
1024 /* 933 /*
1025 * Fault pages before locking them in prepare_pages 934 * Fault pages before locking them in prepare_pages
1026 * to avoid recursive lock 935 * to avoid recursive lock
1027 */ 936 */
1028 if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) { 937 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1029 ret = -EFAULT; 938 ret = -EFAULT;
1030 goto out; 939 break;
1031 } 940 }
1032 941
1033 ret = btrfs_delalloc_reserve_space(inode, 942 ret = btrfs_delalloc_reserve_space(inode,
1034 num_pages << PAGE_CACHE_SHIFT); 943 num_pages << PAGE_CACHE_SHIFT);
1035 if (ret) 944 if (ret)
1036 goto out; 945 break;
1037 946
947 /*
948 * This is going to setup the pages array with the number of
949 * pages we want, so we don't really need to worry about the
950 * contents of pages from loop to loop
951 */
1038 ret = prepare_pages(root, file, pages, num_pages, 952 ret = prepare_pages(root, file, pages, num_pages,
1039 pos, first_index, last_index, 953 pos, first_index, last_index,
1040 write_bytes); 954 write_bytes);
1041 if (ret) { 955 if (ret) {
1042 btrfs_delalloc_release_space(inode, 956 btrfs_delalloc_release_space(inode,
1043 num_pages << PAGE_CACHE_SHIFT); 957 num_pages << PAGE_CACHE_SHIFT);
1044 goto out; 958 break;
1045 } 959 }
1046 960
1047 copied = btrfs_copy_from_user(pos, num_pages, 961 copied = btrfs_copy_from_user(pos, num_pages,
1048 write_bytes, pages, &i); 962 write_bytes, pages, i);
1049 963
1050 /* 964 /*
1051 * if we have trouble faulting in the pages, fall 965 * if we have trouble faulting in the pages, fall
@@ -1061,6 +975,13 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1061 PAGE_CACHE_SIZE - 1) >> 975 PAGE_CACHE_SIZE - 1) >>
1062 PAGE_CACHE_SHIFT; 976 PAGE_CACHE_SHIFT;
1063 977
978 /*
	 979 * If we had a short copy we need to release the excess delalloc
980 * bytes we reserved. We need to increment outstanding_extents
981 * because btrfs_delalloc_release_space will decrement it, but
982 * we still have an outstanding extent for the chunk we actually
983 * managed to copy.
984 */
1064 if (num_pages > dirty_pages) { 985 if (num_pages > dirty_pages) {
1065 if (copied > 0) 986 if (copied > 0)
1066 atomic_inc( 987 atomic_inc(
@@ -1071,39 +992,157 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1071 } 992 }
1072 993
1073 if (copied > 0) { 994 if (copied > 0) {
1074 dirty_and_release_pages(NULL, root, file, pages, 995 ret = dirty_and_release_pages(root, file, pages,
1075 dirty_pages, pos, copied); 996 dirty_pages, pos,
997 copied);
998 if (ret) {
999 btrfs_delalloc_release_space(inode,
1000 dirty_pages << PAGE_CACHE_SHIFT);
1001 btrfs_drop_pages(pages, num_pages);
1002 break;
1003 }
1076 } 1004 }
1077 1005
1078 btrfs_drop_pages(pages, num_pages); 1006 btrfs_drop_pages(pages, num_pages);
1079 1007
1080 if (copied > 0) { 1008 cond_resched();
1081 if (will_write) { 1009
1082 filemap_fdatawrite_range(inode->i_mapping, pos, 1010 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1083 pos + copied - 1); 1011 dirty_pages);
1084 } else { 1012 if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1085 balance_dirty_pages_ratelimited_nr( 1013 btrfs_btree_balance_dirty(root, 1);
1086 inode->i_mapping, 1014 btrfs_throttle(root);
1087 dirty_pages);
1088 if (dirty_pages <
1089 (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1090 btrfs_btree_balance_dirty(root, 1);
1091 btrfs_throttle(root);
1092 }
1093 }
1094 1015
1095 pos += copied; 1016 pos += copied;
1096 num_written += copied; 1017 num_written += copied;
1018 }
1097 1019
1098 cond_resched(); 1020 kfree(pages);
1021
1022 return num_written ? num_written : ret;
1023}
1024
1025static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1026 const struct iovec *iov,
1027 unsigned long nr_segs, loff_t pos,
1028 loff_t *ppos, size_t count, size_t ocount)
1029{
1030 struct file *file = iocb->ki_filp;
1031 struct inode *inode = fdentry(file)->d_inode;
1032 struct iov_iter i;
1033 ssize_t written;
1034 ssize_t written_buffered;
1035 loff_t endbyte;
1036 int err;
1037
1038 written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1039 count, ocount);
1040
1041 /*
1042 * the generic O_DIRECT will update in-memory i_size after the
1043 * DIOs are done. But our endio handlers that update the on
1044 * disk i_size never update past the in memory i_size. So we
1045 * need one more update here to catch any additions to the
1046 * file
1047 */
1048 if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
1049 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
1050 mark_inode_dirty(inode);
1099 } 1051 }
1052
1053 if (written < 0 || written == count)
1054 return written;
1055
1056 pos += written;
1057 count -= written;
1058 iov_iter_init(&i, iov, nr_segs, count, written);
1059 written_buffered = __btrfs_buffered_write(file, &i, pos);
1060 if (written_buffered < 0) {
1061 err = written_buffered;
1062 goto out;
1063 }
1064 endbyte = pos + written_buffered - 1;
1065 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1066 if (err)
1067 goto out;
1068 written += written_buffered;
1069 *ppos = pos + written_buffered;
1070 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1071 endbyte >> PAGE_CACHE_SHIFT);
1100out: 1072out:
1101 mutex_unlock(&inode->i_mutex); 1073 return written ? written : err;
1102 if (ret) 1074}
1103 err = ret;
1104 1075
1105 kfree(pages); 1076static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1106 *ppos = pos; 1077 const struct iovec *iov,
1078 unsigned long nr_segs, loff_t pos)
1079{
1080 struct file *file = iocb->ki_filp;
1081 struct inode *inode = fdentry(file)->d_inode;
1082 struct btrfs_root *root = BTRFS_I(inode)->root;
1083 loff_t *ppos = &iocb->ki_pos;
1084 ssize_t num_written = 0;
1085 ssize_t err = 0;
1086 size_t count, ocount;
1087
1088 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1089
1090 mutex_lock(&inode->i_mutex);
1091
1092 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1093 if (err) {
1094 mutex_unlock(&inode->i_mutex);
1095 goto out;
1096 }
1097 count = ocount;
1098
1099 current->backing_dev_info = inode->i_mapping->backing_dev_info;
1100 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1101 if (err) {
1102 mutex_unlock(&inode->i_mutex);
1103 goto out;
1104 }
1105
1106 if (count == 0) {
1107 mutex_unlock(&inode->i_mutex);
1108 goto out;
1109 }
1110
1111 err = file_remove_suid(file);
1112 if (err) {
1113 mutex_unlock(&inode->i_mutex);
1114 goto out;
1115 }
1116
1117 /*
1118 * If BTRFS flips readonly due to some impossible error
1119 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1120 * although we have opened a file as writable, we have
1121 * to stop this write operation to ensure FS consistency.
1122 */
1123 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1124 mutex_unlock(&inode->i_mutex);
1125 err = -EROFS;
1126 goto out;
1127 }
1128
1129 file_update_time(file);
1130 BTRFS_I(inode)->sequence++;
1131
1132 if (unlikely(file->f_flags & O_DIRECT)) {
1133 num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1134 pos, ppos, count, ocount);
1135 } else {
1136 struct iov_iter i;
1137
1138 iov_iter_init(&i, iov, nr_segs, count, num_written);
1139
1140 num_written = __btrfs_buffered_write(file, &i, pos);
1141 if (num_written > 0)
1142 *ppos = pos + num_written;
1143 }
1144
1145 mutex_unlock(&inode->i_mutex);
1107 1146
1108 /* 1147 /*
1109 * we want to make sure fsync finds this change 1148 * we want to make sure fsync finds this change
@@ -1118,43 +1157,12 @@ out:
1118 * one running right now. 1157 * one running right now.
1119 */ 1158 */
1120 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; 1159 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1121 1160 if (num_written > 0 || num_written == -EIOCBQUEUED) {
1122 if (num_written > 0 && will_write) { 1161 err = generic_write_sync(file, pos, num_written);
1123 struct btrfs_trans_handle *trans; 1162 if (err < 0 && num_written > 0)
1124
1125 err = btrfs_wait_ordered_range(inode, start_pos, num_written);
1126 if (err)
1127 num_written = err; 1163 num_written = err;
1128
1129 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
1130 trans = btrfs_start_transaction(root, 0);
1131 if (IS_ERR(trans)) {
1132 num_written = PTR_ERR(trans);
1133 goto done;
1134 }
1135 mutex_lock(&inode->i_mutex);
1136 ret = btrfs_log_dentry_safe(trans, root,
1137 file->f_dentry);
1138 mutex_unlock(&inode->i_mutex);
1139 if (ret == 0) {
1140 ret = btrfs_sync_log(trans, root);
1141 if (ret == 0)
1142 btrfs_end_transaction(trans, root);
1143 else
1144 btrfs_commit_transaction(trans, root);
1145 } else if (ret != BTRFS_NO_LOG_SYNC) {
1146 btrfs_commit_transaction(trans, root);
1147 } else {
1148 btrfs_end_transaction(trans, root);
1149 }
1150 }
1151 if (file->f_flags & O_DIRECT && buffered) {
1152 invalidate_mapping_pages(inode->i_mapping,
1153 start_pos >> PAGE_CACHE_SHIFT,
1154 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
1155 }
1156 } 1164 }
1157done: 1165out:
1158 current->backing_dev_info = NULL; 1166 current->backing_dev_info = NULL;
1159 return num_written ? num_written : err; 1167 return num_written ? num_written : err;
1160} 1168}
@@ -1197,6 +1205,7 @@ int btrfs_sync_file(struct file *file, int datasync)
1197 int ret = 0; 1205 int ret = 0;
1198 struct btrfs_trans_handle *trans; 1206 struct btrfs_trans_handle *trans;
1199 1207
1208 trace_btrfs_sync_file(file, datasync);
1200 1209
1201 /* we wait first, since the writeback may change the inode */ 1210 /* we wait first, since the writeback may change the inode */
1202 root->log_batch++; 1211 root->log_batch++;
@@ -1324,7 +1333,8 @@ static long btrfs_fallocate(struct file *file, int mode,
1324 goto out; 1333 goto out;
1325 1334
1326 if (alloc_start > inode->i_size) { 1335 if (alloc_start > inode->i_size) {
1327 ret = btrfs_cont_expand(inode, alloc_start); 1336 ret = btrfs_cont_expand(inode, i_size_read(inode),
1337 alloc_start);
1328 if (ret) 1338 if (ret)
1329 goto out; 1339 goto out;
1330 } 1340 }
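
The buffered write loop above processes the iovec in batches of at most nrptrs pages, deriving the in-page offset, the byte budget for the pass and the page count from pos and the bytes left in the iov_iter. A small standalone calculation of that chunking, assuming 4K pages and illustrative values; this is not btrfs code, just the arithmetic:

#include <stdio.h>
#include <stddef.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
	size_t nrptrs = 8;              /* pages handled per pass */
	unsigned long long pos = 6144;  /* write starts 2K into page 1 */
	size_t left = 10000;            /* bytes remaining in the iov_iter */

	size_t offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t budget = nrptrs * PAGE_CACHE_SIZE - offset;
	size_t write_bytes = left < budget ? left : budget;
	size_t num_pages = (write_bytes + offset + PAGE_CACHE_SIZE - 1) >>
			   PAGE_CACHE_SHIFT;

	/* prints: offset=2048 write_bytes=10000 num_pages=3 */
	printf("offset=%zu write_bytes=%zu num_pages=%zu\n",
	       offset, write_bytes, num_pages);
	return 0;
}
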
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index a0390657451b..0037427d8a9d 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -393,7 +393,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
393 break; 393 break;
394 394
395 need_loop = 1; 395 need_loop = 1;
396 e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); 396 e = kmem_cache_zalloc(btrfs_free_space_cachep,
397 GFP_NOFS);
397 if (!e) { 398 if (!e) {
398 kunmap(page); 399 kunmap(page);
399 unlock_page(page); 400 unlock_page(page);
@@ -405,7 +406,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
405 e->bytes = le64_to_cpu(entry->bytes); 406 e->bytes = le64_to_cpu(entry->bytes);
406 if (!e->bytes) { 407 if (!e->bytes) {
407 kunmap(page); 408 kunmap(page);
408 kfree(e); 409 kmem_cache_free(btrfs_free_space_cachep, e);
409 unlock_page(page); 410 unlock_page(page);
410 page_cache_release(page); 411 page_cache_release(page);
411 goto free_cache; 412 goto free_cache;
@@ -420,7 +421,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
420 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 421 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
421 if (!e->bitmap) { 422 if (!e->bitmap) {
422 kunmap(page); 423 kunmap(page);
423 kfree(e); 424 kmem_cache_free(
425 btrfs_free_space_cachep, e);
424 unlock_page(page); 426 unlock_page(page);
425 page_cache_release(page); 427 page_cache_release(page);
426 goto free_cache; 428 goto free_cache;
@@ -1187,7 +1189,7 @@ static void free_bitmap(struct btrfs_block_group_cache *block_group,
1187{ 1189{
1188 unlink_free_space(block_group, bitmap_info); 1190 unlink_free_space(block_group, bitmap_info);
1189 kfree(bitmap_info->bitmap); 1191 kfree(bitmap_info->bitmap);
1190 kfree(bitmap_info); 1192 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1191 block_group->total_bitmaps--; 1193 block_group->total_bitmaps--;
1192 recalculate_thresholds(block_group); 1194 recalculate_thresholds(block_group);
1193} 1195}
@@ -1285,9 +1287,22 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
1285 * If we are below the extents threshold then we can add this as an 1287 * If we are below the extents threshold then we can add this as an
1286 * extent, and don't have to deal with the bitmap 1288 * extent, and don't have to deal with the bitmap
1287 */ 1289 */
1288 if (block_group->free_extents < block_group->extents_thresh && 1290 if (block_group->free_extents < block_group->extents_thresh) {
1289 info->bytes > block_group->sectorsize * 4) 1291 /*
1290 return 0; 1292 * If this block group has some small extents we don't want to
1293 * use up all of our free slots in the cache with them, we want
	 1294 * to reserve them for larger extents, however if we have plenty
	 1295 * of cache left then go ahead and add them, no sense in adding
1296 * the overhead of a bitmap if we don't have to.
1297 */
1298 if (info->bytes <= block_group->sectorsize * 4) {
1299 if (block_group->free_extents * 2 <=
1300 block_group->extents_thresh)
1301 return 0;
1302 } else {
1303 return 0;
1304 }
1305 }
1291 1306
1292 /* 1307 /*
1293 * some block groups are so tiny they can't be enveloped by a bitmap, so 1308 * some block groups are so tiny they can't be enveloped by a bitmap, so
@@ -1342,8 +1357,8 @@ new_bitmap:
1342 1357
1343 /* no pre-allocated info, allocate a new one */ 1358 /* no pre-allocated info, allocate a new one */
1344 if (!info) { 1359 if (!info) {
1345 info = kzalloc(sizeof(struct btrfs_free_space), 1360 info = kmem_cache_zalloc(btrfs_free_space_cachep,
1346 GFP_NOFS); 1361 GFP_NOFS);
1347 if (!info) { 1362 if (!info) {
1348 spin_lock(&block_group->tree_lock); 1363 spin_lock(&block_group->tree_lock);
1349 ret = -ENOMEM; 1364 ret = -ENOMEM;
@@ -1365,7 +1380,7 @@ out:
1365 if (info) { 1380 if (info) {
1366 if (info->bitmap) 1381 if (info->bitmap)
1367 kfree(info->bitmap); 1382 kfree(info->bitmap);
1368 kfree(info); 1383 kmem_cache_free(btrfs_free_space_cachep, info);
1369 } 1384 }
1370 1385
1371 return ret; 1386 return ret;
@@ -1398,7 +1413,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
1398 else 1413 else
1399 __unlink_free_space(block_group, right_info); 1414 __unlink_free_space(block_group, right_info);
1400 info->bytes += right_info->bytes; 1415 info->bytes += right_info->bytes;
1401 kfree(right_info); 1416 kmem_cache_free(btrfs_free_space_cachep, right_info);
1402 merged = true; 1417 merged = true;
1403 } 1418 }
1404 1419
@@ -1410,7 +1425,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
1410 __unlink_free_space(block_group, left_info); 1425 __unlink_free_space(block_group, left_info);
1411 info->offset = left_info->offset; 1426 info->offset = left_info->offset;
1412 info->bytes += left_info->bytes; 1427 info->bytes += left_info->bytes;
1413 kfree(left_info); 1428 kmem_cache_free(btrfs_free_space_cachep, left_info);
1414 merged = true; 1429 merged = true;
1415 } 1430 }
1416 1431
@@ -1423,7 +1438,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
1423 struct btrfs_free_space *info; 1438 struct btrfs_free_space *info;
1424 int ret = 0; 1439 int ret = 0;
1425 1440
1426 info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); 1441 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
1427 if (!info) 1442 if (!info)
1428 return -ENOMEM; 1443 return -ENOMEM;
1429 1444
@@ -1450,7 +1465,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
1450link: 1465link:
1451 ret = link_free_space(block_group, info); 1466 ret = link_free_space(block_group, info);
1452 if (ret) 1467 if (ret)
1453 kfree(info); 1468 kmem_cache_free(btrfs_free_space_cachep, info);
1454out: 1469out:
1455 spin_unlock(&block_group->tree_lock); 1470 spin_unlock(&block_group->tree_lock);
1456 1471
@@ -1520,7 +1535,7 @@ again:
1520 kfree(info->bitmap); 1535 kfree(info->bitmap);
1521 block_group->total_bitmaps--; 1536 block_group->total_bitmaps--;
1522 } 1537 }
1523 kfree(info); 1538 kmem_cache_free(btrfs_free_space_cachep, info);
1524 goto out_lock; 1539 goto out_lock;
1525 } 1540 }
1526 1541
@@ -1556,7 +1571,7 @@ again:
1556 /* the hole we're creating ends at the end 1571 /* the hole we're creating ends at the end
1557 * of the info struct, just free the info 1572 * of the info struct, just free the info
1558 */ 1573 */
1559 kfree(info); 1574 kmem_cache_free(btrfs_free_space_cachep, info);
1560 } 1575 }
1561 spin_unlock(&block_group->tree_lock); 1576 spin_unlock(&block_group->tree_lock);
1562 1577
@@ -1629,30 +1644,28 @@ __btrfs_return_cluster_to_free_space(
1629{ 1644{
1630 struct btrfs_free_space *entry; 1645 struct btrfs_free_space *entry;
1631 struct rb_node *node; 1646 struct rb_node *node;
1632 bool bitmap;
1633 1647
1634 spin_lock(&cluster->lock); 1648 spin_lock(&cluster->lock);
1635 if (cluster->block_group != block_group) 1649 if (cluster->block_group != block_group)
1636 goto out; 1650 goto out;
1637 1651
1638 bitmap = cluster->points_to_bitmap;
1639 cluster->block_group = NULL; 1652 cluster->block_group = NULL;
1640 cluster->window_start = 0; 1653 cluster->window_start = 0;
1641 list_del_init(&cluster->block_group_list); 1654 list_del_init(&cluster->block_group_list);
1642 cluster->points_to_bitmap = false;
1643
1644 if (bitmap)
1645 goto out;
1646 1655
1647 node = rb_first(&cluster->root); 1656 node = rb_first(&cluster->root);
1648 while (node) { 1657 while (node) {
1658 bool bitmap;
1659
1649 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1660 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1650 node = rb_next(&entry->offset_index); 1661 node = rb_next(&entry->offset_index);
1651 rb_erase(&entry->offset_index, &cluster->root); 1662 rb_erase(&entry->offset_index, &cluster->root);
1652 BUG_ON(entry->bitmap); 1663
1653 try_merge_free_space(block_group, entry, false); 1664 bitmap = (entry->bitmap != NULL);
1665 if (!bitmap)
1666 try_merge_free_space(block_group, entry, false);
1654 tree_insert_offset(&block_group->free_space_offset, 1667 tree_insert_offset(&block_group->free_space_offset,
1655 entry->offset, &entry->offset_index, 0); 1668 entry->offset, &entry->offset_index, bitmap);
1656 } 1669 }
1657 cluster->root = RB_ROOT; 1670 cluster->root = RB_ROOT;
1658 1671
@@ -1689,7 +1702,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
1689 unlink_free_space(block_group, info); 1702 unlink_free_space(block_group, info);
1690 if (info->bitmap) 1703 if (info->bitmap)
1691 kfree(info->bitmap); 1704 kfree(info->bitmap);
1692 kfree(info); 1705 kmem_cache_free(btrfs_free_space_cachep, info);
1693 if (need_resched()) { 1706 if (need_resched()) {
1694 spin_unlock(&block_group->tree_lock); 1707 spin_unlock(&block_group->tree_lock);
1695 cond_resched(); 1708 cond_resched();
@@ -1722,7 +1735,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
1722 entry->offset += bytes; 1735 entry->offset += bytes;
1723 entry->bytes -= bytes; 1736 entry->bytes -= bytes;
1724 if (!entry->bytes) 1737 if (!entry->bytes)
1725 kfree(entry); 1738 kmem_cache_free(btrfs_free_space_cachep, entry);
1726 else 1739 else
1727 link_free_space(block_group, entry); 1740 link_free_space(block_group, entry);
1728 } 1741 }
@@ -1775,50 +1788,24 @@ int btrfs_return_cluster_to_free_space(
1775 1788
1776static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, 1789static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
1777 struct btrfs_free_cluster *cluster, 1790 struct btrfs_free_cluster *cluster,
1791 struct btrfs_free_space *entry,
1778 u64 bytes, u64 min_start) 1792 u64 bytes, u64 min_start)
1779{ 1793{
1780 struct btrfs_free_space *entry;
1781 int err; 1794 int err;
1782 u64 search_start = cluster->window_start; 1795 u64 search_start = cluster->window_start;
1783 u64 search_bytes = bytes; 1796 u64 search_bytes = bytes;
1784 u64 ret = 0; 1797 u64 ret = 0;
1785 1798
1786 spin_lock(&block_group->tree_lock);
1787 spin_lock(&cluster->lock);
1788
1789 if (!cluster->points_to_bitmap)
1790 goto out;
1791
1792 if (cluster->block_group != block_group)
1793 goto out;
1794
1795 /*
1796 * search_start is the beginning of the bitmap, but at some point it may
1797 * be a good idea to point to the actual start of the free area in the
1798 * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
1799 * to 1 to make sure we get the bitmap entry
1800 */
1801 entry = tree_search_offset(block_group,
1802 offset_to_bitmap(block_group, search_start),
1803 1, 0);
1804 if (!entry || !entry->bitmap)
1805 goto out;
1806
1807 search_start = min_start; 1799 search_start = min_start;
1808 search_bytes = bytes; 1800 search_bytes = bytes;
1809 1801
1810 err = search_bitmap(block_group, entry, &search_start, 1802 err = search_bitmap(block_group, entry, &search_start,
1811 &search_bytes); 1803 &search_bytes);
1812 if (err) 1804 if (err)
1813 goto out; 1805 return 0;
1814 1806
1815 ret = search_start; 1807 ret = search_start;
1816 bitmap_clear_bits(block_group, entry, ret, bytes); 1808 bitmap_clear_bits(block_group, entry, ret, bytes);
1817 if (entry->bytes == 0)
1818 free_bitmap(block_group, entry);
1819out:
1820 spin_unlock(&cluster->lock);
1821 spin_unlock(&block_group->tree_lock);
1822 1809
1823 return ret; 1810 return ret;
1824} 1811}
@@ -1836,10 +1823,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
1836 struct rb_node *node; 1823 struct rb_node *node;
1837 u64 ret = 0; 1824 u64 ret = 0;
1838 1825
1839 if (cluster->points_to_bitmap)
1840 return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
1841 min_start);
1842
1843 spin_lock(&cluster->lock); 1826 spin_lock(&cluster->lock);
1844 if (bytes > cluster->max_size) 1827 if (bytes > cluster->max_size)
1845 goto out; 1828 goto out;
@@ -1852,9 +1835,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
1852 goto out; 1835 goto out;
1853 1836
1854 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1837 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1855
1856 while(1) { 1838 while(1) {
1857 if (entry->bytes < bytes || entry->offset < min_start) { 1839 if (entry->bytes < bytes ||
1840 (!entry->bitmap && entry->offset < min_start)) {
1858 struct rb_node *node; 1841 struct rb_node *node;
1859 1842
1860 node = rb_next(&entry->offset_index); 1843 node = rb_next(&entry->offset_index);
@@ -1864,10 +1847,27 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
1864 offset_index); 1847 offset_index);
1865 continue; 1848 continue;
1866 } 1849 }
1867 ret = entry->offset;
1868 1850
1869 entry->offset += bytes; 1851 if (entry->bitmap) {
1870 entry->bytes -= bytes; 1852 ret = btrfs_alloc_from_bitmap(block_group,
1853 cluster, entry, bytes,
1854 min_start);
1855 if (ret == 0) {
1856 struct rb_node *node;
1857 node = rb_next(&entry->offset_index);
1858 if (!node)
1859 break;
1860 entry = rb_entry(node, struct btrfs_free_space,
1861 offset_index);
1862 continue;
1863 }
1864 } else {
1865
1866 ret = entry->offset;
1867
1868 entry->offset += bytes;
1869 entry->bytes -= bytes;
1870 }
1871 1871
1872 if (entry->bytes == 0) 1872 if (entry->bytes == 0)
1873 rb_erase(&entry->offset_index, &cluster->root); 1873 rb_erase(&entry->offset_index, &cluster->root);
@@ -1884,7 +1884,12 @@ out:
1884 block_group->free_space -= bytes; 1884 block_group->free_space -= bytes;
1885 if (entry->bytes == 0) { 1885 if (entry->bytes == 0) {
1886 block_group->free_extents--; 1886 block_group->free_extents--;
1887 kfree(entry); 1887 if (entry->bitmap) {
1888 kfree(entry->bitmap);
1889 block_group->total_bitmaps--;
1890 recalculate_thresholds(block_group);
1891 }
1892 kmem_cache_free(btrfs_free_space_cachep, entry);
1888 } 1893 }
1889 1894
1890 spin_unlock(&block_group->tree_lock); 1895 spin_unlock(&block_group->tree_lock);
@@ -1904,12 +1909,13 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
1904 unsigned long found_bits; 1909 unsigned long found_bits;
1905 unsigned long start = 0; 1910 unsigned long start = 0;
1906 unsigned long total_found = 0; 1911 unsigned long total_found = 0;
1912 int ret;
1907 bool found = false; 1913 bool found = false;
1908 1914
1909 i = offset_to_bit(entry->offset, block_group->sectorsize, 1915 i = offset_to_bit(entry->offset, block_group->sectorsize,
1910 max_t(u64, offset, entry->offset)); 1916 max_t(u64, offset, entry->offset));
1911 search_bits = bytes_to_bits(min_bytes, block_group->sectorsize); 1917 search_bits = bytes_to_bits(bytes, block_group->sectorsize);
1912 total_bits = bytes_to_bits(bytes, block_group->sectorsize); 1918 total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
1913 1919
1914again: 1920again:
1915 found_bits = 0; 1921 found_bits = 0;
@@ -1926,7 +1932,7 @@ again:
1926 } 1932 }
1927 1933
1928 if (!found_bits) 1934 if (!found_bits)
1929 return -1; 1935 return -ENOSPC;
1930 1936
1931 if (!found) { 1937 if (!found) {
1932 start = i; 1938 start = i;
@@ -1950,189 +1956,208 @@ again:
1950 1956
1951 cluster->window_start = start * block_group->sectorsize + 1957 cluster->window_start = start * block_group->sectorsize +
1952 entry->offset; 1958 entry->offset;
1953 cluster->points_to_bitmap = true; 1959 rb_erase(&entry->offset_index, &block_group->free_space_offset);
1960 ret = tree_insert_offset(&cluster->root, entry->offset,
1961 &entry->offset_index, 1);
1962 BUG_ON(ret);
1954 1963
1955 return 0; 1964 return 0;
1956} 1965}
1957 1966
1958/* 1967/*
1959 * here we try to find a cluster of blocks in a block group. The goal 1968 * This searches the block group for just extents to fill the cluster with.
1960 * is to find at least bytes free and up to empty_size + bytes free.
1961 * We might not find them all in one contiguous area.
1962 *
1963 * returns zero and sets up cluster if things worked out, otherwise
1964 * it returns -enospc
1965 */ 1969 */
1966int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, 1970static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
1967 struct btrfs_root *root, 1971 struct btrfs_free_cluster *cluster,
1968 struct btrfs_block_group_cache *block_group, 1972 u64 offset, u64 bytes, u64 min_bytes)
1969 struct btrfs_free_cluster *cluster,
1970 u64 offset, u64 bytes, u64 empty_size)
1971{ 1973{
1974 struct btrfs_free_space *first = NULL;
1972 struct btrfs_free_space *entry = NULL; 1975 struct btrfs_free_space *entry = NULL;
1976 struct btrfs_free_space *prev = NULL;
1977 struct btrfs_free_space *last;
1973 struct rb_node *node; 1978 struct rb_node *node;
1974 struct btrfs_free_space *next;
1975 struct btrfs_free_space *last = NULL;
1976 u64 min_bytes;
1977 u64 window_start; 1979 u64 window_start;
1978 u64 window_free; 1980 u64 window_free;
1979 u64 max_extent = 0; 1981 u64 max_extent;
1980 bool found_bitmap = false; 1982 u64 max_gap = 128 * 1024;
1981 int ret;
1982 1983
1983 /* for metadata, allow allocates with more holes */ 1984 entry = tree_search_offset(block_group, offset, 0, 1);
1984 if (btrfs_test_opt(root, SSD_SPREAD)) { 1985 if (!entry)
1985 min_bytes = bytes + empty_size; 1986 return -ENOSPC;
1986 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
1987 /*
1988 * we want to do larger allocations when we are
1989 * flushing out the delayed refs, it helps prevent
1990 * making more work as we go along.
1991 */
1992 if (trans->transaction->delayed_refs.flushing)
1993 min_bytes = max(bytes, (bytes + empty_size) >> 1);
1994 else
1995 min_bytes = max(bytes, (bytes + empty_size) >> 4);
1996 } else
1997 min_bytes = max(bytes, (bytes + empty_size) >> 2);
1998
1999 spin_lock(&block_group->tree_lock);
2000 spin_lock(&cluster->lock);
2001
2002 /* someone already found a cluster, hooray */
2003 if (cluster->block_group) {
2004 ret = 0;
2005 goto out;
2006 }
2007again:
2008 entry = tree_search_offset(block_group, offset, found_bitmap, 1);
2009 if (!entry) {
2010 ret = -ENOSPC;
2011 goto out;
2012 }
2013 1987
2014 /* 1988 /*
2015 * If found_bitmap is true, we exhausted our search for extent entries, 1989 * We don't want bitmaps, so just move along until we find a normal
2016 * and we just want to search all of the bitmaps that we can find, and 1990 * extent entry.
2017 * ignore any extent entries we find.
2018 */ 1991 */
2019 while (entry->bitmap || found_bitmap || 1992 while (entry->bitmap) {
2020 (!entry->bitmap && entry->bytes < min_bytes)) { 1993 node = rb_next(&entry->offset_index);
2021 struct rb_node *node = rb_next(&entry->offset_index); 1994 if (!node)
2022 1995 return -ENOSPC;
2023 if (entry->bitmap && entry->bytes > bytes + empty_size) {
2024 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
2025 offset, bytes + empty_size,
2026 min_bytes);
2027 if (!ret)
2028 goto got_it;
2029 }
2030
2031 if (!node) {
2032 ret = -ENOSPC;
2033 goto out;
2034 }
2035 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1996 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2036 } 1997 }
2037 1998
2038 /*
2039 * We already searched all the extent entries from the passed in offset
2040 * to the end and didn't find enough space for the cluster, and we also
2041 * didn't find any bitmaps that met our criteria, just go ahead and exit
2042 */
2043 if (found_bitmap) {
2044 ret = -ENOSPC;
2045 goto out;
2046 }
2047
2048 cluster->points_to_bitmap = false;
2049 window_start = entry->offset; 1999 window_start = entry->offset;
2050 window_free = entry->bytes; 2000 window_free = entry->bytes;
2051 last = entry;
2052 max_extent = entry->bytes; 2001 max_extent = entry->bytes;
2002 first = entry;
2003 last = entry;
2004 prev = entry;
2053 2005
2054 while (1) { 2006 while (window_free <= min_bytes) {
2055 /* out window is just right, lets fill it */ 2007 node = rb_next(&entry->offset_index);
2056 if (window_free >= bytes + empty_size) 2008 if (!node)
2057 break; 2009 return -ENOSPC;
2058 2010 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2059 node = rb_next(&last->offset_index);
2060 if (!node) {
2061 if (found_bitmap)
2062 goto again;
2063 ret = -ENOSPC;
2064 goto out;
2065 }
2066 next = rb_entry(node, struct btrfs_free_space, offset_index);
2067 2011
2068 /* 2012 if (entry->bitmap)
2069 * we found a bitmap, so if this search doesn't result in a
2070 * cluster, we know to go and search again for the bitmaps and
2071 * start looking for space there
2072 */
2073 if (next->bitmap) {
2074 if (!found_bitmap)
2075 offset = next->offset;
2076 found_bitmap = true;
2077 last = next;
2078 continue; 2013 continue;
2079 }
2080
2081 /* 2014 /*
2082 * we haven't filled the empty size and the window is 2015 * we haven't filled the empty size and the window is
2083 * very large. reset and try again 2016 * very large. reset and try again
2084 */ 2017 */
2085 if (next->offset - (last->offset + last->bytes) > 128 * 1024 || 2018 if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
2086 next->offset - window_start > (bytes + empty_size) * 2) { 2019 entry->offset - window_start > (min_bytes * 2)) {
2087 entry = next; 2020 first = entry;
2088 window_start = entry->offset; 2021 window_start = entry->offset;
2089 window_free = entry->bytes; 2022 window_free = entry->bytes;
2090 last = entry; 2023 last = entry;
2091 max_extent = entry->bytes; 2024 max_extent = entry->bytes;
2092 } else { 2025 } else {
2093 last = next; 2026 last = entry;
2094 window_free += next->bytes; 2027 window_free += entry->bytes;
2095 if (entry->bytes > max_extent) 2028 if (entry->bytes > max_extent)
2096 max_extent = entry->bytes; 2029 max_extent = entry->bytes;
2097 } 2030 }
2031 prev = entry;
2098 } 2032 }
2099 2033
2100 cluster->window_start = entry->offset; 2034 cluster->window_start = first->offset;
2035
2036 node = &first->offset_index;
2101 2037
2102 /* 2038 /*
2103 * now we've found our entries, pull them out of the free space 2039 * now we've found our entries, pull them out of the free space
2104 * cache and put them into the cluster rbtree 2040 * cache and put them into the cluster rbtree
2105 *
2106 * The cluster includes an rbtree, but only uses the offset index
2107 * of each free space cache entry.
2108 */ 2041 */
2109 while (1) { 2042 do {
2043 int ret;
2044
2045 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2110 node = rb_next(&entry->offset_index); 2046 node = rb_next(&entry->offset_index);
2111 if (entry->bitmap && node) { 2047 if (entry->bitmap)
2112 entry = rb_entry(node, struct btrfs_free_space,
2113 offset_index);
2114 continue; 2048 continue;
2115 } else if (entry->bitmap && !node) {
2116 break;
2117 }
2118 2049
2119 rb_erase(&entry->offset_index, &block_group->free_space_offset); 2050 rb_erase(&entry->offset_index, &block_group->free_space_offset);
2120 ret = tree_insert_offset(&cluster->root, entry->offset, 2051 ret = tree_insert_offset(&cluster->root, entry->offset,
2121 &entry->offset_index, 0); 2052 &entry->offset_index, 0);
2122 BUG_ON(ret); 2053 BUG_ON(ret);
2054 } while (node && entry != last);
2123 2055
2124 if (!node || entry == last) 2056 cluster->max_size = max_extent;
2125 break; 2057
2058 return 0;
2059}
2060
2061/*
 2062 * This specifically looks for bitmaps that may work in the cluster; we assume
2063 * that we have already failed to find extents that will work.
2064 */
2065static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2066 struct btrfs_free_cluster *cluster,
2067 u64 offset, u64 bytes, u64 min_bytes)
2068{
2069 struct btrfs_free_space *entry;
2070 struct rb_node *node;
2071 int ret = -ENOSPC;
2072
2073 if (block_group->total_bitmaps == 0)
2074 return -ENOSPC;
2126 2075
2076 entry = tree_search_offset(block_group,
2077 offset_to_bitmap(block_group, offset),
2078 0, 1);
2079 if (!entry)
2080 return -ENOSPC;
2081
2082 node = &entry->offset_index;
2083 do {
2127 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2084 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2085 node = rb_next(&entry->offset_index);
2086 if (!entry->bitmap)
2087 continue;
2088 if (entry->bytes < min_bytes)
2089 continue;
2090 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2091 bytes, min_bytes);
2092 } while (ret && node);
2093
2094 return ret;
2095}
2096
2097/*
2098 * here we try to find a cluster of blocks in a block group. The goal
2099 * is to find at least bytes free and up to empty_size + bytes free.
2100 * We might not find them all in one contiguous area.
2101 *
2102 * returns zero and sets up cluster if things worked out, otherwise
 2103 * it returns -ENOSPC
2104 */
2105int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2106 struct btrfs_root *root,
2107 struct btrfs_block_group_cache *block_group,
2108 struct btrfs_free_cluster *cluster,
2109 u64 offset, u64 bytes, u64 empty_size)
2110{
2111 u64 min_bytes;
2112 int ret;
2113
2114 /* for metadata, allow allocates with more holes */
2115 if (btrfs_test_opt(root, SSD_SPREAD)) {
2116 min_bytes = bytes + empty_size;
2117 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2118 /*
2119 * we want to do larger allocations when we are
2120 * flushing out the delayed refs, it helps prevent
2121 * making more work as we go along.
2122 */
2123 if (trans->transaction->delayed_refs.flushing)
2124 min_bytes = max(bytes, (bytes + empty_size) >> 1);
2125 else
2126 min_bytes = max(bytes, (bytes + empty_size) >> 4);
2127 } else
2128 min_bytes = max(bytes, (bytes + empty_size) >> 2);
2129
2130 spin_lock(&block_group->tree_lock);
2131
2132 /*
	 2133 * If we know we don't have enough space to make a cluster, don't even
2134 * bother doing all the work to try and find one.
2135 */
2136 if (block_group->free_space < min_bytes) {
2137 spin_unlock(&block_group->tree_lock);
2138 return -ENOSPC;
2128 } 2139 }
2129 2140
2130 cluster->max_size = max_extent; 2141 spin_lock(&cluster->lock);
2131got_it: 2142
2132 ret = 0; 2143 /* someone already found a cluster, hooray */
2133 atomic_inc(&block_group->count); 2144 if (cluster->block_group) {
2134 list_add_tail(&cluster->block_group_list, &block_group->cluster_list); 2145 ret = 0;
2135 cluster->block_group = block_group; 2146 goto out;
2147 }
2148
2149 ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
2150 min_bytes);
2151 if (ret)
2152 ret = setup_cluster_bitmap(block_group, cluster, offset,
2153 bytes, min_bytes);
2154
2155 if (!ret) {
2156 atomic_inc(&block_group->count);
2157 list_add_tail(&cluster->block_group_list,
2158 &block_group->cluster_list);
2159 cluster->block_group = block_group;
2160 }
2136out: 2161out:
2137 spin_unlock(&cluster->lock); 2162 spin_unlock(&cluster->lock);
2138 spin_unlock(&block_group->tree_lock); 2163 spin_unlock(&block_group->tree_lock);
@@ -2149,8 +2174,99 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2149 spin_lock_init(&cluster->refill_lock); 2174 spin_lock_init(&cluster->refill_lock);
2150 cluster->root = RB_ROOT; 2175 cluster->root = RB_ROOT;
2151 cluster->max_size = 0; 2176 cluster->max_size = 0;
2152 cluster->points_to_bitmap = false;
2153 INIT_LIST_HEAD(&cluster->block_group_list); 2177 INIT_LIST_HEAD(&cluster->block_group_list);
2154 cluster->block_group = NULL; 2178 cluster->block_group = NULL;
2155} 2179}
2156 2180
2181int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2182 u64 *trimmed, u64 start, u64 end, u64 minlen)
2183{
2184 struct btrfs_free_space *entry = NULL;
2185 struct btrfs_fs_info *fs_info = block_group->fs_info;
2186 u64 bytes = 0;
2187 u64 actually_trimmed;
2188 int ret = 0;
2189
2190 *trimmed = 0;
2191
2192 while (start < end) {
2193 spin_lock(&block_group->tree_lock);
2194
2195 if (block_group->free_space < minlen) {
2196 spin_unlock(&block_group->tree_lock);
2197 break;
2198 }
2199
2200 entry = tree_search_offset(block_group, start, 0, 1);
2201 if (!entry)
2202 entry = tree_search_offset(block_group,
2203 offset_to_bitmap(block_group,
2204 start),
2205 1, 1);
2206
2207 if (!entry || entry->offset >= end) {
2208 spin_unlock(&block_group->tree_lock);
2209 break;
2210 }
2211
2212 if (entry->bitmap) {
2213 ret = search_bitmap(block_group, entry, &start, &bytes);
2214 if (!ret) {
2215 if (start >= end) {
2216 spin_unlock(&block_group->tree_lock);
2217 break;
2218 }
2219 bytes = min(bytes, end - start);
2220 bitmap_clear_bits(block_group, entry,
2221 start, bytes);
2222 if (entry->bytes == 0)
2223 free_bitmap(block_group, entry);
2224 } else {
2225 start = entry->offset + BITS_PER_BITMAP *
2226 block_group->sectorsize;
2227 spin_unlock(&block_group->tree_lock);
2228 ret = 0;
2229 continue;
2230 }
2231 } else {
2232 start = entry->offset;
2233 bytes = min(entry->bytes, end - start);
2234 unlink_free_space(block_group, entry);
2235 kfree(entry);
2236 }
2237
2238 spin_unlock(&block_group->tree_lock);
2239
2240 if (bytes >= minlen) {
2241 int update_ret;
2242 update_ret = btrfs_update_reserved_bytes(block_group,
2243 bytes, 1, 1);
2244
2245 ret = btrfs_error_discard_extent(fs_info->extent_root,
2246 start,
2247 bytes,
2248 &actually_trimmed);
2249
2250 btrfs_add_free_space(block_group,
2251 start, bytes);
2252 if (!update_ret)
2253 btrfs_update_reserved_bytes(block_group,
2254 bytes, 0, 1);
2255
2256 if (ret)
2257 break;
2258 *trimmed += actually_trimmed;
2259 }
2260 start += bytes;
2261 bytes = 0;
2262
2263 if (fatal_signal_pending(current)) {
2264 ret = -ERESTARTSYS;
2265 break;
2266 }
2267
2268 cond_resched();
2269 }
2270
2271 return ret;
2272}
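
The free-space-cache.c hunks above replace kzalloc()/kfree() of struct btrfs_free_space with a dedicated slab cache, btrfs_free_space_cachep, which is created alongside the other btrfs caches elsewhere in this series. A minimal sketch of that slab-cache lifecycle with made-up names, not code from this patch:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>

/* stand-in for struct btrfs_free_space; the fields are illustrative */
struct example_entry {
	u64 offset;
	u64 bytes;
};

static struct kmem_cache *example_cachep;

static int __init example_init(void)
{
	struct example_entry *e;

	/* one cache sized for the entry type, like btrfs_free_space_cachep */
	example_cachep = kmem_cache_create("example_entry",
					   sizeof(struct example_entry), 0,
					   SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					   NULL);
	if (!example_cachep)
		return -ENOMEM;

	/* allocate one zeroed entry and release it, mirroring the
	 * kmem_cache_zalloc()/kmem_cache_free() pairs in the hunks above */
	e = kmem_cache_zalloc(example_cachep, GFP_NOFS);
	if (e)
		kmem_cache_free(example_cachep, e);
	return 0;
}

static void __exit example_exit(void)
{
	kmem_cache_destroy(example_cachep);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
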
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index e49ca5c321b5..65c3b935289f 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -68,4 +68,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
68int btrfs_return_cluster_to_free_space( 68int btrfs_return_cluster_to_free_space(
69 struct btrfs_block_group_cache *block_group, 69 struct btrfs_block_group_cache *block_group,
70 struct btrfs_free_cluster *cluster); 70 struct btrfs_free_cluster *cluster);
71int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
72 u64 *trimmed, u64 start, u64 end, u64 minlen);
71#endif 73#endif
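
The header now exposes btrfs_trim_block_group(), implemented in the free-space-cache.c hunks above. A hypothetical usage sketch; only the btrfs_trim_block_group() call reflects the new API, while the wrapper, its parameter names and the log message are illustrative:

/* assumes the usual btrfs headers; struct btrfs_block_group_cache comes
 * from ctree.h */
#include "ctree.h"
#include "free-space-cache.h"

static int example_trim_one_group(struct btrfs_block_group_cache *block_group,
				  u64 range_start, u64 range_end,
				  u64 min_extent_len)
{
	u64 trimmed = 0;
	int ret;

	/* ask the free-space cache to discard free extents of at least
	 * min_extent_len bytes inside [range_start, range_end) */
	ret = btrfs_trim_block_group(block_group, &trimmed,
				     range_start, range_end, min_extent_len);
	if (!ret)
		printk(KERN_INFO "btrfs: trimmed %llu bytes\n",
		       (unsigned long long)trimmed);
	return ret;
}
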
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c56eb5909172..c05a08f4c411 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -30,7 +30,8 @@ int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
30 int slot; 30 int slot;
31 31
32 path = btrfs_alloc_path(); 32 path = btrfs_alloc_path();
33 BUG_ON(!path); 33 if (!path)
34 return -ENOMEM;
34 35
35 search_key.objectid = BTRFS_LAST_FREE_OBJECTID; 36 search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
36 search_key.type = -1; 37 search_key.type = -1;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 119520bdb9a5..93c28a1d6bdc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -50,6 +50,7 @@
50#include "tree-log.h" 50#include "tree-log.h"
51#include "compression.h" 51#include "compression.h"
52#include "locking.h" 52#include "locking.h"
53#include "free-space-cache.h"
53 54
54struct btrfs_iget_args { 55struct btrfs_iget_args {
55 u64 ino; 56 u64 ino;
@@ -70,6 +71,7 @@ static struct kmem_cache *btrfs_inode_cachep;
70struct kmem_cache *btrfs_trans_handle_cachep; 71struct kmem_cache *btrfs_trans_handle_cachep;
71struct kmem_cache *btrfs_transaction_cachep; 72struct kmem_cache *btrfs_transaction_cachep;
72struct kmem_cache *btrfs_path_cachep; 73struct kmem_cache *btrfs_path_cachep;
74struct kmem_cache *btrfs_free_space_cachep;
73 75
74#define S_SHIFT 12 76#define S_SHIFT 12
75static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { 77static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -82,7 +84,8 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
82 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, 84 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
83}; 85};
84 86
85static void btrfs_truncate(struct inode *inode); 87static int btrfs_setsize(struct inode *inode, loff_t newsize);
88static int btrfs_truncate(struct inode *inode);
86static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); 89static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
87static noinline int cow_file_range(struct inode *inode, 90static noinline int cow_file_range(struct inode *inode,
88 struct page *locked_page, 91 struct page *locked_page,
@@ -288,6 +291,7 @@ static noinline int add_async_extent(struct async_cow *cow,
288 struct async_extent *async_extent; 291 struct async_extent *async_extent;
289 292
290 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 293 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
294 BUG_ON(!async_extent);
291 async_extent->start = start; 295 async_extent->start = start;
292 async_extent->ram_size = ram_size; 296 async_extent->ram_size = ram_size;
293 async_extent->compressed_size = compressed_size; 297 async_extent->compressed_size = compressed_size;
@@ -382,9 +386,11 @@ again:
382 */ 386 */
383 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && 387 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
384 (btrfs_test_opt(root, COMPRESS) || 388 (btrfs_test_opt(root, COMPRESS) ||
385 (BTRFS_I(inode)->force_compress))) { 389 (BTRFS_I(inode)->force_compress) ||
390 (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
386 WARN_ON(pages); 391 WARN_ON(pages);
387 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); 392 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
393 BUG_ON(!pages);
388 394
389 if (BTRFS_I(inode)->force_compress) 395 if (BTRFS_I(inode)->force_compress)
390 compress_type = BTRFS_I(inode)->force_compress; 396 compress_type = BTRFS_I(inode)->force_compress;
@@ -1254,7 +1260,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1254 ret = run_delalloc_nocow(inode, locked_page, start, end, 1260 ret = run_delalloc_nocow(inode, locked_page, start, end,
1255 page_started, 0, nr_written); 1261 page_started, 0, nr_written);
1256 else if (!btrfs_test_opt(root, COMPRESS) && 1262 else if (!btrfs_test_opt(root, COMPRESS) &&
1257 !(BTRFS_I(inode)->force_compress)) 1263 !(BTRFS_I(inode)->force_compress) &&
1264 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
1258 ret = cow_file_range(inode, locked_page, start, end, 1265 ret = cow_file_range(inode, locked_page, start, end,
1259 page_started, nr_written, 1); 1266 page_started, nr_written, 1);
1260 else 1267 else
@@ -1461,8 +1468,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1461 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1468 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1462 return btrfs_submit_compressed_read(inode, bio, 1469 return btrfs_submit_compressed_read(inode, bio,
1463 mirror_num, bio_flags); 1470 mirror_num, bio_flags);
1464 } else if (!skip_sum) 1471 } else if (!skip_sum) {
1465 btrfs_lookup_bio_sums(root, inode, bio, NULL); 1472 ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1473 if (ret)
1474 return ret;
1475 }
1466 goto mapit; 1476 goto mapit;
1467 } else if (!skip_sum) { 1477 } else if (!skip_sum) {
1468 /* csum items have already been cloned */ 1478 /* csum items have already been cloned */
@@ -1785,6 +1795,8 @@ out:
1785static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 1795static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1786 struct extent_state *state, int uptodate) 1796 struct extent_state *state, int uptodate)
1787{ 1797{
1798 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
1799
1788 ClearPagePrivate2(page); 1800 ClearPagePrivate2(page);
1789 return btrfs_finish_ordered_io(page->mapping->host, start, end); 1801 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1790} 1802}
@@ -1895,10 +1907,10 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
1895 else 1907 else
1896 rw = READ; 1908 rw = READ;
1897 1909
1898 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, 1910 ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1899 failrec->last_mirror, 1911 failrec->last_mirror,
1900 failrec->bio_flags, 0); 1912 failrec->bio_flags, 0);
1901 return 0; 1913 return ret;
1902} 1914}
1903 1915
1904/* 1916/*
@@ -2282,7 +2294,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2282 * this cleans up any orphans that may be left on the list from the last use 2294 * this cleans up any orphans that may be left on the list from the last use
2283 * of this root. 2295 * of this root.
2284 */ 2296 */
2285void btrfs_orphan_cleanup(struct btrfs_root *root) 2297int btrfs_orphan_cleanup(struct btrfs_root *root)
2286{ 2298{
2287 struct btrfs_path *path; 2299 struct btrfs_path *path;
2288 struct extent_buffer *leaf; 2300 struct extent_buffer *leaf;
@@ -2292,10 +2304,13 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2292 int ret = 0, nr_unlink = 0, nr_truncate = 0; 2304 int ret = 0, nr_unlink = 0, nr_truncate = 0;
2293 2305
2294 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 2306 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2295 return; 2307 return 0;
2296 2308
2297 path = btrfs_alloc_path(); 2309 path = btrfs_alloc_path();
2298 BUG_ON(!path); 2310 if (!path) {
2311 ret = -ENOMEM;
2312 goto out;
2313 }
2299 path->reada = -1; 2314 path->reada = -1;
2300 2315
2301 key.objectid = BTRFS_ORPHAN_OBJECTID; 2316 key.objectid = BTRFS_ORPHAN_OBJECTID;
@@ -2304,11 +2319,8 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2304 2319
2305 while (1) { 2320 while (1) {
2306 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2321 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2307 if (ret < 0) { 2322 if (ret < 0)
2308 printk(KERN_ERR "Error searching slot for orphan: %d" 2323 goto out;
2309 "\n", ret);
2310 break;
2311 }
2312 2324
2313 /* 2325 /*
2314 * if ret == 0 means we found what we were searching for, which 2326 * if ret == 0 means we found what we were searching for, which
@@ -2316,6 +2328,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2316 * find the key and see if we have stuff that matches 2328 * find the key and see if we have stuff that matches
2317 */ 2329 */
2318 if (ret > 0) { 2330 if (ret > 0) {
2331 ret = 0;
2319 if (path->slots[0] == 0) 2332 if (path->slots[0] == 0)
2320 break; 2333 break;
2321 path->slots[0]--; 2334 path->slots[0]--;
@@ -2343,7 +2356,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2343 found_key.type = BTRFS_INODE_ITEM_KEY; 2356 found_key.type = BTRFS_INODE_ITEM_KEY;
2344 found_key.offset = 0; 2357 found_key.offset = 0;
2345 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 2358 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2346 BUG_ON(IS_ERR(inode)); 2359 if (IS_ERR(inode)) {
2360 ret = PTR_ERR(inode);
2361 goto out;
2362 }
2347 2363
2348 /* 2364 /*
2349 * add this inode to the orphan list so btrfs_orphan_del does 2365 * add this inode to the orphan list so btrfs_orphan_del does
@@ -2361,7 +2377,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2361 */ 2377 */
2362 if (is_bad_inode(inode)) { 2378 if (is_bad_inode(inode)) {
2363 trans = btrfs_start_transaction(root, 0); 2379 trans = btrfs_start_transaction(root, 0);
2364 BUG_ON(IS_ERR(trans)); 2380 if (IS_ERR(trans)) {
2381 ret = PTR_ERR(trans);
2382 goto out;
2383 }
2365 btrfs_orphan_del(trans, inode); 2384 btrfs_orphan_del(trans, inode);
2366 btrfs_end_transaction(trans, root); 2385 btrfs_end_transaction(trans, root);
2367 iput(inode); 2386 iput(inode);
@@ -2370,17 +2389,22 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2370 2389
2371 /* if we have links, this was a truncate, lets do that */ 2390 /* if we have links, this was a truncate, lets do that */
2372 if (inode->i_nlink) { 2391 if (inode->i_nlink) {
2392 if (!S_ISREG(inode->i_mode)) {
2393 WARN_ON(1);
2394 iput(inode);
2395 continue;
2396 }
2373 nr_truncate++; 2397 nr_truncate++;
2374 btrfs_truncate(inode); 2398 ret = btrfs_truncate(inode);
2375 } else { 2399 } else {
2376 nr_unlink++; 2400 nr_unlink++;
2377 } 2401 }
2378 2402
2379 /* this will do delete_inode and everything for us */ 2403 /* this will do delete_inode and everything for us */
2380 iput(inode); 2404 iput(inode);
2405 if (ret)
2406 goto out;
2381 } 2407 }
2382 btrfs_free_path(path);
2383
2384 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 2408 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2385 2409
2386 if (root->orphan_block_rsv) 2410 if (root->orphan_block_rsv)
@@ -2389,14 +2413,20 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2389 2413
2390 if (root->orphan_block_rsv || root->orphan_item_inserted) { 2414 if (root->orphan_block_rsv || root->orphan_item_inserted) {
2391 trans = btrfs_join_transaction(root, 1); 2415 trans = btrfs_join_transaction(root, 1);
2392 BUG_ON(IS_ERR(trans)); 2416 if (!IS_ERR(trans))
2393 btrfs_end_transaction(trans, root); 2417 btrfs_end_transaction(trans, root);
2394 } 2418 }
2395 2419
2396 if (nr_unlink) 2420 if (nr_unlink)
2397 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); 2421 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2398 if (nr_truncate) 2422 if (nr_truncate)
2399 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); 2423 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2424
2425out:
2426 if (ret)
2427 printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
2428 btrfs_free_path(path);
2429 return ret;
2400} 2430}
2401 2431
2402/* 2432/*
@@ -2507,6 +2537,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
2507 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2537 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2508 2538
2509 alloc_group_block = btrfs_inode_block_group(leaf, inode_item); 2539 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2540 if (location.objectid == BTRFS_FREE_SPACE_OBJECTID)
2541 inode->i_mapping->flags &= ~__GFP_FS;
2510 2542
2511 /* 2543 /*
2512 * try to precache a NULL acl entry for files that don't have 2544 * try to precache a NULL acl entry for files that don't have
@@ -2635,10 +2667,10 @@ failed:
2635 * recovery code. It remove a link in a directory with a given name, and 2667 * recovery code. It remove a link in a directory with a given name, and
2636 * also drops the back refs in the inode to the directory 2668 * also drops the back refs in the inode to the directory
2637 */ 2669 */
2638int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 2670static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2639 struct btrfs_root *root, 2671 struct btrfs_root *root,
2640 struct inode *dir, struct inode *inode, 2672 struct inode *dir, struct inode *inode,
2641 const char *name, int name_len) 2673 const char *name, int name_len)
2642{ 2674{
2643 struct btrfs_path *path; 2675 struct btrfs_path *path;
2644 int ret = 0; 2676 int ret = 0;
@@ -2710,12 +2742,25 @@ err:
2710 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 2742 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2711 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; 2743 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2712 btrfs_update_inode(trans, root, dir); 2744 btrfs_update_inode(trans, root, dir);
2713 btrfs_drop_nlink(inode);
2714 ret = btrfs_update_inode(trans, root, inode);
2715out: 2745out:
2716 return ret; 2746 return ret;
2717} 2747}
2718 2748
2749int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2750 struct btrfs_root *root,
2751 struct inode *dir, struct inode *inode,
2752 const char *name, int name_len)
2753{
2754 int ret;
2755 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
2756 if (!ret) {
2757 btrfs_drop_nlink(inode);
2758 ret = btrfs_update_inode(trans, root, inode);
2759 }
2760 return ret;
2761}
2762
2763
2719/* helper to check if there is any shared block in the path */ 2764/* helper to check if there is any shared block in the path */
2720static int check_path_shared(struct btrfs_root *root, 2765static int check_path_shared(struct btrfs_root *root,
2721 struct btrfs_path *path) 2766 struct btrfs_path *path)
@@ -3537,7 +3582,13 @@ out:
3537 return ret; 3582 return ret;
3538} 3583}
3539 3584
3540int btrfs_cont_expand(struct inode *inode, loff_t size) 3585/*
3586 * This function puts in dummy file extents for the area we're creating a hole
3587 * for. So if we are truncating this file to a larger size we need to insert
 3588 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
3589 * the range between oldsize and size
3590 */
3591int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3541{ 3592{
3542 struct btrfs_trans_handle *trans; 3593 struct btrfs_trans_handle *trans;
3543 struct btrfs_root *root = BTRFS_I(inode)->root; 3594 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3545,7 +3596,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3545 struct extent_map *em = NULL; 3596 struct extent_map *em = NULL;
3546 struct extent_state *cached_state = NULL; 3597 struct extent_state *cached_state = NULL;
3547 u64 mask = root->sectorsize - 1; 3598 u64 mask = root->sectorsize - 1;
3548 u64 hole_start = (inode->i_size + mask) & ~mask; 3599 u64 hole_start = (oldsize + mask) & ~mask;
3549 u64 block_end = (size + mask) & ~mask; 3600 u64 block_end = (size + mask) & ~mask;
3550 u64 last_byte; 3601 u64 last_byte;
3551 u64 cur_offset; 3602 u64 cur_offset;
@@ -3590,13 +3641,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3590 err = btrfs_drop_extents(trans, inode, cur_offset, 3641 err = btrfs_drop_extents(trans, inode, cur_offset,
3591 cur_offset + hole_size, 3642 cur_offset + hole_size,
3592 &hint_byte, 1); 3643 &hint_byte, 1);
3593 BUG_ON(err); 3644 if (err)
3645 break;
3594 3646
3595 err = btrfs_insert_file_extent(trans, root, 3647 err = btrfs_insert_file_extent(trans, root,
3596 inode->i_ino, cur_offset, 0, 3648 inode->i_ino, cur_offset, 0,
3597 0, hole_size, 0, hole_size, 3649 0, hole_size, 0, hole_size,
3598 0, 0, 0); 3650 0, 0, 0);
3599 BUG_ON(err); 3651 if (err)
3652 break;
3600 3653
3601 btrfs_drop_extent_cache(inode, hole_start, 3654 btrfs_drop_extent_cache(inode, hole_start,
3602 last_byte - 1, 0); 3655 last_byte - 1, 0);
@@ -3616,81 +3669,41 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3616 return err; 3669 return err;
3617} 3670}
3618 3671
3619static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) 3672static int btrfs_setsize(struct inode *inode, loff_t newsize)
3620{ 3673{
3621 struct btrfs_root *root = BTRFS_I(inode)->root; 3674 loff_t oldsize = i_size_read(inode);
3622 struct btrfs_trans_handle *trans;
3623 unsigned long nr;
3624 int ret; 3675 int ret;
3625 3676
3626 if (attr->ia_size == inode->i_size) 3677 if (newsize == oldsize)
3627 return 0; 3678 return 0;
3628 3679
3629 if (attr->ia_size > inode->i_size) { 3680 if (newsize > oldsize) {
3630 unsigned long limit; 3681 i_size_write(inode, newsize);
3631 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 3682 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3632 if (attr->ia_size > inode->i_sb->s_maxbytes) 3683 truncate_pagecache(inode, oldsize, newsize);
3633 return -EFBIG; 3684 ret = btrfs_cont_expand(inode, oldsize, newsize);
3634 if (limit != RLIM_INFINITY && attr->ia_size > limit) {
3635 send_sig(SIGXFSZ, current, 0);
3636 return -EFBIG;
3637 }
3638 }
3639
3640 trans = btrfs_start_transaction(root, 5);
3641 if (IS_ERR(trans))
3642 return PTR_ERR(trans);
3643
3644 btrfs_set_trans_block_group(trans, inode);
3645
3646 ret = btrfs_orphan_add(trans, inode);
3647 BUG_ON(ret);
3648
3649 nr = trans->blocks_used;
3650 btrfs_end_transaction(trans, root);
3651 btrfs_btree_balance_dirty(root, nr);
3652
3653 if (attr->ia_size > inode->i_size) {
3654 ret = btrfs_cont_expand(inode, attr->ia_size);
3655 if (ret) { 3685 if (ret) {
3656 btrfs_truncate(inode); 3686 btrfs_setsize(inode, oldsize);
3657 return ret; 3687 return ret;
3658 } 3688 }
3659 3689
3660 i_size_write(inode, attr->ia_size); 3690 mark_inode_dirty(inode);
3661 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 3691 } else {
3662 3692
3663 trans = btrfs_start_transaction(root, 0); 3693 /*
3664 BUG_ON(IS_ERR(trans)); 3694 * We're truncating a file that used to have good data down to
3665 btrfs_set_trans_block_group(trans, inode); 3695 * zero. Make sure it gets into the ordered flush list so that
3666 trans->block_rsv = root->orphan_block_rsv; 3696 * any new writes get down to disk quickly.
3667 BUG_ON(!trans->block_rsv); 3697 */
3698 if (newsize == 0)
3699 BTRFS_I(inode)->ordered_data_close = 1;
3668 3700
3669 ret = btrfs_update_inode(trans, root, inode); 3701 /* we don't support swapfiles, so vmtruncate shouldn't fail */
3670 BUG_ON(ret); 3702 truncate_setsize(inode, newsize);
3671 if (inode->i_nlink > 0) { 3703 ret = btrfs_truncate(inode);
3672 ret = btrfs_orphan_del(trans, inode);
3673 BUG_ON(ret);
3674 }
3675 nr = trans->blocks_used;
3676 btrfs_end_transaction(trans, root);
3677 btrfs_btree_balance_dirty(root, nr);
3678 return 0;
3679 } 3704 }
3680 3705
3681 /* 3706 return ret;
3682 * We're truncating a file that used to have good data down to
3683 * zero. Make sure it gets into the ordered flush list so that
3684 * any new writes get down to disk quickly.
3685 */
3686 if (attr->ia_size == 0)
3687 BTRFS_I(inode)->ordered_data_close = 1;
3688
3689 /* we don't support swapfiles, so vmtruncate shouldn't fail */
3690 ret = vmtruncate(inode, attr->ia_size);
3691 BUG_ON(ret);
3692
3693 return 0;
3694} 3707}
3695 3708
3696static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) 3709static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
@@ -3707,7 +3720,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3707 return err; 3720 return err;
3708 3721
3709 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 3722 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3710 err = btrfs_setattr_size(inode, attr); 3723 err = btrfs_setsize(inode, attr->ia_size);
3711 if (err) 3724 if (err)
3712 return err; 3725 return err;
3713 } 3726 }
@@ -3730,6 +3743,8 @@ void btrfs_evict_inode(struct inode *inode)
3730 unsigned long nr; 3743 unsigned long nr;
3731 int ret; 3744 int ret;
3732 3745
3746 trace_btrfs_inode_evict(inode);
3747
3733 truncate_inode_pages(&inode->i_data, 0); 3748 truncate_inode_pages(&inode->i_data, 0);
3734 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || 3749 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3735 root == root->fs_info->tree_root)) 3750 root == root->fs_info->tree_root))
@@ -4072,7 +4087,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4072 BTRFS_I(inode)->root = root; 4087 BTRFS_I(inode)->root = root;
4073 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); 4088 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4074 btrfs_read_locked_inode(inode); 4089 btrfs_read_locked_inode(inode);
4075
4076 inode_tree_add(inode); 4090 inode_tree_add(inode);
4077 unlock_new_inode(inode); 4091 unlock_new_inode(inode);
4078 if (new) 4092 if (new)
@@ -4147,8 +4161,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4147 if (!IS_ERR(inode) && root != sub_root) { 4161 if (!IS_ERR(inode) && root != sub_root) {
4148 down_read(&root->fs_info->cleanup_work_sem); 4162 down_read(&root->fs_info->cleanup_work_sem);
4149 if (!(inode->i_sb->s_flags & MS_RDONLY)) 4163 if (!(inode->i_sb->s_flags & MS_RDONLY))
4150 btrfs_orphan_cleanup(sub_root); 4164 ret = btrfs_orphan_cleanup(sub_root);
4151 up_read(&root->fs_info->cleanup_work_sem); 4165 up_read(&root->fs_info->cleanup_work_sem);
4166 if (ret)
4167 inode = ERR_PTR(ret);
4152 } 4168 }
4153 4169
4154 return inode; 4170 return inode;
@@ -4282,6 +4298,9 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4282 while (di_cur < di_total) { 4298 while (di_cur < di_total) {
4283 struct btrfs_key location; 4299 struct btrfs_key location;
4284 4300
4301 if (verify_dir_item(root, leaf, di))
4302 break;
4303
4285 name_len = btrfs_dir_name_len(leaf, di); 4304 name_len = btrfs_dir_name_len(leaf, di);
4286 if (name_len <= sizeof(tmp_name)) { 4305 if (name_len <= sizeof(tmp_name)) {
4287 name_ptr = tmp_name; 4306 name_ptr = tmp_name;
@@ -4517,6 +4536,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4517 return ERR_PTR(-ENOMEM); 4536 return ERR_PTR(-ENOMEM);
4518 4537
4519 if (dir) { 4538 if (dir) {
4539 trace_btrfs_inode_request(dir);
4540
4520 ret = btrfs_set_inode_index(dir, index); 4541 ret = btrfs_set_inode_index(dir, index);
4521 if (ret) { 4542 if (ret) {
4522 iput(inode); 4543 iput(inode);
@@ -4585,12 +4606,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4585 if ((mode & S_IFREG)) { 4606 if ((mode & S_IFREG)) {
4586 if (btrfs_test_opt(root, NODATASUM)) 4607 if (btrfs_test_opt(root, NODATASUM))
4587 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 4608 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4588 if (btrfs_test_opt(root, NODATACOW)) 4609 if (btrfs_test_opt(root, NODATACOW) ||
4610 (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
4589 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; 4611 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4590 } 4612 }
4591 4613
4592 insert_inode_hash(inode); 4614 insert_inode_hash(inode);
4593 inode_tree_add(inode); 4615 inode_tree_add(inode);
4616
4617 trace_btrfs_inode_new(inode);
4618
4594 return inode; 4619 return inode;
4595fail: 4620fail:
4596 if (dir) 4621 if (dir)
@@ -4809,7 +4834,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4809 4834
4810 /* do not allow sys_link's with other subvols of the same device */ 4835 /* do not allow sys_link's with other subvols of the same device */
4811 if (root->objectid != BTRFS_I(inode)->root->objectid) 4836 if (root->objectid != BTRFS_I(inode)->root->objectid)
4812 return -EPERM; 4837 return -EXDEV;
4838
4839 if (inode->i_nlink == ~0U)
4840 return -EMLINK;
4813 4841
4814 btrfs_inc_nlink(inode); 4842 btrfs_inc_nlink(inode);
4815 inode->i_ctime = CURRENT_TIME; 4843 inode->i_ctime = CURRENT_TIME;
@@ -5265,6 +5293,9 @@ insert:
5265 } 5293 }
5266 write_unlock(&em_tree->lock); 5294 write_unlock(&em_tree->lock);
5267out: 5295out:
5296
5297 trace_btrfs_get_extent(root, em);
5298
5268 if (path) 5299 if (path)
5269 btrfs_free_path(path); 5300 btrfs_free_path(path);
5270 if (trans) { 5301 if (trans) {
@@ -5748,6 +5779,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5748 5779
5749 kfree(dip->csums); 5780 kfree(dip->csums);
5750 kfree(dip); 5781 kfree(dip);
5782
5783 /* If we had a csum failure make sure to clear the uptodate flag */
5784 if (err)
5785 clear_bit(BIO_UPTODATE, &bio->bi_flags);
5751 dio_end_io(bio, err); 5786 dio_end_io(bio, err);
5752} 5787}
5753 5788
@@ -5849,6 +5884,10 @@ out_done:
5849 5884
5850 kfree(dip->csums); 5885 kfree(dip->csums);
5851 kfree(dip); 5886 kfree(dip);
5887
5888 /* If we had an error make sure to clear the uptodate flag */
5889 if (err)
5890 clear_bit(BIO_UPTODATE, &bio->bi_flags);
5852 dio_end_io(bio, err); 5891 dio_end_io(bio, err);
5853} 5892}
5854 5893
@@ -5922,9 +5961,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5922 __btrfs_submit_bio_start_direct_io, 5961 __btrfs_submit_bio_start_direct_io,
5923 __btrfs_submit_bio_done); 5962 __btrfs_submit_bio_done);
5924 goto err; 5963 goto err;
5925 } else if (!skip_sum) 5964 } else if (!skip_sum) {
5926 btrfs_lookup_bio_sums_dio(root, inode, bio, 5965 ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
5927 file_offset, csums); 5966 file_offset, csums);
5967 if (ret)
5968 goto err;
5969 }
5928 5970
5929 ret = btrfs_map_bio(root, rw, bio, 0, 1); 5971 ret = btrfs_map_bio(root, rw, bio, 0, 1);
5930err: 5972err:
@@ -5948,6 +5990,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
5948 int nr_pages = 0; 5990 int nr_pages = 0;
5949 u32 *csums = dip->csums; 5991 u32 *csums = dip->csums;
5950 int ret = 0; 5992 int ret = 0;
5993 int write = rw & REQ_WRITE;
5951 5994
5952 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); 5995 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
5953 if (!bio) 5996 if (!bio)
@@ -5984,7 +6027,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
5984 goto out_err; 6027 goto out_err;
5985 } 6028 }
5986 6029
5987 if (!skip_sum) 6030 /* Writes use the ordered csums */
6031 if (!write && !skip_sum)
5988 csums = csums + nr_pages; 6032 csums = csums + nr_pages;
5989 start_sector += submit_len >> 9; 6033 start_sector += submit_len >> 9;
5990 file_offset += submit_len; 6034 file_offset += submit_len;
@@ -6052,7 +6096,8 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6052 } 6096 }
6053 dip->csums = NULL; 6097 dip->csums = NULL;
6054 6098
6055 if (!skip_sum) { 6099 /* Writes use the ordered csum stuff, so we don't need dip->csums */
6100 if (!write && !skip_sum) {
6056 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); 6101 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
6057 if (!dip->csums) { 6102 if (!dip->csums) {
6058 kfree(dip); 6103 kfree(dip);
@@ -6474,28 +6519,42 @@ out:
6474 return ret; 6519 return ret;
6475} 6520}
6476 6521
6477static void btrfs_truncate(struct inode *inode) 6522static int btrfs_truncate(struct inode *inode)
6478{ 6523{
6479 struct btrfs_root *root = BTRFS_I(inode)->root; 6524 struct btrfs_root *root = BTRFS_I(inode)->root;
6480 int ret; 6525 int ret;
6526 int err = 0;
6481 struct btrfs_trans_handle *trans; 6527 struct btrfs_trans_handle *trans;
6482 unsigned long nr; 6528 unsigned long nr;
6483 u64 mask = root->sectorsize - 1; 6529 u64 mask = root->sectorsize - 1;
6484 6530
6485 if (!S_ISREG(inode->i_mode)) {
6486 WARN_ON(1);
6487 return;
6488 }
6489
6490 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); 6531 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
6491 if (ret) 6532 if (ret)
6492 return; 6533 return ret;
6493 6534
6494 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); 6535 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6495 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 6536 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6496 6537
6538 trans = btrfs_start_transaction(root, 5);
6539 if (IS_ERR(trans))
6540 return PTR_ERR(trans);
6541
6542 btrfs_set_trans_block_group(trans, inode);
6543
6544 ret = btrfs_orphan_add(trans, inode);
6545 if (ret) {
6546 btrfs_end_transaction(trans, root);
6547 return ret;
6548 }
6549
6550 nr = trans->blocks_used;
6551 btrfs_end_transaction(trans, root);
6552 btrfs_btree_balance_dirty(root, nr);
6553
6554 /* Now start a transaction for the truncate */
6497 trans = btrfs_start_transaction(root, 0); 6555 trans = btrfs_start_transaction(root, 0);
6498 BUG_ON(IS_ERR(trans)); 6556 if (IS_ERR(trans))
6557 return PTR_ERR(trans);
6499 btrfs_set_trans_block_group(trans, inode); 6558 btrfs_set_trans_block_group(trans, inode);
6500 trans->block_rsv = root->orphan_block_rsv; 6559 trans->block_rsv = root->orphan_block_rsv;
6501 6560
@@ -6522,29 +6581,38 @@ static void btrfs_truncate(struct inode *inode)
6522 while (1) { 6581 while (1) {
6523 if (!trans) { 6582 if (!trans) {
6524 trans = btrfs_start_transaction(root, 0); 6583 trans = btrfs_start_transaction(root, 0);
6525 BUG_ON(IS_ERR(trans)); 6584 if (IS_ERR(trans))
6585 return PTR_ERR(trans);
6526 btrfs_set_trans_block_group(trans, inode); 6586 btrfs_set_trans_block_group(trans, inode);
6527 trans->block_rsv = root->orphan_block_rsv; 6587 trans->block_rsv = root->orphan_block_rsv;
6528 } 6588 }
6529 6589
6530 ret = btrfs_block_rsv_check(trans, root, 6590 ret = btrfs_block_rsv_check(trans, root,
6531 root->orphan_block_rsv, 0, 5); 6591 root->orphan_block_rsv, 0, 5);
6532 if (ret) { 6592 if (ret == -EAGAIN) {
6533 BUG_ON(ret != -EAGAIN);
6534 ret = btrfs_commit_transaction(trans, root); 6593 ret = btrfs_commit_transaction(trans, root);
6535 BUG_ON(ret); 6594 if (ret)
6595 return ret;
6536 trans = NULL; 6596 trans = NULL;
6537 continue; 6597 continue;
6598 } else if (ret) {
6599 err = ret;
6600 break;
6538 } 6601 }
6539 6602
6540 ret = btrfs_truncate_inode_items(trans, root, inode, 6603 ret = btrfs_truncate_inode_items(trans, root, inode,
6541 inode->i_size, 6604 inode->i_size,
6542 BTRFS_EXTENT_DATA_KEY); 6605 BTRFS_EXTENT_DATA_KEY);
6543 if (ret != -EAGAIN) 6606 if (ret != -EAGAIN) {
6607 err = ret;
6544 break; 6608 break;
6609 }
6545 6610
6546 ret = btrfs_update_inode(trans, root, inode); 6611 ret = btrfs_update_inode(trans, root, inode);
6547 BUG_ON(ret); 6612 if (ret) {
6613 err = ret;
6614 break;
6615 }
6548 6616
6549 nr = trans->blocks_used; 6617 nr = trans->blocks_used;
6550 btrfs_end_transaction(trans, root); 6618 btrfs_end_transaction(trans, root);
@@ -6554,16 +6622,27 @@ static void btrfs_truncate(struct inode *inode)
6554 6622
6555 if (ret == 0 && inode->i_nlink > 0) { 6623 if (ret == 0 && inode->i_nlink > 0) {
6556 ret = btrfs_orphan_del(trans, inode); 6624 ret = btrfs_orphan_del(trans, inode);
6557 BUG_ON(ret); 6625 if (ret)
6626 err = ret;
6627 } else if (ret && inode->i_nlink > 0) {
6628 /*
6629 * Failed to do the truncate; remove us from the in-memory
6630 * orphan list.
6631 */
6632 ret = btrfs_orphan_del(NULL, inode);
6558 } 6633 }
6559 6634
6560 ret = btrfs_update_inode(trans, root, inode); 6635 ret = btrfs_update_inode(trans, root, inode);
6561 BUG_ON(ret); 6636 if (ret && !err)
6637 err = ret;
6562 6638
6563 nr = trans->blocks_used; 6639 nr = trans->blocks_used;
6564 ret = btrfs_end_transaction_throttle(trans, root); 6640 ret = btrfs_end_transaction_throttle(trans, root);
6565 BUG_ON(ret); 6641 if (ret && !err)
6642 err = ret;
6566 btrfs_btree_balance_dirty(root, nr); 6643 btrfs_btree_balance_dirty(root, nr);
6644
6645 return err;
6567} 6646}
6568 6647
6569/* 6648/*
@@ -6630,9 +6709,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6630 ei->index_cnt = (u64)-1; 6709 ei->index_cnt = (u64)-1;
6631 ei->last_unlink_trans = 0; 6710 ei->last_unlink_trans = 0;
6632 6711
6633 spin_lock_init(&ei->accounting_lock);
6634 atomic_set(&ei->outstanding_extents, 0); 6712 atomic_set(&ei->outstanding_extents, 0);
6635 ei->reserved_extents = 0; 6713 atomic_set(&ei->reserved_extents, 0);
6636 6714
6637 ei->ordered_data_close = 0; 6715 ei->ordered_data_close = 0;
6638 ei->orphan_meta_reserved = 0; 6716 ei->orphan_meta_reserved = 0;
@@ -6668,7 +6746,7 @@ void btrfs_destroy_inode(struct inode *inode)
6668 WARN_ON(!list_empty(&inode->i_dentry)); 6746 WARN_ON(!list_empty(&inode->i_dentry));
6669 WARN_ON(inode->i_data.nrpages); 6747 WARN_ON(inode->i_data.nrpages);
6670 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents)); 6748 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
6671 WARN_ON(BTRFS_I(inode)->reserved_extents); 6749 WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));
6672 6750
6673 /* 6751 /*
6674 * This can happen where we create an inode, but somebody else also 6752 * This can happen where we create an inode, but somebody else also
@@ -6760,6 +6838,8 @@ void btrfs_destroy_cachep(void)
6760 kmem_cache_destroy(btrfs_transaction_cachep); 6838 kmem_cache_destroy(btrfs_transaction_cachep);
6761 if (btrfs_path_cachep) 6839 if (btrfs_path_cachep)
6762 kmem_cache_destroy(btrfs_path_cachep); 6840 kmem_cache_destroy(btrfs_path_cachep);
6841 if (btrfs_free_space_cachep)
6842 kmem_cache_destroy(btrfs_free_space_cachep);
6763} 6843}
6764 6844
6765int btrfs_init_cachep(void) 6845int btrfs_init_cachep(void)
@@ -6788,6 +6868,12 @@ int btrfs_init_cachep(void)
6788 if (!btrfs_path_cachep) 6868 if (!btrfs_path_cachep)
6789 goto fail; 6869 goto fail;
6790 6870
6871 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
6872 sizeof(struct btrfs_free_space), 0,
6873 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
6874 if (!btrfs_free_space_cachep)
6875 goto fail;
6876
6791 return 0; 6877 return 0;
6792fail: 6878fail:
6793 btrfs_destroy_cachep(); 6879 btrfs_destroy_cachep();
@@ -6806,6 +6892,26 @@ static int btrfs_getattr(struct vfsmount *mnt,
6806 return 0; 6892 return 0;
6807} 6893}
6808 6894
6895/*
6896 * If a file is moved, it will inherit the cow and compression flags of the new
6897 * directory.
6898 */
6899static void fixup_inode_flags(struct inode *dir, struct inode *inode)
6900{
6901 struct btrfs_inode *b_dir = BTRFS_I(dir);
6902 struct btrfs_inode *b_inode = BTRFS_I(inode);
6903
6904 if (b_dir->flags & BTRFS_INODE_NODATACOW)
6905 b_inode->flags |= BTRFS_INODE_NODATACOW;
6906 else
6907 b_inode->flags &= ~BTRFS_INODE_NODATACOW;
6908
6909 if (b_dir->flags & BTRFS_INODE_COMPRESS)
6910 b_inode->flags |= BTRFS_INODE_COMPRESS;
6911 else
6912 b_inode->flags &= ~BTRFS_INODE_COMPRESS;
6913}
6914
6809static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, 6915static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6810 struct inode *new_dir, struct dentry *new_dentry) 6916 struct inode *new_dir, struct dentry *new_dentry)
6811{ 6917{
@@ -6908,11 +7014,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6908 old_dentry->d_name.name, 7014 old_dentry->d_name.name,
6909 old_dentry->d_name.len); 7015 old_dentry->d_name.len);
6910 } else { 7016 } else {
6911 btrfs_inc_nlink(old_dentry->d_inode); 7017 ret = __btrfs_unlink_inode(trans, root, old_dir,
6912 ret = btrfs_unlink_inode(trans, root, old_dir, 7018 old_dentry->d_inode,
6913 old_dentry->d_inode, 7019 old_dentry->d_name.name,
6914 old_dentry->d_name.name, 7020 old_dentry->d_name.len);
6915 old_dentry->d_name.len); 7021 if (!ret)
7022 ret = btrfs_update_inode(trans, root, old_inode);
6916 } 7023 }
6917 BUG_ON(ret); 7024 BUG_ON(ret);
6918 7025
@@ -6939,6 +7046,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6939 } 7046 }
6940 } 7047 }
6941 7048
7049 fixup_inode_flags(new_dir, old_inode);
7050
6942 ret = btrfs_add_link(trans, new_dir, old_inode, 7051 ret = btrfs_add_link(trans, new_dir, old_inode,
6943 new_dentry->d_name.name, 7052 new_dentry->d_name.name,
6944 new_dentry->d_name.len, 0, index); 7053 new_dentry->d_name.len, 0, index);
@@ -7355,7 +7464,6 @@ static const struct address_space_operations btrfs_symlink_aops = {
7355}; 7464};
7356 7465
7357static const struct inode_operations btrfs_file_inode_operations = { 7466static const struct inode_operations btrfs_file_inode_operations = {
7358 .truncate = btrfs_truncate,
7359 .getattr = btrfs_getattr, 7467 .getattr = btrfs_getattr,
7360 .setattr = btrfs_setattr, 7468 .setattr = btrfs_setattr,
7361 .setxattr = btrfs_setxattr, 7469 .setxattr = btrfs_setxattr,
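With .truncate dropped from btrfs_file_inode_operations above, size changes now run entirely through btrfs_setattr() -> btrfs_setsize(): growing a file goes through btrfs_cont_expand(), shrinking it goes through truncate_setsize() and btrfs_truncate(). A minimal user-space sketch that exercises both paths (the mount point and file name are assumptions, not part of the patch):

/* grow a file (hole-filling path), then shrink it back to zero
 * (ordered-flush + truncate path); the path below is hypothetical */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/btrfs/truncate-demo";	/* assumed btrfs mount */
	struct stat st;
	int fd = open(path, O_CREAT | O_RDWR, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, 1 << 20) < 0)		/* expand: btrfs_cont_expand() */
		perror("ftruncate expand");
	if (ftruncate(fd, 0) < 0)		/* shrink: btrfs_truncate() */
		perror("ftruncate shrink");
	if (fstat(fd, &st) == 0)
		printf("final size: %lld\n", (long long)st.st_size);
	close(fd);
	return 0;
}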
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d1bace3df9b6..7c07fe26b7cf 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -40,6 +40,7 @@
40#include <linux/xattr.h> 40#include <linux/xattr.h>
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/blkdev.h>
43#include "compat.h" 44#include "compat.h"
44#include "ctree.h" 45#include "ctree.h"
45#include "disk-io.h" 46#include "disk-io.h"
@@ -138,6 +139,24 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
138 return 0; 139 return 0;
139} 140}
140 141
142static int check_flags(unsigned int flags)
143{
144 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
145 FS_NOATIME_FL | FS_NODUMP_FL | \
146 FS_SYNC_FL | FS_DIRSYNC_FL | \
147 FS_NOCOMP_FL | FS_COMPR_FL | \
148 FS_NOCOW_FL | FS_COW_FL))
149 return -EOPNOTSUPP;
150
151 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
152 return -EINVAL;
153
154 if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL))
155 return -EINVAL;
156
157 return 0;
158}
159
141static int btrfs_ioctl_setflags(struct file *file, void __user *arg) 160static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
142{ 161{
143 struct inode *inode = file->f_path.dentry->d_inode; 162 struct inode *inode = file->f_path.dentry->d_inode;
@@ -153,10 +172,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
153 if (copy_from_user(&flags, arg, sizeof(flags))) 172 if (copy_from_user(&flags, arg, sizeof(flags)))
154 return -EFAULT; 173 return -EFAULT;
155 174
156 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ 175 ret = check_flags(flags);
157 FS_NOATIME_FL | FS_NODUMP_FL | \ 176 if (ret)
158 FS_SYNC_FL | FS_DIRSYNC_FL)) 177 return ret;
159 return -EOPNOTSUPP;
160 178
161 if (!inode_owner_or_capable(inode)) 179 if (!inode_owner_or_capable(inode))
162 return -EACCES; 180 return -EACCES;
@@ -201,6 +219,22 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
201 else 219 else
202 ip->flags &= ~BTRFS_INODE_DIRSYNC; 220 ip->flags &= ~BTRFS_INODE_DIRSYNC;
203 221
222 /*
223 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
224 * flag may be changed automatically if the compression code won't make
225 * things smaller.
226 */
227 if (flags & FS_NOCOMP_FL) {
228 ip->flags &= ~BTRFS_INODE_COMPRESS;
229 ip->flags |= BTRFS_INODE_NOCOMPRESS;
230 } else if (flags & FS_COMPR_FL) {
231 ip->flags |= BTRFS_INODE_COMPRESS;
232 ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
233 }
234 if (flags & FS_NOCOW_FL)
235 ip->flags |= BTRFS_INODE_NODATACOW;
236 else if (flags & FS_COW_FL)
237 ip->flags &= ~BTRFS_INODE_NODATACOW;
204 238
205 trans = btrfs_join_transaction(root, 1); 239 trans = btrfs_join_transaction(root, 1);
206 BUG_ON(IS_ERR(trans)); 240 BUG_ON(IS_ERR(trans));
@@ -213,9 +247,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
213 btrfs_end_transaction(trans, root); 247 btrfs_end_transaction(trans, root);
214 248
215 mnt_drop_write(file->f_path.mnt); 249 mnt_drop_write(file->f_path.mnt);
250
251 ret = 0;
216 out_unlock: 252 out_unlock:
217 mutex_unlock(&inode->i_mutex); 253 mutex_unlock(&inode->i_mutex);
218 return 0; 254 return ret;
219} 255}
220 256
221static int btrfs_ioctl_getversion(struct file *file, int __user *arg) 257static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
@@ -225,6 +261,49 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
225 return put_user(inode->i_generation, arg); 261 return put_user(inode->i_generation, arg);
226} 262}
227 263
264static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
265{
266 struct btrfs_root *root = fdentry(file)->d_sb->s_fs_info;
267 struct btrfs_fs_info *fs_info = root->fs_info;
268 struct btrfs_device *device;
269 struct request_queue *q;
270 struct fstrim_range range;
271 u64 minlen = ULLONG_MAX;
272 u64 num_devices = 0;
273 int ret;
274
275 if (!capable(CAP_SYS_ADMIN))
276 return -EPERM;
277
278 mutex_lock(&fs_info->fs_devices->device_list_mutex);
279 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
280 if (!device->bdev)
281 continue;
282 q = bdev_get_queue(device->bdev);
283 if (blk_queue_discard(q)) {
284 num_devices++;
285 minlen = min((u64)q->limits.discard_granularity,
286 minlen);
287 }
288 }
289 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
290 if (!num_devices)
291 return -EOPNOTSUPP;
292
293 if (copy_from_user(&range, arg, sizeof(range)))
294 return -EFAULT;
295
296 range.minlen = max(range.minlen, minlen);
297 ret = btrfs_trim_fs(root, &range);
298 if (ret < 0)
299 return ret;
300
301 if (copy_to_user(arg, &range, sizeof(range)))
302 return -EFAULT;
303
304 return 0;
305}
306
228static noinline int create_subvol(struct btrfs_root *root, 307static noinline int create_subvol(struct btrfs_root *root,
229 struct dentry *dentry, 308 struct dentry *dentry,
230 char *name, int namelen, 309 char *name, int namelen,
@@ -409,7 +488,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
409 if (ret) 488 if (ret)
410 goto fail; 489 goto fail;
411 490
412 btrfs_orphan_cleanup(pending_snapshot->snap); 491 ret = btrfs_orphan_cleanup(pending_snapshot->snap);
492 if (ret)
493 goto fail;
413 494
414 parent = dget_parent(dentry); 495 parent = dget_parent(dentry);
415 inode = btrfs_lookup_dentry(parent->d_inode, dentry); 496 inode = btrfs_lookup_dentry(parent->d_inode, dentry);
@@ -2348,12 +2429,15 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
2348 struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root; 2429 struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root;
2349 struct btrfs_trans_handle *trans; 2430 struct btrfs_trans_handle *trans;
2350 u64 transid; 2431 u64 transid;
2432 int ret;
2351 2433
2352 trans = btrfs_start_transaction(root, 0); 2434 trans = btrfs_start_transaction(root, 0);
2353 if (IS_ERR(trans)) 2435 if (IS_ERR(trans))
2354 return PTR_ERR(trans); 2436 return PTR_ERR(trans);
2355 transid = trans->transid; 2437 transid = trans->transid;
2356 btrfs_commit_transaction_async(trans, root, 0); 2438 ret = btrfs_commit_transaction_async(trans, root, 0);
2439 if (ret)
2440 return ret;
2357 2441
2358 if (argp) 2442 if (argp)
2359 if (copy_to_user(argp, &transid, sizeof(transid))) 2443 if (copy_to_user(argp, &transid, sizeof(transid)))
@@ -2388,6 +2472,8 @@ long btrfs_ioctl(struct file *file, unsigned int
2388 return btrfs_ioctl_setflags(file, argp); 2472 return btrfs_ioctl_setflags(file, argp);
2389 case FS_IOC_GETVERSION: 2473 case FS_IOC_GETVERSION:
2390 return btrfs_ioctl_getversion(file, argp); 2474 return btrfs_ioctl_getversion(file, argp);
2475 case FITRIM:
2476 return btrfs_ioctl_fitrim(file, argp);
2391 case BTRFS_IOC_SNAP_CREATE: 2477 case BTRFS_IOC_SNAP_CREATE:
2392 return btrfs_ioctl_snap_create(file, argp, 0); 2478 return btrfs_ioctl_snap_create(file, argp, 0);
2393 case BTRFS_IOC_SNAP_CREATE_V2: 2479 case BTRFS_IOC_SNAP_CREATE_V2:
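The new FITRIM handler above is reached through the generic ioctl defined in <linux/fs.h>; a minimal user-space sketch (the mount point is an assumption, and range.len reports the number of bytes trimmed on return):

#include <fcntl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range = {
		.start = 0,
		.len = UINT64_MAX,	/* whole filesystem */
		.minlen = 0,		/* raised to the discard granularity by the kernel */
	};
	int fd = open("/mnt/btrfs", O_RDONLY);	/* assumed btrfs mount point */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FITRIM, &range) < 0)
		perror("ioctl(FITRIM)");
	else
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}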
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 083a55477375..a1c940425307 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -202,6 +202,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
202 INIT_LIST_HEAD(&entry->list); 202 INIT_LIST_HEAD(&entry->list);
203 INIT_LIST_HEAD(&entry->root_extent_list); 203 INIT_LIST_HEAD(&entry->root_extent_list);
204 204
205 trace_btrfs_ordered_extent_add(inode, entry);
206
205 spin_lock(&tree->lock); 207 spin_lock(&tree->lock);
206 node = tree_insert(&tree->tree, file_offset, 208 node = tree_insert(&tree->tree, file_offset,
207 &entry->rb_node); 209 &entry->rb_node);
@@ -387,6 +389,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
387 struct list_head *cur; 389 struct list_head *cur;
388 struct btrfs_ordered_sum *sum; 390 struct btrfs_ordered_sum *sum;
389 391
392 trace_btrfs_ordered_extent_put(entry->inode, entry);
393
390 if (atomic_dec_and_test(&entry->refs)) { 394 if (atomic_dec_and_test(&entry->refs)) {
391 while (!list_empty(&entry->list)) { 395 while (!list_empty(&entry->list)) {
392 cur = entry->list.next; 396 cur = entry->list.next;
@@ -420,6 +424,8 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
420 spin_lock(&root->fs_info->ordered_extent_lock); 424 spin_lock(&root->fs_info->ordered_extent_lock);
421 list_del_init(&entry->root_extent_list); 425 list_del_init(&entry->root_extent_list);
422 426
427 trace_btrfs_ordered_extent_remove(inode, entry);
428
423 /* 429 /*
424 * we have no more ordered extents for this inode and 430 * we have no more ordered extents for this inode and
425 * no dirty pages. We can safely remove it from the 431 * no dirty pages. We can safely remove it from the
@@ -585,6 +591,8 @@ void btrfs_start_ordered_extent(struct inode *inode,
585 u64 start = entry->file_offset; 591 u64 start = entry->file_offset;
586 u64 end = start + entry->len - 1; 592 u64 end = start + entry->len - 1;
587 593
594 trace_btrfs_ordered_extent_start(inode, entry);
595
588 /* 596 /*
589 * pages in the range can be dirty, clean or writeback. We 597 * pages in the range can be dirty, clean or writeback. We
590 * start IO on any dirty ones so the wait doesn't stall waiting 598 * start IO on any dirty ones so the wait doesn't stall waiting
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 31ade5802ae8..58250e09eb05 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1724,6 +1724,7 @@ again:
1724 1724
1725 eb = read_tree_block(dest, old_bytenr, blocksize, 1725 eb = read_tree_block(dest, old_bytenr, blocksize,
1726 old_ptr_gen); 1726 old_ptr_gen);
1727 BUG_ON(!eb);
1727 btrfs_tree_lock(eb); 1728 btrfs_tree_lock(eb);
1728 if (cow) { 1729 if (cow) {
1729 ret = btrfs_cow_block(trans, dest, eb, parent, 1730 ret = btrfs_cow_block(trans, dest, eb, parent,
@@ -2513,6 +2514,10 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2513 blocksize = btrfs_level_size(root, node->level); 2514 blocksize = btrfs_level_size(root, node->level);
2514 generation = btrfs_node_ptr_generation(upper->eb, slot); 2515 generation = btrfs_node_ptr_generation(upper->eb, slot);
2515 eb = read_tree_block(root, bytenr, blocksize, generation); 2516 eb = read_tree_block(root, bytenr, blocksize, generation);
2517 if (!eb) {
2518 err = -EIO;
2519 goto next;
2520 }
2516 btrfs_tree_lock(eb); 2521 btrfs_tree_lock(eb);
2517 btrfs_set_lock_blocking(eb); 2522 btrfs_set_lock_blocking(eb);
2518 2523
@@ -2670,6 +2675,7 @@ static int get_tree_block_key(struct reloc_control *rc,
2670 BUG_ON(block->key_ready); 2675 BUG_ON(block->key_ready);
2671 eb = read_tree_block(rc->extent_root, block->bytenr, 2676 eb = read_tree_block(rc->extent_root, block->bytenr,
2672 block->key.objectid, block->key.offset); 2677 block->key.objectid, block->key.offset);
2678 BUG_ON(!eb);
2673 WARN_ON(btrfs_header_level(eb) != block->level); 2679 WARN_ON(btrfs_header_level(eb) != block->level);
2674 if (block->level == 0) 2680 if (block->level == 0)
2675 btrfs_item_key_to_cpu(eb, &block->key, 0); 2681 btrfs_item_key_to_cpu(eb, &block->key, 0);
@@ -4209,7 +4215,7 @@ out:
4209 if (IS_ERR(fs_root)) 4215 if (IS_ERR(fs_root))
4210 err = PTR_ERR(fs_root); 4216 err = PTR_ERR(fs_root);
4211 else 4217 else
4212 btrfs_orphan_cleanup(fs_root); 4218 err = btrfs_orphan_cleanup(fs_root);
4213 } 4219 }
4214 return err; 4220 return err;
4215} 4221}
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 6a1086e83ffc..29b2d7c930eb 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -88,7 +88,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
88 search_key.offset = (u64)-1; 88 search_key.offset = (u64)-1;
89 89
90 path = btrfs_alloc_path(); 90 path = btrfs_alloc_path();
91 BUG_ON(!path); 91 if (!path)
92 return -ENOMEM;
92 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 93 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
93 if (ret < 0) 94 if (ret < 0)
94 goto out; 95 goto out;
@@ -332,7 +333,8 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
332 struct extent_buffer *leaf; 333 struct extent_buffer *leaf;
333 334
334 path = btrfs_alloc_path(); 335 path = btrfs_alloc_path();
335 BUG_ON(!path); 336 if (!path)
337 return -ENOMEM;
336 ret = btrfs_search_slot(trans, root, key, path, -1, 1); 338 ret = btrfs_search_slot(trans, root, key, path, -1, 1);
337 if (ret < 0) 339 if (ret < 0)
338 goto out; 340 goto out;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d39a9895d932..2edfc039f098 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -52,6 +52,9 @@
52#include "export.h" 52#include "export.h"
53#include "compression.h" 53#include "compression.h"
54 54
55#define CREATE_TRACE_POINTS
56#include <trace/events/btrfs.h>
57
55static const struct super_operations btrfs_super_ops; 58static const struct super_operations btrfs_super_ops;
56 59
57static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno, 60static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
@@ -620,6 +623,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
620 struct btrfs_root *root = btrfs_sb(sb); 623 struct btrfs_root *root = btrfs_sb(sb);
621 int ret; 624 int ret;
622 625
626 trace_btrfs_sync_fs(wait);
627
623 if (!wait) { 628 if (!wait) {
624 filemap_flush(root->fs_info->btree_inode->i_mapping); 629 filemap_flush(root->fs_info->btree_inode->i_mapping);
625 return 0; 630 return 0;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3d73c8d93bbb..ce48eb59d615 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -57,7 +57,8 @@ static noinline int join_transaction(struct btrfs_root *root)
57 if (!cur_trans) { 57 if (!cur_trans) {
58 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, 58 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
59 GFP_NOFS); 59 GFP_NOFS);
60 BUG_ON(!cur_trans); 60 if (!cur_trans)
61 return -ENOMEM;
61 root->fs_info->generation++; 62 root->fs_info->generation++;
62 cur_trans->num_writers = 1; 63 cur_trans->num_writers = 1;
63 cur_trans->num_joined = 0; 64 cur_trans->num_joined = 0;
@@ -195,7 +196,11 @@ again:
195 wait_current_trans(root); 196 wait_current_trans(root);
196 197
197 ret = join_transaction(root); 198 ret = join_transaction(root);
198 BUG_ON(ret); 199 if (ret < 0) {
200 if (type != TRANS_JOIN_NOLOCK)
201 mutex_unlock(&root->fs_info->trans_mutex);
202 return ERR_PTR(ret);
203 }
199 204
200 cur_trans = root->fs_info->running_transaction; 205 cur_trans = root->fs_info->running_transaction;
201 cur_trans->use_count++; 206 cur_trans->use_count++;
@@ -1156,7 +1161,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1156 struct btrfs_transaction *cur_trans; 1161 struct btrfs_transaction *cur_trans;
1157 1162
1158 ac = kmalloc(sizeof(*ac), GFP_NOFS); 1163 ac = kmalloc(sizeof(*ac), GFP_NOFS);
1159 BUG_ON(!ac); 1164 if (!ac)
1165 return -ENOMEM;
1160 1166
1161 INIT_DELAYED_WORK(&ac->work, do_async_commit); 1167 INIT_DELAYED_WORK(&ac->work, do_async_commit);
1162 ac->root = root; 1168 ac->root = root;
@@ -1389,6 +1395,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1389 put_transaction(cur_trans); 1395 put_transaction(cur_trans);
1390 put_transaction(cur_trans); 1396 put_transaction(cur_trans);
1391 1397
1398 trace_btrfs_transaction_commit(root);
1399
1392 mutex_unlock(&root->fs_info->trans_mutex); 1400 mutex_unlock(&root->fs_info->trans_mutex);
1393 1401
1394 if (current->journal_info == trans) 1402 if (current->journal_info == trans)
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a4bbb854dfd2..c50271ad3157 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -799,12 +799,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
799 struct inode *dir; 799 struct inode *dir;
800 int ret; 800 int ret;
801 struct btrfs_inode_ref *ref; 801 struct btrfs_inode_ref *ref;
802 struct btrfs_dir_item *di;
803 struct inode *inode; 802 struct inode *inode;
804 char *name; 803 char *name;
805 int namelen; 804 int namelen;
806 unsigned long ref_ptr; 805 unsigned long ref_ptr;
807 unsigned long ref_end; 806 unsigned long ref_end;
807 int search_done = 0;
808 808
809 /* 809 /*
810 * it is possible that we didn't log all the parent directories 810 * it is possible that we didn't log all the parent directories
@@ -845,7 +845,10 @@ again:
845 * existing back reference, and we don't want to create 845 * existing back reference, and we don't want to create
846 * dangling pointers in the directory. 846 * dangling pointers in the directory.
847 */ 847 */
848conflict_again: 848
849 if (search_done)
850 goto insert;
851
849 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 852 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
850 if (ret == 0) { 853 if (ret == 0) {
851 char *victim_name; 854 char *victim_name;
@@ -886,37 +889,21 @@ conflict_again:
886 ret = btrfs_unlink_inode(trans, root, dir, 889 ret = btrfs_unlink_inode(trans, root, dir,
887 inode, victim_name, 890 inode, victim_name,
888 victim_name_len); 891 victim_name_len);
889 kfree(victim_name);
890 btrfs_release_path(root, path);
891 goto conflict_again;
892 } 892 }
893 kfree(victim_name); 893 kfree(victim_name);
894 ptr = (unsigned long)(victim_ref + 1) + victim_name_len; 894 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
895 } 895 }
896 BUG_ON(ret); 896 BUG_ON(ret);
897 }
898 btrfs_release_path(root, path);
899
900 /* look for a conflicting sequence number */
901 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
902 btrfs_inode_ref_index(eb, ref),
903 name, namelen, 0);
904 if (di && !IS_ERR(di)) {
905 ret = drop_one_dir_item(trans, root, path, dir, di);
906 BUG_ON(ret);
907 }
908 btrfs_release_path(root, path);
909 897
910 898 /*
911 /* look for a conflicting name */ 899 * NOTE: we have searched the root tree and checked the
912 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, 900 * corresponding ref; there is no need to check it again.
913 name, namelen, 0); 901 */
914 if (di && !IS_ERR(di)) { 902 search_done = 1;
915 ret = drop_one_dir_item(trans, root, path, dir, di);
916 BUG_ON(ret);
917 } 903 }
918 btrfs_release_path(root, path); 904 btrfs_release_path(root, path);
919 905
906insert:
920 /* insert our name */ 907 /* insert our name */
921 ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, 908 ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
922 btrfs_inode_ref_index(eb, ref)); 909 btrfs_inode_ref_index(eb, ref));
@@ -1286,6 +1273,8 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1286 ptr_end = ptr + item_size; 1273 ptr_end = ptr + item_size;
1287 while (ptr < ptr_end) { 1274 while (ptr < ptr_end) {
1288 di = (struct btrfs_dir_item *)ptr; 1275 di = (struct btrfs_dir_item *)ptr;
1276 if (verify_dir_item(root, eb, di))
1277 return -EIO;
1289 name_len = btrfs_dir_name_len(eb, di); 1278 name_len = btrfs_dir_name_len(eb, di);
1290 ret = replay_one_name(trans, root, path, eb, di, key); 1279 ret = replay_one_name(trans, root, path, eb, di, key);
1291 BUG_ON(ret); 1280 BUG_ON(ret);
@@ -1412,6 +1401,11 @@ again:
1412 ptr_end = ptr + item_size; 1401 ptr_end = ptr + item_size;
1413 while (ptr < ptr_end) { 1402 while (ptr < ptr_end) {
1414 di = (struct btrfs_dir_item *)ptr; 1403 di = (struct btrfs_dir_item *)ptr;
1404 if (verify_dir_item(root, eb, di)) {
1405 ret = -EIO;
1406 goto out;
1407 }
1408
1415 name_len = btrfs_dir_name_len(eb, di); 1409 name_len = btrfs_dir_name_len(eb, di);
1416 name = kmalloc(name_len, GFP_NOFS); 1410 name = kmalloc(name_len, GFP_NOFS);
1417 if (!name) { 1411 if (!name) {
@@ -1821,7 +1815,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1821 int orig_level; 1815 int orig_level;
1822 1816
1823 path = btrfs_alloc_path(); 1817 path = btrfs_alloc_path();
1824 BUG_ON(!path); 1818 if (!path)
1819 return -ENOMEM;
1825 1820
1826 level = btrfs_header_level(log->node); 1821 level = btrfs_header_level(log->node);
1827 orig_level = level; 1822 orig_level = level;
@@ -3107,9 +3102,11 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3107 .stage = 0, 3102 .stage = 0,
3108 }; 3103 };
3109 3104
3110 fs_info->log_root_recovering = 1;
3111 path = btrfs_alloc_path(); 3105 path = btrfs_alloc_path();
3112 BUG_ON(!path); 3106 if (!path)
3107 return -ENOMEM;
3108
3109 fs_info->log_root_recovering = 1;
3113 3110
3114 trans = btrfs_start_transaction(fs_info->tree_root, 0); 3111 trans = btrfs_start_transaction(fs_info->tree_root, 0);
3115 BUG_ON(IS_ERR(trans)); 3112 BUG_ON(IS_ERR(trans));
@@ -3117,7 +3114,8 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3117 wc.trans = trans; 3114 wc.trans = trans;
3118 wc.pin = 1; 3115 wc.pin = 1;
3119 3116
3120 walk_log_tree(trans, log_root_tree, &wc); 3117 ret = walk_log_tree(trans, log_root_tree, &wc);
3118 BUG_ON(ret);
3121 3119
3122again: 3120again:
3123 key.objectid = BTRFS_TREE_LOG_OBJECTID; 3121 key.objectid = BTRFS_TREE_LOG_OBJECTID;
@@ -3141,8 +3139,7 @@ again:
3141 3139
3142 log = btrfs_read_fs_root_no_radix(log_root_tree, 3140 log = btrfs_read_fs_root_no_radix(log_root_tree,
3143 &found_key); 3141 &found_key);
3144 BUG_ON(!log); 3142 BUG_ON(IS_ERR(log));
3145
3146 3143
3147 tmp_key.objectid = found_key.offset; 3144 tmp_key.objectid = found_key.offset;
3148 tmp_key.type = BTRFS_ROOT_ITEM_KEY; 3145 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9d554e8e6583..309a57b9fc85 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -33,17 +33,6 @@
33#include "volumes.h" 33#include "volumes.h"
34#include "async-thread.h" 34#include "async-thread.h"
35 35
36struct map_lookup {
37 u64 type;
38 int io_align;
39 int io_width;
40 int stripe_len;
41 int sector_size;
42 int num_stripes;
43 int sub_stripes;
44 struct btrfs_bio_stripe stripes[];
45};
46
47static int init_first_rw_device(struct btrfs_trans_handle *trans, 36static int init_first_rw_device(struct btrfs_trans_handle *trans,
48 struct btrfs_root *root, 37 struct btrfs_root *root,
49 struct btrfs_device *device); 38 struct btrfs_device *device);
@@ -1879,6 +1868,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
1879 1868
1880 BUG_ON(ret); 1869 BUG_ON(ret);
1881 1870
1871 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
1872
1882 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 1873 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1883 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); 1874 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1884 BUG_ON(ret); 1875 BUG_ON(ret);
@@ -2606,6 +2597,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2606 *num_bytes = chunk_bytes_by_type(type, calc_size, 2597 *num_bytes = chunk_bytes_by_type(type, calc_size,
2607 map->num_stripes, sub_stripes); 2598 map->num_stripes, sub_stripes);
2608 2599
2600 trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);
2601
2609 em = alloc_extent_map(GFP_NOFS); 2602 em = alloc_extent_map(GFP_NOFS);
2610 if (!em) { 2603 if (!em) {
2611 ret = -ENOMEM; 2604 ret = -ENOMEM;
@@ -2714,6 +2707,7 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2714 item_size); 2707 item_size);
2715 BUG_ON(ret); 2708 BUG_ON(ret);
2716 } 2709 }
2710
2717 kfree(chunk); 2711 kfree(chunk);
2718 return 0; 2712 return 0;
2719} 2713}
@@ -2918,7 +2912,10 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2918 struct extent_map_tree *em_tree = &map_tree->map_tree; 2912 struct extent_map_tree *em_tree = &map_tree->map_tree;
2919 u64 offset; 2913 u64 offset;
2920 u64 stripe_offset; 2914 u64 stripe_offset;
2915 u64 stripe_end_offset;
2921 u64 stripe_nr; 2916 u64 stripe_nr;
2917 u64 stripe_nr_orig;
2918 u64 stripe_nr_end;
2922 int stripes_allocated = 8; 2919 int stripes_allocated = 8;
2923 int stripes_required = 1; 2920 int stripes_required = 1;
2924 int stripe_index; 2921 int stripe_index;
@@ -2927,7 +2924,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2927 int max_errors = 0; 2924 int max_errors = 0;
2928 struct btrfs_multi_bio *multi = NULL; 2925 struct btrfs_multi_bio *multi = NULL;
2929 2926
2930 if (multi_ret && !(rw & REQ_WRITE)) 2927 if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
2931 stripes_allocated = 1; 2928 stripes_allocated = 1;
2932again: 2929again:
2933 if (multi_ret) { 2930 if (multi_ret) {
@@ -2968,7 +2965,15 @@ again:
2968 max_errors = 1; 2965 max_errors = 1;
2969 } 2966 }
2970 } 2967 }
2971 if (multi_ret && (rw & REQ_WRITE) && 2968 if (rw & REQ_DISCARD) {
2969 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2970 BTRFS_BLOCK_GROUP_RAID1 |
2971 BTRFS_BLOCK_GROUP_DUP |
2972 BTRFS_BLOCK_GROUP_RAID10)) {
2973 stripes_required = map->num_stripes;
2974 }
2975 }
2976 if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
2972 stripes_allocated < stripes_required) { 2977 stripes_allocated < stripes_required) {
2973 stripes_allocated = map->num_stripes; 2978 stripes_allocated = map->num_stripes;
2974 free_extent_map(em); 2979 free_extent_map(em);
@@ -2988,12 +2993,15 @@ again:
2988 /* stripe_offset is the offset of this block in its stripe*/ 2993 /* stripe_offset is the offset of this block in its stripe*/
2989 stripe_offset = offset - stripe_offset; 2994 stripe_offset = offset - stripe_offset;
2990 2995
2991 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | 2996 if (rw & REQ_DISCARD)
2992 BTRFS_BLOCK_GROUP_RAID10 | 2997 *length = min_t(u64, em->len - offset, *length);
2993 BTRFS_BLOCK_GROUP_DUP)) { 2998 else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2999 BTRFS_BLOCK_GROUP_RAID1 |
3000 BTRFS_BLOCK_GROUP_RAID10 |
3001 BTRFS_BLOCK_GROUP_DUP)) {
2994 /* we limit the length of each bio to what fits in a stripe */ 3002 /* we limit the length of each bio to what fits in a stripe */
2995 *length = min_t(u64, em->len - offset, 3003 *length = min_t(u64, em->len - offset,
2996 map->stripe_len - stripe_offset); 3004 map->stripe_len - stripe_offset);
2997 } else { 3005 } else {
2998 *length = em->len - offset; 3006 *length = em->len - offset;
2999 } 3007 }
@@ -3003,8 +3011,19 @@ again:
3003 3011
3004 num_stripes = 1; 3012 num_stripes = 1;
3005 stripe_index = 0; 3013 stripe_index = 0;
3006 if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 3014 stripe_nr_orig = stripe_nr;
3007 if (rw & REQ_WRITE) 3015 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3016 (~(map->stripe_len - 1));
3017 do_div(stripe_nr_end, map->stripe_len);
3018 stripe_end_offset = stripe_nr_end * map->stripe_len -
3019 (offset + *length);
3020 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3021 if (rw & REQ_DISCARD)
3022 num_stripes = min_t(u64, map->num_stripes,
3023 stripe_nr_end - stripe_nr_orig);
3024 stripe_index = do_div(stripe_nr, map->num_stripes);
3025 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3026 if (rw & (REQ_WRITE | REQ_DISCARD))
3008 num_stripes = map->num_stripes; 3027 num_stripes = map->num_stripes;
3009 else if (mirror_num) 3028 else if (mirror_num)
3010 stripe_index = mirror_num - 1; 3029 stripe_index = mirror_num - 1;
@@ -3015,7 +3034,7 @@ again:
3015 } 3034 }
3016 3035
3017 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 3036 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3018 if (rw & REQ_WRITE) 3037 if (rw & (REQ_WRITE | REQ_DISCARD))
3019 num_stripes = map->num_stripes; 3038 num_stripes = map->num_stripes;
3020 else if (mirror_num) 3039 else if (mirror_num)
3021 stripe_index = mirror_num - 1; 3040 stripe_index = mirror_num - 1;
@@ -3028,6 +3047,10 @@ again:
3028 3047
3029 if (rw & REQ_WRITE) 3048 if (rw & REQ_WRITE)
3030 num_stripes = map->sub_stripes; 3049 num_stripes = map->sub_stripes;
3050 else if (rw & REQ_DISCARD)
3051 num_stripes = min_t(u64, map->sub_stripes *
3052 (stripe_nr_end - stripe_nr_orig),
3053 map->num_stripes);
3031 else if (mirror_num) 3054 else if (mirror_num)
3032 stripe_index += mirror_num - 1; 3055 stripe_index += mirror_num - 1;
3033 else { 3056 else {
@@ -3045,12 +3068,101 @@ again:
3045 } 3068 }
3046 BUG_ON(stripe_index >= map->num_stripes); 3069 BUG_ON(stripe_index >= map->num_stripes);
3047 3070
3048 for (i = 0; i < num_stripes; i++) { 3071 if (rw & REQ_DISCARD) {
3049 multi->stripes[i].physical = 3072 for (i = 0; i < num_stripes; i++) {
3050 map->stripes[stripe_index].physical + 3073 multi->stripes[i].physical =
3051 stripe_offset + stripe_nr * map->stripe_len; 3074 map->stripes[stripe_index].physical +
3052 multi->stripes[i].dev = map->stripes[stripe_index].dev; 3075 stripe_offset + stripe_nr * map->stripe_len;
3053 stripe_index++; 3076 multi->stripes[i].dev = map->stripes[stripe_index].dev;
3077
3078 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3079 u64 stripes;
3080 u32 last_stripe = 0;
3081 int j;
3082
3083 div_u64_rem(stripe_nr_end - 1,
3084 map->num_stripes,
3085 &last_stripe);
3086
3087 for (j = 0; j < map->num_stripes; j++) {
3088 u32 test;
3089
3090 div_u64_rem(stripe_nr_end - 1 - j,
3091 map->num_stripes, &test);
3092 if (test == stripe_index)
3093 break;
3094 }
3095 stripes = stripe_nr_end - 1 - j;
3096 do_div(stripes, map->num_stripes);
3097 multi->stripes[i].length = map->stripe_len *
3098 (stripes - stripe_nr + 1);
3099
3100 if (i == 0) {
3101 multi->stripes[i].length -=
3102 stripe_offset;
3103 stripe_offset = 0;
3104 }
3105 if (stripe_index == last_stripe)
3106 multi->stripes[i].length -=
3107 stripe_end_offset;
3108 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3109 u64 stripes;
3110 int j;
3111 int factor = map->num_stripes /
3112 map->sub_stripes;
3113 u32 last_stripe = 0;
3114
3115 div_u64_rem(stripe_nr_end - 1,
3116 factor, &last_stripe);
3117 last_stripe *= map->sub_stripes;
3118
3119 for (j = 0; j < factor; j++) {
3120 u32 test;
3121
3122 div_u64_rem(stripe_nr_end - 1 - j,
3123 factor, &test);
3124
3125 if (test ==
3126 stripe_index / map->sub_stripes)
3127 break;
3128 }
3129 stripes = stripe_nr_end - 1 - j;
3130 do_div(stripes, factor);
3131 multi->stripes[i].length = map->stripe_len *
3132 (stripes - stripe_nr + 1);
3133
3134 if (i < map->sub_stripes) {
3135 multi->stripes[i].length -=
3136 stripe_offset;
3137 if (i == map->sub_stripes - 1)
3138 stripe_offset = 0;
3139 }
3140 if (stripe_index >= last_stripe &&
3141 stripe_index <= (last_stripe +
3142 map->sub_stripes - 1)) {
3143 multi->stripes[i].length -=
3144 stripe_end_offset;
3145 }
3146 } else
3147 multi->stripes[i].length = *length;
3148
3149 stripe_index++;
3150 if (stripe_index == map->num_stripes) {
3151 /* This could only happen for RAID0/10 */
3152 stripe_index = 0;
3153 stripe_nr++;
3154 }
3155 }
3156 } else {
3157 for (i = 0; i < num_stripes; i++) {
3158 multi->stripes[i].physical =
3159 map->stripes[stripe_index].physical +
3160 stripe_offset +
3161 stripe_nr * map->stripe_len;
3162 multi->stripes[i].dev =
3163 map->stripes[stripe_index].dev;
3164 stripe_index++;
3165 }
3054 } 3166 }
3055 if (multi_ret) { 3167 if (multi_ret) {
3056 *multi_ret = multi; 3168 *multi_ret = multi;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 7fb59d45fe8c..cc2eadaf7a27 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -126,6 +126,7 @@ struct btrfs_fs_devices {
126struct btrfs_bio_stripe { 126struct btrfs_bio_stripe {
127 struct btrfs_device *dev; 127 struct btrfs_device *dev;
128 u64 physical; 128 u64 physical;
129 u64 length; /* only used for discard mappings */
129}; 130};
130 131
131struct btrfs_multi_bio { 132struct btrfs_multi_bio {
@@ -145,6 +146,17 @@ struct btrfs_device_info {
145 u64 max_avail; 146 u64 max_avail;
146}; 147};
147 148
149struct map_lookup {
150 u64 type;
151 int io_align;
152 int io_width;
153 int stripe_len;
154 int sector_size;
155 int num_stripes;
156 int sub_stripes;
157 struct btrfs_bio_stripe stripes[];
158};
159
148/* Used to sort the devices by max_avail(descending sort) */ 160/* Used to sort the devices by max_avail(descending sort) */
149int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); 161int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2);
150 162
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index d779cefcfd7d..a5303b871b13 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -242,6 +242,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
242 break; 242 break;
243 243
244 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 244 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
245 if (verify_dir_item(root, leaf, di))
246 continue;
245 247
246 name_len = btrfs_dir_name_len(leaf, di); 248 name_len = btrfs_dir_name_len(leaf, di);
247 total_size += name_len + 1; 249 total_size += name_len + 1;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index bfd8b680e648..d2a70a4561f9 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -266,7 +266,6 @@ void ecryptfs_destroy_mount_crypt_stat(
266 &mount_crypt_stat->global_auth_tok_list, 266 &mount_crypt_stat->global_auth_tok_list,
267 mount_crypt_stat_list) { 267 mount_crypt_stat_list) {
268 list_del(&auth_tok->mount_crypt_stat_list); 268 list_del(&auth_tok->mount_crypt_stat_list);
269 mount_crypt_stat->num_global_auth_toks--;
270 if (auth_tok->global_auth_tok_key 269 if (auth_tok->global_auth_tok_key
271 && !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID)) 270 && !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID))
272 key_put(auth_tok->global_auth_tok_key); 271 key_put(auth_tok->global_auth_tok_key);
@@ -1389,6 +1388,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
1389 rc = -ENOMEM; 1388 rc = -ENOMEM;
1390 goto out; 1389 goto out;
1391 } 1390 }
1391 /* Zeroed page ensures the in-header unencrypted i_size is set to 0 */
1392 rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat, 1392 rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat,
1393 ecryptfs_dentry); 1393 ecryptfs_dentry);
1394 if (unlikely(rc)) { 1394 if (unlikely(rc)) {
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index e00753496e3e..bd3cafd0949d 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -233,7 +233,7 @@ ecryptfs_get_key_payload_data(struct key *key)
233 233
234struct ecryptfs_key_sig { 234struct ecryptfs_key_sig {
235 struct list_head crypt_stat_list; 235 struct list_head crypt_stat_list;
236 char keysig[ECRYPTFS_SIG_SIZE_HEX]; 236 char keysig[ECRYPTFS_SIG_SIZE_HEX + 1];
237}; 237};
238 238
239struct ecryptfs_filename { 239struct ecryptfs_filename {
@@ -257,19 +257,18 @@ struct ecryptfs_filename {
257struct ecryptfs_crypt_stat { 257struct ecryptfs_crypt_stat {
258#define ECRYPTFS_STRUCT_INITIALIZED 0x00000001 258#define ECRYPTFS_STRUCT_INITIALIZED 0x00000001
259#define ECRYPTFS_POLICY_APPLIED 0x00000002 259#define ECRYPTFS_POLICY_APPLIED 0x00000002
260#define ECRYPTFS_NEW_FILE 0x00000004 260#define ECRYPTFS_ENCRYPTED 0x00000004
261#define ECRYPTFS_ENCRYPTED 0x00000008 261#define ECRYPTFS_SECURITY_WARNING 0x00000008
262#define ECRYPTFS_SECURITY_WARNING 0x00000010 262#define ECRYPTFS_ENABLE_HMAC 0x00000010
263#define ECRYPTFS_ENABLE_HMAC 0x00000020 263#define ECRYPTFS_ENCRYPT_IV_PAGES 0x00000020
264#define ECRYPTFS_ENCRYPT_IV_PAGES 0x00000040 264#define ECRYPTFS_KEY_VALID 0x00000040
265#define ECRYPTFS_KEY_VALID 0x00000080 265#define ECRYPTFS_METADATA_IN_XATTR 0x00000080
266#define ECRYPTFS_METADATA_IN_XATTR 0x00000100 266#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000100
267#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200 267#define ECRYPTFS_KEY_SET 0x00000200
268#define ECRYPTFS_KEY_SET 0x00000400 268#define ECRYPTFS_ENCRYPT_FILENAMES 0x00000400
269#define ECRYPTFS_ENCRYPT_FILENAMES 0x00000800 269#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00000800
270#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000 270#define ECRYPTFS_ENCFN_USE_FEK 0x00001000
271#define ECRYPTFS_ENCFN_USE_FEK 0x00002000 271#define ECRYPTFS_UNLINK_SIGS 0x00002000
272#define ECRYPTFS_UNLINK_SIGS 0x00004000
273 u32 flags; 272 u32 flags;
274 unsigned int file_version; 273 unsigned int file_version;
275 size_t iv_bytes; 274 size_t iv_bytes;
@@ -297,7 +296,6 @@ struct ecryptfs_inode_info {
297 struct inode vfs_inode; 296 struct inode vfs_inode;
298 struct inode *wii_inode; 297 struct inode *wii_inode;
299 struct file *lower_file; 298 struct file *lower_file;
300 struct mutex lower_file_mutex;
301 struct ecryptfs_crypt_stat crypt_stat; 299 struct ecryptfs_crypt_stat crypt_stat;
302}; 300};
303 301
@@ -333,7 +331,6 @@ struct ecryptfs_global_auth_tok {
333 u32 flags; 331 u32 flags;
334 struct list_head mount_crypt_stat_list; 332 struct list_head mount_crypt_stat_list;
335 struct key *global_auth_tok_key; 333 struct key *global_auth_tok_key;
336 struct ecryptfs_auth_tok *global_auth_tok;
337 unsigned char sig[ECRYPTFS_SIG_SIZE_HEX + 1]; 334 unsigned char sig[ECRYPTFS_SIG_SIZE_HEX + 1];
338}; 335};
339 336
@@ -380,7 +377,6 @@ struct ecryptfs_mount_crypt_stat {
380 u32 flags; 377 u32 flags;
381 struct list_head global_auth_tok_list; 378 struct list_head global_auth_tok_list;
382 struct mutex global_auth_tok_list_mutex; 379 struct mutex global_auth_tok_list_mutex;
383 size_t num_global_auth_toks;
384 size_t global_default_cipher_key_size; 380 size_t global_default_cipher_key_size;
385 size_t global_default_fn_cipher_key_bytes; 381 size_t global_default_fn_cipher_key_bytes;
386 unsigned char global_default_cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE 382 unsigned char global_default_cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 7d1050e254f9..cedc913d11ba 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -273,7 +273,14 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
273static int 273static int
274ecryptfs_fsync(struct file *file, int datasync) 274ecryptfs_fsync(struct file *file, int datasync)
275{ 275{
276 return vfs_fsync(ecryptfs_file_to_lower(file), datasync); 276 int rc = 0;
277
278 rc = generic_file_fsync(file, datasync);
279 if (rc)
280 goto out;
281 rc = vfs_fsync(ecryptfs_file_to_lower(file), datasync);
282out:
283 return rc;
277} 284}
278 285
279static int ecryptfs_fasync(int fd, struct file *file, int flag) 286static int ecryptfs_fasync(int fd, struct file *file, int flag)
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index b592938a84bc..f99051b7adab 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -143,26 +143,6 @@ out:
143} 143}
144 144
145/** 145/**
146 * grow_file
147 * @ecryptfs_dentry: the eCryptfs dentry
148 *
149 * This is the code which will grow the file to its correct size.
150 */
151static int grow_file(struct dentry *ecryptfs_dentry)
152{
153 struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode;
154 char zero_virt[] = { 0x00 };
155 int rc = 0;
156
157 rc = ecryptfs_write(ecryptfs_inode, zero_virt, 0, 1);
158 i_size_write(ecryptfs_inode, 0);
159 rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
160 ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat.flags |=
161 ECRYPTFS_NEW_FILE;
162 return rc;
163}
164
165/**
166 * ecryptfs_initialize_file 146 * ecryptfs_initialize_file
167 * 147 *
168 * Cause the file to be changed from a basic empty file to an ecryptfs 148 * Cause the file to be changed from a basic empty file to an ecryptfs
@@ -181,7 +161,6 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
181 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 161 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
182 goto out; 162 goto out;
183 } 163 }
184 crypt_stat->flags |= ECRYPTFS_NEW_FILE;
185 ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); 164 ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
186 rc = ecryptfs_new_file_context(ecryptfs_dentry); 165 rc = ecryptfs_new_file_context(ecryptfs_dentry);
187 if (rc) { 166 if (rc) {
@@ -202,9 +181,6 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
202 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); 181 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
203 goto out; 182 goto out;
204 } 183 }
205 rc = grow_file(ecryptfs_dentry);
206 if (rc)
207 printk(KERN_ERR "Error growing file; rc = [%d]\n", rc);
208out: 184out:
209 return rc; 185 return rc;
210} 186}
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index c1436cff6f2d..03e609c45012 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -65,6 +65,24 @@ static int process_request_key_err(long err_code)
65 return rc; 65 return rc;
66} 66}
67 67
68static int process_find_global_auth_tok_for_sig_err(int err_code)
69{
70 int rc = err_code;
71
72 switch (err_code) {
73 case -ENOENT:
74 ecryptfs_printk(KERN_WARNING, "Missing auth tok\n");
75 break;
76 case -EINVAL:
77 ecryptfs_printk(KERN_WARNING, "Invalid auth tok\n");
78 break;
79 default:
80 rc = process_request_key_err(err_code);
81 break;
82 }
83 return rc;
84}
85
68/** 86/**
69 * ecryptfs_parse_packet_length 87 * ecryptfs_parse_packet_length
70 * @data: Pointer to memory containing length at offset 88 * @data: Pointer to memory containing length at offset
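The new process_find_global_auth_tok_for_sig_err() helper is consumed later in this patch (see the ecryptfs_generate_key_packet_set hunk below). A condensed call-site sketch, declarations omitted:

rc = ecryptfs_find_global_auth_tok_for_sig(&auth_tok_key, &auth_tok,
					   mount_crypt_stat, sig);
if (rc) {
	/* Warn on -ENOENT/-EINVAL; map anything else through
	 * process_request_key_err(). */
	rc = process_find_global_auth_tok_for_sig_err(rc);
	goto out;
}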
@@ -403,27 +421,120 @@ out:
403 return rc; 421 return rc;
404} 422}
405 423
424/**
425 * ecryptfs_verify_version
426 * @version: The version number to confirm
427 *
428 * Returns zero on good version; non-zero otherwise
429 */
430static int ecryptfs_verify_version(u16 version)
431{
432 int rc = 0;
433 unsigned char major;
434 unsigned char minor;
435
436 major = ((version >> 8) & 0xFF);
437 minor = (version & 0xFF);
438 if (major != ECRYPTFS_VERSION_MAJOR) {
439 ecryptfs_printk(KERN_ERR, "Major version number mismatch. "
440 "Expected [%d]; got [%d]\n",
441 ECRYPTFS_VERSION_MAJOR, major);
442 rc = -EINVAL;
443 goto out;
444 }
445 if (minor != ECRYPTFS_VERSION_MINOR) {
446 ecryptfs_printk(KERN_ERR, "Minor version number mismatch. "
447 "Expected [%d]; got [%d]\n",
448 ECRYPTFS_VERSION_MINOR, minor);
449 rc = -EINVAL;
450 goto out;
451 }
452out:
453 return rc;
454}
455
456/**
457 * ecryptfs_verify_auth_tok_from_key
458 * @auth_tok_key: key containing the authentication token
459 * @auth_tok: authentication token
460 *
461 * Returns zero on valid auth tok; -EINVAL otherwise
462 */
463static int
464ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
465 struct ecryptfs_auth_tok **auth_tok)
466{
467 int rc = 0;
468
469 (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
470 if (ecryptfs_verify_version((*auth_tok)->version)) {
471 printk(KERN_ERR "Data structure version mismatch. Userspace "
472 "tools must match eCryptfs kernel module with major "
473 "version [%d] and minor version [%d]\n",
474 ECRYPTFS_VERSION_MAJOR, ECRYPTFS_VERSION_MINOR);
475 rc = -EINVAL;
476 goto out;
477 }
478 if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD
479 && (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) {
480 printk(KERN_ERR "Invalid auth_tok structure "
481 "returned from key query\n");
482 rc = -EINVAL;
483 goto out;
484 }
485out:
486 return rc;
487}
488
406static int 489static int
407ecryptfs_find_global_auth_tok_for_sig( 490ecryptfs_find_global_auth_tok_for_sig(
408 struct ecryptfs_global_auth_tok **global_auth_tok, 491 struct key **auth_tok_key,
492 struct ecryptfs_auth_tok **auth_tok,
409 struct ecryptfs_mount_crypt_stat *mount_crypt_stat, char *sig) 493 struct ecryptfs_mount_crypt_stat *mount_crypt_stat, char *sig)
410{ 494{
411 struct ecryptfs_global_auth_tok *walker; 495 struct ecryptfs_global_auth_tok *walker;
412 int rc = 0; 496 int rc = 0;
413 497
414 (*global_auth_tok) = NULL; 498 (*auth_tok_key) = NULL;
499 (*auth_tok) = NULL;
415 mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); 500 mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
416 list_for_each_entry(walker, 501 list_for_each_entry(walker,
417 &mount_crypt_stat->global_auth_tok_list, 502 &mount_crypt_stat->global_auth_tok_list,
418 mount_crypt_stat_list) { 503 mount_crypt_stat_list) {
419 if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX) == 0) { 504 if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX))
420 rc = key_validate(walker->global_auth_tok_key); 505 continue;
421 if (!rc) 506
422 (*global_auth_tok) = walker; 507 if (walker->flags & ECRYPTFS_AUTH_TOK_INVALID) {
508 rc = -EINVAL;
423 goto out; 509 goto out;
424 } 510 }
511
512 rc = key_validate(walker->global_auth_tok_key);
513 if (rc) {
514 if (rc == -EKEYEXPIRED)
515 goto out;
516 goto out_invalid_auth_tok;
517 }
518
519 down_write(&(walker->global_auth_tok_key->sem));
520 rc = ecryptfs_verify_auth_tok_from_key(
521 walker->global_auth_tok_key, auth_tok);
522 if (rc)
523 goto out_invalid_auth_tok_unlock;
524
525 (*auth_tok_key) = walker->global_auth_tok_key;
526 key_get(*auth_tok_key);
527 goto out;
425 } 528 }
426 rc = -EINVAL; 529 rc = -ENOENT;
530 goto out;
531out_invalid_auth_tok_unlock:
532 up_write(&(walker->global_auth_tok_key->sem));
533out_invalid_auth_tok:
534 printk(KERN_WARNING "Invalidating auth tok with sig = [%s]\n", sig);
535 walker->flags |= ECRYPTFS_AUTH_TOK_INVALID;
536 key_put(walker->global_auth_tok_key);
537 walker->global_auth_tok_key = NULL;
427out: 538out:
428 mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); 539 mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
429 return rc; 540 return rc;
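With this rework, ecryptfs_find_global_auth_tok_for_sig() returns on success holding a key_get() reference and the key's semaphore write-locked, so callers (as the later hunks show) must pair it with up_write() and key_put(). A minimal caller sketch, declarations omitted and use_auth_tok() a placeholder:

rc = ecryptfs_find_global_auth_tok_for_sig(&auth_tok_key, &auth_tok,
					   mount_crypt_stat, sig);
if (!rc) {
	/* auth_tok is safe to dereference here: the key cannot be revoked
	 * or updated while its semaphore is held for write. */
	use_auth_tok(auth_tok);			/* placeholder for real work */
	up_write(&auth_tok_key->sem);		/* drop the write lock */
	key_put(auth_tok_key);			/* drop the key_get() reference */
}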
@@ -451,14 +562,11 @@ ecryptfs_find_auth_tok_for_sig(
451 struct ecryptfs_mount_crypt_stat *mount_crypt_stat, 562 struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
452 char *sig) 563 char *sig)
453{ 564{
454 struct ecryptfs_global_auth_tok *global_auth_tok;
455 int rc = 0; 565 int rc = 0;
456 566
457 (*auth_tok_key) = NULL; 567 rc = ecryptfs_find_global_auth_tok_for_sig(auth_tok_key, auth_tok,
458 (*auth_tok) = NULL; 568 mount_crypt_stat, sig);
459 if (ecryptfs_find_global_auth_tok_for_sig(&global_auth_tok, 569 if (rc == -ENOENT) {
460 mount_crypt_stat, sig)) {
461
462 /* if the flag ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY is set in the 570 /* if the flag ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY is set in the
463 * mount_crypt_stat structure, we prevent to use auth toks that 571 * mount_crypt_stat structure, we prevent to use auth toks that
464 * are not inserted through the ecryptfs_add_global_auth_tok 572 * are not inserted through the ecryptfs_add_global_auth_tok
@@ -470,8 +578,7 @@ ecryptfs_find_auth_tok_for_sig(
470 578
471 rc = ecryptfs_keyring_auth_tok_for_sig(auth_tok_key, auth_tok, 579 rc = ecryptfs_keyring_auth_tok_for_sig(auth_tok_key, auth_tok,
472 sig); 580 sig);
473 } else 581 }
474 (*auth_tok) = global_auth_tok->global_auth_tok;
475 return rc; 582 return rc;
476} 583}
477 584
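ecryptfs_find_auth_tok_for_sig() now distinguishes -ENOENT (no matching mount-wide token) from real failures and only then falls back to the user's keyring; per the comment above, that fallback is skipped when ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY is set (the exact check sits in the elided lines). A condensed sketch of the control flow, not a verbatim copy:

rc = ecryptfs_find_global_auth_tok_for_sig(auth_tok_key, auth_tok,
					   mount_crypt_stat, sig);
if (rc == -ENOENT &&
    !(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY))
	rc = ecryptfs_keyring_auth_tok_for_sig(auth_tok_key, auth_tok, sig);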
@@ -531,6 +638,16 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
531 } 638 }
532 s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 639 s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
533 (*packet_size) = 0; 640 (*packet_size) = 0;
641 rc = ecryptfs_find_auth_tok_for_sig(
642 &auth_tok_key,
643 &s->auth_tok, mount_crypt_stat,
644 mount_crypt_stat->global_default_fnek_sig);
645 if (rc) {
646 printk(KERN_ERR "%s: Error attempting to find auth tok for "
647 "fnek sig [%s]; rc = [%d]\n", __func__,
648 mount_crypt_stat->global_default_fnek_sig, rc);
649 goto out;
650 }
534 rc = ecryptfs_get_tfm_and_mutex_for_cipher_name( 651 rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(
535 &s->desc.tfm, 652 &s->desc.tfm,
536 &s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name); 653 &s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name);
@@ -616,16 +733,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
616 goto out_free_unlock; 733 goto out_free_unlock;
617 } 734 }
618 dest[s->i++] = s->cipher_code; 735 dest[s->i++] = s->cipher_code;
619 rc = ecryptfs_find_auth_tok_for_sig(
620 &auth_tok_key,
621 &s->auth_tok, mount_crypt_stat,
622 mount_crypt_stat->global_default_fnek_sig);
623 if (rc) {
624 printk(KERN_ERR "%s: Error attempting to find auth tok for "
625 "fnek sig [%s]; rc = [%d]\n", __func__,
626 mount_crypt_stat->global_default_fnek_sig, rc);
627 goto out_free_unlock;
628 }
629 /* TODO: Support other key modules than passphrase for 736 /* TODO: Support other key modules than passphrase for
630 * filename encryption */ 737 * filename encryption */
631 if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) { 738 if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
@@ -765,8 +872,10 @@ out_free_unlock:
765out_unlock: 872out_unlock:
766 mutex_unlock(s->tfm_mutex); 873 mutex_unlock(s->tfm_mutex);
767out: 874out:
768 if (auth_tok_key) 875 if (auth_tok_key) {
876 up_write(&(auth_tok_key->sem));
769 key_put(auth_tok_key); 877 key_put(auth_tok_key);
878 }
770 kfree(s); 879 kfree(s);
771 return rc; 880 return rc;
772} 881}
@@ -879,6 +988,15 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
879 __func__, s->cipher_code); 988 __func__, s->cipher_code);
880 goto out; 989 goto out;
881 } 990 }
991 rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key,
992 &s->auth_tok, mount_crypt_stat,
993 s->fnek_sig_hex);
994 if (rc) {
995 printk(KERN_ERR "%s: Error attempting to find auth tok for "
996 "fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex,
997 rc);
998 goto out;
999 }
882 rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->desc.tfm, 1000 rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->desc.tfm,
883 &s->tfm_mutex, 1001 &s->tfm_mutex,
884 s->cipher_string); 1002 s->cipher_string);
@@ -925,15 +1043,6 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
925 * >= ECRYPTFS_MAX_IV_BYTES. */ 1043 * >= ECRYPTFS_MAX_IV_BYTES. */
926 memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES); 1044 memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES);
927 s->desc.info = s->iv; 1045 s->desc.info = s->iv;
928 rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key,
929 &s->auth_tok, mount_crypt_stat,
930 s->fnek_sig_hex);
931 if (rc) {
932 printk(KERN_ERR "%s: Error attempting to find auth tok for "
933 "fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex,
934 rc);
935 goto out_free_unlock;
936 }
937 /* TODO: Support other key modules than passphrase for 1046 /* TODO: Support other key modules than passphrase for
938 * filename encryption */ 1047 * filename encryption */
939 if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) { 1048 if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
@@ -1002,8 +1111,10 @@ out:
1002 (*filename_size) = 0; 1111 (*filename_size) = 0;
1003 (*filename) = NULL; 1112 (*filename) = NULL;
1004 } 1113 }
1005 if (auth_tok_key) 1114 if (auth_tok_key) {
1115 up_write(&(auth_tok_key->sem));
1006 key_put(auth_tok_key); 1116 key_put(auth_tok_key);
1117 }
1007 kfree(s); 1118 kfree(s);
1008 return rc; 1119 return rc;
1009} 1120}
@@ -1520,38 +1631,6 @@ out:
1520 return rc; 1631 return rc;
1521} 1632}
1522 1633
1523/**
1524 * ecryptfs_verify_version
1525 * @version: The version number to confirm
1526 *
1527 * Returns zero on good version; non-zero otherwise
1528 */
1529static int ecryptfs_verify_version(u16 version)
1530{
1531 int rc = 0;
1532 unsigned char major;
1533 unsigned char minor;
1534
1535 major = ((version >> 8) & 0xFF);
1536 minor = (version & 0xFF);
1537 if (major != ECRYPTFS_VERSION_MAJOR) {
1538 ecryptfs_printk(KERN_ERR, "Major version number mismatch. "
1539 "Expected [%d]; got [%d]\n",
1540 ECRYPTFS_VERSION_MAJOR, major);
1541 rc = -EINVAL;
1542 goto out;
1543 }
1544 if (minor != ECRYPTFS_VERSION_MINOR) {
1545 ecryptfs_printk(KERN_ERR, "Minor version number mismatch. "
1546 "Expected [%d]; got [%d]\n",
1547 ECRYPTFS_VERSION_MINOR, minor);
1548 rc = -EINVAL;
1549 goto out;
1550 }
1551out:
1552 return rc;
1553}
1554
1555int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, 1634int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
1556 struct ecryptfs_auth_tok **auth_tok, 1635 struct ecryptfs_auth_tok **auth_tok,
1557 char *sig) 1636 char *sig)
@@ -1563,31 +1642,16 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
1563 printk(KERN_ERR "Could not find key with description: [%s]\n", 1642 printk(KERN_ERR "Could not find key with description: [%s]\n",
1564 sig); 1643 sig);
1565 rc = process_request_key_err(PTR_ERR(*auth_tok_key)); 1644 rc = process_request_key_err(PTR_ERR(*auth_tok_key));
1645 (*auth_tok_key) = NULL;
1566 goto out; 1646 goto out;
1567 } 1647 }
1568 (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key); 1648 down_write(&(*auth_tok_key)->sem);
1569 if (ecryptfs_verify_version((*auth_tok)->version)) { 1649 rc = ecryptfs_verify_auth_tok_from_key(*auth_tok_key, auth_tok);
1570 printk(KERN_ERR
1571 "Data structure version mismatch. "
1572 "Userspace tools must match eCryptfs "
1573 "kernel module with major version [%d] "
1574 "and minor version [%d]\n",
1575 ECRYPTFS_VERSION_MAJOR,
1576 ECRYPTFS_VERSION_MINOR);
1577 rc = -EINVAL;
1578 goto out_release_key;
1579 }
1580 if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD
1581 && (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) {
1582 printk(KERN_ERR "Invalid auth_tok structure "
1583 "returned from key query\n");
1584 rc = -EINVAL;
1585 goto out_release_key;
1586 }
1587out_release_key:
1588 if (rc) { 1650 if (rc) {
1651 up_write(&(*auth_tok_key)->sem);
1589 key_put(*auth_tok_key); 1652 key_put(*auth_tok_key);
1590 (*auth_tok_key) = NULL; 1653 (*auth_tok_key) = NULL;
1654 goto out;
1591 } 1655 }
1592out: 1656out:
1593 return rc; 1657 return rc;
@@ -1809,6 +1873,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
1809find_next_matching_auth_tok: 1873find_next_matching_auth_tok:
1810 found_auth_tok = 0; 1874 found_auth_tok = 0;
1811 if (auth_tok_key) { 1875 if (auth_tok_key) {
1876 up_write(&(auth_tok_key->sem));
1812 key_put(auth_tok_key); 1877 key_put(auth_tok_key);
1813 auth_tok_key = NULL; 1878 auth_tok_key = NULL;
1814 } 1879 }
@@ -1895,8 +1960,10 @@ found_matching_auth_tok:
1895out_wipe_list: 1960out_wipe_list:
1896 wipe_auth_tok_list(&auth_tok_list); 1961 wipe_auth_tok_list(&auth_tok_list);
1897out: 1962out:
1898 if (auth_tok_key) 1963 if (auth_tok_key) {
1964 up_write(&(auth_tok_key->sem));
1899 key_put(auth_tok_key); 1965 key_put(auth_tok_key);
1966 }
1900 return rc; 1967 return rc;
1901} 1968}
1902 1969
@@ -2324,7 +2391,7 @@ ecryptfs_generate_key_packet_set(char *dest_base,
2324 size_t max) 2391 size_t max)
2325{ 2392{
2326 struct ecryptfs_auth_tok *auth_tok; 2393 struct ecryptfs_auth_tok *auth_tok;
2327 struct ecryptfs_global_auth_tok *global_auth_tok; 2394 struct key *auth_tok_key = NULL;
2328 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = 2395 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
2329 &ecryptfs_superblock_to_private( 2396 &ecryptfs_superblock_to_private(
2330 ecryptfs_dentry->d_sb)->mount_crypt_stat; 2397 ecryptfs_dentry->d_sb)->mount_crypt_stat;
@@ -2343,21 +2410,16 @@ ecryptfs_generate_key_packet_set(char *dest_base,
2343 list_for_each_entry(key_sig, &crypt_stat->keysig_list, 2410 list_for_each_entry(key_sig, &crypt_stat->keysig_list,
2344 crypt_stat_list) { 2411 crypt_stat_list) {
2345 memset(key_rec, 0, sizeof(*key_rec)); 2412 memset(key_rec, 0, sizeof(*key_rec));
2346 rc = ecryptfs_find_global_auth_tok_for_sig(&global_auth_tok, 2413 rc = ecryptfs_find_global_auth_tok_for_sig(&auth_tok_key,
2414 &auth_tok,
2347 mount_crypt_stat, 2415 mount_crypt_stat,
2348 key_sig->keysig); 2416 key_sig->keysig);
2349 if (rc) { 2417 if (rc) {
2350 printk(KERN_ERR "Error attempting to get the global " 2418 printk(KERN_WARNING "Unable to retrieve auth tok with "
2351 "auth_tok; rc = [%d]\n", rc); 2419 "sig = [%s]\n", key_sig->keysig);
2420 rc = process_find_global_auth_tok_for_sig_err(rc);
2352 goto out_free; 2421 goto out_free;
2353 } 2422 }
2354 if (global_auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID) {
2355 printk(KERN_WARNING
2356 "Skipping invalid auth tok with sig = [%s]\n",
2357 global_auth_tok->sig);
2358 continue;
2359 }
2360 auth_tok = global_auth_tok->global_auth_tok;
2361 if (auth_tok->token_type == ECRYPTFS_PASSWORD) { 2423 if (auth_tok->token_type == ECRYPTFS_PASSWORD) {
2362 rc = write_tag_3_packet((dest_base + (*len)), 2424 rc = write_tag_3_packet((dest_base + (*len)),
2363 &max, auth_tok, 2425 &max, auth_tok,
@@ -2395,6 +2457,9 @@ ecryptfs_generate_key_packet_set(char *dest_base,
2395 rc = -EINVAL; 2457 rc = -EINVAL;
2396 goto out_free; 2458 goto out_free;
2397 } 2459 }
2460 up_write(&(auth_tok_key->sem));
2461 key_put(auth_tok_key);
2462 auth_tok_key = NULL;
2398 } 2463 }
2399 if (likely(max > 0)) { 2464 if (likely(max > 0)) {
2400 dest_base[(*len)] = 0x00; 2465 dest_base[(*len)] = 0x00;
@@ -2407,6 +2472,11 @@ out_free:
2407out: 2472out:
2408 if (rc) 2473 if (rc)
2409 (*len) = 0; 2474 (*len) = 0;
2475 if (auth_tok_key) {
2476 up_write(&(auth_tok_key->sem));
2477 key_put(auth_tok_key);
2478 }
2479
2410 mutex_unlock(&crypt_stat->keysig_list_mutex); 2480 mutex_unlock(&crypt_stat->keysig_list_mutex);
2411 return rc; 2481 return rc;
2412} 2482}
@@ -2424,6 +2494,7 @@ int ecryptfs_add_keysig(struct ecryptfs_crypt_stat *crypt_stat, char *sig)
2424 return -ENOMEM; 2494 return -ENOMEM;
2425 } 2495 }
2426 memcpy(new_key_sig->keysig, sig, ECRYPTFS_SIG_SIZE_HEX); 2496 memcpy(new_key_sig->keysig, sig, ECRYPTFS_SIG_SIZE_HEX);
2497 new_key_sig->keysig[ECRYPTFS_SIG_SIZE_HEX] = '\0';
2427 /* Caller must hold keysig_list_mutex */ 2498 /* Caller must hold keysig_list_mutex */
2428 list_add(&new_key_sig->crypt_stat_list, &crypt_stat->keysig_list); 2499 list_add(&new_key_sig->crypt_stat_list, &crypt_stat->keysig_list);
2429 2500
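The added NUL terminator matters because keysig is later printed with "%s"; assuming, as the new line implies, that the keysig buffer is ECRYPTFS_SIG_SIZE_HEX + 1 bytes, the pattern is:

char keysig[ECRYPTFS_SIG_SIZE_HEX + 1];

memcpy(keysig, sig, ECRYPTFS_SIG_SIZE_HEX);	/* copies the hex digits only */
keysig[ECRYPTFS_SIG_SIZE_HEX] = '\0';		/* terminate before any "%s" use */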
@@ -2453,7 +2524,6 @@ ecryptfs_add_global_auth_tok(struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
2453 mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); 2524 mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
2454 list_add(&new_auth_tok->mount_crypt_stat_list, 2525 list_add(&new_auth_tok->mount_crypt_stat_list,
2455 &mount_crypt_stat->global_auth_tok_list); 2526 &mount_crypt_stat->global_auth_tok_list);
2456 mount_crypt_stat->num_global_auth_toks++;
2457 mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); 2527 mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
2458out: 2528out:
2459 return rc; 2529 return rc;
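num_global_auth_toks is dropped because every user already walks global_auth_tok_list directly. If a count were ever needed again, it could be derived from the list under the same mutex; a hypothetical helper, purely illustrative:

static size_t count_global_auth_toks(
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
	struct ecryptfs_global_auth_tok *walker;
	size_t count = 0;

	mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
	list_for_each_entry(walker, &mount_crypt_stat->global_auth_tok_list,
			    mount_crypt_stat_list)
		count++;
	mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
	return count;
}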
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 758323a0f09a..c27c0ecf90bc 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -122,7 +122,6 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
122 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); 122 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
123 int rc = 0; 123 int rc = 0;
124 124
125 mutex_lock(&inode_info->lower_file_mutex);
126 if (!inode_info->lower_file) { 125 if (!inode_info->lower_file) {
127 struct dentry *lower_dentry; 126 struct dentry *lower_dentry;
128 struct vfsmount *lower_mnt = 127 struct vfsmount *lower_mnt =
@@ -138,7 +137,6 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
138 inode_info->lower_file = NULL; 137 inode_info->lower_file = NULL;
139 } 138 }
140 } 139 }
141 mutex_unlock(&inode_info->lower_file_mutex);
142 return rc; 140 return rc;
143} 141}
144 142
@@ -241,14 +239,14 @@ static int ecryptfs_init_global_auth_toks(
241 struct ecryptfs_mount_crypt_stat *mount_crypt_stat) 239 struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
242{ 240{
243 struct ecryptfs_global_auth_tok *global_auth_tok; 241 struct ecryptfs_global_auth_tok *global_auth_tok;
242 struct ecryptfs_auth_tok *auth_tok;
244 int rc = 0; 243 int rc = 0;
245 244
246 list_for_each_entry(global_auth_tok, 245 list_for_each_entry(global_auth_tok,
247 &mount_crypt_stat->global_auth_tok_list, 246 &mount_crypt_stat->global_auth_tok_list,
248 mount_crypt_stat_list) { 247 mount_crypt_stat_list) {
249 rc = ecryptfs_keyring_auth_tok_for_sig( 248 rc = ecryptfs_keyring_auth_tok_for_sig(
250 &global_auth_tok->global_auth_tok_key, 249 &global_auth_tok->global_auth_tok_key, &auth_tok,
251 &global_auth_tok->global_auth_tok,
252 global_auth_tok->sig); 250 global_auth_tok->sig);
253 if (rc) { 251 if (rc) {
254 printk(KERN_ERR "Could not find valid key in user " 252 printk(KERN_ERR "Could not find valid key in user "
@@ -256,8 +254,10 @@ static int ecryptfs_init_global_auth_toks(
256 "option: [%s]\n", global_auth_tok->sig); 254 "option: [%s]\n", global_auth_tok->sig);
257 global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID; 255 global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID;
258 goto out; 256 goto out;
259 } else 257 } else {
260 global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID; 258 global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID;
259 up_write(&(global_auth_tok->global_auth_tok_key)->sem);
260 }
261 } 261 }
262out: 262out:
263 return rc; 263 return rc;
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index cc64fca89f8d..6a44148c5fb9 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -62,6 +62,18 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
62{ 62{
63 int rc; 63 int rc;
64 64
65 /*
66 * Refuse to write the page out if we are called from reclaim context
67 * since our writepage() path may potentially allocate memory when
68 * calling into the lower fs vfs_write() which may in turn invoke
69 * us again.
70 */
71 if (current->flags & PF_MEMALLOC) {
72 redirty_page_for_writepage(wbc, page);
73 rc = 0;
74 goto out;
75 }
76
65 rc = ecryptfs_encrypt_page(page); 77 rc = ecryptfs_encrypt_page(page);
66 if (rc) { 78 if (rc) {
67 ecryptfs_printk(KERN_WARNING, "Error encrypting " 79 ecryptfs_printk(KERN_WARNING, "Error encrypting "
@@ -70,8 +82,8 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
70 goto out; 82 goto out;
71 } 83 }
72 SetPageUptodate(page); 84 SetPageUptodate(page);
73 unlock_page(page);
74out: 85out:
86 unlock_page(page);
75 return rc; 87 return rc;
76} 88}
77 89
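The PF_MEMALLOC check guards against reclaim recursion: writing a page through eCryptfs ends up in the lower filesystem's vfs_write(), which may itself allocate memory and re-enter reclaim. A condensed sketch of the guard, with the real writepage body elided and names illustrative:

static int guarded_writepage(struct page *page, struct writeback_control *wbc)
{
	if (current->flags & PF_MEMALLOC) {
		/* Called from direct reclaim: don't risk allocating in the
		 * lower fs, just redirty and let normal writeback retry. */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	/* ... encrypt and write the page as ecryptfs_writepage() does ... */
	unlock_page(page);
	return 0;
}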
@@ -193,11 +205,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
193 &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat; 205 &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
194 int rc = 0; 206 int rc = 0;
195 207
196 if (!crypt_stat 208 if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
197 || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
198 || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
199 ecryptfs_printk(KERN_DEBUG,
200 "Passing through unencrypted page\n");
201 rc = ecryptfs_read_lower_page_segment(page, page->index, 0, 209 rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
202 PAGE_CACHE_SIZE, 210 PAGE_CACHE_SIZE,
203 page->mapping->host); 211 page->mapping->host);
@@ -295,8 +303,7 @@ static int ecryptfs_write_begin(struct file *file,
295 struct ecryptfs_crypt_stat *crypt_stat = 303 struct ecryptfs_crypt_stat *crypt_stat =
296 &ecryptfs_inode_to_private(mapping->host)->crypt_stat; 304 &ecryptfs_inode_to_private(mapping->host)->crypt_stat;
297 305
298 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED) 306 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
299 || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
300 rc = ecryptfs_read_lower_page_segment( 307 rc = ecryptfs_read_lower_page_segment(
301 page, index, 0, PAGE_CACHE_SIZE, mapping->host); 308 page, index, 0, PAGE_CACHE_SIZE, mapping->host);
302 if (rc) { 309 if (rc) {
@@ -374,6 +381,11 @@ static int ecryptfs_write_begin(struct file *file,
374 && (pos != 0)) 381 && (pos != 0))
375 zero_user(page, 0, PAGE_CACHE_SIZE); 382 zero_user(page, 0, PAGE_CACHE_SIZE);
376out: 383out:
384 if (unlikely(rc)) {
385 unlock_page(page);
386 page_cache_release(page);
387 *pagep = NULL;
388 }
377 return rc; 389 return rc;
378} 390}
379 391
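The new error path in ->write_begin follows the address_space_operations contract: on failure the page must not be left locked or referenced, and clearing *pagep keeps callers from touching it. From the caller's point of view (sketch, declarations omitted):

rc = mapping->a_ops->write_begin(file, mapping, pos, len, flags,
				 &page, &fsdata);
if (rc)
	return rc;	/* page already unlocked and released, *pagep is NULL */
/* ... copy user data into the page, then call ->write_end() ... */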
@@ -486,13 +498,8 @@ static int ecryptfs_write_end(struct file *file,
486 struct ecryptfs_crypt_stat *crypt_stat = 498 struct ecryptfs_crypt_stat *crypt_stat =
487 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; 499 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
488 int rc; 500 int rc;
501 int need_unlock_page = 1;
489 502
490 if (crypt_stat->flags & ECRYPTFS_NEW_FILE) {
491 ecryptfs_printk(KERN_DEBUG, "ECRYPTFS_NEW_FILE flag set in "
492 "crypt_stat at memory location [%p]\n", crypt_stat);
493 crypt_stat->flags &= ~(ECRYPTFS_NEW_FILE);
494 } else
495 ecryptfs_printk(KERN_DEBUG, "Not a new file\n");
496 ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" 503 ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
497 "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); 504 "(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
498 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { 505 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
@@ -512,26 +519,26 @@ static int ecryptfs_write_end(struct file *file,
512 "zeros in page with index = [0x%.16lx]\n", index); 519 "zeros in page with index = [0x%.16lx]\n", index);
513 goto out; 520 goto out;
514 } 521 }
515 rc = ecryptfs_encrypt_page(page); 522 set_page_dirty(page);
516 if (rc) { 523 unlock_page(page);
517 ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " 524 need_unlock_page = 0;
518 "index [0x%.16lx])\n", index);
519 goto out;
520 }
521 if (pos + copied > i_size_read(ecryptfs_inode)) { 525 if (pos + copied > i_size_read(ecryptfs_inode)) {
522 i_size_write(ecryptfs_inode, pos + copied); 526 i_size_write(ecryptfs_inode, pos + copied);
523 ecryptfs_printk(KERN_DEBUG, "Expanded file size to " 527 ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
524 "[0x%.16llx]\n", 528 "[0x%.16llx]\n",
525 (unsigned long long)i_size_read(ecryptfs_inode)); 529 (unsigned long long)i_size_read(ecryptfs_inode));
530 balance_dirty_pages_ratelimited(mapping);
531 rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
532 if (rc) {
533 printk(KERN_ERR "Error writing inode size to metadata; "
534 "rc = [%d]\n", rc);
535 goto out;
536 }
526 } 537 }
527 rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); 538 rc = copied;
528 if (rc)
529 printk(KERN_ERR "Error writing inode size to metadata; "
530 "rc = [%d]\n", rc);
531 else
532 rc = copied;
533out: 539out:
534 unlock_page(page); 540 if (need_unlock_page)
541 unlock_page(page);
535 page_cache_release(page); 542 page_cache_release(page);
536 return rc; 543 return rc;
537} 544}
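The net effect of the write_end rework, condensed: the page is no longer encrypted synchronously but only marked dirty (encryption now happens in ->writepage), the inode size and header metadata are updated only when the write extends the file, and dirty-page throttling is applied. A sketch of the success path only, declarations omitted:

set_page_dirty(page);			/* defer encryption to ->writepage */
unlock_page(page);
if (pos + copied > i_size_read(ecryptfs_inode)) {
	i_size_write(ecryptfs_inode, pos + copied);
	balance_dirty_pages_ratelimited(mapping);
	rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
}
if (!rc)
	rc = copied;
page_cache_release(page);
return rc;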
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index db184ef15d3d..85d430963116 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -44,15 +44,11 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
44 ssize_t rc; 44 ssize_t rc;
45 45
46 inode_info = ecryptfs_inode_to_private(ecryptfs_inode); 46 inode_info = ecryptfs_inode_to_private(ecryptfs_inode);
47 mutex_lock(&inode_info->lower_file_mutex);
48 BUG_ON(!inode_info->lower_file); 47 BUG_ON(!inode_info->lower_file);
49 inode_info->lower_file->f_pos = offset;
50 fs_save = get_fs(); 48 fs_save = get_fs();
51 set_fs(get_ds()); 49 set_fs(get_ds());
52 rc = vfs_write(inode_info->lower_file, data, size, 50 rc = vfs_write(inode_info->lower_file, data, size, &offset);
53 &inode_info->lower_file->f_pos);
54 set_fs(fs_save); 51 set_fs(fs_save);
55 mutex_unlock(&inode_info->lower_file_mutex);
56 mark_inode_dirty_sync(ecryptfs_inode); 52 mark_inode_dirty_sync(ecryptfs_inode);
57 return rc; 53 return rc;
58} 54}
@@ -234,15 +230,11 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
234 mm_segment_t fs_save; 230 mm_segment_t fs_save;
235 ssize_t rc; 231 ssize_t rc;
236 232
237 mutex_lock(&inode_info->lower_file_mutex);
238 BUG_ON(!inode_info->lower_file); 233 BUG_ON(!inode_info->lower_file);
239 inode_info->lower_file->f_pos = offset;
240 fs_save = get_fs(); 234 fs_save = get_fs();
241 set_fs(get_ds()); 235 set_fs(get_ds());
242 rc = vfs_read(inode_info->lower_file, data, size, 236 rc = vfs_read(inode_info->lower_file, data, size, &offset);
243 &inode_info->lower_file->f_pos);
244 set_fs(fs_save); 237 set_fs(fs_save);
245 mutex_unlock(&inode_info->lower_file_mutex);
246 return rc; 238 return rc;
247} 239}
248 240
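A minimal sketch of the pattern the read_write.c hunks switch to: pass a local loff_t to vfs_read()/vfs_write() instead of seeking the shared lower file, so concurrent readers and writers no longer race on f_pos and lower_file_mutex becomes unnecessary. Function name is illustrative.

static ssize_t read_lower_at(struct file *lower_file, char *data,
			     size_t size, loff_t offset)
{
	mm_segment_t fs_save = get_fs();
	ssize_t rc;

	set_fs(get_ds());			/* data is a kernel buffer */
	rc = vfs_read(lower_file, data, size, &offset);	/* offset stays local */
	set_fs(fs_save);
	return rc;
}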
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 3042fe123a34..bacc882e1ae4 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -55,7 +55,6 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
55 if (unlikely(!inode_info)) 55 if (unlikely(!inode_info))
56 goto out; 56 goto out;
57 ecryptfs_init_crypt_stat(&inode_info->crypt_stat); 57 ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
58 mutex_init(&inode_info->lower_file_mutex);
59 inode_info->lower_file = NULL; 58 inode_info->lower_file = NULL;
60 inode = &inode_info->vfs_inode; 59 inode = &inode_info->vfs_inode;
61out: 60out:
@@ -198,7 +197,7 @@ static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt)
198const struct super_operations ecryptfs_sops = { 197const struct super_operations ecryptfs_sops = {
199 .alloc_inode = ecryptfs_alloc_inode, 198 .alloc_inode = ecryptfs_alloc_inode,
200 .destroy_inode = ecryptfs_destroy_inode, 199 .destroy_inode = ecryptfs_destroy_inode,
201 .drop_inode = generic_delete_inode, 200 .drop_inode = generic_drop_inode,
202 .statfs = ecryptfs_statfs, 201 .statfs = ecryptfs_statfs,
203 .remount_fs = NULL, 202 .remount_fs = NULL,
204 .evict_inode = ecryptfs_evict_inode, 203 .evict_inode = ecryptfs_evict_inode,
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 90f2729b7a5b..e913ad130fdd 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -24,7 +24,6 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/string.h> 25#include <linux/string.h>
26 26
27#define MLOG_MASK_PREFIX ML_INODE
28#include <cluster/masklog.h> 27#include <cluster/masklog.h>
29 28
30#include "ocfs2.h" 29#include "ocfs2.h"
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index e4984e259cb6..b27a0d86f8c5 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -30,7 +30,6 @@
30#include <linux/swap.h> 30#include <linux/swap.h>
31#include <linux/quotaops.h> 31#include <linux/quotaops.h>
32 32
33#define MLOG_MASK_PREFIX ML_DISK_ALLOC
34#include <cluster/masklog.h> 33#include <cluster/masklog.h>
35 34
36#include "ocfs2.h" 35#include "ocfs2.h"
@@ -50,6 +49,7 @@
50#include "uptodate.h" 49#include "uptodate.h"
51#include "xattr.h" 50#include "xattr.h"
52#include "refcounttree.h" 51#include "refcounttree.h"
52#include "ocfs2_trace.h"
53 53
54#include "buffer_head_io.h" 54#include "buffer_head_io.h"
55 55
@@ -886,8 +886,7 @@ static int ocfs2_validate_extent_block(struct super_block *sb,
886 struct ocfs2_extent_block *eb = 886 struct ocfs2_extent_block *eb =
887 (struct ocfs2_extent_block *)bh->b_data; 887 (struct ocfs2_extent_block *)bh->b_data;
888 888
889 mlog(0, "Validating extent block %llu\n", 889 trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr);
890 (unsigned long long)bh->b_blocknr);
891 890
892 BUG_ON(!buffer_uptodate(bh)); 891 BUG_ON(!buffer_uptodate(bh));
893 892
@@ -965,8 +964,6 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb,
965 struct buffer_head *eb_bh = NULL; 964 struct buffer_head *eb_bh = NULL;
966 u64 last_eb_blk = 0; 965 u64 last_eb_blk = 0;
967 966
968 mlog_entry_void();
969
970 el = et->et_root_el; 967 el = et->et_root_el;
971 last_eb_blk = ocfs2_et_get_last_eb_blk(et); 968 last_eb_blk = ocfs2_et_get_last_eb_blk(et);
972 969
@@ -987,7 +984,7 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb,
987bail: 984bail:
988 brelse(eb_bh); 985 brelse(eb_bh);
989 986
990 mlog_exit(retval); 987 trace_ocfs2_num_free_extents(retval);
991 return retval; 988 return retval;
992} 989}
993 990
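The mlog()-to-tracepoint conversion relies on the new ocfs2_trace.h header included above. That header defines its events through ocfs2-specific helper macros; purely as an illustration of the mechanism, a tracepoint such as trace_ocfs2_num_free_extents() could be declared with the generic TRACE_EVENT form (the usual TRACE_SYSTEM/define_trace.h boilerplate is omitted):

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_num_free_extents,
	TP_PROTO(int num_free),
	TP_ARGS(num_free),
	TP_STRUCT__entry(
		__field(int, num_free)
	),
	TP_fast_assign(
		__entry->num_free = num_free;
	),
	TP_printk("%d", __entry->num_free)
);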
@@ -1010,8 +1007,6 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
1010 OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci)); 1007 OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
1011 struct ocfs2_extent_block *eb; 1008 struct ocfs2_extent_block *eb;
1012 1009
1013 mlog_entry_void();
1014
1015 count = 0; 1010 count = 0;
1016 while (count < wanted) { 1011 while (count < wanted) {
1017 status = ocfs2_claim_metadata(handle, 1012 status = ocfs2_claim_metadata(handle,
@@ -1074,8 +1069,8 @@ bail:
1074 brelse(bhs[i]); 1069 brelse(bhs[i]);
1075 bhs[i] = NULL; 1070 bhs[i] = NULL;
1076 } 1071 }
1072 mlog_errno(status);
1077 } 1073 }
1078 mlog_exit(status);
1079 return status; 1074 return status;
1080} 1075}
1081 1076
@@ -1173,8 +1168,6 @@ static int ocfs2_add_branch(handle_t *handle,
1173 struct ocfs2_extent_list *el; 1168 struct ocfs2_extent_list *el;
1174 u32 new_cpos, root_end; 1169 u32 new_cpos, root_end;
1175 1170
1176 mlog_entry_void();
1177
1178 BUG_ON(!last_eb_bh || !*last_eb_bh); 1171 BUG_ON(!last_eb_bh || !*last_eb_bh);
1179 1172
1180 if (eb_bh) { 1173 if (eb_bh) {
@@ -1200,8 +1193,11 @@ static int ocfs2_add_branch(handle_t *handle,
1200 * from new_cpos). 1193 * from new_cpos).
1201 */ 1194 */
1202 if (root_end > new_cpos) { 1195 if (root_end > new_cpos) {
1203 mlog(0, "adjust the cluster end from %u to %u\n", 1196 trace_ocfs2_adjust_rightmost_branch(
1204 root_end, new_cpos); 1197 (unsigned long long)
1198 ocfs2_metadata_cache_owner(et->et_ci),
1199 root_end, new_cpos);
1200
1205 status = ocfs2_adjust_rightmost_branch(handle, et); 1201 status = ocfs2_adjust_rightmost_branch(handle, et);
1206 if (status) { 1202 if (status) {
1207 mlog_errno(status); 1203 mlog_errno(status);
@@ -1332,7 +1328,6 @@ bail:
1332 kfree(new_eb_bhs); 1328 kfree(new_eb_bhs);
1333 } 1329 }
1334 1330
1335 mlog_exit(status);
1336 return status; 1331 return status;
1337} 1332}
1338 1333
@@ -1353,8 +1348,6 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
1353 struct ocfs2_extent_list *root_el; 1348 struct ocfs2_extent_list *root_el;
1354 struct ocfs2_extent_list *eb_el; 1349 struct ocfs2_extent_list *eb_el;
1355 1350
1356 mlog_entry_void();
1357
1358 status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac, 1351 status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
1359 &new_eb_bh); 1352 &new_eb_bh);
1360 if (status < 0) { 1353 if (status < 0) {
@@ -1415,7 +1408,6 @@ static int ocfs2_shift_tree_depth(handle_t *handle,
1415bail: 1408bail:
1416 brelse(new_eb_bh); 1409 brelse(new_eb_bh);
1417 1410
1418 mlog_exit(status);
1419 return status; 1411 return status;
1420} 1412}
1421 1413
@@ -1446,8 +1438,6 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
1446 struct buffer_head *bh = NULL; 1438 struct buffer_head *bh = NULL;
1447 struct buffer_head *lowest_bh = NULL; 1439 struct buffer_head *lowest_bh = NULL;
1448 1440
1449 mlog_entry_void();
1450
1451 *target_bh = NULL; 1441 *target_bh = NULL;
1452 1442
1453 el = et->et_root_el; 1443 el = et->et_root_el;
@@ -1503,7 +1493,6 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
1503bail: 1493bail:
1504 brelse(bh); 1494 brelse(bh);
1505 1495
1506 mlog_exit(status);
1507 return status; 1496 return status;
1508} 1497}
1509 1498
@@ -1540,7 +1529,10 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
1540 * another tree level */ 1529 * another tree level */
1541 if (shift) { 1530 if (shift) {
1542 BUG_ON(bh); 1531 BUG_ON(bh);
1543 mlog(0, "need to shift tree depth (current = %d)\n", depth); 1532 trace_ocfs2_grow_tree(
1533 (unsigned long long)
1534 ocfs2_metadata_cache_owner(et->et_ci),
1535 depth);
1544 1536
1545 /* ocfs2_shift_tree_depth will return us a buffer with 1537 /* ocfs2_shift_tree_depth will return us a buffer with
1546 * the new extent block (so we can pass that to 1538 * the new extent block (so we can pass that to
@@ -1570,7 +1562,6 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
1570 1562
1571 /* call ocfs2_add_branch to add the final part of the tree with 1563 /* call ocfs2_add_branch to add the final part of the tree with
1572 * the new data. */ 1564 * the new data. */
1573 mlog(0, "add branch. bh = %p\n", bh);
1574 ret = ocfs2_add_branch(handle, et, bh, last_eb_bh, 1565 ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
1575 meta_ac); 1566 meta_ac);
1576 if (ret < 0) { 1567 if (ret < 0) {
@@ -1645,8 +1636,9 @@ static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
1645 } 1636 }
1646 insert_index = i; 1637 insert_index = i;
1647 1638
1648 mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n", 1639 trace_ocfs2_rotate_leaf(insert_cpos, insert_index,
1649 insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count)); 1640 has_empty, next_free,
1641 le16_to_cpu(el->l_count));
1650 1642
1651 BUG_ON(insert_index < 0); 1643 BUG_ON(insert_index < 0);
1652 BUG_ON(insert_index >= le16_to_cpu(el->l_count)); 1644 BUG_ON(insert_index >= le16_to_cpu(el->l_count));
@@ -2059,7 +2051,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
2059 left_el = path_leaf_el(left_path); 2051 left_el = path_leaf_el(left_path);
2060 right_el = path_leaf_el(right_path); 2052 right_el = path_leaf_el(right_path);
2061 for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) { 2053 for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
2062 mlog(0, "Adjust records at index %u\n", i); 2054 trace_ocfs2_complete_edge_insert(i);
2063 2055
2064 /* 2056 /*
2065 * One nice property of knowing that all of these 2057 * One nice property of knowing that all of these
@@ -2389,7 +2381,9 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
2389 goto out; 2381 goto out;
2390 } 2382 }
2391 2383
2392 mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos); 2384 trace_ocfs2_rotate_tree_right(
2385 (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
2386 insert_cpos, cpos);
2393 2387
2394 /* 2388 /*
2395 * What we want to do here is: 2389 * What we want to do here is:
@@ -2418,8 +2412,10 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
2418 * rotating subtrees. 2412 * rotating subtrees.
2419 */ 2413 */
2420 while (cpos && insert_cpos <= cpos) { 2414 while (cpos && insert_cpos <= cpos) {
2421 mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n", 2415 trace_ocfs2_rotate_tree_right(
2422 insert_cpos, cpos); 2416 (unsigned long long)
2417 ocfs2_metadata_cache_owner(et->et_ci),
2418 insert_cpos, cpos);
2423 2419
2424 ret = ocfs2_find_path(et->et_ci, left_path, cpos); 2420 ret = ocfs2_find_path(et->et_ci, left_path, cpos);
2425 if (ret) { 2421 if (ret) {
@@ -2461,10 +2457,10 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
2461 2457
2462 start = ocfs2_find_subtree_root(et, left_path, right_path); 2458 start = ocfs2_find_subtree_root(et, left_path, right_path);
2463 2459
2464 mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", 2460 trace_ocfs2_rotate_subtree(start,
2465 start, 2461 (unsigned long long)
2466 (unsigned long long) right_path->p_node[start].bh->b_blocknr, 2462 right_path->p_node[start].bh->b_blocknr,
2467 right_path->p_tree_depth); 2463 right_path->p_tree_depth);
2468 2464
2469 ret = ocfs2_extend_rotate_transaction(handle, start, 2465 ret = ocfs2_extend_rotate_transaction(handle, start,
2470 orig_credits, right_path); 2466 orig_credits, right_path);
@@ -2964,8 +2960,7 @@ static int __ocfs2_rotate_tree_left(handle_t *handle,
2964 subtree_root = ocfs2_find_subtree_root(et, left_path, 2960 subtree_root = ocfs2_find_subtree_root(et, left_path,
2965 right_path); 2961 right_path);
2966 2962
2967 mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", 2963 trace_ocfs2_rotate_subtree(subtree_root,
2968 subtree_root,
2969 (unsigned long long) 2964 (unsigned long long)
2970 right_path->p_node[subtree_root].bh->b_blocknr, 2965 right_path->p_node[subtree_root].bh->b_blocknr,
2971 right_path->p_tree_depth); 2966 right_path->p_tree_depth);
@@ -3989,9 +3984,11 @@ static int ocfs2_append_rec_to_path(handle_t *handle,
3989 goto out; 3984 goto out;
3990 } 3985 }
3991 3986
3992 mlog(0, "Append may need a left path update. cpos: %u, " 3987 trace_ocfs2_append_rec_to_path(
3993 "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos), 3988 (unsigned long long)
3994 left_cpos); 3989 ocfs2_metadata_cache_owner(et->et_ci),
3990 le32_to_cpu(insert_rec->e_cpos),
3991 left_cpos);
3995 3992
3996 /* 3993 /*
3997 * No need to worry if the append is already in the 3994 * No need to worry if the append is already in the
@@ -4562,7 +4559,7 @@ static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et,
4562 ocfs2_et_get_last_eb_blk(et), 4559 ocfs2_et_get_last_eb_blk(et),
4563 &bh); 4560 &bh);
4564 if (ret) { 4561 if (ret) {
4565 mlog_exit(ret); 4562 mlog_errno(ret);
4566 goto out; 4563 goto out;
4567 } 4564 }
4568 eb = (struct ocfs2_extent_block *) bh->b_data; 4565 eb = (struct ocfs2_extent_block *) bh->b_data;
@@ -4678,9 +4675,9 @@ int ocfs2_insert_extent(handle_t *handle,
4678 struct ocfs2_insert_type insert = {0, }; 4675 struct ocfs2_insert_type insert = {0, };
4679 struct ocfs2_extent_rec rec; 4676 struct ocfs2_extent_rec rec;
4680 4677
4681 mlog(0, "add %u clusters at position %u to owner %llu\n", 4678 trace_ocfs2_insert_extent_start(
4682 new_clusters, cpos, 4679 (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
4683 (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); 4680 cpos, new_clusters);
4684 4681
4685 memset(&rec, 0, sizeof(rec)); 4682 memset(&rec, 0, sizeof(rec));
4686 rec.e_cpos = cpu_to_le32(cpos); 4683 rec.e_cpos = cpu_to_le32(cpos);
@@ -4700,11 +4697,9 @@ int ocfs2_insert_extent(handle_t *handle,
4700 goto bail; 4697 goto bail;
4701 } 4698 }
4702 4699
4703 mlog(0, "Insert.appending: %u, Insert.Contig: %u, " 4700 trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig,
4704 "Insert.contig_index: %d, Insert.free_records: %d, " 4701 insert.ins_contig_index, free_records,
4705 "Insert.tree_depth: %d\n", 4702 insert.ins_tree_depth);
4706 insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
4707 free_records, insert.ins_tree_depth);
4708 4703
4709 if (insert.ins_contig == CONTIG_NONE && free_records == 0) { 4704 if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
4710 status = ocfs2_grow_tree(handle, et, 4705 status = ocfs2_grow_tree(handle, et,
@@ -4726,7 +4721,6 @@ int ocfs2_insert_extent(handle_t *handle,
4726bail: 4721bail:
4727 brelse(last_eb_bh); 4722 brelse(last_eb_bh);
4728 4723
4729 mlog_exit(status);
4730 return status; 4724 return status;
4731} 4725}
4732 4726
@@ -4746,7 +4740,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
4746 struct ocfs2_alloc_context *meta_ac, 4740 struct ocfs2_alloc_context *meta_ac,
4747 enum ocfs2_alloc_restarted *reason_ret) 4741 enum ocfs2_alloc_restarted *reason_ret)
4748{ 4742{
4749 int status = 0; 4743 int status = 0, err = 0;
4750 int free_extents; 4744 int free_extents;
4751 enum ocfs2_alloc_restarted reason = RESTART_NONE; 4745 enum ocfs2_alloc_restarted reason = RESTART_NONE;
4752 u32 bit_off, num_bits; 4746 u32 bit_off, num_bits;
@@ -4773,14 +4767,14 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
4773 * 2) we are so fragmented, we've needed to add metadata too 4767 * 2) we are so fragmented, we've needed to add metadata too
4774 * many times. */ 4768 * many times. */
4775 if (!free_extents && !meta_ac) { 4769 if (!free_extents && !meta_ac) {
4776 mlog(0, "we haven't reserved any metadata!\n"); 4770 err = -1;
4777 status = -EAGAIN; 4771 status = -EAGAIN;
4778 reason = RESTART_META; 4772 reason = RESTART_META;
4779 goto leave; 4773 goto leave;
4780 } else if ((!free_extents) 4774 } else if ((!free_extents)
4781 && (ocfs2_alloc_context_bits_left(meta_ac) 4775 && (ocfs2_alloc_context_bits_left(meta_ac)
4782 < ocfs2_extend_meta_needed(et->et_root_el))) { 4776 < ocfs2_extend_meta_needed(et->et_root_el))) {
4783 mlog(0, "filesystem is really fragmented...\n"); 4777 err = -2;
4784 status = -EAGAIN; 4778 status = -EAGAIN;
4785 reason = RESTART_META; 4779 reason = RESTART_META;
4786 goto leave; 4780 goto leave;
@@ -4805,9 +4799,9 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
4805 } 4799 }
4806 4800
4807 block = ocfs2_clusters_to_blocks(osb->sb, bit_off); 4801 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
4808 mlog(0, "Allocating %u clusters at block %u for owner %llu\n", 4802 trace_ocfs2_add_clusters_in_btree(
4809 num_bits, bit_off, 4803 (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
4810 (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); 4804 bit_off, num_bits);
4811 status = ocfs2_insert_extent(handle, et, *logical_offset, block, 4805 status = ocfs2_insert_extent(handle, et, *logical_offset, block,
4812 num_bits, flags, meta_ac); 4806 num_bits, flags, meta_ac);
4813 if (status < 0) { 4807 if (status < 0) {
@@ -4821,16 +4815,15 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
4821 *logical_offset += num_bits; 4815 *logical_offset += num_bits;
4822 4816
4823 if (clusters_to_add) { 4817 if (clusters_to_add) {
4824 mlog(0, "need to alloc once more, wanted = %u\n", 4818 err = clusters_to_add;
4825 clusters_to_add);
4826 status = -EAGAIN; 4819 status = -EAGAIN;
4827 reason = RESTART_TRANS; 4820 reason = RESTART_TRANS;
4828 } 4821 }
4829 4822
4830leave: 4823leave:
4831 mlog_exit(status);
4832 if (reason_ret) 4824 if (reason_ret)
4833 *reason_ret = reason; 4825 *reason_ret = reason;
4826 trace_ocfs2_add_clusters_in_btree_ret(status, reason, err);
4834 return status; 4827 return status;
4835} 4828}
4836 4829
@@ -5039,7 +5032,7 @@ int ocfs2_split_extent(handle_t *handle,
5039 ocfs2_et_get_last_eb_blk(et), 5032 ocfs2_et_get_last_eb_blk(et),
5040 &last_eb_bh); 5033 &last_eb_bh);
5041 if (ret) { 5034 if (ret) {
5042 mlog_exit(ret); 5035 mlog_errno(ret);
5043 goto out; 5036 goto out;
5044 } 5037 }
5045 5038
@@ -5056,9 +5049,9 @@ int ocfs2_split_extent(handle_t *handle,
5056 5049
5057 ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); 5050 ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
5058 5051
5059 mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n", 5052 trace_ocfs2_split_extent(split_index, ctxt.c_contig_type,
5060 split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent, 5053 ctxt.c_has_empty_extent,
5061 ctxt.c_split_covers_rec); 5054 ctxt.c_split_covers_rec);
5062 5055
5063 if (ctxt.c_contig_type == CONTIG_NONE) { 5056 if (ctxt.c_contig_type == CONTIG_NONE) {
5064 if (ctxt.c_split_covers_rec) 5057 if (ctxt.c_split_covers_rec)
@@ -5192,8 +5185,9 @@ int ocfs2_mark_extent_written(struct inode *inode,
5192{ 5185{
5193 int ret; 5186 int ret;
5194 5187
5195 mlog(0, "Inode %lu cpos %u, len %u, phys clusters %u\n", 5188 trace_ocfs2_mark_extent_written(
5196 inode->i_ino, cpos, len, phys); 5189 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5190 cpos, len, phys);
5197 5191
5198 if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { 5192 if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
5199 ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " 5193 ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
@@ -5512,11 +5506,10 @@ int ocfs2_remove_extent(handle_t *handle,
5512 5506
5513 BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); 5507 BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);
5514 5508
5515 mlog(0, "Owner %llu, remove (cpos %u, len %u). Existing index %d " 5509 trace_ocfs2_remove_extent(
5516 "(cpos %u, len %u)\n", 5510 (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
5517 (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), 5511 cpos, len, index, le32_to_cpu(rec->e_cpos),
5518 cpos, len, index, 5512 ocfs2_rec_clusters(el, rec));
5519 le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec));
5520 5513
5521 if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { 5514 if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
5522 ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, 5515 ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
@@ -5795,9 +5788,6 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
5795 struct ocfs2_dinode *di; 5788 struct ocfs2_dinode *di;
5796 struct ocfs2_truncate_log *tl; 5789 struct ocfs2_truncate_log *tl;
5797 5790
5798 mlog_entry("start_blk = %llu, num_clusters = %u\n",
5799 (unsigned long long)start_blk, num_clusters);
5800
5801 BUG_ON(mutex_trylock(&tl_inode->i_mutex)); 5791 BUG_ON(mutex_trylock(&tl_inode->i_mutex));
5802 5792
5803 start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); 5793 start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
@@ -5834,10 +5824,9 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
5834 goto bail; 5824 goto bail;
5835 } 5825 }
5836 5826
5837 mlog(0, "Log truncate of %u clusters starting at cluster %u to " 5827 trace_ocfs2_truncate_log_append(
5838 "%llu (index = %d)\n", num_clusters, start_cluster, 5828 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index,
5839 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index); 5829 start_cluster, num_clusters);
5840
5841 if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) { 5830 if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
5842 /* 5831 /*
5843 * Move index back to the record we are coalescing with. 5832 * Move index back to the record we are coalescing with.
@@ -5846,9 +5835,10 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
5846 index--; 5835 index--;
5847 5836
5848 num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters); 5837 num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
5849 mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n", 5838 trace_ocfs2_truncate_log_append(
5850 index, le32_to_cpu(tl->tl_recs[index].t_start), 5839 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
5851 num_clusters); 5840 index, le32_to_cpu(tl->tl_recs[index].t_start),
5841 num_clusters);
5852 } else { 5842 } else {
5853 tl->tl_recs[index].t_start = cpu_to_le32(start_cluster); 5843 tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
5854 tl->tl_used = cpu_to_le16(index + 1); 5844 tl->tl_used = cpu_to_le16(index + 1);
@@ -5859,7 +5849,6 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
5859 5849
5860 osb->truncated_clusters += num_clusters; 5850 osb->truncated_clusters += num_clusters;
5861bail: 5851bail:
5862 mlog_exit(status);
5863 return status; 5852 return status;
5864} 5853}
5865 5854
@@ -5878,8 +5867,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
5878 struct inode *tl_inode = osb->osb_tl_inode; 5867 struct inode *tl_inode = osb->osb_tl_inode;
5879 struct buffer_head *tl_bh = osb->osb_tl_bh; 5868 struct buffer_head *tl_bh = osb->osb_tl_bh;
5880 5869
5881 mlog_entry_void();
5882
5883 di = (struct ocfs2_dinode *) tl_bh->b_data; 5870 di = (struct ocfs2_dinode *) tl_bh->b_data;
5884 tl = &di->id2.i_dealloc; 5871 tl = &di->id2.i_dealloc;
5885 i = le16_to_cpu(tl->tl_used) - 1; 5872 i = le16_to_cpu(tl->tl_used) - 1;
@@ -5915,8 +5902,9 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
5915 /* if start_blk is not set, we ignore the record as 5902 /* if start_blk is not set, we ignore the record as
5916 * invalid. */ 5903 * invalid. */
5917 if (start_blk) { 5904 if (start_blk) {
5918 mlog(0, "free record %d, start = %u, clusters = %u\n", 5905 trace_ocfs2_replay_truncate_records(
5919 i, le32_to_cpu(rec.t_start), num_clusters); 5906 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
5907 i, le32_to_cpu(rec.t_start), num_clusters);
5920 5908
5921 status = ocfs2_free_clusters(handle, data_alloc_inode, 5909 status = ocfs2_free_clusters(handle, data_alloc_inode,
5922 data_alloc_bh, start_blk, 5910 data_alloc_bh, start_blk,
@@ -5932,7 +5920,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
5932 osb->truncated_clusters = 0; 5920 osb->truncated_clusters = 0;
5933 5921
5934bail: 5922bail:
5935 mlog_exit(status);
5936 return status; 5923 return status;
5937} 5924}
5938 5925
@@ -5949,8 +5936,6 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
5949 struct ocfs2_dinode *di; 5936 struct ocfs2_dinode *di;
5950 struct ocfs2_truncate_log *tl; 5937 struct ocfs2_truncate_log *tl;
5951 5938
5952 mlog_entry_void();
5953
5954 BUG_ON(mutex_trylock(&tl_inode->i_mutex)); 5939 BUG_ON(mutex_trylock(&tl_inode->i_mutex));
5955 5940
5956 di = (struct ocfs2_dinode *) tl_bh->b_data; 5941 di = (struct ocfs2_dinode *) tl_bh->b_data;
@@ -5962,8 +5947,9 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
5962 5947
5963 tl = &di->id2.i_dealloc; 5948 tl = &di->id2.i_dealloc;
5964 num_to_flush = le16_to_cpu(tl->tl_used); 5949 num_to_flush = le16_to_cpu(tl->tl_used);
5965 mlog(0, "Flush %u records from truncate log #%llu\n", 5950 trace_ocfs2_flush_truncate_log(
5966 num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno); 5951 (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
5952 num_to_flush);
5967 if (!num_to_flush) { 5953 if (!num_to_flush) {
5968 status = 0; 5954 status = 0;
5969 goto out; 5955 goto out;
@@ -6009,7 +5995,6 @@ out_mutex:
6009 iput(data_alloc_inode); 5995 iput(data_alloc_inode);
6010 5996
6011out: 5997out:
6012 mlog_exit(status);
6013 return status; 5998 return status;
6014} 5999}
6015 6000
@@ -6032,15 +6017,11 @@ static void ocfs2_truncate_log_worker(struct work_struct *work)
6032 container_of(work, struct ocfs2_super, 6017 container_of(work, struct ocfs2_super,
6033 osb_truncate_log_wq.work); 6018 osb_truncate_log_wq.work);
6034 6019
6035 mlog_entry_void();
6036
6037 status = ocfs2_flush_truncate_log(osb); 6020 status = ocfs2_flush_truncate_log(osb);
6038 if (status < 0) 6021 if (status < 0)
6039 mlog_errno(status); 6022 mlog_errno(status);
6040 else 6023 else
6041 ocfs2_init_steal_slots(osb); 6024 ocfs2_init_steal_slots(osb);
6042
6043 mlog_exit(status);
6044} 6025}
6045 6026
6046#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) 6027#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
@@ -6086,7 +6067,6 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
6086 *tl_inode = inode; 6067 *tl_inode = inode;
6087 *tl_bh = bh; 6068 *tl_bh = bh;
6088bail: 6069bail:
6089 mlog_exit(status);
6090 return status; 6070 return status;
6091} 6071}
6092 6072
@@ -6106,7 +6086,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
6106 6086
6107 *tl_copy = NULL; 6087 *tl_copy = NULL;
6108 6088
6109 mlog(0, "recover truncate log from slot %d\n", slot_num); 6089 trace_ocfs2_begin_truncate_log_recovery(slot_num);
6110 6090
6111 status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); 6091 status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
6112 if (status < 0) { 6092 if (status < 0) {
@@ -6123,8 +6103,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
6123 6103
6124 tl = &di->id2.i_dealloc; 6104 tl = &di->id2.i_dealloc;
6125 if (le16_to_cpu(tl->tl_used)) { 6105 if (le16_to_cpu(tl->tl_used)) {
6126 mlog(0, "We'll have %u logs to recover\n", 6106 trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used));
6127 le16_to_cpu(tl->tl_used));
6128 6107
6129 *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); 6108 *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
6130 if (!(*tl_copy)) { 6109 if (!(*tl_copy)) {
@@ -6157,9 +6136,9 @@ bail:
6157 if (status < 0 && (*tl_copy)) { 6136 if (status < 0 && (*tl_copy)) {
6158 kfree(*tl_copy); 6137 kfree(*tl_copy);
6159 *tl_copy = NULL; 6138 *tl_copy = NULL;
6139 mlog_errno(status);
6160 } 6140 }
6161 6141
6162 mlog_exit(status);
6163 return status; 6142 return status;
6164} 6143}
6165 6144
@@ -6174,8 +6153,6 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
6174 struct inode *tl_inode = osb->osb_tl_inode; 6153 struct inode *tl_inode = osb->osb_tl_inode;
6175 struct ocfs2_truncate_log *tl; 6154 struct ocfs2_truncate_log *tl;
6176 6155
6177 mlog_entry_void();
6178
6179 if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { 6156 if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
6180 mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); 6157 mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
6181 return -EINVAL; 6158 return -EINVAL;
@@ -6183,8 +6160,9 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
6183 6160
6184 tl = &tl_copy->id2.i_dealloc; 6161 tl = &tl_copy->id2.i_dealloc;
6185 num_recs = le16_to_cpu(tl->tl_used); 6162 num_recs = le16_to_cpu(tl->tl_used);
6186 mlog(0, "cleanup %u records from %llu\n", num_recs, 6163 trace_ocfs2_complete_truncate_log_recovery(
6187 (unsigned long long)le64_to_cpu(tl_copy->i_blkno)); 6164 (unsigned long long)le64_to_cpu(tl_copy->i_blkno),
6165 num_recs);
6188 6166
6189 mutex_lock(&tl_inode->i_mutex); 6167 mutex_lock(&tl_inode->i_mutex);
6190 for(i = 0; i < num_recs; i++) { 6168 for(i = 0; i < num_recs; i++) {
@@ -6219,7 +6197,6 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
6219bail_up: 6197bail_up:
6220 mutex_unlock(&tl_inode->i_mutex); 6198 mutex_unlock(&tl_inode->i_mutex);
6221 6199
6222 mlog_exit(status);
6223 return status; 6200 return status;
6224} 6201}
6225 6202
@@ -6228,8 +6205,6 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
6228 int status; 6205 int status;
6229 struct inode *tl_inode = osb->osb_tl_inode; 6206 struct inode *tl_inode = osb->osb_tl_inode;
6230 6207
6231 mlog_entry_void();
6232
6233 if (tl_inode) { 6208 if (tl_inode) {
6234 cancel_delayed_work(&osb->osb_truncate_log_wq); 6209 cancel_delayed_work(&osb->osb_truncate_log_wq);
6235 flush_workqueue(ocfs2_wq); 6210 flush_workqueue(ocfs2_wq);
@@ -6241,8 +6216,6 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
6241 brelse(osb->osb_tl_bh); 6216 brelse(osb->osb_tl_bh);
6242 iput(osb->osb_tl_inode); 6217 iput(osb->osb_tl_inode);
6243 } 6218 }
6244
6245 mlog_exit_void();
6246} 6219}
6247 6220
6248int ocfs2_truncate_log_init(struct ocfs2_super *osb) 6221int ocfs2_truncate_log_init(struct ocfs2_super *osb)
@@ -6251,8 +6224,6 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
6251 struct inode *tl_inode = NULL; 6224 struct inode *tl_inode = NULL;
6252 struct buffer_head *tl_bh = NULL; 6225 struct buffer_head *tl_bh = NULL;
6253 6226
6254 mlog_entry_void();
6255
6256 status = ocfs2_get_truncate_log_info(osb, 6227 status = ocfs2_get_truncate_log_info(osb,
6257 osb->slot_num, 6228 osb->slot_num,
6258 &tl_inode, 6229 &tl_inode,
@@ -6268,7 +6239,6 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
6268 osb->osb_tl_bh = tl_bh; 6239 osb->osb_tl_bh = tl_bh;
6269 osb->osb_tl_inode = tl_inode; 6240 osb->osb_tl_inode = tl_inode;
6270 6241
6271 mlog_exit(status);
6272 return status; 6242 return status;
6273} 6243}
6274 6244
@@ -6350,8 +6320,8 @@ static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
6350 else 6320 else
6351 bg_blkno = ocfs2_which_suballoc_group(head->free_blk, 6321 bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
6352 head->free_bit); 6322 head->free_bit);
6353 mlog(0, "Free bit: (bit %u, blkno %llu)\n", 6323 trace_ocfs2_free_cached_blocks(
6354 head->free_bit, (unsigned long long)head->free_blk); 6324 (unsigned long long)head->free_blk, head->free_bit);
6355 6325
6356 ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, 6326 ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
6357 head->free_bit, bg_blkno, 1); 6327 head->free_bit, bg_blkno, 1);
@@ -6404,8 +6374,7 @@ int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
6404 return ret; 6374 return ret;
6405 } 6375 }
6406 6376
6407 mlog(0, "Insert clusters: (bit %u, blk %llu)\n", 6377 trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit);
6408 bit, (unsigned long long)blkno);
6409 6378
6410 item->free_blk = blkno; 6379 item->free_blk = blkno;
6411 item->free_bit = bit; 6380 item->free_bit = bit;
@@ -6480,8 +6449,8 @@ int ocfs2_run_deallocs(struct ocfs2_super *osb,
6480 fl = ctxt->c_first_suballocator; 6449 fl = ctxt->c_first_suballocator;
6481 6450
6482 if (fl->f_first) { 6451 if (fl->f_first) {
6483 mlog(0, "Free items: (type %u, slot %d)\n", 6452 trace_ocfs2_run_deallocs(fl->f_inode_type,
6484 fl->f_inode_type, fl->f_slot); 6453 fl->f_slot);
6485 ret2 = ocfs2_free_cached_blocks(osb, 6454 ret2 = ocfs2_free_cached_blocks(osb,
6486 fl->f_inode_type, 6455 fl->f_inode_type,
6487 fl->f_slot, 6456 fl->f_slot,
@@ -6558,8 +6527,9 @@ int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
6558 goto out; 6527 goto out;
6559 } 6528 }
6560 6529
6561 mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n", 6530 trace_ocfs2_cache_block_dealloc(type, slot,
6562 type, slot, bit, (unsigned long long)blkno); 6531 (unsigned long long)suballoc,
6532 (unsigned long long)blkno, bit);
6563 6533
6564 item->free_bg = suballoc; 6534 item->free_bg = suballoc;
6565 item->free_blk = blkno; 6535 item->free_blk = blkno;
@@ -7005,8 +6975,6 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
7005 struct ocfs2_extent_tree et; 6975 struct ocfs2_extent_tree et;
7006 struct ocfs2_cached_dealloc_ctxt dealloc; 6976 struct ocfs2_cached_dealloc_ctxt dealloc;
7007 6977
7008 mlog_entry_void();
7009
7010 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); 6978 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
7011 ocfs2_init_dealloc_ctxt(&dealloc); 6979 ocfs2_init_dealloc_ctxt(&dealloc);
7012 6980
@@ -7041,8 +7009,11 @@ start:
7041 goto bail; 7009 goto bail;
7042 } 7010 }
7043 7011
7044 mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n", 7012 trace_ocfs2_commit_truncate(
7045 OCFS2_I(inode)->ip_clusters, path->p_tree_depth); 7013 (unsigned long long)OCFS2_I(inode)->ip_blkno,
7014 new_highest_cpos,
7015 OCFS2_I(inode)->ip_clusters,
7016 path->p_tree_depth);
7046 7017
7047 /* 7018 /*
7048 * By now, el will point to the extent list on the bottom most 7019 * By now, el will point to the extent list on the bottom most
@@ -7136,7 +7107,6 @@ bail:
7136 7107
7137 ocfs2_free_path(path); 7108 ocfs2_free_path(path);
7138 7109
7139 mlog_exit(status);
7140 return status; 7110 return status;
7141} 7111}
7142 7112
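
The pattern in alloc.c repeats throughout the patch: an mlog(0, ...) debug print becomes a typed trace_ocfs2_*() call, and the mlog_entry()/mlog_exit() bracketing is simply dropped. The tracepoints themselves live in the new fs/ocfs2/ocfs2_trace.h, which is not part of this excerpt, so the sketch below only shows one plausible way an event such as trace_ocfs2_flush_truncate_log() could be declared with the standard tracepoint macros; the event-class name and field layout here are assumptions, not the patch's actual definitions.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

/* Several converted call sites pass a block number plus one counter, so a
 * shared event class keeps the header compact (illustrative layout only). */
DECLARE_EVENT_CLASS(ocfs2__ull_uint,
	TP_PROTO(unsigned long long blkno, unsigned int value),
	TP_ARGS(blkno, value),
	TP_STRUCT__entry(
		__field(unsigned long long, blkno)
		__field(unsigned int, value)
	),
	TP_fast_assign(
		__entry->blkno = blkno;
		__entry->value = value;
	),
	TP_printk("%llu %u", __entry->blkno, __entry->value)
);

/* Would back the trace_ocfs2_flush_truncate_log(blkno, num_to_flush) call above. */
DEFINE_EVENT(ocfs2__ull_uint, ocfs2_flush_truncate_log,
	TP_PROTO(unsigned long long blkno, unsigned int value),
	TP_ARGS(blkno, value)
);

#endif /* _TRACE_OCFS2_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE ocfs2_trace
#include <trace/define_trace.h>

Declared this way, each event can be enabled individually at run time through the tracing filesystem, instead of being gated by the coarse masklog bits the old mlog(0, ...) calls relied on.
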
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index daea0359e974..ac97bca282d2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -29,7 +29,6 @@
29#include <linux/mpage.h> 29#include <linux/mpage.h>
30#include <linux/quotaops.h> 30#include <linux/quotaops.h>
31 31
32#define MLOG_MASK_PREFIX ML_FILE_IO
33#include <cluster/masklog.h> 32#include <cluster/masklog.h>
34 33
35#include "ocfs2.h" 34#include "ocfs2.h"
@@ -45,6 +44,7 @@
45#include "super.h" 44#include "super.h"
46#include "symlink.h" 45#include "symlink.h"
47#include "refcounttree.h" 46#include "refcounttree.h"
47#include "ocfs2_trace.h"
48 48
49#include "buffer_head_io.h" 49#include "buffer_head_io.h"
50 50
@@ -59,8 +59,9 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
59 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 59 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
60 void *kaddr; 60 void *kaddr;
61 61
62 mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, 62 trace_ocfs2_symlink_get_block(
63 (unsigned long long)iblock, bh_result, create); 63 (unsigned long long)OCFS2_I(inode)->ip_blkno,
64 (unsigned long long)iblock, bh_result, create);
64 65
65 BUG_ON(ocfs2_inode_is_fast_symlink(inode)); 66 BUG_ON(ocfs2_inode_is_fast_symlink(inode));
66 67
@@ -123,7 +124,6 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
123bail: 124bail:
124 brelse(bh); 125 brelse(bh);
125 126
126 mlog_exit(err);
127 return err; 127 return err;
128} 128}
129 129
@@ -136,8 +136,8 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
136 u64 p_blkno, count, past_eof; 136 u64 p_blkno, count, past_eof;
137 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 137 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
138 138
139 mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, 139 trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
140 (unsigned long long)iblock, bh_result, create); 140 (unsigned long long)iblock, bh_result, create);
141 141
142 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) 142 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
143 mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n", 143 mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
@@ -199,8 +199,9 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
199 } 199 }
200 200
201 past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); 201 past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
202 mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino, 202
203 (unsigned long long)past_eof); 203 trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
204 (unsigned long long)past_eof);
204 if (create && (iblock >= past_eof)) 205 if (create && (iblock >= past_eof))
205 set_buffer_new(bh_result); 206 set_buffer_new(bh_result);
206 207
@@ -208,7 +209,6 @@ bail:
208 if (err < 0) 209 if (err < 0)
209 err = -EIO; 210 err = -EIO;
210 211
211 mlog_exit(err);
212 return err; 212 return err;
213} 213}
214 214
@@ -278,7 +278,8 @@ static int ocfs2_readpage(struct file *file, struct page *page)
278 loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT; 278 loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
279 int ret, unlock = 1; 279 int ret, unlock = 1;
280 280
281 mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0)); 281 trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
282 (page ? page->index : 0));
282 283
283 ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page); 284 ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
284 if (ret != 0) { 285 if (ret != 0) {
@@ -323,7 +324,6 @@ out_inode_unlock:
323out: 324out:
324 if (unlock) 325 if (unlock)
325 unlock_page(page); 326 unlock_page(page);
326 mlog_exit(ret);
327 return ret; 327 return ret;
328} 328}
329 329
@@ -396,15 +396,11 @@ out_unlock:
396 */ 396 */
397static int ocfs2_writepage(struct page *page, struct writeback_control *wbc) 397static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
398{ 398{
399 int ret; 399 trace_ocfs2_writepage(
400 400 (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
401 mlog_entry("(0x%p)\n", page); 401 page->index);
402
403 ret = block_write_full_page(page, ocfs2_get_block, wbc);
404 402
405 mlog_exit(ret); 403 return block_write_full_page(page, ocfs2_get_block, wbc);
406
407 return ret;
408} 404}
409 405
410/* Taken from ext3. We don't necessarily need the full blown 406/* Taken from ext3. We don't necessarily need the full blown
@@ -450,7 +446,8 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
450 int err = 0; 446 int err = 0;
451 struct inode *inode = mapping->host; 447 struct inode *inode = mapping->host;
452 448
453 mlog_entry("(block = %llu)\n", (unsigned long long)block); 449 trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
450 (unsigned long long)block);
454 451
455 /* We don't need to lock journal system files, since they aren't 452 /* We don't need to lock journal system files, since they aren't
456 * accessed concurrently from multiple nodes. 453 * accessed concurrently from multiple nodes.
@@ -484,8 +481,6 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
484bail: 481bail:
485 status = err ? 0 : p_blkno; 482 status = err ? 0 : p_blkno;
486 483
487 mlog_exit((int)status);
488
489 return status; 484 return status;
490} 485}
491 486
@@ -616,9 +611,6 @@ static ssize_t ocfs2_direct_IO(int rw,
616{ 611{
617 struct file *file = iocb->ki_filp; 612 struct file *file = iocb->ki_filp;
618 struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; 613 struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
619 int ret;
620
621 mlog_entry_void();
622 614
623 /* 615 /*
624 * Fallback to buffered I/O if we see an inode without 616 * Fallback to buffered I/O if we see an inode without
@@ -631,13 +623,10 @@ static ssize_t ocfs2_direct_IO(int rw,
631 if (i_size_read(inode) <= offset) 623 if (i_size_read(inode) <= offset)
632 return 0; 624 return 0;
633 625
634 ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, 626 return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
635 iov, offset, nr_segs, 627 iov, offset, nr_segs,
636 ocfs2_direct_IO_get_blocks, 628 ocfs2_direct_IO_get_blocks,
637 ocfs2_dio_end_io, NULL, 0); 629 ocfs2_dio_end_io, NULL, 0);
638
639 mlog_exit(ret);
640 return ret;
641} 630}
642 631
643static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, 632static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
@@ -1026,6 +1015,12 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
1026 ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, 1015 ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
1027 &cluster_start, &cluster_end); 1016 &cluster_start, &cluster_end);
1028 1017
 1018 /* treat the write as new if a hole/lseek spanned across
1019 * the page boundary.
1020 */
1021 new = new | ((i_size_read(inode) <= page_offset(page)) &&
1022 (page_offset(page) <= user_pos));
1023
1029 if (page == wc->w_target_page) { 1024 if (page == wc->w_target_page) {
1030 map_from = user_pos & (PAGE_CACHE_SIZE - 1); 1025 map_from = user_pos & (PAGE_CACHE_SIZE - 1);
1031 map_to = map_from + user_len; 1026 map_to = map_from + user_len;
@@ -1534,9 +1529,9 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
1534 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1529 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1535 struct ocfs2_dinode *di = NULL; 1530 struct ocfs2_dinode *di = NULL;
1536 1531
1537 mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n", 1532 trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
1538 (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos, 1533 len, (unsigned long long)pos,
1539 oi->ip_dyn_features); 1534 oi->ip_dyn_features);
1540 1535
1541 /* 1536 /*
1542 * Handle inodes which already have inline data 1st. 1537 * Handle inodes which already have inline data 1st.
@@ -1739,6 +1734,13 @@ try_again:
1739 1734
1740 di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; 1735 di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1741 1736
1737 trace_ocfs2_write_begin_nolock(
1738 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1739 (long long)i_size_read(inode),
1740 le32_to_cpu(di->i_clusters),
1741 pos, len, flags, mmap_page,
1742 clusters_to_alloc, extents_to_split);
1743
1742 /* 1744 /*
1743 * We set w_target_from, w_target_to here so that 1745 * We set w_target_from, w_target_to here so that
1744 * ocfs2_write_end() knows which range in the target page to 1746 * ocfs2_write_end() knows which range in the target page to
@@ -1751,12 +1753,6 @@ try_again:
1751 * ocfs2_lock_allocators(). It greatly over-estimates 1753 * ocfs2_lock_allocators(). It greatly over-estimates
1752 * the work to be done. 1754 * the work to be done.
1753 */ 1755 */
1754 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u,"
1755 " clusters_to_add = %u, extents_to_split = %u\n",
1756 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1757 (long long)i_size_read(inode), le32_to_cpu(di->i_clusters),
1758 clusters_to_alloc, extents_to_split);
1759
1760 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), 1756 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
1761 wc->w_di_bh); 1757 wc->w_di_bh);
1762 ret = ocfs2_lock_allocators(inode, &et, 1758 ret = ocfs2_lock_allocators(inode, &et,
@@ -1938,8 +1934,8 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
1938 memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); 1934 memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
1939 kunmap_atomic(kaddr, KM_USER0); 1935 kunmap_atomic(kaddr, KM_USER0);
1940 1936
1941 mlog(0, "Data written to inode at offset %llu. " 1937 trace_ocfs2_write_end_inline(
1942 "id_count = %u, copied = %u, i_dyn_features = 0x%x\n", 1938 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1943 (unsigned long long)pos, *copied, 1939 (unsigned long long)pos, *copied,
1944 le16_to_cpu(di->id2.i_data.id_count), 1940 le16_to_cpu(di->id2.i_data.id_count),
1945 le16_to_cpu(di->i_dyn_features)); 1941 le16_to_cpu(di->i_dyn_features));
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index f9d5d3ffc75a..5d18ad10c27f 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -35,8 +35,8 @@
35#include "inode.h" 35#include "inode.h"
36#include "journal.h" 36#include "journal.h"
37#include "uptodate.h" 37#include "uptodate.h"
38
39#include "buffer_head_io.h" 38#include "buffer_head_io.h"
39#include "ocfs2_trace.h"
40 40
41/* 41/*
42 * Bits on bh->b_state used by ocfs2. 42 * Bits on bh->b_state used by ocfs2.
@@ -55,8 +55,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
55{ 55{
56 int ret = 0; 56 int ret = 0;
57 57
58 mlog_entry("(bh->b_blocknr = %llu, ci=%p)\n", 58 trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci);
59 (unsigned long long)bh->b_blocknr, ci);
60 59
61 BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO); 60 BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
62 BUG_ON(buffer_jbd(bh)); 61 BUG_ON(buffer_jbd(bh));
@@ -66,6 +65,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
66 * can get modified during recovery even if read-only. */ 65 * can get modified during recovery even if read-only. */
67 if (ocfs2_is_hard_readonly(osb)) { 66 if (ocfs2_is_hard_readonly(osb)) {
68 ret = -EROFS; 67 ret = -EROFS;
68 mlog_errno(ret);
69 goto out; 69 goto out;
70 } 70 }
71 71
@@ -91,11 +91,11 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
91 * uptodate. */ 91 * uptodate. */
92 ret = -EIO; 92 ret = -EIO;
93 put_bh(bh); 93 put_bh(bh);
94 mlog_errno(ret);
94 } 95 }
95 96
96 ocfs2_metadata_cache_io_unlock(ci); 97 ocfs2_metadata_cache_io_unlock(ci);
97out: 98out:
98 mlog_exit(ret);
99 return ret; 99 return ret;
100} 100}
101 101
@@ -106,10 +106,10 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
106 unsigned int i; 106 unsigned int i;
107 struct buffer_head *bh; 107 struct buffer_head *bh;
108 108
109 if (!nr) { 109 trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
110 mlog(ML_BH_IO, "No buffers will be read!\n"); 110
111 if (!nr)
111 goto bail; 112 goto bail;
112 }
113 113
114 for (i = 0 ; i < nr ; i++) { 114 for (i = 0 ; i < nr ; i++) {
115 if (bhs[i] == NULL) { 115 if (bhs[i] == NULL) {
@@ -123,10 +123,8 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
123 bh = bhs[i]; 123 bh = bhs[i];
124 124
125 if (buffer_jbd(bh)) { 125 if (buffer_jbd(bh)) {
126 mlog(ML_BH_IO, 126 trace_ocfs2_read_blocks_sync_jbd(
127 "trying to sync read a jbd " 127 (unsigned long long)bh->b_blocknr);
128 "managed bh (blocknr = %llu), skipping\n",
129 (unsigned long long)bh->b_blocknr);
130 continue; 128 continue;
131 } 129 }
132 130
@@ -186,8 +184,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
186 struct buffer_head *bh; 184 struct buffer_head *bh;
187 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 185 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
188 186
189 mlog_entry("(ci=%p, block=(%llu), nr=(%d), flags=%d)\n", 187 trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
190 ci, (unsigned long long)block, nr, flags);
191 188
192 BUG_ON(!ci); 189 BUG_ON(!ci);
193 BUG_ON((flags & OCFS2_BH_READAHEAD) && 190 BUG_ON((flags & OCFS2_BH_READAHEAD) &&
@@ -207,7 +204,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
207 } 204 }
208 205
209 if (nr == 0) { 206 if (nr == 0) {
210 mlog(ML_BH_IO, "No buffers will be read!\n");
211 status = 0; 207 status = 0;
212 goto bail; 208 goto bail;
213 } 209 }
@@ -251,8 +247,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
251 */ 247 */
252 248
253 if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) { 249 if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
254 mlog(ML_UPTODATE, 250 trace_ocfs2_read_blocks_from_disk(
255 "bh (%llu), owner %llu not uptodate\n",
256 (unsigned long long)bh->b_blocknr, 251 (unsigned long long)bh->b_blocknr,
257 (unsigned long long)ocfs2_metadata_cache_owner(ci)); 252 (unsigned long long)ocfs2_metadata_cache_owner(ci));
258 /* We're using ignore_cache here to say 253 /* We're using ignore_cache here to say
@@ -260,11 +255,10 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
260 ignore_cache = 1; 255 ignore_cache = 1;
261 } 256 }
262 257
258 trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr,
259 ignore_cache, buffer_jbd(bh), buffer_dirty(bh));
260
263 if (buffer_jbd(bh)) { 261 if (buffer_jbd(bh)) {
264 if (ignore_cache)
265 mlog(ML_BH_IO, "trying to sync read a jbd "
266 "managed bh (blocknr = %llu)\n",
267 (unsigned long long)bh->b_blocknr);
268 continue; 262 continue;
269 } 263 }
270 264
@@ -272,9 +266,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
272 if (buffer_dirty(bh)) { 266 if (buffer_dirty(bh)) {
273 /* This should probably be a BUG, or 267 /* This should probably be a BUG, or
274 * at least return an error. */ 268 * at least return an error. */
275 mlog(ML_BH_IO, "asking me to sync read a dirty "
276 "buffer! (blocknr = %llu)\n",
277 (unsigned long long)bh->b_blocknr);
278 continue; 269 continue;
279 } 270 }
280 271
@@ -367,14 +358,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
367 } 358 }
368 ocfs2_metadata_cache_io_unlock(ci); 359 ocfs2_metadata_cache_io_unlock(ci);
369 360
370 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", 361 trace_ocfs2_read_blocks_end((unsigned long long)block, nr,
371 (unsigned long long)block, nr, 362 flags, ignore_cache);
372 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
373 flags);
374 363
375bail: 364bail:
376 365
377 mlog_exit(status);
378 return status; 366 return status;
379} 367}
380 368
@@ -408,13 +396,12 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
408 int ret = 0; 396 int ret = 0;
409 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; 397 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
410 398
411 mlog_entry_void();
412
413 BUG_ON(buffer_jbd(bh)); 399 BUG_ON(buffer_jbd(bh));
414 ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr); 400 ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
415 401
416 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) { 402 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
417 ret = -EROFS; 403 ret = -EROFS;
404 mlog_errno(ret);
418 goto out; 405 goto out;
419 } 406 }
420 407
@@ -434,9 +421,9 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
434 if (!buffer_uptodate(bh)) { 421 if (!buffer_uptodate(bh)) {
435 ret = -EIO; 422 ret = -EIO;
436 put_bh(bh); 423 put_bh(bh);
424 mlog_errno(ret);
437 } 425 }
438 426
439out: 427out:
440 mlog_exit(ret);
441 return ret; 428 return ret;
442} 429}
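
The buffer_head_io.c hunks also show the other half of the conversion: with mlog_exit() gone there is no automatic print of the return value on the way out, so the patch adds mlog_errno() right where an error code is assigned (the -EROFS and -EIO paths above). A minimal sketch of the resulting idiom, using hypothetical names (ocfs2_example_io() and do_io() are not functions from the patch):

static int ocfs2_example_io(struct ocfs2_super *osb, struct buffer_head *bh)
{
	int ret = 0;

	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		mlog_errno(ret);	/* report at the failure site ... */
		goto out;
	}

	ret = do_io(bh);		/* hypothetical I/O helper */
	if (ret < 0)
		mlog_errno(ret);	/* ... because nothing logs it at return */
out:
	return ret;
}
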
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 1adab287bd24..2461eb3272ed 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1654,8 +1654,6 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg)
1654 struct o2hb_disk_slot *slot; 1654 struct o2hb_disk_slot *slot;
1655 struct o2hb_disk_heartbeat_block *hb_block; 1655 struct o2hb_disk_heartbeat_block *hb_block;
1656 1656
1657 mlog_entry_void();
1658
1659 ret = o2hb_read_slots(reg, reg->hr_blocks); 1657 ret = o2hb_read_slots(reg, reg->hr_blocks);
1660 if (ret) { 1658 if (ret) {
1661 mlog_errno(ret); 1659 mlog_errno(ret);
@@ -1677,7 +1675,6 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg)
1677 } 1675 }
1678 1676
1679out: 1677out:
1680 mlog_exit(ret);
1681 return ret; 1678 return ret;
1682} 1679}
1683 1680
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 6c61771469af..07ac24fd9252 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -30,7 +30,7 @@
30 30
31struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); 31struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
32EXPORT_SYMBOL_GPL(mlog_and_bits); 32EXPORT_SYMBOL_GPL(mlog_and_bits);
33struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(MLOG_INITIAL_NOT_MASK); 33struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0);
34EXPORT_SYMBOL_GPL(mlog_not_bits); 34EXPORT_SYMBOL_GPL(mlog_not_bits);
35 35
36static ssize_t mlog_mask_show(u64 mask, char *buf) 36static ssize_t mlog_mask_show(u64 mask, char *buf)
@@ -80,8 +80,6 @@ struct mlog_attribute {
80} 80}
81 81
82static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { 82static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
83 define_mask(ENTRY),
84 define_mask(EXIT),
85 define_mask(TCP), 83 define_mask(TCP),
86 define_mask(MSG), 84 define_mask(MSG),
87 define_mask(SOCKET), 85 define_mask(SOCKET),
@@ -93,27 +91,11 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
93 define_mask(DLM_THREAD), 91 define_mask(DLM_THREAD),
94 define_mask(DLM_MASTER), 92 define_mask(DLM_MASTER),
95 define_mask(DLM_RECOVERY), 93 define_mask(DLM_RECOVERY),
96 define_mask(AIO),
97 define_mask(JOURNAL),
98 define_mask(DISK_ALLOC),
99 define_mask(SUPER),
100 define_mask(FILE_IO),
101 define_mask(EXTENT_MAP),
102 define_mask(DLM_GLUE), 94 define_mask(DLM_GLUE),
103 define_mask(BH_IO),
104 define_mask(UPTODATE),
105 define_mask(NAMEI),
106 define_mask(INODE),
107 define_mask(VOTE), 95 define_mask(VOTE),
108 define_mask(DCACHE),
109 define_mask(CONN), 96 define_mask(CONN),
110 define_mask(QUORUM), 97 define_mask(QUORUM),
111 define_mask(EXPORT),
112 define_mask(XATTR),
113 define_mask(QUOTA),
114 define_mask(REFCOUNT),
115 define_mask(BASTS), 98 define_mask(BASTS),
116 define_mask(RESERVATIONS),
117 define_mask(CLUSTER), 99 define_mask(CLUSTER),
118 define_mask(ERROR), 100 define_mask(ERROR),
119 define_mask(NOTICE), 101 define_mask(NOTICE),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 34d6544357d9..baa2b9ef7eef 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -82,41 +82,23 @@
82 82
83/* bits that are frequently given and infrequently matched in the low word */ 83/* bits that are frequently given and infrequently matched in the low word */
84/* NOTE: If you add a flag, you need to also update masklog.c! */ 84/* NOTE: If you add a flag, you need to also update masklog.c! */
85#define ML_ENTRY 0x0000000000000001ULL /* func call entry */ 85#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
86#define ML_EXIT 0x0000000000000002ULL /* func call exit */ 86#define ML_MSG 0x0000000000000002ULL /* net network messages */
87#define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */ 87#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
88#define ML_MSG 0x0000000000000008ULL /* net network messages */ 88#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
89#define ML_SOCKET 0x0000000000000010ULL /* net socket lifetime */ 89#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
90#define ML_HEARTBEAT 0x0000000000000020ULL /* hb all heartbeat tracking */ 90#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
91#define ML_HB_BIO 0x0000000000000040ULL /* hb io tracing */ 91#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
92#define ML_DLMFS 0x0000000000000080ULL /* dlm user dlmfs */ 92#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
93#define ML_DLM 0x0000000000000100ULL /* dlm general debugging */ 93#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
94#define ML_DLM_DOMAIN 0x0000000000000200ULL /* dlm domain debugging */ 94#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
95#define ML_DLM_THREAD 0x0000000000000400ULL /* dlm domain thread */ 95#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm master functions */
96#define ML_DLM_MASTER 0x0000000000000800ULL /* dlm master functions */ 96#define ML_DLM_GLUE 0x0000000000000800ULL /* ocfs2 dlm glue layer */
97#define ML_DLM_RECOVERY 0x0000000000001000ULL /* dlm master functions */ 97#define ML_VOTE 0x0000000000001000ULL /* ocfs2 node messaging */
98#define ML_AIO 0x0000000000002000ULL /* ocfs2 aio read and write */ 98#define ML_CONN 0x0000000000002000ULL /* net connection management */
99#define ML_JOURNAL 0x0000000000004000ULL /* ocfs2 journalling functions */ 99#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
100#define ML_DISK_ALLOC 0x0000000000008000ULL /* ocfs2 disk allocation */ 100#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
101#define ML_SUPER 0x0000000000010000ULL /* ocfs2 mount / umount */ 101#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
102#define ML_FILE_IO 0x0000000000020000ULL /* ocfs2 file I/O */
103#define ML_EXTENT_MAP 0x0000000000040000ULL /* ocfs2 extent map caching */
104#define ML_DLM_GLUE 0x0000000000080000ULL /* ocfs2 dlm glue layer */
105#define ML_BH_IO 0x0000000000100000ULL /* ocfs2 buffer I/O */
106#define ML_UPTODATE 0x0000000000200000ULL /* ocfs2 caching sequence #'s */
107#define ML_NAMEI 0x0000000000400000ULL /* ocfs2 directory / namespace */
108#define ML_INODE 0x0000000000800000ULL /* ocfs2 inode manipulation */
109#define ML_VOTE 0x0000000001000000ULL /* ocfs2 node messaging */
110#define ML_DCACHE 0x0000000002000000ULL /* ocfs2 dcache operations */
111#define ML_CONN 0x0000000004000000ULL /* net connection management */
112#define ML_QUORUM 0x0000000008000000ULL /* net connection quorum */
113#define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */
114#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
115#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
116#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
117#define ML_BASTS 0x0000000100000000ULL /* dlmglue asts and basts */
118#define ML_RESERVATIONS 0x0000000200000000ULL /* ocfs2 alloc reservations */
119#define ML_CLUSTER 0x0000000400000000ULL /* cluster stack */
120 102
121/* bits that are infrequently given and frequently matched in the high word */ 103/* bits that are infrequently given and frequently matched in the high word */
122#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */ 104#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
@@ -124,7 +106,6 @@
124#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */ 106#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
125 107
126#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE) 108#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
127#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
128#ifndef MLOG_MASK_PREFIX 109#ifndef MLOG_MASK_PREFIX
129#define MLOG_MASK_PREFIX 0 110#define MLOG_MASK_PREFIX 0
130#endif 111#endif
@@ -222,58 +203,6 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
222 mlog(ML_ERROR, "status = %lld\n", (long long)_st); \ 203 mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
223} while (0) 204} while (0)
224 205
225#if defined(CONFIG_OCFS2_DEBUG_MASKLOG)
226#define mlog_entry(fmt, args...) do { \
227 mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \
228} while (0)
229
230#define mlog_entry_void() do { \
231 mlog(ML_ENTRY, "ENTRY:\n"); \
232} while (0)
233
234/*
235 * We disable this for sparse.
236 */
237#if !defined(__CHECKER__)
238#define mlog_exit(st) do { \
239 if (__builtin_types_compatible_p(typeof(st), unsigned long)) \
240 mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \
241 else if (__builtin_types_compatible_p(typeof(st), signed long)) \
242 mlog(ML_EXIT, "EXIT: %ld\n", (signed long) (st)); \
243 else if (__builtin_types_compatible_p(typeof(st), unsigned int) \
244 || __builtin_types_compatible_p(typeof(st), unsigned short) \
245 || __builtin_types_compatible_p(typeof(st), unsigned char)) \
246 mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \
247 else if (__builtin_types_compatible_p(typeof(st), signed int) \
248 || __builtin_types_compatible_p(typeof(st), signed short) \
249 || __builtin_types_compatible_p(typeof(st), signed char)) \
250 mlog(ML_EXIT, "EXIT: %d\n", (signed int) (st)); \
251 else if (__builtin_types_compatible_p(typeof(st), long long)) \
252 mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
253 else \
254 mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \
255} while (0)
256#else
257#define mlog_exit(st) do { \
258 mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
259} while (0)
260#endif
261
262#define mlog_exit_ptr(ptr) do { \
263 mlog(ML_EXIT, "EXIT: %p\n", ptr); \
264} while (0)
265
266#define mlog_exit_void() do { \
267 mlog(ML_EXIT, "EXIT\n"); \
268} while (0)
269#else
270#define mlog_entry(...) do { } while (0)
271#define mlog_entry_void(...) do { } while (0)
272#define mlog_exit(...) do { } while (0)
273#define mlog_exit_ptr(...) do { } while (0)
274#define mlog_exit_void(...) do { } while (0)
275#endif /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */
276
277#define mlog_bug_on_msg(cond, fmt, args...) do { \ 206#define mlog_bug_on_msg(cond, fmt, args...) do { \
278 if (cond) { \ 207 if (cond) { \
279 mlog(ML_ERROR, "bug expression: " #cond "\n"); \ 208 mlog(ML_ERROR, "bug expression: " #cond "\n"); \
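
Because ML_ENTRY and ML_EXIT occupied the two lowest bits, removing them together with the filesystem-side masks renumbers everything that remains; the hunk above shows, for example, that 0x0000000000010000ULL used to select ML_SUPER and now selects ML_CLUSTER. Callers are unaffected as long as they use the symbolic names, which is the only form the patch leaves in place. A small illustrative snippet:

	/* Build masks from the ML_* symbols, never from raw bit values:
	 * this patch renumbers the surviving bits, so a hard-coded
	 * 0x0000000000010000ULL now selects ML_CLUSTER instead of the
	 * old ML_SUPER. */
	mlog(ML_NOTICE, "symbolic masks keep working across the renumbering\n");
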
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 3b11cb1e38fc..ee04ff5ee603 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -210,10 +210,6 @@ static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
210 sc->sc_tv_func_stop = ktime_get(); 210 sc->sc_tv_func_stop = ktime_get();
211} 211}
212 212
213static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
214{
215 return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
216}
217#else /* CONFIG_DEBUG_FS */ 213#else /* CONFIG_DEBUG_FS */
218# define o2net_init_nst(a, b, c, d, e) 214# define o2net_init_nst(a, b, c, d, e)
219# define o2net_set_nst_sock_time(a) 215# define o2net_set_nst_sock_time(a)
@@ -227,10 +223,14 @@ static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
227# define o2net_set_advance_stop_time(a) 223# define o2net_set_advance_stop_time(a)
228# define o2net_set_func_start_time(a) 224# define o2net_set_func_start_time(a)
229# define o2net_set_func_stop_time(a) 225# define o2net_set_func_stop_time(a)
230# define o2net_get_func_run_time(a) (ktime_t)0
231#endif /* CONFIG_DEBUG_FS */ 226#endif /* CONFIG_DEBUG_FS */
232 227
233#ifdef CONFIG_OCFS2_FS_STATS 228#ifdef CONFIG_OCFS2_FS_STATS
229static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
230{
231 return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
232}
233
234static void o2net_update_send_stats(struct o2net_send_tracking *nst, 234static void o2net_update_send_stats(struct o2net_send_tracking *nst,
235 struct o2net_sock_container *sc) 235 struct o2net_sock_container *sc)
236{ 236{
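
The tcp.c hunk is a small dependency fix rather than a logging conversion: o2net_get_func_run_time() is only consumed by o2net_update_send_stats(), which is built under CONFIG_OCFS2_FS_STATS, so the helper (and its !DEBUG_FS stub) moves out of the CONFIG_DEBUG_FS block and next to its sole caller, presumably so it is not left defined but unused when DEBUG_FS is enabled without FS_STATS. The resulting layout, roughly:

#ifdef CONFIG_DEBUG_FS
/* ktime capture helpers: o2net_set_func_start_time(), o2net_set_func_stop_time(), ... */
#else
/* the same helpers stubbed out as empty macros */
#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_OCFS2_FS_STATS
static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
{
	return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
}

static void o2net_update_send_stats(struct o2net_send_tracking *nst,
				    struct o2net_sock_container *sc)
{
	/* ... consumes o2net_get_func_run_time(sc) ... */
}
#endif /* CONFIG_OCFS2_FS_STATS */
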
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 7eb90403fc8a..e5ba34818332 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -28,7 +28,6 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/namei.h> 29#include <linux/namei.h>
30 30
31#define MLOG_MASK_PREFIX ML_DCACHE
32#include <cluster/masklog.h> 31#include <cluster/masklog.h>
33 32
34#include "ocfs2.h" 33#include "ocfs2.h"
@@ -39,6 +38,7 @@
39#include "file.h" 38#include "file.h"
40#include "inode.h" 39#include "inode.h"
41#include "super.h" 40#include "super.h"
41#include "ocfs2_trace.h"
42 42
43void ocfs2_dentry_attach_gen(struct dentry *dentry) 43void ocfs2_dentry_attach_gen(struct dentry *dentry)
44{ 44{
@@ -62,8 +62,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
62 inode = dentry->d_inode; 62 inode = dentry->d_inode;
63 osb = OCFS2_SB(dentry->d_sb); 63 osb = OCFS2_SB(dentry->d_sb);
64 64
65 mlog_entry("(0x%p, '%.*s')\n", dentry, 65 trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
66 dentry->d_name.len, dentry->d_name.name); 66 dentry->d_name.name);
67 67
68 /* For a negative dentry - 68 /* For a negative dentry -
69 * check the generation number of the parent and compare with the 69 * check the generation number of the parent and compare with the
@@ -73,9 +73,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
73 unsigned long gen = (unsigned long) dentry->d_fsdata; 73 unsigned long gen = (unsigned long) dentry->d_fsdata;
74 unsigned long pgen = 74 unsigned long pgen =
75 OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; 75 OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
76 mlog(0, "negative dentry: %.*s parent gen: %lu " 76
77 "dentry gen: %lu\n", 77 trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
78 dentry->d_name.len, dentry->d_name.name, pgen, gen); 78 dentry->d_name.name,
79 pgen, gen);
79 if (gen != pgen) 80 if (gen != pgen)
80 goto bail; 81 goto bail;
81 goto valid; 82 goto valid;
@@ -90,8 +91,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
90 /* did we or someone else delete this inode? */ 91 /* did we or someone else delete this inode? */
91 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { 92 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
92 spin_unlock(&OCFS2_I(inode)->ip_lock); 93 spin_unlock(&OCFS2_I(inode)->ip_lock);
93 mlog(0, "inode (%llu) deleted, returning false\n", 94 trace_ocfs2_dentry_revalidate_delete(
94 (unsigned long long)OCFS2_I(inode)->ip_blkno); 95 (unsigned long long)OCFS2_I(inode)->ip_blkno);
95 goto bail; 96 goto bail;
96 } 97 }
97 spin_unlock(&OCFS2_I(inode)->ip_lock); 98 spin_unlock(&OCFS2_I(inode)->ip_lock);
@@ -101,10 +102,9 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
101 * inode nlink hits zero, it never goes back. 102 * inode nlink hits zero, it never goes back.
102 */ 103 */
103 if (inode->i_nlink == 0) { 104 if (inode->i_nlink == 0) {
104 mlog(0, "Inode %llu orphaned, returning false " 105 trace_ocfs2_dentry_revalidate_orphaned(
105 "dir = %d\n", 106 (unsigned long long)OCFS2_I(inode)->ip_blkno,
106 (unsigned long long)OCFS2_I(inode)->ip_blkno, 107 S_ISDIR(inode->i_mode));
107 S_ISDIR(inode->i_mode));
108 goto bail; 108 goto bail;
109 } 109 }
110 110
@@ -113,9 +113,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
113 * redo it. 113 * redo it.
114 */ 114 */
115 if (!dentry->d_fsdata) { 115 if (!dentry->d_fsdata) {
116 mlog(0, "Inode %llu doesn't have dentry lock, " 116 trace_ocfs2_dentry_revalidate_nofsdata(
117 "returning false\n", 117 (unsigned long long)OCFS2_I(inode)->ip_blkno);
118 (unsigned long long)OCFS2_I(inode)->ip_blkno);
119 goto bail; 118 goto bail;
120 } 119 }
121 120
@@ -123,8 +122,7 @@ valid:
123 ret = 1; 122 ret = 1;
124 123
125bail: 124bail:
126 mlog_exit(ret); 125 trace_ocfs2_dentry_revalidate_ret(ret);
127
128 return ret; 126 return ret;
129} 127}
130 128
@@ -181,8 +179,8 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
181 179
182 spin_lock(&dentry->d_lock); 180 spin_lock(&dentry->d_lock);
183 if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { 181 if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
184 mlog(0, "dentry found: %.*s\n", 182 trace_ocfs2_find_local_alias(dentry->d_name.len,
185 dentry->d_name.len, dentry->d_name.name); 183 dentry->d_name.name);
186 184
187 dget_dlock(dentry); 185 dget_dlock(dentry);
188 spin_unlock(&dentry->d_lock); 186 spin_unlock(&dentry->d_lock);
@@ -240,9 +238,8 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
240 struct dentry *alias; 238 struct dentry *alias;
241 struct ocfs2_dentry_lock *dl = dentry->d_fsdata; 239 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
242 240
243 mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n", 241 trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name,
244 dentry->d_name.len, dentry->d_name.name, 242 (unsigned long long)parent_blkno, dl);
245 (unsigned long long)parent_blkno, dl);
246 243
247 /* 244 /*
248 * Negative dentry. We ignore these for now. 245 * Negative dentry. We ignore these for now.
@@ -292,7 +289,9 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
292 (unsigned long long)parent_blkno, 289 (unsigned long long)parent_blkno,
293 (unsigned long long)dl->dl_parent_blkno); 290 (unsigned long long)dl->dl_parent_blkno);
294 291
295 mlog(0, "Found: %s\n", dl->dl_lockres.l_name); 292 trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name,
293 (unsigned long long)parent_blkno,
294 (unsigned long long)OCFS2_I(inode)->ip_blkno);
296 295
297 goto out_attach; 296 goto out_attach;
298 } 297 }
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f97b6f1c61dd..9fe5b8fd658f 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -43,7 +43,6 @@
43#include <linux/quotaops.h> 43#include <linux/quotaops.h>
44#include <linux/sort.h> 44#include <linux/sort.h>
45 45
46#define MLOG_MASK_PREFIX ML_NAMEI
47#include <cluster/masklog.h> 46#include <cluster/masklog.h>
48 47
49#include "ocfs2.h" 48#include "ocfs2.h"
@@ -61,6 +60,7 @@
61#include "super.h" 60#include "super.h"
62#include "sysfile.h" 61#include "sysfile.h"
63#include "uptodate.h" 62#include "uptodate.h"
63#include "ocfs2_trace.h"
64 64
65#include "buffer_head_io.h" 65#include "buffer_head_io.h"
66 66
@@ -322,21 +322,23 @@ static int ocfs2_check_dir_entry(struct inode * dir,
322 const char *error_msg = NULL; 322 const char *error_msg = NULL;
323 const int rlen = le16_to_cpu(de->rec_len); 323 const int rlen = le16_to_cpu(de->rec_len);
324 324
325 if (rlen < OCFS2_DIR_REC_LEN(1)) 325 if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
326 error_msg = "rec_len is smaller than minimal"; 326 error_msg = "rec_len is smaller than minimal";
327 else if (rlen % 4 != 0) 327 else if (unlikely(rlen % 4 != 0))
328 error_msg = "rec_len % 4 != 0"; 328 error_msg = "rec_len % 4 != 0";
329 else if (rlen < OCFS2_DIR_REC_LEN(de->name_len)) 329 else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
330 error_msg = "rec_len is too small for name_len"; 330 error_msg = "rec_len is too small for name_len";
331 else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) 331 else if (unlikely(
332 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
332 error_msg = "directory entry across blocks"; 333 error_msg = "directory entry across blocks";
333 334
334 if (error_msg != NULL) 335 if (unlikely(error_msg != NULL))
335 mlog(ML_ERROR, "bad entry in directory #%llu: %s - " 336 mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
336 "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n", 337 "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n",
337 (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg, 338 (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg,
338 offset, (unsigned long long)le64_to_cpu(de->inode), rlen, 339 offset, (unsigned long long)le64_to_cpu(de->inode), rlen,
339 de->name_len); 340 de->name_len);
341
340 return error_msg == NULL ? 1 : 0; 342 return error_msg == NULL ? 1 : 0;
341} 343}
342 344
@@ -367,8 +369,6 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
367 int de_len; 369 int de_len;
368 int ret = 0; 370 int ret = 0;
369 371
370 mlog_entry_void();
371
372 de_buf = first_de; 372 de_buf = first_de;
373 dlimit = de_buf + bytes; 373 dlimit = de_buf + bytes;
374 374
@@ -402,7 +402,7 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
402 } 402 }
403 403
404bail: 404bail:
405 mlog_exit(ret); 405 trace_ocfs2_search_dirblock(ret);
406 return ret; 406 return ret;
407} 407}
408 408
@@ -447,8 +447,7 @@ static int ocfs2_validate_dir_block(struct super_block *sb,
447 * We don't validate dirents here, that's handled 447 * We don't validate dirents here, that's handled
448 * in-place when the code walks them. 448 * in-place when the code walks them.
449 */ 449 */
450 mlog(0, "Validating dirblock %llu\n", 450 trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr);
451 (unsigned long long)bh->b_blocknr);
452 451
453 BUG_ON(!buffer_uptodate(bh)); 452 BUG_ON(!buffer_uptodate(bh));
454 453
@@ -706,8 +705,6 @@ static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
706 int num = 0; 705 int num = 0;
707 int nblocks, i, err; 706 int nblocks, i, err;
708 707
709 mlog_entry_void();
710
711 sb = dir->i_sb; 708 sb = dir->i_sb;
712 709
713 nblocks = i_size_read(dir) >> sb->s_blocksize_bits; 710 nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
@@ -788,7 +785,7 @@ cleanup_and_exit:
788 for (; ra_ptr < ra_max; ra_ptr++) 785 for (; ra_ptr < ra_max; ra_ptr++)
789 brelse(bh_use[ra_ptr]); 786 brelse(bh_use[ra_ptr]);
790 787
791 mlog_exit_ptr(ret); 788 trace_ocfs2_find_entry_el(ret);
792 return ret; 789 return ret;
793} 790}
794 791
@@ -950,11 +947,9 @@ static int ocfs2_dx_dir_search(const char *name, int namelen,
950 goto out; 947 goto out;
951 } 948 }
952 949
953 mlog(0, "Dir %llu: name: \"%.*s\", lookup of hash: %u.0x%x " 950 trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno,
954 "returns: %llu\n", 951 namelen, name, hinfo->major_hash,
955 (unsigned long long)OCFS2_I(dir)->ip_blkno, 952 hinfo->minor_hash, (unsigned long long)phys);
956 namelen, name, hinfo->major_hash, hinfo->minor_hash,
957 (unsigned long long)phys);
958 953
959 ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh); 954 ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh);
960 if (ret) { 955 if (ret) {
@@ -964,9 +959,9 @@ static int ocfs2_dx_dir_search(const char *name, int namelen,
964 959
965 dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data; 960 dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data;
966 961
967 mlog(0, "leaf info: num_used: %d, count: %d\n", 962 trace_ocfs2_dx_dir_search_leaf_info(
968 le16_to_cpu(dx_leaf->dl_list.de_num_used), 963 le16_to_cpu(dx_leaf->dl_list.de_num_used),
969 le16_to_cpu(dx_leaf->dl_list.de_count)); 964 le16_to_cpu(dx_leaf->dl_list.de_count));
970 965
971 entry_list = &dx_leaf->dl_list; 966 entry_list = &dx_leaf->dl_list;
972 967
@@ -1166,8 +1161,6 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1166 int i, status = -ENOENT; 1161 int i, status = -ENOENT;
1167 ocfs2_journal_access_func access = ocfs2_journal_access_db; 1162 ocfs2_journal_access_func access = ocfs2_journal_access_db;
1168 1163
1169 mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh);
1170
1171 if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) 1164 if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1172 access = ocfs2_journal_access_di; 1165 access = ocfs2_journal_access_di;
1173 1166
@@ -1202,7 +1195,6 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
1202 de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len)); 1195 de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
1203 } 1196 }
1204bail: 1197bail:
1205 mlog_exit(status);
1206 return status; 1198 return status;
1207} 1199}
1208 1200
@@ -1348,8 +1340,8 @@ static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
1348 } 1340 }
1349 } 1341 }
1350 1342
1351 mlog(0, "Dir %llu: delete entry at index: %d\n", 1343 trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno,
1352 (unsigned long long)OCFS2_I(dir)->ip_blkno, index); 1344 index);
1353 1345
1354 ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry, 1346 ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry,
1355 leaf_bh, leaf_bh->b_data, leaf_bh->b_size); 1347 leaf_bh, leaf_bh->b_data, leaf_bh->b_size);
@@ -1632,8 +1624,6 @@ int __ocfs2_add_entry(handle_t *handle,
1632 struct buffer_head *insert_bh = lookup->dl_leaf_bh; 1624 struct buffer_head *insert_bh = lookup->dl_leaf_bh;
1633 char *data_start = insert_bh->b_data; 1625 char *data_start = insert_bh->b_data;
1634 1626
1635 mlog_entry_void();
1636
1637 if (!namelen) 1627 if (!namelen)
1638 return -EINVAL; 1628 return -EINVAL;
1639 1629
@@ -1765,8 +1755,9 @@ int __ocfs2_add_entry(handle_t *handle,
1765 * from ever getting here. */ 1755 * from ever getting here. */
1766 retval = -ENOSPC; 1756 retval = -ENOSPC;
1767bail: 1757bail:
1758 if (retval)
1759 mlog_errno(retval);
1768 1760
1769 mlog_exit(retval);
1770 return retval; 1761 return retval;
1771} 1762}
1772 1763
@@ -2028,8 +2019,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
2028 struct inode *inode = filp->f_path.dentry->d_inode; 2019 struct inode *inode = filp->f_path.dentry->d_inode;
2029 int lock_level = 0; 2020 int lock_level = 0;
2030 2021
2031 mlog_entry("dirino=%llu\n", 2022 trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
2032 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2033 2023
2034 error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); 2024 error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2035 if (lock_level && error >= 0) { 2025 if (lock_level && error >= 0) {
@@ -2051,9 +2041,10 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
2051 dirent, filldir, NULL); 2041 dirent, filldir, NULL);
2052 2042
2053 ocfs2_inode_unlock(inode, lock_level); 2043 ocfs2_inode_unlock(inode, lock_level);
2044 if (error)
2045 mlog_errno(error);
2054 2046
2055bail_nolock: 2047bail_nolock:
2056 mlog_exit(error);
2057 2048
2058 return error; 2049 return error;
2059} 2050}
@@ -2069,8 +2060,8 @@ int ocfs2_find_files_on_disk(const char *name,
2069{ 2060{
2070 int status = -ENOENT; 2061 int status = -ENOENT;
2071 2062
2072 mlog(0, "name=%.*s, blkno=%p, inode=%llu\n", namelen, name, blkno, 2063 trace_ocfs2_find_files_on_disk(namelen, name, blkno,
2073 (unsigned long long)OCFS2_I(inode)->ip_blkno); 2064 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2074 2065
2075 status = ocfs2_find_entry(name, namelen, inode, lookup); 2066 status = ocfs2_find_entry(name, namelen, inode, lookup);
2076 if (status) 2067 if (status)
@@ -2114,8 +2105,8 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
2114 int ret; 2105 int ret;
2115 struct ocfs2_dir_lookup_result lookup = { NULL, }; 2106 struct ocfs2_dir_lookup_result lookup = { NULL, };
2116 2107
2117 mlog_entry("dir %llu, name '%.*s'\n", 2108 trace_ocfs2_check_dir_for_entry(
2118 (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); 2109 (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
2119 2110
2120 ret = -EEXIST; 2111 ret = -EEXIST;
2121 if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) 2112 if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0)
@@ -2125,7 +2116,8 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
2125bail: 2116bail:
2126 ocfs2_free_dir_lookup_result(&lookup); 2117 ocfs2_free_dir_lookup_result(&lookup);
2127 2118
2128 mlog_exit(ret); 2119 if (ret)
2120 mlog_errno(ret);
2129 return ret; 2121 return ret;
2130} 2122}
2131 2123
@@ -2324,8 +2316,6 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
2324 struct buffer_head *new_bh = NULL; 2316 struct buffer_head *new_bh = NULL;
2325 struct ocfs2_dir_entry *de; 2317 struct ocfs2_dir_entry *de;
2326 2318
2327 mlog_entry_void();
2328
2329 if (ocfs2_new_dir_wants_trailer(inode)) 2319 if (ocfs2_new_dir_wants_trailer(inode))
2330 size = ocfs2_dir_trailer_blk_off(parent->i_sb); 2320 size = ocfs2_dir_trailer_blk_off(parent->i_sb);
2331 2321
@@ -2380,7 +2370,6 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
2380bail: 2370bail:
2381 brelse(new_bh); 2371 brelse(new_bh);
2382 2372
2383 mlog_exit(status);
2384 return status; 2373 return status;
2385} 2374}
2386 2375
@@ -2409,9 +2398,9 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2409 goto out; 2398 goto out;
2410 } 2399 }
2411 2400
2412 mlog(0, "Dir %llu, attach new index block: %llu\n", 2401 trace_ocfs2_dx_dir_attach_index(
2413 (unsigned long long)OCFS2_I(dir)->ip_blkno, 2402 (unsigned long long)OCFS2_I(dir)->ip_blkno,
2414 (unsigned long long)dr_blkno); 2403 (unsigned long long)dr_blkno);
2415 2404
2416 dx_root_bh = sb_getblk(osb->sb, dr_blkno); 2405 dx_root_bh = sb_getblk(osb->sb, dr_blkno);
2417 if (dx_root_bh == NULL) { 2406 if (dx_root_bh == NULL) {
@@ -2511,11 +2500,10 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
2511 dx_leaf->dl_list.de_count = 2500 dx_leaf->dl_list.de_count =
2512 cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb)); 2501 cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb));
2513 2502
2514 mlog(0, 2503 trace_ocfs2_dx_dir_format_cluster(
2515 "Dir %llu, format dx_leaf: %llu, entry count: %u\n", 2504 (unsigned long long)OCFS2_I(dir)->ip_blkno,
2516 (unsigned long long)OCFS2_I(dir)->ip_blkno, 2505 (unsigned long long)bh->b_blocknr,
2517 (unsigned long long)bh->b_blocknr, 2506 le16_to_cpu(dx_leaf->dl_list.de_count));
2518 le16_to_cpu(dx_leaf->dl_list.de_count));
2519 2507
2520 ocfs2_journal_dirty(handle, bh); 2508 ocfs2_journal_dirty(handle, bh);
2521 } 2509 }
@@ -2759,12 +2747,11 @@ static void ocfs2_dx_dir_index_root_block(struct inode *dir,
2759 2747
2760 ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo); 2748 ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo);
2761 2749
2762 mlog(0, 2750 trace_ocfs2_dx_dir_index_root_block(
2763 "dir: %llu, major: 0x%x minor: 0x%x, index: %u, name: %.*s\n", 2751 (unsigned long long)dir->i_ino,
2764 (unsigned long long)dir->i_ino, hinfo.major_hash, 2752 hinfo.major_hash, hinfo.minor_hash,
2765 hinfo.minor_hash, 2753 de->name_len, de->name,
2766 le16_to_cpu(dx_root->dr_entries.de_num_used), 2754 le16_to_cpu(dx_root->dr_entries.de_num_used));
2767 de->name_len, de->name);
2768 2755
2769 ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo, 2756 ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo,
2770 dirent_blk); 2757 dirent_blk);
@@ -3235,7 +3222,6 @@ static int ocfs2_do_extend_dir(struct super_block *sb,
3235bail: 3222bail:
3236 if (did_quota && status < 0) 3223 if (did_quota && status < 0)
3237 dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); 3224 dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
3238 mlog_exit(status);
3239 return status; 3225 return status;
3240} 3226}
3241 3227
@@ -3270,8 +3256,6 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
3270 struct ocfs2_extent_tree et; 3256 struct ocfs2_extent_tree et;
3271 struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; 3257 struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
3272 3258
3273 mlog_entry_void();
3274
3275 if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 3259 if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
3276 /* 3260 /*
3277 * This would be a code error as an inline directory should 3261 * This would be a code error as an inline directory should
@@ -3320,8 +3304,8 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
3320 down_write(&OCFS2_I(dir)->ip_alloc_sem); 3304 down_write(&OCFS2_I(dir)->ip_alloc_sem);
3321 drop_alloc_sem = 1; 3305 drop_alloc_sem = 1;
3322 dir_i_size = i_size_read(dir); 3306 dir_i_size = i_size_read(dir);
3323 mlog(0, "extending dir %llu (i_size = %lld)\n", 3307 trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno,
3324 (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size); 3308 dir_i_size);
3325 3309
3326 /* dir->i_size is always block aligned. */ 3310 /* dir->i_size is always block aligned. */
3327 spin_lock(&OCFS2_I(dir)->ip_lock); 3311 spin_lock(&OCFS2_I(dir)->ip_lock);
@@ -3436,7 +3420,6 @@ bail:
3436 3420
3437 brelse(new_bh); 3421 brelse(new_bh);
3438 3422
3439 mlog_exit(status);
3440 return status; 3423 return status;
3441} 3424}
3442 3425
@@ -3583,8 +3566,9 @@ next:
3583 status = 0; 3566 status = 0;
3584bail: 3567bail:
3585 brelse(bh); 3568 brelse(bh);
3569 if (status)
3570 mlog_errno(status);
3586 3571
3587 mlog_exit(status);
3588 return status; 3572 return status;
3589} 3573}
3590 3574
@@ -3815,9 +3799,9 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3815 struct ocfs2_dx_root_block *dx_root; 3799 struct ocfs2_dx_root_block *dx_root;
3816 struct ocfs2_dx_leaf *tmp_dx_leaf = NULL; 3800 struct ocfs2_dx_leaf *tmp_dx_leaf = NULL;
3817 3801
3818 mlog(0, "DX Dir: %llu, rebalance leaf leaf_blkno: %llu insert: %u\n", 3802 trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno,
3819 (unsigned long long)OCFS2_I(dir)->ip_blkno, 3803 (unsigned long long)leaf_blkno,
3820 (unsigned long long)leaf_blkno, insert_hash); 3804 insert_hash);
3821 3805
3822 ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); 3806 ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
3823 3807
@@ -3897,8 +3881,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3897 goto out_commit; 3881 goto out_commit;
3898 } 3882 }
3899 3883
3900 mlog(0, "Split leaf (%u) at %u, insert major hash is %u\n", 3884 trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash);
3901 leaf_cpos, split_hash, insert_hash);
3902 3885
3903 /* 3886 /*
3904 * We have to carefully order operations here. There are items 3887 * We have to carefully order operations here. There are items
@@ -4355,8 +4338,8 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
4355 unsigned int blocks_wanted = 1; 4338 unsigned int blocks_wanted = 1;
4356 struct buffer_head *bh = NULL; 4339 struct buffer_head *bh = NULL;
4357 4340
4358 mlog(0, "getting ready to insert namelen %d into dir %llu\n", 4341 trace_ocfs2_prepare_dir_for_insert(
4359 namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno); 4342 (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen);
4360 4343
4361 if (!namelen) { 4344 if (!namelen) {
4362 ret = -EINVAL; 4345 ret = -EINVAL;
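The dir.c hunks above replace mlog(0, ...) debug prints with trace_ocfs2_* tracepoints, whose definitions live in fs/ocfs2/ocfs2_trace.h and are not part of this diff. As a rough, hypothetical sketch of how such a tracepoint is declared with the kernel's TRACE_EVENT() macro (event name and fields are invented here, and the usual include-guard plus define_trace.h boilerplate of a trace header is omitted):

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_example_extend_dir,
	TP_PROTO(unsigned long long ip_blkno, long long i_size),
	TP_ARGS(ip_blkno, i_size),
	TP_STRUCT__entry(
		__field(unsigned long long, ip_blkno)
		__field(long long, i_size)
	),
	TP_fast_assign(
		__entry->ip_blkno = ip_blkno;
		__entry->i_size = i_size;
	),
	TP_printk("%llu %lld", __entry->ip_blkno, __entry->i_size)
);

A caller would then invoke trace_ocfs2_example_extend_dir(blkno, i_size) at the point where the mlog() used to sit, and the event can be enabled at runtime through tracefs rather than being compiled-in log noise.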
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 9f30491e5e88..29a886d1e82c 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -128,8 +128,8 @@ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
128 128
129 assert_spin_locked(&res->spinlock); 129 assert_spin_locked(&res->spinlock);
130 130
131 mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n", 131 mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",
132 lock->ml.type, lock->ml.convert_type, type); 132 lock->ml.type, lock->ml.convert_type, type);
133 133
134 spin_lock(&lock->spinlock); 134 spin_lock(&lock->spinlock);
135 135
@@ -353,7 +353,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
353 struct kvec vec[2]; 353 struct kvec vec[2];
354 size_t veclen = 1; 354 size_t veclen = 1;
355 355
356 mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 356 mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
357 357
358 memset(&convert, 0, sizeof(struct dlm_convert_lock)); 358 memset(&convert, 0, sizeof(struct dlm_convert_lock));
359 convert.node_idx = dlm->node_num; 359 convert.node_idx = dlm->node_num;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7e38a072d720..7540a492eaba 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -188,7 +188,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
188 struct hlist_head *bucket; 188 struct hlist_head *bucket;
189 struct hlist_node *list; 189 struct hlist_node *list;
190 190
191 mlog_entry("%.*s\n", len, name); 191 mlog(0, "%.*s\n", len, name);
192 192
193 assert_spin_locked(&dlm->spinlock); 193 assert_spin_locked(&dlm->spinlock);
194 194
@@ -222,7 +222,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
222{ 222{
223 struct dlm_lock_resource *res = NULL; 223 struct dlm_lock_resource *res = NULL;
224 224
225 mlog_entry("%.*s\n", len, name); 225 mlog(0, "%.*s\n", len, name);
226 226
227 assert_spin_locked(&dlm->spinlock); 227 assert_spin_locked(&dlm->spinlock);
228 228
@@ -531,7 +531,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
531 unsigned int node; 531 unsigned int node;
532 struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; 532 struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
533 533
534 mlog_entry("%p %u %p", msg, len, data); 534 mlog(0, "%p %u %p", msg, len, data);
535 535
536 if (!dlm_grab(dlm)) 536 if (!dlm_grab(dlm))
537 return 0; 537 return 0;
@@ -926,9 +926,10 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
926} 926}
927 927
928static int dlm_match_regions(struct dlm_ctxt *dlm, 928static int dlm_match_regions(struct dlm_ctxt *dlm,
929 struct dlm_query_region *qr) 929 struct dlm_query_region *qr,
930 char *local, int locallen)
930{ 931{
931 char *local = NULL, *remote = qr->qr_regions; 932 char *remote = qr->qr_regions;
932 char *l, *r; 933 char *l, *r;
933 int localnr, i, j, foundit; 934 int localnr, i, j, foundit;
934 int status = 0; 935 int status = 0;
@@ -957,13 +958,8 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
957 r += O2HB_MAX_REGION_NAME_LEN; 958 r += O2HB_MAX_REGION_NAME_LEN;
958 } 959 }
959 960
960 local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC); 961 localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN);
961 if (!local) { 962 localnr = o2hb_get_all_regions(local, (u8)localnr);
962 status = -ENOMEM;
963 goto bail;
964 }
965
966 localnr = o2hb_get_all_regions(local, O2NM_MAX_REGIONS);
967 963
968 /* compare local regions with remote */ 964 /* compare local regions with remote */
969 l = local; 965 l = local;
@@ -1012,8 +1008,6 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
1012 } 1008 }
1013 1009
1014bail: 1010bail:
1015 kfree(local);
1016
1017 return status; 1011 return status;
1018} 1012}
1019 1013
@@ -1075,6 +1069,7 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
1075{ 1069{
1076 struct dlm_query_region *qr; 1070 struct dlm_query_region *qr;
1077 struct dlm_ctxt *dlm = NULL; 1071 struct dlm_ctxt *dlm = NULL;
1072 char *local = NULL;
1078 int status = 0; 1073 int status = 0;
1079 int locked = 0; 1074 int locked = 0;
1080 1075
@@ -1083,6 +1078,13 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
1083 mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node, 1078 mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node,
1084 qr->qr_domain); 1079 qr->qr_domain);
1085 1080
 1081 /* buffer used in dlm_match_regions() */
1082 local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL);
1083 if (!local) {
1084 status = -ENOMEM;
1085 goto bail;
1086 }
1087
1086 status = -EINVAL; 1088 status = -EINVAL;
1087 1089
1088 spin_lock(&dlm_domain_lock); 1090 spin_lock(&dlm_domain_lock);
@@ -1112,13 +1114,15 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
1112 goto bail; 1114 goto bail;
1113 } 1115 }
1114 1116
1115 status = dlm_match_regions(dlm, qr); 1117 status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
1116 1118
1117bail: 1119bail:
1118 if (locked) 1120 if (locked)
1119 spin_unlock(&dlm->spinlock); 1121 spin_unlock(&dlm->spinlock);
1120 spin_unlock(&dlm_domain_lock); 1122 spin_unlock(&dlm_domain_lock);
1121 1123
1124 kfree(local);
1125
1122 return status; 1126 return status;
1123} 1127}
1124 1128
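The two dlmdomain.c hunks above move the region-buffer allocation out of dlm_match_regions(), which runs under dlm_domain_lock and therefore had to use GFP_ATOMIC, and into dlm_query_region_handler(), which may sleep; the buffer is passed down and freed by the caller. A minimal sketch of that general pattern, with hypothetical names and a fixed buffer size:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define EXAMPLE_BUF_LEN 256

/* runs with the spinlock held; only consumes the caller-supplied buffer */
static int example_match(char *buf, int buflen)
{
	memset(buf, 0, buflen);
	return 0;
}

static int example_handler(spinlock_t *lock)
{
	char *buf;
	int ret;

	/* allocate in sleepable context, before taking the lock */
	buf = kmalloc(EXAMPLE_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(lock);
	ret = example_match(buf, EXAMPLE_BUF_LEN);
	spin_unlock(lock);

	kfree(buf);
	return ret;
}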
@@ -1553,7 +1557,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1553 struct domain_join_ctxt *ctxt; 1557 struct domain_join_ctxt *ctxt;
1554 enum dlm_query_join_response_code response = JOIN_DISALLOW; 1558 enum dlm_query_join_response_code response = JOIN_DISALLOW;
1555 1559
1556 mlog_entry("%p", dlm); 1560 mlog(0, "%p", dlm);
1557 1561
1558 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 1562 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1559 if (!ctxt) { 1563 if (!ctxt) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 7009292aac5a..8d39e0fd66f7 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -128,7 +128,7 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
128 int call_ast = 0, kick_thread = 0; 128 int call_ast = 0, kick_thread = 0;
129 enum dlm_status status = DLM_NORMAL; 129 enum dlm_status status = DLM_NORMAL;
130 130
131 mlog_entry("type=%d\n", lock->ml.type); 131 mlog(0, "type=%d\n", lock->ml.type);
132 132
133 spin_lock(&res->spinlock); 133 spin_lock(&res->spinlock);
134 /* if called from dlm_create_lock_handler, need to 134 /* if called from dlm_create_lock_handler, need to
@@ -227,8 +227,8 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
227 enum dlm_status status = DLM_DENIED; 227 enum dlm_status status = DLM_DENIED;
228 int lockres_changed = 1; 228 int lockres_changed = 1;
229 229
230 mlog_entry("type=%d\n", lock->ml.type); 230 mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
231 mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len, 231 lock->ml.type, res->lockname.len,
232 res->lockname.name, flags); 232 res->lockname.name, flags);
233 233
234 spin_lock(&res->spinlock); 234 spin_lock(&res->spinlock);
@@ -308,8 +308,6 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
308 int tmpret, status = 0; 308 int tmpret, status = 0;
309 enum dlm_status ret; 309 enum dlm_status ret;
310 310
311 mlog_entry_void();
312
313 memset(&create, 0, sizeof(create)); 311 memset(&create, 0, sizeof(create));
314 create.node_idx = dlm->node_num; 312 create.node_idx = dlm->node_num;
315 create.requested_type = lock->ml.type; 313 create.requested_type = lock->ml.type;
@@ -477,8 +475,6 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
477 475
478 BUG_ON(!dlm); 476 BUG_ON(!dlm);
479 477
480 mlog_entry_void();
481
482 if (!dlm_grab(dlm)) 478 if (!dlm_grab(dlm))
483 return DLM_REJECTED; 479 return DLM_REJECTED;
484 480
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 59f0f6bdfc62..9d67610dfc74 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -426,8 +426,6 @@ static void dlm_mle_release(struct kref *kref)
426 struct dlm_master_list_entry *mle; 426 struct dlm_master_list_entry *mle;
427 struct dlm_ctxt *dlm; 427 struct dlm_ctxt *dlm;
428 428
429 mlog_entry_void();
430
431 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); 429 mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
432 dlm = mle->dlm; 430 dlm = mle->dlm;
433 431
@@ -3120,8 +3118,6 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3120 3118
3121 *oldmle = NULL; 3119 *oldmle = NULL;
3122 3120
3123 mlog_entry_void();
3124
3125 assert_spin_locked(&dlm->spinlock); 3121 assert_spin_locked(&dlm->spinlock);
3126 assert_spin_locked(&dlm->master_lock); 3122 assert_spin_locked(&dlm->master_lock);
3127 3123
@@ -3261,7 +3257,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3261 struct hlist_node *list; 3257 struct hlist_node *list;
3262 unsigned int i; 3258 unsigned int i;
3263 3259
3264 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); 3260 mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3265top: 3261top:
3266 assert_spin_locked(&dlm->spinlock); 3262 assert_spin_locked(&dlm->spinlock);
3267 3263
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index aaaffbcbe916..f1beb6fc254d 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -727,7 +727,6 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
727 if (destroy) 727 if (destroy)
728 dlm_destroy_recovery_area(dlm, dead_node); 728 dlm_destroy_recovery_area(dlm, dead_node);
729 729
730 mlog_exit(status);
731 return status; 730 return status;
732} 731}
733 732
@@ -1496,9 +1495,9 @@ leave:
1496 kfree(buf); 1495 kfree(buf);
1497 if (item) 1496 if (item)
1498 kfree(item); 1497 kfree(item);
1498 mlog_errno(ret);
1499 } 1499 }
1500 1500
1501 mlog_exit(ret);
1502 return ret; 1501 return ret;
1503} 1502}
1504 1503
@@ -1567,7 +1566,6 @@ leave:
1567 dlm_lockres_put(res); 1566 dlm_lockres_put(res);
1568 } 1567 }
1569 kfree(data); 1568 kfree(data);
1570 mlog_exit(ret);
1571} 1569}
1572 1570
1573 1571
@@ -1986,7 +1984,6 @@ leave:
1986 dlm_lock_put(newlock); 1984 dlm_lock_put(newlock);
1987 } 1985 }
1988 1986
1989 mlog_exit(ret);
1990 return ret; 1987 return ret;
1991} 1988}
1992 1989
@@ -2083,8 +2080,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2083 struct hlist_head *bucket; 2080 struct hlist_head *bucket;
2084 struct dlm_lock_resource *res, *next; 2081 struct dlm_lock_resource *res, *next;
2085 2082
2086 mlog_entry_void();
2087
2088 assert_spin_locked(&dlm->spinlock); 2083 assert_spin_locked(&dlm->spinlock);
2089 2084
2090 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { 2085 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
@@ -2607,8 +2602,6 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2607 int nodenum; 2602 int nodenum;
2608 int status; 2603 int status;
2609 2604
2610 mlog_entry("%u\n", dead_node);
2611
2612 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); 2605 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2613 2606
2614 spin_lock(&dlm->spinlock); 2607 spin_lock(&dlm->spinlock);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 817287c6a6db..850aa7e87537 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -317,7 +317,7 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
317 struct kvec vec[2]; 317 struct kvec vec[2];
318 size_t veclen = 1; 318 size_t veclen = 1;
319 319
320 mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 320 mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
321 321
322 if (owner == dlm->node_num) { 322 if (owner == dlm->node_num) {
323 /* ended up trying to contact ourself. this means 323 /* ended up trying to contact ourself. this means
@@ -588,8 +588,6 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
588 struct dlm_lock *lock = NULL; 588 struct dlm_lock *lock = NULL;
589 int call_ast, is_master; 589 int call_ast, is_master;
590 590
591 mlog_entry_void();
592
593 if (!lksb) { 591 if (!lksb) {
594 dlm_error(DLM_BADARGS); 592 dlm_error(DLM_BADARGS);
595 return DLM_BADARGS; 593 return DLM_BADARGS;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e8d94d722ecb..7642d7ca73e5 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -64,7 +64,7 @@ struct ocfs2_mask_waiter {
64 unsigned long mw_mask; 64 unsigned long mw_mask;
65 unsigned long mw_goal; 65 unsigned long mw_goal;
66#ifdef CONFIG_OCFS2_FS_STATS 66#ifdef CONFIG_OCFS2_FS_STATS
67 unsigned long long mw_lock_start; 67 ktime_t mw_lock_start;
68#endif 68#endif
69}; 69};
70 70
@@ -397,8 +397,6 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
397{ 397{
398 int len; 398 int len;
399 399
400 mlog_entry_void();
401
402 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES); 400 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
403 401
404 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x", 402 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
@@ -408,8 +406,6 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
408 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1)); 406 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
409 407
410 mlog(0, "built lock resource with name: %s\n", name); 408 mlog(0, "built lock resource with name: %s\n", name);
411
412 mlog_exit_void();
413} 409}
414 410
415static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock); 411static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
@@ -435,44 +431,41 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
435#ifdef CONFIG_OCFS2_FS_STATS 431#ifdef CONFIG_OCFS2_FS_STATS
436static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) 432static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
437{ 433{
438 res->l_lock_num_prmode = 0;
439 res->l_lock_num_prmode_failed = 0;
440 res->l_lock_total_prmode = 0;
441 res->l_lock_max_prmode = 0;
442 res->l_lock_num_exmode = 0;
443 res->l_lock_num_exmode_failed = 0;
444 res->l_lock_total_exmode = 0;
445 res->l_lock_max_exmode = 0;
446 res->l_lock_refresh = 0; 434 res->l_lock_refresh = 0;
435 memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
436 memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
447} 437}
448 438
449static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, 439static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
450 struct ocfs2_mask_waiter *mw, int ret) 440 struct ocfs2_mask_waiter *mw, int ret)
451{ 441{
452 unsigned long long *num, *sum; 442 u32 usec;
453 unsigned int *max, *failed; 443 ktime_t kt;
454 struct timespec ts = current_kernel_time(); 444 struct ocfs2_lock_stats *stats;
455 unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start; 445
456 446 if (level == LKM_PRMODE)
457 if (level == LKM_PRMODE) { 447 stats = &res->l_lock_prmode;
458 num = &res->l_lock_num_prmode; 448 else if (level == LKM_EXMODE)
459 sum = &res->l_lock_total_prmode; 449 stats = &res->l_lock_exmode;
460 max = &res->l_lock_max_prmode; 450 else
461 failed = &res->l_lock_num_prmode_failed;
462 } else if (level == LKM_EXMODE) {
463 num = &res->l_lock_num_exmode;
464 sum = &res->l_lock_total_exmode;
465 max = &res->l_lock_max_exmode;
466 failed = &res->l_lock_num_exmode_failed;
467 } else
468 return; 451 return;
469 452
470 (*num)++; 453 kt = ktime_sub(ktime_get(), mw->mw_lock_start);
471 (*sum) += time; 454 usec = ktime_to_us(kt);
472 if (time > *max) 455
473 *max = time; 456 stats->ls_gets++;
457 stats->ls_total += ktime_to_ns(kt);
458 /* overflow */
 459 if (unlikely(stats->ls_gets == 0)) {
460 stats->ls_gets++;
461 stats->ls_total = ktime_to_ns(kt);
462 }
463
464 if (stats->ls_max < usec)
465 stats->ls_max = usec;
466
474 if (ret) 467 if (ret)
475 (*failed)++; 468 stats->ls_fail++;
476} 469}
477 470
478static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) 471static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
@@ -482,8 +475,7 @@ static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
482 475
483static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) 476static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
484{ 477{
485 struct timespec ts = current_kernel_time(); 478 mw->mw_lock_start = ktime_get();
486 mw->mw_lock_start = timespec_to_ns(&ts);
487} 479}
488#else 480#else
489static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) 481static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
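The stats hunks above replace current_kernel_time()/timespec arithmetic with ktime_get(), storing the start stamp as a ktime_t and converting the delta once when it is accounted. A standalone sketch of that ktime pattern, with hypothetical struct and function names (not the ocfs2 code itself):

#include <linux/ktime.h>
#include <linux/types.h>

struct example_stats {
	u32 gets;	/* number of samples */
	u64 total_ns;	/* accumulated wait time, in ns */
	u32 max_us;	/* longest single wait, in us */
};

static void example_account(struct example_stats *s, ktime_t start)
{
	ktime_t delta = ktime_sub(ktime_get(), start);
	u32 usec = ktime_to_us(delta);

	s->gets++;
	s->total_ns += ktime_to_ns(delta);
	if (usec > s->max_us)
		s->max_us = usec;
}

The caller records the start stamp with ktime_get() (as ocfs2_init_start_time() now does) and hands it to the accounting helper once the wait completes.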
@@ -729,8 +721,6 @@ void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
729 721
730void ocfs2_lock_res_free(struct ocfs2_lock_res *res) 722void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
731{ 723{
732 mlog_entry_void();
733
734 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED)) 724 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
735 return; 725 return;
736 726
@@ -756,14 +746,11 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
756 memset(&res->l_lksb, 0, sizeof(res->l_lksb)); 746 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
757 747
758 res->l_flags = 0UL; 748 res->l_flags = 0UL;
759 mlog_exit_void();
760} 749}
761 750
762static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres, 751static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
763 int level) 752 int level)
764{ 753{
765 mlog_entry_void();
766
767 BUG_ON(!lockres); 754 BUG_ON(!lockres);
768 755
769 switch(level) { 756 switch(level) {
@@ -776,15 +763,11 @@ static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
776 default: 763 default:
777 BUG(); 764 BUG();
778 } 765 }
779
780 mlog_exit_void();
781} 766}
782 767
783static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres, 768static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
784 int level) 769 int level)
785{ 770{
786 mlog_entry_void();
787
788 BUG_ON(!lockres); 771 BUG_ON(!lockres);
789 772
790 switch(level) { 773 switch(level) {
@@ -799,7 +782,6 @@ static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
799 default: 782 default:
800 BUG(); 783 BUG();
801 } 784 }
802 mlog_exit_void();
803} 785}
804 786
805/* WARNING: This function lives in a world where the only three lock 787/* WARNING: This function lives in a world where the only three lock
@@ -846,8 +828,6 @@ static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
846 828
847static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres) 829static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
848{ 830{
849 mlog_entry_void();
850
851 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); 831 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
852 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED)); 832 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
853 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); 833 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
@@ -860,14 +840,10 @@ static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res
860 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); 840 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
861 } 841 }
862 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 842 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
863
864 mlog_exit_void();
865} 843}
866 844
867static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres) 845static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
868{ 846{
869 mlog_entry_void();
870
871 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); 847 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
872 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED)); 848 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
873 849
@@ -889,14 +865,10 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
889 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); 865 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
890 866
891 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 867 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
892
893 mlog_exit_void();
894} 868}
895 869
896static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres) 870static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
897{ 871{
898 mlog_entry_void();
899
900 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY))); 872 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
901 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED); 873 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
902 874
@@ -908,15 +880,12 @@ static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *loc
908 lockres->l_level = lockres->l_requested; 880 lockres->l_level = lockres->l_requested;
909 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED); 881 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
910 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 882 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
911
912 mlog_exit_void();
913} 883}
914 884
915static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, 885static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
916 int level) 886 int level)
917{ 887{
918 int needs_downconvert = 0; 888 int needs_downconvert = 0;
919 mlog_entry_void();
920 889
921 assert_spin_locked(&lockres->l_lock); 890 assert_spin_locked(&lockres->l_lock);
922 891
@@ -938,8 +907,7 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
938 907
939 if (needs_downconvert) 908 if (needs_downconvert)
940 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); 909 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
941 910 mlog(0, "needs_downconvert = %d\n", needs_downconvert);
942 mlog_exit(needs_downconvert);
943 return needs_downconvert; 911 return needs_downconvert;
944} 912}
945 913
@@ -1151,8 +1119,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1151 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); 1119 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1152 unsigned long flags; 1120 unsigned long flags;
1153 1121
1154 mlog_entry_void();
1155
1156 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n", 1122 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1157 lockres->l_name, lockres->l_unlock_action); 1123 lockres->l_name, lockres->l_unlock_action);
1158 1124
@@ -1162,7 +1128,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1162 "unlock_action %d\n", error, lockres->l_name, 1128 "unlock_action %d\n", error, lockres->l_name,
1163 lockres->l_unlock_action); 1129 lockres->l_unlock_action);
1164 spin_unlock_irqrestore(&lockres->l_lock, flags); 1130 spin_unlock_irqrestore(&lockres->l_lock, flags);
1165 mlog_exit_void();
1166 return; 1131 return;
1167 } 1132 }
1168 1133
@@ -1186,8 +1151,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1186 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; 1151 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1187 wake_up(&lockres->l_event); 1152 wake_up(&lockres->l_event);
1188 spin_unlock_irqrestore(&lockres->l_lock, flags); 1153 spin_unlock_irqrestore(&lockres->l_lock, flags);
1189
1190 mlog_exit_void();
1191} 1154}
1192 1155
1193/* 1156/*
@@ -1233,7 +1196,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1233{ 1196{
1234 unsigned long flags; 1197 unsigned long flags;
1235 1198
1236 mlog_entry_void();
1237 spin_lock_irqsave(&lockres->l_lock, flags); 1199 spin_lock_irqsave(&lockres->l_lock, flags);
1238 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 1200 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1239 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); 1201 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
@@ -1244,7 +1206,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1244 spin_unlock_irqrestore(&lockres->l_lock, flags); 1206 spin_unlock_irqrestore(&lockres->l_lock, flags);
1245 1207
1246 wake_up(&lockres->l_event); 1208 wake_up(&lockres->l_event);
1247 mlog_exit_void();
1248} 1209}
1249 1210
1250/* Note: If we detect another process working on the lock (i.e., 1211/* Note: If we detect another process working on the lock (i.e.,
@@ -1260,8 +1221,6 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
1260 unsigned long flags; 1221 unsigned long flags;
1261 unsigned int gen; 1222 unsigned int gen;
1262 1223
1263 mlog_entry_void();
1264
1265 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level, 1224 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1266 dlm_flags); 1225 dlm_flags);
1267 1226
@@ -1293,7 +1252,6 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
1293 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name); 1252 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1294 1253
1295bail: 1254bail:
1296 mlog_exit(ret);
1297 return ret; 1255 return ret;
1298} 1256}
1299 1257
@@ -1416,8 +1374,6 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1416 unsigned int gen; 1374 unsigned int gen;
1417 int noqueue_attempted = 0; 1375 int noqueue_attempted = 0;
1418 1376
1419 mlog_entry_void();
1420
1421 ocfs2_init_mask_waiter(&mw); 1377 ocfs2_init_mask_waiter(&mw);
1422 1378
1423 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) 1379 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
@@ -1583,7 +1539,6 @@ out:
1583 caller_ip); 1539 caller_ip);
1584 } 1540 }
1585#endif 1541#endif
1586 mlog_exit(ret);
1587 return ret; 1542 return ret;
1588} 1543}
1589 1544
@@ -1605,7 +1560,6 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1605{ 1560{
1606 unsigned long flags; 1561 unsigned long flags;
1607 1562
1608 mlog_entry_void();
1609 spin_lock_irqsave(&lockres->l_lock, flags); 1563 spin_lock_irqsave(&lockres->l_lock, flags);
1610 ocfs2_dec_holders(lockres, level); 1564 ocfs2_dec_holders(lockres, level);
1611 ocfs2_downconvert_on_unlock(osb, lockres); 1565 ocfs2_downconvert_on_unlock(osb, lockres);
@@ -1614,7 +1568,6 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1614 if (lockres->l_lockdep_map.key != NULL) 1568 if (lockres->l_lockdep_map.key != NULL)
1615 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip); 1569 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1616#endif 1570#endif
1617 mlog_exit_void();
1618} 1571}
1619 1572
1620static int ocfs2_create_new_lock(struct ocfs2_super *osb, 1573static int ocfs2_create_new_lock(struct ocfs2_super *osb,
@@ -1648,8 +1601,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
1648 BUG_ON(!inode); 1601 BUG_ON(!inode);
1649 BUG_ON(!ocfs2_inode_is_new(inode)); 1602 BUG_ON(!ocfs2_inode_is_new(inode));
1650 1603
1651 mlog_entry_void();
1652
1653 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); 1604 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1654 1605
1655 /* NOTE: That we don't increment any of the holder counts, nor 1606 /* NOTE: That we don't increment any of the holder counts, nor
@@ -1683,7 +1634,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
1683 } 1634 }
1684 1635
1685bail: 1636bail:
1686 mlog_exit(ret);
1687 return ret; 1637 return ret;
1688} 1638}
1689 1639
@@ -1695,16 +1645,12 @@ int ocfs2_rw_lock(struct inode *inode, int write)
1695 1645
1696 BUG_ON(!inode); 1646 BUG_ON(!inode);
1697 1647
1698 mlog_entry_void();
1699
1700 mlog(0, "inode %llu take %s RW lock\n", 1648 mlog(0, "inode %llu take %s RW lock\n",
1701 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1649 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1702 write ? "EXMODE" : "PRMODE"); 1650 write ? "EXMODE" : "PRMODE");
1703 1651
1704 if (ocfs2_mount_local(osb)) { 1652 if (ocfs2_mount_local(osb))
1705 mlog_exit(0);
1706 return 0; 1653 return 0;
1707 }
1708 1654
1709 lockres = &OCFS2_I(inode)->ip_rw_lockres; 1655 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1710 1656
@@ -1715,7 +1661,6 @@ int ocfs2_rw_lock(struct inode *inode, int write)
1715 if (status < 0) 1661 if (status < 0)
1716 mlog_errno(status); 1662 mlog_errno(status);
1717 1663
1718 mlog_exit(status);
1719 return status; 1664 return status;
1720} 1665}
1721 1666
@@ -1725,16 +1670,12 @@ void ocfs2_rw_unlock(struct inode *inode, int write)
1725 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres; 1670 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1726 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1671 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1727 1672
1728 mlog_entry_void();
1729
1730 mlog(0, "inode %llu drop %s RW lock\n", 1673 mlog(0, "inode %llu drop %s RW lock\n",
1731 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1674 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1732 write ? "EXMODE" : "PRMODE"); 1675 write ? "EXMODE" : "PRMODE");
1733 1676
1734 if (!ocfs2_mount_local(osb)) 1677 if (!ocfs2_mount_local(osb))
1735 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); 1678 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1736
1737 mlog_exit_void();
1738} 1679}
1739 1680
1740/* 1681/*
@@ -1748,8 +1689,6 @@ int ocfs2_open_lock(struct inode *inode)
1748 1689
1749 BUG_ON(!inode); 1690 BUG_ON(!inode);
1750 1691
1751 mlog_entry_void();
1752
1753 mlog(0, "inode %llu take PRMODE open lock\n", 1692 mlog(0, "inode %llu take PRMODE open lock\n",
1754 (unsigned long long)OCFS2_I(inode)->ip_blkno); 1693 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1755 1694
@@ -1764,7 +1703,6 @@ int ocfs2_open_lock(struct inode *inode)
1764 mlog_errno(status); 1703 mlog_errno(status);
1765 1704
1766out: 1705out:
1767 mlog_exit(status);
1768 return status; 1706 return status;
1769} 1707}
1770 1708
@@ -1776,8 +1714,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
1776 1714
1777 BUG_ON(!inode); 1715 BUG_ON(!inode);
1778 1716
1779 mlog_entry_void();
1780
1781 mlog(0, "inode %llu try to take %s open lock\n", 1717 mlog(0, "inode %llu try to take %s open lock\n",
1782 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1718 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1783 write ? "EXMODE" : "PRMODE"); 1719 write ? "EXMODE" : "PRMODE");
@@ -1799,7 +1735,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
1799 level, DLM_LKF_NOQUEUE, 0); 1735 level, DLM_LKF_NOQUEUE, 0);
1800 1736
1801out: 1737out:
1802 mlog_exit(status);
1803 return status; 1738 return status;
1804} 1739}
1805 1740
@@ -1811,8 +1746,6 @@ void ocfs2_open_unlock(struct inode *inode)
1811 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres; 1746 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1812 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1747 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1813 1748
1814 mlog_entry_void();
1815
1816 mlog(0, "inode %llu drop open lock\n", 1749 mlog(0, "inode %llu drop open lock\n",
1817 (unsigned long long)OCFS2_I(inode)->ip_blkno); 1750 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1818 1751
@@ -1827,7 +1760,7 @@ void ocfs2_open_unlock(struct inode *inode)
1827 DLM_LOCK_EX); 1760 DLM_LOCK_EX);
1828 1761
1829out: 1762out:
1830 mlog_exit_void(); 1763 return;
1831} 1764}
1832 1765
1833static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres, 1766static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
@@ -2043,8 +1976,6 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
2043{ 1976{
2044 int kick = 0; 1977 int kick = 0;
2045 1978
2046 mlog_entry_void();
2047
2048 /* If we know that another node is waiting on our lock, kick 1979 /* If we know that another node is waiting on our lock, kick
2049 * the downconvert thread pre-emptively when we reach a release 1980 * the downconvert thread pre-emptively when we reach a release
2050 * condition. */ 1981 * condition. */
@@ -2065,8 +1996,6 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
2065 1996
2066 if (kick) 1997 if (kick)
2067 ocfs2_wake_downconvert_thread(osb); 1998 ocfs2_wake_downconvert_thread(osb);
2068
2069 mlog_exit_void();
2070} 1999}
2071 2000
2072#define OCFS2_SEC_BITS 34 2001#define OCFS2_SEC_BITS 34
@@ -2095,8 +2024,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2095 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; 2024 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2096 struct ocfs2_meta_lvb *lvb; 2025 struct ocfs2_meta_lvb *lvb;
2097 2026
2098 mlog_entry_void();
2099
2100 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 2027 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2101 2028
2102 /* 2029 /*
@@ -2128,8 +2055,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2128 2055
2129out: 2056out:
2130 mlog_meta_lvb(0, lockres); 2057 mlog_meta_lvb(0, lockres);
2131
2132 mlog_exit_void();
2133} 2058}
2134 2059
2135static void ocfs2_unpack_timespec(struct timespec *spec, 2060static void ocfs2_unpack_timespec(struct timespec *spec,
@@ -2145,8 +2070,6 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2145 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; 2070 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2146 struct ocfs2_meta_lvb *lvb; 2071 struct ocfs2_meta_lvb *lvb;
2147 2072
2148 mlog_entry_void();
2149
2150 mlog_meta_lvb(0, lockres); 2073 mlog_meta_lvb(0, lockres);
2151 2074
2152 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 2075 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
@@ -2177,8 +2100,6 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2177 ocfs2_unpack_timespec(&inode->i_ctime, 2100 ocfs2_unpack_timespec(&inode->i_ctime,
2178 be64_to_cpu(lvb->lvb_ictime_packed)); 2101 be64_to_cpu(lvb->lvb_ictime_packed));
2179 spin_unlock(&oi->ip_lock); 2102 spin_unlock(&oi->ip_lock);
2180
2181 mlog_exit_void();
2182} 2103}
2183 2104
2184static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode, 2105static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
@@ -2205,8 +2126,6 @@ static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2205 unsigned long flags; 2126 unsigned long flags;
2206 int status = 0; 2127 int status = 0;
2207 2128
2208 mlog_entry_void();
2209
2210refresh_check: 2129refresh_check:
2211 spin_lock_irqsave(&lockres->l_lock, flags); 2130 spin_lock_irqsave(&lockres->l_lock, flags);
2212 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) { 2131 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
@@ -2227,7 +2146,7 @@ refresh_check:
2227 2146
2228 status = 1; 2147 status = 1;
2229bail: 2148bail:
2230 mlog_exit(status); 2149 mlog(0, "status %d\n", status);
2231 return status; 2150 return status;
2232} 2151}
2233 2152
@@ -2237,7 +2156,6 @@ static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockre
2237 int status) 2156 int status)
2238{ 2157{
2239 unsigned long flags; 2158 unsigned long flags;
2240 mlog_entry_void();
2241 2159
2242 spin_lock_irqsave(&lockres->l_lock, flags); 2160 spin_lock_irqsave(&lockres->l_lock, flags);
2243 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING); 2161 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
@@ -2246,8 +2164,6 @@ static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockre
2246 spin_unlock_irqrestore(&lockres->l_lock, flags); 2164 spin_unlock_irqrestore(&lockres->l_lock, flags);
2247 2165
2248 wake_up(&lockres->l_event); 2166 wake_up(&lockres->l_event);
2249
2250 mlog_exit_void();
2251} 2167}
2252 2168
2253/* may or may not return a bh if it went to disk. */ 2169/* may or may not return a bh if it went to disk. */
@@ -2260,8 +2176,6 @@ static int ocfs2_inode_lock_update(struct inode *inode,
2260 struct ocfs2_dinode *fe; 2176 struct ocfs2_dinode *fe;
2261 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2177 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2262 2178
2263 mlog_entry_void();
2264
2265 if (ocfs2_mount_local(osb)) 2179 if (ocfs2_mount_local(osb))
2266 goto bail; 2180 goto bail;
2267 2181
@@ -2330,7 +2244,6 @@ static int ocfs2_inode_lock_update(struct inode *inode,
2330bail_refresh: 2244bail_refresh:
2331 ocfs2_complete_lock_res_refresh(lockres, status); 2245 ocfs2_complete_lock_res_refresh(lockres, status);
2332bail: 2246bail:
2333 mlog_exit(status);
2334 return status; 2247 return status;
2335} 2248}
2336 2249
@@ -2374,8 +2287,6 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
2374 2287
2375 BUG_ON(!inode); 2288 BUG_ON(!inode);
2376 2289
2377 mlog_entry_void();
2378
2379 mlog(0, "inode %llu, take %s META lock\n", 2290 mlog(0, "inode %llu, take %s META lock\n",
2380 (unsigned long long)OCFS2_I(inode)->ip_blkno, 2291 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2381 ex ? "EXMODE" : "PRMODE"); 2292 ex ? "EXMODE" : "PRMODE");
@@ -2467,7 +2378,6 @@ bail:
2467 if (local_bh) 2378 if (local_bh)
2468 brelse(local_bh); 2379 brelse(local_bh);
2469 2380
2470 mlog_exit(status);
2471 return status; 2381 return status;
2472} 2382}
2473 2383
@@ -2517,7 +2427,6 @@ int ocfs2_inode_lock_atime(struct inode *inode,
2517{ 2427{
2518 int ret; 2428 int ret;
2519 2429
2520 mlog_entry_void();
2521 ret = ocfs2_inode_lock(inode, NULL, 0); 2430 ret = ocfs2_inode_lock(inode, NULL, 0);
2522 if (ret < 0) { 2431 if (ret < 0) {
2523 mlog_errno(ret); 2432 mlog_errno(ret);
@@ -2545,7 +2454,6 @@ int ocfs2_inode_lock_atime(struct inode *inode,
2545 } else 2454 } else
2546 *level = 0; 2455 *level = 0;
2547 2456
2548 mlog_exit(ret);
2549 return ret; 2457 return ret;
2550} 2458}
2551 2459
@@ -2556,8 +2464,6 @@ void ocfs2_inode_unlock(struct inode *inode,
2556 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres; 2464 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2557 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2465 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2558 2466
2559 mlog_entry_void();
2560
2561 mlog(0, "inode %llu drop %s META lock\n", 2467 mlog(0, "inode %llu drop %s META lock\n",
2562 (unsigned long long)OCFS2_I(inode)->ip_blkno, 2468 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2563 ex ? "EXMODE" : "PRMODE"); 2469 ex ? "EXMODE" : "PRMODE");
@@ -2565,8 +2471,6 @@ void ocfs2_inode_unlock(struct inode *inode,
2565 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) && 2471 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2566 !ocfs2_mount_local(osb)) 2472 !ocfs2_mount_local(osb))
2567 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); 2473 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2568
2569 mlog_exit_void();
2570} 2474}
2571 2475
2572int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno) 2476int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
@@ -2617,8 +2521,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
2617 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 2521 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2618 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; 2522 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2619 2523
2620 mlog_entry_void();
2621
2622 if (ocfs2_is_hard_readonly(osb)) 2524 if (ocfs2_is_hard_readonly(osb))
2623 return -EROFS; 2525 return -EROFS;
2624 2526
@@ -2650,7 +2552,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
2650 ocfs2_track_lock_refresh(lockres); 2552 ocfs2_track_lock_refresh(lockres);
2651 } 2553 }
2652bail: 2554bail:
2653 mlog_exit(status);
2654 return status; 2555 return status;
2655} 2556}
2656 2557
@@ -2869,8 +2770,15 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2869 return iter; 2770 return iter;
2870} 2771}
2871 2772
2872/* So that debugfs.ocfs2 can determine which format is being used */ 2773/*
2873#define OCFS2_DLM_DEBUG_STR_VERSION 2 2774 * Version is used by debugfs.ocfs2 to determine the format being used
2775 *
2776 * New in version 2
2777 * - Lock stats printed
2778 * New in version 3
2779 * - Max time in lock stats is in usecs (instead of nsecs)
2780 */
2781#define OCFS2_DLM_DEBUG_STR_VERSION 3
2874static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) 2782static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2875{ 2783{
2876 int i; 2784 int i;
@@ -2912,18 +2820,18 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2912 seq_printf(m, "0x%x\t", lvb[i]); 2820 seq_printf(m, "0x%x\t", lvb[i]);
2913 2821
2914#ifdef CONFIG_OCFS2_FS_STATS 2822#ifdef CONFIG_OCFS2_FS_STATS
2915# define lock_num_prmode(_l) (_l)->l_lock_num_prmode 2823# define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
2916# define lock_num_exmode(_l) (_l)->l_lock_num_exmode 2824# define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
2917# define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed 2825# define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
2918# define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed 2826# define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
2919# define lock_total_prmode(_l) (_l)->l_lock_total_prmode 2827# define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
2920# define lock_total_exmode(_l) (_l)->l_lock_total_exmode 2828# define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
2921# define lock_max_prmode(_l) (_l)->l_lock_max_prmode 2829# define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
2922# define lock_max_exmode(_l) (_l)->l_lock_max_exmode 2830# define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
2923# define lock_refresh(_l) (_l)->l_lock_refresh 2831# define lock_refresh(_l) ((_l)->l_lock_refresh)
2924#else 2832#else
2925# define lock_num_prmode(_l) (0ULL) 2833# define lock_num_prmode(_l) (0)
2926# define lock_num_exmode(_l) (0ULL) 2834# define lock_num_exmode(_l) (0)
2927# define lock_num_prmode_failed(_l) (0) 2835# define lock_num_prmode_failed(_l) (0)
2928# define lock_num_exmode_failed(_l) (0) 2836# define lock_num_exmode_failed(_l) (0)
2929# define lock_total_prmode(_l) (0ULL) 2837# define lock_total_prmode(_l) (0ULL)
@@ -2933,8 +2841,8 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2933# define lock_refresh(_l) (0) 2841# define lock_refresh(_l) (0)
2934#endif 2842#endif
2935 /* The following seq_print was added in version 2 of this output */ 2843 /* The following seq_print was added in version 2 of this output */
2936 seq_printf(m, "%llu\t" 2844 seq_printf(m, "%u\t"
2937 "%llu\t" 2845 "%u\t"
2938 "%u\t" 2846 "%u\t"
2939 "%u\t" 2847 "%u\t"
2940 "%llu\t" 2848 "%llu\t"
@@ -3054,8 +2962,6 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
3054 int status = 0; 2962 int status = 0;
3055 struct ocfs2_cluster_connection *conn = NULL; 2963 struct ocfs2_cluster_connection *conn = NULL;
3056 2964
3057 mlog_entry_void();
3058
3059 if (ocfs2_mount_local(osb)) { 2965 if (ocfs2_mount_local(osb)) {
3060 osb->node_num = 0; 2966 osb->node_num = 0;
3061 goto local; 2967 goto local;
@@ -3112,15 +3018,12 @@ bail:
3112 kthread_stop(osb->dc_task); 3018 kthread_stop(osb->dc_task);
3113 } 3019 }
3114 3020
3115 mlog_exit(status);
3116 return status; 3021 return status;
3117} 3022}
3118 3023
3119void ocfs2_dlm_shutdown(struct ocfs2_super *osb, 3024void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3120 int hangup_pending) 3025 int hangup_pending)
3121{ 3026{
3122 mlog_entry_void();
3123
3124 ocfs2_drop_osb_locks(osb); 3027 ocfs2_drop_osb_locks(osb);
3125 3028
3126 /* 3029 /*
@@ -3143,8 +3046,6 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3143 osb->cconn = NULL; 3046 osb->cconn = NULL;
3144 3047
3145 ocfs2_dlm_shutdown_debug(osb); 3048 ocfs2_dlm_shutdown_debug(osb);
3146
3147 mlog_exit_void();
3148} 3049}
3149 3050
3150static int ocfs2_drop_lock(struct ocfs2_super *osb, 3051static int ocfs2_drop_lock(struct ocfs2_super *osb,
@@ -3226,7 +3127,6 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb,
3226 3127
3227 ocfs2_wait_on_busy_lock(lockres); 3128 ocfs2_wait_on_busy_lock(lockres);
3228out: 3129out:
3229 mlog_exit(0);
3230 return 0; 3130 return 0;
3231} 3131}
3232 3132
@@ -3284,8 +3184,6 @@ int ocfs2_drop_inode_locks(struct inode *inode)
3284{ 3184{
3285 int status, err; 3185 int status, err;
3286 3186
3287 mlog_entry_void();
3288
3289 /* No need to call ocfs2_mark_lockres_freeing here - 3187 /* No need to call ocfs2_mark_lockres_freeing here -
3290 * ocfs2_clear_inode has done it for us. */ 3188 * ocfs2_clear_inode has done it for us. */
3291 3189
@@ -3310,7 +3208,6 @@ int ocfs2_drop_inode_locks(struct inode *inode)
3310 if (err < 0 && !status) 3208 if (err < 0 && !status)
3311 status = err; 3209 status = err;
3312 3210
3313 mlog_exit(status);
3314 return status; 3211 return status;
3315} 3212}
3316 3213
@@ -3352,8 +3249,6 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3352 int ret; 3249 int ret;
3353 u32 dlm_flags = DLM_LKF_CONVERT; 3250 u32 dlm_flags = DLM_LKF_CONVERT;
3354 3251
3355 mlog_entry_void();
3356
3357 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, 3252 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3358 lockres->l_level, new_level); 3253 lockres->l_level, new_level);
3359 3254
@@ -3375,7 +3270,6 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3375 3270
3376 ret = 0; 3271 ret = 0;
3377bail: 3272bail:
3378 mlog_exit(ret);
3379 return ret; 3273 return ret;
3380} 3274}
3381 3275
@@ -3385,8 +3279,6 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3385{ 3279{
3386 assert_spin_locked(&lockres->l_lock); 3280 assert_spin_locked(&lockres->l_lock);
3387 3281
3388 mlog_entry_void();
3389
3390 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) { 3282 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3391 /* If we're already trying to cancel a lock conversion 3283 /* If we're already trying to cancel a lock conversion
3392 * then just drop the spinlock and allow the caller to 3284 * then just drop the spinlock and allow the caller to
@@ -3416,8 +3308,6 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3416{ 3308{
3417 int ret; 3309 int ret;
3418 3310
3419 mlog_entry_void();
3420
3421 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, 3311 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3422 DLM_LKF_CANCEL); 3312 DLM_LKF_CANCEL);
3423 if (ret) { 3313 if (ret) {
@@ -3427,7 +3317,6 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3427 3317
3428 mlog(ML_BASTS, "lockres %s\n", lockres->l_name); 3318 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3429 3319
3430 mlog_exit(ret);
3431 return ret; 3320 return ret;
3432} 3321}
3433 3322
@@ -3443,8 +3332,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3443 int set_lvb = 0; 3332 int set_lvb = 0;
3444 unsigned int gen; 3333 unsigned int gen;
3445 3334
3446 mlog_entry_void();
3447
3448 spin_lock_irqsave(&lockres->l_lock, flags); 3335 spin_lock_irqsave(&lockres->l_lock, flags);
3449 3336
3450recheck: 3337recheck:
@@ -3619,14 +3506,14 @@ downconvert:
3619 gen); 3506 gen);
3620 3507
3621leave: 3508leave:
3622 mlog_exit(ret); 3509 if (ret)
3510 mlog_errno(ret);
3623 return ret; 3511 return ret;
3624 3512
3625leave_requeue: 3513leave_requeue:
3626 spin_unlock_irqrestore(&lockres->l_lock, flags); 3514 spin_unlock_irqrestore(&lockres->l_lock, flags);
3627 ctl->requeue = 1; 3515 ctl->requeue = 1;
3628 3516
3629 mlog_exit(0);
3630 return 0; 3517 return 0;
3631} 3518}
3632 3519
@@ -3859,8 +3746,6 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3859 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb, 3746 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3860 oinfo->dqi_gi.dqi_type); 3747 oinfo->dqi_gi.dqi_type);
3861 3748
3862 mlog_entry_void();
3863
3864 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 3749 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3865 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION; 3750 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
3866 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace); 3751 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
@@ -3869,8 +3754,6 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3869 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks); 3754 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
3870 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk); 3755 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
3871 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry); 3756 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
3872
3873 mlog_exit_void();
3874} 3757}
3875 3758
3876void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex) 3759void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
@@ -3879,10 +3762,8 @@ void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3879 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb); 3762 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3880 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 3763 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3881 3764
3882 mlog_entry_void();
3883 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) 3765 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3884 ocfs2_cluster_unlock(osb, lockres, level); 3766 ocfs2_cluster_unlock(osb, lockres, level);
3885 mlog_exit_void();
3886} 3767}
3887 3768
3888static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo) 3769static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
@@ -3937,8 +3818,6 @@ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3937 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 3818 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3938 int status = 0; 3819 int status = 0;
3939 3820
3940 mlog_entry_void();
3941
3942 /* On RO devices, locking really isn't needed... */ 3821 /* On RO devices, locking really isn't needed... */
3943 if (ocfs2_is_hard_readonly(osb)) { 3822 if (ocfs2_is_hard_readonly(osb)) {
3944 if (ex) 3823 if (ex)
@@ -3961,7 +3840,6 @@ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3961 ocfs2_qinfo_unlock(oinfo, ex); 3840 ocfs2_qinfo_unlock(oinfo, ex);
3962 ocfs2_complete_lock_res_refresh(lockres, status); 3841 ocfs2_complete_lock_res_refresh(lockres, status);
3963bail: 3842bail:
3964 mlog_exit(status);
3965 return status; 3843 return status;
3966} 3844}
3967 3845
@@ -4007,8 +3885,6 @@ static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
4007 * considered valid until we remove the OCFS2_LOCK_QUEUED 3885 * considered valid until we remove the OCFS2_LOCK_QUEUED
4008 * flag. */ 3886 * flag. */
4009 3887
4010 mlog_entry_void();
4011
4012 BUG_ON(!lockres); 3888 BUG_ON(!lockres);
4013 BUG_ON(!lockres->l_ops); 3889 BUG_ON(!lockres->l_ops);
4014 3890
@@ -4042,15 +3918,11 @@ unqueue:
4042 if (ctl.unblock_action != UNBLOCK_CONTINUE 3918 if (ctl.unblock_action != UNBLOCK_CONTINUE
4043 && lockres->l_ops->post_unlock) 3919 && lockres->l_ops->post_unlock)
4044 lockres->l_ops->post_unlock(osb, lockres); 3920 lockres->l_ops->post_unlock(osb, lockres);
4045
4046 mlog_exit_void();
4047} 3921}
4048 3922
4049static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, 3923static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
4050 struct ocfs2_lock_res *lockres) 3924 struct ocfs2_lock_res *lockres)
4051{ 3925{
4052 mlog_entry_void();
4053
4054 assert_spin_locked(&lockres->l_lock); 3926 assert_spin_locked(&lockres->l_lock);
4055 3927
4056 if (lockres->l_flags & OCFS2_LOCK_FREEING) { 3928 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -4071,8 +3943,6 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
4071 osb->blocked_lock_count++; 3943 osb->blocked_lock_count++;
4072 } 3944 }
4073 spin_unlock(&osb->dc_task_lock); 3945 spin_unlock(&osb->dc_task_lock);
4074
4075 mlog_exit_void();
4076} 3946}
4077 3947
4078static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) 3948static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
@@ -4080,8 +3950,6 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4080 unsigned long processed; 3950 unsigned long processed;
4081 struct ocfs2_lock_res *lockres; 3951 struct ocfs2_lock_res *lockres;
4082 3952
4083 mlog_entry_void();
4084
4085 spin_lock(&osb->dc_task_lock); 3953 spin_lock(&osb->dc_task_lock);
4086 /* grab this early so we know to try again if a state change and 3954 /* grab this early so we know to try again if a state change and
4087 * wake happens part-way through our work */ 3955 * wake happens part-way through our work */
@@ -4105,8 +3973,6 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4105 spin_lock(&osb->dc_task_lock); 3973 spin_lock(&osb->dc_task_lock);
4106 } 3974 }
4107 spin_unlock(&osb->dc_task_lock); 3975 spin_unlock(&osb->dc_task_lock);
4108
4109 mlog_exit_void();
4110} 3976}
4111 3977
4112static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb) 3978static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 254652a9b542..745db42528d5 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -26,7 +26,6 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/types.h> 27#include <linux/types.h>
28 28
29#define MLOG_MASK_PREFIX ML_EXPORT
30#include <cluster/masklog.h> 29#include <cluster/masklog.h>
31 30
32#include "ocfs2.h" 31#include "ocfs2.h"
@@ -40,6 +39,7 @@
40 39
41#include "buffer_head_io.h" 40#include "buffer_head_io.h"
42#include "suballoc.h" 41#include "suballoc.h"
42#include "ocfs2_trace.h"
43 43
44struct ocfs2_inode_handle 44struct ocfs2_inode_handle
45{ 45{
@@ -56,10 +56,9 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
56 int status, set; 56 int status, set;
57 struct dentry *result; 57 struct dentry *result;
58 58
59 mlog_entry("(0x%p, 0x%p)\n", sb, handle); 59 trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno);
60 60
61 if (blkno == 0) { 61 if (blkno == 0) {
62 mlog(0, "nfs wants inode with blkno: 0\n");
63 result = ERR_PTR(-ESTALE); 62 result = ERR_PTR(-ESTALE);
64 goto bail; 63 goto bail;
65 } 64 }
@@ -83,6 +82,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
83 } 82 }
84 83
85 status = ocfs2_test_inode_bit(osb, blkno, &set); 84 status = ocfs2_test_inode_bit(osb, blkno, &set);
85 trace_ocfs2_get_dentry_test_bit(status, set);
86 if (status < 0) { 86 if (status < 0) {
87 if (status == -EINVAL) { 87 if (status == -EINVAL) {
88 /* 88 /*
@@ -90,18 +90,14 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
90 * as an inode, we return -ESTALE to be 90 * as an inode, we return -ESTALE to be
91 * nice 91 * nice
92 */ 92 */
93 mlog(0, "test inode bit failed %d\n", status);
94 status = -ESTALE; 93 status = -ESTALE;
95 } else { 94 } else
96 mlog(ML_ERROR, "test inode bit failed %d\n", status); 95 mlog(ML_ERROR, "test inode bit failed %d\n", status);
97 }
98 goto unlock_nfs_sync; 96 goto unlock_nfs_sync;
99 } 97 }
100 98
101 /* If the inode allocator bit is clear, this inode must be stale */ 99 /* If the inode allocator bit is clear, this inode must be stale */
102 if (!set) { 100 if (!set) {
103 mlog(0, "inode %llu suballoc bit is clear\n",
104 (unsigned long long)blkno);
105 status = -ESTALE; 101 status = -ESTALE;
106 goto unlock_nfs_sync; 102 goto unlock_nfs_sync;
107 } 103 }
@@ -114,8 +110,8 @@ unlock_nfs_sync:
114check_err: 110check_err:
115 if (status < 0) { 111 if (status < 0) {
116 if (status == -ESTALE) { 112 if (status == -ESTALE) {
117 mlog(0, "stale inode ino: %llu generation: %u\n", 113 trace_ocfs2_get_dentry_stale((unsigned long long)blkno,
118 (unsigned long long)blkno, handle->ih_generation); 114 handle->ih_generation);
119 } 115 }
120 result = ERR_PTR(status); 116 result = ERR_PTR(status);
121 goto bail; 117 goto bail;
@@ -130,8 +126,9 @@ check_err:
130check_gen: 126check_gen:
131 if (handle->ih_generation != inode->i_generation) { 127 if (handle->ih_generation != inode->i_generation) {
132 iput(inode); 128 iput(inode);
133 mlog(0, "stale inode ino: %llu generation: %u\n", 129 trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
134 (unsigned long long)blkno, handle->ih_generation); 130 handle->ih_generation,
131 inode->i_generation);
135 result = ERR_PTR(-ESTALE); 132 result = ERR_PTR(-ESTALE);
136 goto bail; 133 goto bail;
137 } 134 }
@@ -141,7 +138,7 @@ check_gen:
141 mlog_errno(PTR_ERR(result)); 138 mlog_errno(PTR_ERR(result));
142 139
143bail: 140bail:
144 mlog_exit_ptr(result); 141 trace_ocfs2_get_dentry_end(result);
145 return result; 142 return result;
146} 143}
147 144
@@ -152,11 +149,8 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
152 struct dentry *parent; 149 struct dentry *parent;
153 struct inode *dir = child->d_inode; 150 struct inode *dir = child->d_inode;
154 151
155 mlog_entry("(0x%p, '%.*s')\n", child, 152 trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
156 child->d_name.len, child->d_name.name); 153 (unsigned long long)OCFS2_I(dir)->ip_blkno);
157
158 mlog(0, "find parent of directory %llu\n",
159 (unsigned long long)OCFS2_I(dir)->ip_blkno);
160 154
161 status = ocfs2_inode_lock(dir, NULL, 0); 155 status = ocfs2_inode_lock(dir, NULL, 0);
162 if (status < 0) { 156 if (status < 0) {
@@ -178,7 +172,7 @@ bail_unlock:
178 ocfs2_inode_unlock(dir, 0); 172 ocfs2_inode_unlock(dir, 0);
179 173
180bail: 174bail:
181 mlog_exit_ptr(parent); 175 trace_ocfs2_get_parent_end(parent);
182 176
183 return parent; 177 return parent;
184} 178}
@@ -193,9 +187,9 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
193 u32 generation; 187 u32 generation;
194 __le32 *fh = (__force __le32 *) fh_in; 188 __le32 *fh = (__force __le32 *) fh_in;
195 189
196 mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry, 190 trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
197 dentry->d_name.len, dentry->d_name.name, 191 dentry->d_name.name,
198 fh, len, connectable); 192 fh, len, connectable);
199 193
200 if (connectable && (len < 6)) { 194 if (connectable && (len < 6)) {
201 *max_len = 6; 195 *max_len = 6;
@@ -210,8 +204,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
210 blkno = OCFS2_I(inode)->ip_blkno; 204 blkno = OCFS2_I(inode)->ip_blkno;
211 generation = inode->i_generation; 205 generation = inode->i_generation;
212 206
213 mlog(0, "Encoding fh: blkno: %llu, generation: %u\n", 207 trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation);
214 (unsigned long long)blkno, generation);
215 208
216 len = 3; 209 len = 3;
217 fh[0] = cpu_to_le32((u32)(blkno >> 32)); 210 fh[0] = cpu_to_le32((u32)(blkno >> 32));
@@ -236,14 +229,14 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
236 len = 6; 229 len = 6;
237 type = 2; 230 type = 2;
238 231
239 mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", 232 trace_ocfs2_encode_fh_parent((unsigned long long)blkno,
240 (unsigned long long)blkno, generation); 233 generation);
241 } 234 }
242 235
243 *max_len = len; 236 *max_len = len;
244 237
245bail: 238bail:
246 mlog_exit(type); 239 trace_ocfs2_encode_fh_type(type);
247 return type; 240 return type;
248} 241}
249 242
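
Note: the hunks above replace export.c's mlog entry/exit logging with calls such as trace_ocfs2_get_dentry_test_bit(status, set); the tracepoint declarations themselves live in the new fs/ocfs2/ocfs2_trace.h header, which is only included here and is not part of this diff. As a rough sketch of what one such declaration could look like (the event layout below is the standard TRACE_EVENT() pattern, but the field names and format string are illustrative assumptions, not the actual header contents):

/* Illustrative sketch only; the real ocfs2_trace.h is not shown in this diff. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_get_dentry_test_bit,
	TP_PROTO(int status, int set),
	TP_ARGS(status, set),
	TP_STRUCT__entry(
		__field(int, status)
		__field(int, set)
	),
	TP_fast_assign(
		__entry->status = status;
		__entry->set = set;
	),
	TP_printk("%d %d", __entry->status, __entry->set)
);

#endif /* _TRACE_OCFS2_H */

/* Header lives in fs/ocfs2/, not include/trace/events/, so point
 * define_trace.h at it explicitly. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE ocfs2_trace
#include <trace/define_trace.h>
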
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 09e3fdfa6d33..23457b491e8c 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -28,7 +28,6 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/fiemap.h> 29#include <linux/fiemap.h>
30 30
31#define MLOG_MASK_PREFIX ML_EXTENT_MAP
32#include <cluster/masklog.h> 31#include <cluster/masklog.h>
33 32
34#include "ocfs2.h" 33#include "ocfs2.h"
@@ -39,6 +38,7 @@
39#include "inode.h" 38#include "inode.h"
40#include "super.h" 39#include "super.h"
41#include "symlink.h" 40#include "symlink.h"
41#include "ocfs2_trace.h"
42 42
43#include "buffer_head_io.h" 43#include "buffer_head_io.h"
44 44
@@ -841,10 +841,9 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
841 u64 p_block, p_count; 841 u64 p_block, p_count;
842 int i, count, done = 0; 842 int i, count, done = 0;
843 843
844 mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, " 844 trace_ocfs2_read_virt_blocks(
845 "flags = %x, validate = %p)\n", 845 inode, (unsigned long long)v_block, nr, bhs, flags,
846 inode, (unsigned long long)v_block, nr, bhs, flags, 846 validate);
847 validate);
848 847
849 if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >= 848 if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
850 i_size_read(inode)) { 849 i_size_read(inode)) {
@@ -897,7 +896,6 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
897 } 896 }
898 897
899out: 898out:
900 mlog_exit(rc);
901 return rc; 899 return rc;
902} 900}
903 901
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index a6651956482e..41565ae52856 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -38,7 +38,6 @@
38#include <linux/quotaops.h> 38#include <linux/quotaops.h>
39#include <linux/blkdev.h> 39#include <linux/blkdev.h>
40 40
41#define MLOG_MASK_PREFIX ML_INODE
42#include <cluster/masklog.h> 41#include <cluster/masklog.h>
43 42
44#include "ocfs2.h" 43#include "ocfs2.h"
@@ -61,6 +60,7 @@
61#include "acl.h" 60#include "acl.h"
62#include "quota.h" 61#include "quota.h"
63#include "refcounttree.h" 62#include "refcounttree.h"
63#include "ocfs2_trace.h"
64 64
65#include "buffer_head_io.h" 65#include "buffer_head_io.h"
66 66
@@ -99,8 +99,10 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
99 int mode = file->f_flags; 99 int mode = file->f_flags;
100 struct ocfs2_inode_info *oi = OCFS2_I(inode); 100 struct ocfs2_inode_info *oi = OCFS2_I(inode);
101 101
102 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, 102 trace_ocfs2_file_open(inode, file, file->f_path.dentry,
103 file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name); 103 (unsigned long long)OCFS2_I(inode)->ip_blkno,
104 file->f_path.dentry->d_name.len,
105 file->f_path.dentry->d_name.name, mode);
104 106
105 if (file->f_mode & FMODE_WRITE) 107 if (file->f_mode & FMODE_WRITE)
106 dquot_initialize(inode); 108 dquot_initialize(inode);
@@ -135,7 +137,6 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
135 } 137 }
136 138
137leave: 139leave:
138 mlog_exit(status);
139 return status; 140 return status;
140} 141}
141 142
@@ -143,19 +144,19 @@ static int ocfs2_file_release(struct inode *inode, struct file *file)
143{ 144{
144 struct ocfs2_inode_info *oi = OCFS2_I(inode); 145 struct ocfs2_inode_info *oi = OCFS2_I(inode);
145 146
146 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
147 file->f_path.dentry->d_name.len,
148 file->f_path.dentry->d_name.name);
149
150 spin_lock(&oi->ip_lock); 147 spin_lock(&oi->ip_lock);
151 if (!--oi->ip_open_count) 148 if (!--oi->ip_open_count)
152 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT; 149 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
150
151 trace_ocfs2_file_release(inode, file, file->f_path.dentry,
152 oi->ip_blkno,
153 file->f_path.dentry->d_name.len,
154 file->f_path.dentry->d_name.name,
155 oi->ip_open_count);
153 spin_unlock(&oi->ip_lock); 156 spin_unlock(&oi->ip_lock);
154 157
155 ocfs2_free_file_private(inode, file); 158 ocfs2_free_file_private(inode, file);
156 159
157 mlog_exit(0);
158
159 return 0; 160 return 0;
160} 161}
161 162
@@ -177,9 +178,11 @@ static int ocfs2_sync_file(struct file *file, int datasync)
177 struct inode *inode = file->f_mapping->host; 178 struct inode *inode = file->f_mapping->host;
178 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 179 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
179 180
180 mlog_entry("(0x%p, %d, 0x%p, '%.*s')\n", file, datasync, 181 trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
181 file->f_path.dentry, file->f_path.dentry->d_name.len, 182 OCFS2_I(inode)->ip_blkno,
182 file->f_path.dentry->d_name.name); 183 file->f_path.dentry->d_name.len,
184 file->f_path.dentry->d_name.name,
185 (unsigned long long)datasync);
183 186
184 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { 187 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
185 /* 188 /*
@@ -195,7 +198,8 @@ static int ocfs2_sync_file(struct file *file, int datasync)
195 err = jbd2_journal_force_commit(journal); 198 err = jbd2_journal_force_commit(journal);
196 199
197bail: 200bail:
198 mlog_exit(err); 201 if (err)
202 mlog_errno(err);
199 203
200 return (err < 0) ? -EIO : 0; 204 return (err < 0) ? -EIO : 0;
201} 205}
@@ -251,8 +255,6 @@ int ocfs2_update_inode_atime(struct inode *inode,
251 handle_t *handle; 255 handle_t *handle;
252 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data; 256 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
253 257
254 mlog_entry_void();
255
256 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 258 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
257 if (IS_ERR(handle)) { 259 if (IS_ERR(handle)) {
258 ret = PTR_ERR(handle); 260 ret = PTR_ERR(handle);
@@ -280,7 +282,6 @@ int ocfs2_update_inode_atime(struct inode *inode,
280out_commit: 282out_commit:
281 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 283 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
282out: 284out:
283 mlog_exit(ret);
284 return ret; 285 return ret;
285} 286}
286 287
@@ -291,7 +292,6 @@ static int ocfs2_set_inode_size(handle_t *handle,
291{ 292{
292 int status; 293 int status;
293 294
294 mlog_entry_void();
295 i_size_write(inode, new_i_size); 295 i_size_write(inode, new_i_size);
296 inode->i_blocks = ocfs2_inode_sector_count(inode); 296 inode->i_blocks = ocfs2_inode_sector_count(inode);
297 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 297 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -303,7 +303,6 @@ static int ocfs2_set_inode_size(handle_t *handle,
303 } 303 }
304 304
305bail: 305bail:
306 mlog_exit(status);
307 return status; 306 return status;
308} 307}
309 308
@@ -375,8 +374,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
375 struct ocfs2_dinode *di; 374 struct ocfs2_dinode *di;
376 u64 cluster_bytes; 375 u64 cluster_bytes;
377 376
378 mlog_entry_void();
379
380 /* 377 /*
381 * We need to CoW the cluster contains the offset if it is reflinked 378 * We need to CoW the cluster contains the offset if it is reflinked
382 * since we will call ocfs2_zero_range_for_truncate later which will 379 * since we will call ocfs2_zero_range_for_truncate later which will
@@ -429,8 +426,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
429out_commit: 426out_commit:
430 ocfs2_commit_trans(osb, handle); 427 ocfs2_commit_trans(osb, handle);
431out: 428out:
432
433 mlog_exit(status);
434 return status; 429 return status;
435} 430}
436 431
@@ -442,14 +437,14 @@ static int ocfs2_truncate_file(struct inode *inode,
442 struct ocfs2_dinode *fe = NULL; 437 struct ocfs2_dinode *fe = NULL;
443 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 438 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
444 439
445 mlog_entry("(inode = %llu, new_i_size = %llu\n",
446 (unsigned long long)OCFS2_I(inode)->ip_blkno,
447 (unsigned long long)new_i_size);
448
449 /* We trust di_bh because it comes from ocfs2_inode_lock(), which 440 /* We trust di_bh because it comes from ocfs2_inode_lock(), which
450 * already validated it */ 441 * already validated it */
451 fe = (struct ocfs2_dinode *) di_bh->b_data; 442 fe = (struct ocfs2_dinode *) di_bh->b_data;
452 443
444 trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
445 (unsigned long long)le64_to_cpu(fe->i_size),
446 (unsigned long long)new_i_size);
447
453 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode), 448 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
454 "Inode %llu, inode i_size = %lld != di " 449 "Inode %llu, inode i_size = %lld != di "
455 "i_size = %llu, i_flags = 0x%x\n", 450 "i_size = %llu, i_flags = 0x%x\n",
@@ -459,19 +454,14 @@ static int ocfs2_truncate_file(struct inode *inode,
459 le32_to_cpu(fe->i_flags)); 454 le32_to_cpu(fe->i_flags));
460 455
461 if (new_i_size > le64_to_cpu(fe->i_size)) { 456 if (new_i_size > le64_to_cpu(fe->i_size)) {
462 mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n", 457 trace_ocfs2_truncate_file_error(
463 (unsigned long long)le64_to_cpu(fe->i_size), 458 (unsigned long long)le64_to_cpu(fe->i_size),
464 (unsigned long long)new_i_size); 459 (unsigned long long)new_i_size);
465 status = -EINVAL; 460 status = -EINVAL;
466 mlog_errno(status); 461 mlog_errno(status);
467 goto bail; 462 goto bail;
468 } 463 }
469 464
470 mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
471 (unsigned long long)le64_to_cpu(fe->i_blkno),
472 (unsigned long long)le64_to_cpu(fe->i_size),
473 (unsigned long long)new_i_size);
474
475 /* lets handle the simple truncate cases before doing any more 465 /* lets handle the simple truncate cases before doing any more
476 * cluster locking. */ 466 * cluster locking. */
477 if (new_i_size == le64_to_cpu(fe->i_size)) 467 if (new_i_size == le64_to_cpu(fe->i_size))
@@ -525,7 +515,6 @@ bail:
525 if (!status && OCFS2_I(inode)->ip_clusters == 0) 515 if (!status && OCFS2_I(inode)->ip_clusters == 0)
526 status = ocfs2_try_remove_refcount_tree(inode, di_bh); 516 status = ocfs2_try_remove_refcount_tree(inode, di_bh);
527 517
528 mlog_exit(status);
529 return status; 518 return status;
530} 519}
531 520
@@ -578,8 +567,6 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
578 struct ocfs2_extent_tree et; 567 struct ocfs2_extent_tree et;
579 int did_quota = 0; 568 int did_quota = 0;
580 569
581 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
582
583 /* 570 /*
584 * This function only exists for file systems which don't 571 * This function only exists for file systems which don't
585 * support holes. 572 * support holes.
@@ -596,11 +583,6 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
596restart_all: 583restart_all:
597 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters); 584 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
598 585
599 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
600 "clusters_to_add = %u\n",
601 (unsigned long long)OCFS2_I(inode)->ip_blkno,
602 (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
603 clusters_to_add);
604 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh); 586 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
605 status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, 587 status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
606 &data_ac, &meta_ac); 588 &data_ac, &meta_ac);
@@ -620,6 +602,12 @@ restart_all:
620 } 602 }
621 603
622restarted_transaction: 604restarted_transaction:
605 trace_ocfs2_extend_allocation(
606 (unsigned long long)OCFS2_I(inode)->ip_blkno,
607 (unsigned long long)i_size_read(inode),
608 le32_to_cpu(fe->i_clusters), clusters_to_add,
609 why, restart_func);
610
623 status = dquot_alloc_space_nodirty(inode, 611 status = dquot_alloc_space_nodirty(inode,
624 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); 612 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
625 if (status) 613 if (status)
@@ -666,13 +654,11 @@ restarted_transaction:
666 654
667 if (why != RESTART_NONE && clusters_to_add) { 655 if (why != RESTART_NONE && clusters_to_add) {
668 if (why == RESTART_META) { 656 if (why == RESTART_META) {
669 mlog(0, "restarting function.\n");
670 restart_func = 1; 657 restart_func = 1;
671 status = 0; 658 status = 0;
672 } else { 659 } else {
673 BUG_ON(why != RESTART_TRANS); 660 BUG_ON(why != RESTART_TRANS);
674 661
675 mlog(0, "restarting transaction.\n");
676 /* TODO: This can be more intelligent. */ 662 /* TODO: This can be more intelligent. */
677 credits = ocfs2_calc_extend_credits(osb->sb, 663 credits = ocfs2_calc_extend_credits(osb->sb,
678 &fe->id2.i_list, 664 &fe->id2.i_list,
@@ -689,11 +675,11 @@ restarted_transaction:
689 } 675 }
690 } 676 }
691 677
692 mlog(0, "fe: i_clusters = %u, i_size=%llu\n", 678 trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
693 le32_to_cpu(fe->i_clusters), 679 le32_to_cpu(fe->i_clusters),
694 (unsigned long long)le64_to_cpu(fe->i_size)); 680 (unsigned long long)le64_to_cpu(fe->i_size),
695 mlog(0, "inode: ip_clusters=%u, i_size=%lld\n", 681 OCFS2_I(inode)->ip_clusters,
696 OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode)); 682 (unsigned long long)i_size_read(inode));
697 683
698leave: 684leave:
699 if (status < 0 && did_quota) 685 if (status < 0 && did_quota)
@@ -718,7 +704,6 @@ leave:
718 brelse(bh); 704 brelse(bh);
719 bh = NULL; 705 bh = NULL;
720 706
721 mlog_exit(status);
722 return status; 707 return status;
723} 708}
724 709
@@ -785,10 +770,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
785 if (!zero_to) 770 if (!zero_to)
786 zero_to = PAGE_CACHE_SIZE; 771 zero_to = PAGE_CACHE_SIZE;
787 772
788 mlog(0, 773 trace_ocfs2_write_zero_page(
789 "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n", 774 (unsigned long long)OCFS2_I(inode)->ip_blkno,
790 (unsigned long long)abs_from, (unsigned long long)abs_to, 775 (unsigned long long)abs_from,
791 index, zero_from, zero_to); 776 (unsigned long long)abs_to,
777 index, zero_from, zero_to);
792 778
793 /* We know that zero_from is block aligned */ 779 /* We know that zero_from is block aligned */
794 for (block_start = zero_from; block_start < zero_to; 780 for (block_start = zero_from; block_start < zero_to;
@@ -928,9 +914,10 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
928 u64 next_pos; 914 u64 next_pos;
929 u64 zero_pos = range_start; 915 u64 zero_pos = range_start;
930 916
931 mlog(0, "range_start = %llu, range_end = %llu\n", 917 trace_ocfs2_zero_extend_range(
932 (unsigned long long)range_start, 918 (unsigned long long)OCFS2_I(inode)->ip_blkno,
933 (unsigned long long)range_end); 919 (unsigned long long)range_start,
920 (unsigned long long)range_end);
934 BUG_ON(range_start >= range_end); 921 BUG_ON(range_start >= range_end);
935 922
936 while (zero_pos < range_end) { 923 while (zero_pos < range_end) {
@@ -962,9 +949,9 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
962 struct super_block *sb = inode->i_sb; 949 struct super_block *sb = inode->i_sb;
963 950
964 zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode)); 951 zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
965 mlog(0, "zero_start %llu for i_size %llu\n", 952 trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
966 (unsigned long long)zero_start, 953 (unsigned long long)zero_start,
967 (unsigned long long)i_size_read(inode)); 954 (unsigned long long)i_size_read(inode));
968 while (zero_start < zero_to_size) { 955 while (zero_start < zero_to_size) {
969 ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start, 956 ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
970 zero_to_size, 957 zero_to_size,
@@ -1113,30 +1100,20 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1113 struct dquot *transfer_to[MAXQUOTAS] = { }; 1100 struct dquot *transfer_to[MAXQUOTAS] = { };
1114 int qtype; 1101 int qtype;
1115 1102
1116 mlog_entry("(0x%p, '%.*s')\n", dentry, 1103 trace_ocfs2_setattr(inode, dentry,
1117 dentry->d_name.len, dentry->d_name.name); 1104 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1105 dentry->d_name.len, dentry->d_name.name,
1106 attr->ia_valid, attr->ia_mode,
1107 attr->ia_uid, attr->ia_gid);
1118 1108
1119 /* ensuring we don't even attempt to truncate a symlink */ 1109 /* ensuring we don't even attempt to truncate a symlink */
1120 if (S_ISLNK(inode->i_mode)) 1110 if (S_ISLNK(inode->i_mode))
1121 attr->ia_valid &= ~ATTR_SIZE; 1111 attr->ia_valid &= ~ATTR_SIZE;
1122 1112
1123 if (attr->ia_valid & ATTR_MODE)
1124 mlog(0, "mode change: %d\n", attr->ia_mode);
1125 if (attr->ia_valid & ATTR_UID)
1126 mlog(0, "uid change: %d\n", attr->ia_uid);
1127 if (attr->ia_valid & ATTR_GID)
1128 mlog(0, "gid change: %d\n", attr->ia_gid);
1129 if (attr->ia_valid & ATTR_SIZE)
1130 mlog(0, "size change...\n");
1131 if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
1132 mlog(0, "time change...\n");
1133
1134#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ 1113#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1135 | ATTR_GID | ATTR_UID | ATTR_MODE) 1114 | ATTR_GID | ATTR_UID | ATTR_MODE)
1136 if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) { 1115 if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1137 mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
1138 return 0; 1116 return 0;
1139 }
1140 1117
1141 status = inode_change_ok(inode, attr); 1118 status = inode_change_ok(inode, attr);
1142 if (status) 1119 if (status)
@@ -1274,7 +1251,6 @@ bail:
1274 mlog_errno(status); 1251 mlog_errno(status);
1275 } 1252 }
1276 1253
1277 mlog_exit(status);
1278 return status; 1254 return status;
1279} 1255}
1280 1256
@@ -1287,8 +1263,6 @@ int ocfs2_getattr(struct vfsmount *mnt,
1287 struct ocfs2_super *osb = sb->s_fs_info; 1263 struct ocfs2_super *osb = sb->s_fs_info;
1288 int err; 1264 int err;
1289 1265
1290 mlog_entry_void();
1291
1292 err = ocfs2_inode_revalidate(dentry); 1266 err = ocfs2_inode_revalidate(dentry);
1293 if (err) { 1267 if (err) {
1294 if (err != -ENOENT) 1268 if (err != -ENOENT)
@@ -1302,8 +1276,6 @@ int ocfs2_getattr(struct vfsmount *mnt,
1302 stat->blksize = osb->s_clustersize; 1276 stat->blksize = osb->s_clustersize;
1303 1277
1304bail: 1278bail:
1305 mlog_exit(err);
1306
1307 return err; 1279 return err;
1308} 1280}
1309 1281
@@ -1314,8 +1286,6 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
1314 if (flags & IPERM_FLAG_RCU) 1286 if (flags & IPERM_FLAG_RCU)
1315 return -ECHILD; 1287 return -ECHILD;
1316 1288
1317 mlog_entry_void();
1318
1319 ret = ocfs2_inode_lock(inode, NULL, 0); 1289 ret = ocfs2_inode_lock(inode, NULL, 0);
1320 if (ret) { 1290 if (ret) {
1321 if (ret != -ENOENT) 1291 if (ret != -ENOENT)
@@ -1327,7 +1297,6 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
1327 1297
1328 ocfs2_inode_unlock(inode, 0); 1298 ocfs2_inode_unlock(inode, 0);
1329out: 1299out:
1330 mlog_exit(ret);
1331 return ret; 1300 return ret;
1332} 1301}
1333 1302
@@ -1339,8 +1308,9 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
1339 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1308 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1340 struct ocfs2_dinode *di; 1309 struct ocfs2_dinode *di;
1341 1310
1342 mlog_entry("(Inode %llu, mode 0%o)\n", 1311 trace_ocfs2_write_remove_suid(
1343 (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode); 1312 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1313 inode->i_mode);
1344 1314
1345 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 1315 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1346 if (IS_ERR(handle)) { 1316 if (IS_ERR(handle)) {
@@ -1368,7 +1338,6 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
1368out_trans: 1338out_trans:
1369 ocfs2_commit_trans(osb, handle); 1339 ocfs2_commit_trans(osb, handle);
1370out: 1340out:
1371 mlog_exit(ret);
1372 return ret; 1341 return ret;
1373} 1342}
1374 1343
@@ -1547,8 +1516,9 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
1547 * partial clusters here. There's no need to worry about 1516 * partial clusters here. There's no need to worry about
1548 * physical allocation - the zeroing code knows to skip holes. 1517 * physical allocation - the zeroing code knows to skip holes.
1549 */ 1518 */
1550 mlog(0, "byte start: %llu, end: %llu\n", 1519 trace_ocfs2_zero_partial_clusters(
1551 (unsigned long long)start, (unsigned long long)end); 1520 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1521 (unsigned long long)start, (unsigned long long)end);
1552 1522
1553 /* 1523 /*
1554 * If both edges are on a cluster boundary then there's no 1524 * If both edges are on a cluster boundary then there's no
@@ -1572,8 +1542,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
1572 if (tmpend > end) 1542 if (tmpend > end)
1573 tmpend = end; 1543 tmpend = end;
1574 1544
1575 mlog(0, "1st range: start: %llu, tmpend: %llu\n", 1545 trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
1576 (unsigned long long)start, (unsigned long long)tmpend); 1546 (unsigned long long)tmpend);
1577 1547
1578 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); 1548 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1579 if (ret) 1549 if (ret)
@@ -1587,8 +1557,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
1587 */ 1557 */
1588 start = end & ~(osb->s_clustersize - 1); 1558 start = end & ~(osb->s_clustersize - 1);
1589 1559
1590 mlog(0, "2nd range: start: %llu, end: %llu\n", 1560 trace_ocfs2_zero_partial_clusters_range2(
1591 (unsigned long long)start, (unsigned long long)end); 1561 (unsigned long long)start, (unsigned long long)end);
1592 1562
1593 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end); 1563 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1594 if (ret) 1564 if (ret)
@@ -1688,6 +1658,11 @@ static int ocfs2_remove_inode_range(struct inode *inode,
1688 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); 1658 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1689 ocfs2_init_dealloc_ctxt(&dealloc); 1659 ocfs2_init_dealloc_ctxt(&dealloc);
1690 1660
1661 trace_ocfs2_remove_inode_range(
1662 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1663 (unsigned long long)byte_start,
1664 (unsigned long long)byte_len);
1665
1691 if (byte_len == 0) 1666 if (byte_len == 0)
1692 return 0; 1667 return 0;
1693 1668
@@ -1734,11 +1709,6 @@ static int ocfs2_remove_inode_range(struct inode *inode,
1734 trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits; 1709 trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1735 cluster_in_el = trunc_end; 1710 cluster_in_el = trunc_end;
1736 1711
1737 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n",
1738 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1739 (unsigned long long)byte_start,
1740 (unsigned long long)byte_len, trunc_start, trunc_end);
1741
1742 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len); 1712 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1743 if (ret) { 1713 if (ret) {
1744 mlog_errno(ret); 1714 mlog_errno(ret);
@@ -2093,7 +2063,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
2093 int ret = 0, meta_level = 0; 2063 int ret = 0, meta_level = 0;
2094 struct dentry *dentry = file->f_path.dentry; 2064 struct dentry *dentry = file->f_path.dentry;
2095 struct inode *inode = dentry->d_inode; 2065 struct inode *inode = dentry->d_inode;
2096 loff_t saved_pos, end; 2066 loff_t saved_pos = 0, end;
2097 2067
2098 /* 2068 /*
2099 * We start with a read level meta lock and only jump to an ex 2069 * We start with a read level meta lock and only jump to an ex
@@ -2132,12 +2102,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
2132 2102
2133 /* work on a copy of ppos until we're sure that we won't have 2103 /* work on a copy of ppos until we're sure that we won't have
2134 * to recalculate it due to relocking. */ 2104 * to recalculate it due to relocking. */
2135 if (appending) { 2105 if (appending)
2136 saved_pos = i_size_read(inode); 2106 saved_pos = i_size_read(inode);
2137 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos); 2107 else
2138 } else {
2139 saved_pos = *ppos; 2108 saved_pos = *ppos;
2140 }
2141 2109
2142 end = saved_pos + count; 2110 end = saved_pos + count;
2143 2111
@@ -2208,6 +2176,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
2208 *ppos = saved_pos; 2176 *ppos = saved_pos;
2209 2177
2210out_unlock: 2178out_unlock:
2179 trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2180 saved_pos, appending, count,
2181 direct_io, has_refcount);
2182
2211 if (meta_level >= 0) 2183 if (meta_level >= 0)
2212 ocfs2_inode_unlock(inode, meta_level); 2184 ocfs2_inode_unlock(inode, meta_level);
2213 2185
@@ -2233,10 +2205,11 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
2233 int full_coherency = !(osb->s_mount_opt & 2205 int full_coherency = !(osb->s_mount_opt &
2234 OCFS2_MOUNT_COHERENCY_BUFFERED); 2206 OCFS2_MOUNT_COHERENCY_BUFFERED);
2235 2207
2236 mlog_entry("(0x%p, %u, '%.*s')\n", file, 2208 trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
2237 (unsigned int)nr_segs, 2209 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2238 file->f_path.dentry->d_name.len, 2210 file->f_path.dentry->d_name.len,
2239 file->f_path.dentry->d_name.name); 2211 file->f_path.dentry->d_name.name,
2212 (unsigned int)nr_segs);
2240 2213
2241 if (iocb->ki_left == 0) 2214 if (iocb->ki_left == 0)
2242 return 0; 2215 return 0;
@@ -2402,7 +2375,6 @@ out_sems:
2402 2375
2403 if (written) 2376 if (written)
2404 ret = written; 2377 ret = written;
2405 mlog_exit(ret);
2406 return ret; 2378 return ret;
2407} 2379}
2408 2380
@@ -2438,10 +2410,11 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2438 .u.file = out, 2410 .u.file = out,
2439 }; 2411 };
2440 2412
2441 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe, 2413
2442 (unsigned int)len, 2414 trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
2443 out->f_path.dentry->d_name.len, 2415 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2444 out->f_path.dentry->d_name.name); 2416 out->f_path.dentry->d_name.len,
2417 out->f_path.dentry->d_name.name, len);
2445 2418
2446 if (pipe->inode) 2419 if (pipe->inode)
2447 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT); 2420 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
@@ -2485,7 +2458,6 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2485 balance_dirty_pages_ratelimited_nr(mapping, nr_pages); 2458 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
2486 } 2459 }
2487 2460
2488 mlog_exit(ret);
2489 return ret; 2461 return ret;
2490} 2462}
2491 2463
@@ -2498,10 +2470,10 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
2498 int ret = 0, lock_level = 0; 2470 int ret = 0, lock_level = 0;
2499 struct inode *inode = in->f_path.dentry->d_inode; 2471 struct inode *inode = in->f_path.dentry->d_inode;
2500 2472
2501 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe, 2473 trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2502 (unsigned int)len, 2474 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2503 in->f_path.dentry->d_name.len, 2475 in->f_path.dentry->d_name.len,
2504 in->f_path.dentry->d_name.name); 2476 in->f_path.dentry->d_name.name, len);
2505 2477
2506 /* 2478 /*
2507 * See the comment in ocfs2_file_aio_read() 2479 * See the comment in ocfs2_file_aio_read()
@@ -2516,7 +2488,6 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
2516 ret = generic_file_splice_read(in, ppos, pipe, len, flags); 2488 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2517 2489
2518bail: 2490bail:
2519 mlog_exit(ret);
2520 return ret; 2491 return ret;
2521} 2492}
2522 2493
@@ -2529,10 +2500,11 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2529 struct file *filp = iocb->ki_filp; 2500 struct file *filp = iocb->ki_filp;
2530 struct inode *inode = filp->f_path.dentry->d_inode; 2501 struct inode *inode = filp->f_path.dentry->d_inode;
2531 2502
2532 mlog_entry("(0x%p, %u, '%.*s')\n", filp, 2503 trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
2533 (unsigned int)nr_segs, 2504 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2534 filp->f_path.dentry->d_name.len, 2505 filp->f_path.dentry->d_name.len,
2535 filp->f_path.dentry->d_name.name); 2506 filp->f_path.dentry->d_name.name, nr_segs);
2507
2536 2508
2537 if (!inode) { 2509 if (!inode) {
2538 ret = -EINVAL; 2510 ret = -EINVAL;
@@ -2578,8 +2550,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2578 ocfs2_inode_unlock(inode, lock_level); 2550 ocfs2_inode_unlock(inode, lock_level);
2579 2551
2580 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos); 2552 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
2581 if (ret == -EINVAL) 2553 trace_generic_file_aio_read_ret(ret);
2582 mlog(0, "generic_file_aio_read returned -EINVAL\n");
2583 2554
2584 /* buffered aio wouldn't have proper lock coverage today */ 2555 /* buffered aio wouldn't have proper lock coverage today */
2585 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT)); 2556 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
@@ -2597,7 +2568,6 @@ bail:
2597 } 2568 }
2598 if (rw_level != -1) 2569 if (rw_level != -1)
2599 ocfs2_rw_unlock(inode, rw_level); 2570 ocfs2_rw_unlock(inode, rw_level);
2600 mlog_exit(ret);
2601 2571
2602 return ret; 2572 return ret;
2603} 2573}
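
Note: several of the new file.c tracepoints above take the same small tuple of scalar arguments, e.g. trace_ocfs2_truncate_file(blkno, old i_size, new i_size). Inside a trace header like the one sketched earlier, that kind of repetition is normally factored through DECLARE_EVENT_CLASS()/DEFINE_EVENT() rather than a full TRACE_EVENT() per call site. A minimal sketch, assuming a class keyed on three unsigned long long values (the actual class and event names in ocfs2_trace.h may differ):

/* Illustrative sketch only; not the actual ocfs2_trace.h contents. */
DECLARE_EVENT_CLASS(ocfs2_ull_ull_ull_class,
	TP_PROTO(unsigned long long blkno,
		 unsigned long long old_size,
		 unsigned long long new_size),
	TP_ARGS(blkno, old_size, new_size),
	TP_STRUCT__entry(
		__field(unsigned long long, blkno)
		__field(unsigned long long, old_size)
		__field(unsigned long long, new_size)
	),
	TP_fast_assign(
		__entry->blkno = blkno;
		__entry->old_size = old_size;
		__entry->new_size = new_size;
	),
	TP_printk("%llu %llu %llu",
		  __entry->blkno, __entry->old_size, __entry->new_size)
);

/* Each call site, e.g. trace_ocfs2_truncate_file(), then only needs: */
DEFINE_EVENT(ocfs2_ull_ull_ull_class, ocfs2_truncate_file,
	TP_PROTO(unsigned long long blkno,
		 unsigned long long old_size,
		 unsigned long long new_size),
	TP_ARGS(blkno, old_size, new_size)
);
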
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 1aa863dd901f..d8208b20dc53 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -28,7 +28,6 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/highmem.h> 29#include <linux/highmem.h>
30 30
31#define MLOG_MASK_PREFIX ML_SUPER
32#include <cluster/masklog.h> 31#include <cluster/masklog.h>
33 32
34#include "ocfs2.h" 33#include "ocfs2.h"
@@ -37,6 +36,7 @@
37#include "heartbeat.h" 36#include "heartbeat.h"
38#include "inode.h" 37#include "inode.h"
39#include "journal.h" 38#include "journal.h"
39#include "ocfs2_trace.h"
40 40
41#include "buffer_head_io.h" 41#include "buffer_head_io.h"
42 42
@@ -66,7 +66,7 @@ void ocfs2_do_node_down(int node_num, void *data)
66 66
67 BUG_ON(osb->node_num == node_num); 67 BUG_ON(osb->node_num == node_num);
68 68
69 mlog(0, "ocfs2: node down event for %d\n", node_num); 69 trace_ocfs2_do_node_down(node_num);
70 70
71 if (!osb->cconn) { 71 if (!osb->cconn) {
72 /* 72 /*
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 4068c6c4c6f6..177d3a6c2a5f 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -31,7 +31,6 @@
31 31
32#include <asm/byteorder.h> 32#include <asm/byteorder.h>
33 33
34#define MLOG_MASK_PREFIX ML_INODE
35#include <cluster/masklog.h> 34#include <cluster/masklog.h>
36 35
37#include "ocfs2.h" 36#include "ocfs2.h"
@@ -53,6 +52,7 @@
53#include "uptodate.h" 52#include "uptodate.h"
54#include "xattr.h" 53#include "xattr.h"
55#include "refcounttree.h" 54#include "refcounttree.h"
55#include "ocfs2_trace.h"
56 56
57#include "buffer_head_io.h" 57#include "buffer_head_io.h"
58 58
@@ -131,7 +131,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
131 struct super_block *sb = osb->sb; 131 struct super_block *sb = osb->sb;
132 struct ocfs2_find_inode_args args; 132 struct ocfs2_find_inode_args args;
133 133
134 mlog_entry("(blkno = %llu)\n", (unsigned long long)blkno); 134 trace_ocfs2_iget_begin((unsigned long long)blkno, flags,
135 sysfile_type);
135 136
136 /* Ok. By now we've either got the offsets passed to us by the 137 /* Ok. By now we've either got the offsets passed to us by the
137 * caller, or we just pulled them off the bh. Lets do some 138 * caller, or we just pulled them off the bh. Lets do some
@@ -152,16 +153,16 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
152 /* inode was *not* in the inode cache. 2.6.x requires 153 /* inode was *not* in the inode cache. 2.6.x requires
153 * us to do our own read_inode call and unlock it 154 * us to do our own read_inode call and unlock it
154 * afterwards. */ 155 * afterwards. */
155 if (inode && inode->i_state & I_NEW) {
156 mlog(0, "Inode was not in inode cache, reading it.\n");
157 ocfs2_read_locked_inode(inode, &args);
158 unlock_new_inode(inode);
159 }
160 if (inode == NULL) { 156 if (inode == NULL) {
161 inode = ERR_PTR(-ENOMEM); 157 inode = ERR_PTR(-ENOMEM);
162 mlog_errno(PTR_ERR(inode)); 158 mlog_errno(PTR_ERR(inode));
163 goto bail; 159 goto bail;
164 } 160 }
161 trace_ocfs2_iget5_locked(inode->i_state);
162 if (inode->i_state & I_NEW) {
163 ocfs2_read_locked_inode(inode, &args);
164 unlock_new_inode(inode);
165 }
165 if (is_bad_inode(inode)) { 166 if (is_bad_inode(inode)) {
166 iput(inode); 167 iput(inode);
167 inode = ERR_PTR(-ESTALE); 168 inode = ERR_PTR(-ESTALE);
@@ -170,9 +171,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
170 171
171bail: 172bail:
172 if (!IS_ERR(inode)) { 173 if (!IS_ERR(inode)) {
173 mlog(0, "returning inode with number %llu\n", 174 trace_ocfs2_iget_end(inode,
174 (unsigned long long)OCFS2_I(inode)->ip_blkno); 175 (unsigned long long)OCFS2_I(inode)->ip_blkno);
175 mlog_exit_ptr(inode);
176 } 176 }
177 177
178 return inode; 178 return inode;
@@ -192,18 +192,17 @@ static int ocfs2_find_actor(struct inode *inode, void *opaque)
192 struct ocfs2_inode_info *oi = OCFS2_I(inode); 192 struct ocfs2_inode_info *oi = OCFS2_I(inode);
193 int ret = 0; 193 int ret = 0;
194 194
195 mlog_entry("(0x%p, %lu, 0x%p)\n", inode, inode->i_ino, opaque);
196
197 args = opaque; 195 args = opaque;
198 196
199 mlog_bug_on_msg(!inode, "No inode in find actor!\n"); 197 mlog_bug_on_msg(!inode, "No inode in find actor!\n");
200 198
199 trace_ocfs2_find_actor(inode, inode->i_ino, opaque, args->fi_blkno);
200
201 if (oi->ip_blkno != args->fi_blkno) 201 if (oi->ip_blkno != args->fi_blkno)
202 goto bail; 202 goto bail;
203 203
204 ret = 1; 204 ret = 1;
205bail: 205bail:
206 mlog_exit(ret);
207 return ret; 206 return ret;
208} 207}
209 208
@@ -218,8 +217,6 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
218 static struct lock_class_key ocfs2_quota_ip_alloc_sem_key, 217 static struct lock_class_key ocfs2_quota_ip_alloc_sem_key,
219 ocfs2_file_ip_alloc_sem_key; 218 ocfs2_file_ip_alloc_sem_key;
220 219
221 mlog_entry("inode = %p, opaque = %p\n", inode, opaque);
222
223 inode->i_ino = args->fi_ino; 220 inode->i_ino = args->fi_ino;
224 OCFS2_I(inode)->ip_blkno = args->fi_blkno; 221 OCFS2_I(inode)->ip_blkno = args->fi_blkno;
225 if (args->fi_sysfile_type != 0) 222 if (args->fi_sysfile_type != 0)
@@ -235,7 +232,6 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
235 lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem, 232 lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
236 &ocfs2_file_ip_alloc_sem_key); 233 &ocfs2_file_ip_alloc_sem_key);
237 234
238 mlog_exit(0);
239 return 0; 235 return 0;
240} 236}
241 237
@@ -246,9 +242,6 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
246 struct ocfs2_super *osb; 242 struct ocfs2_super *osb;
247 int use_plocks = 1; 243 int use_plocks = 1;
248 244
249 mlog_entry("(0x%p, size:%llu)\n", inode,
250 (unsigned long long)le64_to_cpu(fe->i_size));
251
252 sb = inode->i_sb; 245 sb = inode->i_sb;
253 osb = OCFS2_SB(sb); 246 osb = OCFS2_SB(sb);
254 247
@@ -300,20 +293,20 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
300 293
301 inode->i_nlink = ocfs2_read_links_count(fe); 294 inode->i_nlink = ocfs2_read_links_count(fe);
302 295
296 trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno,
297 le32_to_cpu(fe->i_flags));
303 if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) { 298 if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) {
304 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE; 299 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
305 inode->i_flags |= S_NOQUOTA; 300 inode->i_flags |= S_NOQUOTA;
306 } 301 }
307 302
308 if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) { 303 if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
309 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; 304 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
310 mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino);
311 } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) { 305 } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
312 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; 306 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
313 } else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) { 307 } else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) {
314 inode->i_flags |= S_NOQUOTA; 308 inode->i_flags |= S_NOQUOTA;
315 } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) { 309 } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
316 mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino);
317 /* we can't actually hit this as read_inode can't 310 /* we can't actually hit this as read_inode can't
318 * handle superblocks today ;-) */ 311 * handle superblocks today ;-) */
319 BUG(); 312 BUG();
@@ -381,7 +374,6 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
381 if (S_ISDIR(inode->i_mode)) 374 if (S_ISDIR(inode->i_mode))
382 ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv, 375 ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
383 OCFS2_RESV_FLAG_DIR); 376 OCFS2_RESV_FLAG_DIR);
384 mlog_exit_void();
385} 377}
386 378
387static int ocfs2_read_locked_inode(struct inode *inode, 379static int ocfs2_read_locked_inode(struct inode *inode,
@@ -394,8 +386,6 @@ static int ocfs2_read_locked_inode(struct inode *inode,
394 int status, can_lock; 386 int status, can_lock;
395 u32 generation = 0; 387 u32 generation = 0;
396 388
397 mlog_entry("(0x%p, 0x%p)\n", inode, args);
398
399 status = -EINVAL; 389 status = -EINVAL;
400 if (inode == NULL || inode->i_sb == NULL) { 390 if (inode == NULL || inode->i_sb == NULL) {
401 mlog(ML_ERROR, "bad inode\n"); 391 mlog(ML_ERROR, "bad inode\n");
@@ -443,6 +433,9 @@ static int ocfs2_read_locked_inode(struct inode *inode,
443 && !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) 433 && !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY)
444 && !ocfs2_mount_local(osb); 434 && !ocfs2_mount_local(osb);
445 435
436 trace_ocfs2_read_locked_inode(
437 (unsigned long long)OCFS2_I(inode)->ip_blkno, can_lock);
438
446 /* 439 /*
447 * To maintain backwards compatibility with older versions of 440 * To maintain backwards compatibility with older versions of
448 * ocfs2-tools, we still store the generation value for system 441 * ocfs2-tools, we still store the generation value for system
@@ -534,7 +527,6 @@ bail:
534 if (args && bh) 527 if (args && bh)
535 brelse(bh); 528 brelse(bh);
536 529
537 mlog_exit(status);
538 return status; 530 return status;
539} 531}
540 532
@@ -551,8 +543,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
551 struct ocfs2_dinode *fe; 543 struct ocfs2_dinode *fe;
552 handle_t *handle = NULL; 544 handle_t *handle = NULL;
553 545
554 mlog_entry_void();
555
556 fe = (struct ocfs2_dinode *) fe_bh->b_data; 546 fe = (struct ocfs2_dinode *) fe_bh->b_data;
557 547
558 /* 548 /*
@@ -600,7 +590,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
600out: 590out:
601 if (handle) 591 if (handle)
602 ocfs2_commit_trans(osb, handle); 592 ocfs2_commit_trans(osb, handle);
603 mlog_exit(status);
604 return status; 593 return status;
605} 594}
606 595
@@ -696,8 +685,6 @@ static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb,
696 685
697 spin_lock(&osb->osb_lock); 686 spin_lock(&osb->osb_lock);
698 if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) { 687 if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) {
699 mlog(0, "Recovery is happening on orphan dir %d, will skip "
700 "this inode\n", slot);
701 ret = -EDEADLK; 688 ret = -EDEADLK;
702 goto out; 689 goto out;
703 } 690 }
@@ -706,6 +693,7 @@ static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb,
706 osb->osb_orphan_wipes[slot]++; 693 osb->osb_orphan_wipes[slot]++;
707out: 694out:
708 spin_unlock(&osb->osb_lock); 695 spin_unlock(&osb->osb_lock);
696 trace_ocfs2_check_orphan_recovery_state(slot, ret);
709 return ret; 697 return ret;
710} 698}
711 699
@@ -816,6 +804,10 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
816 struct ocfs2_inode_info *oi = OCFS2_I(inode); 804 struct ocfs2_inode_info *oi = OCFS2_I(inode);
817 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 805 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
818 806
807 trace_ocfs2_inode_is_valid_to_delete(current, osb->dc_task,
808 (unsigned long long)oi->ip_blkno,
809 oi->ip_flags);
810
819 /* We shouldn't be getting here for the root directory 811 /* We shouldn't be getting here for the root directory
820 * inode.. */ 812 * inode.. */
821 if (inode == osb->root_inode) { 813 if (inode == osb->root_inode) {
@@ -828,11 +820,8 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
828 * have to skip deleting this guy. That's OK though because 820 * have to skip deleting this guy. That's OK though because
829 * the node who's doing the actual deleting should handle it 821 * the node who's doing the actual deleting should handle it
830 * anyway. */ 822 * anyway. */
831 if (current == osb->dc_task) { 823 if (current == osb->dc_task)
832 mlog(0, "Skipping delete of %lu because we're currently "
833 "in downconvert\n", inode->i_ino);
834 goto bail; 824 goto bail;
835 }
836 825
837 spin_lock(&oi->ip_lock); 826 spin_lock(&oi->ip_lock);
838 /* OCFS2 *never* deletes system files. This should technically 827 /* OCFS2 *never* deletes system files. This should technically
@@ -847,11 +836,8 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
847 /* If we have allowd wipe of this inode for another node, it 836 /* If we have allowd wipe of this inode for another node, it
848 * will be marked here so we can safely skip it. Recovery will 837 * will be marked here so we can safely skip it. Recovery will
849 * cleanup any inodes we might inadvertantly skip here. */ 838 * cleanup any inodes we might inadvertantly skip here. */
850 if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) { 839 if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE)
851 mlog(0, "Skipping delete of %lu because another node "
852 "has done this for us.\n", inode->i_ino);
853 goto bail_unlock; 840 goto bail_unlock;
854 }
855 841
856 ret = 1; 842 ret = 1;
857bail_unlock: 843bail_unlock:
@@ -868,28 +854,27 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
868 struct buffer_head *di_bh, 854 struct buffer_head *di_bh,
869 int *wipe) 855 int *wipe)
870{ 856{
871 int status = 0; 857 int status = 0, reason = 0;
872 struct ocfs2_inode_info *oi = OCFS2_I(inode); 858 struct ocfs2_inode_info *oi = OCFS2_I(inode);
873 struct ocfs2_dinode *di; 859 struct ocfs2_dinode *di;
874 860
875 *wipe = 0; 861 *wipe = 0;
876 862
863 trace_ocfs2_query_inode_wipe_begin((unsigned long long)oi->ip_blkno,
864 inode->i_nlink);
865
877 /* While we were waiting for the cluster lock in 866 /* While we were waiting for the cluster lock in
878 * ocfs2_delete_inode, another node might have asked to delete 867 * ocfs2_delete_inode, another node might have asked to delete
879 * the inode. Recheck our flags to catch this. */ 868 * the inode. Recheck our flags to catch this. */
880 if (!ocfs2_inode_is_valid_to_delete(inode)) { 869 if (!ocfs2_inode_is_valid_to_delete(inode)) {
881 mlog(0, "Skipping delete of %llu because flags changed\n", 870 reason = 1;
882 (unsigned long long)oi->ip_blkno);
883 goto bail; 871 goto bail;
884 } 872 }
885 873
886 /* Now that we have an up to date inode, we can double check 874 /* Now that we have an up to date inode, we can double check
887 * the link count. */ 875 * the link count. */
888 if (inode->i_nlink) { 876 if (inode->i_nlink)
889 mlog(0, "Skipping delete of %llu because nlink = %u\n",
890 (unsigned long long)oi->ip_blkno, inode->i_nlink);
891 goto bail; 877 goto bail;
892 }
893 878
894 /* Do some basic inode verification... */ 879 /* Do some basic inode verification... */
895 di = (struct ocfs2_dinode *) di_bh->b_data; 880 di = (struct ocfs2_dinode *) di_bh->b_data;
@@ -904,9 +889,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
904 * ORPHANED_FL not. 889 * ORPHANED_FL not.
905 */ 890 */
906 if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) { 891 if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) {
907 mlog(0, "Reflinked inode %llu is no longer orphaned. " 892 reason = 2;
908 "it shouldn't be deleted\n",
909 (unsigned long long)oi->ip_blkno);
910 goto bail; 893 goto bail;
911 } 894 }
912 895
@@ -943,8 +926,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
943 status = ocfs2_try_open_lock(inode, 1); 926 status = ocfs2_try_open_lock(inode, 1);
944 if (status == -EAGAIN) { 927 if (status == -EAGAIN) {
945 status = 0; 928 status = 0;
946 mlog(0, "Skipping delete of %llu because it is in use on " 929 reason = 3;
947 "other nodes\n", (unsigned long long)oi->ip_blkno);
948 goto bail; 930 goto bail;
949 } 931 }
950 if (status < 0) { 932 if (status < 0) {
@@ -953,11 +935,10 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
953 } 935 }
954 936
955 *wipe = 1; 937 *wipe = 1;
956 mlog(0, "Inode %llu is ok to wipe from orphan dir %u\n", 938 trace_ocfs2_query_inode_wipe_succ(le16_to_cpu(di->i_orphaned_slot));
957 (unsigned long long)oi->ip_blkno,
958 le16_to_cpu(di->i_orphaned_slot));
959 939
960bail: 940bail:
941 trace_ocfs2_query_inode_wipe_end(status, reason);
961 return status; 942 return status;
962} 943}
963 944
@@ -967,8 +948,8 @@ bail:
967static void ocfs2_cleanup_delete_inode(struct inode *inode, 948static void ocfs2_cleanup_delete_inode(struct inode *inode,
968 int sync_data) 949 int sync_data)
969{ 950{
970 mlog(0, "Cleanup inode %llu, sync = %d\n", 951 trace_ocfs2_cleanup_delete_inode(
971 (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); 952 (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
972 if (sync_data) 953 if (sync_data)
973 write_inode_now(inode, 1); 954 write_inode_now(inode, 1);
974 truncate_inode_pages(&inode->i_data, 0); 955 truncate_inode_pages(&inode->i_data, 0);
@@ -980,15 +961,15 @@ static void ocfs2_delete_inode(struct inode *inode)
980 sigset_t oldset; 961 sigset_t oldset;
981 struct buffer_head *di_bh = NULL; 962 struct buffer_head *di_bh = NULL;
982 963
983 mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); 964 trace_ocfs2_delete_inode(inode->i_ino,
965 (unsigned long long)OCFS2_I(inode)->ip_blkno,
966 is_bad_inode(inode));
984 967
985 /* When we fail in read_inode() we mark inode as bad. The second test 968 /* When we fail in read_inode() we mark inode as bad. The second test
986 * catches the case when inode allocation fails before allocating 969 * catches the case when inode allocation fails before allocating
987 * a block for inode. */ 970 * a block for inode. */
988 if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) { 971 if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno)
989 mlog(0, "Skipping delete of bad inode\n");
990 goto bail; 972 goto bail;
991 }
992 973
993 dquot_initialize(inode); 974 dquot_initialize(inode);
994 975
@@ -1080,7 +1061,7 @@ bail_unlock_nfs_sync:
1080bail_unblock: 1061bail_unblock:
1081 ocfs2_unblock_signals(&oldset); 1062 ocfs2_unblock_signals(&oldset);
1082bail: 1063bail:
1083 mlog_exit_void(); 1064 return;
1084} 1065}
1085 1066
1086static void ocfs2_clear_inode(struct inode *inode) 1067static void ocfs2_clear_inode(struct inode *inode)
@@ -1088,11 +1069,9 @@ static void ocfs2_clear_inode(struct inode *inode)
1088 int status; 1069 int status;
1089 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1070 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1090 1071
1091 mlog_entry_void();
1092
1093 end_writeback(inode); 1072 end_writeback(inode);
1094 mlog(0, "Clearing inode: %llu, nlink = %u\n", 1073 trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
1095 (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink); 1074 inode->i_nlink);
1096 1075
1097 mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, 1076 mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
1098 "Inode=%lu\n", inode->i_ino); 1077 "Inode=%lu\n", inode->i_ino);
@@ -1181,8 +1160,6 @@ static void ocfs2_clear_inode(struct inode *inode)
1181 */ 1160 */
1182 jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal, 1161 jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
1183 &oi->ip_jinode); 1162 &oi->ip_jinode);
1184
1185 mlog_exit_void();
1186} 1163}
1187 1164
1188void ocfs2_evict_inode(struct inode *inode) 1165void ocfs2_evict_inode(struct inode *inode)
@@ -1204,17 +1181,14 @@ int ocfs2_drop_inode(struct inode *inode)
1204 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1181 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1205 int res; 1182 int res;
1206 1183
1207 mlog_entry_void(); 1184 trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno,
1208 1185 inode->i_nlink, oi->ip_flags);
1209 mlog(0, "Drop inode %llu, nlink = %u, ip_flags = 0x%x\n",
1210 (unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags);
1211 1186
1212 if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) 1187 if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)
1213 res = 1; 1188 res = 1;
1214 else 1189 else
1215 res = generic_drop_inode(inode); 1190 res = generic_drop_inode(inode);
1216 1191
1217 mlog_exit_void();
1218 return res; 1192 return res;
1219} 1193}
1220 1194
@@ -1226,11 +1200,11 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
1226 struct inode *inode = dentry->d_inode; 1200 struct inode *inode = dentry->d_inode;
1227 int status = 0; 1201 int status = 0;
1228 1202
1229 mlog_entry("(inode = 0x%p, ino = %llu)\n", inode, 1203 trace_ocfs2_inode_revalidate(inode,
1230 inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL); 1204 inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL,
1205 inode ? (unsigned long long)OCFS2_I(inode)->ip_flags : 0);
1231 1206
1232 if (!inode) { 1207 if (!inode) {
1233 mlog(0, "eep, no inode!\n");
1234 status = -ENOENT; 1208 status = -ENOENT;
1235 goto bail; 1209 goto bail;
1236 } 1210 }
@@ -1238,7 +1212,6 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
1238 spin_lock(&OCFS2_I(inode)->ip_lock); 1212 spin_lock(&OCFS2_I(inode)->ip_lock);
1239 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { 1213 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
1240 spin_unlock(&OCFS2_I(inode)->ip_lock); 1214 spin_unlock(&OCFS2_I(inode)->ip_lock);
1241 mlog(0, "inode deleted!\n");
1242 status = -ENOENT; 1215 status = -ENOENT;
1243 goto bail; 1216 goto bail;
1244 } 1217 }
@@ -1254,8 +1227,6 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
1254 } 1227 }
1255 ocfs2_inode_unlock(inode, 0); 1228 ocfs2_inode_unlock(inode, 0);
1256bail: 1229bail:
1257 mlog_exit(status);
1258
1259 return status; 1230 return status;
1260} 1231}
1261 1232
@@ -1271,8 +1242,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
1271 int status; 1242 int status;
1272 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data; 1243 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
1273 1244
1274 mlog_entry("(inode %llu)\n", 1245 trace_ocfs2_mark_inode_dirty((unsigned long long)OCFS2_I(inode)->ip_blkno);
1275 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1276 1246
1277 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, 1247 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1278 OCFS2_JOURNAL_ACCESS_WRITE); 1248 OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1302,7 +1272,6 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
1302 1272
1303 ocfs2_journal_dirty(handle, bh); 1273 ocfs2_journal_dirty(handle, bh);
1304leave: 1274leave:
1305 mlog_exit(status);
1306 return status; 1275 return status;
1307} 1276}
1308 1277
@@ -1345,8 +1314,7 @@ int ocfs2_validate_inode_block(struct super_block *sb,
1345 int rc; 1314 int rc;
1346 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; 1315 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
1347 1316
1348 mlog(0, "Validating dinode %llu\n", 1317 trace_ocfs2_validate_inode_block((unsigned long long)bh->b_blocknr);
1349 (unsigned long long)bh->b_blocknr);
1350 1318
1351 BUG_ON(!buffer_uptodate(bh)); 1319 BUG_ON(!buffer_uptodate(bh));
1352 1320
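
Note: in ocfs2_query_inode_wipe() above, the removed per-case mlog() messages are collapsed into a numeric reason recorded by trace_ocfs2_query_inode_wipe_end(status, reason): 1 when the inode is no longer valid to delete, 2 when a reflinked inode is no longer orphaned, 3 when the inode is still in use on another node, and 0 otherwise. If the event wanted to keep those codes human-readable, TP_printk() could decode them with __print_symbolic(); a hedged sketch, not the actual event definition:

/* Illustrative sketch only; the real event definition is in ocfs2_trace.h. */
#define show_wipe_reason(reason)				\
	__print_symbolic(reason,				\
		{ 0, "none" },					\
		{ 1, "not valid to delete" },			\
		{ 2, "reflinked, no longer orphaned" },		\
		{ 3, "in use on another node" })

TRACE_EVENT(ocfs2_query_inode_wipe_end,
	TP_PROTO(int status, int reason),
	TP_ARGS(status, reason),
	TP_STRUCT__entry(
		__field(int, status)
		__field(int, reason)
	),
	TP_fast_assign(
		__entry->status = status;
		__entry->reason = reason;
	),
	TP_printk("status %d, reason %s",
		  __entry->status, show_wipe_reason(__entry->reason))
);
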
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 09de77ce002a..8f13c5989eae 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -9,7 +9,6 @@
9#include <linux/mount.h> 9#include <linux/mount.h>
10#include <linux/compat.h> 10#include <linux/compat.h>
11 11
12#define MLOG_MASK_PREFIX ML_INODE
13#include <cluster/masklog.h> 12#include <cluster/masklog.h>
14 13
15#include "ocfs2.h" 14#include "ocfs2.h"
@@ -46,6 +45,22 @@ static inline void __o2info_set_request_error(struct ocfs2_info_request *kreq,
46#define o2info_set_request_error(a, b) \ 45#define o2info_set_request_error(a, b) \
47 __o2info_set_request_error((struct ocfs2_info_request *)&(a), b) 46 __o2info_set_request_error((struct ocfs2_info_request *)&(a), b)
48 47
48static inline void __o2info_set_request_filled(struct ocfs2_info_request *req)
49{
50 req->ir_flags |= OCFS2_INFO_FL_FILLED;
51}
52
53#define o2info_set_request_filled(a) \
54 __o2info_set_request_filled((struct ocfs2_info_request *)&(a))
55
56static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req)
57{
58 req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
59}
60
61#define o2info_clear_request_filled(a) \
62 __o2info_clear_request_filled((struct ocfs2_info_request *)&(a))
63
49static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) 64static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
50{ 65{
51 int status; 66 int status;
@@ -59,7 +74,6 @@ static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
59 *flags = OCFS2_I(inode)->ip_attr; 74 *flags = OCFS2_I(inode)->ip_attr;
60 ocfs2_inode_unlock(inode, 0); 75 ocfs2_inode_unlock(inode, 0);
61 76
62 mlog_exit(status);
63 return status; 77 return status;
64} 78}
65 79
@@ -125,7 +139,6 @@ bail:
125 139
126 brelse(bh); 140 brelse(bh);
127 141
128 mlog_exit(status);
129 return status; 142 return status;
130} 143}
131 144
@@ -139,7 +152,8 @@ int ocfs2_info_handle_blocksize(struct inode *inode,
139 goto bail; 152 goto bail;
140 153
141 oib.ib_blocksize = inode->i_sb->s_blocksize; 154 oib.ib_blocksize = inode->i_sb->s_blocksize;
142 oib.ib_req.ir_flags |= OCFS2_INFO_FL_FILLED; 155
156 o2info_set_request_filled(oib);
143 157
144 if (o2info_to_user(oib, req)) 158 if (o2info_to_user(oib, req))
145 goto bail; 159 goto bail;
@@ -163,7 +177,8 @@ int ocfs2_info_handle_clustersize(struct inode *inode,
163 goto bail; 177 goto bail;
164 178
165 oic.ic_clustersize = osb->s_clustersize; 179 oic.ic_clustersize = osb->s_clustersize;
166 oic.ic_req.ir_flags |= OCFS2_INFO_FL_FILLED; 180
181 o2info_set_request_filled(oic);
167 182
168 if (o2info_to_user(oic, req)) 183 if (o2info_to_user(oic, req))
169 goto bail; 184 goto bail;
@@ -187,7 +202,8 @@ int ocfs2_info_handle_maxslots(struct inode *inode,
187 goto bail; 202 goto bail;
188 203
189 oim.im_max_slots = osb->max_slots; 204 oim.im_max_slots = osb->max_slots;
190 oim.im_req.ir_flags |= OCFS2_INFO_FL_FILLED; 205
206 o2info_set_request_filled(oim);
191 207
192 if (o2info_to_user(oim, req)) 208 if (o2info_to_user(oim, req))
193 goto bail; 209 goto bail;
@@ -211,7 +227,8 @@ int ocfs2_info_handle_label(struct inode *inode,
211 goto bail; 227 goto bail;
212 228
213 memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); 229 memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
214 oil.il_req.ir_flags |= OCFS2_INFO_FL_FILLED; 230
231 o2info_set_request_filled(oil);
215 232
216 if (o2info_to_user(oil, req)) 233 if (o2info_to_user(oil, req))
217 goto bail; 234 goto bail;
@@ -235,7 +252,8 @@ int ocfs2_info_handle_uuid(struct inode *inode,
235 goto bail; 252 goto bail;
236 253
237 memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); 254 memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
238 oiu.iu_req.ir_flags |= OCFS2_INFO_FL_FILLED; 255
256 o2info_set_request_filled(oiu);
239 257
240 if (o2info_to_user(oiu, req)) 258 if (o2info_to_user(oiu, req))
241 goto bail; 259 goto bail;
@@ -261,7 +279,8 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
261 oif.if_compat_features = osb->s_feature_compat; 279 oif.if_compat_features = osb->s_feature_compat;
262 oif.if_incompat_features = osb->s_feature_incompat; 280 oif.if_incompat_features = osb->s_feature_incompat;
263 oif.if_ro_compat_features = osb->s_feature_ro_compat; 281 oif.if_ro_compat_features = osb->s_feature_ro_compat;
264 oif.if_req.ir_flags |= OCFS2_INFO_FL_FILLED; 282
283 o2info_set_request_filled(oif);
265 284
266 if (o2info_to_user(oif, req)) 285 if (o2info_to_user(oif, req))
267 goto bail; 286 goto bail;
@@ -286,7 +305,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
286 305
287 oij.ij_journal_size = osb->journal->j_inode->i_size; 306 oij.ij_journal_size = osb->journal->j_inode->i_size;
288 307
289 oij.ij_req.ir_flags |= OCFS2_INFO_FL_FILLED; 308 o2info_set_request_filled(oij);
290 309
291 if (o2info_to_user(oij, req)) 310 if (o2info_to_user(oij, req))
292 goto bail; 311 goto bail;
@@ -308,7 +327,7 @@ int ocfs2_info_handle_unknown(struct inode *inode,
308 if (o2info_from_user(oir, req)) 327 if (o2info_from_user(oir, req))
309 goto bail; 328 goto bail;
310 329
311 oir.ir_flags &= ~OCFS2_INFO_FL_FILLED; 330 o2info_clear_request_filled(oir);
312 331
313 if (o2info_to_user(oir, req)) 332 if (o2info_to_user(oir, req))
314 goto bail; 333 goto bail;
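
For reference: the o2info_set_request_filled()/o2info_clear_request_filled() helpers introduced above work because every ocfs2_info_* request embeds the generic struct ocfs2_info_request as its first member, so casting the request's address to the header type is well defined. A minimal stand-alone sketch of that pattern follows; the names info_request, info_blocksize and INFO_FL_FILLED are illustrative only, not the kernel's.

#include <stdio.h>

#define INFO_FL_FILLED 0x1u

struct info_request {            /* common header, like ocfs2_info_request */
	unsigned int ir_flags;
};

struct info_blocksize {          /* one concrete request type */
	struct info_request ib_req;  /* header must stay the first member */
	unsigned int ib_blocksize;
};

static inline void __set_request_filled(struct info_request *req)
{
	req->ir_flags |= INFO_FL_FILLED;
}

/* The cast is safe because ib_req is the first member of every request. */
#define set_request_filled(r) \
	__set_request_filled((struct info_request *)&(r))

int main(void)
{
	struct info_blocksize oib = { { 0 }, 4096 };

	set_request_filled(oib);
	printf("flags=0x%x blocksize=%u\n", oib.ib_req.ir_flags, oib.ib_blocksize);
	return 0;
}

The kernel macros do the same thing for each ocfs2_info_* type, keeping the cast in one place instead of open-coding the ir_flags update in every handler.
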
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index faa2303dbf0a..dcc2d9327150 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -31,7 +31,6 @@
31#include <linux/time.h> 31#include <linux/time.h>
32#include <linux/random.h> 32#include <linux/random.h>
33 33
34#define MLOG_MASK_PREFIX ML_JOURNAL
35#include <cluster/masklog.h> 34#include <cluster/masklog.h>
36 35
37#include "ocfs2.h" 36#include "ocfs2.h"
@@ -52,6 +51,7 @@
52#include "quota.h" 51#include "quota.h"
53 52
54#include "buffer_head_io.h" 53#include "buffer_head_io.h"
54#include "ocfs2_trace.h"
55 55
56DEFINE_SPINLOCK(trans_inc_lock); 56DEFINE_SPINLOCK(trans_inc_lock);
57 57
@@ -303,16 +303,15 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
303 unsigned int flushed; 303 unsigned int flushed;
304 struct ocfs2_journal *journal = NULL; 304 struct ocfs2_journal *journal = NULL;
305 305
306 mlog_entry_void();
307
308 journal = osb->journal; 306 journal = osb->journal;
309 307
310 /* Flush all pending commits and checkpoint the journal. */ 308 /* Flush all pending commits and checkpoint the journal. */
311 down_write(&journal->j_trans_barrier); 309 down_write(&journal->j_trans_barrier);
312 310
313 if (atomic_read(&journal->j_num_trans) == 0) { 311 flushed = atomic_read(&journal->j_num_trans);
312 trace_ocfs2_commit_cache_begin(flushed);
313 if (flushed == 0) {
314 up_write(&journal->j_trans_barrier); 314 up_write(&journal->j_trans_barrier);
315 mlog(0, "No transactions for me to flush!\n");
316 goto finally; 315 goto finally;
317 } 316 }
318 317
@@ -331,13 +330,11 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
331 atomic_set(&journal->j_num_trans, 0); 330 atomic_set(&journal->j_num_trans, 0);
332 up_write(&journal->j_trans_barrier); 331 up_write(&journal->j_trans_barrier);
333 332
334 mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n", 333 trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);
335 journal->j_trans_id, flushed);
336 334
337 ocfs2_wake_downconvert_thread(osb); 335 ocfs2_wake_downconvert_thread(osb);
338 wake_up(&journal->j_checkpointed); 336 wake_up(&journal->j_checkpointed);
339finally: 337finally:
340 mlog_exit(status);
341 return status; 338 return status;
342} 339}
343 340
@@ -425,9 +422,8 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
425 return 0; 422 return 0;
426 423
427 old_nblocks = handle->h_buffer_credits; 424 old_nblocks = handle->h_buffer_credits;
428 mlog_entry_void();
429 425
430 mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); 426 trace_ocfs2_extend_trans(old_nblocks, nblocks);
431 427
432#ifdef CONFIG_OCFS2_DEBUG_FS 428#ifdef CONFIG_OCFS2_DEBUG_FS
433 status = 1; 429 status = 1;
@@ -440,9 +436,7 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
440#endif 436#endif
441 437
442 if (status > 0) { 438 if (status > 0) {
443 mlog(0, 439 trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);
444 "jbd2_journal_extend failed, trying "
445 "jbd2_journal_restart\n");
446 status = jbd2_journal_restart(handle, 440 status = jbd2_journal_restart(handle,
447 old_nblocks + nblocks); 441 old_nblocks + nblocks);
448 if (status < 0) { 442 if (status < 0) {
@@ -453,8 +447,6 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
453 447
454 status = 0; 448 status = 0;
455bail: 449bail:
456
457 mlog_exit(status);
458 return status; 450 return status;
459} 451}
460 452
@@ -622,12 +614,9 @@ static int __ocfs2_journal_access(handle_t *handle,
622 BUG_ON(!handle); 614 BUG_ON(!handle);
623 BUG_ON(!bh); 615 BUG_ON(!bh);
624 616
625 mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", 617 trace_ocfs2_journal_access(
626 (unsigned long long)bh->b_blocknr, type, 618 (unsigned long long)ocfs2_metadata_cache_owner(ci),
627 (type == OCFS2_JOURNAL_ACCESS_CREATE) ? 619 (unsigned long long)bh->b_blocknr, type, bh->b_size);
628 "OCFS2_JOURNAL_ACCESS_CREATE" :
629 "OCFS2_JOURNAL_ACCESS_WRITE",
630 bh->b_size);
631 620
632 /* we can safely remove this assertion after testing. */ 621 /* we can safely remove this assertion after testing. */
633 if (!buffer_uptodate(bh)) { 622 if (!buffer_uptodate(bh)) {
@@ -668,7 +657,6 @@ static int __ocfs2_journal_access(handle_t *handle,
668 mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", 657 mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
669 status, type); 658 status, type);
670 659
671 mlog_exit(status);
672 return status; 660 return status;
673} 661}
674 662
@@ -737,13 +725,10 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
737{ 725{
738 int status; 726 int status;
739 727
740 mlog_entry("(bh->b_blocknr=%llu)\n", 728 trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);
741 (unsigned long long)bh->b_blocknr);
742 729
743 status = jbd2_journal_dirty_metadata(handle, bh); 730 status = jbd2_journal_dirty_metadata(handle, bh);
744 BUG_ON(status); 731 BUG_ON(status);
745
746 mlog_exit_void();
747} 732}
748 733
749#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) 734#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
@@ -775,8 +760,6 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
775 struct ocfs2_super *osb; 760 struct ocfs2_super *osb;
776 int inode_lock = 0; 761 int inode_lock = 0;
777 762
778 mlog_entry_void();
779
780 BUG_ON(!journal); 763 BUG_ON(!journal);
781 764
782 osb = journal->j_osb; 765 osb = journal->j_osb;
@@ -820,10 +803,9 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
820 goto done; 803 goto done;
821 } 804 }
822 805
823 mlog(0, "inode->i_size = %lld\n", inode->i_size); 806 trace_ocfs2_journal_init(inode->i_size,
824 mlog(0, "inode->i_blocks = %llu\n", 807 (unsigned long long)inode->i_blocks,
825 (unsigned long long)inode->i_blocks); 808 OCFS2_I(inode)->ip_clusters);
826 mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
827 809
 828 /* call the kernel's journal init function now */ 810 /* call the kernel's journal init function now */
829 j_journal = jbd2_journal_init_inode(inode); 811 j_journal = jbd2_journal_init_inode(inode);
@@ -833,8 +815,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
833 goto done; 815 goto done;
834 } 816 }
835 817
836 mlog(0, "Returned from jbd2_journal_init_inode\n"); 818 trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen);
837 mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);
838 819
839 *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) & 820 *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
840 OCFS2_JOURNAL_DIRTY_FL); 821 OCFS2_JOURNAL_DIRTY_FL);
@@ -859,7 +840,6 @@ done:
859 } 840 }
860 } 841 }
861 842
862 mlog_exit(status);
863 return status; 843 return status;
864} 844}
865 845
@@ -882,8 +862,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
882 struct buffer_head *bh = journal->j_bh; 862 struct buffer_head *bh = journal->j_bh;
883 struct ocfs2_dinode *fe; 863 struct ocfs2_dinode *fe;
884 864
885 mlog_entry_void();
886
887 fe = (struct ocfs2_dinode *)bh->b_data; 865 fe = (struct ocfs2_dinode *)bh->b_data;
888 866
889 /* The journal bh on the osb always comes from ocfs2_journal_init() 867 /* The journal bh on the osb always comes from ocfs2_journal_init()
@@ -906,7 +884,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
906 if (status < 0) 884 if (status < 0)
907 mlog_errno(status); 885 mlog_errno(status);
908 886
909 mlog_exit(status);
910 return status; 887 return status;
911} 888}
912 889
@@ -921,8 +898,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
921 struct inode *inode = NULL; 898 struct inode *inode = NULL;
922 int num_running_trans = 0; 899 int num_running_trans = 0;
923 900
924 mlog_entry_void();
925
926 BUG_ON(!osb); 901 BUG_ON(!osb);
927 902
928 journal = osb->journal; 903 journal = osb->journal;
@@ -939,10 +914,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
939 BUG(); 914 BUG();
940 915
941 num_running_trans = atomic_read(&(osb->journal->j_num_trans)); 916 num_running_trans = atomic_read(&(osb->journal->j_num_trans));
942 if (num_running_trans > 0) 917 trace_ocfs2_journal_shutdown(num_running_trans);
943 mlog(0, "Shutting down journal: must wait on %d "
944 "running transactions!\n",
945 num_running_trans);
946 918
947 /* Do a commit_cache here. It will flush our journal, *and* 919 /* Do a commit_cache here. It will flush our journal, *and*
948 * release any locks that are still held. 920 * release any locks that are still held.
@@ -955,7 +927,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
955 * completely destroy the journal. */ 927 * completely destroy the journal. */
956 if (osb->commit_task) { 928 if (osb->commit_task) {
957 /* Wait for the commit thread */ 929 /* Wait for the commit thread */
958 mlog(0, "Waiting for ocfs2commit to exit....\n"); 930 trace_ocfs2_journal_shutdown_wait(osb->commit_task);
959 kthread_stop(osb->commit_task); 931 kthread_stop(osb->commit_task);
960 osb->commit_task = NULL; 932 osb->commit_task = NULL;
961 } 933 }
@@ -998,7 +970,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
998done: 970done:
999 if (inode) 971 if (inode)
1000 iput(inode); 972 iput(inode);
1001 mlog_exit_void();
1002} 973}
1003 974
1004static void ocfs2_clear_journal_error(struct super_block *sb, 975static void ocfs2_clear_journal_error(struct super_block *sb,
@@ -1024,8 +995,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
1024 int status = 0; 995 int status = 0;
1025 struct ocfs2_super *osb; 996 struct ocfs2_super *osb;
1026 997
1027 mlog_entry_void();
1028
1029 BUG_ON(!journal); 998 BUG_ON(!journal);
1030 999
1031 osb = journal->j_osb; 1000 osb = journal->j_osb;
@@ -1059,7 +1028,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
1059 osb->commit_task = NULL; 1028 osb->commit_task = NULL;
1060 1029
1061done: 1030done:
1062 mlog_exit(status);
1063 return status; 1031 return status;
1064} 1032}
1065 1033
@@ -1070,8 +1038,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
1070{ 1038{
1071 int status; 1039 int status;
1072 1040
1073 mlog_entry_void();
1074
1075 BUG_ON(!journal); 1041 BUG_ON(!journal);
1076 1042
1077 status = jbd2_journal_wipe(journal->j_journal, full); 1043 status = jbd2_journal_wipe(journal->j_journal, full);
@@ -1085,7 +1051,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
1085 mlog_errno(status); 1051 mlog_errno(status);
1086 1052
1087bail: 1053bail:
1088 mlog_exit(status);
1089 return status; 1054 return status;
1090} 1055}
1091 1056
@@ -1124,8 +1089,6 @@ static int ocfs2_force_read_journal(struct inode *inode)
1124#define CONCURRENT_JOURNAL_FILL 32ULL 1089#define CONCURRENT_JOURNAL_FILL 32ULL
1125 struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; 1090 struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
1126 1091
1127 mlog_entry_void();
1128
1129 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); 1092 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
1130 1093
1131 num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size); 1094 num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
@@ -1161,7 +1124,6 @@ static int ocfs2_force_read_journal(struct inode *inode)
1161bail: 1124bail:
1162 for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) 1125 for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
1163 brelse(bhs[i]); 1126 brelse(bhs[i]);
1164 mlog_exit(status);
1165 return status; 1127 return status;
1166} 1128}
1167 1129
@@ -1185,7 +1147,7 @@ struct ocfs2_la_recovery_item {
1185 */ 1147 */
1186void ocfs2_complete_recovery(struct work_struct *work) 1148void ocfs2_complete_recovery(struct work_struct *work)
1187{ 1149{
1188 int ret; 1150 int ret = 0;
1189 struct ocfs2_journal *journal = 1151 struct ocfs2_journal *journal =
1190 container_of(work, struct ocfs2_journal, j_recovery_work); 1152 container_of(work, struct ocfs2_journal, j_recovery_work);
1191 struct ocfs2_super *osb = journal->j_osb; 1153 struct ocfs2_super *osb = journal->j_osb;
@@ -1194,9 +1156,8 @@ void ocfs2_complete_recovery(struct work_struct *work)
1194 struct ocfs2_quota_recovery *qrec; 1156 struct ocfs2_quota_recovery *qrec;
1195 LIST_HEAD(tmp_la_list); 1157 LIST_HEAD(tmp_la_list);
1196 1158
1197 mlog_entry_void(); 1159 trace_ocfs2_complete_recovery(
1198 1160 (unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno);
1199 mlog(0, "completing recovery from keventd\n");
1200 1161
1201 spin_lock(&journal->j_lock); 1162 spin_lock(&journal->j_lock);
1202 list_splice_init(&journal->j_la_cleanups, &tmp_la_list); 1163 list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
@@ -1205,15 +1166,18 @@ void ocfs2_complete_recovery(struct work_struct *work)
1205 list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { 1166 list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
1206 list_del_init(&item->lri_list); 1167 list_del_init(&item->lri_list);
1207 1168
1208 mlog(0, "Complete recovery for slot %d\n", item->lri_slot);
1209
1210 ocfs2_wait_on_quotas(osb); 1169 ocfs2_wait_on_quotas(osb);
1211 1170
1212 la_dinode = item->lri_la_dinode; 1171 la_dinode = item->lri_la_dinode;
1213 if (la_dinode) { 1172 tl_dinode = item->lri_tl_dinode;
1214 mlog(0, "Clean up local alloc %llu\n", 1173 qrec = item->lri_qrec;
1215 (unsigned long long)le64_to_cpu(la_dinode->i_blkno)); 1174
1175 trace_ocfs2_complete_recovery_slot(item->lri_slot,
1176 la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
1177 tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0,
1178 qrec);
1216 1179
1180 if (la_dinode) {
1217 ret = ocfs2_complete_local_alloc_recovery(osb, 1181 ret = ocfs2_complete_local_alloc_recovery(osb,
1218 la_dinode); 1182 la_dinode);
1219 if (ret < 0) 1183 if (ret < 0)
@@ -1222,11 +1186,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
1222 kfree(la_dinode); 1186 kfree(la_dinode);
1223 } 1187 }
1224 1188
1225 tl_dinode = item->lri_tl_dinode;
1226 if (tl_dinode) { 1189 if (tl_dinode) {
1227 mlog(0, "Clean up truncate log %llu\n",
1228 (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));
1229
1230 ret = ocfs2_complete_truncate_log_recovery(osb, 1190 ret = ocfs2_complete_truncate_log_recovery(osb,
1231 tl_dinode); 1191 tl_dinode);
1232 if (ret < 0) 1192 if (ret < 0)
@@ -1239,9 +1199,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
1239 if (ret < 0) 1199 if (ret < 0)
1240 mlog_errno(ret); 1200 mlog_errno(ret);
1241 1201
1242 qrec = item->lri_qrec;
1243 if (qrec) { 1202 if (qrec) {
1244 mlog(0, "Recovering quota files");
1245 ret = ocfs2_finish_quota_recovery(osb, qrec, 1203 ret = ocfs2_finish_quota_recovery(osb, qrec,
1246 item->lri_slot); 1204 item->lri_slot);
1247 if (ret < 0) 1205 if (ret < 0)
@@ -1252,8 +1210,7 @@ void ocfs2_complete_recovery(struct work_struct *work)
1252 kfree(item); 1210 kfree(item);
1253 } 1211 }
1254 1212
1255 mlog(0, "Recovery completion\n"); 1213 trace_ocfs2_complete_recovery_end(ret);
1256 mlog_exit_void();
1257} 1214}
1258 1215
1259/* NOTE: This function always eats your references to la_dinode and 1216/* NOTE: This function always eats your references to la_dinode and
@@ -1339,8 +1296,6 @@ static int __ocfs2_recovery_thread(void *arg)
1339 int rm_quota_used = 0, i; 1296 int rm_quota_used = 0, i;
1340 struct ocfs2_quota_recovery *qrec; 1297 struct ocfs2_quota_recovery *qrec;
1341 1298
1342 mlog_entry_void();
1343
1344 status = ocfs2_wait_on_mount(osb); 1299 status = ocfs2_wait_on_mount(osb);
1345 if (status < 0) { 1300 if (status < 0) {
1346 goto bail; 1301 goto bail;
@@ -1372,15 +1327,12 @@ restart:
1372 * clear it until ocfs2_recover_node() has succeeded. */ 1327 * clear it until ocfs2_recover_node() has succeeded. */
1373 node_num = rm->rm_entries[0]; 1328 node_num = rm->rm_entries[0];
1374 spin_unlock(&osb->osb_lock); 1329 spin_unlock(&osb->osb_lock);
1375 mlog(0, "checking node %d\n", node_num);
1376 slot_num = ocfs2_node_num_to_slot(osb, node_num); 1330 slot_num = ocfs2_node_num_to_slot(osb, node_num);
1331 trace_ocfs2_recovery_thread_node(node_num, slot_num);
1377 if (slot_num == -ENOENT) { 1332 if (slot_num == -ENOENT) {
1378 status = 0; 1333 status = 0;
1379 mlog(0, "no slot for this node, so no recovery"
1380 "required.\n");
1381 goto skip_recovery; 1334 goto skip_recovery;
1382 } 1335 }
1383 mlog(0, "node %d was using slot %d\n", node_num, slot_num);
1384 1336
1385 /* It is a bit subtle with quota recovery. We cannot do it 1337 /* It is a bit subtle with quota recovery. We cannot do it
1386 * immediately because we have to obtain cluster locks from 1338 * immediately because we have to obtain cluster locks from
@@ -1407,7 +1359,7 @@ skip_recovery:
1407 spin_lock(&osb->osb_lock); 1359 spin_lock(&osb->osb_lock);
1408 } 1360 }
1409 spin_unlock(&osb->osb_lock); 1361 spin_unlock(&osb->osb_lock);
1410 mlog(0, "All nodes recovered\n"); 1362 trace_ocfs2_recovery_thread_end(status);
1411 1363
1412 /* Refresh all journal recovery generations from disk */ 1364 /* Refresh all journal recovery generations from disk */
1413 status = ocfs2_check_journals_nolocks(osb); 1365 status = ocfs2_check_journals_nolocks(osb);
@@ -1451,7 +1403,6 @@ bail:
1451 if (rm_quota) 1403 if (rm_quota)
1452 kfree(rm_quota); 1404 kfree(rm_quota);
1453 1405
1454 mlog_exit(status);
1455 /* no one is calling kthread_stop() for us so the kthread() api 1406 /* no one is calling kthread_stop() for us so the kthread() api
1456 * requires that we call do_exit(). And it isn't exported, but 1407 * requires that we call do_exit(). And it isn't exported, but
1457 * complete_and_exit() seems to be a minimal wrapper around it. */ 1408 * complete_and_exit() seems to be a minimal wrapper around it. */
@@ -1461,19 +1412,15 @@ bail:
1461 1412
1462void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) 1413void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
1463{ 1414{
1464 mlog_entry("(node_num=%d, osb->node_num = %d)\n",
1465 node_num, osb->node_num);
1466
1467 mutex_lock(&osb->recovery_lock); 1415 mutex_lock(&osb->recovery_lock);
1468 if (osb->disable_recovery)
1469 goto out;
1470 1416
1471 /* People waiting on recovery will wait on 1417 trace_ocfs2_recovery_thread(node_num, osb->node_num,
1472 * the recovery map to empty. */ 1418 osb->disable_recovery, osb->recovery_thread_task,
1473 if (ocfs2_recovery_map_set(osb, node_num)) 1419 osb->disable_recovery ?
1474 mlog(0, "node %d already in recovery map.\n", node_num); 1420 -1 : ocfs2_recovery_map_set(osb, node_num));
1475 1421
1476 mlog(0, "starting recovery thread...\n"); 1422 if (osb->disable_recovery)
1423 goto out;
1477 1424
1478 if (osb->recovery_thread_task) 1425 if (osb->recovery_thread_task)
1479 goto out; 1426 goto out;
@@ -1488,8 +1435,6 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
1488out: 1435out:
1489 mutex_unlock(&osb->recovery_lock); 1436 mutex_unlock(&osb->recovery_lock);
1490 wake_up(&osb->recovery_event); 1437 wake_up(&osb->recovery_event);
1491
1492 mlog_exit_void();
1493} 1438}
1494 1439
1495static int ocfs2_read_journal_inode(struct ocfs2_super *osb, 1440static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
@@ -1563,7 +1508,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1563 * If not, it needs recovery. 1508 * If not, it needs recovery.
1564 */ 1509 */
1565 if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) { 1510 if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
1566 mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num, 1511 trace_ocfs2_replay_journal_recovered(slot_num,
1567 osb->slot_recovery_generations[slot_num], slot_reco_gen); 1512 osb->slot_recovery_generations[slot_num], slot_reco_gen);
1568 osb->slot_recovery_generations[slot_num] = slot_reco_gen; 1513 osb->slot_recovery_generations[slot_num] = slot_reco_gen;
1569 status = -EBUSY; 1514 status = -EBUSY;
@@ -1574,7 +1519,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1574 1519
1575 status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); 1520 status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
1576 if (status < 0) { 1521 if (status < 0) {
1577 mlog(0, "status returned from ocfs2_inode_lock=%d\n", status); 1522 trace_ocfs2_replay_journal_lock_err(status);
1578 if (status != -ERESTARTSYS) 1523 if (status != -ERESTARTSYS)
1579 mlog(ML_ERROR, "Could not lock journal!\n"); 1524 mlog(ML_ERROR, "Could not lock journal!\n");
1580 goto done; 1525 goto done;
@@ -1587,7 +1532,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1587 slot_reco_gen = ocfs2_get_recovery_generation(fe); 1532 slot_reco_gen = ocfs2_get_recovery_generation(fe);
1588 1533
1589 if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) { 1534 if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
1590 mlog(0, "No recovery required for node %d\n", node_num); 1535 trace_ocfs2_replay_journal_skip(node_num);
1591 /* Refresh recovery generation for the slot */ 1536 /* Refresh recovery generation for the slot */
1592 osb->slot_recovery_generations[slot_num] = slot_reco_gen; 1537 osb->slot_recovery_generations[slot_num] = slot_reco_gen;
1593 goto done; 1538 goto done;
@@ -1608,7 +1553,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1608 goto done; 1553 goto done;
1609 } 1554 }
1610 1555
1611 mlog(0, "calling journal_init_inode\n");
1612 journal = jbd2_journal_init_inode(inode); 1556 journal = jbd2_journal_init_inode(inode);
1613 if (journal == NULL) { 1557 if (journal == NULL) {
1614 mlog(ML_ERROR, "Linux journal layer error\n"); 1558 mlog(ML_ERROR, "Linux journal layer error\n");
@@ -1628,7 +1572,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
1628 ocfs2_clear_journal_error(osb->sb, journal, slot_num); 1572 ocfs2_clear_journal_error(osb->sb, journal, slot_num);
1629 1573
1630 /* wipe the journal */ 1574 /* wipe the journal */
1631 mlog(0, "flushing the journal.\n");
1632 jbd2_journal_lock_updates(journal); 1575 jbd2_journal_lock_updates(journal);
1633 status = jbd2_journal_flush(journal); 1576 status = jbd2_journal_flush(journal);
1634 jbd2_journal_unlock_updates(journal); 1577 jbd2_journal_unlock_updates(journal);
@@ -1665,7 +1608,6 @@ done:
1665 1608
1666 brelse(bh); 1609 brelse(bh);
1667 1610
1668 mlog_exit(status);
1669 return status; 1611 return status;
1670} 1612}
1671 1613
@@ -1688,8 +1630,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
1688 struct ocfs2_dinode *la_copy = NULL; 1630 struct ocfs2_dinode *la_copy = NULL;
1689 struct ocfs2_dinode *tl_copy = NULL; 1631 struct ocfs2_dinode *tl_copy = NULL;
1690 1632
1691 mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n", 1633 trace_ocfs2_recover_node(node_num, slot_num, osb->node_num);
1692 node_num, slot_num, osb->node_num);
1693 1634
1694 /* Should not ever be called to recover ourselves -- in that 1635 /* Should not ever be called to recover ourselves -- in that
1695 * case we should've called ocfs2_journal_load instead. */ 1636 * case we should've called ocfs2_journal_load instead. */
@@ -1698,9 +1639,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
1698 status = ocfs2_replay_journal(osb, node_num, slot_num); 1639 status = ocfs2_replay_journal(osb, node_num, slot_num);
1699 if (status < 0) { 1640 if (status < 0) {
1700 if (status == -EBUSY) { 1641 if (status == -EBUSY) {
1701 mlog(0, "Skipping recovery for slot %u (node %u) " 1642 trace_ocfs2_recover_node_skip(slot_num, node_num);
1702 "as another node has recovered it\n", slot_num,
1703 node_num);
1704 status = 0; 1643 status = 0;
1705 goto done; 1644 goto done;
1706 } 1645 }
@@ -1735,7 +1674,6 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
1735 status = 0; 1674 status = 0;
1736done: 1675done:
1737 1676
1738 mlog_exit(status);
1739 return status; 1677 return status;
1740} 1678}
1741 1679
@@ -1808,8 +1746,8 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
1808 spin_lock(&osb->osb_lock); 1746 spin_lock(&osb->osb_lock);
1809 osb->slot_recovery_generations[i] = gen; 1747 osb->slot_recovery_generations[i] = gen;
1810 1748
1811 mlog(0, "Slot %u recovery generation is %u\n", i, 1749 trace_ocfs2_mark_dead_nodes(i,
1812 osb->slot_recovery_generations[i]); 1750 osb->slot_recovery_generations[i]);
1813 1751
1814 if (i == osb->slot_num) { 1752 if (i == osb->slot_num) {
1815 spin_unlock(&osb->osb_lock); 1753 spin_unlock(&osb->osb_lock);
@@ -1845,7 +1783,6 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
1845 1783
1846 status = 0; 1784 status = 0;
1847bail: 1785bail:
1848 mlog_exit(status);
1849 return status; 1786 return status;
1850} 1787}
1851 1788
@@ -1884,11 +1821,12 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
1884 1821
1885 os = &osb->osb_orphan_scan; 1822 os = &osb->osb_orphan_scan;
1886 1823
1887 mlog(0, "Begin orphan scan\n");
1888
1889 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) 1824 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
1890 goto out; 1825 goto out;
1891 1826
1827 trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno,
1828 atomic_read(&os->os_state));
1829
1892 status = ocfs2_orphan_scan_lock(osb, &seqno); 1830 status = ocfs2_orphan_scan_lock(osb, &seqno);
1893 if (status < 0) { 1831 if (status < 0) {
1894 if (status != -EAGAIN) 1832 if (status != -EAGAIN)
@@ -1918,7 +1856,8 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
1918unlock: 1856unlock:
1919 ocfs2_orphan_scan_unlock(osb, seqno); 1857 ocfs2_orphan_scan_unlock(osb, seqno);
1920out: 1858out:
1921 mlog(0, "Orphan scan completed\n"); 1859 trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno,
1860 atomic_read(&os->os_state));
1922 return; 1861 return;
1923} 1862}
1924 1863
@@ -2002,8 +1941,7 @@ static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
2002 if (IS_ERR(iter)) 1941 if (IS_ERR(iter))
2003 return 0; 1942 return 0;
2004 1943
2005 mlog(0, "queue orphan %llu\n", 1944 trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
2006 (unsigned long long)OCFS2_I(iter)->ip_blkno);
2007 /* No locking is required for the next_orphan queue as there 1945 /* No locking is required for the next_orphan queue as there
2008 * is only ever a single process doing orphan recovery. */ 1946 * is only ever a single process doing orphan recovery. */
2009 OCFS2_I(iter)->ip_next_orphan = p->head; 1947 OCFS2_I(iter)->ip_next_orphan = p->head;
@@ -2119,7 +2057,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
2119 struct inode *iter; 2057 struct inode *iter;
2120 struct ocfs2_inode_info *oi; 2058 struct ocfs2_inode_info *oi;
2121 2059
2122 mlog(0, "Recover inodes from orphan dir in slot %d\n", slot); 2060 trace_ocfs2_recover_orphans(slot);
2123 2061
2124 ocfs2_mark_recovering_orphan_dir(osb, slot); 2062 ocfs2_mark_recovering_orphan_dir(osb, slot);
2125 ret = ocfs2_queue_orphans(osb, slot, &inode); 2063 ret = ocfs2_queue_orphans(osb, slot, &inode);
@@ -2132,7 +2070,8 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
2132 2070
2133 while (inode) { 2071 while (inode) {
2134 oi = OCFS2_I(inode); 2072 oi = OCFS2_I(inode);
2135 mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno); 2073 trace_ocfs2_recover_orphans_iput(
2074 (unsigned long long)oi->ip_blkno);
2136 2075
2137 iter = oi->ip_next_orphan; 2076 iter = oi->ip_next_orphan;
2138 2077
@@ -2170,6 +2109,7 @@ static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
2170 * MOUNTED flag, but this is set right before 2109 * MOUNTED flag, but this is set right before
2171 * dismount_volume() so we can trust it. */ 2110 * dismount_volume() so we can trust it. */
2172 if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) { 2111 if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
2112 trace_ocfs2_wait_on_mount(VOLUME_DISABLED);
2173 mlog(0, "mount error, exiting!\n"); 2113 mlog(0, "mount error, exiting!\n");
2174 return -EBUSY; 2114 return -EBUSY;
2175 } 2115 }
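
A hedged sketch of how one of the tracepoints used above, trace_ocfs2_commit_cache_begin(), could be declared with the generic TRACE_EVENT() macro. The real fs/ocfs2/ocfs2_trace.h (the header included above) very likely factors single-integer events like this through DECLARE_EVENT_CLASS()/DEFINE_EVENT() rather than spelling each one out.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_commit_cache_begin,
	TP_PROTO(unsigned int num_trans),
	TP_ARGS(num_trans),
	TP_STRUCT__entry(
		__field(unsigned int, num_trans)
	),
	TP_fast_assign(
		__entry->num_trans = num_trans;
	),
	TP_printk("num_trans %u", __entry->num_trans)
);

#endif /* _TRACE_OCFS2_H */

/* If the header lives next to the .c files (as ocfs2_trace.h appears to),
 * the include path has to be spelled out before pulling in define_trace.h. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE ocfs2_trace
#include <trace/define_trace.h>

Each .c file that calls trace_ocfs2_*() just includes the header, as journal.c does above; exactly one compilation unit in the module defines CREATE_TRACE_POINTS before the include so the tracepoint bodies are emitted once.
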
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ec6adbf8f551..210c35237548 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -29,7 +29,6 @@
29#include <linux/highmem.h> 29#include <linux/highmem.h>
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31 31
32#define MLOG_MASK_PREFIX ML_DISK_ALLOC
33#include <cluster/masklog.h> 32#include <cluster/masklog.h>
34 33
35#include "ocfs2.h" 34#include "ocfs2.h"
@@ -43,6 +42,7 @@
43#include "suballoc.h" 42#include "suballoc.h"
44#include "super.h" 43#include "super.h"
45#include "sysfile.h" 44#include "sysfile.h"
45#include "ocfs2_trace.h"
46 46
47#include "buffer_head_io.h" 47#include "buffer_head_io.h"
48 48
@@ -201,8 +201,7 @@ void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
201 la_max_mb = ocfs2_clusters_to_megabytes(sb, 201 la_max_mb = ocfs2_clusters_to_megabytes(sb,
202 ocfs2_local_alloc_size(sb) * 8); 202 ocfs2_local_alloc_size(sb) * 8);
203 203
204 mlog(0, "requested: %dM, max: %uM, default: %uM\n", 204 trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);
205 requested_mb, la_max_mb, la_default_mb);
206 205
207 if (requested_mb == -1) { 206 if (requested_mb == -1) {
208 /* No user request - use defaults */ 207 /* No user request - use defaults */
@@ -276,8 +275,8 @@ int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
276 275
277 ret = 1; 276 ret = 1;
278bail: 277bail:
279 mlog(0, "state=%d, bits=%llu, la_bits=%d, ret=%d\n", 278 trace_ocfs2_alloc_should_use_local(
280 osb->local_alloc_state, (unsigned long long)bits, la_bits, ret); 279 (unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
281 spin_unlock(&osb->osb_lock); 280 spin_unlock(&osb->osb_lock);
282 return ret; 281 return ret;
283} 282}
@@ -291,8 +290,6 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
291 struct inode *inode = NULL; 290 struct inode *inode = NULL;
292 struct ocfs2_local_alloc *la; 291 struct ocfs2_local_alloc *la;
293 292
294 mlog_entry_void();
295
296 if (osb->local_alloc_bits == 0) 293 if (osb->local_alloc_bits == 0)
297 goto bail; 294 goto bail;
298 295
@@ -364,9 +361,10 @@ bail:
364 if (inode) 361 if (inode)
365 iput(inode); 362 iput(inode);
366 363
367 mlog(0, "Local alloc window bits = %d\n", osb->local_alloc_bits); 364 trace_ocfs2_load_local_alloc(osb->local_alloc_bits);
368 365
369 mlog_exit(status); 366 if (status)
367 mlog_errno(status);
370 return status; 368 return status;
371} 369}
372 370
@@ -388,8 +386,6 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
388 struct ocfs2_dinode *alloc_copy = NULL; 386 struct ocfs2_dinode *alloc_copy = NULL;
389 struct ocfs2_dinode *alloc = NULL; 387 struct ocfs2_dinode *alloc = NULL;
390 388
391 mlog_entry_void();
392
393 cancel_delayed_work(&osb->la_enable_wq); 389 cancel_delayed_work(&osb->la_enable_wq);
394 flush_workqueue(ocfs2_wq); 390 flush_workqueue(ocfs2_wq);
395 391
@@ -482,8 +478,6 @@ out:
482 478
483 if (alloc_copy) 479 if (alloc_copy)
484 kfree(alloc_copy); 480 kfree(alloc_copy);
485
486 mlog_exit_void();
487} 481}
488 482
489/* 483/*
@@ -502,7 +496,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
502 struct inode *inode = NULL; 496 struct inode *inode = NULL;
503 struct ocfs2_dinode *alloc; 497 struct ocfs2_dinode *alloc;
504 498
505 mlog_entry("(slot_num = %d)\n", slot_num); 499 trace_ocfs2_begin_local_alloc_recovery(slot_num);
506 500
507 *alloc_copy = NULL; 501 *alloc_copy = NULL;
508 502
@@ -552,7 +546,8 @@ bail:
552 iput(inode); 546 iput(inode);
553 } 547 }
554 548
555 mlog_exit(status); 549 if (status)
550 mlog_errno(status);
556 return status; 551 return status;
557} 552}
558 553
@@ -570,8 +565,6 @@ int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
570 struct buffer_head *main_bm_bh = NULL; 565 struct buffer_head *main_bm_bh = NULL;
571 struct inode *main_bm_inode; 566 struct inode *main_bm_inode;
572 567
573 mlog_entry_void();
574
575 main_bm_inode = ocfs2_get_system_file_inode(osb, 568 main_bm_inode = ocfs2_get_system_file_inode(osb,
576 GLOBAL_BITMAP_SYSTEM_INODE, 569 GLOBAL_BITMAP_SYSTEM_INODE,
577 OCFS2_INVALID_SLOT); 570 OCFS2_INVALID_SLOT);
@@ -620,7 +613,8 @@ out_mutex:
620out: 613out:
621 if (!status) 614 if (!status)
622 ocfs2_init_steal_slots(osb); 615 ocfs2_init_steal_slots(osb);
623 mlog_exit(status); 616 if (status)
617 mlog_errno(status);
624 return status; 618 return status;
625} 619}
626 620
@@ -640,8 +634,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
640 struct inode *local_alloc_inode; 634 struct inode *local_alloc_inode;
641 unsigned int free_bits; 635 unsigned int free_bits;
642 636
643 mlog_entry_void();
644
645 BUG_ON(!ac); 637 BUG_ON(!ac);
646 638
647 local_alloc_inode = 639 local_alloc_inode =
@@ -712,10 +704,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
712 goto bail; 704 goto bail;
713 } 705 }
714 706
715 if (ac->ac_max_block)
716 mlog(0, "Calling in_range for max block %llu\n",
717 (unsigned long long)ac->ac_max_block);
718
719 ac->ac_inode = local_alloc_inode; 707 ac->ac_inode = local_alloc_inode;
720 /* We should never use localalloc from another slot */ 708 /* We should never use localalloc from another slot */
721 ac->ac_alloc_slot = osb->slot_num; 709 ac->ac_alloc_slot = osb->slot_num;
@@ -729,10 +717,12 @@ bail:
729 iput(local_alloc_inode); 717 iput(local_alloc_inode);
730 } 718 }
731 719
732 mlog(0, "bits=%d, slot=%d, ret=%d\n", bits_wanted, osb->slot_num, 720 trace_ocfs2_reserve_local_alloc_bits(
733 status); 721 (unsigned long long)ac->ac_max_block,
722 bits_wanted, osb->slot_num, status);
734 723
735 mlog_exit(status); 724 if (status)
725 mlog_errno(status);
736 return status; 726 return status;
737} 727}
738 728
@@ -749,7 +739,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
749 struct ocfs2_dinode *alloc; 739 struct ocfs2_dinode *alloc;
750 struct ocfs2_local_alloc *la; 740 struct ocfs2_local_alloc *la;
751 741
752 mlog_entry_void();
753 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); 742 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
754 743
755 local_alloc_inode = ac->ac_inode; 744 local_alloc_inode = ac->ac_inode;
@@ -788,7 +777,8 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
788 ocfs2_journal_dirty(handle, osb->local_alloc_bh); 777 ocfs2_journal_dirty(handle, osb->local_alloc_bh);
789 778
790bail: 779bail:
791 mlog_exit(status); 780 if (status)
781 mlog_errno(status);
792 return status; 782 return status;
793} 783}
794 784
@@ -799,13 +789,11 @@ static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
799 u32 count = 0; 789 u32 count = 0;
800 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); 790 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
801 791
802 mlog_entry_void();
803
804 buffer = la->la_bitmap; 792 buffer = la->la_bitmap;
805 for (i = 0; i < le16_to_cpu(la->la_size); i++) 793 for (i = 0; i < le16_to_cpu(la->la_size); i++)
806 count += hweight8(buffer[i]); 794 count += hweight8(buffer[i]);
807 795
808 mlog_exit(count); 796 trace_ocfs2_local_alloc_count_bits(count);
809 return count; 797 return count;
810} 798}
811 799
@@ -820,10 +808,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
820 void *bitmap = NULL; 808 void *bitmap = NULL;
821 struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap; 809 struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;
822 810
823 mlog_entry("(numbits wanted = %u)\n", *numbits);
824
825 if (!alloc->id1.bitmap1.i_total) { 811 if (!alloc->id1.bitmap1.i_total) {
826 mlog(0, "No bits in my window!\n");
827 bitoff = -1; 812 bitoff = -1;
828 goto bail; 813 goto bail;
829 } 814 }
@@ -883,8 +868,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
883 } 868 }
884 } 869 }
885 870
886 mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff, 871 trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);
887 numfound);
888 872
889 if (numfound == *numbits) 873 if (numfound == *numbits)
890 bitoff = startoff - numfound; 874 bitoff = startoff - numfound;
@@ -895,7 +879,10 @@ bail:
895 if (local_resv) 879 if (local_resv)
896 ocfs2_resv_discard(resmap, resv); 880 ocfs2_resv_discard(resmap, resv);
897 881
898 mlog_exit(bitoff); 882 trace_ocfs2_local_alloc_find_clear_bits(*numbits,
883 le32_to_cpu(alloc->id1.bitmap1.i_total),
884 bitoff, numfound);
885
899 return bitoff; 886 return bitoff;
900} 887}
901 888
@@ -903,15 +890,12 @@ static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
903{ 890{
904 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); 891 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
905 int i; 892 int i;
906 mlog_entry_void();
907 893
908 alloc->id1.bitmap1.i_total = 0; 894 alloc->id1.bitmap1.i_total = 0;
909 alloc->id1.bitmap1.i_used = 0; 895 alloc->id1.bitmap1.i_used = 0;
910 la->la_bm_off = 0; 896 la->la_bm_off = 0;
911 for(i = 0; i < le16_to_cpu(la->la_size); i++) 897 for(i = 0; i < le16_to_cpu(la->la_size); i++)
912 la->la_bitmap[i] = 0; 898 la->la_bitmap[i] = 0;
913
914 mlog_exit_void();
915} 899}
916 900
917#if 0 901#if 0
@@ -952,18 +936,16 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
952 void *bitmap; 936 void *bitmap;
953 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); 937 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
954 938
955 mlog_entry("total = %u, used = %u\n", 939 trace_ocfs2_sync_local_to_main(
956 le32_to_cpu(alloc->id1.bitmap1.i_total), 940 le32_to_cpu(alloc->id1.bitmap1.i_total),
957 le32_to_cpu(alloc->id1.bitmap1.i_used)); 941 le32_to_cpu(alloc->id1.bitmap1.i_used));
958 942
959 if (!alloc->id1.bitmap1.i_total) { 943 if (!alloc->id1.bitmap1.i_total) {
960 mlog(0, "nothing to sync!\n");
961 goto bail; 944 goto bail;
962 } 945 }
963 946
964 if (le32_to_cpu(alloc->id1.bitmap1.i_used) == 947 if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
965 le32_to_cpu(alloc->id1.bitmap1.i_total)) { 948 le32_to_cpu(alloc->id1.bitmap1.i_total)) {
966 mlog(0, "all bits were taken!\n");
967 goto bail; 949 goto bail;
968 } 950 }
969 951
@@ -985,8 +967,7 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
985 ocfs2_clusters_to_blocks(osb->sb, 967 ocfs2_clusters_to_blocks(osb->sb,
986 start - count); 968 start - count);
987 969
988 mlog(0, "freeing %u bits starting at local alloc bit " 970 trace_ocfs2_sync_local_to_main_free(
989 "%u (la_start_blk = %llu, blkno = %llu)\n",
990 count, start - count, 971 count, start - count,
991 (unsigned long long)la_start_blk, 972 (unsigned long long)la_start_blk,
992 (unsigned long long)blkno); 973 (unsigned long long)blkno);
@@ -1007,7 +988,8 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
1007 } 988 }
1008 989
1009bail: 990bail:
1010 mlog_exit(status); 991 if (status)
992 mlog_errno(status);
1011 return status; 993 return status;
1012} 994}
1013 995
@@ -1132,7 +1114,8 @@ bail:
1132 *ac = NULL; 1114 *ac = NULL;
1133 } 1115 }
1134 1116
1135 mlog_exit(status); 1117 if (status)
1118 mlog_errno(status);
1136 return status; 1119 return status;
1137} 1120}
1138 1121
@@ -1148,17 +1131,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
1148 struct ocfs2_dinode *alloc = NULL; 1131 struct ocfs2_dinode *alloc = NULL;
1149 struct ocfs2_local_alloc *la; 1132 struct ocfs2_local_alloc *la;
1150 1133
1151 mlog_entry_void();
1152
1153 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; 1134 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
1154 la = OCFS2_LOCAL_ALLOC(alloc); 1135 la = OCFS2_LOCAL_ALLOC(alloc);
1155 1136
1156 if (alloc->id1.bitmap1.i_total) 1137 trace_ocfs2_local_alloc_new_window(
1157 mlog(0, "asking me to alloc a new window over a non-empty " 1138 le32_to_cpu(alloc->id1.bitmap1.i_total),
1158 "one\n"); 1139 osb->local_alloc_bits);
1159
1160 mlog(0, "Allocating %u clusters for a new window.\n",
1161 osb->local_alloc_bits);
1162 1140
1163 /* Instruct the allocation code to try the most recently used 1141 /* Instruct the allocation code to try the most recently used
1164 * cluster group. We'll re-record the group used this pass 1142 * cluster group. We'll re-record the group used this pass
@@ -1220,13 +1198,13 @@ retry_enospc:
1220 ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count, 1198 ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
1221 OCFS2_LOCAL_ALLOC(alloc)->la_bitmap); 1199 OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
1222 1200
1223 mlog(0, "New window allocated:\n"); 1201 trace_ocfs2_local_alloc_new_window_result(
1224 mlog(0, "window la_bm_off = %u\n", 1202 OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
1225 OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); 1203 le32_to_cpu(alloc->id1.bitmap1.i_total));
1226 mlog(0, "window bits = %u\n", le32_to_cpu(alloc->id1.bitmap1.i_total));
1227 1204
1228bail: 1205bail:
1229 mlog_exit(status); 1206 if (status)
1207 mlog_errno(status);
1230 return status; 1208 return status;
1231} 1209}
1232 1210
@@ -1243,8 +1221,6 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
1243 struct ocfs2_dinode *alloc_copy = NULL; 1221 struct ocfs2_dinode *alloc_copy = NULL;
1244 struct ocfs2_alloc_context *ac = NULL; 1222 struct ocfs2_alloc_context *ac = NULL;
1245 1223
1246 mlog_entry_void();
1247
1248 ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE); 1224 ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);
1249 1225
1250 /* This will lock the main bitmap for us. */ 1226 /* This will lock the main bitmap for us. */
@@ -1324,7 +1300,8 @@ bail:
1324 if (ac) 1300 if (ac)
1325 ocfs2_free_alloc_context(ac); 1301 ocfs2_free_alloc_context(ac);
1326 1302
1327 mlog_exit(status); 1303 if (status)
1304 mlog_errno(status);
1328 return status; 1305 return status;
1329} 1306}
1330 1307
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c
index b5cb3ede9408..e57c804069ea 100644
--- a/fs/ocfs2/locks.c
+++ b/fs/ocfs2/locks.c
@@ -26,7 +26,6 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/fcntl.h> 27#include <linux/fcntl.h>
28 28
29#define MLOG_MASK_PREFIX ML_INODE
30#include <cluster/masklog.h> 29#include <cluster/masklog.h>
31 30
32#include "ocfs2.h" 31#include "ocfs2.h"
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 7e32db9c2c99..3e9393ca39eb 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -31,7 +31,6 @@
31#include <linux/signal.h> 31#include <linux/signal.h>
32#include <linux/rbtree.h> 32#include <linux/rbtree.h>
33 33
34#define MLOG_MASK_PREFIX ML_FILE_IO
35#include <cluster/masklog.h> 34#include <cluster/masklog.h>
36 35
37#include "ocfs2.h" 36#include "ocfs2.h"
@@ -42,6 +41,7 @@
42#include "inode.h" 41#include "inode.h"
43#include "mmap.h" 42#include "mmap.h"
44#include "super.h" 43#include "super.h"
44#include "ocfs2_trace.h"
45 45
46 46
47static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) 47static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
@@ -49,13 +49,12 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
49 sigset_t oldset; 49 sigset_t oldset;
50 int ret; 50 int ret;
51 51
52 mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);
53
54 ocfs2_block_signals(&oldset); 52 ocfs2_block_signals(&oldset);
55 ret = filemap_fault(area, vmf); 53 ret = filemap_fault(area, vmf);
56 ocfs2_unblock_signals(&oldset); 54 ocfs2_unblock_signals(&oldset);
57 55
58 mlog_exit_ptr(vmf->page); 56 trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno,
57 area, vmf->page, vmf->pgoff);
59 return ret; 58 return ret;
60} 59}
61 60
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d6c25d76b537..28f2cc1080d8 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -42,7 +42,6 @@
42#include <linux/highmem.h> 42#include <linux/highmem.h>
43#include <linux/quotaops.h> 43#include <linux/quotaops.h>
44 44
45#define MLOG_MASK_PREFIX ML_NAMEI
46#include <cluster/masklog.h> 45#include <cluster/masklog.h>
47 46
48#include "ocfs2.h" 47#include "ocfs2.h"
@@ -63,6 +62,7 @@
63#include "uptodate.h" 62#include "uptodate.h"
64#include "xattr.h" 63#include "xattr.h"
65#include "acl.h" 64#include "acl.h"
65#include "ocfs2_trace.h"
66 66
67#include "buffer_head_io.h" 67#include "buffer_head_io.h"
68 68
@@ -106,17 +106,15 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
106 struct dentry *ret; 106 struct dentry *ret;
107 struct ocfs2_inode_info *oi; 107 struct ocfs2_inode_info *oi;
108 108
109 mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, 109 trace_ocfs2_lookup(dir, dentry, dentry->d_name.len,
110 dentry->d_name.len, dentry->d_name.name); 110 dentry->d_name.name,
111 (unsigned long long)OCFS2_I(dir)->ip_blkno, 0);
111 112
112 if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) { 113 if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) {
113 ret = ERR_PTR(-ENAMETOOLONG); 114 ret = ERR_PTR(-ENAMETOOLONG);
114 goto bail; 115 goto bail;
115 } 116 }
116 117
117 mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len,
118 dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno);
119
120 status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT); 118 status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT);
121 if (status < 0) { 119 if (status < 0) {
122 if (status != -ENOENT) 120 if (status != -ENOENT)
@@ -182,7 +180,7 @@ bail_unlock:
182 180
183bail: 181bail:
184 182
185 mlog_exit_ptr(ret); 183 trace_ocfs2_lookup_ret(ret);
186 184
187 return ret; 185 return ret;
188} 186}
@@ -235,9 +233,9 @@ static int ocfs2_mknod(struct inode *dir,
235 sigset_t oldset; 233 sigset_t oldset;
236 int did_block_signals = 0; 234 int did_block_signals = 0;
237 235
238 mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, 236 trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
239 (unsigned long)dev, dentry->d_name.len, 237 (unsigned long long)OCFS2_I(dir)->ip_blkno,
240 dentry->d_name.name); 238 (unsigned long)dev, mode);
241 239
242 dquot_initialize(dir); 240 dquot_initialize(dir);
243 241
@@ -354,10 +352,6 @@ static int ocfs2_mknod(struct inode *dir,
354 goto leave; 352 goto leave;
355 did_quota_inode = 1; 353 did_quota_inode = 1;
356 354
357 mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry,
358 inode->i_mode, (unsigned long)dev, dentry->d_name.len,
359 dentry->d_name.name);
360
361 /* do the real work now. */ 355 /* do the real work now. */
362 status = ocfs2_mknod_locked(osb, dir, inode, dev, 356 status = ocfs2_mknod_locked(osb, dir, inode, dev,
363 &new_fe_bh, parent_fe_bh, handle, 357 &new_fe_bh, parent_fe_bh, handle,
@@ -436,9 +430,6 @@ leave:
436 if (did_block_signals) 430 if (did_block_signals)
437 ocfs2_unblock_signals(&oldset); 431 ocfs2_unblock_signals(&oldset);
438 432
439 if (status == -ENOSPC)
440 mlog(0, "Disk is full\n");
441
442 brelse(new_fe_bh); 433 brelse(new_fe_bh);
443 brelse(parent_fe_bh); 434 brelse(parent_fe_bh);
444 kfree(si.name); 435 kfree(si.name);
@@ -466,7 +457,8 @@ leave:
466 iput(inode); 457 iput(inode);
467 } 458 }
468 459
469 mlog_exit(status); 460 if (status)
461 mlog_errno(status);
470 462
471 return status; 463 return status;
472} 464}
@@ -577,7 +569,8 @@ leave:
577 } 569 }
578 } 570 }
579 571
580 mlog_exit(status); 572 if (status)
573 mlog_errno(status);
581 return status; 574 return status;
582} 575}
583 576
@@ -615,10 +608,11 @@ static int ocfs2_mkdir(struct inode *dir,
615{ 608{
616 int ret; 609 int ret;
617 610
618 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode, 611 trace_ocfs2_mkdir(dir, dentry, dentry->d_name.len, dentry->d_name.name,
619 dentry->d_name.len, dentry->d_name.name); 612 OCFS2_I(dir)->ip_blkno, mode);
620 ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0); 613 ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0);
621 mlog_exit(ret); 614 if (ret)
615 mlog_errno(ret);
622 616
623 return ret; 617 return ret;
624} 618}
@@ -630,10 +624,11 @@ static int ocfs2_create(struct inode *dir,
630{ 624{
631 int ret; 625 int ret;
632 626
633 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode, 627 trace_ocfs2_create(dir, dentry, dentry->d_name.len, dentry->d_name.name,
634 dentry->d_name.len, dentry->d_name.name); 628 (unsigned long long)OCFS2_I(dir)->ip_blkno, mode);
635 ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0); 629 ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0);
636 mlog_exit(ret); 630 if (ret)
631 mlog_errno(ret);
637 632
638 return ret; 633 return ret;
639} 634}
@@ -652,9 +647,9 @@ static int ocfs2_link(struct dentry *old_dentry,
652 struct ocfs2_dir_lookup_result lookup = { NULL, }; 647 struct ocfs2_dir_lookup_result lookup = { NULL, };
653 sigset_t oldset; 648 sigset_t oldset;
654 649
655 mlog_entry("(inode=%lu, old='%.*s' new='%.*s')\n", inode->i_ino, 650 trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
656 old_dentry->d_name.len, old_dentry->d_name.name, 651 old_dentry->d_name.len, old_dentry->d_name.name,
657 dentry->d_name.len, dentry->d_name.name); 652 dentry->d_name.len, dentry->d_name.name);
658 653
659 if (S_ISDIR(inode->i_mode)) 654 if (S_ISDIR(inode->i_mode))
660 return -EPERM; 655 return -EPERM;
@@ -757,7 +752,8 @@ out:
757 752
758 ocfs2_free_dir_lookup_result(&lookup); 753 ocfs2_free_dir_lookup_result(&lookup);
759 754
760 mlog_exit(err); 755 if (err)
756 mlog_errno(err);
761 757
762 return err; 758 return err;
763} 759}
@@ -809,19 +805,17 @@ static int ocfs2_unlink(struct inode *dir,
809 struct ocfs2_dir_lookup_result lookup = { NULL, }; 805 struct ocfs2_dir_lookup_result lookup = { NULL, };
810 struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; 806 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
811 807
812 mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, 808 trace_ocfs2_unlink(dir, dentry, dentry->d_name.len,
813 dentry->d_name.len, dentry->d_name.name); 809 dentry->d_name.name,
810 (unsigned long long)OCFS2_I(dir)->ip_blkno,
811 (unsigned long long)OCFS2_I(inode)->ip_blkno);
814 812
815 dquot_initialize(dir); 813 dquot_initialize(dir);
816 814
817 BUG_ON(dentry->d_parent->d_inode != dir); 815 BUG_ON(dentry->d_parent->d_inode != dir);
818 816
819 mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); 817 if (inode == osb->root_inode)
820
821 if (inode == osb->root_inode) {
822 mlog(0, "Cannot delete the root directory\n");
823 return -EPERM; 818 return -EPERM;
824 }
825 819
826 status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1, 820 status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1,
827 OI_LS_PARENT); 821 OI_LS_PARENT);
@@ -843,9 +837,10 @@ static int ocfs2_unlink(struct inode *dir,
843 if (OCFS2_I(inode)->ip_blkno != blkno) { 837 if (OCFS2_I(inode)->ip_blkno != blkno) {
844 status = -ENOENT; 838 status = -ENOENT;
845 839
846 mlog(0, "ip_blkno %llu != dirent blkno %llu ip_flags = %x\n", 840 trace_ocfs2_unlink_noent(
847 (unsigned long long)OCFS2_I(inode)->ip_blkno, 841 (unsigned long long)OCFS2_I(inode)->ip_blkno,
848 (unsigned long long)blkno, OCFS2_I(inode)->ip_flags); 842 (unsigned long long)blkno,
843 OCFS2_I(inode)->ip_flags);
849 goto leave; 844 goto leave;
850 } 845 }
851 846
@@ -954,7 +949,8 @@ leave:
954 ocfs2_free_dir_lookup_result(&orphan_insert); 949 ocfs2_free_dir_lookup_result(&orphan_insert);
955 ocfs2_free_dir_lookup_result(&lookup); 950 ocfs2_free_dir_lookup_result(&lookup);
956 951
957 mlog_exit(status); 952 if (status)
953 mlog_errno(status);
958 954
959 return status; 955 return status;
960} 956}
@@ -975,9 +971,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
975 struct buffer_head **tmpbh; 971 struct buffer_head **tmpbh;
976 struct inode *tmpinode; 972 struct inode *tmpinode;
977 973
978 mlog_entry("(inode1 = %llu, inode2 = %llu)\n", 974 trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
979 (unsigned long long)oi1->ip_blkno, 975 (unsigned long long)oi2->ip_blkno);
980 (unsigned long long)oi2->ip_blkno);
981 976
982 if (*bh1) 977 if (*bh1)
983 *bh1 = NULL; 978 *bh1 = NULL;
@@ -988,7 +983,6 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
988 if (oi1->ip_blkno != oi2->ip_blkno) { 983 if (oi1->ip_blkno != oi2->ip_blkno) {
989 if (oi1->ip_blkno < oi2->ip_blkno) { 984 if (oi1->ip_blkno < oi2->ip_blkno) {
990 /* switch id1 and id2 around */ 985 /* switch id1 and id2 around */
991 mlog(0, "switching them around...\n");
992 tmpbh = bh2; 986 tmpbh = bh2;
993 bh2 = bh1; 987 bh2 = bh1;
994 bh1 = tmpbh; 988 bh1 = tmpbh;
@@ -1024,8 +1018,13 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
1024 mlog_errno(status); 1018 mlog_errno(status);
1025 } 1019 }
1026 1020
1021 trace_ocfs2_double_lock_end(
1022 (unsigned long long)OCFS2_I(inode1)->ip_blkno,
1023 (unsigned long long)OCFS2_I(inode2)->ip_blkno);
1024
1027bail: 1025bail:
1028 mlog_exit(status); 1026 if (status)
1027 mlog_errno(status);
1029 return status; 1028 return status;
1030} 1029}
1031 1030
@@ -1067,10 +1066,9 @@ static int ocfs2_rename(struct inode *old_dir,
1067 /* At some point it might be nice to break this function up a 1066 /* At some point it might be nice to break this function up a
1068 * bit. */ 1067 * bit. */
1069 1068
1070 mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p, from='%.*s' to='%.*s')\n", 1069 trace_ocfs2_rename(old_dir, old_dentry, new_dir, new_dentry,
1071 old_dir, old_dentry, new_dir, new_dentry, 1070 old_dentry->d_name.len, old_dentry->d_name.name,
1072 old_dentry->d_name.len, old_dentry->d_name.name, 1071 new_dentry->d_name.len, new_dentry->d_name.name);
1073 new_dentry->d_name.len, new_dentry->d_name.name);
1074 1072
1075 dquot_initialize(old_dir); 1073 dquot_initialize(old_dir);
1076 dquot_initialize(new_dir); 1074 dquot_initialize(new_dir);
@@ -1227,16 +1225,15 @@ static int ocfs2_rename(struct inode *old_dir,
1227 if (!new_inode) { 1225 if (!new_inode) {
1228 status = -EACCES; 1226 status = -EACCES;
1229 1227
1230 mlog(0, "We found an inode for name %.*s but VFS " 1228 trace_ocfs2_rename_target_exists(new_dentry->d_name.len,
1231 "didn't give us one.\n", new_dentry->d_name.len, 1229 new_dentry->d_name.name);
1232 new_dentry->d_name.name);
1233 goto bail; 1230 goto bail;
1234 } 1231 }
1235 1232
1236 if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) { 1233 if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) {
1237 status = -EACCES; 1234 status = -EACCES;
1238 1235
1239 mlog(0, "Inode %llu and dir %llu disagree. flags = %x\n", 1236 trace_ocfs2_rename_disagree(
1240 (unsigned long long)OCFS2_I(new_inode)->ip_blkno, 1237 (unsigned long long)OCFS2_I(new_inode)->ip_blkno,
1241 (unsigned long long)newfe_blkno, 1238 (unsigned long long)newfe_blkno,
1242 OCFS2_I(new_inode)->ip_flags); 1239 OCFS2_I(new_inode)->ip_flags);
@@ -1259,8 +1256,7 @@ static int ocfs2_rename(struct inode *old_dir,
1259 1256
1260 newfe = (struct ocfs2_dinode *) newfe_bh->b_data; 1257 newfe = (struct ocfs2_dinode *) newfe_bh->b_data;
1261 1258
1262 mlog(0, "aha rename over existing... new_blkno=%llu " 1259 trace_ocfs2_rename_over_existing(
1263 "newfebh=%p bhblocknr=%llu\n",
1264 (unsigned long long)newfe_blkno, newfe_bh, newfe_bh ? 1260 (unsigned long long)newfe_blkno, newfe_bh, newfe_bh ?
1265 (unsigned long long)newfe_bh->b_blocknr : 0ULL); 1261 (unsigned long long)newfe_bh->b_blocknr : 0ULL);
1266 1262
@@ -1476,7 +1472,8 @@ bail:
1476 brelse(old_dir_bh); 1472 brelse(old_dir_bh);
1477 brelse(new_dir_bh); 1473 brelse(new_dir_bh);
1478 1474
1479 mlog_exit(status); 1475 if (status)
1476 mlog_errno(status);
1480 1477
1481 return status; 1478 return status;
1482} 1479}
@@ -1501,9 +1498,8 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
1501 * write i_size + 1 bytes. */ 1498 * write i_size + 1 bytes. */
1502 blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; 1499 blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
1503 1500
1504 mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n", 1501 trace_ocfs2_create_symlink_data((unsigned long long)inode->i_blocks,
1505 (unsigned long long)inode->i_blocks, 1502 i_size_read(inode), blocks);
1506 i_size_read(inode), blocks);
1507 1503
1508 /* Sanity check -- make sure we're going to fit. */ 1504 /* Sanity check -- make sure we're going to fit. */
1509 if (bytes_left > 1505 if (bytes_left >
@@ -1579,7 +1575,8 @@ bail:
1579 kfree(bhs); 1575 kfree(bhs);
1580 } 1576 }
1581 1577
1582 mlog_exit(status); 1578 if (status)
1579 mlog_errno(status);
1583 return status; 1580 return status;
1584} 1581}
1585 1582
@@ -1610,8 +1607,8 @@ static int ocfs2_symlink(struct inode *dir,
1610 sigset_t oldset; 1607 sigset_t oldset;
1611 int did_block_signals = 0; 1608 int did_block_signals = 0;
1612 1609
1613 mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, 1610 trace_ocfs2_symlink_begin(dir, dentry, symname,
1614 dentry, symname, dentry->d_name.len, dentry->d_name.name); 1611 dentry->d_name.len, dentry->d_name.name);
1615 1612
1616 dquot_initialize(dir); 1613 dquot_initialize(dir);
1617 1614
@@ -1713,9 +1710,10 @@ static int ocfs2_symlink(struct inode *dir,
1713 goto bail; 1710 goto bail;
1714 did_quota_inode = 1; 1711 did_quota_inode = 1;
1715 1712
1716 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, 1713 trace_ocfs2_symlink_create(dir, dentry, dentry->d_name.len,
1717 inode->i_mode, dentry->d_name.len, 1714 dentry->d_name.name,
1718 dentry->d_name.name); 1715 (unsigned long long)OCFS2_I(dir)->ip_blkno,
1716 inode->i_mode);
1719 1717
1720 status = ocfs2_mknod_locked(osb, dir, inode, 1718 status = ocfs2_mknod_locked(osb, dir, inode,
1721 0, &new_fe_bh, parent_fe_bh, handle, 1719 0, &new_fe_bh, parent_fe_bh, handle,
@@ -1835,7 +1833,8 @@ bail:
1835 iput(inode); 1833 iput(inode);
1836 } 1834 }
1837 1835
1838 mlog_exit(status); 1836 if (status)
1837 mlog_errno(status);
1839 1838
1840 return status; 1839 return status;
1841} 1840}
@@ -1844,8 +1843,6 @@ static int ocfs2_blkno_stringify(u64 blkno, char *name)
1844{ 1843{
1845 int status, namelen; 1844 int status, namelen;
1846 1845
1847 mlog_entry_void();
1848
1849 namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx", 1846 namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx",
1850 (long long)blkno); 1847 (long long)blkno);
1851 if (namelen <= 0) { 1848 if (namelen <= 0) {
@@ -1862,12 +1859,12 @@ static int ocfs2_blkno_stringify(u64 blkno, char *name)
1862 goto bail; 1859 goto bail;
1863 } 1860 }
1864 1861
1865 mlog(0, "built filename '%s' for orphan dir (len=%d)\n", name, 1862 trace_ocfs2_blkno_stringify(blkno, name, namelen);
1866 namelen);
1867 1863
1868 status = 0; 1864 status = 0;
1869bail: 1865bail:
1870 mlog_exit(status); 1866 if (status < 0)
1867 mlog_errno(status);
1871 return status; 1868 return status;
1872} 1869}
1873 1870
@@ -1980,7 +1977,8 @@ out:
1980 iput(orphan_dir_inode); 1977 iput(orphan_dir_inode);
1981 } 1978 }
1982 1979
1983 mlog_exit(ret); 1980 if (ret)
1981 mlog_errno(ret);
1984 return ret; 1982 return ret;
1985} 1983}
1986 1984
@@ -1997,7 +1995,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
1997 struct ocfs2_dinode *orphan_fe; 1995 struct ocfs2_dinode *orphan_fe;
1998 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; 1996 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
1999 1997
2000 mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); 1998 trace_ocfs2_orphan_add_begin(
1999 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2001 2000
2002 status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh); 2001 status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh);
2003 if (status < 0) { 2002 if (status < 0) {
@@ -2056,13 +2055,14 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
2056 2055
2057 ocfs2_journal_dirty(handle, fe_bh); 2056 ocfs2_journal_dirty(handle, fe_bh);
2058 2057
2059 mlog(0, "Inode %llu orphaned in slot %d\n", 2058 trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
2060 (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); 2059 osb->slot_num);
2061 2060
2062leave: 2061leave:
2063 brelse(orphan_dir_bh); 2062 brelse(orphan_dir_bh);
2064 2063
2065 mlog_exit(status); 2064 if (status)
2065 mlog_errno(status);
2066 return status; 2066 return status;
2067} 2067}
2068 2068
@@ -2078,17 +2078,15 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
2078 int status = 0; 2078 int status = 0;
2079 struct ocfs2_dir_lookup_result lookup = { NULL, }; 2079 struct ocfs2_dir_lookup_result lookup = { NULL, };
2080 2080
2081 mlog_entry_void();
2082
2083 status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); 2081 status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
2084 if (status < 0) { 2082 if (status < 0) {
2085 mlog_errno(status); 2083 mlog_errno(status);
2086 goto leave; 2084 goto leave;
2087 } 2085 }
2088 2086
2089 mlog(0, "removing '%s' from orphan dir %llu (namelen=%d)\n", 2087 trace_ocfs2_orphan_del(
2090 name, (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno, 2088 (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno,
2091 OCFS2_ORPHAN_NAMELEN); 2089 name, OCFS2_ORPHAN_NAMELEN);
2092 2090
2093 /* find it's spot in the orphan directory */ 2091 /* find it's spot in the orphan directory */
2094 status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode, 2092 status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode,
@@ -2124,7 +2122,8 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
2124leave: 2122leave:
2125 ocfs2_free_dir_lookup_result(&lookup); 2123 ocfs2_free_dir_lookup_result(&lookup);
2126 2124
2127 mlog_exit(status); 2125 if (status)
2126 mlog_errno(status);
2128 return status; 2127 return status;
2129} 2128}
2130 2129
@@ -2321,9 +2320,6 @@ leave:
2321 iput(orphan_dir); 2320 iput(orphan_dir);
2322 } 2321 }
2323 2322
2324 if (status == -ENOSPC)
2325 mlog(0, "Disk is full\n");
2326
2327 if ((status < 0) && inode) { 2323 if ((status < 0) && inode) {
2328 clear_nlink(inode); 2324 clear_nlink(inode);
2329 iput(inode); 2325 iput(inode);
@@ -2358,8 +2354,10 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
2358 struct buffer_head *di_bh = NULL; 2354 struct buffer_head *di_bh = NULL;
2359 struct ocfs2_dir_lookup_result lookup = { NULL, }; 2355 struct ocfs2_dir_lookup_result lookup = { NULL, };
2360 2356
2361 mlog_entry("(0x%p, 0x%p, %.*s')\n", dir, dentry, 2357 trace_ocfs2_mv_orphaned_inode_to_new(dir, dentry,
2362 dentry->d_name.len, dentry->d_name.name); 2358 dentry->d_name.len, dentry->d_name.name,
2359 (unsigned long long)OCFS2_I(dir)->ip_blkno,
2360 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2363 2361
2364 status = ocfs2_inode_lock(dir, &parent_di_bh, 1); 2362 status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
2365 if (status < 0) { 2363 if (status < 0) {
@@ -2476,7 +2474,8 @@ leave:
2476 2474
2477 ocfs2_free_dir_lookup_result(&lookup); 2475 ocfs2_free_dir_lookup_result(&lookup);
2478 2476
2479 mlog_exit(status); 2477 if (status)
2478 mlog_errno(status);
2480 2479
2481 return status; 2480 return status;
2482} 2481}
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 1a97ba1ec3fc..409285854f64 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -147,6 +147,17 @@ struct ocfs2_lock_res_ops;
 
 typedef void (*ocfs2_lock_callback)(int status, unsigned long data);
 
+#ifdef CONFIG_OCFS2_FS_STATS
+struct ocfs2_lock_stats {
+	u64	ls_total;	/* Total wait in NSEC */
+	u32	ls_gets;	/* Num acquires */
+	u32	ls_fail;	/* Num failed acquires */
+
+	/* Storing max wait in usecs saves 24 bytes per inode */
+	u32	ls_max;		/* Max wait in USEC */
+};
+#endif
+
 struct ocfs2_lock_res {
 	void *l_priv;
 	struct ocfs2_lock_res_ops *l_ops;
@@ -182,15 +193,9 @@ struct ocfs2_lock_res {
 	struct list_head	 l_debug_list;
 
 #ifdef CONFIG_OCFS2_FS_STATS
-	unsigned long long	 l_lock_num_prmode;	   /* PR acquires */
-	unsigned long long	 l_lock_num_exmode;	   /* EX acquires */
-	unsigned int		 l_lock_num_prmode_failed; /* Failed PR gets */
-	unsigned int		 l_lock_num_exmode_failed; /* Failed EX gets */
-	unsigned long long	 l_lock_total_prmode;	   /* Tot wait for PR */
-	unsigned long long	 l_lock_total_exmode;	   /* Tot wait for EX */
-	unsigned int		 l_lock_max_prmode;	   /* Max wait for PR */
-	unsigned int		 l_lock_max_exmode;	   /* Max wait for EX */
-	unsigned int		 l_lock_refresh;	   /* Disk refreshes */
+	struct ocfs2_lock_stats	 l_lock_prmode;		/* PR mode stats */
+	u32			 l_lock_refresh;	/* Disk refreshes */
+	struct ocfs2_lock_stats	 l_lock_exmode;		/* EX mode stats */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	 l_lockdep_map;
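The ocfs2.h hunk above folds eight separate PR/EX counters into two struct ocfs2_lock_stats instances, keeping the total wait in nanoseconds and the maximum wait in microseconds so the per-inode footprint shrinks. The code that actually updates these fields lives in fs/ocfs2/dlmglue.c and is not part of this hunk; the sketch below only illustrates how such a structure would typically be maintained (the function name and the div_u64()/NSEC_PER_USEC conversion are assumptions, not taken from the patch):

	#include <linux/math64.h>	/* div_u64() */
	#include <linux/time.h>		/* NSEC_PER_USEC */

	/* Illustrative only: fold one lock acquire into a stats bucket. */
	static void ocfs2_lock_stats_account(struct ocfs2_lock_stats *stats,
					     u64 wait_ns, int failed)
	{
		u32 wait_us = div_u64(wait_ns, NSEC_PER_USEC);

		stats->ls_gets++;
		stats->ls_total += wait_ns;	/* totals stay in nsecs */
		if (failed)
			stats->ls_fail++;
		if (wait_us > stats->ls_max)	/* max kept in usecs to fit a u32 */
			stats->ls_max = wait_us;
	}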
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
new file mode 100644
index 000000000000..a1dae5bb54ac
--- /dev/null
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -0,0 +1,2739 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM ocfs2
3
4#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_OCFS2_H
6
7#include <linux/tracepoint.h>
8
9DECLARE_EVENT_CLASS(ocfs2__int,
10 TP_PROTO(int num),
11 TP_ARGS(num),
12 TP_STRUCT__entry(
13 __field(int, num)
14 ),
15 TP_fast_assign(
16 __entry->num = num;
17 ),
18 TP_printk("%d", __entry->num)
19);
20
21#define DEFINE_OCFS2_INT_EVENT(name) \
22DEFINE_EVENT(ocfs2__int, name, \
23 TP_PROTO(int num), \
24 TP_ARGS(num))
25
26DECLARE_EVENT_CLASS(ocfs2__uint,
27 TP_PROTO(unsigned int num),
28 TP_ARGS(num),
29 TP_STRUCT__entry(
30 __field( unsigned int, num )
31 ),
32 TP_fast_assign(
33 __entry->num = num;
34 ),
35 TP_printk("%u", __entry->num)
36);
37
38#define DEFINE_OCFS2_UINT_EVENT(name) \
39DEFINE_EVENT(ocfs2__uint, name, \
40 TP_PROTO(unsigned int num), \
41 TP_ARGS(num))
42
43DECLARE_EVENT_CLASS(ocfs2__ull,
44 TP_PROTO(unsigned long long blkno),
45 TP_ARGS(blkno),
46 TP_STRUCT__entry(
47 __field(unsigned long long, blkno)
48 ),
49 TP_fast_assign(
50 __entry->blkno = blkno;
51 ),
52 TP_printk("%llu", __entry->blkno)
53);
54
55#define DEFINE_OCFS2_ULL_EVENT(name) \
56DEFINE_EVENT(ocfs2__ull, name, \
57 TP_PROTO(unsigned long long num), \
58 TP_ARGS(num))
59
60DECLARE_EVENT_CLASS(ocfs2__pointer,
61 TP_PROTO(void *pointer),
62 TP_ARGS(pointer),
63 TP_STRUCT__entry(
64 __field(void *, pointer)
65 ),
66 TP_fast_assign(
67 __entry->pointer = pointer;
68 ),
69 TP_printk("%p", __entry->pointer)
70);
71
72#define DEFINE_OCFS2_POINTER_EVENT(name) \
73DEFINE_EVENT(ocfs2__pointer, name, \
74 TP_PROTO(void *pointer), \
75 TP_ARGS(pointer))
76
77DECLARE_EVENT_CLASS(ocfs2__string,
78 TP_PROTO(const char *name),
79 TP_ARGS(name),
80 TP_STRUCT__entry(
81 __string(name,name)
82 ),
83 TP_fast_assign(
84 __assign_str(name, name);
85 ),
86 TP_printk("%s", __get_str(name))
87);
88
89#define DEFINE_OCFS2_STRING_EVENT(name) \
90DEFINE_EVENT(ocfs2__string, name, \
91 TP_PROTO(const char *name), \
92 TP_ARGS(name))
93
94DECLARE_EVENT_CLASS(ocfs2__int_int,
95 TP_PROTO(int value1, int value2),
96 TP_ARGS(value1, value2),
97 TP_STRUCT__entry(
98 __field(int, value1)
99 __field(int, value2)
100 ),
101 TP_fast_assign(
102 __entry->value1 = value1;
103 __entry->value2 = value2;
104 ),
105 TP_printk("%d %d", __entry->value1, __entry->value2)
106);
107
108#define DEFINE_OCFS2_INT_INT_EVENT(name) \
109DEFINE_EVENT(ocfs2__int_int, name, \
110 TP_PROTO(int val1, int val2), \
111 TP_ARGS(val1, val2))
112
113DECLARE_EVENT_CLASS(ocfs2__uint_int,
114 TP_PROTO(unsigned int value1, int value2),
115 TP_ARGS(value1, value2),
116 TP_STRUCT__entry(
117 __field(unsigned int, value1)
118 __field(int, value2)
119 ),
120 TP_fast_assign(
121 __entry->value1 = value1;
122 __entry->value2 = value2;
123 ),
124 TP_printk("%u %d", __entry->value1, __entry->value2)
125);
126
127#define DEFINE_OCFS2_UINT_INT_EVENT(name) \
128DEFINE_EVENT(ocfs2__uint_int, name, \
129 TP_PROTO(unsigned int val1, int val2), \
130 TP_ARGS(val1, val2))
131
132DECLARE_EVENT_CLASS(ocfs2__uint_uint,
133 TP_PROTO(unsigned int value1, unsigned int value2),
134 TP_ARGS(value1, value2),
135 TP_STRUCT__entry(
136 __field(unsigned int, value1)
137 __field(unsigned int, value2)
138 ),
139 TP_fast_assign(
140 __entry->value1 = value1;
141 __entry->value2 = value2;
142 ),
143 TP_printk("%u %u", __entry->value1, __entry->value2)
144);
145
146#define DEFINE_OCFS2_UINT_UINT_EVENT(name) \
147DEFINE_EVENT(ocfs2__uint_uint, name, \
148 TP_PROTO(unsigned int val1, unsigned int val2), \
149 TP_ARGS(val1, val2))
150
151DECLARE_EVENT_CLASS(ocfs2__ull_uint,
152 TP_PROTO(unsigned long long value1, unsigned int value2),
153 TP_ARGS(value1, value2),
154 TP_STRUCT__entry(
155 __field(unsigned long long, value1)
156 __field(unsigned int, value2)
157 ),
158 TP_fast_assign(
159 __entry->value1 = value1;
160 __entry->value2 = value2;
161 ),
162 TP_printk("%llu %u", __entry->value1, __entry->value2)
163);
164
165#define DEFINE_OCFS2_ULL_UINT_EVENT(name) \
166DEFINE_EVENT(ocfs2__ull_uint, name, \
167 TP_PROTO(unsigned long long val1, unsigned int val2), \
168 TP_ARGS(val1, val2))
169
170DECLARE_EVENT_CLASS(ocfs2__ull_int,
171 TP_PROTO(unsigned long long value1, int value2),
172 TP_ARGS(value1, value2),
173 TP_STRUCT__entry(
174 __field(unsigned long long, value1)
175 __field(int, value2)
176 ),
177 TP_fast_assign(
178 __entry->value1 = value1;
179 __entry->value2 = value2;
180 ),
181 TP_printk("%llu %d", __entry->value1, __entry->value2)
182);
183
184#define DEFINE_OCFS2_ULL_INT_EVENT(name) \
185DEFINE_EVENT(ocfs2__ull_int, name, \
186 TP_PROTO(unsigned long long val1, int val2), \
187 TP_ARGS(val1, val2))
188
189DECLARE_EVENT_CLASS(ocfs2__ull_ull,
190 TP_PROTO(unsigned long long value1, unsigned long long value2),
191 TP_ARGS(value1, value2),
192 TP_STRUCT__entry(
193 __field(unsigned long long, value1)
194 __field(unsigned long long, value2)
195 ),
196 TP_fast_assign(
197 __entry->value1 = value1;
198 __entry->value2 = value2;
199 ),
200 TP_printk("%llu %llu", __entry->value1, __entry->value2)
201);
202
203#define DEFINE_OCFS2_ULL_ULL_EVENT(name) \
204DEFINE_EVENT(ocfs2__ull_ull, name, \
205 TP_PROTO(unsigned long long val1, unsigned long long val2), \
206 TP_ARGS(val1, val2))
207
208DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint,
209 TP_PROTO(unsigned long long value1,
210 unsigned long long value2, unsigned int value3),
211 TP_ARGS(value1, value2, value3),
212 TP_STRUCT__entry(
213 __field(unsigned long long, value1)
214 __field(unsigned long long, value2)
215 __field(unsigned int, value3)
216 ),
217 TP_fast_assign(
218 __entry->value1 = value1;
219 __entry->value2 = value2;
220 __entry->value3 = value3;
221 ),
222 TP_printk("%llu %llu %u",
223 __entry->value1, __entry->value2, __entry->value3)
224);
225
226#define DEFINE_OCFS2_ULL_ULL_UINT_EVENT(name) \
227DEFINE_EVENT(ocfs2__ull_ull_uint, name, \
228 TP_PROTO(unsigned long long val1, \
229 unsigned long long val2, unsigned int val3), \
230 TP_ARGS(val1, val2, val3))
231
232DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint,
233 TP_PROTO(unsigned long long value1,
234 unsigned int value2, unsigned int value3),
235 TP_ARGS(value1, value2, value3),
236 TP_STRUCT__entry(
237 __field(unsigned long long, value1)
238 __field(unsigned int, value2)
239 __field(unsigned int, value3)
240 ),
241 TP_fast_assign(
242 __entry->value1 = value1;
243 __entry->value2 = value2;
244 __entry->value3 = value3;
245 ),
246 TP_printk("%llu %u %u", __entry->value1,
247 __entry->value2, __entry->value3)
248);
249
250#define DEFINE_OCFS2_ULL_UINT_UINT_EVENT(name) \
251DEFINE_EVENT(ocfs2__ull_uint_uint, name, \
252 TP_PROTO(unsigned long long val1, \
253 unsigned int val2, unsigned int val3), \
254 TP_ARGS(val1, val2, val3))
255
256DECLARE_EVENT_CLASS(ocfs2__uint_uint_uint,
257 TP_PROTO(unsigned int value1, unsigned int value2,
258 unsigned int value3),
259 TP_ARGS(value1, value2, value3),
260 TP_STRUCT__entry(
261 __field( unsigned int, value1 )
262 __field( unsigned int, value2 )
263 __field( unsigned int, value3 )
264 ),
265 TP_fast_assign(
266 __entry->value1 = value1;
267 __entry->value2 = value2;
268 __entry->value3 = value3;
269 ),
270 TP_printk("%u %u %u", __entry->value1, __entry->value2, __entry->value3)
271);
272
273#define DEFINE_OCFS2_UINT_UINT_UINT_EVENT(name) \
274DEFINE_EVENT(ocfs2__uint_uint_uint, name, \
275 TP_PROTO(unsigned int value1, unsigned int value2, \
276 unsigned int value3), \
277 TP_ARGS(value1, value2, value3))
278
279DECLARE_EVENT_CLASS(ocfs2__ull_ull_ull,
280 TP_PROTO(unsigned long long value1,
281 unsigned long long value2, unsigned long long value3),
282 TP_ARGS(value1, value2, value3),
283 TP_STRUCT__entry(
284 __field(unsigned long long, value1)
285 __field(unsigned long long, value2)
286 __field(unsigned long long, value3)
287 ),
288 TP_fast_assign(
289 __entry->value1 = value1;
290 __entry->value2 = value2;
291 __entry->value3 = value3;
292 ),
293 TP_printk("%llu %llu %llu",
294 __entry->value1, __entry->value2, __entry->value3)
295);
296
297#define DEFINE_OCFS2_ULL_ULL_ULL_EVENT(name) \
298DEFINE_EVENT(ocfs2__ull_ull_ull, name, \
299 TP_PROTO(unsigned long long value1, unsigned long long value2, \
300 unsigned long long value3), \
301 TP_ARGS(value1, value2, value3))
302
303DECLARE_EVENT_CLASS(ocfs2__ull_int_int_int,
304 TP_PROTO(unsigned long long ull, int value1, int value2, int value3),
305 TP_ARGS(ull, value1, value2, value3),
306 TP_STRUCT__entry(
307 __field( unsigned long long, ull )
308 __field( int, value1 )
309 __field( int, value2 )
310 __field( int, value3 )
311 ),
312 TP_fast_assign(
313 __entry->ull = ull;
314 __entry->value1 = value1;
315 __entry->value2 = value2;
316 __entry->value3 = value3;
317 ),
318 TP_printk("%llu %d %d %d",
319 __entry->ull, __entry->value1,
320 __entry->value2, __entry->value3)
321);
322
323#define DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(name) \
324DEFINE_EVENT(ocfs2__ull_int_int_int, name, \
325 TP_PROTO(unsigned long long ull, int value1, \
326 int value2, int value3), \
327 TP_ARGS(ull, value1, value2, value3))
328
329DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint_uint,
330 TP_PROTO(unsigned long long ull, unsigned int value1,
331 unsigned int value2, unsigned int value3),
332 TP_ARGS(ull, value1, value2, value3),
333 TP_STRUCT__entry(
334 __field(unsigned long long, ull)
335 __field(unsigned int, value1)
336 __field(unsigned int, value2)
337 __field(unsigned int, value3)
338 ),
339 TP_fast_assign(
340 __entry->ull = ull;
341 __entry->value1 = value1;
342 __entry->value2 = value2;
343 __entry->value3 = value3;
344 ),
345 TP_printk("%llu %u %u %u",
346 __entry->ull, __entry->value1,
347 __entry->value2, __entry->value3)
348);
349
350#define DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(name) \
351DEFINE_EVENT(ocfs2__ull_uint_uint_uint, name, \
352 TP_PROTO(unsigned long long ull, unsigned int value1, \
353 unsigned int value2, unsigned int value3), \
354 TP_ARGS(ull, value1, value2, value3))
355
356DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint_uint,
357 TP_PROTO(unsigned long long value1, unsigned long long value2,
358 unsigned int value3, unsigned int value4),
359 TP_ARGS(value1, value2, value3, value4),
360 TP_STRUCT__entry(
361 __field(unsigned long long, value1)
362 __field(unsigned long long, value2)
363 __field(unsigned int, value3)
364 __field(unsigned int, value4)
365 ),
366 TP_fast_assign(
367 __entry->value1 = value1;
368 __entry->value2 = value2;
369 __entry->value3 = value3;
370 __entry->value4 = value4;
371 ),
372 TP_printk("%llu %llu %u %u",
373 __entry->value1, __entry->value2,
374 __entry->value3, __entry->value4)
375);
376
377#define DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(name) \
378DEFINE_EVENT(ocfs2__ull_ull_uint_uint, name, \
379 TP_PROTO(unsigned long long ull, unsigned long long ull1, \
380 unsigned int value2, unsigned int value3), \
381 TP_ARGS(ull, ull1, value2, value3))
382
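Everything in the new header up to this point is boilerplate: each DECLARE_EVENT_CLASS() describes one argument "shape", and the DEFINE_OCFS2_*_EVENT() wrappers stamp out concrete tracepoints of that shape under TRACE_SYSTEM ocfs2. A call site then simply invokes the generated trace_<event>() function; for example, once DEFINE_OCFS2_INT_EVENT(ocfs2_num_free_extents) is declared below, alloc.c can emit the event as sketched here (the variable name is illustrative, not copied from the patch):

	/* call site sketch in fs/ocfs2/alloc.c; 'retval' is an illustrative name */
	trace_ocfs2_num_free_extents(retval);

As with any tracepoint header, exactly one ocfs2 source file (outside this section) is expected to define CREATE_TRACE_POINTS before including ocfs2_trace.h so the event bodies are instantiated once.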
383/* Trace events for fs/ocfs2/alloc.c. */
384DECLARE_EVENT_CLASS(ocfs2__btree_ops,
385 TP_PROTO(unsigned long long owner,\
386 unsigned int value1, unsigned int value2),
387 TP_ARGS(owner, value1, value2),
388 TP_STRUCT__entry(
389 __field(unsigned long long, owner)
390 __field(unsigned int, value1)
391 __field(unsigned int, value2)
392 ),
393 TP_fast_assign(
394 __entry->owner = owner;
395 __entry->value1 = value1;
396 __entry->value2 = value2;
397 ),
398 TP_printk("%llu %u %u",
399 __entry->owner, __entry->value1, __entry->value2)
400);
401
402#define DEFINE_OCFS2_BTREE_EVENT(name) \
403DEFINE_EVENT(ocfs2__btree_ops, name, \
404 TP_PROTO(unsigned long long owner, \
405 unsigned int value1, unsigned int value2), \
406 TP_ARGS(owner, value1, value2))
407
408DEFINE_OCFS2_BTREE_EVENT(ocfs2_adjust_rightmost_branch);
409
410DEFINE_OCFS2_BTREE_EVENT(ocfs2_rotate_tree_right);
411
412DEFINE_OCFS2_BTREE_EVENT(ocfs2_append_rec_to_path);
413
414DEFINE_OCFS2_BTREE_EVENT(ocfs2_insert_extent_start);
415
416DEFINE_OCFS2_BTREE_EVENT(ocfs2_add_clusters_in_btree);
417
418DEFINE_OCFS2_INT_EVENT(ocfs2_num_free_extents);
419
420DEFINE_OCFS2_INT_EVENT(ocfs2_complete_edge_insert);
421
422TRACE_EVENT(ocfs2_grow_tree,
423 TP_PROTO(unsigned long long owner, int depth),
424 TP_ARGS(owner, depth),
425 TP_STRUCT__entry(
426 __field(unsigned long long, owner)
427 __field(int, depth)
428 ),
429 TP_fast_assign(
430 __entry->owner = owner;
431 __entry->depth = depth;
432 ),
433 TP_printk("%llu %d", __entry->owner, __entry->depth)
434);
435
436TRACE_EVENT(ocfs2_rotate_subtree,
437 TP_PROTO(int subtree_root, unsigned long long blkno,
438 int depth),
439 TP_ARGS(subtree_root, blkno, depth),
440 TP_STRUCT__entry(
441 __field(int, subtree_root)
442 __field(unsigned long long, blkno)
443 __field(int, depth)
444 ),
445 TP_fast_assign(
446 __entry->subtree_root = subtree_root;
447 __entry->blkno = blkno;
448 __entry->depth = depth;
449 ),
450 TP_printk("%d %llu %d", __entry->subtree_root,
451 __entry->blkno, __entry->depth)
452);
453
454TRACE_EVENT(ocfs2_insert_extent,
455 TP_PROTO(unsigned int ins_appending, unsigned int ins_contig,
456 int ins_contig_index, int free_records, int ins_tree_depth),
457 TP_ARGS(ins_appending, ins_contig, ins_contig_index, free_records,
458 ins_tree_depth),
459 TP_STRUCT__entry(
460 __field(unsigned int, ins_appending)
461 __field(unsigned int, ins_contig)
462 __field(int, ins_contig_index)
463 __field(int, free_records)
464 __field(int, ins_tree_depth)
465 ),
466 TP_fast_assign(
467 __entry->ins_appending = ins_appending;
468 __entry->ins_contig = ins_contig;
469 __entry->ins_contig_index = ins_contig_index;
470 __entry->free_records = free_records;
471 __entry->ins_tree_depth = ins_tree_depth;
472 ),
473 TP_printk("%u %u %d %d %d",
474 __entry->ins_appending, __entry->ins_contig,
475 __entry->ins_contig_index, __entry->free_records,
476 __entry->ins_tree_depth)
477);
478
479TRACE_EVENT(ocfs2_split_extent,
480 TP_PROTO(int split_index, unsigned int c_contig_type,
481 unsigned int c_has_empty_extent,
482 unsigned int c_split_covers_rec),
483 TP_ARGS(split_index, c_contig_type,
484 c_has_empty_extent, c_split_covers_rec),
485 TP_STRUCT__entry(
486 __field(int, split_index)
487 __field(unsigned int, c_contig_type)
488 __field(unsigned int, c_has_empty_extent)
489 __field(unsigned int, c_split_covers_rec)
490 ),
491 TP_fast_assign(
492 __entry->split_index = split_index;
493 __entry->c_contig_type = c_contig_type;
494 __entry->c_has_empty_extent = c_has_empty_extent;
495 __entry->c_split_covers_rec = c_split_covers_rec;
496 ),
497 TP_printk("%d %u %u %u", __entry->split_index, __entry->c_contig_type,
498 __entry->c_has_empty_extent, __entry->c_split_covers_rec)
499);
500
501TRACE_EVENT(ocfs2_remove_extent,
502 TP_PROTO(unsigned long long owner, unsigned int cpos,
503 unsigned int len, int index,
504 unsigned int e_cpos, unsigned int clusters),
505 TP_ARGS(owner, cpos, len, index, e_cpos, clusters),
506 TP_STRUCT__entry(
507 __field(unsigned long long, owner)
508 __field(unsigned int, cpos)
509 __field(unsigned int, len)
510 __field(int, index)
511 __field(unsigned int, e_cpos)
512 __field(unsigned int, clusters)
513 ),
514 TP_fast_assign(
515 __entry->owner = owner;
516 __entry->cpos = cpos;
517 __entry->len = len;
518 __entry->index = index;
519 __entry->e_cpos = e_cpos;
520 __entry->clusters = clusters;
521 ),
522 TP_printk("%llu %u %u %d %u %u",
523 __entry->owner, __entry->cpos, __entry->len, __entry->index,
524 __entry->e_cpos, __entry->clusters)
525);
526
527TRACE_EVENT(ocfs2_commit_truncate,
528 TP_PROTO(unsigned long long ino, unsigned int new_cpos,
529 unsigned int clusters, unsigned int depth),
530 TP_ARGS(ino, new_cpos, clusters, depth),
531 TP_STRUCT__entry(
532 __field(unsigned long long, ino)
533 __field(unsigned int, new_cpos)
534 __field(unsigned int, clusters)
535 __field(unsigned int, depth)
536 ),
537 TP_fast_assign(
538 __entry->ino = ino;
539 __entry->new_cpos = new_cpos;
540 __entry->clusters = clusters;
541 __entry->depth = depth;
542 ),
543 TP_printk("%llu %u %u %u",
544 __entry->ino, __entry->new_cpos,
545 __entry->clusters, __entry->depth)
546);
547
548TRACE_EVENT(ocfs2_validate_extent_block,
549 TP_PROTO(unsigned long long blkno),
550 TP_ARGS(blkno),
551 TP_STRUCT__entry(
552 __field(unsigned long long, blkno)
553 ),
554 TP_fast_assign(
555 __entry->blkno = blkno;
556 ),
557 TP_printk("%llu ", __entry->blkno)
558);
559
560TRACE_EVENT(ocfs2_rotate_leaf,
561 TP_PROTO(unsigned int insert_cpos, int insert_index,
562 int has_empty, int next_free,
563 unsigned int l_count),
564 TP_ARGS(insert_cpos, insert_index, has_empty,
565 next_free, l_count),
566 TP_STRUCT__entry(
567 __field(unsigned int, insert_cpos)
568 __field(int, insert_index)
569 __field(int, has_empty)
570 __field(int, next_free)
571 __field(unsigned int, l_count)
572 ),
573 TP_fast_assign(
574 __entry->insert_cpos = insert_cpos;
575 __entry->insert_index = insert_index;
576 __entry->has_empty = has_empty;
577 __entry->next_free = next_free;
578 __entry->l_count = l_count;
579 ),
580 TP_printk("%u %d %d %d %u", __entry->insert_cpos,
581 __entry->insert_index, __entry->has_empty,
582 __entry->next_free, __entry->l_count)
583);
584
585TRACE_EVENT(ocfs2_add_clusters_in_btree_ret,
586 TP_PROTO(int status, int reason, int err),
587 TP_ARGS(status, reason, err),
588 TP_STRUCT__entry(
589 __field(int, status)
590 __field(int, reason)
591 __field(int, err)
592 ),
593 TP_fast_assign(
594 __entry->status = status;
595 __entry->reason = reason;
596 __entry->err = err;
597 ),
598 TP_printk("%d %d %d", __entry->status,
599 __entry->reason, __entry->err)
600);
601
602TRACE_EVENT(ocfs2_mark_extent_written,
603 TP_PROTO(unsigned long long owner, unsigned int cpos,
604 unsigned int len, unsigned int phys),
605 TP_ARGS(owner, cpos, len, phys),
606 TP_STRUCT__entry(
607 __field(unsigned long long, owner)
608 __field(unsigned int, cpos)
609 __field(unsigned int, len)
610 __field(unsigned int, phys)
611 ),
612 TP_fast_assign(
613 __entry->owner = owner;
614 __entry->cpos = cpos;
615 __entry->len = len;
616 __entry->phys = phys;
617 ),
618 TP_printk("%llu %u %u %u",
619 __entry->owner, __entry->cpos,
620 __entry->len, __entry->phys)
621);
622
623DECLARE_EVENT_CLASS(ocfs2__truncate_log_ops,
624 TP_PROTO(unsigned long long blkno, int index,
625 unsigned int start, unsigned int num),
626 TP_ARGS(blkno, index, start, num),
627 TP_STRUCT__entry(
628 __field(unsigned long long, blkno)
629 __field(int, index)
630 __field(unsigned int, start)
631 __field(unsigned int, num)
632 ),
633 TP_fast_assign(
634 __entry->blkno = blkno;
635 __entry->index = index;
636 __entry->start = start;
637 __entry->num = num;
638 ),
639 TP_printk("%llu %d %u %u",
640 __entry->blkno, __entry->index,
641 __entry->start, __entry->num)
642);
643
644#define DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(name) \
645DEFINE_EVENT(ocfs2__truncate_log_ops, name, \
646 TP_PROTO(unsigned long long blkno, int index, \
647 unsigned int start, unsigned int num), \
648 TP_ARGS(blkno, index, start, num))
649
650DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_truncate_log_append);
651
652DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_replay_truncate_records);
653
654DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_flush_truncate_log);
655
656DEFINE_OCFS2_INT_EVENT(ocfs2_begin_truncate_log_recovery);
657
658DEFINE_OCFS2_INT_EVENT(ocfs2_truncate_log_recovery_num);
659
660DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_complete_truncate_log_recovery);
661
662DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_free_cached_blocks);
663
664DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_cache_cluster_dealloc);
665
666DEFINE_OCFS2_INT_INT_EVENT(ocfs2_run_deallocs);
667
668TRACE_EVENT(ocfs2_cache_block_dealloc,
669 TP_PROTO(int type, int slot, unsigned long long suballoc,
670 unsigned long long blkno, unsigned int bit),
671 TP_ARGS(type, slot, suballoc, blkno, bit),
672 TP_STRUCT__entry(
673 __field(int, type)
674 __field(int, slot)
675 __field(unsigned long long, suballoc)
676 __field(unsigned long long, blkno)
677 __field(unsigned int, bit)
678 ),
679 TP_fast_assign(
680 __entry->type = type;
681 __entry->slot = slot;
682 __entry->suballoc = suballoc;
683 __entry->blkno = blkno;
684 __entry->bit = bit;
685 ),
686 TP_printk("%d %d %llu %llu %u",
687 __entry->type, __entry->slot, __entry->suballoc,
688 __entry->blkno, __entry->bit)
689);
690
691/* End of trace events for fs/ocfs2/alloc.c. */
692
693/* Trace events for fs/ocfs2/localalloc.c. */
694
695DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_la_set_sizes);
696
697DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_alloc_should_use_local);
698
699DEFINE_OCFS2_INT_EVENT(ocfs2_load_local_alloc);
700
701DEFINE_OCFS2_INT_EVENT(ocfs2_begin_local_alloc_recovery);
702
703DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_reserve_local_alloc_bits);
704
705DEFINE_OCFS2_UINT_EVENT(ocfs2_local_alloc_count_bits);
706
707DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits_search_bitmap);
708
709DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits);
710
711DEFINE_OCFS2_INT_INT_EVENT(ocfs2_sync_local_to_main);
712
713TRACE_EVENT(ocfs2_sync_local_to_main_free,
714 TP_PROTO(int count, int bit, unsigned long long start_blk,
715 unsigned long long blkno),
716 TP_ARGS(count, bit, start_blk, blkno),
717 TP_STRUCT__entry(
718 __field(int, count)
719 __field(int, bit)
720 __field(unsigned long long, start_blk)
721 __field(unsigned long long, blkno)
722 ),
723 TP_fast_assign(
724 __entry->count = count;
725 __entry->bit = bit;
726 __entry->start_blk = start_blk;
727 __entry->blkno = blkno;
728 ),
729 TP_printk("%d %d %llu %llu",
730 __entry->count, __entry->bit, __entry->start_blk,
731 __entry->blkno)
732);
733
734DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_new_window);
735
736DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_local_alloc_new_window_result);
737
738/* End of trace events for fs/ocfs2/localalloc.c. */
739
740/* Trace events for fs/ocfs2/resize.c. */
741
742DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_update_last_group_and_inode);
743
744DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_group_extend);
745
746DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_group_add);
747
748/* End of trace events for fs/ocfs2/resize.c. */
749
750/* Trace events for fs/ocfs2/suballoc.c. */
751
752DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_group_descriptor);
753
754DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_contig);
755
756DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_discontig);
757
758DEFINE_OCFS2_ULL_EVENT(ocfs2_block_group_alloc);
759
760DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_nospc);
761
762DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_no_new_group);
763
764DEFINE_OCFS2_ULL_EVENT(ocfs2_reserve_new_inode_new_group);
765
766DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_set_bits);
767
768TRACE_EVENT(ocfs2_relink_block_group,
769 TP_PROTO(unsigned long long i_blkno, unsigned int chain,
770 unsigned long long bg_blkno,
771 unsigned long long prev_blkno),
772 TP_ARGS(i_blkno, chain, bg_blkno, prev_blkno),
773 TP_STRUCT__entry(
774 __field(unsigned long long, i_blkno)
775 __field(unsigned int, chain)
776 __field(unsigned long long, bg_blkno)
777 __field(unsigned long long, prev_blkno)
778 ),
779 TP_fast_assign(
780 __entry->i_blkno = i_blkno;
781 __entry->chain = chain;
782 __entry->bg_blkno = bg_blkno;
783 __entry->prev_blkno = prev_blkno;
784 ),
785 TP_printk("%llu %u %llu %llu",
786 __entry->i_blkno, __entry->chain, __entry->bg_blkno,
787 __entry->prev_blkno)
788);
789
790DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_cluster_group_search_wrong_max_bits);
791
792DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cluster_group_search_max_block);
793
794DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_block_group_search_max_block);
795
796DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_search_chain_begin);
797
798DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_succ);
799
800DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_end);
801
802DEFINE_OCFS2_UINT_EVENT(ocfs2_claim_suballoc_bits);
803
804DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_claim_new_inode_at_loc);
805
806DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_clear_bits);
807
808TRACE_EVENT(ocfs2_free_suballoc_bits,
809 TP_PROTO(unsigned long long inode, unsigned long long group,
810 unsigned int start_bit, unsigned int count),
811 TP_ARGS(inode, group, start_bit, count),
812 TP_STRUCT__entry(
813 __field(unsigned long long, inode)
814 __field(unsigned long long, group)
815 __field(unsigned int, start_bit)
816 __field(unsigned int, count)
817 ),
818 TP_fast_assign(
819 __entry->inode = inode;
820 __entry->group = group;
821 __entry->start_bit = start_bit;
822 __entry->count = count;
823 ),
824 TP_printk("%llu %llu %u %u", __entry->inode, __entry->group,
825 __entry->start_bit, __entry->count)
826);
827
828TRACE_EVENT(ocfs2_free_clusters,
829 TP_PROTO(unsigned long long bg_blkno, unsigned long long start_blk,
830 unsigned int start_bit, unsigned int count),
831 TP_ARGS(bg_blkno, start_blk, start_bit, count),
832 TP_STRUCT__entry(
833 __field(unsigned long long, bg_blkno)
834 __field(unsigned long long, start_blk)
835 __field(unsigned int, start_bit)
836 __field(unsigned int, count)
837 ),
838 TP_fast_assign(
839 __entry->bg_blkno = bg_blkno;
840 __entry->start_blk = start_blk;
841 __entry->start_bit = start_bit;
842 __entry->count = count;
843 ),
844 TP_printk("%llu %llu %u %u", __entry->bg_blkno, __entry->start_blk,
845 __entry->start_bit, __entry->count)
846);
847
848DEFINE_OCFS2_ULL_EVENT(ocfs2_get_suballoc_slot_bit);
849
850DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_test_suballoc_bit);
851
852DEFINE_OCFS2_ULL_EVENT(ocfs2_test_inode_bit);
853
854/* End of trace events for fs/ocfs2/suballoc.c. */
855
856/* Trace events for fs/ocfs2/refcounttree.c. */
857
858DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_refcount_block);
859
860DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_refcount_trees);
861
862DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree);
863
864DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree_blkno);
865
866DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_change_refcount_rec);
867
868DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_expand_inline_ref_root);
869
870DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_divide_leaf_refcount_block);
871
872DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_new_leaf_refcount_block);
873
874DECLARE_EVENT_CLASS(ocfs2__refcount_tree_ops,
875 TP_PROTO(unsigned long long blkno, int index,
876 unsigned long long cpos,
877 unsigned int clusters, unsigned int refcount),
878 TP_ARGS(blkno, index, cpos, clusters, refcount),
879 TP_STRUCT__entry(
880 __field(unsigned long long, blkno)
881 __field(int, index)
882 __field(unsigned long long, cpos)
883 __field(unsigned int, clusters)
884 __field(unsigned int, refcount)
885 ),
886 TP_fast_assign(
887 __entry->blkno = blkno;
888 __entry->index = index;
889 __entry->cpos = cpos;
890 __entry->clusters = clusters;
891 __entry->refcount = refcount;
892 ),
893 TP_printk("%llu %d %llu %u %u", __entry->blkno, __entry->index,
894 __entry->cpos, __entry->clusters, __entry->refcount)
895);
896
897#define DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(name) \
898DEFINE_EVENT(ocfs2__refcount_tree_ops, name, \
899 TP_PROTO(unsigned long long blkno, int index, \
900 unsigned long long cpos, \
901 unsigned int count, unsigned int refcount), \
902 TP_ARGS(blkno, index, cpos, count, refcount))
903
904DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_insert_refcount_rec);
905
906TRACE_EVENT(ocfs2_split_refcount_rec,
907 TP_PROTO(unsigned long long cpos,
908 unsigned int clusters, unsigned int refcount,
909 unsigned long long split_cpos,
910 unsigned int split_clusters, unsigned int split_refcount),
911 TP_ARGS(cpos, clusters, refcount,
912 split_cpos, split_clusters, split_refcount),
913 TP_STRUCT__entry(
914 __field(unsigned long long, cpos)
915 __field(unsigned int, clusters)
916 __field(unsigned int, refcount)
917 __field(unsigned long long, split_cpos)
918 __field(unsigned int, split_clusters)
919 __field(unsigned int, split_refcount)
920 ),
921 TP_fast_assign(
922 __entry->cpos = cpos;
923 __entry->clusters = clusters;
924 __entry->refcount = refcount;
925 __entry->split_cpos = split_cpos;
926 __entry->split_clusters = split_clusters;
927 __entry->split_refcount = split_refcount;
928 ),
929 TP_printk("%llu %u %u %llu %u %u",
930 __entry->cpos, __entry->clusters, __entry->refcount,
931 __entry->split_cpos, __entry->split_clusters,
932 __entry->split_refcount)
933);
934
935DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_split_refcount_rec_insert);
936
937DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_increase_refcount_begin);
938
939DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_change);
940
941DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_increase_refcount_insert);
942
943DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_split);
944
945DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_remove_refcount_extent);
946
947DEFINE_OCFS2_ULL_EVENT(ocfs2_restore_refcount_block);
948
949DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_decrease_refcount_rec);
950
951TRACE_EVENT(ocfs2_decrease_refcount,
952 TP_PROTO(unsigned long long owner,
953 unsigned long long cpos,
954 unsigned int len, int delete),
955 TP_ARGS(owner, cpos, len, delete),
956 TP_STRUCT__entry(
957 __field(unsigned long long, owner)
958 __field(unsigned long long, cpos)
959 __field(unsigned int, len)
960 __field(int, delete)
961 ),
962 TP_fast_assign(
963 __entry->owner = owner;
964 __entry->cpos = cpos;
965 __entry->len = len;
966 __entry->delete = delete;
967 ),
968 TP_printk("%llu %llu %u %d",
969 __entry->owner, __entry->cpos, __entry->len, __entry->delete)
970);
971
972DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_mark_extent_refcounted);
973
974DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_calc_refcount_meta_credits);
975
976TRACE_EVENT(ocfs2_calc_refcount_meta_credits_iterate,
977 TP_PROTO(int recs_add, unsigned long long cpos,
978 unsigned int clusters, unsigned long long r_cpos,
979 unsigned int r_clusters, unsigned int refcount, int index),
980 TP_ARGS(recs_add, cpos, clusters, r_cpos, r_clusters, refcount, index),
981 TP_STRUCT__entry(
982 __field(int, recs_add)
983 __field(unsigned long long, cpos)
984 __field(unsigned int, clusters)
985 __field(unsigned long long, r_cpos)
986 __field(unsigned int, r_clusters)
987 __field(unsigned int, refcount)
988 __field(int, index)
989 ),
990 TP_fast_assign(
991 __entry->recs_add = recs_add;
992 __entry->cpos = cpos;
993 __entry->clusters = clusters;
994 __entry->r_cpos = r_cpos;
995 __entry->r_clusters = r_clusters;
996 __entry->refcount = refcount;
997 __entry->index = index;
998 ),
999 TP_printk("%d %llu %u %llu %u %u %d",
1000 __entry->recs_add, __entry->cpos, __entry->clusters,
1001 __entry->r_cpos, __entry->r_clusters,
1002 __entry->refcount, __entry->index)
1003);
1004
1005DEFINE_OCFS2_INT_INT_EVENT(ocfs2_add_refcount_flag);
1006
1007DEFINE_OCFS2_INT_INT_EVENT(ocfs2_prepare_refcount_change_for_del);
1008
1009DEFINE_OCFS2_INT_INT_EVENT(ocfs2_lock_refcount_allocators);
1010
1011DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_page);
1012
1013DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_jbd);
1014
1015TRACE_EVENT(ocfs2_clear_ext_refcount,
1016 TP_PROTO(unsigned long long ino, unsigned int cpos,
1017 unsigned int len, unsigned int p_cluster,
1018 unsigned int ext_flags),
1019 TP_ARGS(ino, cpos, len, p_cluster, ext_flags),
1020 TP_STRUCT__entry(
1021 __field(unsigned long long, ino)
1022 __field(unsigned int, cpos)
1023 __field(unsigned int, len)
1024 __field(unsigned int, p_cluster)
1025 __field(unsigned int, ext_flags)
1026 ),
1027 TP_fast_assign(
1028 __entry->ino = ino;
1029 __entry->cpos = cpos;
1030 __entry->len = len;
1031 __entry->p_cluster = p_cluster;
1032 __entry->ext_flags = ext_flags;
1033 ),
1034 TP_printk("%llu %u %u %u %u",
1035 __entry->ino, __entry->cpos, __entry->len,
1036 __entry->p_cluster, __entry->ext_flags)
1037);
1038
1039TRACE_EVENT(ocfs2_replace_clusters,
1040 TP_PROTO(unsigned long long ino, unsigned int cpos,
1041 unsigned int old, unsigned int new, unsigned int len,
1042 unsigned int ext_flags),
1043 TP_ARGS(ino, cpos, old, new, len, ext_flags),
1044 TP_STRUCT__entry(
1045 __field(unsigned long long, ino)
1046 __field(unsigned int, cpos)
1047 __field(unsigned int, old)
1048 __field(unsigned int, new)
1049 __field(unsigned int, len)
1050 __field(unsigned int, ext_flags)
1051 ),
1052 TP_fast_assign(
1053 __entry->ino = ino;
1054 __entry->cpos = cpos;
1055 __entry->old = old;
1056 __entry->new = new;
1057 __entry->len = len;
1058 __entry->ext_flags = ext_flags;
1059 ),
1060 TP_printk("%llu %u %u %u %u %u",
1061 __entry->ino, __entry->cpos, __entry->old, __entry->new,
1062 __entry->len, __entry->ext_flags)
1063);
1064
1065DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_make_clusters_writable);
1066
1067TRACE_EVENT(ocfs2_refcount_cow_hunk,
1068 TP_PROTO(unsigned long long ino, unsigned int cpos,
1069 unsigned int write_len, unsigned int max_cpos,
1070 unsigned int cow_start, unsigned int cow_len),
1071 TP_ARGS(ino, cpos, write_len, max_cpos, cow_start, cow_len),
1072 TP_STRUCT__entry(
1073 __field(unsigned long long, ino)
1074 __field(unsigned int, cpos)
1075 __field(unsigned int, write_len)
1076 __field(unsigned int, max_cpos)
1077 __field(unsigned int, cow_start)
1078 __field(unsigned int, cow_len)
1079 ),
1080 TP_fast_assign(
1081 __entry->ino = ino;
1082 __entry->cpos = cpos;
1083 __entry->write_len = write_len;
1084 __entry->max_cpos = max_cpos;
1085 __entry->cow_start = cow_start;
1086 __entry->cow_len = cow_len;
1087 ),
1088 TP_printk("%llu %u %u %u %u %u",
1089 __entry->ino, __entry->cpos, __entry->write_len,
1090 __entry->max_cpos, __entry->cow_start, __entry->cow_len)
1091);
1092
1093/* End of trace events for fs/ocfs2/refcounttree.c. */
1094
1095/* Trace events for fs/ocfs2/aops.c. */
1096
1097DECLARE_EVENT_CLASS(ocfs2__get_block,
1098 TP_PROTO(unsigned long long ino, unsigned long long iblock,
1099 void *bh_result, int create),
1100 TP_ARGS(ino, iblock, bh_result, create),
1101 TP_STRUCT__entry(
1102 __field(unsigned long long, ino)
1103 __field(unsigned long long, iblock)
1104 __field(void *, bh_result)
1105 __field(int, create)
1106 ),
1107 TP_fast_assign(
1108 __entry->ino = ino;
1109 __entry->iblock = iblock;
1110 __entry->bh_result = bh_result;
1111 __entry->create = create;
1112 ),
1113 TP_printk("%llu %llu %p %d",
1114 __entry->ino, __entry->iblock,
1115 __entry->bh_result, __entry->create)
1116);
1117
1118#define DEFINE_OCFS2_GET_BLOCK_EVENT(name) \
1119DEFINE_EVENT(ocfs2__get_block, name, \
1120 TP_PROTO(unsigned long long ino, unsigned long long iblock, \
1121 void *bh_result, int create), \
1122 TP_ARGS(ino, iblock, bh_result, create))
1123
1124DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_symlink_get_block);
1125
1126DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_get_block);
1127
1128DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_get_block_end);
1129
1130DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_readpage);
1131
1132DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_writepage);
1133
1134DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_bmap);
1135
1136TRACE_EVENT(ocfs2_try_to_write_inline_data,
1137 TP_PROTO(unsigned long long ino, unsigned int len,
1138 unsigned long long pos, unsigned int flags),
1139 TP_ARGS(ino, len, pos, flags),
1140 TP_STRUCT__entry(
1141 __field(unsigned long long, ino)
1142 __field(unsigned int, len)
1143 __field(unsigned long long, pos)
1144 __field(unsigned int, flags)
1145 ),
1146 TP_fast_assign(
1147 __entry->ino = ino;
1148 __entry->len = len;
1149 __entry->pos = pos;
1150 __entry->flags = flags;
1151 ),
1152 TP_printk("%llu %u %llu 0x%x",
1153 __entry->ino, __entry->len, __entry->pos, __entry->flags)
1154);
1155
1156TRACE_EVENT(ocfs2_write_begin_nolock,
1157 TP_PROTO(unsigned long long ino,
1158 long long i_size, unsigned int i_clusters,
1159 unsigned long long pos, unsigned int len,
1160 unsigned int flags, void *page,
1161 unsigned int clusters, unsigned int extents_to_split),
1162 TP_ARGS(ino, i_size, i_clusters, pos, len, flags,
1163 page, clusters, extents_to_split),
1164 TP_STRUCT__entry(
1165 __field(unsigned long long, ino)
1166 __field(long long, i_size)
1167 __field(unsigned int, i_clusters)
1168 __field(unsigned long long, pos)
1169 __field(unsigned int, len)
1170 __field(unsigned int, flags)
1171 __field(void *, page)
1172 __field(unsigned int, clusters)
1173 __field(unsigned int, extents_to_split)
1174 ),
1175 TP_fast_assign(
1176 __entry->ino = ino;
1177 __entry->i_size = i_size;
1178 __entry->i_clusters = i_clusters;
1179 __entry->pos = pos;
1180 __entry->len = len;
1181 __entry->flags = flags;
1182 __entry->page = page;
1183 __entry->clusters = clusters;
1184 __entry->extents_to_split = extents_to_split;
1185 ),
1186 TP_printk("%llu %lld %u %llu %u %u %p %u %u",
1187 __entry->ino, __entry->i_size, __entry->i_clusters,
1188 __entry->pos, __entry->len,
1189 __entry->flags, __entry->page, __entry->clusters,
1190 __entry->extents_to_split)
1191);
1192
1193TRACE_EVENT(ocfs2_write_end_inline,
1194 TP_PROTO(unsigned long long ino,
1195 unsigned long long pos, unsigned int copied,
1196 unsigned int id_count, unsigned int features),
1197 TP_ARGS(ino, pos, copied, id_count, features),
1198 TP_STRUCT__entry(
1199 __field(unsigned long long, ino)
1200 __field(unsigned long long, pos)
1201 __field(unsigned int, copied)
1202 __field(unsigned int, id_count)
1203 __field(unsigned int, features)
1204 ),
1205 TP_fast_assign(
1206 __entry->ino = ino;
1207 __entry->pos = pos;
1208 __entry->copied = copied;
1209 __entry->id_count = id_count;
1210 __entry->features = features;
1211 ),
1212 TP_printk("%llu %llu %u %u %u",
1213 __entry->ino, __entry->pos, __entry->copied,
1214 __entry->id_count, __entry->features)
1215);
1216
1217/* End of trace events for fs/ocfs2/aops.c. */
1218
1219/* Trace events for fs/ocfs2/mmap.c. */
1220
1221TRACE_EVENT(ocfs2_fault,
1222 TP_PROTO(unsigned long long ino,
1223 void *area, void *page, unsigned long pgoff),
1224 TP_ARGS(ino, area, page, pgoff),
1225 TP_STRUCT__entry(
1226 __field(unsigned long long, ino)
1227 __field(void *, area)
1228 __field(void *, page)
1229 __field(unsigned long, pgoff)
1230 ),
1231 TP_fast_assign(
1232 __entry->ino = ino;
1233 __entry->area = area;
1234 __entry->page = page;
1235 __entry->pgoff = pgoff;
1236 ),
1237 TP_printk("%llu %p %p %lu",
1238 __entry->ino, __entry->area, __entry->page, __entry->pgoff)
1239);
1240
1241/* End of trace events for fs/ocfs2/mmap.c. */
1242
1243/* Trace events for fs/ocfs2/file.c. */
1244
1245DECLARE_EVENT_CLASS(ocfs2__file_ops,
1246 TP_PROTO(void *inode, void *file, void *dentry,
1247 unsigned long long ino,
1248 unsigned int d_len, const unsigned char *d_name,
1249 unsigned long long para),
1250 TP_ARGS(inode, file, dentry, ino, d_len, d_name, para),
1251 TP_STRUCT__entry(
1252 __field(void *, inode)
1253 __field(void *, file)
1254 __field(void *, dentry)
1255 __field(unsigned long long, ino)
1256 __field(unsigned int, d_len)
1257 __string(d_name, d_name)
1258 __field(unsigned long long, para)
1259 ),
1260 TP_fast_assign(
1261 __entry->inode = inode;
1262 __entry->file = file;
1263 __entry->dentry = dentry;
1264 __entry->ino = ino;
1265 __entry->d_len = d_len;
1266 __assign_str(d_name, d_name);
1267 __entry->para = para;
1268 ),
1269 TP_printk("%p %p %p %llu %llu %.*s", __entry->inode, __entry->file,
1270 __entry->dentry, __entry->ino, __entry->para,
1271 __entry->d_len, __get_str(d_name))
1272);
1273
1274#define DEFINE_OCFS2_FILE_OPS(name) \
1275DEFINE_EVENT(ocfs2__file_ops, name, \
1276TP_PROTO(void *inode, void *file, void *dentry, \
1277 unsigned long long ino, \
1278 unsigned int d_len, const unsigned char *d_name, \
1279 unsigned long long mode), \
1280 TP_ARGS(inode, file, dentry, ino, d_len, d_name, mode))
1281
1282DEFINE_OCFS2_FILE_OPS(ocfs2_file_open);
1283
1284DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);
1285
1286DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);
1287
1288DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
1289
1290DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
1291
1292DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);
1293
1294DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
1295
1296DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
1297
1298DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_truncate_file_error);
1299
1300TRACE_EVENT(ocfs2_extend_allocation,
1301 TP_PROTO(unsigned long long ip_blkno, unsigned long long size,
1302 unsigned int clusters, unsigned int clusters_to_add,
1303 int why, int restart_func),
1304 TP_ARGS(ip_blkno, size, clusters, clusters_to_add, why, restart_func),
1305 TP_STRUCT__entry(
1306 __field(unsigned long long, ip_blkno)
1307 __field(unsigned long long, size)
1308 __field(unsigned int, clusters)
1309 __field(unsigned int, clusters_to_add)
1310 __field(int, why)
1311 __field(int, restart_func)
1312 ),
1313 TP_fast_assign(
1314 __entry->ip_blkno = ip_blkno;
1315 __entry->size = size;
1316 __entry->clusters = clusters;
1317 __entry->clusters_to_add = clusters_to_add;
1318 __entry->why = why;
1319 __entry->restart_func = restart_func;
1320 ),
1321 TP_printk("%llu %llu %u %u %d %d",
1322 __entry->ip_blkno, __entry->size, __entry->clusters,
1323 __entry->clusters_to_add, __entry->why, __entry->restart_func)
1324);
1325
1326TRACE_EVENT(ocfs2_extend_allocation_end,
1327 TP_PROTO(unsigned long long ino,
1328 unsigned int di_clusters, unsigned long long di_size,
1329 unsigned int ip_clusters, unsigned long long i_size),
1330 TP_ARGS(ino, di_clusters, di_size, ip_clusters, i_size),
1331 TP_STRUCT__entry(
1332 __field(unsigned long long, ino)
1333 __field(unsigned int, di_clusters)
1334 __field(unsigned long long, di_size)
1335 __field(unsigned int, ip_clusters)
1336 __field(unsigned long long, i_size)
1337 ),
1338 TP_fast_assign(
1339 __entry->ino = ino;
1340 __entry->di_clusters = di_clusters;
1341 __entry->di_size = di_size;
1342 __entry->ip_clusters = ip_clusters;
1343 __entry->i_size = i_size;
1344 ),
1345 TP_printk("%llu %u %llu %u %llu", __entry->ino, __entry->di_clusters,
1346 __entry->di_size, __entry->ip_clusters, __entry->i_size)
1347);
1348
1349TRACE_EVENT(ocfs2_write_zero_page,
1350 TP_PROTO(unsigned long long ino,
1351 unsigned long long abs_from, unsigned long long abs_to,
1352 unsigned long index, unsigned int zero_from,
1353 unsigned int zero_to),
1354 TP_ARGS(ino, abs_from, abs_to, index, zero_from, zero_to),
1355 TP_STRUCT__entry(
1356 __field(unsigned long long, ino)
1357 __field(unsigned long long, abs_from)
1358 __field(unsigned long long, abs_to)
1359 __field(unsigned long, index)
1360 __field(unsigned int, zero_from)
1361 __field(unsigned int, zero_to)
1362 ),
1363 TP_fast_assign(
1364 __entry->ino = ino;
1365 __entry->abs_from = abs_from;
1366 __entry->abs_to = abs_to;
1367 __entry->index = index;
1368 __entry->zero_from = zero_from;
1369 __entry->zero_to = zero_to;
1370 ),
1371 TP_printk("%llu %llu %llu %lu %u %u", __entry->ino,
1372 __entry->abs_from, __entry->abs_to,
1373 __entry->index, __entry->zero_from, __entry->zero_to)
1374);
1375
1376DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend_range);
1377
1378DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend);
1379
1380TRACE_EVENT(ocfs2_setattr,
1381 TP_PROTO(void *inode, void *dentry,
1382 unsigned long long ino,
1383 unsigned int d_len, const unsigned char *d_name,
1384 unsigned int ia_valid, unsigned int ia_mode,
1385 unsigned int ia_uid, unsigned int ia_gid),
1386 TP_ARGS(inode, dentry, ino, d_len, d_name,
1387 ia_valid, ia_mode, ia_uid, ia_gid),
1388 TP_STRUCT__entry(
1389 __field(void *, inode)
1390 __field(void *, dentry)
1391 __field(unsigned long long, ino)
1392 __field(unsigned int, d_len)
1393 __string(d_name, d_name)
1394 __field(unsigned int, ia_valid)
1395 __field(unsigned int, ia_mode)
1396 __field(unsigned int, ia_uid)
1397 __field(unsigned int, ia_gid)
1398 ),
1399 TP_fast_assign(
1400 __entry->inode = inode;
1401 __entry->dentry = dentry;
1402 __entry->ino = ino;
1403 __entry->d_len = d_len;
1404 __assign_str(d_name, d_name);
1405 __entry->ia_valid = ia_valid;
1406 __entry->ia_mode = ia_mode;
1407 __entry->ia_uid = ia_uid;
1408 __entry->ia_gid = ia_gid;
1409 ),
1410 TP_printk("%p %p %llu %.*s %u %u %u %u", __entry->inode,
1411 __entry->dentry, __entry->ino, __entry->d_len,
1412 __get_str(d_name), __entry->ia_valid, __entry->ia_mode,
1413 __entry->ia_uid, __entry->ia_gid)
1414);
1415
1416DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_write_remove_suid);
1417
1418DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_partial_clusters);
1419
1420DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range1);
1421
1422DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range2);
1423
1424DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_remove_inode_range);
1425
1426TRACE_EVENT(ocfs2_prepare_inode_for_write,
1427 TP_PROTO(unsigned long long ino, unsigned long long saved_pos,
1428 int appending, unsigned long count,
1429 int *direct_io, int *has_refcount),
1430 TP_ARGS(ino, saved_pos, appending, count, direct_io, has_refcount),
1431 TP_STRUCT__entry(
1432 __field(unsigned long long, ino)
1433 __field(unsigned long long, saved_pos)
1434 __field(int, appending)
1435 __field(unsigned long, count)
1436 __field(int, direct_io)
1437 __field(int, has_refcount)
1438 ),
1439 TP_fast_assign(
1440 __entry->ino = ino;
1441 __entry->saved_pos = saved_pos;
1442 __entry->appending = appending;
1443 __entry->count = count;
1444 __entry->direct_io = direct_io ? *direct_io : -1;
1445 __entry->has_refcount = has_refcount ? *has_refcount : -1;
1446 ),
1447 TP_printk("%llu %llu %d %lu %d %d", __entry->ino,
1448 __entry->saved_pos, __entry->appending, __entry->count,
1449 __entry->direct_io, __entry->has_refcount)
1450);
1451
1452DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
1453
1454/* End of trace events for fs/ocfs2/file.c. */
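The ocfs2__file_ops class above records the inode, file and dentry pointers, the inode number, the dentry name, and one extra per-call value (mode, byte count, and so on). A sketch of a call site for one of the events it defines follows; the exact argument expressions are assumptions for illustration, not copied from the patch:

	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
			      (unsigned long long)OCFS2_I(inode)->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      inode->i_mode);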
1455
1456/* Trace events for fs/ocfs2/inode.c. */
1457
1458TRACE_EVENT(ocfs2_iget_begin,
1459 TP_PROTO(unsigned long long ino, unsigned int flags, int sysfile_type),
1460 TP_ARGS(ino, flags, sysfile_type),
1461 TP_STRUCT__entry(
1462 __field(unsigned long long, ino)
1463 __field(unsigned int, flags)
1464 __field(int, sysfile_type)
1465 ),
1466 TP_fast_assign(
1467 __entry->ino = ino;
1468 __entry->flags = flags;
1469 __entry->sysfile_type = sysfile_type;
1470 ),
1471 TP_printk("%llu %u %d", __entry->ino,
1472 __entry->flags, __entry->sysfile_type)
1473);
1474
1475DEFINE_OCFS2_ULL_EVENT(ocfs2_iget5_locked);
1476
1477TRACE_EVENT(ocfs2_iget_end,
1478 TP_PROTO(void *inode, unsigned long long ino),
1479 TP_ARGS(inode, ino),
1480 TP_STRUCT__entry(
1481 __field(void *, inode)
1482 __field(unsigned long long, ino)
1483 ),
1484 TP_fast_assign(
1485 __entry->inode = inode;
1486 __entry->ino = ino;
1487 ),
1488 TP_printk("%p %llu", __entry->inode, __entry->ino)
1489);
1490
1491TRACE_EVENT(ocfs2_find_actor,
1492 TP_PROTO(void *inode, unsigned long long ino,
1493 void *args, unsigned long long fi_blkno),
1494 TP_ARGS(inode, ino, args, fi_blkno),
1495 TP_STRUCT__entry(
1496 __field(void *, inode)
1497 __field(unsigned long long, ino)
1498 __field(void *, args)
1499 __field(unsigned long long, fi_blkno)
1500 ),
1501 TP_fast_assign(
1502 __entry->inode = inode;
1503 __entry->ino = ino;
1504 __entry->args = args;
1505 __entry->fi_blkno = fi_blkno;
1506 ),
1507 TP_printk("%p %llu %p %llu", __entry->inode, __entry->ino,
1508 __entry->args, __entry->fi_blkno)
1509);
1510
1511DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_populate_inode);
1512
1513DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_read_locked_inode);
1514
1515DEFINE_OCFS2_INT_INT_EVENT(ocfs2_check_orphan_recovery_state);
1516
1517DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_inode_block);
1518
1519TRACE_EVENT(ocfs2_inode_is_valid_to_delete,
1520 TP_PROTO(void *task, void *dc_task, unsigned long long ino,
1521 unsigned int flags),
1522 TP_ARGS(task, dc_task, ino, flags),
1523 TP_STRUCT__entry(
1524 __field(void *, task)
1525 __field(void *, dc_task)
1526 __field(unsigned long long, ino)
1527 __field(unsigned int, flags)
1528 ),
1529 TP_fast_assign(
1530 __entry->task = task;
1531 __entry->dc_task = dc_task;
1532 __entry->ino = ino;
1533 __entry->flags = flags;
1534 ),
1535 TP_printk("%p %p %llu %u", __entry->task, __entry->dc_task,
1536 __entry->ino, __entry->flags)
1537);
1538
1539DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_query_inode_wipe_begin);
1540
1541DEFINE_OCFS2_UINT_EVENT(ocfs2_query_inode_wipe_succ);
1542
1543DEFINE_OCFS2_INT_INT_EVENT(ocfs2_query_inode_wipe_end);
1544
1545DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_cleanup_delete_inode);
1546
1547DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_delete_inode);
1548
1549DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_clear_inode);
1550
1551DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_drop_inode);
1552
1553TRACE_EVENT(ocfs2_inode_revalidate,
1554 TP_PROTO(void *inode, unsigned long long ino,
1555 unsigned int flags),
1556 TP_ARGS(inode, ino, flags),
1557 TP_STRUCT__entry(
1558 __field(void *, inode)
1559 __field(unsigned long long, ino)
1560 __field(unsigned int, flags)
1561 ),
1562 TP_fast_assign(
1563 __entry->inode = inode;
1564 __entry->ino = ino;
1565 __entry->flags = flags;
1566 ),
1567 TP_printk("%p %llu %u", __entry->inode, __entry->ino, __entry->flags)
1568);
1569
1570DEFINE_OCFS2_ULL_EVENT(ocfs2_mark_inode_dirty);
1571
1572/* End of trace events for fs/ocfs2/inode.c. */
1573
1574/* Trace events for fs/ocfs2/extent_map.c. */
1575
1576TRACE_EVENT(ocfs2_read_virt_blocks,
1577 TP_PROTO(void *inode, unsigned long long vblock, int nr,
1578 void *bhs, unsigned int flags, void *validate),
1579 TP_ARGS(inode, vblock, nr, bhs, flags, validate),
1580 TP_STRUCT__entry(
1581 __field(void *, inode)
1582 __field(unsigned long long, vblock)
1583 __field(int, nr)
1584 __field(void *, bhs)
1585 __field(unsigned int, flags)
1586 __field(void *, validate)
1587 ),
1588 TP_fast_assign(
1589 __entry->inode = inode;
1590 __entry->vblock = vblock;
1591 __entry->nr = nr;
1592 __entry->bhs = bhs;
1593 __entry->flags = flags;
1594 __entry->validate = validate;
1595 ),
1596 TP_printk("%p %llu %d %p %x %p", __entry->inode, __entry->vblock,
1597 __entry->nr, __entry->bhs, __entry->flags, __entry->validate)
1598);
1599
1600/* End of trace events for fs/ocfs2/extent_map.c. */
1601
1602/* Trace events for fs/ocfs2/slot_map.c. */
1603
1604DEFINE_OCFS2_UINT_EVENT(ocfs2_refresh_slot_info);
1605
1606DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers);
1607
1608DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers_block);
1609
1610DEFINE_OCFS2_INT_EVENT(ocfs2_find_slot);
1611
1612/* End of trace events for fs/ocfs2/slot_map.c. */
1613
1614/* Trace events for fs/ocfs2/heartbeat.c. */
1615
1616DEFINE_OCFS2_INT_EVENT(ocfs2_do_node_down);
1617
1618/* End of trace events for fs/ocfs2/heartbeat.c. */
1619
1620/* Trace events for fs/ocfs2/super.c. */
1621
1622TRACE_EVENT(ocfs2_remount,
1623 TP_PROTO(unsigned long s_flags, unsigned long osb_flags, int flags),
1624 TP_ARGS(s_flags, osb_flags, flags),
1625 TP_STRUCT__entry(
1626 __field(unsigned long, s_flags)
1627 __field(unsigned long, osb_flags)
1628 __field(int, flags)
1629 ),
1630 TP_fast_assign(
1631 __entry->s_flags = s_flags;
1632 __entry->osb_flags = osb_flags;
1633 __entry->flags = flags;
1634 ),
1635 TP_printk("%lu %lu %d", __entry->s_flags,
1636 __entry->osb_flags, __entry->flags)
1637);
1638
1639TRACE_EVENT(ocfs2_fill_super,
1640 TP_PROTO(void *sb, void *data, int silent),
1641 TP_ARGS(sb, data, silent),
1642 TP_STRUCT__entry(
1643 __field(void *, sb)
1644 __field(void *, data)
1645 __field(int, silent)
1646 ),
1647 TP_fast_assign(
1648 __entry->sb = sb;
1649 __entry->data = data;
1650 __entry->silent = silent;
1651 ),
1652 TP_printk("%p %p %d", __entry->sb,
1653 __entry->data, __entry->silent)
1654);
1655
1656TRACE_EVENT(ocfs2_parse_options,
1657 TP_PROTO(int is_remount, char *options),
1658 TP_ARGS(is_remount, options),
1659 TP_STRUCT__entry(
1660 __field(int, is_remount)
1661 __string(options, options)
1662 ),
1663 TP_fast_assign(
1664 __entry->is_remount = is_remount;
1665 __assign_str(options, options);
1666 ),
1667 TP_printk("%d %s", __entry->is_remount, __get_str(options))
1668);
1669
1670DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super);
1671
1672TRACE_EVENT(ocfs2_statfs,
1673 TP_PROTO(void *sb, void *buf),
1674 TP_ARGS(sb, buf),
1675 TP_STRUCT__entry(
1676 __field(void *, sb)
1677 __field(void *, buf)
1678 ),
1679 TP_fast_assign(
1680 __entry->sb = sb;
1681 __entry->buf = buf;
1682 ),
1683 TP_printk("%p %p", __entry->sb, __entry->buf)
1684);
1685
1686DEFINE_OCFS2_POINTER_EVENT(ocfs2_dismount_volume);
1687
1688TRACE_EVENT(ocfs2_initialize_super,
1689 TP_PROTO(char *label, char *uuid_str, unsigned long long root_dir,
1690 unsigned long long system_dir, int cluster_bits),
1691 TP_ARGS(label, uuid_str, root_dir, system_dir, cluster_bits),
1692 TP_STRUCT__entry(
1693 __string(label, label)
1694 __string(uuid_str, uuid_str)
1695 __field(unsigned long long, root_dir)
1696 __field(unsigned long long, system_dir)
1697 __field(int, cluster_bits)
1698 ),
1699 TP_fast_assign(
1700 __assign_str(label, label);
1701 __assign_str(uuid_str, uuid_str);
1702 __entry->root_dir = root_dir;
1703 __entry->system_dir = system_dir;
1704 __entry->cluster_bits = cluster_bits;
1705 ),
1706 TP_printk("%s %s %llu %llu %d", __get_str(label), __get_str(uuid_str),
1707 __entry->root_dir, __entry->system_dir, __entry->cluster_bits)
1708);
1709
1710/* End of trace events for fs/ocfs2/super.c. */
1711
1712/* Trace events for fs/ocfs2/xattr.c. */
1713
1714DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_xattr_block);
1715
1716DEFINE_OCFS2_UINT_EVENT(ocfs2_xattr_extend_allocation);
1717
1718TRACE_EVENT(ocfs2_init_xattr_set_ctxt,
1719 TP_PROTO(const char *name, int meta, int clusters, int credits),
1720 TP_ARGS(name, meta, clusters, credits),
1721 TP_STRUCT__entry(
1722 __string(name, name)
1723 __field(int, meta)
1724 __field(int, clusters)
1725 __field(int, credits)
1726 ),
1727 TP_fast_assign(
1728 __assign_str(name, name);
1729 __entry->meta = meta;
1730 __entry->clusters = clusters;
1731 __entry->credits = credits;
1732 ),
1733 TP_printk("%s %d %d %d", __get_str(name), __entry->meta,
1734 __entry->clusters, __entry->credits)
1735);
1736
1737DECLARE_EVENT_CLASS(ocfs2__xattr_find,
1738 TP_PROTO(unsigned long long ino, const char *name, int name_index,
1739 unsigned int hash, unsigned long long location,
1740 int xe_index),
1741 TP_ARGS(ino, name, name_index, hash, location, xe_index),
1742 TP_STRUCT__entry(
1743 __field(unsigned long long, ino)
1744 __string(name, name)
1745 __field(int, name_index)
1746 __field(unsigned int, hash)
1747 __field(unsigned long long, location)
1748 __field(int, xe_index)
1749 ),
1750 TP_fast_assign(
1751 __entry->ino = ino;
1752 __assign_str(name, name);
1753 __entry->name_index = name_index;
1754 __entry->hash = hash;
1755 __entry->location = location;
1756 __entry->xe_index = xe_index;
1757 ),
1758 TP_printk("%llu %s %d %u %llu %d", __entry->ino, __get_str(name),
1759 __entry->name_index, __entry->hash, __entry->location,
1760 __entry->xe_index)
1761);
1762
1763#define DEFINE_OCFS2_XATTR_FIND_EVENT(name) \
1764DEFINE_EVENT(ocfs2__xattr_find, name, \
1765TP_PROTO(unsigned long long ino, const char *name, int name_index, \
1766 unsigned int hash, unsigned long long bucket, \
1767 int xe_index), \
1768 TP_ARGS(ino, name, name_index, hash, bucket, xe_index))
1769
1770DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_bucket_find);
1771
1772DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find);
1773
1774DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find_rec);
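/*
 * A minimal usage sketch (an assumed call site, not taken verbatim from the
 * patch): each DEFINE_OCFS2_XATTR_FIND_EVENT() above declares a
 * trace_<name>() helper with the ocfs2__xattr_find TP_PROTO signature, so a
 * lookup path in fs/ocfs2/xattr.c can report where a name was found along
 * the lines of:
 */
static inline void ocfs2_xattr_find_trace_example(unsigned long long ino,
						  const char *name,
						  int name_index,
						  unsigned int hash,
						  unsigned long long blkno,
						  int xe_index)
{
	/* the bucket and index-block variants take the same arguments */
	trace_ocfs2_xattr_bucket_find(ino, name, name_index, hash,
				      blkno, xe_index);
}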
1775
1776DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_iterate_xattr_buckets);
1777
1778DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_iterate_xattr_bucket);
1779
1780DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cp_xattr_block_to_bucket_begin);
1781
1782DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cp_xattr_block_to_bucket_end);
1783
1784DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block_begin);
1785
1786DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block);
1787
1788DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_defrag_xattr_bucket);
1789
1790DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_bucket_cross_cluster);
1791
1792DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_divide_xattr_bucket_begin);
1793
1794DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_divide_xattr_bucket_move);
1795
1796DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_cp_xattr_bucket);
1797
1798DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_buckets);
1799
1800DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_adjust_xattr_cross_cluster);
1801
1802DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_begin);
1803
1804DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_add_new_xattr_cluster);
1805
1806DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_insert);
1807
1808DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_extend_xattr_bucket);
1809
1810DEFINE_OCFS2_ULL_EVENT(ocfs2_add_new_xattr_bucket);
1811
1812DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_xattr_bucket_value_truncate);
1813
1814DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_rm_xattr_cluster);
1815
1816DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_header);
1817
1818DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_create_empty_xattr_block);
1819
1820DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_bucket);
1821
1822DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_index_block);
1823
1824DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_xattr_bucket_value_refcount);
1825
1826DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_reflink_xattr_buckets);
1827
1828DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_rec);
1829
1830/* End of trace events for fs/ocfs2/xattr.c. */
1831
1832/* Trace events for fs/ocfs2/reservations.c. */
1833
1834DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_insert);
1835
1836DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_begin);
1837
1838DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_end);
1839
1840TRACE_EVENT(ocfs2_resv_find_window_begin,
1841 TP_PROTO(unsigned int r_start, unsigned int r_end, unsigned int goal,
1842 unsigned int wanted, int empty_root),
1843 TP_ARGS(r_start, r_end, goal, wanted, empty_root),
1844 TP_STRUCT__entry(
1845 __field(unsigned int, r_start)
1846 __field(unsigned int, r_end)
1847 __field(unsigned int, goal)
1848 __field(unsigned int, wanted)
1849 __field(int, empty_root)
1850 ),
1851 TP_fast_assign(
1852 __entry->r_start = r_start;
1853 __entry->r_end = r_end;
1854 __entry->goal = goal;
1855 __entry->wanted = wanted;
1856 __entry->empty_root = empty_root;
1857 ),
1858 TP_printk("%u %u %u %u %d", __entry->r_start, __entry->r_end,
1859 __entry->goal, __entry->wanted, __entry->empty_root)
1860);
1861
1862DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_find_window_prev);
1863
1864DEFINE_OCFS2_INT_INT_EVENT(ocfs2_resv_find_window_next);
1865
1866DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cannibalize_resv_begin);
1867
1868TRACE_EVENT(ocfs2_cannibalize_resv_end,
1869 TP_PROTO(unsigned int start, unsigned int end, unsigned int len,
1870 unsigned int last_start, unsigned int last_len),
1871 TP_ARGS(start, end, len, last_start, last_len),
1872 TP_STRUCT__entry(
1873 __field(unsigned int, start)
1874 __field(unsigned int, end)
1875 __field(unsigned int, len)
1876 __field(unsigned int, last_start)
1877 __field(unsigned int, last_len)
1878 ),
1879 TP_fast_assign(
1880 __entry->start = start;
1881 __entry->end = end;
1882 __entry->len = len;
1883 __entry->last_start = last_start;
1884 __entry->last_len = last_len;
1885 ),
1886 TP_printk("%u %u %u %u %u", __entry->start, __entry->end,
1887 __entry->len, __entry->last_start, __entry->last_len)
1888);
1889
1890DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_resv_bits);
1891
1892TRACE_EVENT(ocfs2_resmap_claimed_bits_begin,
1893 TP_PROTO(unsigned int cstart, unsigned int cend, unsigned int clen,
1894 unsigned int r_start, unsigned int r_end, unsigned int r_len,
1895 unsigned int last_start, unsigned int last_len),
1896 TP_ARGS(cstart, cend, clen, r_start, r_end,
1897 r_len, last_start, last_len),
1898 TP_STRUCT__entry(
1899 __field(unsigned int, cstart)
1900 __field(unsigned int, cend)
1901 __field(unsigned int, clen)
1902 __field(unsigned int, r_start)
1903 __field(unsigned int, r_end)
1904 __field(unsigned int, r_len)
1905 __field(unsigned int, last_start)
1906 __field(unsigned int, last_len)
1907 ),
1908 TP_fast_assign(
1909 __entry->cstart = cstart;
1910 __entry->cend = cend;
1911 __entry->clen = clen;
1912 __entry->r_start = r_start;
1913 __entry->r_end = r_end;
1914 __entry->r_len = r_len;
1915 __entry->last_start = last_start;
1916 __entry->last_len = last_len;
1917 ),
1918 TP_printk("%u %u %u %u %u %u %u %u",
1919 __entry->cstart, __entry->cend, __entry->clen,
1920 __entry->r_start, __entry->r_end, __entry->r_len,
1921 __entry->last_start, __entry->last_len)
1922);
1923
1924TRACE_EVENT(ocfs2_resmap_claimed_bits_end,
1925 TP_PROTO(unsigned int start, unsigned int end, unsigned int len,
1926 unsigned int last_start, unsigned int last_len),
1927 TP_ARGS(start, end, len, last_start, last_len),
1928 TP_STRUCT__entry(
1929 __field(unsigned int, start)
1930 __field(unsigned int, end)
1931 __field(unsigned int, len)
1932 __field(unsigned int, last_start)
1933 __field(unsigned int, last_len)
1934 ),
1935 TP_fast_assign(
1936 __entry->start = start;
1937 __entry->end = end;
1938 __entry->len = len;
1939 __entry->last_start = last_start;
1940 __entry->last_len = last_len;
1941 ),
1942 TP_printk("%u %u %u %u %u", __entry->start, __entry->end,
1943 __entry->len, __entry->last_start, __entry->last_len)
1944);
1945
1946/* End of trace events for fs/ocfs2/reservations.c. */
1947
1948/* Trace events for fs/ocfs2/quota_local.c. */
1949
1950DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_recover_local_quota_file);
1951
1952DEFINE_OCFS2_INT_EVENT(ocfs2_finish_quota_recovery);
1953
1954DEFINE_OCFS2_ULL_ULL_UINT_EVENT(olq_set_dquot);
1955
1956/* End of trace events for fs/ocfs2/quota_local.c. */
1957
1958/* Trace events for fs/ocfs2/quota_global.c. */
1959
1960DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_quota_block);
1961
1962TRACE_EVENT(ocfs2_sync_dquot,
1963 TP_PROTO(unsigned int dq_id, long long dqb_curspace,
1964 long long spacechange, long long curinodes,
1965 long long inodechange),
1966 TP_ARGS(dq_id, dqb_curspace, spacechange, curinodes, inodechange),
1967 TP_STRUCT__entry(
1968 __field(unsigned int, dq_id)
1969 __field(long long, dqb_curspace)
1970 __field(long long, spacechange)
1971 __field(long long, curinodes)
1972 __field(long long, inodechange)
1973 ),
1974 TP_fast_assign(
1975 __entry->dq_id = dq_id;
1976 __entry->dqb_curspace = dqb_curspace;
1977 __entry->spacechange = spacechange;
1978 __entry->curinodes = curinodes;
1979 __entry->inodechange = inodechange;
1980 ),
1981 TP_printk("%u %lld %lld %lld %lld", __entry->dq_id,
1982 __entry->dqb_curspace, __entry->spacechange,
1983 __entry->curinodes, __entry->inodechange)
1984);
1985
1986TRACE_EVENT(ocfs2_sync_dquot_helper,
1987 TP_PROTO(unsigned int dq_id, unsigned int dq_type, unsigned long type,
1988 const char *s_id),
1989 TP_ARGS(dq_id, dq_type, type, s_id),
1990
1991 TP_STRUCT__entry(
1992 __field(unsigned int, dq_id)
1993 __field(unsigned int, dq_type)
1994 __field(unsigned long, type)
1995 __string(s_id, s_id)
1996 ),
1997 TP_fast_assign(
1998 __entry->dq_id = dq_id;
1999 __entry->dq_type = dq_type;
2000 __entry->type = type;
2001 __assign_str(s_id, s_id);
2002 ),
2003 TP_printk("%u %u %lu %s", __entry->dq_id, __entry->dq_type,
2004 __entry->type, __get_str(s_id))
2005);
2006
2007DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_write_dquot);
2008
2009DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_release_dquot);
2010
2011DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_acquire_dquot);
2012
2013DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_mark_dquot_dirty);
2014
2015/* End of trace events for fs/ocfs2/quota_global.c. */
2016
2017/* Trace events for fs/ocfs2/dir.c. */
2018DEFINE_OCFS2_INT_EVENT(ocfs2_search_dirblock);
2019
2020DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_dir_block);
2021
2022DEFINE_OCFS2_POINTER_EVENT(ocfs2_find_entry_el);
2023
2024TRACE_EVENT(ocfs2_dx_dir_search,
2025 TP_PROTO(unsigned long long ino, int namelen, const char *name,
2026 unsigned int major_hash, unsigned int minor_hash,
2027 unsigned long long blkno),
2028 TP_ARGS(ino, namelen, name, major_hash, minor_hash, blkno),
2029 TP_STRUCT__entry(
2030 __field(unsigned long long, ino)
2031 __field(int, namelen)
2032 __string(name, name)
2033 __field(unsigned int, major_hash)
2034		__field(unsigned int, minor_hash)
2035 __field(unsigned long long, blkno)
2036 ),
2037 TP_fast_assign(
2038 __entry->ino = ino;
2039 __entry->namelen = namelen;
2040 __assign_str(name, name);
2041 __entry->major_hash = major_hash;
2042 __entry->minor_hash = minor_hash;
2043 __entry->blkno = blkno;
2044 ),
2045 TP_printk("%llu %.*s %u %u %llu", __entry->ino,
2046 __entry->namelen, __get_str(name),
2047 __entry->major_hash, __entry->minor_hash, __entry->blkno)
2048);
2049
2050DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_dx_dir_search_leaf_info);
2051
2052DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_delete_entry_dx);
2053
2054DEFINE_OCFS2_ULL_EVENT(ocfs2_readdir);
2055
2056TRACE_EVENT(ocfs2_find_files_on_disk,
2057 TP_PROTO(int namelen, const char *name, void *blkno,
2058 unsigned long long dir),
2059 TP_ARGS(namelen, name, blkno, dir),
2060 TP_STRUCT__entry(
2061 __field(int, namelen)
2062 __string(name, name)
2063 __field(void *, blkno)
2064 __field(unsigned long long, dir)
2065 ),
2066 TP_fast_assign(
2067 __entry->namelen = namelen;
2068 __assign_str(name, name);
2069 __entry->blkno = blkno;
2070 __entry->dir = dir;
2071 ),
2072 TP_printk("%.*s %p %llu", __entry->namelen, __get_str(name),
2073 __entry->blkno, __entry->dir)
2074);
2075
2076TRACE_EVENT(ocfs2_check_dir_for_entry,
2077 TP_PROTO(unsigned long long dir, int namelen, const char *name),
2078 TP_ARGS(dir, namelen, name),
2079 TP_STRUCT__entry(
2080 __field(unsigned long long, dir)
2081 __field(int, namelen)
2082 __string(name, name)
2083 ),
2084 TP_fast_assign(
2085 __entry->dir = dir;
2086 __entry->namelen = namelen;
2087 __assign_str(name, name);
2088 ),
2089 TP_printk("%llu %.*s", __entry->dir,
2090 __entry->namelen, __get_str(name))
2091);
2092
2093DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_dx_dir_attach_index);
2094
2095DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_format_cluster);
2096
2097TRACE_EVENT(ocfs2_dx_dir_index_root_block,
2098 TP_PROTO(unsigned long long dir,
2099 unsigned int major_hash, unsigned int minor_hash,
2100 int namelen, const char *name, unsigned int num_used),
2101 TP_ARGS(dir, major_hash, minor_hash, namelen, name, num_used),
2102 TP_STRUCT__entry(
2103 __field(unsigned long long, dir)
2104 __field(unsigned int, major_hash)
2105 __field(unsigned int, minor_hash)
2106 __field(int, namelen)
2107 __string(name, name)
2108 __field(unsigned int, num_used)
2109 ),
2110 TP_fast_assign(
2111 __entry->dir = dir;
2112 __entry->major_hash = major_hash;
2113 __entry->minor_hash = minor_hash;
2114 __entry->namelen = namelen;
2115 __assign_str(name, name);
2116 __entry->num_used = num_used;
2117 ),
2118 TP_printk("%llu %x %x %.*s %u", __entry->dir,
2119 __entry->major_hash, __entry->minor_hash,
2120 __entry->namelen, __get_str(name), __entry->num_used)
2121);
2122
2123DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_extend_dir);
2124
2125DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_rebalance);
2126
2127DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_dx_dir_rebalance_split);
2128
2129DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_prepare_dir_for_insert);
2130
2131/* End of trace events for fs/ocfs2/dir.c. */
2132
2133/* Trace events for fs/ocfs2/namei.c. */
2134
2135DECLARE_EVENT_CLASS(ocfs2__dentry_ops,
2136 TP_PROTO(void *dir, void *dentry, int name_len, const char *name,
2137 unsigned long long dir_blkno, unsigned long long extra),
2138 TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra),
2139 TP_STRUCT__entry(
2140 __field(void *, dir)
2141 __field(void *, dentry)
2142 __field(int, name_len)
2143 __string(name, name)
2144 __field(unsigned long long, dir_blkno)
2145 __field(unsigned long long, extra)
2146 ),
2147 TP_fast_assign(
2148 __entry->dir = dir;
2149 __entry->dentry = dentry;
2150 __entry->name_len = name_len;
2151 __assign_str(name, name);
2152 __entry->dir_blkno = dir_blkno;
2153 __entry->extra = extra;
2154 ),
2155 TP_printk("%p %p %.*s %llu %llu", __entry->dir, __entry->dentry,
2156 __entry->name_len, __get_str(name),
2157 __entry->dir_blkno, __entry->extra)
2158);
2159
2160#define DEFINE_OCFS2_DENTRY_OPS(name) \
2161DEFINE_EVENT(ocfs2__dentry_ops, name, \
2162TP_PROTO(void *dir, void *dentry, int name_len, const char *name, \
2163 unsigned long long dir_blkno, unsigned long long extra), \
2164 TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra))
2165
2166DEFINE_OCFS2_DENTRY_OPS(ocfs2_lookup);
2167
2168DEFINE_OCFS2_DENTRY_OPS(ocfs2_mkdir);
2169
2170DEFINE_OCFS2_DENTRY_OPS(ocfs2_create);
2171
2172DEFINE_OCFS2_DENTRY_OPS(ocfs2_unlink);
2173
2174DEFINE_OCFS2_DENTRY_OPS(ocfs2_symlink_create);
2175
2176DEFINE_OCFS2_DENTRY_OPS(ocfs2_mv_orphaned_inode_to_new);
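/*
 * Adding another namei operation to this class costs one line; a
 * hypothetical event (not defined here) would read
 *
 *	DEFINE_OCFS2_DENTRY_OPS(ocfs2_rmdir);
 *
 * and would inherit the class's "%p %p %.*s %llu %llu" output, where the
 * "%.*s" pair truncates the stored name to name_len characters.
 */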
2177
2178DEFINE_OCFS2_POINTER_EVENT(ocfs2_lookup_ret);
2179
2180TRACE_EVENT(ocfs2_mknod,
2181 TP_PROTO(void *dir, void *dentry, int name_len, const char *name,
2182 unsigned long long dir_blkno, unsigned long dev, int mode),
2183 TP_ARGS(dir, dentry, name_len, name, dir_blkno, dev, mode),
2184 TP_STRUCT__entry(
2185 __field(void *, dir)
2186 __field(void *, dentry)
2187 __field(int, name_len)
2188 __string(name, name)
2189 __field(unsigned long long, dir_blkno)
2190 __field(unsigned long, dev)
2191 __field(int, mode)
2192 ),
2193 TP_fast_assign(
2194 __entry->dir = dir;
2195 __entry->dentry = dentry;
2196 __entry->name_len = name_len;
2197 __assign_str(name, name);
2198 __entry->dir_blkno = dir_blkno;
2199 __entry->dev = dev;
2200 __entry->mode = mode;
2201 ),
2202 TP_printk("%p %p %.*s %llu %lu %d", __entry->dir, __entry->dentry,
2203 __entry->name_len, __get_str(name),
2204 __entry->dir_blkno, __entry->dev, __entry->mode)
2205);
2206
2207TRACE_EVENT(ocfs2_link,
2208 TP_PROTO(unsigned long long ino, int old_len, const char *old_name,
2209 int name_len, const char *name),
2210 TP_ARGS(ino, old_len, old_name, name_len, name),
2211 TP_STRUCT__entry(
2212 __field(unsigned long long, ino)
2213 __field(int, old_len)
2214 __string(old_name, old_name)
2215 __field(int, name_len)
2216 __string(name, name)
2217 ),
2218 TP_fast_assign(
2219 __entry->ino = ino;
2220 __entry->old_len = old_len;
2221 __assign_str(old_name, old_name);
2222 __entry->name_len = name_len;
2223 __assign_str(name, name);
2224 ),
2225 TP_printk("%llu %.*s %.*s", __entry->ino,
2226 __entry->old_len, __get_str(old_name),
2227 __entry->name_len, __get_str(name))
2228);
2229
2230DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_unlink_noent);
2231
2232DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock);
2233
2234DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock_end);
2235
2236TRACE_EVENT(ocfs2_rename,
2237 TP_PROTO(void *old_dir, void *old_dentry,
2238 void *new_dir, void *new_dentry,
2239 int old_len, const char *old_name,
2240 int new_len, const char *new_name),
2241 TP_ARGS(old_dir, old_dentry, new_dir, new_dentry,
2242 old_len, old_name, new_len, new_name),
2243 TP_STRUCT__entry(
2244 __field(void *, old_dir)
2245 __field(void *, old_dentry)
2246 __field(void *, new_dir)
2247 __field(void *, new_dentry)
2248 __field(int, old_len)
2249 __string(old_name, old_name)
2250 __field(int, new_len)
2251 __string(new_name, new_name)
2252 ),
2253 TP_fast_assign(
2254 __entry->old_dir = old_dir;
2255 __entry->old_dentry = old_dentry;
2256 __entry->new_dir = new_dir;
2257 __entry->new_dentry = new_dentry;
2258 __entry->old_len = old_len;
2259 __assign_str(old_name, old_name);
2260 __entry->new_len = new_len;
2261 __assign_str(new_name, new_name);
2262 ),
2263 TP_printk("%p %p %p %p %.*s %.*s",
2264 __entry->old_dir, __entry->old_dentry,
2265 __entry->new_dir, __entry->new_dentry,
2266 __entry->old_len, __get_str(old_name),
2267 __entry->new_len, __get_str(new_name))
2268);
2269
2270TRACE_EVENT(ocfs2_rename_target_exists,
2271 TP_PROTO(int new_len, const char *new_name),
2272 TP_ARGS(new_len, new_name),
2273 TP_STRUCT__entry(
2274 __field(int, new_len)
2275 __string(new_name, new_name)
2276 ),
2277 TP_fast_assign(
2278 __entry->new_len = new_len;
2279 __assign_str(new_name, new_name);
2280 ),
2281 TP_printk("%.*s", __entry->new_len, __get_str(new_name))
2282);
2283
2284DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_rename_disagree);
2285
2286TRACE_EVENT(ocfs2_rename_over_existing,
2287 TP_PROTO(unsigned long long new_blkno, void *new_bh,
2288 unsigned long long newdi_blkno),
2289 TP_ARGS(new_blkno, new_bh, newdi_blkno),
2290 TP_STRUCT__entry(
2291 __field(unsigned long long, new_blkno)
2292 __field(void *, new_bh)
2293 __field(unsigned long long, newdi_blkno)
2294 ),
2295 TP_fast_assign(
2296 __entry->new_blkno = new_blkno;
2297 __entry->new_bh = new_bh;
2298 __entry->newdi_blkno = newdi_blkno;
2299 ),
2300 TP_printk("%llu %p %llu", __entry->new_blkno, __entry->new_bh,
2301 __entry->newdi_blkno)
2302);
2303
2304DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_create_symlink_data);
2305
2306TRACE_EVENT(ocfs2_symlink_begin,
2307 TP_PROTO(void *dir, void *dentry, const char *symname,
2308 int len, const char *name),
2309 TP_ARGS(dir, dentry, symname, len, name),
2310 TP_STRUCT__entry(
2311 __field(void *, dir)
2312 __field(void *, dentry)
2313 __field(const char *, symname)
2314 __field(int, len)
2315 __string(name, name)
2316 ),
2317 TP_fast_assign(
2318 __entry->dir = dir;
2319 __entry->dentry = dentry;
2320 __entry->symname = symname;
2321 __entry->len = len;
2322 __assign_str(name, name);
2323 ),
2324 TP_printk("%p %p %s %.*s", __entry->dir, __entry->dentry,
2325 __entry->symname, __entry->len, __get_str(name))
2326);
2327
2328TRACE_EVENT(ocfs2_blkno_stringify,
2329 TP_PROTO(unsigned long long blkno, const char *name, int namelen),
2330 TP_ARGS(blkno, name, namelen),
2331 TP_STRUCT__entry(
2332 __field(unsigned long long, blkno)
2333 __string(name, name)
2334 __field(int, namelen)
2335 ),
2336 TP_fast_assign(
2337 __entry->blkno = blkno;
2338 __assign_str(name, name);
2339 __entry->namelen = namelen;
2340 ),
2341 TP_printk("%llu %s %d", __entry->blkno, __get_str(name),
2342 __entry->namelen)
2343);
2344
2345DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_add_begin);
2346
2347DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_orphan_add_end);
2348
2349TRACE_EVENT(ocfs2_orphan_del,
2350 TP_PROTO(unsigned long long dir, const char *name, int namelen),
2351 TP_ARGS(dir, name, namelen),
2352 TP_STRUCT__entry(
2353 __field(unsigned long long, dir)
2354 __string(name, name)
2355 __field(int, namelen)
2356 ),
2357 TP_fast_assign(
2358 __entry->dir = dir;
2359 __assign_str(name, name);
2360 __entry->namelen = namelen;
2361 ),
2362 TP_printk("%llu %s %d", __entry->dir, __get_str(name),
2363 __entry->namelen)
2364);
2365
2366/* End of trace events for fs/ocfs2/namei.c. */
2367
2368/* Trace events for fs/ocfs2/dcache.c. */
2369
2370TRACE_EVENT(ocfs2_dentry_revalidate,
2371 TP_PROTO(void *dentry, int len, const char *name),
2372 TP_ARGS(dentry, len, name),
2373 TP_STRUCT__entry(
2374 __field(void *, dentry)
2375 __field(int, len)
2376 __string(name, name)
2377 ),
2378 TP_fast_assign(
2379 __entry->dentry = dentry;
2380 __entry->len = len;
2381 __assign_str(name, name);
2382 ),
2383 TP_printk("%p %.*s", __entry->dentry, __entry->len, __get_str(name))
2384);
2385
2386TRACE_EVENT(ocfs2_dentry_revalidate_negative,
2387 TP_PROTO(int len, const char *name, unsigned long pgen,
2388 unsigned long gen),
2389 TP_ARGS(len, name, pgen, gen),
2390 TP_STRUCT__entry(
2391 __field(int, len)
2392 __string(name, name)
2393 __field(unsigned long, pgen)
2394 __field(unsigned long, gen)
2395 ),
2396 TP_fast_assign(
2397 __entry->len = len;
2398 __assign_str(name, name);
2399 __entry->pgen = pgen;
2400 __entry->gen = gen;
2401 ),
2402 TP_printk("%.*s %lu %lu", __entry->len, __get_str(name),
2403 __entry->pgen, __entry->gen)
2404);
2405
2406DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_delete);
2407
2408DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_dentry_revalidate_orphaned);
2409
2410DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_nofsdata);
2411
2412DEFINE_OCFS2_INT_EVENT(ocfs2_dentry_revalidate_ret);
2413
2414TRACE_EVENT(ocfs2_find_local_alias,
2415 TP_PROTO(int len, const char *name),
2416 TP_ARGS(len, name),
2417 TP_STRUCT__entry(
2418 __field(int, len)
2419 __string(name, name)
2420 ),
2421 TP_fast_assign(
2422 __entry->len = len;
2423 __assign_str(name, name);
2424 ),
2425 TP_printk("%.*s", __entry->len, __get_str(name))
2426);
2427
2428TRACE_EVENT(ocfs2_dentry_attach_lock,
2429 TP_PROTO(int len, const char *name,
2430 unsigned long long parent, void *fsdata),
2431 TP_ARGS(len, name, parent, fsdata),
2432 TP_STRUCT__entry(
2433 __field(int, len)
2434 __string(name, name)
2435 __field(unsigned long long, parent)
2436 __field(void *, fsdata)
2437 ),
2438 TP_fast_assign(
2439 __entry->len = len;
2440 __assign_str(name, name);
2441 __entry->parent = parent;
2442 __entry->fsdata = fsdata;
2443 ),
2444 TP_printk("%.*s %llu %p", __entry->len, __get_str(name),
2445 __entry->parent, __entry->fsdata)
2446);
2447
2448TRACE_EVENT(ocfs2_dentry_attach_lock_found,
2449 TP_PROTO(const char *name, unsigned long long parent,
2450 unsigned long long ino),
2451 TP_ARGS(name, parent, ino),
2452 TP_STRUCT__entry(
2453 __string(name, name)
2454 __field(unsigned long long, parent)
2455 __field(unsigned long long, ino)
2456 ),
2457 TP_fast_assign(
2458 __assign_str(name, name);
2459 __entry->parent = parent;
2460 __entry->ino = ino;
2461 ),
2462 TP_printk("%s %llu %llu", __get_str(name), __entry->parent, __entry->ino)
2463);
2464/* End of trace events for fs/ocfs2/dcache.c. */
2465
2466/* Trace events for fs/ocfs2/export.c. */
2467
2468TRACE_EVENT(ocfs2_get_dentry_begin,
2469 TP_PROTO(void *sb, void *handle, unsigned long long blkno),
2470 TP_ARGS(sb, handle, blkno),
2471 TP_STRUCT__entry(
2472 __field(void *, sb)
2473 __field(void *, handle)
2474 __field(unsigned long long, blkno)
2475 ),
2476 TP_fast_assign(
2477 __entry->sb = sb;
2478 __entry->handle = handle;
2479 __entry->blkno = blkno;
2480 ),
2481 TP_printk("%p %p %llu", __entry->sb, __entry->handle, __entry->blkno)
2482);
2483
2484DEFINE_OCFS2_INT_INT_EVENT(ocfs2_get_dentry_test_bit);
2485
2486DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_get_dentry_stale);
2487
2488DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_get_dentry_generation);
2489
2490DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_dentry_end);
2491
2492TRACE_EVENT(ocfs2_get_parent,
2493 TP_PROTO(void *child, int len, const char *name,
2494 unsigned long long ino),
2495 TP_ARGS(child, len, name, ino),
2496 TP_STRUCT__entry(
2497 __field(void *, child)
2498 __field(int, len)
2499 __string(name, name)
2500 __field(unsigned long long, ino)
2501 ),
2502 TP_fast_assign(
2503 __entry->child = child;
2504 __entry->len = len;
2505 __assign_str(name, name);
2506 __entry->ino = ino;
2507 ),
2508 TP_printk("%p %.*s %llu", __entry->child, __entry->len,
2509 __get_str(name), __entry->ino)
2510);
2511
2512DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_parent_end);
2513
2514TRACE_EVENT(ocfs2_encode_fh_begin,
2515 TP_PROTO(void *dentry, int name_len, const char *name,
2516 void *fh, int len, int connectable),
2517 TP_ARGS(dentry, name_len, name, fh, len, connectable),
2518 TP_STRUCT__entry(
2519 __field(void *, dentry)
2520 __field(int, name_len)
2521 __string(name, name)
2522 __field(void *, fh)
2523 __field(int, len)
2524 __field(int, connectable)
2525 ),
2526 TP_fast_assign(
2527 __entry->dentry = dentry;
2528 __entry->name_len = name_len;
2529 __assign_str(name, name);
2530 __entry->fh = fh;
2531 __entry->len = len;
2532 __entry->connectable = connectable;
2533 ),
2534 TP_printk("%p %.*s %p %d %d", __entry->dentry, __entry->name_len,
2535 __get_str(name), __entry->fh, __entry->len,
2536 __entry->connectable)
2537);
2538
2539DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_self);
2540
2541DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_parent);
2542
2543DEFINE_OCFS2_INT_EVENT(ocfs2_encode_fh_type);
2544
2545/* End of trace events for fs/ocfs2/export.c. */
2546
2547/* Trace events for fs/ocfs2/journal.c. */
2548
2549DEFINE_OCFS2_UINT_EVENT(ocfs2_commit_cache_begin);
2550
2551DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end);
2552
2553DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
2554
2555DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
2556
2557DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access);
2558
2559DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty);
2560
2561DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_journal_init);
2562
2563DEFINE_OCFS2_UINT_EVENT(ocfs2_journal_init_maxlen);
2564
2565DEFINE_OCFS2_INT_EVENT(ocfs2_journal_shutdown);
2566
2567DEFINE_OCFS2_POINTER_EVENT(ocfs2_journal_shutdown_wait);
2568
2569DEFINE_OCFS2_ULL_EVENT(ocfs2_complete_recovery);
2570
2571DEFINE_OCFS2_INT_EVENT(ocfs2_complete_recovery_end);
2572
2573TRACE_EVENT(ocfs2_complete_recovery_slot,
2574 TP_PROTO(int slot, unsigned long long la_ino,
2575 unsigned long long tl_ino, void *qrec),
2576 TP_ARGS(slot, la_ino, tl_ino, qrec),
2577 TP_STRUCT__entry(
2578 __field(int, slot)
2579 __field(unsigned long long, la_ino)
2580 __field(unsigned long long, tl_ino)
2581 __field(void *, qrec)
2582 ),
2583 TP_fast_assign(
2584 __entry->slot = slot;
2585 __entry->la_ino = la_ino;
2586 __entry->tl_ino = tl_ino;
2587 __entry->qrec = qrec;
2588 ),
2589 TP_printk("%d %llu %llu %p", __entry->slot, __entry->la_ino,
2590 __entry->tl_ino, __entry->qrec)
2591);
2592
2593DEFINE_OCFS2_INT_INT_EVENT(ocfs2_recovery_thread_node);
2594
2595DEFINE_OCFS2_INT_EVENT(ocfs2_recovery_thread_end);
2596
2597TRACE_EVENT(ocfs2_recovery_thread,
2598 TP_PROTO(int node_num, int osb_node_num, int disable,
2599 void *recovery_thread, int map_set),
2600 TP_ARGS(node_num, osb_node_num, disable, recovery_thread, map_set),
2601 TP_STRUCT__entry(
2602 __field(int, node_num)
2603 __field(int, osb_node_num)
2604		__field(int, disable)
2605 __field(void *, recovery_thread)
2606		__field(int, map_set)
2607 ),
2608 TP_fast_assign(
2609 __entry->node_num = node_num;
2610 __entry->osb_node_num = osb_node_num;
2611 __entry->disable = disable;
2612 __entry->recovery_thread = recovery_thread;
2613 __entry->map_set = map_set;
2614 ),
2615 TP_printk("%d %d %d %p %d", __entry->node_num,
2616 __entry->osb_node_num, __entry->disable,
2617 __entry->recovery_thread, __entry->map_set)
2618);
2619
2620DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_replay_journal_recovered);
2621
2622DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_lock_err);
2623
2624DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_skip);
2625
2626DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_recover_node);
2627
2628DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_recover_node_skip);
2629
2630DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_mark_dead_nodes);
2631
2632DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_begin);
2633
2634DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_end);
2635
2636DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_filldir);
2637
2638DEFINE_OCFS2_INT_EVENT(ocfs2_recover_orphans);
2639
2640DEFINE_OCFS2_ULL_EVENT(ocfs2_recover_orphans_iput);
2641
2642DEFINE_OCFS2_INT_EVENT(ocfs2_wait_on_mount);
2643
2644/* End of trace events for fs/ocfs2/journal.c. */
2645
2646/* Trace events for fs/ocfs2/buffer_head_io.c. */
2647
2648DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_read_blocks_sync);
2649
2650DEFINE_OCFS2_ULL_EVENT(ocfs2_read_blocks_sync_jbd);
2651
2652DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_read_blocks_from_disk);
2653
2654DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_bh);
2655
2656DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_end);
2657
2658TRACE_EVENT(ocfs2_write_block,
2659 TP_PROTO(unsigned long long block, void *ci),
2660 TP_ARGS(block, ci),
2661 TP_STRUCT__entry(
2662 __field(unsigned long long, block)
2663 __field(void *, ci)
2664 ),
2665 TP_fast_assign(
2666 __entry->block = block;
2667 __entry->ci = ci;
2668 ),
2669 TP_printk("%llu %p", __entry->block, __entry->ci)
2670);
2671
2672TRACE_EVENT(ocfs2_read_blocks_begin,
2673 TP_PROTO(void *ci, unsigned long long block,
2674 unsigned int nr, int flags),
2675 TP_ARGS(ci, block, nr, flags),
2676 TP_STRUCT__entry(
2677 __field(void *, ci)
2678 __field(unsigned long long, block)
2679 __field(unsigned int, nr)
2680 __field(int, flags)
2681 ),
2682 TP_fast_assign(
2683 __entry->ci = ci;
2684 __entry->block = block;
2685 __entry->nr = nr;
2686 __entry->flags = flags;
2687 ),
2688 TP_printk("%p %llu %u %d", __entry->ci, __entry->block,
2689 __entry->nr, __entry->flags)
2690);
2691
2692/* End of trace events for fs/ocfs2/buffer_head_io.c. */
2693
2694/* Trace events for fs/ocfs2/uptodate.c. */
2695
2696DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_copied_metadata_tree);
2697
2698DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_metadata_cache_purge);
2699
2700DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_buffer_cached_begin);
2701
2702TRACE_EVENT(ocfs2_buffer_cached_end,
2703 TP_PROTO(int index, void *item),
2704 TP_ARGS(index, item),
2705 TP_STRUCT__entry(
2706 __field(int, index)
2707 __field(void *, item)
2708 ),
2709 TP_fast_assign(
2710 __entry->index = index;
2711 __entry->item = item;
2712 ),
2713 TP_printk("%d %p", __entry->index, __entry->item)
2714);
2715
2716DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_append_cache_array);
2717
2718DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_insert_cache_tree);
2719
2720DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_expand_cache);
2721
2722DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_set_buffer_uptodate);
2723
2724DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_set_buffer_uptodate_begin);
2725
2726DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_remove_metadata_array);
2727
2728DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_remove_metadata_tree);
2729
2730DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_remove_block_from_cache);
2731
2732/* End of trace events for fs/ocfs2/uptodate.c. */
2733#endif /* _TRACE_OCFS2_H */
2734
2735/* This part must be outside protection */
2736#undef TRACE_INCLUDE_PATH
2737#define TRACE_INCLUDE_PATH .
2738#define TRACE_INCLUDE_FILE ocfs2_trace
2739#include <trace/define_trace.h>
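The define_trace.h include above expands to nothing unless CREATE_TRACE_POINTS is defined, so exactly one compilation unit in the ocfs2 module instantiates these events; every other file, such as the quota and refcount code patched below, simply includes the header and calls the trace_*() helpers. A minimal sketch of the instantiating unit (which file plays that role is outside this excerpt):

	#define CREATE_TRACE_POINTS
	#include "ocfs2_trace.h"

Once compiled in, the events are grouped under the ocfs2 trace subsystem (assuming TRACE_SYSTEM is set to ocfs2 earlier in this header) and can be enabled per event or for the whole group at runtime.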
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index a73f64166481..279aef68025b 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -11,7 +11,6 @@
11#include <linux/writeback.h> 11#include <linux/writeback.h>
12#include <linux/workqueue.h> 12#include <linux/workqueue.h>
13 13
14#define MLOG_MASK_PREFIX ML_QUOTA
15#include <cluster/masklog.h> 14#include <cluster/masklog.h>
16 15
17#include "ocfs2_fs.h" 16#include "ocfs2_fs.h"
@@ -27,6 +26,7 @@
27#include "super.h" 26#include "super.h"
28#include "buffer_head_io.h" 27#include "buffer_head_io.h"
29#include "quota.h" 28#include "quota.h"
29#include "ocfs2_trace.h"
30 30
31/* 31/*
32 * Locking of quotas with OCFS2 is rather complex. Here are rules that 32 * Locking of quotas with OCFS2 is rather complex. Here are rules that
@@ -130,8 +130,7 @@ int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
130 struct ocfs2_disk_dqtrailer *dqt = 130 struct ocfs2_disk_dqtrailer *dqt =
131 ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data); 131 ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);
132 132
133 mlog(0, "Validating quota block %llu\n", 133 trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);
134 (unsigned long long)bh->b_blocknr);
135 134
136 BUG_ON(!buffer_uptodate(bh)); 135 BUG_ON(!buffer_uptodate(bh));
137 136
@@ -341,8 +340,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
341 u64 pcount; 340 u64 pcount;
342 int status; 341 int status;
343 342
344 mlog_entry_void();
345
346 /* Read global header */ 343 /* Read global header */
347 gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type], 344 gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
348 OCFS2_INVALID_SLOT); 345 OCFS2_INVALID_SLOT);
@@ -402,7 +399,8 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
402 msecs_to_jiffies(oinfo->dqi_syncms)); 399 msecs_to_jiffies(oinfo->dqi_syncms));
403 400
404out_err: 401out_err:
405 mlog_exit(status); 402 if (status)
403 mlog_errno(status);
406 return status; 404 return status;
407out_unlock: 405out_unlock:
408 ocfs2_unlock_global_qf(oinfo, 0); 406 ocfs2_unlock_global_qf(oinfo, 0);
@@ -508,9 +506,10 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
508 olditime = dquot->dq_dqb.dqb_itime; 506 olditime = dquot->dq_dqb.dqb_itime;
509 oldbtime = dquot->dq_dqb.dqb_btime; 507 oldbtime = dquot->dq_dqb.dqb_btime;
510 ocfs2_global_disk2memdqb(dquot, &dqblk); 508 ocfs2_global_disk2memdqb(dquot, &dqblk);
511 mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n", 509 trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace,
512 dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange, 510 (long long)spacechange,
513 dquot->dq_dqb.dqb_curinodes, (long long)inodechange); 511 dquot->dq_dqb.dqb_curinodes,
512 (long long)inodechange);
514 if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags)) 513 if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
515 dquot->dq_dqb.dqb_curspace += spacechange; 514 dquot->dq_dqb.dqb_curspace += spacechange;
516 if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags)) 515 if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
@@ -594,8 +593,8 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
594 struct ocfs2_super *osb = OCFS2_SB(sb); 593 struct ocfs2_super *osb = OCFS2_SB(sb);
595 int status = 0; 594 int status = 0;
596 595
597 mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id, 596 trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type,
598 dquot->dq_type, type, sb->s_id); 597 type, sb->s_id);
599 if (type != dquot->dq_type) 598 if (type != dquot->dq_type)
600 goto out; 599 goto out;
601 status = ocfs2_lock_global_qf(oinfo, 1); 600 status = ocfs2_lock_global_qf(oinfo, 1);
@@ -621,7 +620,6 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
621out_ilock: 620out_ilock:
622 ocfs2_unlock_global_qf(oinfo, 1); 621 ocfs2_unlock_global_qf(oinfo, 1);
623out: 622out:
624 mlog_exit(status);
625 return status; 623 return status;
626} 624}
627 625
@@ -647,7 +645,7 @@ static int ocfs2_write_dquot(struct dquot *dquot)
647 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 645 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
648 int status = 0; 646 int status = 0;
649 647
650 mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); 648 trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type);
651 649
652 handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS); 650 handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
653 if (IS_ERR(handle)) { 651 if (IS_ERR(handle)) {
@@ -660,7 +658,6 @@ static int ocfs2_write_dquot(struct dquot *dquot)
660 mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex); 658 mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
661 ocfs2_commit_trans(osb, handle); 659 ocfs2_commit_trans(osb, handle);
662out: 660out:
663 mlog_exit(status);
664 return status; 661 return status;
665} 662}
666 663
@@ -686,7 +683,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
686 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 683 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
687 int status = 0; 684 int status = 0;
688 685
689 mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); 686 trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type);
690 687
691 mutex_lock(&dquot->dq_lock); 688 mutex_lock(&dquot->dq_lock);
692 /* Check whether we are not racing with some other dqget() */ 689 /* Check whether we are not racing with some other dqget() */
@@ -722,7 +719,8 @@ out_ilock:
722 ocfs2_unlock_global_qf(oinfo, 1); 719 ocfs2_unlock_global_qf(oinfo, 1);
723out: 720out:
724 mutex_unlock(&dquot->dq_lock); 721 mutex_unlock(&dquot->dq_lock);
725 mlog_exit(status); 722 if (status)
723 mlog_errno(status);
726 return status; 724 return status;
727} 725}
728 726
@@ -743,7 +741,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
743 int need_alloc = ocfs2_global_qinit_alloc(sb, type); 741 int need_alloc = ocfs2_global_qinit_alloc(sb, type);
744 handle_t *handle; 742 handle_t *handle;
745 743
746 mlog_entry("id=%u, type=%d", dquot->dq_id, type); 744 trace_ocfs2_acquire_dquot(dquot->dq_id, type);
747 mutex_lock(&dquot->dq_lock); 745 mutex_lock(&dquot->dq_lock);
748 /* 746 /*
749 * We need an exclusive lock, because we're going to update use count 747 * We need an exclusive lock, because we're going to update use count
@@ -809,7 +807,8 @@ out_dq:
809 set_bit(DQ_ACTIVE_B, &dquot->dq_flags); 807 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
810out: 808out:
811 mutex_unlock(&dquot->dq_lock); 809 mutex_unlock(&dquot->dq_lock);
812 mlog_exit(status); 810 if (status)
811 mlog_errno(status);
813 return status; 812 return status;
814} 813}
815 814
@@ -829,7 +828,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
829 handle_t *handle; 828 handle_t *handle;
830 struct ocfs2_super *osb = OCFS2_SB(sb); 829 struct ocfs2_super *osb = OCFS2_SB(sb);
831 830
832 mlog_entry("id=%u, type=%d", dquot->dq_id, type); 831 trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type);
833 832
834 /* In case user set some limits, sync dquot immediately to global 833 /* In case user set some limits, sync dquot immediately to global
835 * quota file so that information propagates quicker */ 834 * quota file so that information propagates quicker */
@@ -866,7 +865,8 @@ out_dlock:
866out_ilock: 865out_ilock:
867 ocfs2_unlock_global_qf(oinfo, 1); 866 ocfs2_unlock_global_qf(oinfo, 1);
868out: 867out:
869 mlog_exit(status); 868 if (status)
869 mlog_errno(status);
870 return status; 870 return status;
871} 871}
872 872
@@ -877,8 +877,6 @@ static int ocfs2_write_info(struct super_block *sb, int type)
877 int status = 0; 877 int status = 0;
878 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; 878 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
879 879
880 mlog_entry_void();
881
882 status = ocfs2_lock_global_qf(oinfo, 1); 880 status = ocfs2_lock_global_qf(oinfo, 1);
883 if (status < 0) 881 if (status < 0)
884 goto out; 882 goto out;
@@ -893,7 +891,8 @@ static int ocfs2_write_info(struct super_block *sb, int type)
893out_ilock: 891out_ilock:
894 ocfs2_unlock_global_qf(oinfo, 1); 892 ocfs2_unlock_global_qf(oinfo, 1);
895out: 893out:
896 mlog_exit(status); 894 if (status)
895 mlog_errno(status);
897 return status; 896 return status;
898} 897}
899 898
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc78764ccc4c..dc8007fc9247 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -8,7 +8,6 @@
8#include <linux/quotaops.h> 8#include <linux/quotaops.h>
9#include <linux/module.h> 9#include <linux/module.h>
10 10
11#define MLOG_MASK_PREFIX ML_QUOTA
12#include <cluster/masklog.h> 11#include <cluster/masklog.h>
13 12
14#include "ocfs2_fs.h" 13#include "ocfs2_fs.h"
@@ -23,6 +22,7 @@
23#include "quota.h" 22#include "quota.h"
24#include "uptodate.h" 23#include "uptodate.h"
25#include "super.h" 24#include "super.h"
25#include "ocfs2_trace.h"
26 26
27/* Number of local quota structures per block */ 27/* Number of local quota structures per block */
28static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) 28static inline unsigned int ol_quota_entries_per_block(struct super_block *sb)
@@ -475,7 +475,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
475 struct ocfs2_recovery_chunk *rchunk, *next; 475 struct ocfs2_recovery_chunk *rchunk, *next;
476 qsize_t spacechange, inodechange; 476 qsize_t spacechange, inodechange;
477 477
478 mlog_entry("ino=%lu type=%u", (unsigned long)lqinode->i_ino, type); 478 trace_ocfs2_recover_local_quota_file((unsigned long)lqinode->i_ino, type);
479 479
480 list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) { 480 list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) {
481 chunk = rchunk->rc_chunk; 481 chunk = rchunk->rc_chunk;
@@ -575,7 +575,8 @@ out_put_bh:
575 } 575 }
576 if (status < 0) 576 if (status < 0)
577 free_recovery_list(&(rec->r_list[type])); 577 free_recovery_list(&(rec->r_list[type]));
578 mlog_exit(status); 578 if (status)
579 mlog_errno(status);
579 return status; 580 return status;
580} 581}
581 582
@@ -600,7 +601,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
600 for (type = 0; type < MAXQUOTAS; type++) { 601 for (type = 0; type < MAXQUOTAS; type++) {
601 if (list_empty(&(rec->r_list[type]))) 602 if (list_empty(&(rec->r_list[type])))
602 continue; 603 continue;
603 mlog(0, "Recovering quota in slot %d\n", slot_num); 604 trace_ocfs2_finish_quota_recovery(slot_num);
604 lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num); 605 lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num);
605 if (!lqinode) { 606 if (!lqinode) {
606 status = -ENOENT; 607 status = -ENOENT;
@@ -882,9 +883,10 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
882 dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes - 883 dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes -
883 od->dq_originodes); 884 od->dq_originodes);
884 spin_unlock(&dq_data_lock); 885 spin_unlock(&dq_data_lock);
885 mlog(0, "Writing local dquot %u space %lld inodes %lld\n", 886 trace_olq_set_dquot(
886 od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod), 887 (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod),
887 (long long)le64_to_cpu(dqblk->dqb_inodemod)); 888 (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod),
889 od->dq_dquot.dq_id);
888} 890}
889 891
890/* Write dquot to local quota file */ 892/* Write dquot to local quota file */
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index c384d634872a..5d32749c896d 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -16,7 +16,6 @@
16 */ 16 */
17 17
18#include <linux/sort.h> 18#include <linux/sort.h>
19#define MLOG_MASK_PREFIX ML_REFCOUNT
20#include <cluster/masklog.h> 19#include <cluster/masklog.h>
21#include "ocfs2.h" 20#include "ocfs2.h"
22#include "inode.h" 21#include "inode.h"
@@ -34,6 +33,7 @@
34#include "aops.h" 33#include "aops.h"
35#include "xattr.h" 34#include "xattr.h"
36#include "namei.h" 35#include "namei.h"
36#include "ocfs2_trace.h"
37 37
38#include <linux/bio.h> 38#include <linux/bio.h>
39#include <linux/blkdev.h> 39#include <linux/blkdev.h>
@@ -84,8 +84,7 @@ static int ocfs2_validate_refcount_block(struct super_block *sb,
84 struct ocfs2_refcount_block *rb = 84 struct ocfs2_refcount_block *rb =
85 (struct ocfs2_refcount_block *)bh->b_data; 85 (struct ocfs2_refcount_block *)bh->b_data;
86 86
87 mlog(0, "Validating refcount block %llu\n", 87 trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);
88 (unsigned long long)bh->b_blocknr);
89 88
90 BUG_ON(!buffer_uptodate(bh)); 89 BUG_ON(!buffer_uptodate(bh));
91 90
@@ -545,8 +544,8 @@ void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
545 while ((node = rb_last(root)) != NULL) { 544 while ((node = rb_last(root)) != NULL) {
546 tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); 545 tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
547 546
548 mlog(0, "Purge tree %llu\n", 547 trace_ocfs2_purge_refcount_trees(
549 (unsigned long long) tree->rf_blkno); 548 (unsigned long long) tree->rf_blkno);
550 549
551 rb_erase(&tree->rf_node, root); 550 rb_erase(&tree->rf_node, root);
552 ocfs2_free_refcount_tree(tree); 551 ocfs2_free_refcount_tree(tree);
@@ -575,7 +574,8 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
575 574
576 BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL); 575 BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
577 576
578 mlog(0, "create tree for inode %lu\n", inode->i_ino); 577 trace_ocfs2_create_refcount_tree(
578 (unsigned long long)OCFS2_I(inode)->ip_blkno);
579 579
580 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); 580 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
581 if (ret) { 581 if (ret) {
@@ -646,8 +646,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
646 di->i_refcount_loc = cpu_to_le64(first_blkno); 646 di->i_refcount_loc = cpu_to_le64(first_blkno);
647 spin_unlock(&oi->ip_lock); 647 spin_unlock(&oi->ip_lock);
648 648
649 mlog(0, "created tree for inode %lu, refblock %llu\n", 649 trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
650 inode->i_ino, (unsigned long long)first_blkno);
651 650
652 ocfs2_journal_dirty(handle, di_bh); 651 ocfs2_journal_dirty(handle, di_bh);
653 652
@@ -1256,8 +1255,9 @@ static int ocfs2_change_refcount_rec(handle_t *handle,
1256 goto out; 1255 goto out;
1257 } 1256 }
1258 1257
1259 mlog(0, "change index %d, old count %u, change %d\n", index, 1258 trace_ocfs2_change_refcount_rec(
1260 le32_to_cpu(rec->r_refcount), change); 1259 (unsigned long long)ocfs2_metadata_cache_owner(ci),
1260 index, le32_to_cpu(rec->r_refcount), change);
1261 le32_add_cpu(&rec->r_refcount, change); 1261 le32_add_cpu(&rec->r_refcount, change);
1262 1262
1263 if (!rec->r_refcount) { 1263 if (!rec->r_refcount) {
@@ -1353,8 +1353,8 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
1353 1353
1354 ocfs2_journal_dirty(handle, ref_root_bh); 1354 ocfs2_journal_dirty(handle, ref_root_bh);
1355 1355
1356 mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno, 1356 trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
1357 le16_to_cpu(new_rb->rf_records.rl_used)); 1357 le16_to_cpu(new_rb->rf_records.rl_used));
1358 1358
1359 *ref_leaf_bh = new_bh; 1359 *ref_leaf_bh = new_bh;
1360 new_bh = NULL; 1360 new_bh = NULL;
@@ -1466,9 +1466,9 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
1466 (struct ocfs2_refcount_block *)new_bh->b_data; 1466 (struct ocfs2_refcount_block *)new_bh->b_data;
1467 struct ocfs2_refcount_list *new_rl = &new_rb->rf_records; 1467 struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
1468 1468
1469 mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n", 1469 trace_ocfs2_divide_leaf_refcount_block(
1470 (unsigned long long)ref_leaf_bh->b_blocknr, 1470 (unsigned long long)ref_leaf_bh->b_blocknr,
1471 le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used)); 1471 le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
1472 1472
1473 /* 1473 /*
1474 * XXX: Improvement later. 1474 * XXX: Improvement later.
@@ -1601,8 +1601,8 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
1601 1601
1602 ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh); 1602 ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
1603 1603
1604 mlog(0, "insert new leaf block %llu at %u\n", 1604 trace_ocfs2_new_leaf_refcount_block(
1605 (unsigned long long)new_bh->b_blocknr, new_cpos); 1605 (unsigned long long)new_bh->b_blocknr, new_cpos);
1606 1606
1607 /* Insert the new leaf block with the specific offset cpos. */ 1607 /* Insert the new leaf block with the specific offset cpos. */
1608 ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr, 1608 ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
@@ -1794,11 +1794,10 @@ static int ocfs2_insert_refcount_rec(handle_t *handle,
1794 (le16_to_cpu(rf_list->rl_used) - index) * 1794 (le16_to_cpu(rf_list->rl_used) - index) *
1795 sizeof(struct ocfs2_refcount_rec)); 1795 sizeof(struct ocfs2_refcount_rec));
1796 1796
1797 mlog(0, "insert refcount record start %llu, len %u, count %u " 1797 trace_ocfs2_insert_refcount_rec(
1798 "to leaf block %llu at index %d\n", 1798 (unsigned long long)ref_leaf_bh->b_blocknr, index,
1799 (unsigned long long)le64_to_cpu(rec->r_cpos), 1799 (unsigned long long)le64_to_cpu(rec->r_cpos),
1800 le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount), 1800 le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));
1801 (unsigned long long)ref_leaf_bh->b_blocknr, index);
1802 1801
1803 rf_list->rl_recs[index] = *rec; 1802 rf_list->rl_recs[index] = *rec;
1804 1803
@@ -1850,10 +1849,12 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
1850 1849
1851 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); 1850 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1852 1851
1853 mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n", 1852 trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
1854 le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters), 1853 le32_to_cpu(orig_rec->r_clusters),
1855 le64_to_cpu(split_rec->r_cpos), 1854 le32_to_cpu(orig_rec->r_refcount),
1856 le32_to_cpu(split_rec->r_clusters)); 1855 le64_to_cpu(split_rec->r_cpos),
1856 le32_to_cpu(split_rec->r_clusters),
1857 le32_to_cpu(split_rec->r_refcount));
1857 1858
1858 /* 1859 /*
1859 * If we just need to split the header or tail clusters, 1860 * If we just need to split the header or tail clusters,
@@ -1967,12 +1968,11 @@ static int ocfs2_split_refcount_rec(handle_t *handle,
1967 1968
1968 if (split_rec->r_refcount) { 1969 if (split_rec->r_refcount) {
1969 rf_list->rl_recs[index] = *split_rec; 1970 rf_list->rl_recs[index] = *split_rec;
1970 mlog(0, "insert refcount record start %llu, len %u, count %u " 1971 trace_ocfs2_split_refcount_rec_insert(
1971 "to leaf block %llu at index %d\n", 1972 (unsigned long long)ref_leaf_bh->b_blocknr, index,
1972 (unsigned long long)le64_to_cpu(split_rec->r_cpos), 1973 (unsigned long long)le64_to_cpu(split_rec->r_cpos),
1973 le32_to_cpu(split_rec->r_clusters), 1974 le32_to_cpu(split_rec->r_clusters),
1974 le32_to_cpu(split_rec->r_refcount), 1975 le32_to_cpu(split_rec->r_refcount));
1975 (unsigned long long)ref_leaf_bh->b_blocknr, index);
1976 1976
1977 if (merge) 1977 if (merge)
1978 ocfs2_refcount_rec_merge(rb, index); 1978 ocfs2_refcount_rec_merge(rb, index);
@@ -1997,7 +1997,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
1997 struct ocfs2_refcount_rec rec; 1997 struct ocfs2_refcount_rec rec;
1998 unsigned int set_len = 0; 1998 unsigned int set_len = 0;
1999 1999
2000 mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n", 2000 trace_ocfs2_increase_refcount_begin(
2001 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2001 (unsigned long long)ocfs2_metadata_cache_owner(ci),
2002 (unsigned long long)cpos, len); 2002 (unsigned long long)cpos, len);
2003 2003
@@ -2024,9 +2024,9 @@ static int __ocfs2_increase_refcount(handle_t *handle,
2024 */ 2024 */
2025 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos && 2025 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
2026 set_len <= len) { 2026 set_len <= len) {
2027 mlog(0, "increase refcount rec, start %llu, len %u, " 2027 trace_ocfs2_increase_refcount_change(
2028 "count %u\n", (unsigned long long)cpos, set_len, 2028 (unsigned long long)cpos, set_len,
2029 le32_to_cpu(rec.r_refcount)); 2029 le32_to_cpu(rec.r_refcount));
2030 ret = ocfs2_change_refcount_rec(handle, ci, 2030 ret = ocfs2_change_refcount_rec(handle, ci,
2031 ref_leaf_bh, index, 2031 ref_leaf_bh, index,
2032 merge, 1); 2032 merge, 1);
@@ -2037,7 +2037,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
2037 } else if (!rec.r_refcount) { 2037 } else if (!rec.r_refcount) {
2038 rec.r_refcount = cpu_to_le32(1); 2038 rec.r_refcount = cpu_to_le32(1);
2039 2039
2040 mlog(0, "insert refcount rec, start %llu, len %u\n", 2040 trace_ocfs2_increase_refcount_insert(
2041 (unsigned long long)le64_to_cpu(rec.r_cpos), 2041 (unsigned long long)le64_to_cpu(rec.r_cpos),
2042 set_len); 2042 set_len);
2043 ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh, 2043 ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
@@ -2055,8 +2055,7 @@ static int __ocfs2_increase_refcount(handle_t *handle,
2055 rec.r_clusters = cpu_to_le32(set_len); 2055 rec.r_clusters = cpu_to_le32(set_len);
2056 le32_add_cpu(&rec.r_refcount, 1); 2056 le32_add_cpu(&rec.r_refcount, 1);
2057 2057
2058 mlog(0, "split refcount rec, start %llu, " 2058 trace_ocfs2_increase_refcount_split(
2059 "len %u, count %u\n",
2060 (unsigned long long)le64_to_cpu(rec.r_cpos), 2059 (unsigned long long)le64_to_cpu(rec.r_cpos),
2061 set_len, le32_to_cpu(rec.r_refcount)); 2060 set_len, le32_to_cpu(rec.r_refcount));
2062 ret = ocfs2_split_refcount_rec(handle, ci, 2061 ret = ocfs2_split_refcount_rec(handle, ci,
@@ -2095,6 +2094,11 @@ static int ocfs2_remove_refcount_extent(handle_t *handle,
2095 2094
2096 BUG_ON(rb->rf_records.rl_used); 2095 BUG_ON(rb->rf_records.rl_used);
2097 2096
2097 trace_ocfs2_remove_refcount_extent(
2098 (unsigned long long)ocfs2_metadata_cache_owner(ci),
2099 (unsigned long long)ref_leaf_bh->b_blocknr,
2100 le32_to_cpu(rb->rf_cpos));
2101
2098 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2102 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2099 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), 2103 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
2100 1, meta_ac, dealloc); 2104 1, meta_ac, dealloc);
@@ -2137,7 +2141,7 @@ static int ocfs2_remove_refcount_extent(handle_t *handle,
2137 if (!rb->rf_list.l_next_free_rec) { 2141 if (!rb->rf_list.l_next_free_rec) {
2138 BUG_ON(rb->rf_clusters); 2142 BUG_ON(rb->rf_clusters);
2139 2143
2140 mlog(0, "reset refcount tree root %llu to be a record block.\n", 2144 trace_ocfs2_restore_refcount_block(
2141 (unsigned long long)ref_root_bh->b_blocknr); 2145 (unsigned long long)ref_root_bh->b_blocknr);
2142 2146
2143 rb->rf_flags = 0; 2147 rb->rf_flags = 0;
@@ -2184,6 +2188,10 @@ static int ocfs2_decrease_refcount_rec(handle_t *handle,
2184 BUG_ON(cpos + len > 2188 BUG_ON(cpos + len >
2185 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); 2189 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
2186 2190
2191 trace_ocfs2_decrease_refcount_rec(
2192 (unsigned long long)ocfs2_metadata_cache_owner(ci),
2193 (unsigned long long)cpos, len);
2194
2187 if (cpos == le64_to_cpu(rec->r_cpos) && 2195 if (cpos == le64_to_cpu(rec->r_cpos) &&
2188 len == le32_to_cpu(rec->r_clusters)) 2196 len == le32_to_cpu(rec->r_clusters))
2189 ret = ocfs2_change_refcount_rec(handle, ci, 2197 ret = ocfs2_change_refcount_rec(handle, ci,
@@ -2195,12 +2203,6 @@ static int ocfs2_decrease_refcount_rec(handle_t *handle,
2195 2203
2196 le32_add_cpu(&split.r_refcount, -1); 2204 le32_add_cpu(&split.r_refcount, -1);
2197 2205
2198 mlog(0, "split refcount rec, start %llu, "
2199 "len %u, count %u, original start %llu, len %u\n",
2200 (unsigned long long)le64_to_cpu(split.r_cpos),
2201 len, le32_to_cpu(split.r_refcount),
2202 (unsigned long long)le64_to_cpu(rec->r_cpos),
2203 le32_to_cpu(rec->r_clusters));
2204 ret = ocfs2_split_refcount_rec(handle, ci, 2206 ret = ocfs2_split_refcount_rec(handle, ci,
2205 ref_root_bh, ref_leaf_bh, 2207 ref_root_bh, ref_leaf_bh,
2206 &split, index, 1, 2208 &split, index, 1,
@@ -2239,10 +2241,9 @@ static int __ocfs2_decrease_refcount(handle_t *handle,
2239 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2241 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2240 struct buffer_head *ref_leaf_bh = NULL; 2242 struct buffer_head *ref_leaf_bh = NULL;
2241 2243
2242 mlog(0, "Tree owner %llu, decrease refcount start %llu, " 2244 trace_ocfs2_decrease_refcount(
2243 "len %u, delete %u\n", 2245 (unsigned long long)ocfs2_metadata_cache_owner(ci),
2244 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2246 (unsigned long long)cpos, len, delete);
2245 (unsigned long long)cpos, len, delete);
2246 2247
2247 while (len) { 2248 while (len) {
2248 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2249 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
@@ -2352,8 +2353,8 @@ static int ocfs2_mark_extent_refcounted(struct inode *inode,
2352{ 2353{
2353 int ret; 2354 int ret;
2354 2355
2355 mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n", 2356 trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
2356 inode->i_ino, cpos, len, phys); 2357 cpos, len, phys);
2357 2358
2358 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 2359 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2359 ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " 2360 ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
@@ -2392,8 +2393,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2392 struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; 2393 struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
2393 u32 len; 2394 u32 len;
2394 2395
2395 mlog(0, "start_cpos %llu, clusters %u\n",
2396 (unsigned long long)start_cpos, clusters);
2397 while (clusters) { 2396 while (clusters) {
2398 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2397 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2399 cpos, clusters, &rec, 2398 cpos, clusters, &rec,
@@ -2427,12 +2426,11 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2427 2426
2428 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2427 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2429 2428
2430 mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu," 2429 trace_ocfs2_calc_refcount_meta_credits_iterate(
2431 "rec->r_clusters %u, rec->r_refcount %u, index %d\n", 2430 recs_add, (unsigned long long)cpos, clusters,
2432 recs_add, (unsigned long long)cpos, clusters, 2431 (unsigned long long)le64_to_cpu(rec.r_cpos),
2433 (unsigned long long)le64_to_cpu(rec.r_cpos), 2432 le32_to_cpu(rec.r_clusters),
2434 le32_to_cpu(rec.r_clusters), 2433 le32_to_cpu(rec.r_refcount), index);
2435 le32_to_cpu(rec.r_refcount), index);
2436 2434
2437 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + 2435 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2438 le32_to_cpu(rec.r_clusters)) - cpos; 2436 le32_to_cpu(rec.r_clusters)) - cpos;
@@ -2488,7 +2486,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2488 if (!ref_blocks) 2486 if (!ref_blocks)
2489 goto out; 2487 goto out;
2490 2488
2491 mlog(0, "we need ref_blocks %d\n", ref_blocks);
2492 *meta_add += ref_blocks; 2489 *meta_add += ref_blocks;
2493 *credits += ref_blocks; 2490 *credits += ref_blocks;
2494 2491
@@ -2514,6 +2511,10 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2514 } 2511 }
2515 2512
2516out: 2513out:
2514
2515 trace_ocfs2_calc_refcount_meta_credits(
2516 (unsigned long long)start_cpos, clusters,
2517 *meta_add, *credits);
2517 brelse(ref_leaf_bh); 2518 brelse(ref_leaf_bh);
2518 brelse(prev_bh); 2519 brelse(prev_bh);
2519 return ret; 2520 return ret;
@@ -2578,8 +2579,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
2578 goto out; 2579 goto out;
2579 } 2580 }
2580 2581
2581 mlog(0, "reserve new metadata %d blocks, credits = %d\n", 2582 trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
2582 *ref_blocks, *credits);
2583 2583
2584out: 2584out:
2585 brelse(ref_root_bh); 2585 brelse(ref_root_bh);
@@ -2886,8 +2886,7 @@ static int ocfs2_lock_refcount_allocators(struct super_block *sb,
2886 goto out; 2886 goto out;
2887 } 2887 }
2888 2888
2889 mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n", 2889 trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
2890 meta_add, num_clusters, *credits);
2891 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add, 2890 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
2892 meta_ac); 2891 meta_ac);
2893 if (ret) { 2892 if (ret) {
@@ -2937,8 +2936,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2937 loff_t offset, end, map_end; 2936 loff_t offset, end, map_end;
2938 struct address_space *mapping = context->inode->i_mapping; 2937 struct address_space *mapping = context->inode->i_mapping;
2939 2938
2940 mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster, 2939 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
2941 new_cluster, new_len, cpos); 2940 new_cluster, new_len);
2942 2941
2943 readahead_pages = 2942 readahead_pages =
2944 (ocfs2_cow_contig_clusters(sb) << 2943 (ocfs2_cow_contig_clusters(sb) <<
@@ -3031,8 +3030,8 @@ static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
3031 struct buffer_head *old_bh = NULL; 3030 struct buffer_head *old_bh = NULL;
3032 struct buffer_head *new_bh = NULL; 3031 struct buffer_head *new_bh = NULL;
3033 3032
3034 mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster, 3033 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
3035 new_cluster, new_len); 3034 new_cluster, new_len);
3036 3035
3037 for (i = 0; i < blocks; i++, old_block++, new_block++) { 3036 for (i = 0; i < blocks; i++, old_block++, new_block++) {
3038 new_bh = sb_getblk(osb->sb, new_block); 3037 new_bh = sb_getblk(osb->sb, new_block);
@@ -3085,8 +3084,8 @@ static int ocfs2_clear_ext_refcount(handle_t *handle,
3085 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); 3084 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
3086 u64 ino = ocfs2_metadata_cache_owner(et->et_ci); 3085 u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
3087 3086
3088 mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n", 3087 trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
3089 (unsigned long long)ino, cpos, len, p_cluster, ext_flags); 3088 cpos, len, p_cluster, ext_flags);
3090 3089
3091 memset(&replace_rec, 0, sizeof(replace_rec)); 3090 memset(&replace_rec, 0, sizeof(replace_rec));
3092 replace_rec.e_cpos = cpu_to_le32(cpos); 3091 replace_rec.e_cpos = cpu_to_le32(cpos);
@@ -3141,8 +3140,8 @@ static int ocfs2_replace_clusters(handle_t *handle,
3141 struct ocfs2_caching_info *ci = context->data_et.et_ci; 3140 struct ocfs2_caching_info *ci = context->data_et.et_ci;
3142 u64 ino = ocfs2_metadata_cache_owner(ci); 3141 u64 ino = ocfs2_metadata_cache_owner(ci);
3143 3142
3144 mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n", 3143 trace_ocfs2_replace_clusters((unsigned long long)ino,
3145 (unsigned long long)ino, cpos, old, new, len, ext_flags); 3144 cpos, old, new, len, ext_flags);
3146 3145
3147 /*If the old clusters is unwritten, no need to duplicate. */ 3146 /*If the old clusters is unwritten, no need to duplicate. */
3148 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { 3147 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
@@ -3236,8 +3235,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
3236 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; 3235 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
3237 struct ocfs2_refcount_rec rec; 3236 struct ocfs2_refcount_rec rec;
3238 3237
3239 mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n", 3238 trace_ocfs2_make_clusters_writable(cpos, p_cluster,
3240 cpos, p_cluster, num_clusters, e_flags); 3239 num_clusters, e_flags);
3241 3240
3242 ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters, 3241 ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
3243 &context->data_et, 3242 &context->data_et,
@@ -3475,9 +3474,9 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
3475 goto out; 3474 goto out;
3476 } 3475 }
3477 3476
3478 mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, " 3477 trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
3479 "cow_len %u\n", inode->i_ino, 3478 cpos, write_len, max_cpos,
3480 cpos, write_len, cow_start, cow_len); 3479 cow_start, cow_len);
3481 3480
3482 BUG_ON(cow_len == 0); 3481 BUG_ON(cow_len == 0);
3483 3482
@@ -3756,8 +3755,7 @@ int ocfs2_add_refcount_flag(struct inode *inode,
3756 goto out; 3755 goto out;
3757 } 3756 }
3758 3757
3759 mlog(0, "reserve new metadata %d, credits = %d\n", 3758 trace_ocfs2_add_refcount_flag(ref_blocks, credits);
3760 ref_blocks, credits);
3761 3759
3762 if (ref_blocks) { 3760 if (ref_blocks) {
3763 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb), 3761 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
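The refcounttree.c hunks above all follow one pattern: each debugging mlog(0, ...) is replaced by a dedicated trace_ocfs2_*() call that passes the same values as plain arguments, while the human-readable format string moves out of the .c file and into the event definition in fs/ocfs2/ocfs2_trace.h. A minimal sketch of what such a trace header looks like is below; the class name, field name and which events share a class are illustrative assumptions, not the actual contents of ocfs2_trace.h.

/* Sketch of a kernel trace header in the style the trace_ocfs2_*() calls
 * above imply.  fs/ocfs2/ocfs2_trace.h is the authoritative source. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

/* One reusable class for events that record a single 64-bit number. */
DECLARE_EVENT_CLASS(ocfs2__ull,
	TP_PROTO(unsigned long long num),
	TP_ARGS(num),
	TP_STRUCT__entry(
		__field(unsigned long long, num)
	),
	TP_fast_assign(
		__entry->num = num;
	),
	TP_printk("%llu", __entry->num)
);

/* trace_ocfs2_create_refcount_tree_blkno(first_blkno), seen in the hunks
 * above, would come from a definition of this shape. */
DEFINE_EVENT(ocfs2__ull, ocfs2_create_refcount_tree_blkno,
	TP_PROTO(unsigned long long num),
	TP_ARGS(num)
);

#endif /* _TRACE_OCFS2_H */

/* Boilerplate that generates the code; must stay outside the guard. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE ocfs2_trace
#include <trace/define_trace.h>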
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index 3e78db361bc7..41ffd36c689c 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -30,10 +30,10 @@
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31#include <linux/list.h> 31#include <linux/list.h>
32 32
33#define MLOG_MASK_PREFIX ML_RESERVATIONS
34#include <cluster/masklog.h> 33#include <cluster/masklog.h>
35 34
36#include "ocfs2.h" 35#include "ocfs2.h"
36#include "ocfs2_trace.h"
37 37
38#ifdef CONFIG_OCFS2_DEBUG_FS 38#ifdef CONFIG_OCFS2_DEBUG_FS
39#define OCFS2_CHECK_RESERVATIONS 39#define OCFS2_CHECK_RESERVATIONS
@@ -321,8 +321,7 @@ static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap,
321 321
322 assert_spin_locked(&resv_lock); 322 assert_spin_locked(&resv_lock);
323 323
324 mlog(0, "Insert reservation start: %u len: %u\n", new->r_start, 324 trace_ocfs2_resv_insert(new->r_start, new->r_len);
325 new->r_len);
326 325
327 while (*p) { 326 while (*p) {
328 parent = *p; 327 parent = *p;
@@ -423,8 +422,8 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
423 unsigned int best_start, best_len = 0; 422 unsigned int best_start, best_len = 0;
424 int offset, start, found; 423 int offset, start, found;
425 424
426 mlog(0, "Find %u bits within range (%u, len %u) resmap len: %u\n", 425 trace_ocfs2_resmap_find_free_bits_begin(search_start, search_len,
427 wanted, search_start, search_len, resmap->m_bitmap_len); 426 wanted, resmap->m_bitmap_len);
428 427
429 found = best_start = best_len = 0; 428 found = best_start = best_len = 0;
430 429
@@ -463,7 +462,7 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
463 *rlen = best_len; 462 *rlen = best_len;
464 *rstart = best_start; 463 *rstart = best_start;
465 464
466 mlog(0, "Found start: %u len: %u\n", best_start, best_len); 465 trace_ocfs2_resmap_find_free_bits_end(best_start, best_len);
467 466
468 return *rlen; 467 return *rlen;
469} 468}
@@ -487,9 +486,8 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
487 * - our window should be last in all reservations 486 * - our window should be last in all reservations
488 * - need to make sure we don't go past end of bitmap 487 * - need to make sure we don't go past end of bitmap
489 */ 488 */
490 489 trace_ocfs2_resv_find_window_begin(resv->r_start, ocfs2_resv_end(resv),
491 mlog(0, "resv start: %u resv end: %u goal: %u wanted: %u\n", 490 goal, wanted, RB_EMPTY_ROOT(root));
492 resv->r_start, ocfs2_resv_end(resv), goal, wanted);
493 491
494 assert_spin_locked(&resv_lock); 492 assert_spin_locked(&resv_lock);
495 493
@@ -498,9 +496,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
498 * Easiest case - empty tree. We can just take 496 * Easiest case - empty tree. We can just take
499 * whatever window of free bits we want. 497 * whatever window of free bits we want.
500 */ 498 */
501
502 mlog(0, "Empty root\n");
503
504 clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal, 499 clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal,
505 resmap->m_bitmap_len - goal, 500 resmap->m_bitmap_len - goal,
506 &cstart, &clen); 501 &cstart, &clen);
@@ -524,8 +519,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
524 prev_resv = ocfs2_find_resv_lhs(resmap, goal); 519 prev_resv = ocfs2_find_resv_lhs(resmap, goal);
525 520
526 if (prev_resv == NULL) { 521 if (prev_resv == NULL) {
527 mlog(0, "Goal on LHS of leftmost window\n");
528
529 /* 522 /*
530 * A NULL here means that the search code couldn't 523 * A NULL here means that the search code couldn't
531 * find a window that starts before goal. 524 * find a window that starts before goal.
@@ -570,13 +563,15 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
570 next_resv = NULL; 563 next_resv = NULL;
571 } 564 }
572 565
566 trace_ocfs2_resv_find_window_prev(prev_resv->r_start,
567 ocfs2_resv_end(prev_resv));
568
573 prev = &prev_resv->r_node; 569 prev = &prev_resv->r_node;
574 570
575 /* Now we do a linear search for a window, starting at 'prev_rsv' */ 571 /* Now we do a linear search for a window, starting at 'prev_rsv' */
576 while (1) { 572 while (1) {
577 next = rb_next(prev); 573 next = rb_next(prev);
578 if (next) { 574 if (next) {
579 mlog(0, "One more resv found in linear search\n");
580 next_resv = rb_entry(next, 575 next_resv = rb_entry(next,
581 struct ocfs2_alloc_reservation, 576 struct ocfs2_alloc_reservation,
582 r_node); 577 r_node);
@@ -585,7 +580,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
585 gap_end = next_resv->r_start - 1; 580 gap_end = next_resv->r_start - 1;
586 gap_len = gap_end - gap_start + 1; 581 gap_len = gap_end - gap_start + 1;
587 } else { 582 } else {
588 mlog(0, "No next node\n");
589 /* 583 /*
590 * We're at the rightmost edge of the 584 * We're at the rightmost edge of the
591 * tree. See if a reservation between this 585 * tree. See if a reservation between this
@@ -596,6 +590,8 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap,
596 gap_end = resmap->m_bitmap_len - 1; 590 gap_end = resmap->m_bitmap_len - 1;
597 } 591 }
598 592
593 trace_ocfs2_resv_find_window_next(next ? next_resv->r_start: -1,
594 next ? ocfs2_resv_end(next_resv) : -1);
599 /* 595 /*
600 * No need to check this gap if we have already found 596 * No need to check this gap if we have already found
601 * a larger region of free bits. 597 * a larger region of free bits.
@@ -654,8 +650,9 @@ static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
654 lru_resv = list_first_entry(&resmap->m_lru, 650 lru_resv = list_first_entry(&resmap->m_lru,
655 struct ocfs2_alloc_reservation, r_lru); 651 struct ocfs2_alloc_reservation, r_lru);
656 652
657 mlog(0, "lru resv: start: %u len: %u end: %u\n", lru_resv->r_start, 653 trace_ocfs2_cannibalize_resv_begin(lru_resv->r_start,
658 lru_resv->r_len, ocfs2_resv_end(lru_resv)); 654 lru_resv->r_len,
655 ocfs2_resv_end(lru_resv));
659 656
660 /* 657 /*
661 * Cannibalize (some or all) of the target reservation and 658 * Cannibalize (some or all) of the target reservation and
@@ -684,10 +681,9 @@ static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap,
684 resv->r_len = shrink; 681 resv->r_len = shrink;
685 } 682 }
686 683
687 mlog(0, "Reservation now looks like: r_start: %u r_end: %u " 684 trace_ocfs2_cannibalize_resv_end(resv->r_start, ocfs2_resv_end(resv),
688 "r_len: %u r_last_start: %u r_last_len: %u\n", 685 resv->r_len, resv->r_last_start,
689 resv->r_start, ocfs2_resv_end(resv), resv->r_len, 686 resv->r_last_len);
690 resv->r_last_start, resv->r_last_len);
691 687
692 ocfs2_resv_insert(resmap, resv); 688 ocfs2_resv_insert(resmap, resv);
693} 689}
@@ -748,7 +744,6 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
748 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) 744 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
749 wanted = *clen; 745 wanted = *clen;
750 746
751 mlog(0, "empty reservation, find new window\n");
752 /* 747 /*
753 * Try to get a window here. If it works, we must fall 748 * Try to get a window here. If it works, we must fall
754 * through and test the bitmap . This avoids some 749 * through and test the bitmap . This avoids some
@@ -757,6 +752,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
757 * that inode. 752 * that inode.
758 */ 753 */
759 ocfs2_resv_find_window(resmap, resv, wanted); 754 ocfs2_resv_find_window(resmap, resv, wanted);
755 trace_ocfs2_resmap_resv_bits(resv->r_start, resv->r_len);
760 } 756 }
761 757
762 BUG_ON(ocfs2_resv_empty(resv)); 758 BUG_ON(ocfs2_resv_empty(resv));
@@ -813,10 +809,10 @@ void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
813 809
814 spin_lock(&resv_lock); 810 spin_lock(&resv_lock);
815 811
816 mlog(0, "claim bits: cstart: %u cend: %u clen: %u r_start: %u " 812 trace_ocfs2_resmap_claimed_bits_begin(cstart, cend, clen, resv->r_start,
817 "r_end: %u r_len: %u, r_last_start: %u r_last_len: %u\n", 813 ocfs2_resv_end(resv), resv->r_len,
818 cstart, cend, clen, resv->r_start, ocfs2_resv_end(resv), 814 resv->r_last_start,
819 resv->r_len, resv->r_last_start, resv->r_last_len); 815 resv->r_last_len);
820 816
821 BUG_ON(cstart < resv->r_start); 817 BUG_ON(cstart < resv->r_start);
822 BUG_ON(cstart > ocfs2_resv_end(resv)); 818 BUG_ON(cstart > ocfs2_resv_end(resv));
@@ -833,10 +829,9 @@ void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap,
833 if (!ocfs2_resv_empty(resv)) 829 if (!ocfs2_resv_empty(resv))
834 ocfs2_resv_mark_lru(resmap, resv); 830 ocfs2_resv_mark_lru(resmap, resv);
835 831
836 mlog(0, "Reservation now looks like: r_start: %u r_end: %u " 832 trace_ocfs2_resmap_claimed_bits_end(resv->r_start, ocfs2_resv_end(resv),
837 "r_len: %u r_last_start: %u r_last_len: %u\n", 833 resv->r_len, resv->r_last_start,
838 resv->r_start, ocfs2_resv_end(resv), resv->r_len, 834 resv->r_last_len);
839 resv->r_last_start, resv->r_last_len);
840 835
841 ocfs2_check_resmap(resmap); 836 ocfs2_check_resmap(resmap);
842 837
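In reservations.c the conversion is not always one-to-one: trace_ocfs2_resv_find_window_begin() also records RB_EMPTY_ROOT(root), trace_ocfs2_resv_find_window_next() uses -1 as a "no next reservation" sentinel, and several standalone mlogs ("Empty root", "No next node", "One more resv found in linear search") disappear because the surviving events already identify which branch ran. Multi-value messages such as the old "Insert reservation start: %u len: %u" map naturally onto an event with one field per value; a hedged sketch follows (field layout assumed, not taken from ocfs2_trace.h), and in the real header it would sit inside the guarded section shown in the earlier sketch.

/* Illustrative only: the old format-string text lives on inside the
 * event definition instead of the C file. */
TRACE_EVENT(ocfs2_resv_insert,
	TP_PROTO(unsigned int r_start, unsigned int r_len),
	TP_ARGS(r_start, r_len),
	TP_STRUCT__entry(
		__field(unsigned int, r_start)
		__field(unsigned int, r_len)
	),
	TP_fast_assign(
		__entry->r_start = r_start;
		__entry->r_len = r_len;
	),
	TP_printk("start: %u len: %u", __entry->r_start, __entry->r_len)
);

The call site trace_ocfs2_resv_insert(new->r_start, new->r_len) in the hunk above matches TP_PROTO positionally, which is why the arguments no longer carry any formatting of their own.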
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index dacd553d8617..ec55add7604a 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -27,7 +27,6 @@
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <linux/types.h> 28#include <linux/types.h>
29 29
30#define MLOG_MASK_PREFIX ML_DISK_ALLOC
31#include <cluster/masklog.h> 30#include <cluster/masklog.h>
32 31
33#include "ocfs2.h" 32#include "ocfs2.h"
@@ -39,6 +38,7 @@
39#include "super.h" 38#include "super.h"
40#include "sysfile.h" 39#include "sysfile.h"
41#include "uptodate.h" 40#include "uptodate.h"
41#include "ocfs2_trace.h"
42 42
43#include "buffer_head_io.h" 43#include "buffer_head_io.h"
44#include "suballoc.h" 44#include "suballoc.h"
@@ -82,7 +82,6 @@ static u16 ocfs2_calc_new_backup_super(struct inode *inode,
82 backups++; 82 backups++;
83 } 83 }
84 84
85 mlog_exit_void();
86 return backups; 85 return backups;
87} 86}
88 87
@@ -103,8 +102,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
103 u16 cl_bpc = le16_to_cpu(cl->cl_bpc); 102 u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
104 u16 cl_cpg = le16_to_cpu(cl->cl_cpg); 103 u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
105 104
106 mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n", 105 trace_ocfs2_update_last_group_and_inode(new_clusters,
107 new_clusters, first_new_cluster); 106 first_new_cluster);
108 107
109 ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode), 108 ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
110 group_bh, OCFS2_JOURNAL_ACCESS_WRITE); 109 group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
@@ -176,7 +175,8 @@ out_rollback:
176 le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits); 175 le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
177 } 176 }
178out: 177out:
179 mlog_exit(ret); 178 if (ret)
179 mlog_errno(ret);
180 return ret; 180 return ret;
181} 181}
182 182
@@ -281,8 +281,6 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
281 u32 first_new_cluster; 281 u32 first_new_cluster;
282 u64 lgd_blkno; 282 u64 lgd_blkno;
283 283
284 mlog_entry_void();
285
286 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) 284 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
287 return -EROFS; 285 return -EROFS;
288 286
@@ -342,7 +340,8 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
342 goto out_unlock; 340 goto out_unlock;
343 } 341 }
344 342
345 mlog(0, "extend the last group at %llu, new clusters = %d\n", 343
344 trace_ocfs2_group_extend(
346 (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters); 345 (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);
347 346
348 handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS); 347 handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
@@ -377,7 +376,6 @@ out_mutex:
377 iput(main_bm_inode); 376 iput(main_bm_inode);
378 377
379out: 378out:
380 mlog_exit_void();
381 return ret; 379 return ret;
382} 380}
383 381
@@ -472,8 +470,6 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
472 struct ocfs2_chain_rec *cr; 470 struct ocfs2_chain_rec *cr;
473 u16 cl_bpc; 471 u16 cl_bpc;
474 472
475 mlog_entry_void();
476
477 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) 473 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
478 return -EROFS; 474 return -EROFS;
479 475
@@ -520,8 +516,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
520 goto out_unlock; 516 goto out_unlock;
521 } 517 }
522 518
523 mlog(0, "Add a new group %llu in chain = %u, length = %u\n", 519 trace_ocfs2_group_add((unsigned long long)input->group,
524 (unsigned long long)input->group, input->chain, input->clusters); 520 input->chain, input->clusters, input->frees);
525 521
526 handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS); 522 handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
527 if (IS_ERR(handle)) { 523 if (IS_ERR(handle)) {
@@ -589,6 +585,5 @@ out_mutex:
589 iput(main_bm_inode); 585 iput(main_bm_inode);
590 586
591out: 587out:
592 mlog_exit_void();
593 return ret; 588 return ret;
594} 589}
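resize.c also shows the second half of the cleanup: the mlog_entry()/mlog_entry_void() and mlog_exit()/mlog_exit_void() function-tracing calls are removed outright rather than converted. Where mlog_exit(ret) used to report every return value, the new epilogue logs only failures through mlog_errno(), the existing masklog error helper; successful returns become silent. A self-contained, user-space sketch of that idiom with a stand-in logging macro (log_errno() here is not the kernel macro, just an illustration of the shape):

#include <stdio.h>

/* Stand-in for the kernel's mlog_errno(); illustrative only. */
#define log_errno(st)	fprintf(stderr, "status = %d\n", (st))

static int epilogue(int status)
{
	/* Old code: an unconditional mlog_exit(status) fired on every
	 * return.  New code: success is silent, failures still show up. */
	if (status)
		log_errno(status);
	return status;
}

int main(void)
{
	epilogue(0);	/* prints nothing */
	epilogue(-28);	/* prints "status = -28" (-ENOSPC on Linux) */
	return 0;
}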
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index ab4e0172cc1d..26fc0014d509 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -27,7 +27,6 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29 29
30#define MLOG_MASK_PREFIX ML_SUPER
31#include <cluster/masklog.h> 30#include <cluster/masklog.h>
32 31
33#include "ocfs2.h" 32#include "ocfs2.h"
@@ -39,6 +38,7 @@
39#include "slot_map.h" 38#include "slot_map.h"
40#include "super.h" 39#include "super.h"
41#include "sysfile.h" 40#include "sysfile.h"
41#include "ocfs2_trace.h"
42 42
43#include "buffer_head_io.h" 43#include "buffer_head_io.h"
44 44
@@ -142,8 +142,7 @@ int ocfs2_refresh_slot_info(struct ocfs2_super *osb)
142 BUG_ON(si->si_blocks == 0); 142 BUG_ON(si->si_blocks == 0);
143 BUG_ON(si->si_bh == NULL); 143 BUG_ON(si->si_bh == NULL);
144 144
145 mlog(0, "Refreshing slot map, reading %u block(s)\n", 145 trace_ocfs2_refresh_slot_info(si->si_blocks);
146 si->si_blocks);
147 146
148 /* 147 /*
149 * We pass -1 as blocknr because we expect all of si->si_bh to 148 * We pass -1 as blocknr because we expect all of si->si_bh to
@@ -381,8 +380,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
381 /* The size checks above should ensure this */ 380 /* The size checks above should ensure this */
382 BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks); 381 BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks);
383 382
384 mlog(0, "Slot map needs %u buffers for %llu bytes\n", 383 trace_ocfs2_map_slot_buffers(bytes, si->si_blocks);
385 si->si_blocks, bytes);
386 384
387 si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks, 385 si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks,
388 GFP_KERNEL); 386 GFP_KERNEL);
@@ -400,8 +398,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
400 goto bail; 398 goto bail;
401 } 399 }
402 400
403 mlog(0, "Reading slot map block %u at %llu\n", i, 401 trace_ocfs2_map_slot_buffers_block((unsigned long long)blkno, i);
404 (unsigned long long)blkno);
405 402
406 bh = NULL; /* Acquire a fresh bh */ 403 bh = NULL; /* Acquire a fresh bh */
407 status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno, 404 status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno,
@@ -475,8 +472,6 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
475 int slot; 472 int slot;
476 struct ocfs2_slot_info *si; 473 struct ocfs2_slot_info *si;
477 474
478 mlog_entry_void();
479
480 si = osb->slot_info; 475 si = osb->slot_info;
481 476
482 spin_lock(&osb->osb_lock); 477 spin_lock(&osb->osb_lock);
@@ -505,14 +500,13 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
505 osb->slot_num = slot; 500 osb->slot_num = slot;
506 spin_unlock(&osb->osb_lock); 501 spin_unlock(&osb->osb_lock);
507 502
508 mlog(0, "taking node slot %d\n", osb->slot_num); 503 trace_ocfs2_find_slot(osb->slot_num);
509 504
510 status = ocfs2_update_disk_slot(osb, si, osb->slot_num); 505 status = ocfs2_update_disk_slot(osb, si, osb->slot_num);
511 if (status < 0) 506 if (status < 0)
512 mlog_errno(status); 507 mlog_errno(status);
513 508
514bail: 509bail:
515 mlog_exit(status);
516 return status; 510 return status;
517} 511}
518 512
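Once converted, messages like the slot_map.c ones are no longer gated by MLOG_MASK_PREFIX masks; they are ordinary trace events that can be switched on per event or for the whole group through tracefs. A hedged sketch of enabling them from user space (it assumes tracefs is mounted at /sys/kernel/debug/tracing, that a kernel or module built with these events is loaded, and that the events are grouped under an "ocfs2" system as the header layout suggests):

/* Enable every ocfs2 trace event, then read records from trace_pipe.
 * The paths are assumptions about where tracefs is mounted. */
#include <stdio.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/events/ocfs2/enable";
	FILE *f = fopen(enable, "w");

	if (!f) {
		perror(enable);
		return 1;
	}
	fputs("1\n", f);	/* write "0" to switch the events off again */
	fclose(f);

	/* Records such as ocfs2_find_slot or ocfs2_refresh_slot_info then
	 * appear in /sys/kernel/debug/tracing/trace (or trace_pipe). */
	return 0;
}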
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 71998d4d61d5..ab6e2061074f 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -29,7 +29,6 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31 31
32#define MLOG_MASK_PREFIX ML_DISK_ALLOC
33#include <cluster/masklog.h> 32#include <cluster/masklog.h>
34 33
35#include "ocfs2.h" 34#include "ocfs2.h"
@@ -44,6 +43,7 @@
44#include "super.h" 43#include "super.h"
45#include "sysfile.h" 44#include "sysfile.h"
46#include "uptodate.h" 45#include "uptodate.h"
46#include "ocfs2_trace.h"
47 47
48#include "buffer_head_io.h" 48#include "buffer_head_io.h"
49 49
@@ -308,8 +308,8 @@ static int ocfs2_validate_group_descriptor(struct super_block *sb,
308 int rc; 308 int rc;
309 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; 309 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
310 310
311 mlog(0, "Validating group descriptor %llu\n", 311 trace_ocfs2_validate_group_descriptor(
312 (unsigned long long)bh->b_blocknr); 312 (unsigned long long)bh->b_blocknr);
313 313
314 BUG_ON(!buffer_uptodate(bh)); 314 BUG_ON(!buffer_uptodate(bh));
315 315
@@ -389,8 +389,6 @@ static int ocfs2_block_group_fill(handle_t *handle,
389 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; 389 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
390 struct super_block * sb = alloc_inode->i_sb; 390 struct super_block * sb = alloc_inode->i_sb;
391 391
392 mlog_entry_void();
393
394 if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) { 392 if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
395 ocfs2_error(alloc_inode->i_sb, "group block (%llu) != " 393 ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
396 "b_blocknr (%llu)", 394 "b_blocknr (%llu)",
@@ -436,7 +434,8 @@ static int ocfs2_block_group_fill(handle_t *handle,
436 * allocation time. */ 434 * allocation time. */
437 435
438bail: 436bail:
439 mlog_exit(status); 437 if (status)
438 mlog_errno(status);
440 return status; 439 return status;
441} 440}
442 441
@@ -477,8 +476,8 @@ ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle,
477 476
478 /* setup the group */ 477 /* setup the group */
479 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); 478 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
480 mlog(0, "new descriptor, record %u, at block %llu\n", 479 trace_ocfs2_block_group_alloc_contig(
481 alloc_rec, (unsigned long long)bg_blkno); 480 (unsigned long long)bg_blkno, alloc_rec);
482 481
483 bg_bh = sb_getblk(osb->sb, bg_blkno); 482 bg_bh = sb_getblk(osb->sb, bg_blkno);
484 if (!bg_bh) { 483 if (!bg_bh) {
@@ -657,8 +656,8 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
657 656
658 /* setup the group */ 657 /* setup the group */
659 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); 658 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
660 mlog(0, "new descriptor, record %u, at block %llu\n", 659 trace_ocfs2_block_group_alloc_discontig(
661 alloc_rec, (unsigned long long)bg_blkno); 660 (unsigned long long)bg_blkno, alloc_rec);
662 661
663 bg_bh = sb_getblk(osb->sb, bg_blkno); 662 bg_bh = sb_getblk(osb->sb, bg_blkno);
664 if (!bg_bh) { 663 if (!bg_bh) {
@@ -707,8 +706,6 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
707 706
708 BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode)); 707 BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
709 708
710 mlog_entry_void();
711
712 cl = &fe->id2.i_chain; 709 cl = &fe->id2.i_chain;
713 status = ocfs2_reserve_clusters_with_limit(osb, 710 status = ocfs2_reserve_clusters_with_limit(osb,
714 le16_to_cpu(cl->cl_cpg), 711 le16_to_cpu(cl->cl_cpg),
@@ -730,8 +727,8 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
730 } 727 }
731 728
732 if (last_alloc_group && *last_alloc_group != 0) { 729 if (last_alloc_group && *last_alloc_group != 0) {
733 mlog(0, "use old allocation group %llu for block group alloc\n", 730 trace_ocfs2_block_group_alloc(
734 (unsigned long long)*last_alloc_group); 731 (unsigned long long)*last_alloc_group);
735 ac->ac_last_group = *last_alloc_group; 732 ac->ac_last_group = *last_alloc_group;
736 } 733 }
737 734
@@ -796,7 +793,8 @@ bail:
796 793
797 brelse(bg_bh); 794 brelse(bg_bh);
798 795
799 mlog_exit(status); 796 if (status)
797 mlog_errno(status);
800 return status; 798 return status;
801} 799}
802 800
@@ -814,8 +812,6 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
814 struct ocfs2_dinode *fe; 812 struct ocfs2_dinode *fe;
815 u32 free_bits; 813 u32 free_bits;
816 814
817 mlog_entry_void();
818
819 alloc_inode = ocfs2_get_system_file_inode(osb, type, slot); 815 alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
820 if (!alloc_inode) { 816 if (!alloc_inode) {
821 mlog_errno(-EINVAL); 817 mlog_errno(-EINVAL);
@@ -855,16 +851,15 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
855 if (bits_wanted > free_bits) { 851 if (bits_wanted > free_bits) {
856 /* cluster bitmap never grows */ 852 /* cluster bitmap never grows */
857 if (ocfs2_is_cluster_bitmap(alloc_inode)) { 853 if (ocfs2_is_cluster_bitmap(alloc_inode)) {
858 mlog(0, "Disk Full: wanted=%u, free_bits=%u\n", 854 trace_ocfs2_reserve_suballoc_bits_nospc(bits_wanted,
859 bits_wanted, free_bits); 855 free_bits);
860 status = -ENOSPC; 856 status = -ENOSPC;
861 goto bail; 857 goto bail;
862 } 858 }
863 859
864 if (!(flags & ALLOC_NEW_GROUP)) { 860 if (!(flags & ALLOC_NEW_GROUP)) {
865 mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, " 861 trace_ocfs2_reserve_suballoc_bits_no_new_group(
866 "and we don't alloc a new group for it.\n", 862 slot, bits_wanted, free_bits);
867 slot, bits_wanted, free_bits);
868 status = -ENOSPC; 863 status = -ENOSPC;
869 goto bail; 864 goto bail;
870 } 865 }
@@ -890,7 +885,8 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
890bail: 885bail:
891 brelse(bh); 886 brelse(bh);
892 887
893 mlog_exit(status); 888 if (status)
889 mlog_errno(status);
894 return status; 890 return status;
895} 891}
896 892
@@ -1052,7 +1048,8 @@ bail:
1052 *ac = NULL; 1048 *ac = NULL;
1053 } 1049 }
1054 1050
1055 mlog_exit(status); 1051 if (status)
1052 mlog_errno(status);
1056 return status; 1053 return status;
1057} 1054}
1058 1055
@@ -1119,8 +1116,8 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
1119 spin_lock(&osb->osb_lock); 1116 spin_lock(&osb->osb_lock);
1120 osb->osb_inode_alloc_group = alloc_group; 1117 osb->osb_inode_alloc_group = alloc_group;
1121 spin_unlock(&osb->osb_lock); 1118 spin_unlock(&osb->osb_lock);
1122 mlog(0, "after reservation, new allocation group is " 1119 trace_ocfs2_reserve_new_inode_new_group(
1123 "%llu\n", (unsigned long long)alloc_group); 1120 (unsigned long long)alloc_group);
1124 1121
1125 /* 1122 /*
1126 * Some inodes must be freed by us, so try to allocate 1123 * Some inodes must be freed by us, so try to allocate
@@ -1152,7 +1149,8 @@ bail:
1152 *ac = NULL; 1149 *ac = NULL;
1153 } 1150 }
1154 1151
1155 mlog_exit(status); 1152 if (status)
1153 mlog_errno(status);
1156 return status; 1154 return status;
1157} 1155}
1158 1156
@@ -1189,8 +1187,6 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
1189{ 1187{
1190 int status; 1188 int status;
1191 1189
1192 mlog_entry_void();
1193
1194 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 1190 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
1195 if (!(*ac)) { 1191 if (!(*ac)) {
1196 status = -ENOMEM; 1192 status = -ENOMEM;
@@ -1229,7 +1225,8 @@ bail:
1229 *ac = NULL; 1225 *ac = NULL;
1230 } 1226 }
1231 1227
1232 mlog_exit(status); 1228 if (status)
1229 mlog_errno(status);
1233 return status; 1230 return status;
1234} 1231}
1235 1232
@@ -1357,15 +1354,12 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
1357 void *bitmap = bg->bg_bitmap; 1354 void *bitmap = bg->bg_bitmap;
1358 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; 1355 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
1359 1356
1360 mlog_entry_void();
1361
1362 /* All callers get the descriptor via 1357 /* All callers get the descriptor via
1363 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ 1358 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
1364 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); 1359 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
1365 BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); 1360 BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
1366 1361
1367 mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off, 1362 trace_ocfs2_block_group_set_bits(bit_off, num_bits);
1368 num_bits);
1369 1363
1370 if (ocfs2_is_cluster_bitmap(alloc_inode)) 1364 if (ocfs2_is_cluster_bitmap(alloc_inode))
1371 journal_type = OCFS2_JOURNAL_ACCESS_UNDO; 1365 journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
@@ -1394,7 +1388,8 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
1394 ocfs2_journal_dirty(handle, group_bh); 1388 ocfs2_journal_dirty(handle, group_bh);
1395 1389
1396bail: 1390bail:
1397 mlog_exit(status); 1391 if (status)
1392 mlog_errno(status);
1398 return status; 1393 return status;
1399} 1394}
1400 1395
@@ -1437,10 +1432,10 @@ static int ocfs2_relink_block_group(handle_t *handle,
1437 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); 1432 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
1438 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg)); 1433 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg));
1439 1434
1440 mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n", 1435 trace_ocfs2_relink_block_group(
1441 (unsigned long long)le64_to_cpu(fe->i_blkno), chain, 1436 (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
1442 (unsigned long long)le64_to_cpu(bg->bg_blkno), 1437 (unsigned long long)le64_to_cpu(bg->bg_blkno),
1443 (unsigned long long)le64_to_cpu(prev_bg->bg_blkno)); 1438 (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
1444 1439
1445 fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno); 1440 fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
1446 bg_ptr = le64_to_cpu(bg->bg_next_group); 1441 bg_ptr = le64_to_cpu(bg->bg_next_group);
@@ -1484,7 +1479,8 @@ out_rollback:
1484 prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); 1479 prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
1485 } 1480 }
1486 1481
1487 mlog_exit(status); 1482 if (status)
1483 mlog_errno(status);
1488 return status; 1484 return status;
1489} 1485}
1490 1486
@@ -1525,10 +1521,10 @@ static int ocfs2_cluster_group_search(struct inode *inode,
1525 if ((gd_cluster_off + max_bits) > 1521 if ((gd_cluster_off + max_bits) >
1526 OCFS2_I(inode)->ip_clusters) { 1522 OCFS2_I(inode)->ip_clusters) {
1527 max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; 1523 max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
1528 mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n", 1524 trace_ocfs2_cluster_group_search_wrong_max_bits(
1529 (unsigned long long)le64_to_cpu(gd->bg_blkno), 1525 (unsigned long long)le64_to_cpu(gd->bg_blkno),
1530 le16_to_cpu(gd->bg_bits), 1526 le16_to_cpu(gd->bg_bits),
1531 OCFS2_I(inode)->ip_clusters, max_bits); 1527 OCFS2_I(inode)->ip_clusters, max_bits);
1532 } 1528 }
1533 1529
1534 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), 1530 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
@@ -1542,9 +1538,9 @@ static int ocfs2_cluster_group_search(struct inode *inode,
1542 gd_cluster_off + 1538 gd_cluster_off +
1543 res->sr_bit_offset + 1539 res->sr_bit_offset +
1544 res->sr_bits); 1540 res->sr_bits);
1545 mlog(0, "Checking %llu against %llu\n", 1541 trace_ocfs2_cluster_group_search_max_block(
1546 (unsigned long long)blkoff, 1542 (unsigned long long)blkoff,
1547 (unsigned long long)max_block); 1543 (unsigned long long)max_block);
1548 if (blkoff > max_block) 1544 if (blkoff > max_block)
1549 return -ENOSPC; 1545 return -ENOSPC;
1550 } 1546 }
@@ -1588,9 +1584,9 @@ static int ocfs2_block_group_search(struct inode *inode,
1588 if (!ret && max_block) { 1584 if (!ret && max_block) {
1589 blkoff = le64_to_cpu(bg->bg_blkno) + 1585 blkoff = le64_to_cpu(bg->bg_blkno) +
1590 res->sr_bit_offset + res->sr_bits; 1586 res->sr_bit_offset + res->sr_bits;
1591 mlog(0, "Checking %llu against %llu\n", 1587 trace_ocfs2_block_group_search_max_block(
1592 (unsigned long long)blkoff, 1588 (unsigned long long)blkoff,
1593 (unsigned long long)max_block); 1589 (unsigned long long)max_block);
1594 if (blkoff > max_block) 1590 if (blkoff > max_block)
1595 ret = -ENOSPC; 1591 ret = -ENOSPC;
1596 } 1592 }
@@ -1756,9 +1752,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1756 struct ocfs2_group_desc *bg; 1752 struct ocfs2_group_desc *bg;
1757 1753
1758 chain = ac->ac_chain; 1754 chain = ac->ac_chain;
1759 mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n", 1755 trace_ocfs2_search_chain_begin(
1760 bits_wanted, chain, 1756 (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
1761 (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno); 1757 bits_wanted, chain);
1762 1758
1763 status = ocfs2_read_group_descriptor(alloc_inode, fe, 1759 status = ocfs2_read_group_descriptor(alloc_inode, fe,
1764 le64_to_cpu(cl->cl_recs[chain].c_blkno), 1760 le64_to_cpu(cl->cl_recs[chain].c_blkno),
@@ -1799,8 +1795,8 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1799 goto bail; 1795 goto bail;
1800 } 1796 }
1801 1797
1802 mlog(0, "alloc succeeds: we give %u bits from block group %llu\n", 1798 trace_ocfs2_search_chain_succ(
1803 res->sr_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno)); 1799 (unsigned long long)le64_to_cpu(bg->bg_blkno), res->sr_bits);
1804 1800
1805 res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno); 1801 res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno);
1806 1802
@@ -1861,8 +1857,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1861 goto bail; 1857 goto bail;
1862 } 1858 }
1863 1859
1864 mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, 1860 trace_ocfs2_search_chain_end(
1865 (unsigned long long)le64_to_cpu(fe->i_blkno)); 1861 (unsigned long long)le64_to_cpu(fe->i_blkno),
1862 res->sr_bits);
1866 1863
1867out_loc_only: 1864out_loc_only:
1868 *bits_left = le16_to_cpu(bg->bg_free_bits_count); 1865 *bits_left = le16_to_cpu(bg->bg_free_bits_count);
@@ -1870,7 +1867,8 @@ bail:
1870 brelse(group_bh); 1867 brelse(group_bh);
1871 brelse(prev_group_bh); 1868 brelse(prev_group_bh);
1872 1869
1873 mlog_exit(status); 1870 if (status)
1871 mlog_errno(status);
1874 return status; 1872 return status;
1875} 1873}
1876 1874
@@ -1888,8 +1886,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1888 struct ocfs2_chain_list *cl; 1886 struct ocfs2_chain_list *cl;
1889 struct ocfs2_dinode *fe; 1887 struct ocfs2_dinode *fe;
1890 1888
1891 mlog_entry_void();
1892
1893 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); 1889 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
1894 BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given)); 1890 BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
1895 BUG_ON(!ac->ac_bh); 1891 BUG_ON(!ac->ac_bh);
@@ -1945,8 +1941,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1945 goto bail; 1941 goto bail;
1946 } 1942 }
1947 1943
1948 mlog(0, "Search of victim chain %u came up with nothing, " 1944 trace_ocfs2_claim_suballoc_bits(victim);
1949 "trying all chains now.\n", victim);
1950 1945
1951 /* If we didn't pick a good victim, then just default to 1946 /* If we didn't pick a good victim, then just default to
1952 * searching each chain in order. Don't allow chain relinking 1947 * searching each chain in order. Don't allow chain relinking
@@ -1984,7 +1979,8 @@ set_hint:
1984 } 1979 }
1985 1980
1986bail: 1981bail:
1987 mlog_exit(status); 1982 if (status)
1983 mlog_errno(status);
1988 return status; 1984 return status;
1989} 1985}
1990 1986
@@ -2021,7 +2017,8 @@ int ocfs2_claim_metadata(handle_t *handle,
2021 *num_bits = res.sr_bits; 2017 *num_bits = res.sr_bits;
2022 status = 0; 2018 status = 0;
2023bail: 2019bail:
2024 mlog_exit(status); 2020 if (status)
2021 mlog_errno(status);
2025 return status; 2022 return status;
2026} 2023}
2027 2024
@@ -2172,8 +2169,8 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
2172 goto out; 2169 goto out;
2173 } 2170 }
2174 2171
2175 mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, 2172 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
2176 (unsigned long long)di_blkno); 2173 res->sr_bits);
2177 2174
2178 atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); 2175 atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
2179 2176
@@ -2201,8 +2198,6 @@ int ocfs2_claim_new_inode(handle_t *handle,
2201 int status; 2198 int status;
2202 struct ocfs2_suballoc_result res; 2199 struct ocfs2_suballoc_result res;
2203 2200
2204 mlog_entry_void();
2205
2206 BUG_ON(!ac); 2201 BUG_ON(!ac);
2207 BUG_ON(ac->ac_bits_given != 0); 2202 BUG_ON(ac->ac_bits_given != 0);
2208 BUG_ON(ac->ac_bits_wanted != 1); 2203 BUG_ON(ac->ac_bits_wanted != 1);
@@ -2230,7 +2225,8 @@ int ocfs2_claim_new_inode(handle_t *handle,
2230 ocfs2_save_inode_ac_group(dir, ac); 2225 ocfs2_save_inode_ac_group(dir, ac);
2231 status = 0; 2226 status = 0;
2232bail: 2227bail:
2233 mlog_exit(status); 2228 if (status)
2229 mlog_errno(status);
2234 return status; 2230 return status;
2235} 2231}
2236 2232
@@ -2307,8 +2303,6 @@ int __ocfs2_claim_clusters(handle_t *handle,
2307 struct ocfs2_suballoc_result res = { .sr_blkno = 0, }; 2303 struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
2308 struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb); 2304 struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb);
2309 2305
2310 mlog_entry_void();
2311
2312 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); 2306 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
2313 2307
2314 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL 2308 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
@@ -2363,7 +2357,8 @@ int __ocfs2_claim_clusters(handle_t *handle,
2363 ac->ac_bits_given += *num_clusters; 2357 ac->ac_bits_given += *num_clusters;
2364 2358
2365bail: 2359bail:
2366 mlog_exit(status); 2360 if (status)
2361 mlog_errno(status);
2367 return status; 2362 return status;
2368} 2363}
2369 2364
@@ -2392,13 +2387,11 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
2392 unsigned int tmp; 2387 unsigned int tmp;
2393 struct ocfs2_group_desc *undo_bg = NULL; 2388 struct ocfs2_group_desc *undo_bg = NULL;
2394 2389
2395 mlog_entry_void();
2396
2397 /* The caller got this descriptor from 2390 /* The caller got this descriptor from
2398 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ 2391 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
2399 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); 2392 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
2400 2393
2401 mlog(0, "off = %u, num = %u\n", bit_off, num_bits); 2394 trace_ocfs2_block_group_clear_bits(bit_off, num_bits);
2402 2395
2403 BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode)); 2396 BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode));
2404 status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), 2397 status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
@@ -2463,8 +2456,6 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
2463 struct buffer_head *group_bh = NULL; 2456 struct buffer_head *group_bh = NULL;
2464 struct ocfs2_group_desc *group; 2457 struct ocfs2_group_desc *group;
2465 2458
2466 mlog_entry_void();
2467
2468 /* The alloc_bh comes from ocfs2_free_dinode() or 2459 /* The alloc_bh comes from ocfs2_free_dinode() or
2469 * ocfs2_free_clusters(). The callers have all locked the 2460 * ocfs2_free_clusters(). The callers have all locked the
2470 * allocator and gotten alloc_bh from the lock call. This 2461 * allocator and gotten alloc_bh from the lock call. This
@@ -2473,9 +2464,10 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
2473 BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); 2464 BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
2474 BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl)); 2465 BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
2475 2466
2476 mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n", 2467 trace_ocfs2_free_suballoc_bits(
2477 (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count, 2468 (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
2478 (unsigned long long)bg_blkno, start_bit); 2469 (unsigned long long)bg_blkno,
2470 start_bit, count);
2479 2471
2480 status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno, 2472 status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno,
2481 &group_bh); 2473 &group_bh);
@@ -2511,7 +2503,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
2511bail: 2503bail:
2512 brelse(group_bh); 2504 brelse(group_bh);
2513 2505
2514 mlog_exit(status); 2506 if (status)
2507 mlog_errno(status);
2515 return status; 2508 return status;
2516} 2509}
2517 2510
@@ -2556,11 +2549,8 @@ static int _ocfs2_free_clusters(handle_t *handle,
2556 2549
2557 /* You can't ever have a contiguous set of clusters 2550 /* You can't ever have a contiguous set of clusters
2558 * bigger than a block group bitmap so we never have to worry 2551 * bigger than a block group bitmap so we never have to worry
2559 * about looping on them. */ 2552 * about looping on them.
2560 2553 * This is expensive. We can safely remove once this stuff has
2561 mlog_entry_void();
2562
2563 /* This is expensive. We can safely remove once this stuff has
2564 * gotten tested really well. */ 2554 * gotten tested really well. */
2565 BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk))); 2555 BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
2566 2556
@@ -2569,10 +2559,9 @@ static int _ocfs2_free_clusters(handle_t *handle,
2569 ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno, 2559 ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
2570 &bg_start_bit); 2560 &bg_start_bit);
2571 2561
2572 mlog(0, "want to free %u clusters starting at block %llu\n", 2562 trace_ocfs2_free_clusters((unsigned long long)bg_blkno,
2573 num_clusters, (unsigned long long)start_blk); 2563 (unsigned long long)start_blk,
2574 mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n", 2564 bg_start_bit, num_clusters);
2575 (unsigned long long)bg_blkno, bg_start_bit);
2576 2565
2577 status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh, 2566 status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
2578 bg_start_bit, bg_blkno, 2567 bg_start_bit, bg_blkno,
@@ -2586,7 +2575,8 @@ static int _ocfs2_free_clusters(handle_t *handle,
2586 num_clusters); 2575 num_clusters);
2587 2576
2588out: 2577out:
2589 mlog_exit(status); 2578 if (status)
2579 mlog_errno(status);
2590 return status; 2580 return status;
2591} 2581}
2592 2582
@@ -2756,7 +2746,7 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
2756 struct buffer_head *inode_bh = NULL; 2746 struct buffer_head *inode_bh = NULL;
2757 struct ocfs2_dinode *inode_fe; 2747 struct ocfs2_dinode *inode_fe;
2758 2748
2759 mlog_entry("blkno: %llu\n", (unsigned long long)blkno); 2749 trace_ocfs2_get_suballoc_slot_bit((unsigned long long)blkno);
2760 2750
2761 /* dirty read disk */ 2751 /* dirty read disk */
2762 status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh); 2752 status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh);
@@ -2793,7 +2783,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
2793bail: 2783bail:
2794 brelse(inode_bh); 2784 brelse(inode_bh);
2795 2785
2796 mlog_exit(status); 2786 if (status)
2787 mlog_errno(status);
2797 return status; 2788 return status;
2798} 2789}
2799 2790
@@ -2816,8 +2807,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
2816 u64 bg_blkno; 2807 u64 bg_blkno;
2817 int status; 2808 int status;
2818 2809
2819 mlog_entry("blkno: %llu bit: %u\n", (unsigned long long)blkno, 2810 trace_ocfs2_test_suballoc_bit((unsigned long long)blkno,
2820 (unsigned int)bit); 2811 (unsigned int)bit);
2821 2812
2822 alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data; 2813 alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data;
2823 if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) { 2814 if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) {
@@ -2844,7 +2835,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
2844bail: 2835bail:
2845 brelse(group_bh); 2836 brelse(group_bh);
2846 2837
2847 mlog_exit(status); 2838 if (status)
2839 mlog_errno(status);
2848 return status; 2840 return status;
2849} 2841}
2850 2842
@@ -2869,7 +2861,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2869 struct inode *inode_alloc_inode; 2861 struct inode *inode_alloc_inode;
2870 struct buffer_head *alloc_bh = NULL; 2862 struct buffer_head *alloc_bh = NULL;
2871 2863
2872 mlog_entry("blkno: %llu", (unsigned long long)blkno); 2864 trace_ocfs2_test_inode_bit((unsigned long long)blkno);
2873 2865
2874 status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, 2866 status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
2875 &group_blkno, &suballoc_bit); 2867 &group_blkno, &suballoc_bit);
@@ -2910,6 +2902,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2910 iput(inode_alloc_inode); 2902 iput(inode_alloc_inode);
2911 brelse(alloc_bh); 2903 brelse(alloc_bh);
2912bail: 2904bail:
2913 mlog_exit(status); 2905 if (status)
2906 mlog_errno(status);
2914 return status; 2907 return status;
2915} 2908}
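
Every suballoc.c hunk above applies the same two-part conversion: the unconditional mlog_entry()/mlog_exit() pair is dropped, entry logging is either removed (mlog_entry_void()) or becomes a named tracepoint, and exit logging survives only as mlog_errno() on failure. A minimal sketch of that pattern follows; ocfs2_frob_thing() and its tracepoint are hypothetical stand-ins, not symbols from this patch.

static int ocfs2_frob_thing(u64 blkno)
{
	int status = 0;

	/* was: mlog_entry("blkno: %llu\n", (unsigned long long)blkno); */
	trace_ocfs2_frob_thing((unsigned long long)blkno);

	if (blkno == 0)
		status = -EINVAL;

	/* was: mlog_exit(status), which fired on every return;
	 * now only failures are reported */
	if (status)
		mlog_errno(status);
	return status;
}
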
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 236ed1bdca2c..69fa11b35aa4 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -42,7 +42,9 @@
42#include <linux/seq_file.h> 42#include <linux/seq_file.h>
43#include <linux/quotaops.h> 43#include <linux/quotaops.h>
44 44
45#define MLOG_MASK_PREFIX ML_SUPER 45#define CREATE_TRACE_POINTS
46#include "ocfs2_trace.h"
47
46#include <cluster/masklog.h> 48#include <cluster/masklog.h>
47 49
48#include "ocfs2.h" 50#include "ocfs2.h"
@@ -441,8 +443,6 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
441 int status = 0; 443 int status = 0;
442 int i; 444 int i;
443 445
444 mlog_entry_void();
445
446 new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0); 446 new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0);
447 if (IS_ERR(new)) { 447 if (IS_ERR(new)) {
448 status = PTR_ERR(new); 448 status = PTR_ERR(new);
@@ -478,7 +478,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
478 } 478 }
479 479
480bail: 480bail:
481 mlog_exit(status); 481 if (status)
482 mlog_errno(status);
482 return status; 483 return status;
483} 484}
484 485
@@ -488,8 +489,6 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
488 int status = 0; 489 int status = 0;
489 int i; 490 int i;
490 491
491 mlog_entry_void();
492
493 for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1; 492 for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1;
494 i < NUM_SYSTEM_INODES; 493 i < NUM_SYSTEM_INODES;
495 i++) { 494 i++) {
@@ -508,7 +507,8 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
508 } 507 }
509 508
510bail: 509bail:
511 mlog_exit(status); 510 if (status)
511 mlog_errno(status);
512 return status; 512 return status;
513} 513}
514 514
@@ -517,8 +517,6 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
517 int i; 517 int i;
518 struct inode *inode; 518 struct inode *inode;
519 519
520 mlog_entry_void();
521
522 for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) { 520 for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) {
523 inode = osb->global_system_inodes[i]; 521 inode = osb->global_system_inodes[i];
524 if (inode) { 522 if (inode) {
@@ -540,7 +538,7 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
540 } 538 }
541 539
542 if (!osb->local_system_inodes) 540 if (!osb->local_system_inodes)
543 goto out; 541 return;
544 542
545 for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) { 543 for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) {
546 if (osb->local_system_inodes[i]) { 544 if (osb->local_system_inodes[i]) {
@@ -551,9 +549,6 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb)
551 549
552 kfree(osb->local_system_inodes); 550 kfree(osb->local_system_inodes);
553 osb->local_system_inodes = NULL; 551 osb->local_system_inodes = NULL;
554
555out:
556 mlog_exit(0);
557} 552}
558 553
559/* We're allocating fs objects, use GFP_NOFS */ 554/* We're allocating fs objects, use GFP_NOFS */
@@ -684,12 +679,9 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
684 } 679 }
685 680
686 if (*flags & MS_RDONLY) { 681 if (*flags & MS_RDONLY) {
687 mlog(0, "Going to ro mode.\n");
688 sb->s_flags |= MS_RDONLY; 682 sb->s_flags |= MS_RDONLY;
689 osb->osb_flags |= OCFS2_OSB_SOFT_RO; 683 osb->osb_flags |= OCFS2_OSB_SOFT_RO;
690 } else { 684 } else {
691 mlog(0, "Making ro filesystem writeable.\n");
692
693 if (osb->osb_flags & OCFS2_OSB_ERROR_FS) { 685 if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
694 mlog(ML_ERROR, "Cannot remount RDWR " 686 mlog(ML_ERROR, "Cannot remount RDWR "
695 "filesystem due to previous errors.\n"); 687 "filesystem due to previous errors.\n");
@@ -707,6 +699,7 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
707 sb->s_flags &= ~MS_RDONLY; 699 sb->s_flags &= ~MS_RDONLY;
708 osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; 700 osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
709 } 701 }
702 trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
710unlock_osb: 703unlock_osb:
711 spin_unlock(&osb->osb_lock); 704 spin_unlock(&osb->osb_lock);
712 /* Enable quota accounting after remounting RW */ 705 /* Enable quota accounting after remounting RW */
@@ -1032,7 +1025,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1032 char nodestr[8]; 1025 char nodestr[8];
1033 struct ocfs2_blockcheck_stats stats; 1026 struct ocfs2_blockcheck_stats stats;
1034 1027
1035 mlog_entry("%p, %p, %i", sb, data, silent); 1028 trace_ocfs2_fill_super(sb, data, silent);
1036 1029
1037 if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { 1030 if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
1038 status = -EINVAL; 1031 status = -EINVAL;
@@ -1208,7 +1201,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1208 mlog_errno(status); 1201 mlog_errno(status);
1209 atomic_set(&osb->vol_state, VOLUME_DISABLED); 1202 atomic_set(&osb->vol_state, VOLUME_DISABLED);
1210 wake_up(&osb->osb_mount_event); 1203 wake_up(&osb->osb_mount_event);
1211 mlog_exit(status);
1212 return status; 1204 return status;
1213 } 1205 }
1214 } 1206 }
@@ -1222,7 +1214,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1222 /* Start this when the mount is almost sure of being successful */ 1214 /* Start this when the mount is almost sure of being successful */
1223 ocfs2_orphan_scan_start(osb); 1215 ocfs2_orphan_scan_start(osb);
1224 1216
1225 mlog_exit(status);
1226 return status; 1217 return status;
1227 1218
1228read_super_error: 1219read_super_error:
@@ -1237,7 +1228,8 @@ read_super_error:
1237 ocfs2_dismount_volume(sb, 1); 1228 ocfs2_dismount_volume(sb, 1);
1238 } 1229 }
1239 1230
1240 mlog_exit(status); 1231 if (status)
1232 mlog_errno(status);
1241 return status; 1233 return status;
1242} 1234}
1243 1235
@@ -1320,8 +1312,7 @@ static int ocfs2_parse_options(struct super_block *sb,
1320 char *p; 1312 char *p;
1321 u32 tmp; 1313 u32 tmp;
1322 1314
1323 mlog_entry("remount: %d, options: \"%s\"\n", is_remount, 1315 trace_ocfs2_parse_options(is_remount, options ? options : "(none)");
1324 options ? options : "(none)");
1325 1316
1326 mopt->commit_interval = 0; 1317 mopt->commit_interval = 0;
1327 mopt->mount_opt = OCFS2_MOUNT_NOINTR; 1318 mopt->mount_opt = OCFS2_MOUNT_NOINTR;
@@ -1538,7 +1529,6 @@ static int ocfs2_parse_options(struct super_block *sb,
1538 status = 1; 1529 status = 1;
1539 1530
1540bail: 1531bail:
1541 mlog_exit(status);
1542 return status; 1532 return status;
1543} 1533}
1544 1534
@@ -1629,8 +1619,6 @@ static int __init ocfs2_init(void)
1629{ 1619{
1630 int status; 1620 int status;
1631 1621
1632 mlog_entry_void();
1633
1634 ocfs2_print_version(); 1622 ocfs2_print_version();
1635 1623
1636 status = init_ocfs2_uptodate_cache(); 1624 status = init_ocfs2_uptodate_cache();
@@ -1664,10 +1652,9 @@ leave:
1664 if (status < 0) { 1652 if (status < 0) {
1665 ocfs2_free_mem_caches(); 1653 ocfs2_free_mem_caches();
1666 exit_ocfs2_uptodate_cache(); 1654 exit_ocfs2_uptodate_cache();
1655 mlog_errno(status);
1667 } 1656 }
1668 1657
1669 mlog_exit(status);
1670
1671 if (status >= 0) { 1658 if (status >= 0) {
1672 return register_filesystem(&ocfs2_fs_type); 1659 return register_filesystem(&ocfs2_fs_type);
1673 } else 1660 } else
@@ -1676,8 +1663,6 @@ leave:
1676 1663
1677static void __exit ocfs2_exit(void) 1664static void __exit ocfs2_exit(void)
1678{ 1665{
1679 mlog_entry_void();
1680
1681 if (ocfs2_wq) { 1666 if (ocfs2_wq) {
1682 flush_workqueue(ocfs2_wq); 1667 flush_workqueue(ocfs2_wq);
1683 destroy_workqueue(ocfs2_wq); 1668 destroy_workqueue(ocfs2_wq);
@@ -1692,18 +1677,14 @@ static void __exit ocfs2_exit(void)
1692 unregister_filesystem(&ocfs2_fs_type); 1677 unregister_filesystem(&ocfs2_fs_type);
1693 1678
1694 exit_ocfs2_uptodate_cache(); 1679 exit_ocfs2_uptodate_cache();
1695
1696 mlog_exit_void();
1697} 1680}
1698 1681
1699static void ocfs2_put_super(struct super_block *sb) 1682static void ocfs2_put_super(struct super_block *sb)
1700{ 1683{
1701 mlog_entry("(0x%p)\n", sb); 1684 trace_ocfs2_put_super(sb);
1702 1685
1703 ocfs2_sync_blockdev(sb); 1686 ocfs2_sync_blockdev(sb);
1704 ocfs2_dismount_volume(sb, 0); 1687 ocfs2_dismount_volume(sb, 0);
1705
1706 mlog_exit_void();
1707} 1688}
1708 1689
1709static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) 1690static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -1715,7 +1696,7 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
1715 struct buffer_head *bh = NULL; 1696 struct buffer_head *bh = NULL;
1716 struct inode *inode = NULL; 1697 struct inode *inode = NULL;
1717 1698
1718 mlog_entry("(%p, %p)\n", dentry->d_sb, buf); 1699 trace_ocfs2_statfs(dentry->d_sb, buf);
1719 1700
1720 osb = OCFS2_SB(dentry->d_sb); 1701 osb = OCFS2_SB(dentry->d_sb);
1721 1702
@@ -1762,7 +1743,8 @@ bail:
1762 if (inode) 1743 if (inode)
1763 iput(inode); 1744 iput(inode);
1764 1745
1765 mlog_exit(status); 1746 if (status)
1747 mlog_errno(status);
1766 1748
1767 return status; 1749 return status;
1768} 1750}
@@ -1882,8 +1864,6 @@ static int ocfs2_mount_volume(struct super_block *sb)
1882 int unlock_super = 0; 1864 int unlock_super = 0;
1883 struct ocfs2_super *osb = OCFS2_SB(sb); 1865 struct ocfs2_super *osb = OCFS2_SB(sb);
1884 1866
1885 mlog_entry_void();
1886
1887 if (ocfs2_is_hard_readonly(osb)) 1867 if (ocfs2_is_hard_readonly(osb))
1888 goto leave; 1868 goto leave;
1889 1869
@@ -1928,7 +1908,6 @@ leave:
1928 if (unlock_super) 1908 if (unlock_super)
1929 ocfs2_super_unlock(osb, 1); 1909 ocfs2_super_unlock(osb, 1);
1930 1910
1931 mlog_exit(status);
1932 return status; 1911 return status;
1933} 1912}
1934 1913
@@ -1938,7 +1917,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1938 struct ocfs2_super *osb = NULL; 1917 struct ocfs2_super *osb = NULL;
1939 char nodestr[8]; 1918 char nodestr[8];
1940 1919
1941 mlog_entry("(0x%p)\n", sb); 1920 trace_ocfs2_dismount_volume(sb);
1942 1921
1943 BUG_ON(!sb); 1922 BUG_ON(!sb);
1944 osb = OCFS2_SB(sb); 1923 osb = OCFS2_SB(sb);
@@ -2090,8 +2069,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
2090 struct ocfs2_super *osb; 2069 struct ocfs2_super *osb;
2091 u64 total_blocks; 2070 u64 total_blocks;
2092 2071
2093 mlog_entry_void();
2094
2095 osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL); 2072 osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL);
2096 if (!osb) { 2073 if (!osb) {
2097 status = -ENOMEM; 2074 status = -ENOMEM;
@@ -2155,7 +2132,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
2155 status = -EINVAL; 2132 status = -EINVAL;
2156 goto bail; 2133 goto bail;
2157 } 2134 }
2158 mlog(0, "max_slots for this device: %u\n", osb->max_slots);
2159 2135
2160 ocfs2_orphan_scan_init(osb); 2136 ocfs2_orphan_scan_init(osb);
2161 2137
@@ -2294,7 +2270,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
2294 osb->s_clustersize_bits = 2270 osb->s_clustersize_bits =
2295 le32_to_cpu(di->id2.i_super.s_clustersize_bits); 2271 le32_to_cpu(di->id2.i_super.s_clustersize_bits);
2296 osb->s_clustersize = 1 << osb->s_clustersize_bits; 2272 osb->s_clustersize = 1 << osb->s_clustersize_bits;
2297 mlog(0, "clusterbits=%d\n", osb->s_clustersize_bits);
2298 2273
2299 if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE || 2274 if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE ||
2300 osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) { 2275 osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) {
@@ -2333,11 +2308,10 @@ static int ocfs2_initialize_super(struct super_block *sb,
2333 le64_to_cpu(di->id2.i_super.s_first_cluster_group); 2308 le64_to_cpu(di->id2.i_super.s_first_cluster_group);
2334 osb->fs_generation = le32_to_cpu(di->i_fs_generation); 2309 osb->fs_generation = le32_to_cpu(di->i_fs_generation);
2335 osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash); 2310 osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash);
2336 mlog(0, "vol_label: %s\n", osb->vol_label); 2311 trace_ocfs2_initialize_super(osb->vol_label, osb->uuid_str,
2337 mlog(0, "uuid: %s\n", osb->uuid_str); 2312 (unsigned long long)osb->root_blkno,
2338 mlog(0, "root_blkno=%llu, system_dir_blkno=%llu\n", 2313 (unsigned long long)osb->system_dir_blkno,
2339 (unsigned long long)osb->root_blkno, 2314 osb->s_clustersize_bits);
2340 (unsigned long long)osb->system_dir_blkno);
2341 2315
2342 osb->osb_dlm_debug = ocfs2_new_dlm_debug(); 2316 osb->osb_dlm_debug = ocfs2_new_dlm_debug();
2343 if (!osb->osb_dlm_debug) { 2317 if (!osb->osb_dlm_debug) {
@@ -2380,7 +2354,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
2380 } 2354 }
2381 2355
2382bail: 2356bail:
2383 mlog_exit(status);
2384 return status; 2357 return status;
2385} 2358}
2386 2359
@@ -2396,8 +2369,6 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
2396{ 2369{
2397 int status = -EAGAIN; 2370 int status = -EAGAIN;
2398 2371
2399 mlog_entry_void();
2400
2401 if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, 2372 if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE,
2402 strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { 2373 strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) {
2403 /* We have to do a raw check of the feature here */ 2374 /* We have to do a raw check of the feature here */
@@ -2452,7 +2423,8 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
2452 } 2423 }
2453 2424
2454out: 2425out:
2455 mlog_exit(status); 2426 if (status && status != -EAGAIN)
2427 mlog_errno(status);
2456 return status; 2428 return status;
2457} 2429}
2458 2430
@@ -2465,8 +2437,6 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
2465 * recover 2437 * recover
2466 * ourselves. */ 2438 * ourselves. */
2467 2439
2468 mlog_entry_void();
2469
2470 /* Init our journal object. */ 2440 /* Init our journal object. */
2471 status = ocfs2_journal_init(osb->journal, &dirty); 2441 status = ocfs2_journal_init(osb->journal, &dirty);
2472 if (status < 0) { 2442 if (status < 0) {
@@ -2516,8 +2486,6 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
2516 * ourselves as mounted. */ 2486 * ourselves as mounted. */
2517 } 2487 }
2518 2488
2519 mlog(0, "Journal loaded.\n");
2520
2521 status = ocfs2_load_local_alloc(osb); 2489 status = ocfs2_load_local_alloc(osb);
2522 if (status < 0) { 2490 if (status < 0) {
2523 mlog_errno(status); 2491 mlog_errno(status);
@@ -2549,7 +2517,8 @@ finally:
2549 if (local_alloc) 2517 if (local_alloc)
2550 kfree(local_alloc); 2518 kfree(local_alloc);
2551 2519
2552 mlog_exit(status); 2520 if (status)
2521 mlog_errno(status);
2553 return status; 2522 return status;
2554} 2523}
2555 2524
@@ -2561,8 +2530,6 @@ finally:
2561 */ 2530 */
2562static void ocfs2_delete_osb(struct ocfs2_super *osb) 2531static void ocfs2_delete_osb(struct ocfs2_super *osb)
2563{ 2532{
2564 mlog_entry_void();
2565
2566 /* This function assumes that the caller has the main osb resource */ 2533 /* This function assumes that the caller has the main osb resource */
2567 2534
2568 ocfs2_free_slot_info(osb); 2535 ocfs2_free_slot_info(osb);
@@ -2580,8 +2547,6 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb)
2580 kfree(osb->uuid_str); 2547 kfree(osb->uuid_str);
2581 ocfs2_put_dlm_debug(osb->osb_dlm_debug); 2548 ocfs2_put_dlm_debug(osb->osb_dlm_debug);
2582 memset(osb, 0, sizeof(struct ocfs2_super)); 2549 memset(osb, 0, sizeof(struct ocfs2_super));
2583
2584 mlog_exit_void();
2585} 2550}
2586 2551
2587/* Put OCFS2 into a readonly state, or (if the user specifies it), 2552/* Put OCFS2 into a readonly state, or (if the user specifies it),
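
super.c is also where the new tracepoints are instantiated: defining CREATE_TRACE_POINTS immediately before including ocfs2_trace.h is the standard way to emit the event bodies exactly once per subsystem. The header itself is not part of this diff, so the sketch below only shows the usual shape of such a file, not the actual contents of ocfs2_trace.h.

/* Illustrative skeleton of a per-subsystem trace header. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_put_super,
	TP_PROTO(struct super_block *sb),
	TP_ARGS(sb),
	TP_STRUCT__entry(
		__field(void *, sb)
	),
	TP_fast_assign(
		__entry->sb = sb;
	),
	TP_printk("%p", __entry->sb)
);

#endif /* _TRACE_OCFS2_H */

/* This part must stay outside the include guard. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE ocfs2_trace
#include <trace/define_trace.h>

Every other ocfs2 file that calls trace_* simply includes ocfs2_trace.h without the CREATE_TRACE_POINTS define, as the uptodate.c and xattr.c hunks below show.
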
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 9975457c981f..5d22872e2bb3 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -40,7 +40,6 @@
40#include <linux/pagemap.h> 40#include <linux/pagemap.h>
41#include <linux/namei.h> 41#include <linux/namei.h>
42 42
43#define MLOG_MASK_PREFIX ML_NAMEI
44#include <cluster/masklog.h> 43#include <cluster/masklog.h>
45 44
46#include "ocfs2.h" 45#include "ocfs2.h"
@@ -62,8 +61,6 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode,
62 char *link = NULL; 61 char *link = NULL;
63 struct ocfs2_dinode *fe; 62 struct ocfs2_dinode *fe;
64 63
65 mlog_entry_void();
66
67 status = ocfs2_read_inode_block(inode, bh); 64 status = ocfs2_read_inode_block(inode, bh);
68 if (status < 0) { 65 if (status < 0) {
69 mlog_errno(status); 66 mlog_errno(status);
@@ -74,7 +71,6 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode,
74 fe = (struct ocfs2_dinode *) (*bh)->b_data; 71 fe = (struct ocfs2_dinode *) (*bh)->b_data;
75 link = (char *) fe->id2.i_symlink; 72 link = (char *) fe->id2.i_symlink;
76bail: 73bail:
77 mlog_exit(status);
78 74
79 return link; 75 return link;
80} 76}
@@ -88,8 +84,6 @@ static int ocfs2_readlink(struct dentry *dentry,
88 struct buffer_head *bh = NULL; 84 struct buffer_head *bh = NULL;
89 struct inode *inode = dentry->d_inode; 85 struct inode *inode = dentry->d_inode;
90 86
91 mlog_entry_void();
92
93 link = ocfs2_fast_symlink_getlink(inode, &bh); 87 link = ocfs2_fast_symlink_getlink(inode, &bh);
94 if (IS_ERR(link)) { 88 if (IS_ERR(link)) {
95 ret = PTR_ERR(link); 89 ret = PTR_ERR(link);
@@ -104,7 +98,8 @@ static int ocfs2_readlink(struct dentry *dentry,
104 98
105 brelse(bh); 99 brelse(bh);
106out: 100out:
107 mlog_exit(ret); 101 if (ret < 0)
102 mlog_errno(ret);
108 return ret; 103 return ret;
109} 104}
110 105
@@ -117,8 +112,6 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
117 struct inode *inode = dentry->d_inode; 112 struct inode *inode = dentry->d_inode;
118 struct buffer_head *bh = NULL; 113 struct buffer_head *bh = NULL;
119 114
120 mlog_entry_void();
121
122 BUG_ON(!ocfs2_inode_is_fast_symlink(inode)); 115 BUG_ON(!ocfs2_inode_is_fast_symlink(inode));
123 target = ocfs2_fast_symlink_getlink(inode, &bh); 116 target = ocfs2_fast_symlink_getlink(inode, &bh);
124 if (IS_ERR(target)) { 117 if (IS_ERR(target)) {
@@ -142,7 +135,8 @@ bail:
142 nd_set_link(nd, status ? ERR_PTR(status) : link); 135 nd_set_link(nd, status ? ERR_PTR(status) : link);
143 brelse(bh); 136 brelse(bh);
144 137
145 mlog_exit(status); 138 if (status)
139 mlog_errno(status);
146 return NULL; 140 return NULL;
147} 141}
148 142
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index 902efb23b6a6..3d635f4bbb20 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -27,7 +27,6 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29 29
30#define MLOG_MASK_PREFIX ML_INODE
31#include <cluster/masklog.h> 30#include <cluster/masklog.h>
32 31
33#include "ocfs2.h" 32#include "ocfs2.h"
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index a0a120e82b97..52eaf33d346f 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -54,14 +54,13 @@
54#include <linux/buffer_head.h> 54#include <linux/buffer_head.h>
55#include <linux/rbtree.h> 55#include <linux/rbtree.h>
56 56
57#define MLOG_MASK_PREFIX ML_UPTODATE
58
59#include <cluster/masklog.h> 57#include <cluster/masklog.h>
60 58
61#include "ocfs2.h" 59#include "ocfs2.h"
62 60
63#include "inode.h" 61#include "inode.h"
64#include "uptodate.h" 62#include "uptodate.h"
63#include "ocfs2_trace.h"
65 64
66struct ocfs2_meta_cache_item { 65struct ocfs2_meta_cache_item {
67 struct rb_node c_node; 66 struct rb_node c_node;
@@ -152,8 +151,8 @@ static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
152 while ((node = rb_last(root)) != NULL) { 151 while ((node = rb_last(root)) != NULL) {
153 item = rb_entry(node, struct ocfs2_meta_cache_item, c_node); 152 item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);
154 153
155 mlog(0, "Purge item %llu\n", 154 trace_ocfs2_purge_copied_metadata_tree(
156 (unsigned long long) item->c_block); 155 (unsigned long long) item->c_block);
157 156
158 rb_erase(&item->c_node, root); 157 rb_erase(&item->c_node, root);
159 kmem_cache_free(ocfs2_uptodate_cachep, item); 158 kmem_cache_free(ocfs2_uptodate_cachep, item);
@@ -180,9 +179,9 @@ void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
180 tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE); 179 tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
181 to_purge = ci->ci_num_cached; 180 to_purge = ci->ci_num_cached;
182 181
183 mlog(0, "Purge %u %s items from Owner %llu\n", to_purge, 182 trace_ocfs2_metadata_cache_purge(
184 tree ? "array" : "tree", 183 (unsigned long long)ocfs2_metadata_cache_owner(ci),
185 (unsigned long long)ocfs2_metadata_cache_owner(ci)); 184 to_purge, tree);
186 185
187 /* If we're a tree, save off the root so that we can safely 186 /* If we're a tree, save off the root so that we can safely
188 * initialize the cache. We do the work to free tree members 187 * initialize the cache. We do the work to free tree members
@@ -249,10 +248,10 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
249 248
250 ocfs2_metadata_cache_lock(ci); 249 ocfs2_metadata_cache_lock(ci);
251 250
252 mlog(0, "Owner %llu, query block %llu (inline = %u)\n", 251 trace_ocfs2_buffer_cached_begin(
253 (unsigned long long)ocfs2_metadata_cache_owner(ci), 252 (unsigned long long)ocfs2_metadata_cache_owner(ci),
254 (unsigned long long) bh->b_blocknr, 253 (unsigned long long) bh->b_blocknr,
255 !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE)); 254 !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
256 255
257 if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) 256 if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
258 index = ocfs2_search_cache_array(ci, bh->b_blocknr); 257 index = ocfs2_search_cache_array(ci, bh->b_blocknr);
@@ -261,7 +260,7 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
261 260
262 ocfs2_metadata_cache_unlock(ci); 261 ocfs2_metadata_cache_unlock(ci);
263 262
264 mlog(0, "index = %d, item = %p\n", index, item); 263 trace_ocfs2_buffer_cached_end(index, item);
265 264
266 return (index != -1) || (item != NULL); 265 return (index != -1) || (item != NULL);
267} 266}
@@ -306,8 +305,9 @@ static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
306{ 305{
307 BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY); 306 BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);
308 307
309 mlog(0, "block %llu takes position %u\n", (unsigned long long) block, 308 trace_ocfs2_append_cache_array(
310 ci->ci_num_cached); 309 (unsigned long long)ocfs2_metadata_cache_owner(ci),
310 (unsigned long long)block, ci->ci_num_cached);
311 311
312 ci->ci_cache.ci_array[ci->ci_num_cached] = block; 312 ci->ci_cache.ci_array[ci->ci_num_cached] = block;
313 ci->ci_num_cached++; 313 ci->ci_num_cached++;
@@ -324,8 +324,9 @@ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
324 struct rb_node **p = &ci->ci_cache.ci_tree.rb_node; 324 struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
325 struct ocfs2_meta_cache_item *tmp; 325 struct ocfs2_meta_cache_item *tmp;
326 326
327 mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block, 327 trace_ocfs2_insert_cache_tree(
328 ci->ci_num_cached); 328 (unsigned long long)ocfs2_metadata_cache_owner(ci),
329 (unsigned long long)block, ci->ci_num_cached);
329 330
330 while(*p) { 331 while(*p) {
331 parent = *p; 332 parent = *p;
@@ -389,9 +390,9 @@ static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
389 tree[i] = NULL; 390 tree[i] = NULL;
390 } 391 }
391 392
392 mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n", 393 trace_ocfs2_expand_cache(
393 (unsigned long long)ocfs2_metadata_cache_owner(ci), 394 (unsigned long long)ocfs2_metadata_cache_owner(ci),
394 ci->ci_flags, ci->ci_num_cached); 395 ci->ci_flags, ci->ci_num_cached);
395} 396}
396 397
397/* Slow path function - memory allocation is necessary. See the 398/* Slow path function - memory allocation is necessary. See the
@@ -405,9 +406,9 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
405 struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] = 406 struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
406 { NULL, }; 407 { NULL, };
407 408
408 mlog(0, "Owner %llu, block %llu, expand = %d\n", 409 trace_ocfs2_set_buffer_uptodate(
409 (unsigned long long)ocfs2_metadata_cache_owner(ci), 410 (unsigned long long)ocfs2_metadata_cache_owner(ci),
410 (unsigned long long)block, expand_tree); 411 (unsigned long long)block, expand_tree);
411 412
412 new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); 413 new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
413 if (!new) { 414 if (!new) {
@@ -433,7 +434,6 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
433 434
434 ocfs2_metadata_cache_lock(ci); 435 ocfs2_metadata_cache_lock(ci);
435 if (ocfs2_insert_can_use_array(ci)) { 436 if (ocfs2_insert_can_use_array(ci)) {
436 mlog(0, "Someone cleared the tree underneath us\n");
437 /* Ok, items were removed from the cache in between 437 /* Ok, items were removed from the cache in between
438 * locks. Detect this and revert back to the fast path */ 438 * locks. Detect this and revert back to the fast path */
439 ocfs2_append_cache_array(ci, block); 439 ocfs2_append_cache_array(ci, block);
@@ -490,9 +490,9 @@ void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
490 if (ocfs2_buffer_cached(ci, bh)) 490 if (ocfs2_buffer_cached(ci, bh))
491 return; 491 return;
492 492
493 mlog(0, "Owner %llu, inserting block %llu\n", 493 trace_ocfs2_set_buffer_uptodate_begin(
494 (unsigned long long)ocfs2_metadata_cache_owner(ci), 494 (unsigned long long)ocfs2_metadata_cache_owner(ci),
495 (unsigned long long)bh->b_blocknr); 495 (unsigned long long)bh->b_blocknr);
496 496
497 /* No need to recheck under spinlock - insertion is guarded by 497 /* No need to recheck under spinlock - insertion is guarded by
498 * co_io_lock() */ 498 * co_io_lock() */
@@ -542,8 +542,9 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
542 BUG_ON(index >= ci->ci_num_cached); 542 BUG_ON(index >= ci->ci_num_cached);
543 BUG_ON(!ci->ci_num_cached); 543 BUG_ON(!ci->ci_num_cached);
544 544
545 mlog(0, "remove index %d (num_cached = %u\n", index, 545 trace_ocfs2_remove_metadata_array(
546 ci->ci_num_cached); 546 (unsigned long long)ocfs2_metadata_cache_owner(ci),
547 index, ci->ci_num_cached);
547 548
548 ci->ci_num_cached--; 549 ci->ci_num_cached--;
549 550
@@ -559,8 +560,9 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
559static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci, 560static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
560 struct ocfs2_meta_cache_item *item) 561 struct ocfs2_meta_cache_item *item)
561{ 562{
562 mlog(0, "remove block %llu from tree\n", 563 trace_ocfs2_remove_metadata_tree(
563 (unsigned long long) item->c_block); 564 (unsigned long long)ocfs2_metadata_cache_owner(ci),
565 (unsigned long long)item->c_block);
564 566
565 rb_erase(&item->c_node, &ci->ci_cache.ci_tree); 567 rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
566 ci->ci_num_cached--; 568 ci->ci_num_cached--;
@@ -573,10 +575,10 @@ static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
573 struct ocfs2_meta_cache_item *item = NULL; 575 struct ocfs2_meta_cache_item *item = NULL;
574 576
575 ocfs2_metadata_cache_lock(ci); 577 ocfs2_metadata_cache_lock(ci);
576 mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n", 578 trace_ocfs2_remove_block_from_cache(
577 (unsigned long long)ocfs2_metadata_cache_owner(ci), 579 (unsigned long long)ocfs2_metadata_cache_owner(ci),
578 (unsigned long long) block, ci->ci_num_cached, 580 (unsigned long long) block, ci->ci_num_cached,
579 ci->ci_flags & OCFS2_CACHE_FL_INLINE); 581 ci->ci_flags);
580 582
581 if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { 583 if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
582 index = ocfs2_search_cache_array(ci, block); 584 index = ocfs2_search_cache_array(ci, block);
@@ -626,9 +628,6 @@ int __init init_ocfs2_uptodate_cache(void)
626 if (!ocfs2_uptodate_cachep) 628 if (!ocfs2_uptodate_cachep)
627 return -ENOMEM; 629 return -ENOMEM;
628 630
629 mlog(0, "%u inlined cache items per inode.\n",
630 OCFS2_CACHE_INFO_MAX_ARRAY);
631
632 return 0; 631 return 0;
633} 632}
634 633
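
Most of the uptodate.c events carry the same (owner, block, count) tuple, which is exactly the case DECLARE_EVENT_CLASS/DEFINE_EVENT exists for: one class describes the fields and output format, and each event reuses it. How these events are actually grouped inside ocfs2_trace.h is not visible in this diff, so the class and names below are a hypothetical illustration.

DECLARE_EVENT_CLASS(ocfs2_ull_ull_uint,
	TP_PROTO(unsigned long long owner, unsigned long long block,
		 unsigned int count),
	TP_ARGS(owner, block, count),
	TP_STRUCT__entry(
		__field(unsigned long long, owner)
		__field(unsigned long long, block)
		__field(unsigned int, count)
	),
	TP_fast_assign(
		__entry->owner = owner;
		__entry->block = block;
		__entry->count = count;
	),
	TP_printk("%llu %llu %u",
		  __entry->owner, __entry->block, __entry->count)
);

/* Each event with this signature is then one line apiece. */
DEFINE_EVENT(ocfs2_ull_ull_uint, ocfs2_append_cache_array,
	TP_PROTO(unsigned long long owner, unsigned long long block,
		 unsigned int count),
	TP_ARGS(owner, block, count)
);

DEFINE_EVENT(ocfs2_ull_ull_uint, ocfs2_insert_cache_tree,
	TP_PROTO(unsigned long long owner, unsigned long long block,
		 unsigned int count),
	TP_ARGS(owner, block, count)
);
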
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 6bb602486c6b..57a215dc2d9b 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -37,7 +37,6 @@
37#include <linux/string.h> 37#include <linux/string.h>
38#include <linux/security.h> 38#include <linux/security.h>
39 39
40#define MLOG_MASK_PREFIX ML_XATTR
41#include <cluster/masklog.h> 40#include <cluster/masklog.h>
42 41
43#include "ocfs2.h" 42#include "ocfs2.h"
@@ -57,6 +56,7 @@
57#include "xattr.h" 56#include "xattr.h"
58#include "refcounttree.h" 57#include "refcounttree.h"
59#include "acl.h" 58#include "acl.h"
59#include "ocfs2_trace.h"
60 60
61struct ocfs2_xattr_def_value_root { 61struct ocfs2_xattr_def_value_root {
62 struct ocfs2_xattr_value_root xv; 62 struct ocfs2_xattr_value_root xv;
@@ -474,8 +474,7 @@ static int ocfs2_validate_xattr_block(struct super_block *sb,
474 struct ocfs2_xattr_block *xb = 474 struct ocfs2_xattr_block *xb =
475 (struct ocfs2_xattr_block *)bh->b_data; 475 (struct ocfs2_xattr_block *)bh->b_data;
476 476
477 mlog(0, "Validating xattr block %llu\n", 477 trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr);
478 (unsigned long long)bh->b_blocknr);
479 478
480 BUG_ON(!buffer_uptodate(bh)); 479 BUG_ON(!buffer_uptodate(bh));
481 480
@@ -715,11 +714,11 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
715 u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); 714 u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
716 struct ocfs2_extent_tree et; 715 struct ocfs2_extent_tree et;
717 716
718 mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add);
719
720 ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); 717 ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
721 718
722 while (clusters_to_add) { 719 while (clusters_to_add) {
720 trace_ocfs2_xattr_extend_allocation(clusters_to_add);
721
723 status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, 722 status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
724 OCFS2_JOURNAL_ACCESS_WRITE); 723 OCFS2_JOURNAL_ACCESS_WRITE);
725 if (status < 0) { 724 if (status < 0) {
@@ -754,8 +753,6 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
754 */ 753 */
755 BUG_ON(why == RESTART_META); 754 BUG_ON(why == RESTART_META);
756 755
757 mlog(0, "restarting xattr value extension for %u"
758 " clusters,.\n", clusters_to_add);
759 credits = ocfs2_calc_extend_credits(inode->i_sb, 756 credits = ocfs2_calc_extend_credits(inode->i_sb,
760 &vb->vb_xv->xr_list, 757 &vb->vb_xv->xr_list,
761 clusters_to_add); 758 clusters_to_add);
@@ -3246,8 +3243,8 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
3246 } 3243 }
3247 3244
3248 meta_add += extra_meta; 3245 meta_add += extra_meta;
3249 mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, " 3246 trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add,
3250 "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits); 3247 clusters_add, *credits);
3251 3248
3252 if (meta_add) { 3249 if (meta_add) {
3253 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, 3250 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
@@ -3887,8 +3884,10 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
3887 3884
3888 if (found) { 3885 if (found) {
3889 xs->here = &xs->header->xh_entries[index]; 3886 xs->here = &xs->header->xh_entries[index];
3890 mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name, 3887 trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno,
3891 (unsigned long long)bucket_blkno(xs->bucket), index); 3888 name, name_index, name_hash,
3889 (unsigned long long)bucket_blkno(xs->bucket),
3890 index);
3892 } else 3891 } else
3893 ret = -ENODATA; 3892 ret = -ENODATA;
3894 3893
@@ -3915,8 +3914,10 @@ static int ocfs2_xattr_index_block_find(struct inode *inode,
3915 if (le16_to_cpu(el->l_next_free_rec) == 0) 3914 if (le16_to_cpu(el->l_next_free_rec) == 0)
3916 return -ENODATA; 3915 return -ENODATA;
3917 3916
3918 mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n", 3917 trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno,
3919 name, name_hash, name_index); 3918 name, name_index, name_hash,
3919 (unsigned long long)root_bh->b_blocknr,
3920 -1);
3920 3921
3921 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash, 3922 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash,
3922 &num_clusters, el); 3923 &num_clusters, el);
@@ -3927,9 +3928,10 @@ static int ocfs2_xattr_index_block_find(struct inode *inode,
3927 3928
3928 BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash); 3929 BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash);
3929 3930
3930 mlog(0, "find xattr extent rec %u clusters from %llu, the first hash " 3931 trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno,
3931 "in the rec is %u\n", num_clusters, (unsigned long long)p_blkno, 3932 name, name_index, first_hash,
3932 first_hash); 3933 (unsigned long long)p_blkno,
3934 num_clusters);
3933 3935
3934 ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash, 3936 ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash,
3935 p_blkno, first_hash, num_clusters, xs); 3937 p_blkno, first_hash, num_clusters, xs);
@@ -3955,8 +3957,9 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode,
3955 return -ENOMEM; 3957 return -ENOMEM;
3956 } 3958 }
3957 3959
3958 mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n", 3960 trace_ocfs2_iterate_xattr_buckets(
3959 clusters, (unsigned long long)blkno); 3961 (unsigned long long)OCFS2_I(inode)->ip_blkno,
3962 (unsigned long long)blkno, clusters);
3960 3963
3961 for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) { 3964 for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
3962 ret = ocfs2_read_xattr_bucket(bucket, blkno); 3965 ret = ocfs2_read_xattr_bucket(bucket, blkno);
@@ -3972,8 +3975,7 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode,
3972 if (i == 0) 3975 if (i == 0)
3973 num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets); 3976 num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);
3974 3977
3975 mlog(0, "iterating xattr bucket %llu, first hash %u\n", 3978 trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno,
3976 (unsigned long long)blkno,
3977 le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash)); 3979 le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
3978 if (func) { 3980 if (func) {
3979 ret = func(inode, bucket, para); 3981 ret = func(inode, bucket, para);
@@ -4173,9 +4175,9 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
4173 char *src = xb_bh->b_data; 4175 char *src = xb_bh->b_data;
4174 char *target = bucket_block(bucket, blks - 1); 4176 char *target = bucket_block(bucket, blks - 1);
4175 4177
4176 mlog(0, "cp xattr from block %llu to bucket %llu\n", 4178 trace_ocfs2_cp_xattr_block_to_bucket_begin(
4177 (unsigned long long)xb_bh->b_blocknr, 4179 (unsigned long long)xb_bh->b_blocknr,
4178 (unsigned long long)bucket_blkno(bucket)); 4180 (unsigned long long)bucket_blkno(bucket));
4179 4181
4180 for (i = 0; i < blks; i++) 4182 for (i = 0; i < blks; i++)
4181 memset(bucket_block(bucket, i), 0, blocksize); 4183 memset(bucket_block(bucket, i), 0, blocksize);
@@ -4211,8 +4213,7 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
4211 for (i = 0; i < count; i++) 4213 for (i = 0; i < count; i++)
4212 le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change); 4214 le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);
4213 4215
4214 mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n", 4216 trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);
4215 offset, size, off_change);
4216 4217
4217 sort(target + offset, count, sizeof(struct ocfs2_xattr_entry), 4218 sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
4218 cmp_xe, swap_xe); 4219 cmp_xe, swap_xe);
@@ -4261,8 +4262,8 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
4261 struct ocfs2_xattr_tree_root *xr; 4262 struct ocfs2_xattr_tree_root *xr;
4262 u16 xb_flags = le16_to_cpu(xb->xb_flags); 4263 u16 xb_flags = le16_to_cpu(xb->xb_flags);
4263 4264
4264 mlog(0, "create xattr index block for %llu\n", 4265 trace_ocfs2_xattr_create_index_block_begin(
4265 (unsigned long long)xb_bh->b_blocknr); 4266 (unsigned long long)xb_bh->b_blocknr);
4266 4267
4267 BUG_ON(xb_flags & OCFS2_XATTR_INDEXED); 4268 BUG_ON(xb_flags & OCFS2_XATTR_INDEXED);
4268 BUG_ON(!xs->bucket); 4269 BUG_ON(!xs->bucket);
@@ -4295,8 +4296,7 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
4295 */ 4296 */
4296 blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); 4297 blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
4297 4298
4298 mlog(0, "allocate 1 cluster from %llu to xattr block\n", 4299 trace_ocfs2_xattr_create_index_block((unsigned long long)blkno);
4299 (unsigned long long)blkno);
4300 4300
4301 ret = ocfs2_init_xattr_bucket(xs->bucket, blkno); 4301 ret = ocfs2_init_xattr_bucket(xs->bucket, blkno);
4302 if (ret) { 4302 if (ret) {
@@ -4400,8 +4400,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
4400 entries = (char *)xh->xh_entries; 4400 entries = (char *)xh->xh_entries;
4401 xh_free_start = le16_to_cpu(xh->xh_free_start); 4401 xh_free_start = le16_to_cpu(xh->xh_free_start);
4402 4402
4403 mlog(0, "adjust xattr bucket in %llu, count = %u, " 4403 trace_ocfs2_defrag_xattr_bucket(
4404 "xh_free_start = %u, xh_name_value_len = %u.\n",
4405 (unsigned long long)blkno, le16_to_cpu(xh->xh_count), 4404 (unsigned long long)blkno, le16_to_cpu(xh->xh_count),
4406 xh_free_start, le16_to_cpu(xh->xh_name_value_len)); 4405 xh_free_start, le16_to_cpu(xh->xh_name_value_len));
4407 4406
@@ -4503,8 +4502,9 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode,
4503 BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets); 4502 BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets);
4504 BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize); 4503 BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize);
4505 4504
4506 mlog(0, "move half of xattrs in cluster %llu to %llu\n", 4505 trace_ocfs2_mv_xattr_bucket_cross_cluster(
4507 (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno); 4506 (unsigned long long)last_cluster_blkno,
4507 (unsigned long long)new_blkno);
4508 4508
4509 ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first), 4509 ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first),
4510 last_cluster_blkno, new_blkno, 4510 last_cluster_blkno, new_blkno,
@@ -4614,8 +4614,8 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
4614 struct ocfs2_xattr_entry *xe; 4614 struct ocfs2_xattr_entry *xe;
4615 int blocksize = inode->i_sb->s_blocksize; 4615 int blocksize = inode->i_sb->s_blocksize;
4616 4616
4617 mlog(0, "move some of xattrs from bucket %llu to %llu\n", 4617 trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk,
4618 (unsigned long long)blk, (unsigned long long)new_blk); 4618 (unsigned long long)new_blk);
4619 4619
4620 s_bucket = ocfs2_xattr_bucket_new(inode); 4620 s_bucket = ocfs2_xattr_bucket_new(inode);
4621 t_bucket = ocfs2_xattr_bucket_new(inode); 4621 t_bucket = ocfs2_xattr_bucket_new(inode);
@@ -4714,9 +4714,9 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
4714 */ 4714 */
4715 xe = &xh->xh_entries[start]; 4715 xe = &xh->xh_entries[start];
4716 len = sizeof(struct ocfs2_xattr_entry) * (count - start); 4716 len = sizeof(struct ocfs2_xattr_entry) * (count - start);
4717 mlog(0, "mv xattr entry len %d from %d to %d\n", len, 4717 trace_ocfs2_divide_xattr_bucket_move(len,
4718 (int)((char *)xe - (char *)xh), 4718 (int)((char *)xe - (char *)xh),
4719 (int)((char *)xh->xh_entries - (char *)xh)); 4719 (int)((char *)xh->xh_entries - (char *)xh));
4720 memmove((char *)xh->xh_entries, (char *)xe, len); 4720 memmove((char *)xh->xh_entries, (char *)xe, len);
4721 xe = &xh->xh_entries[count - start]; 4721 xe = &xh->xh_entries[count - start];
4722 len = sizeof(struct ocfs2_xattr_entry) * start; 4722 len = sizeof(struct ocfs2_xattr_entry) * start;
@@ -4788,9 +4788,9 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode,
4788 4788
4789 BUG_ON(s_blkno == t_blkno); 4789 BUG_ON(s_blkno == t_blkno);
4790 4790
4791 mlog(0, "cp bucket %llu to %llu, target is %d\n", 4791 trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno,
4792 (unsigned long long)s_blkno, (unsigned long long)t_blkno, 4792 (unsigned long long)t_blkno,
4793 t_is_new); 4793 t_is_new);
4794 4794
4795 s_bucket = ocfs2_xattr_bucket_new(inode); 4795 s_bucket = ocfs2_xattr_bucket_new(inode);
4796 t_bucket = ocfs2_xattr_bucket_new(inode); 4796 t_bucket = ocfs2_xattr_bucket_new(inode);
@@ -4862,8 +4862,8 @@ static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
4862 int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); 4862 int num_buckets = ocfs2_xattr_buckets_per_cluster(osb);
4863 struct ocfs2_xattr_bucket *old_first, *new_first; 4863 struct ocfs2_xattr_bucket *old_first, *new_first;
4864 4864
4865 mlog(0, "mv xattrs from cluster %llu to %llu\n", 4865 trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk,
4866 (unsigned long long)last_blk, (unsigned long long)to_blk); 4866 (unsigned long long)to_blk);
4867 4867
4868 BUG_ON(start_bucket >= num_buckets); 4868 BUG_ON(start_bucket >= num_buckets);
4869 if (start_bucket) { 4869 if (start_bucket) {
@@ -5013,9 +5013,9 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode,
5013{ 5013{
5014 int ret; 5014 int ret;
5015 5015
5016 mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n", 5016 trace_ocfs2_adjust_xattr_cross_cluster(
5017 (unsigned long long)bucket_blkno(first), prev_clusters, 5017 (unsigned long long)bucket_blkno(first),
5018 (unsigned long long)new_blk); 5018 (unsigned long long)new_blk, prev_clusters);
5019 5019
5020 if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) { 5020 if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) {
5021 ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, 5021 ret = ocfs2_mv_xattr_bucket_cross_cluster(inode,
@@ -5088,10 +5088,10 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
5088 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 5088 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
5089 struct ocfs2_extent_tree et; 5089 struct ocfs2_extent_tree et;
5090 5090
5091 mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, " 5091 trace_ocfs2_add_new_xattr_cluster_begin(
5092 "previous xattr blkno = %llu\n", 5092 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5093 (unsigned long long)OCFS2_I(inode)->ip_blkno, 5093 (unsigned long long)bucket_blkno(first),
5094 prev_cpos, (unsigned long long)bucket_blkno(first)); 5094 prev_cpos, prev_clusters);
5095 5095
5096 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh); 5096 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
5097 5097
@@ -5113,8 +5113,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
5113 BUG_ON(num_bits > clusters_to_add); 5113 BUG_ON(num_bits > clusters_to_add);
5114 5114
5115 block = ocfs2_clusters_to_blocks(osb->sb, bit_off); 5115 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
5116 mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n", 5116 trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits);
5117 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
5118 5117
5119 if (bucket_blkno(first) + (prev_clusters * bpc) == block && 5118 if (bucket_blkno(first) + (prev_clusters * bpc) == block &&
5120 (prev_clusters + num_bits) << osb->s_clustersize_bits <= 5119 (prev_clusters + num_bits) << osb->s_clustersize_bits <=
@@ -5130,8 +5129,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
5130 */ 5129 */
5131 v_start = prev_cpos + prev_clusters; 5130 v_start = prev_cpos + prev_clusters;
5132 *num_clusters = prev_clusters + num_bits; 5131 *num_clusters = prev_clusters + num_bits;
5133 mlog(0, "Add contiguous %u clusters to previous extent rec.\n",
5134 num_bits);
5135 } else { 5132 } else {
5136 ret = ocfs2_adjust_xattr_cross_cluster(inode, 5133 ret = ocfs2_adjust_xattr_cross_cluster(inode,
5137 handle, 5134 handle,
@@ -5147,8 +5144,8 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
5147 } 5144 }
5148 } 5145 }
5149 5146
5150 mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", 5147 trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block,
5151 num_bits, (unsigned long long)block, v_start); 5148 v_start, num_bits);
5152 ret = ocfs2_insert_extent(handle, &et, v_start, block, 5149 ret = ocfs2_insert_extent(handle, &et, v_start, block,
5153 num_bits, 0, ctxt->meta_ac); 5150 num_bits, 0, ctxt->meta_ac);
5154 if (ret < 0) { 5151 if (ret < 0) {
@@ -5183,9 +5180,9 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode,
5183 u64 end_blk; 5180 u64 end_blk;
5184 u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets); 5181 u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets);
5185 5182
5186 mlog(0, "extend xattr bucket in %llu, xattr extend rec starting " 5183 trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk,
5187 "from %llu, len = %u\n", (unsigned long long)target_blk, 5184 (unsigned long long)bucket_blkno(first),
5188 (unsigned long long)bucket_blkno(first), num_clusters); 5185 num_clusters, new_bucket);
5189 5186
5190 /* The extent must have room for an additional bucket */ 5187 /* The extent must have room for an additional bucket */
5191 BUG_ON(new_bucket >= 5188 BUG_ON(new_bucket >=
@@ -5265,8 +5262,8 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode,
5265 /* The bucket at the front of the extent */ 5262 /* The bucket at the front of the extent */
5266 struct ocfs2_xattr_bucket *first; 5263 struct ocfs2_xattr_bucket *first;
5267 5264
5268 mlog(0, "Add new xattr bucket starting from %llu\n", 5265 trace_ocfs2_add_new_xattr_bucket(
5269 (unsigned long long)bucket_blkno(target)); 5266 (unsigned long long)bucket_blkno(target));
5270 5267
5271 /* The first bucket of the original extent */ 5268 /* The first bucket of the original extent */
5272 first = ocfs2_xattr_bucket_new(inode); 5269 first = ocfs2_xattr_bucket_new(inode);
@@ -5382,8 +5379,8 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
5382 * modified something. We have to assume they did, and dirty 5379 * modified something. We have to assume they did, and dirty
5383 * the whole bucket. This leaves us in a consistent state. 5380 * the whole bucket. This leaves us in a consistent state.
5384 */ 5381 */
5385 mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n", 5382 trace_ocfs2_xattr_bucket_value_truncate(
5386 xe_off, (unsigned long long)bucket_blkno(bucket), len); 5383 (unsigned long long)bucket_blkno(bucket), xe_off, len);
5387 ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); 5384 ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
5388 if (ret) { 5385 if (ret) {
5389 mlog_errno(ret); 5386 mlog_errno(ret);
@@ -5433,8 +5430,9 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode,
5433 5430
5434 ocfs2_init_dealloc_ctxt(&dealloc); 5431 ocfs2_init_dealloc_ctxt(&dealloc);
5435 5432
5436 mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n", 5433 trace_ocfs2_rm_xattr_cluster(
5437 cpos, len, (unsigned long long)blkno); 5434 (unsigned long long)OCFS2_I(inode)->ip_blkno,
5435 (unsigned long long)blkno, cpos, len);
5438 5436
5439 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno, 5437 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
5440 len); 5438 len);
@@ -5538,7 +5536,7 @@ static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
5538 int ret; 5536 int ret;
5539 struct ocfs2_xa_loc loc; 5537 struct ocfs2_xa_loc loc;
5540 5538
5541 mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name); 5539 trace_ocfs2_xattr_set_entry_bucket(xi->xi_name);
5542 5540
5543 ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket, 5541 ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
5544 xs->not_found ? NULL : xs->here); 5542 xs->not_found ? NULL : xs->here);
@@ -5570,7 +5568,6 @@ static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
5570 5568
5571 5569
5572out: 5570out:
5573 mlog_exit(ret);
5574 return ret; 5571 return ret;
5575} 5572}
5576 5573
@@ -5581,7 +5578,7 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
5581{ 5578{
5582 int ret; 5579 int ret;
5583 5580
5584 mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name); 5581 trace_ocfs2_xattr_set_entry_index_block(xi->xi_name);
5585 5582
5586 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); 5583 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
5587 if (!ret) 5584 if (!ret)
@@ -5637,7 +5634,6 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
5637 mlog_errno(ret); 5634 mlog_errno(ret);
5638 5635
5639out: 5636out:
5640 mlog_exit(ret);
5641 return ret; 5637 return ret;
5642} 5638}
5643 5639
@@ -6041,9 +6037,9 @@ static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
6041 if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb))) 6037 if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
6042 p = &refcount; 6038 p = &refcount;
6043 6039
6044 mlog(0, "refcount bucket %llu, count = %u\n", 6040 trace_ocfs2_xattr_bucket_value_refcount(
6045 (unsigned long long)bucket_blkno(bucket), 6041 (unsigned long long)bucket_blkno(bucket),
6046 le16_to_cpu(xh->xh_count)); 6042 le16_to_cpu(xh->xh_count));
6047 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { 6043 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
6048 xe = &xh->xh_entries[i]; 6044 xe = &xh->xh_entries[i];
6049 6045
@@ -6339,8 +6335,8 @@ static int ocfs2_reflink_xattr_header(handle_t *handle,
6339 u32 clusters, cpos, p_cluster, num_clusters; 6335 u32 clusters, cpos, p_cluster, num_clusters;
6340 unsigned int ext_flags = 0; 6336 unsigned int ext_flags = 0;
6341 6337
6342 mlog(0, "reflink xattr in container %llu, count = %u\n", 6338 trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr,
6343 (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count)); 6339 le16_to_cpu(xh->xh_count));
6344 6340
6345 last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)]; 6341 last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
6346 for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) { 6342 for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
@@ -6540,8 +6536,8 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
6540 goto out; 6536 goto out;
6541 } 6537 }
6542 6538
6543 mlog(0, "create new xattr block for inode %llu, index = %d\n", 6539 trace_ocfs2_create_empty_xattr_block(
6544 (unsigned long long)fe_bh->b_blocknr, indexed); 6540 (unsigned long long)fe_bh->b_blocknr, indexed);
6545 ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed, 6541 ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
6546 ret_bh); 6542 ret_bh);
6547 if (ret) 6543 if (ret)
@@ -6952,8 +6948,8 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
6952 if (ret) 6948 if (ret)
6953 mlog_errno(ret); 6949 mlog_errno(ret);
6954 6950
6955 mlog(0, "insert new xattr extent rec start %llu len %u to %u\n", 6951 trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno,
6956 (unsigned long long)new_blkno, num_clusters, reflink_cpos); 6952 num_clusters, reflink_cpos);
6957 6953
6958 len -= num_clusters; 6954 len -= num_clusters;
6959 blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); 6955 blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
@@ -6982,8 +6978,7 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
6982 struct ocfs2_alloc_context *data_ac = NULL; 6978 struct ocfs2_alloc_context *data_ac = NULL;
6983 struct ocfs2_extent_tree et; 6979 struct ocfs2_extent_tree et;
6984 6980
6985 mlog(0, "reflink xattr buckets %llu len %u\n", 6981 trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len);
6986 (unsigned long long)blkno, len);
6987 6982
6988 ocfs2_init_xattr_tree_extent_tree(&et, 6983 ocfs2_init_xattr_tree_extent_tree(&et,
6989 INODE_CACHE(args->reflink->new_inode), 6984 INODE_CACHE(args->reflink->new_inode),
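
Several of the xattr events also carry the attribute name, e.g. trace_ocfs2_xattr_set_entry_bucket(xi->xi_name). String arguments are normally copied into the ring buffer with the __string()/__assign_str() helpers rather than stored as raw pointers. A hedged sketch of such a definition; the real entry in ocfs2_trace.h may well differ.

TRACE_EVENT(ocfs2_xattr_set_entry_bucket,
	TP_PROTO(const char *name),
	TP_ARGS(name),
	TP_STRUCT__entry(
		__string(name, name)
	),
	TP_fast_assign(
		__assign_str(name, name);
	),
	TP_printk("%s", __get_str(name))
);

Once compiled in, all of these events appear under the tracing events/ocfs2/ directory and can be enabled individually or as a group at run time, which is the payoff for converting the old always-on mlog(0, ...) calls.
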
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index c05324d3282c..596bb2c9de42 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -94,75 +94,6 @@ xfs_buf_vmap_len(
94} 94}
95 95
96/* 96/*
97 * Page Region interfaces.
98 *
99 * For pages in filesystems where the blocksize is smaller than the
100 * pagesize, we use the page->private field (long) to hold a bitmap
101 * of uptodate regions within the page.
102 *
103 * Each such region is "bytes per page / bits per long" bytes long.
104 *
105 * NBPPR == number-of-bytes-per-page-region
106 * BTOPR == bytes-to-page-region (rounded up)
107 * BTOPRT == bytes-to-page-region-truncated (rounded down)
108 */
109#if (BITS_PER_LONG == 32)
110#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
111#elif (BITS_PER_LONG == 64)
112#define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
113#else
114#error BITS_PER_LONG must be 32 or 64
115#endif
116#define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
117#define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
118#define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
119
120STATIC unsigned long
121page_region_mask(
122 size_t offset,
123 size_t length)
124{
125 unsigned long mask;
126 int first, final;
127
128 first = BTOPR(offset);
129 final = BTOPRT(offset + length - 1);
130 first = min(first, final);
131
132 mask = ~0UL;
133 mask <<= BITS_PER_LONG - (final - first);
134 mask >>= BITS_PER_LONG - (final);
135
136 ASSERT(offset + length <= PAGE_CACHE_SIZE);
137 ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
138
139 return mask;
140}
141
142STATIC void
143set_page_region(
144 struct page *page,
145 size_t offset,
146 size_t length)
147{
148 set_page_private(page,
149 page_private(page) | page_region_mask(offset, length));
150 if (page_private(page) == ~0UL)
151 SetPageUptodate(page);
152}
153
154STATIC int
155test_page_region(
156 struct page *page,
157 size_t offset,
158 size_t length)
159{
160 unsigned long mask = page_region_mask(offset, length);
161
162 return (mask && (page_private(page) & mask) == mask);
163}
164
165/*
166 * xfs_buf_lru_add - add a buffer to the LRU. 97 * xfs_buf_lru_add - add a buffer to the LRU.
167 * 98 *
168 * The LRU takes a new reference to the buffer so that it will only be freed 99 * The LRU takes a new reference to the buffer so that it will only be freed
@@ -332,7 +263,7 @@ xfs_buf_free(
332 263
333 ASSERT(list_empty(&bp->b_lru)); 264 ASSERT(list_empty(&bp->b_lru));
334 265
335 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { 266 if (bp->b_flags & _XBF_PAGES) {
336 uint i; 267 uint i;
337 268
338 if (xfs_buf_is_vmapped(bp)) 269 if (xfs_buf_is_vmapped(bp))
@@ -342,25 +273,22 @@ xfs_buf_free(
342 for (i = 0; i < bp->b_page_count; i++) { 273 for (i = 0; i < bp->b_page_count; i++) {
343 struct page *page = bp->b_pages[i]; 274 struct page *page = bp->b_pages[i];
344 275
345 if (bp->b_flags & _XBF_PAGE_CACHE) 276 __free_page(page);
346 ASSERT(!PagePrivate(page));
347 page_cache_release(page);
348 } 277 }
349 } 278 } else if (bp->b_flags & _XBF_KMEM)
279 kmem_free(bp->b_addr);
350 _xfs_buf_free_pages(bp); 280 _xfs_buf_free_pages(bp);
351 xfs_buf_deallocate(bp); 281 xfs_buf_deallocate(bp);
352} 282}
353 283
354/* 284/*
 355 * Finds all pages for buffer in question and builds its page list. 285 * Allocates all the pages for buffer in question and builds its page list.
356 */ 286 */
357STATIC int 287STATIC int
358_xfs_buf_lookup_pages( 288xfs_buf_allocate_memory(
359 xfs_buf_t *bp, 289 xfs_buf_t *bp,
360 uint flags) 290 uint flags)
361{ 291{
362 struct address_space *mapping = bp->b_target->bt_mapping;
363 size_t blocksize = bp->b_target->bt_bsize;
364 size_t size = bp->b_count_desired; 292 size_t size = bp->b_count_desired;
365 size_t nbytes, offset; 293 size_t nbytes, offset;
366 gfp_t gfp_mask = xb_to_gfp(flags); 294 gfp_t gfp_mask = xb_to_gfp(flags);
@@ -369,29 +297,55 @@ _xfs_buf_lookup_pages(
369 xfs_off_t end; 297 xfs_off_t end;
370 int error; 298 int error;
371 299
300 /*
301 * for buffers that are contained within a single page, just allocate
302 * the memory from the heap - there's no need for the complexity of
303 * page arrays to keep allocation down to order 0.
304 */
305 if (bp->b_buffer_length < PAGE_SIZE) {
306 bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
307 if (!bp->b_addr) {
308 /* low memory - use alloc_page loop instead */
309 goto use_alloc_page;
310 }
311
312 if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
313 PAGE_MASK) !=
314 ((unsigned long)bp->b_addr & PAGE_MASK)) {
315 /* b_addr spans two pages - use alloc_page instead */
316 kmem_free(bp->b_addr);
317 bp->b_addr = NULL;
318 goto use_alloc_page;
319 }
320 bp->b_offset = offset_in_page(bp->b_addr);
321 bp->b_pages = bp->b_page_array;
322 bp->b_pages[0] = virt_to_page(bp->b_addr);
323 bp->b_page_count = 1;
324 bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
325 return 0;
326 }
327
328use_alloc_page:
372 end = bp->b_file_offset + bp->b_buffer_length; 329 end = bp->b_file_offset + bp->b_buffer_length;
373 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset); 330 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
374
375 error = _xfs_buf_get_pages(bp, page_count, flags); 331 error = _xfs_buf_get_pages(bp, page_count, flags);
376 if (unlikely(error)) 332 if (unlikely(error))
377 return error; 333 return error;
378 bp->b_flags |= _XBF_PAGE_CACHE;
379 334
380 offset = bp->b_offset; 335 offset = bp->b_offset;
381 first = bp->b_file_offset >> PAGE_CACHE_SHIFT; 336 first = bp->b_file_offset >> PAGE_SHIFT;
337 bp->b_flags |= _XBF_PAGES;
382 338
383 for (i = 0; i < bp->b_page_count; i++) { 339 for (i = 0; i < bp->b_page_count; i++) {
384 struct page *page; 340 struct page *page;
385 uint retries = 0; 341 uint retries = 0;
386 342retry:
387 retry: 343 page = alloc_page(gfp_mask);
388 page = find_or_create_page(mapping, first + i, gfp_mask);
389 if (unlikely(page == NULL)) { 344 if (unlikely(page == NULL)) {
390 if (flags & XBF_READ_AHEAD) { 345 if (flags & XBF_READ_AHEAD) {
391 bp->b_page_count = i; 346 bp->b_page_count = i;
392 for (i = 0; i < bp->b_page_count; i++) 347 error = ENOMEM;
393 unlock_page(bp->b_pages[i]); 348 goto out_free_pages;
394 return -ENOMEM;
395 } 349 }
396 350
397 /* 351 /*
@@ -412,33 +366,16 @@ _xfs_buf_lookup_pages(
412 366
413 XFS_STATS_INC(xb_page_found); 367 XFS_STATS_INC(xb_page_found);
414 368
415 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); 369 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
416 size -= nbytes; 370 size -= nbytes;
417
418 ASSERT(!PagePrivate(page));
419 if (!PageUptodate(page)) {
420 page_count--;
421 if (blocksize >= PAGE_CACHE_SIZE) {
422 if (flags & XBF_READ)
423 bp->b_flags |= _XBF_PAGE_LOCKED;
424 } else if (!PagePrivate(page)) {
425 if (test_page_region(page, offset, nbytes))
426 page_count++;
427 }
428 }
429
430 bp->b_pages[i] = page; 371 bp->b_pages[i] = page;
431 offset = 0; 372 offset = 0;
432 } 373 }
374 return 0;
433 375
434 if (!(bp->b_flags & _XBF_PAGE_LOCKED)) { 376out_free_pages:
435 for (i = 0; i < bp->b_page_count; i++) 377 for (i = 0; i < bp->b_page_count; i++)
436 unlock_page(bp->b_pages[i]); 378 __free_page(bp->b_pages[i]);
437 }
438
439 if (page_count == bp->b_page_count)
440 bp->b_flags |= XBF_DONE;
441
442 return error; 379 return error;
443} 380}
444 381
@@ -450,14 +387,23 @@ _xfs_buf_map_pages(
450 xfs_buf_t *bp, 387 xfs_buf_t *bp,
451 uint flags) 388 uint flags)
452{ 389{
453 /* A single page buffer is always mappable */ 390 ASSERT(bp->b_flags & _XBF_PAGES);
454 if (bp->b_page_count == 1) { 391 if (bp->b_page_count == 1) {
392 /* A single page buffer is always mappable */
455 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; 393 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
456 bp->b_flags |= XBF_MAPPED; 394 bp->b_flags |= XBF_MAPPED;
457 } else if (flags & XBF_MAPPED) { 395 } else if (flags & XBF_MAPPED) {
458 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, 396 int retried = 0;
459 -1, PAGE_KERNEL); 397
460 if (unlikely(bp->b_addr == NULL)) 398 do {
399 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
400 -1, PAGE_KERNEL);
401 if (bp->b_addr)
402 break;
403 vm_unmap_aliases();
404 } while (retried++ <= 1);
405
406 if (!bp->b_addr)
461 return -ENOMEM; 407 return -ENOMEM;
462 bp->b_addr += bp->b_offset; 408 bp->b_addr += bp->b_offset;
463 bp->b_flags |= XBF_MAPPED; 409 bp->b_flags |= XBF_MAPPED;
@@ -568,9 +514,14 @@ found:
568 } 514 }
569 } 515 }
570 516
517 /*
518 * if the buffer is stale, clear all the external state associated with
519 * it. We need to keep flags such as how we allocated the buffer memory
520 * intact here.
521 */
571 if (bp->b_flags & XBF_STALE) { 522 if (bp->b_flags & XBF_STALE) {
572 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); 523 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
573 bp->b_flags &= XBF_MAPPED; 524 bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
574 } 525 }
575 526
576 trace_xfs_buf_find(bp, flags, _RET_IP_); 527 trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -591,7 +542,7 @@ xfs_buf_get(
591 xfs_buf_flags_t flags) 542 xfs_buf_flags_t flags)
592{ 543{
593 xfs_buf_t *bp, *new_bp; 544 xfs_buf_t *bp, *new_bp;
594 int error = 0, i; 545 int error = 0;
595 546
596 new_bp = xfs_buf_allocate(flags); 547 new_bp = xfs_buf_allocate(flags);
597 if (unlikely(!new_bp)) 548 if (unlikely(!new_bp))
@@ -599,7 +550,7 @@ xfs_buf_get(
599 550
600 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); 551 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
601 if (bp == new_bp) { 552 if (bp == new_bp) {
602 error = _xfs_buf_lookup_pages(bp, flags); 553 error = xfs_buf_allocate_memory(bp, flags);
603 if (error) 554 if (error)
604 goto no_buffer; 555 goto no_buffer;
605 } else { 556 } else {
@@ -608,9 +559,6 @@ xfs_buf_get(
608 return NULL; 559 return NULL;
609 } 560 }
610 561
611 for (i = 0; i < bp->b_page_count; i++)
612 mark_page_accessed(bp->b_pages[i]);
613
614 if (!(bp->b_flags & XBF_MAPPED)) { 562 if (!(bp->b_flags & XBF_MAPPED)) {
615 error = _xfs_buf_map_pages(bp, flags); 563 error = _xfs_buf_map_pages(bp, flags);
616 if (unlikely(error)) { 564 if (unlikely(error)) {
@@ -711,8 +659,7 @@ xfs_buf_readahead(
711{ 659{
712 struct backing_dev_info *bdi; 660 struct backing_dev_info *bdi;
713 661
714 bdi = target->bt_mapping->backing_dev_info; 662 if (bdi_read_congested(target->bt_bdi))
715 if (bdi_read_congested(bdi))
716 return; 663 return;
717 664
718 xfs_buf_read(target, ioff, isize, 665 xfs_buf_read(target, ioff, isize,
@@ -790,10 +737,10 @@ xfs_buf_associate_memory(
790 size_t buflen; 737 size_t buflen;
791 int page_count; 738 int page_count;
792 739
793 pageaddr = (unsigned long)mem & PAGE_CACHE_MASK; 740 pageaddr = (unsigned long)mem & PAGE_MASK;
794 offset = (unsigned long)mem - pageaddr; 741 offset = (unsigned long)mem - pageaddr;
795 buflen = PAGE_CACHE_ALIGN(len + offset); 742 buflen = PAGE_ALIGN(len + offset);
796 page_count = buflen >> PAGE_CACHE_SHIFT; 743 page_count = buflen >> PAGE_SHIFT;
797 744
798 /* Free any previous set of page pointers */ 745 /* Free any previous set of page pointers */
799 if (bp->b_pages) 746 if (bp->b_pages)
@@ -810,13 +757,12 @@ xfs_buf_associate_memory(
810 757
811 for (i = 0; i < bp->b_page_count; i++) { 758 for (i = 0; i < bp->b_page_count; i++) {
812 bp->b_pages[i] = mem_to_page((void *)pageaddr); 759 bp->b_pages[i] = mem_to_page((void *)pageaddr);
813 pageaddr += PAGE_CACHE_SIZE; 760 pageaddr += PAGE_SIZE;
814 } 761 }
815 762
816 bp->b_count_desired = len; 763 bp->b_count_desired = len;
817 bp->b_buffer_length = buflen; 764 bp->b_buffer_length = buflen;
818 bp->b_flags |= XBF_MAPPED; 765 bp->b_flags |= XBF_MAPPED;
819 bp->b_flags &= ~_XBF_PAGE_LOCKED;
820 766
821 return 0; 767 return 0;
822} 768}
@@ -923,20 +869,7 @@ xfs_buf_rele(
923 869
924 870
925/* 871/*
926 * Mutual exclusion on buffers. Locking model: 872 * Lock a buffer object, if it is not already locked.
927 *
928 * Buffers associated with inodes for which buffer locking
929 * is not enabled are not protected by semaphores, and are
930 * assumed to be exclusively owned by the caller. There is a
931 * spinlock in the buffer, used by the caller when concurrent
932 * access is possible.
933 */
934
935/*
936 * Locks a buffer object, if it is not already locked. Note that this in
937 * no way locks the underlying pages, so it is only useful for
938 * synchronizing concurrent use of buffer objects, not for synchronizing
939 * independent access to the underlying pages.
940 * 873 *
941 * If we come across a stale, pinned, locked buffer, we know that we are 874 * If we come across a stale, pinned, locked buffer, we know that we are
942 * being asked to lock a buffer that has been reallocated. Because it is 875 * being asked to lock a buffer that has been reallocated. Because it is
@@ -970,10 +903,7 @@ xfs_buf_lock_value(
970} 903}
971 904
972/* 905/*
973 * Locks a buffer object. 906 * Lock a buffer object.
974 * Note that this in no way locks the underlying pages, so it is only
975 * useful for synchronizing concurrent use of buffer objects, not for
976 * synchronizing independent access to the underlying pages.
977 * 907 *
978 * If we come across a stale, pinned, locked buffer, we know that we 908 * If we come across a stale, pinned, locked buffer, we know that we
979 * are being asked to lock a buffer that has been reallocated. Because 909 * are being asked to lock a buffer that has been reallocated. Because
@@ -1246,10 +1176,8 @@ _xfs_buf_ioend(
1246 xfs_buf_t *bp, 1176 xfs_buf_t *bp,
1247 int schedule) 1177 int schedule)
1248{ 1178{
1249 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { 1179 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1250 bp->b_flags &= ~_XBF_PAGE_LOCKED;
1251 xfs_buf_ioend(bp, schedule); 1180 xfs_buf_ioend(bp, schedule);
1252 }
1253} 1181}
1254 1182
1255STATIC void 1183STATIC void
@@ -1258,35 +1186,12 @@ xfs_buf_bio_end_io(
1258 int error) 1186 int error)
1259{ 1187{
1260 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; 1188 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1261 unsigned int blocksize = bp->b_target->bt_bsize;
1262 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1263 1189
1264 xfs_buf_ioerror(bp, -error); 1190 xfs_buf_ioerror(bp, -error);
1265 1191
1266 if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) 1192 if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1267 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); 1193 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1268 1194
1269 do {
1270 struct page *page = bvec->bv_page;
1271
1272 ASSERT(!PagePrivate(page));
1273 if (unlikely(bp->b_error)) {
1274 if (bp->b_flags & XBF_READ)
1275 ClearPageUptodate(page);
1276 } else if (blocksize >= PAGE_CACHE_SIZE) {
1277 SetPageUptodate(page);
1278 } else if (!PagePrivate(page) &&
1279 (bp->b_flags & _XBF_PAGE_CACHE)) {
1280 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1281 }
1282
1283 if (--bvec >= bio->bi_io_vec)
1284 prefetchw(&bvec->bv_page->flags);
1285
1286 if (bp->b_flags & _XBF_PAGE_LOCKED)
1287 unlock_page(page);
1288 } while (bvec >= bio->bi_io_vec);
1289
1290 _xfs_buf_ioend(bp, 1); 1195 _xfs_buf_ioend(bp, 1);
1291 bio_put(bio); 1196 bio_put(bio);
1292} 1197}
@@ -1300,7 +1205,6 @@ _xfs_buf_ioapply(
1300 int offset = bp->b_offset; 1205 int offset = bp->b_offset;
1301 int size = bp->b_count_desired; 1206 int size = bp->b_count_desired;
1302 sector_t sector = bp->b_bn; 1207 sector_t sector = bp->b_bn;
1303 unsigned int blocksize = bp->b_target->bt_bsize;
1304 1208
1305 total_nr_pages = bp->b_page_count; 1209 total_nr_pages = bp->b_page_count;
1306 map_i = 0; 1210 map_i = 0;
@@ -1321,29 +1225,6 @@ _xfs_buf_ioapply(
1321 (bp->b_flags & XBF_READ_AHEAD) ? READA : READ; 1225 (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1322 } 1226 }
1323 1227
1324 /* Special code path for reading a sub page size buffer in --
1325 * we populate up the whole page, and hence the other metadata
1326 * in the same page. This optimization is only valid when the
1327 * filesystem block size is not smaller than the page size.
1328 */
1329 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1330 ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
1331 (XBF_READ|_XBF_PAGE_LOCKED)) &&
1332 (blocksize >= PAGE_CACHE_SIZE)) {
1333 bio = bio_alloc(GFP_NOIO, 1);
1334
1335 bio->bi_bdev = bp->b_target->bt_bdev;
1336 bio->bi_sector = sector - (offset >> BBSHIFT);
1337 bio->bi_end_io = xfs_buf_bio_end_io;
1338 bio->bi_private = bp;
1339
1340 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1341 size = 0;
1342
1343 atomic_inc(&bp->b_io_remaining);
1344
1345 goto submit_io;
1346 }
1347 1228
1348next_chunk: 1229next_chunk:
1349 atomic_inc(&bp->b_io_remaining); 1230 atomic_inc(&bp->b_io_remaining);
@@ -1357,8 +1238,9 @@ next_chunk:
1357 bio->bi_end_io = xfs_buf_bio_end_io; 1238 bio->bi_end_io = xfs_buf_bio_end_io;
1358 bio->bi_private = bp; 1239 bio->bi_private = bp;
1359 1240
1241
1360 for (; size && nr_pages; nr_pages--, map_i++) { 1242 for (; size && nr_pages; nr_pages--, map_i++) {
1361 int rbytes, nbytes = PAGE_CACHE_SIZE - offset; 1243 int rbytes, nbytes = PAGE_SIZE - offset;
1362 1244
1363 if (nbytes > size) 1245 if (nbytes > size)
1364 nbytes = size; 1246 nbytes = size;
@@ -1373,7 +1255,6 @@ next_chunk:
1373 total_nr_pages--; 1255 total_nr_pages--;
1374 } 1256 }
1375 1257
1376submit_io:
1377 if (likely(bio->bi_size)) { 1258 if (likely(bio->bi_size)) {
1378 if (xfs_buf_is_vmapped(bp)) { 1259 if (xfs_buf_is_vmapped(bp)) {
1379 flush_kernel_vmap_range(bp->b_addr, 1260 flush_kernel_vmap_range(bp->b_addr,
@@ -1383,18 +1264,7 @@ submit_io:
1383 if (size) 1264 if (size)
1384 goto next_chunk; 1265 goto next_chunk;
1385 } else { 1266 } else {
1386 /*
1387 * if we get here, no pages were added to the bio. However,
1388 * we can't just error out here - if the pages are locked then
1389 * we have to unlock them otherwise we can hang on a later
1390 * access to the page.
1391 */
1392 xfs_buf_ioerror(bp, EIO); 1267 xfs_buf_ioerror(bp, EIO);
1393 if (bp->b_flags & _XBF_PAGE_LOCKED) {
1394 int i;
1395 for (i = 0; i < bp->b_page_count; i++)
1396 unlock_page(bp->b_pages[i]);
1397 }
1398 bio_put(bio); 1268 bio_put(bio);
1399 } 1269 }
1400} 1270}
@@ -1458,8 +1328,8 @@ xfs_buf_offset(
1458 return XFS_BUF_PTR(bp) + offset; 1328 return XFS_BUF_PTR(bp) + offset;
1459 1329
1460 offset += bp->b_offset; 1330 offset += bp->b_offset;
1461 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT]; 1331 page = bp->b_pages[offset >> PAGE_SHIFT];
1462 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1)); 1332 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1463} 1333}
1464 1334
1465/* 1335/*
@@ -1481,9 +1351,9 @@ xfs_buf_iomove(
1481 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)]; 1351 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1482 cpoff = xfs_buf_poff(boff + bp->b_offset); 1352 cpoff = xfs_buf_poff(boff + bp->b_offset);
1483 csize = min_t(size_t, 1353 csize = min_t(size_t,
1484 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff); 1354 PAGE_SIZE-cpoff, bp->b_count_desired-boff);
1485 1355
1486 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); 1356 ASSERT(((csize + cpoff) <= PAGE_SIZE));
1487 1357
1488 switch (mode) { 1358 switch (mode) {
1489 case XBRW_ZERO: 1359 case XBRW_ZERO:
@@ -1596,7 +1466,6 @@ xfs_free_buftarg(
1596 xfs_flush_buftarg(btp, 1); 1466 xfs_flush_buftarg(btp, 1);
1597 if (mp->m_flags & XFS_MOUNT_BARRIER) 1467 if (mp->m_flags & XFS_MOUNT_BARRIER)
1598 xfs_blkdev_issue_flush(btp); 1468 xfs_blkdev_issue_flush(btp);
1599 iput(btp->bt_mapping->host);
1600 1469
1601 kthread_stop(btp->bt_task); 1470 kthread_stop(btp->bt_task);
1602 kmem_free(btp); 1471 kmem_free(btp);
@@ -1620,15 +1489,6 @@ xfs_setsize_buftarg_flags(
1620 return EINVAL; 1489 return EINVAL;
1621 } 1490 }
1622 1491
1623 if (verbose &&
1624 (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1625 printk(KERN_WARNING
1626 "XFS: %u byte sectors in use on device %s. "
1627 "This is suboptimal; %u or greater is ideal.\n",
1628 sectorsize, XFS_BUFTARG_NAME(btp),
1629 (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1630 }
1631
1632 return 0; 1492 return 0;
1633} 1493}
1634 1494
@@ -1643,7 +1503,7 @@ xfs_setsize_buftarg_early(
1643 struct block_device *bdev) 1503 struct block_device *bdev)
1644{ 1504{
1645 return xfs_setsize_buftarg_flags(btp, 1505 return xfs_setsize_buftarg_flags(btp,
1646 PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0); 1506 PAGE_SIZE, bdev_logical_block_size(bdev), 0);
1647} 1507}
1648 1508
1649int 1509int
@@ -1656,40 +1516,6 @@ xfs_setsize_buftarg(
1656} 1516}
1657 1517
1658STATIC int 1518STATIC int
1659xfs_mapping_buftarg(
1660 xfs_buftarg_t *btp,
1661 struct block_device *bdev)
1662{
1663 struct backing_dev_info *bdi;
1664 struct inode *inode;
1665 struct address_space *mapping;
1666 static const struct address_space_operations mapping_aops = {
1667 .migratepage = fail_migrate_page,
1668 };
1669
1670 inode = new_inode(bdev->bd_inode->i_sb);
1671 if (!inode) {
1672 printk(KERN_WARNING
1673 "XFS: Cannot allocate mapping inode for device %s\n",
1674 XFS_BUFTARG_NAME(btp));
1675 return ENOMEM;
1676 }
1677 inode->i_ino = get_next_ino();
1678 inode->i_mode = S_IFBLK;
1679 inode->i_bdev = bdev;
1680 inode->i_rdev = bdev->bd_dev;
1681 bdi = blk_get_backing_dev_info(bdev);
1682 if (!bdi)
1683 bdi = &default_backing_dev_info;
1684 mapping = &inode->i_data;
1685 mapping->a_ops = &mapping_aops;
1686 mapping->backing_dev_info = bdi;
1687 mapping_set_gfp_mask(mapping, GFP_NOFS);
1688 btp->bt_mapping = mapping;
1689 return 0;
1690}
1691
1692STATIC int
1693xfs_alloc_delwrite_queue( 1519xfs_alloc_delwrite_queue(
1694 xfs_buftarg_t *btp, 1520 xfs_buftarg_t *btp,
1695 const char *fsname) 1521 const char *fsname)
@@ -1717,12 +1543,14 @@ xfs_alloc_buftarg(
1717 btp->bt_mount = mp; 1543 btp->bt_mount = mp;
1718 btp->bt_dev = bdev->bd_dev; 1544 btp->bt_dev = bdev->bd_dev;
1719 btp->bt_bdev = bdev; 1545 btp->bt_bdev = bdev;
1546 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1547 if (!btp->bt_bdi)
1548 goto error;
1549
1720 INIT_LIST_HEAD(&btp->bt_lru); 1550 INIT_LIST_HEAD(&btp->bt_lru);
1721 spin_lock_init(&btp->bt_lru_lock); 1551 spin_lock_init(&btp->bt_lru_lock);
1722 if (xfs_setsize_buftarg_early(btp, bdev)) 1552 if (xfs_setsize_buftarg_early(btp, bdev))
1723 goto error; 1553 goto error;
1724 if (xfs_mapping_buftarg(btp, bdev))
1725 goto error;
1726 if (xfs_alloc_delwrite_queue(btp, fsname)) 1554 if (xfs_alloc_delwrite_queue(btp, fsname))
1727 goto error; 1555 goto error;
1728 btp->bt_shrinker.shrink = xfs_buftarg_shrink; 1556 btp->bt_shrinker.shrink = xfs_buftarg_shrink;
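
The hunks above drop the page-cache backing and add a heap-allocation fast path for buffers smaller than a page, falling back to alloc_page() when kmem_alloc() returns memory that straddles a page boundary. A minimal userspace sketch of that page-crossing check follows; buf_alloc_small(), PAGE_SZ and PAGE_MSK are invented names for this illustration, not part of the patch.

/*
 * Illustrative sketch (not from the patch): a heap allocation is only
 * usable as a single-"page" buffer if it does not straddle a page
 * boundary; otherwise the caller falls back to a page allocator,
 * mirroring the use_alloc_page path in xfs_buf_allocate_memory().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ		4096UL
#define PAGE_MSK	(~(PAGE_SZ - 1))

/* Return heap memory if it fits in one page, else NULL (caller falls back). */
static void *buf_alloc_small(size_t len)
{
	void *addr;
	uintptr_t first, last;

	if (len >= PAGE_SZ)
		return NULL;		/* only the sub-page case is special */

	addr = malloc(len);
	if (!addr)
		return NULL;		/* low memory - use the page path */

	first = (uintptr_t)addr & PAGE_MSK;
	last  = ((uintptr_t)addr + len - 1) & PAGE_MSK;
	if (first != last) {
		/* allocation spans two pages - not usable for this trick */
		free(addr);
		return NULL;
	}
	return addr;
}

int main(void)
{
	void *p = buf_alloc_small(512);

	if (p) {
		printf("sub-page buffer at %p, offset in page %lu\n",
		       p, (unsigned long)((uintptr_t)p & (PAGE_SZ - 1)));
		free(p);
	} else {
		printf("fell back to page-sized allocation path\n");
	}
	return 0;
}

The same boundary test is what decides, in the patch, whether a buffer gets _XBF_KMEM or _XBF_PAGES backing.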
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index cbe65950e524..a9a1c4512645 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -61,30 +61,11 @@ typedef enum {
61#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */ 61#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
62 62
63/* flags used only internally */ 63/* flags used only internally */
64#define _XBF_PAGE_CACHE (1 << 17)/* backed by pagecache */
65#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */ 64#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */
66#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */ 65#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
66#define _XBF_KMEM (1 << 20)/* backed by heap memory */
67#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */ 67#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */
68 68
69/*
70 * Special flag for supporting metadata blocks smaller than a FSB.
71 *
72 * In this case we can have multiple xfs_buf_t on a single page and
73 * need to lock out concurrent xfs_buf_t readers as they only
74 * serialise access to the buffer.
75 *
76 * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
77 * between reads of the page. Hence we can have one thread read the
78 * page and modify it, but then race with another thread that thinks
79 * the page is not up-to-date and hence reads it again.
80 *
81 * The result is that the first modifcation to the page is lost.
82 * This sort of AGF/AGI reading race can happen when unlinking inodes
83 * that require truncation and results in the AGI unlinked list
84 * modifications being lost.
85 */
86#define _XBF_PAGE_LOCKED (1 << 22)
87
88typedef unsigned int xfs_buf_flags_t; 69typedef unsigned int xfs_buf_flags_t;
89 70
90#define XFS_BUF_FLAGS \ 71#define XFS_BUF_FLAGS \
@@ -100,12 +81,10 @@ typedef unsigned int xfs_buf_flags_t;
100 { XBF_LOCK, "LOCK" }, /* should never be set */\ 81 { XBF_LOCK, "LOCK" }, /* should never be set */\
101 { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\ 82 { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
102 { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\ 83 { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
103 { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \
104 { _XBF_PAGES, "PAGES" }, \ 84 { _XBF_PAGES, "PAGES" }, \
105 { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \ 85 { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
106 { _XBF_DELWRI_Q, "DELWRI_Q" }, \ 86 { _XBF_KMEM, "KMEM" }, \
107 { _XBF_PAGE_LOCKED, "PAGE_LOCKED" } 87 { _XBF_DELWRI_Q, "DELWRI_Q" }
108
109 88
110typedef enum { 89typedef enum {
111 XBT_FORCE_SLEEP = 0, 90 XBT_FORCE_SLEEP = 0,
@@ -120,7 +99,7 @@ typedef struct xfs_bufhash {
120typedef struct xfs_buftarg { 99typedef struct xfs_buftarg {
121 dev_t bt_dev; 100 dev_t bt_dev;
122 struct block_device *bt_bdev; 101 struct block_device *bt_bdev;
123 struct address_space *bt_mapping; 102 struct backing_dev_info *bt_bdi;
124 struct xfs_mount *bt_mount; 103 struct xfs_mount *bt_mount;
125 unsigned int bt_bsize; 104 unsigned int bt_bsize;
126 unsigned int bt_sshift; 105 unsigned int bt_sshift;
@@ -139,17 +118,6 @@ typedef struct xfs_buftarg {
139 unsigned int bt_lru_nr; 118 unsigned int bt_lru_nr;
140} xfs_buftarg_t; 119} xfs_buftarg_t;
141 120
142/*
143 * xfs_buf_t: Buffer structure for pagecache-based buffers
144 *
145 * This buffer structure is used by the pagecache buffer management routines
146 * to refer to an assembly of pages forming a logical buffer.
147 *
148 * The buffer structure is used on a temporary basis only, and discarded when
149 * released. The real data storage is recorded in the pagecache. Buffers are
150 * hashed to the block device on which the file system resides.
151 */
152
153struct xfs_buf; 121struct xfs_buf;
154typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); 122typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
155 123
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index a55c1b46b219..52aadfbed132 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -896,6 +896,7 @@ xfs_file_fallocate(
896 xfs_flock64_t bf; 896 xfs_flock64_t bf;
897 xfs_inode_t *ip = XFS_I(inode); 897 xfs_inode_t *ip = XFS_I(inode);
898 int cmd = XFS_IOC_RESVSP; 898 int cmd = XFS_IOC_RESVSP;
899 int attr_flags = XFS_ATTR_NOLOCK;
899 900
900 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 901 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
901 return -EOPNOTSUPP; 902 return -EOPNOTSUPP;
@@ -918,7 +919,10 @@ xfs_file_fallocate(
918 goto out_unlock; 919 goto out_unlock;
919 } 920 }
920 921
921 error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK); 922 if (file->f_flags & O_DSYNC)
923 attr_flags |= XFS_ATTR_SYNC;
924
925 error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
922 if (error) 926 if (error)
923 goto out_unlock; 927 goto out_unlock;
924 928
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 0ca0e3c024d7..acca2c5ca3fa 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -624,6 +624,10 @@ xfs_ioc_space(
624 624
625 if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) 625 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
626 attr_flags |= XFS_ATTR_NONBLOCK; 626 attr_flags |= XFS_ATTR_NONBLOCK;
627
628 if (filp->f_flags & O_DSYNC)
629 attr_flags |= XFS_ATTR_SYNC;
630
627 if (ioflags & IO_INVIS) 631 if (ioflags & IO_INVIS)
628 attr_flags |= XFS_ATTR_DMI; 632 attr_flags |= XFS_ATTR_DMI;
629 633
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 818c4cf2de86..1ba5c451da36 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1078,7 +1078,7 @@ xfs_fs_write_inode(
1078 error = 0; 1078 error = 0;
1079 goto out_unlock; 1079 goto out_unlock;
1080 } 1080 }
1081 error = xfs_iflush(ip, 0); 1081 error = xfs_iflush(ip, SYNC_TRYLOCK);
1082 } 1082 }
1083 1083
1084 out_unlock: 1084 out_unlock:
@@ -1539,10 +1539,14 @@ xfs_fs_fill_super(
1539 if (error) 1539 if (error)
1540 goto out_free_sb; 1540 goto out_free_sb;
1541 1541
1542 error = xfs_mountfs(mp); 1542 /*
1543 if (error) 1543 * we must configure the block size in the superblock before we run the
1544 goto out_filestream_unmount; 1544 * full mount process as the mount process can lookup and cache inodes.
1545 1545 * For the same reason we must also initialise the syncd and register
1546 * the inode cache shrinker so that inodes can be reclaimed during
1547 * operations like a quotacheck that iterate all inodes in the
1548 * filesystem.
1549 */
1546 sb->s_magic = XFS_SB_MAGIC; 1550 sb->s_magic = XFS_SB_MAGIC;
1547 sb->s_blocksize = mp->m_sb.sb_blocksize; 1551 sb->s_blocksize = mp->m_sb.sb_blocksize;
1548 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; 1552 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
@@ -1550,6 +1554,16 @@ xfs_fs_fill_super(
1550 sb->s_time_gran = 1; 1554 sb->s_time_gran = 1;
1551 set_posix_acl_flag(sb); 1555 set_posix_acl_flag(sb);
1552 1556
1557 error = xfs_syncd_init(mp);
1558 if (error)
1559 goto out_filestream_unmount;
1560
1561 xfs_inode_shrinker_register(mp);
1562
1563 error = xfs_mountfs(mp);
1564 if (error)
1565 goto out_syncd_stop;
1566
1553 root = igrab(VFS_I(mp->m_rootip)); 1567 root = igrab(VFS_I(mp->m_rootip));
1554 if (!root) { 1568 if (!root) {
1555 error = ENOENT; 1569 error = ENOENT;
@@ -1565,14 +1579,11 @@ xfs_fs_fill_super(
1565 goto fail_vnrele; 1579 goto fail_vnrele;
1566 } 1580 }
1567 1581
1568 error = xfs_syncd_init(mp);
1569 if (error)
1570 goto fail_vnrele;
1571
1572 xfs_inode_shrinker_register(mp);
1573
1574 return 0; 1582 return 0;
1575 1583
1584 out_syncd_stop:
1585 xfs_inode_shrinker_unregister(mp);
1586 xfs_syncd_stop(mp);
1576 out_filestream_unmount: 1587 out_filestream_unmount:
1577 xfs_filestream_unmount(mp); 1588 xfs_filestream_unmount(mp);
1578 out_free_sb: 1589 out_free_sb:
@@ -1596,6 +1607,9 @@ xfs_fs_fill_super(
1596 } 1607 }
1597 1608
1598 fail_unmount: 1609 fail_unmount:
1610 xfs_inode_shrinker_unregister(mp);
1611 xfs_syncd_stop(mp);
1612
1599 /* 1613 /*
1600 * Blow away any referenced inode in the filestreams cache. 1614 * Blow away any referenced inode in the filestreams cache.
1601 * This can and will cause log traffic as inodes go inactive 1615 * This can and will cause log traffic as inodes go inactive
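
The xfs_super.c hunks above move xfs_syncd_init() and the inode shrinker registration ahead of xfs_mountfs() and add an out_syncd_stop label, so the error path tears things down in reverse order of setup. A compact sketch of that goto-unwind pattern, using invented step_a/step_b/step_c helpers purely for illustration:

/*
 * Illustrative sketch (not from the patch): kernel-style goto unwind.
 * Each step is undone in reverse order of initialisation when a later
 * step fails, which is the discipline the reordered fill_super follows.
 */
#include <stdio.h>

static int step_a_init(void) { puts("A init"); return 0; }
static void step_a_exit(void) { puts("A exit"); }
static int step_b_init(void) { puts("B init"); return 0; }	/* like xfs_syncd_init() */
static void step_b_exit(void) { puts("B exit"); }
static int step_c_init(void) { puts("C init"); return -1; }	/* like xfs_mountfs(), failing */

static int fill_super_like(void)
{
	int error;

	error = step_a_init();
	if (error)
		goto out;
	error = step_b_init();
	if (error)
		goto out_a;
	error = step_c_init();
	if (error)
		goto out_b;	/* unwind B, then A */
	return 0;

out_b:
	step_b_exit();
out_a:
	step_a_exit();
out:
	return error;
}

int main(void)
{
	return fill_super_like() ? 1 : 0;
}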
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 6c10f1d2e3d3..594cd822d84d 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -761,8 +761,10 @@ xfs_reclaim_inode(
761 struct xfs_perag *pag, 761 struct xfs_perag *pag,
762 int sync_mode) 762 int sync_mode)
763{ 763{
764 int error = 0; 764 int error;
765 765
766restart:
767 error = 0;
766 xfs_ilock(ip, XFS_ILOCK_EXCL); 768 xfs_ilock(ip, XFS_ILOCK_EXCL);
767 if (!xfs_iflock_nowait(ip)) { 769 if (!xfs_iflock_nowait(ip)) {
768 if (!(sync_mode & SYNC_WAIT)) 770 if (!(sync_mode & SYNC_WAIT))
@@ -788,9 +790,31 @@ xfs_reclaim_inode(
788 if (xfs_inode_clean(ip)) 790 if (xfs_inode_clean(ip))
789 goto reclaim; 791 goto reclaim;
790 792
791 /* Now we have an inode that needs flushing */ 793 /*
792 error = xfs_iflush(ip, sync_mode); 794 * Now we have an inode that needs flushing.
795 *
796 * We do a nonblocking flush here even if we are doing a SYNC_WAIT
797 * reclaim as we can deadlock with inode cluster removal.
798 * xfs_ifree_cluster() can lock the inode buffer before it locks the
799 * ip->i_lock, and we are doing the exact opposite here. As a result,
800 * doing a blocking xfs_itobp() to get the cluster buffer will result
801 * in an ABBA deadlock with xfs_ifree_cluster().
802 *
 803 * As xfs_ifree_cluster() must gather all inodes that are active in the
804 * cache to mark them stale, if we hit this case we don't actually want
805 * to do IO here - we want the inode marked stale so we can simply
806 * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush,
807 * just unlock the inode, back off and try again. Hopefully the next
808 * pass through will see the stale flag set on the inode.
809 */
810 error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
793 if (sync_mode & SYNC_WAIT) { 811 if (sync_mode & SYNC_WAIT) {
812 if (error == EAGAIN) {
813 xfs_iunlock(ip, XFS_ILOCK_EXCL);
814 /* backoff longer than in xfs_ifree_cluster */
815 delay(2);
816 goto restart;
817 }
794 xfs_iflock(ip); 818 xfs_iflock(ip);
795 goto reclaim; 819 goto reclaim;
796 } 820 }
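
The comment in the hunk above describes the avoidance strategy: do a trylock-style flush, and on EAGAIN during a SYNC_WAIT reclaim drop the lock, back off and restart rather than block in the wrong lock order. A self-contained pthreads sketch of that trylock + back-off + restart pattern (locks and thread roles are invented for the example; build with -pthread):

/*
 * Illustrative sketch (not from the patch): thread 1 takes lock_a then
 * tries lock_b; if the trylock fails it drops lock_a, sleeps briefly
 * and retries, so it can never deadlock against a thread that holds
 * lock_b and is waiting for lock_a (the ABBA case described above).
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void locked_work_ab(void)
{
	for (;;) {
		pthread_mutex_lock(&lock_a);
		if (pthread_mutex_trylock(&lock_b) == 0)
			break;			/* got both, in A-then-B order */
		/* contention: drop A, back off, and restart instead of blocking */
		pthread_mutex_unlock(&lock_a);
		usleep(2000);
	}
	puts("worker holds A and B");
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

static void *other_thread(void *arg)
{
	(void)arg;
	/* This thread legitimately takes B first, then A. */
	pthread_mutex_lock(&lock_b);
	usleep(5000);
	pthread_mutex_lock(&lock_a);
	puts("other thread holds B and A");
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, other_thread, NULL);
	locked_work_ab();
	pthread_join(t, NULL);
	return 0;
}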
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index da871f532236..742c8330994a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2835,7 +2835,7 @@ xfs_iflush(
2835 * Get the buffer containing the on-disk inode. 2835 * Get the buffer containing the on-disk inode.
2836 */ 2836 */
2837 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 2837 error = xfs_itobp(mp, NULL, ip, &dip, &bp,
2838 (flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK); 2838 (flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);
2839 if (error || !bp) { 2839 if (error || !bp) {
2840 xfs_ifunlock(ip); 2840 xfs_ifunlock(ip);
2841 return error; 2841 return error;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index fd4f398bd6f1..46cc40131d4a 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -760,11 +760,11 @@ xfs_inode_item_push(
 760 * Push the inode to its backing buffer. This will not remove the 760 * Push the inode to its backing buffer. This will not remove the
761 * inode from the AIL - a further push will be required to trigger a 761 * inode from the AIL - a further push will be required to trigger a
762 * buffer push. However, this allows all the dirty inodes to be pushed 762 * buffer push. However, this allows all the dirty inodes to be pushed
763 * to the buffer before it is pushed to disk. THe buffer IO completion 763 * to the buffer before it is pushed to disk. The buffer IO completion
764 * will pull th einode from the AIL, mark it clean and unlock the flush 764 * will pull the inode from the AIL, mark it clean and unlock the flush
765 * lock. 765 * lock.
766 */ 766 */
767 (void) xfs_iflush(ip, 0); 767 (void) xfs_iflush(ip, SYNC_TRYLOCK);
768 xfs_iunlock(ip, XFS_ILOCK_SHARED); 768 xfs_iunlock(ip, XFS_ILOCK_SHARED);
769} 769}
770 770
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 3bea66132334..03b3b7f85a3b 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -383,7 +383,8 @@ xfs_trans_read_buf(
383 bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK); 383 bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
384 if (bp == NULL) { 384 if (bp == NULL) {
385 *bpp = NULL; 385 *bpp = NULL;
386 return 0; 386 return (flags & XBF_TRYLOCK) ?
387 0 : XFS_ERROR(ENOMEM);
387 } 388 }
388 if (XFS_BUF_GETERROR(bp) != 0) { 389 if (XFS_BUF_GETERROR(bp) != 0) {
389 XFS_BUF_SUPER_STALE(bp); 390 XFS_BUF_SUPER_STALE(bp);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 37d8146ee15b..c48b4217ec47 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2831,7 +2831,8 @@ xfs_change_file_space(
2831 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC; 2831 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
2832 2832
2833 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2833 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2834 xfs_trans_set_sync(tp); 2834 if (attr_flags & XFS_ATTR_SYNC)
2835 xfs_trans_set_sync(tp);
2835 2836
2836 error = xfs_trans_commit(tp, 0); 2837 error = xfs_trans_commit(tp, 0);
2837 2838
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index f6702927eee4..3bcd23353d6c 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -18,6 +18,7 @@ int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
18#define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ 18#define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */
19#define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ 19#define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */
20#define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */ 20#define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */
21#define XFS_ATTR_SYNC 0x10 /* synchronous operation required */
21 22
22int xfs_readlink(struct xfs_inode *ip, char *link); 23int xfs_readlink(struct xfs_inode *ip, char *link);
23int xfs_release(struct xfs_inode *ip); 24int xfs_release(struct xfs_inode *ip);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b677bd77f2d6..52f283c1edb2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -357,6 +357,8 @@ struct inodes_stat_t {
357#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ 357#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
358#define FS_EXTENT_FL 0x00080000 /* Extents */ 358#define FS_EXTENT_FL 0x00080000 /* Extents */
359#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ 359#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
360#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
361#define FS_COW_FL 0x02000000 /* Cow file */
360#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ 362#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
361 363
362#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ 364#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
diff --git a/include/linux/input.h b/include/linux/input.h
index 056ae8a5bd9b..f3a7794a18c4 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -664,6 +664,13 @@ struct input_keymap_entry {
664#define KEY_TOUCHPAD_ON 0x213 664#define KEY_TOUCHPAD_ON 0x213
665#define KEY_TOUCHPAD_OFF 0x214 665#define KEY_TOUCHPAD_OFF 0x214
666 666
667#define KEY_CAMERA_ZOOMIN 0x215
668#define KEY_CAMERA_ZOOMOUT 0x216
669#define KEY_CAMERA_UP 0x217
670#define KEY_CAMERA_DOWN 0x218
671#define KEY_CAMERA_LEFT 0x219
672#define KEY_CAMERA_RIGHT 0x21a
673
667#define BTN_TRIGGER_HAPPY 0x2c0 674#define BTN_TRIGGER_HAPPY 0x2c0
668#define BTN_TRIGGER_HAPPY1 0x2c0 675#define BTN_TRIGGER_HAPPY1 0x2c0
669#define BTN_TRIGGER_HAPPY2 0x2c1 676#define BTN_TRIGGER_HAPPY2 0x2c1
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 5d876c9b3a3d..b3741c83774c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -135,7 +135,7 @@ struct msi_desc;
135 * struct irq_data - per irq and irq chip data passed down to chip functions 135 * struct irq_data - per irq and irq chip data passed down to chip functions
136 * @irq: interrupt number 136 * @irq: interrupt number
137 * @node: node index useful for balancing 137 * @node: node index useful for balancing
138 * @state_use_accessor: status information for irq chip functions. 138 * @state_use_accessors: status information for irq chip functions.
139 * Use accessor functions to deal with it 139 * Use accessor functions to deal with it
140 * @chip: low level interrupt hardware access 140 * @chip: low level interrupt hardware access
141 * @handler_data: per-IRQ data for the irq_chip methods 141 * @handler_data: per-IRQ data for the irq_chip methods
@@ -174,6 +174,9 @@ struct irq_data {
174 * from suspend 174 * from suspend
 175 * IRQD_MOVE_PCNTXT - Interrupt can be moved in process 175 * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
176 * context 176 * context
177 * IRQD_IRQ_DISABLED - Disabled state of the interrupt
178 * IRQD_IRQ_MASKED - Masked state of the interrupt
179 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
177 */ 180 */
178enum { 181enum {
179 IRQD_TRIGGER_MASK = 0xf, 182 IRQD_TRIGGER_MASK = 0xf,
@@ -184,6 +187,9 @@ enum {
184 IRQD_LEVEL = (1 << 13), 187 IRQD_LEVEL = (1 << 13),
185 IRQD_WAKEUP_STATE = (1 << 14), 188 IRQD_WAKEUP_STATE = (1 << 14),
186 IRQD_MOVE_PCNTXT = (1 << 15), 189 IRQD_MOVE_PCNTXT = (1 << 15),
190 IRQD_IRQ_DISABLED = (1 << 16),
191 IRQD_IRQ_MASKED = (1 << 17),
192 IRQD_IRQ_INPROGRESS = (1 << 18),
187}; 193};
188 194
189static inline bool irqd_is_setaffinity_pending(struct irq_data *d) 195static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -206,6 +212,11 @@ static inline bool irqd_affinity_was_set(struct irq_data *d)
206 return d->state_use_accessors & IRQD_AFFINITY_SET; 212 return d->state_use_accessors & IRQD_AFFINITY_SET;
207} 213}
208 214
215static inline void irqd_mark_affinity_was_set(struct irq_data *d)
216{
217 d->state_use_accessors |= IRQD_AFFINITY_SET;
218}
219
209static inline u32 irqd_get_trigger_type(struct irq_data *d) 220static inline u32 irqd_get_trigger_type(struct irq_data *d)
210{ 221{
211 return d->state_use_accessors & IRQD_TRIGGER_MASK; 222 return d->state_use_accessors & IRQD_TRIGGER_MASK;
@@ -235,6 +246,36 @@ static inline bool irqd_can_move_in_process_context(struct irq_data *d)
235 return d->state_use_accessors & IRQD_MOVE_PCNTXT; 246 return d->state_use_accessors & IRQD_MOVE_PCNTXT;
236} 247}
237 248
249static inline bool irqd_irq_disabled(struct irq_data *d)
250{
251 return d->state_use_accessors & IRQD_IRQ_DISABLED;
252}
253
254static inline bool irqd_irq_masked(struct irq_data *d)
255{
256 return d->state_use_accessors & IRQD_IRQ_MASKED;
257}
258
259static inline bool irqd_irq_inprogress(struct irq_data *d)
260{
261 return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
262}
263
264/*
265 * Functions for chained handlers which can be enabled/disabled by the
266 * standard disable_irq/enable_irq calls. Must be called with
267 * irq_desc->lock held.
268 */
269static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
270{
271 d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
272}
273
274static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
275{
276 d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
277}
278
238/** 279/**
239 * struct irq_chip - hardware interrupt chip descriptor 280 * struct irq_chip - hardware interrupt chip descriptor
240 * 281 *
@@ -271,6 +312,8 @@ static inline bool irqd_can_move_in_process_context(struct irq_data *d)
271 * @irq_set_wake: enable/disable power-management wake-on of an IRQ 312 * @irq_set_wake: enable/disable power-management wake-on of an IRQ
272 * @irq_bus_lock: function to lock access to slow bus (i2c) chips 313 * @irq_bus_lock: function to lock access to slow bus (i2c) chips
273 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips 314 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
315 * @irq_cpu_online: configure an interrupt source for a secondary CPU
316 * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
274 * @irq_print_chip: optional to print special chip info in show_interrupts 317 * @irq_print_chip: optional to print special chip info in show_interrupts
275 * @flags: chip specific flags 318 * @flags: chip specific flags
276 * 319 *
@@ -319,6 +362,9 @@ struct irq_chip {
319 void (*irq_bus_lock)(struct irq_data *data); 362 void (*irq_bus_lock)(struct irq_data *data);
320 void (*irq_bus_sync_unlock)(struct irq_data *data); 363 void (*irq_bus_sync_unlock)(struct irq_data *data);
321 364
365 void (*irq_cpu_online)(struct irq_data *data);
366 void (*irq_cpu_offline)(struct irq_data *data);
367
322 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); 368 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
323 369
324 unsigned long flags; 370 unsigned long flags;
@@ -335,11 +381,14 @@ struct irq_chip {
335 * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() 381 * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
336 * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled 382 * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
337 * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path 383 * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
384 * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
385 * when irq enabled
338 */ 386 */
339enum { 387enum {
340 IRQCHIP_SET_TYPE_MASKED = (1 << 0), 388 IRQCHIP_SET_TYPE_MASKED = (1 << 0),
341 IRQCHIP_EOI_IF_HANDLED = (1 << 1), 389 IRQCHIP_EOI_IF_HANDLED = (1 << 1),
342 IRQCHIP_MASK_ON_SUSPEND = (1 << 2), 390 IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
391 IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
343}; 392};
344 393
345/* This include will go away once we isolated irq_desc usage to core code */ 394/* This include will go away once we isolated irq_desc usage to core code */
@@ -364,6 +413,10 @@ struct irqaction;
364extern int setup_irq(unsigned int irq, struct irqaction *new); 413extern int setup_irq(unsigned int irq, struct irqaction *new);
365extern void remove_irq(unsigned int irq, struct irqaction *act); 414extern void remove_irq(unsigned int irq, struct irqaction *act);
366 415
416extern void irq_cpu_online(void);
417extern void irq_cpu_offline(void);
418extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
419
367#ifdef CONFIG_GENERIC_HARDIRQS 420#ifdef CONFIG_GENERIC_HARDIRQS
368 421
369#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) 422#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
@@ -380,9 +433,6 @@ static inline void irq_move_masked_irq(struct irq_data *data) { }
380 433
381extern int no_irq_affinity; 434extern int no_irq_affinity;
382 435
383/* Handle irq action chains: */
384extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
385
386/* 436/*
387 * Built-in IRQ handlers for various IRQ types, 437 * Built-in IRQ handlers for various IRQ types,
388 * callable via desc->handle_irq() 438 * callable via desc->handle_irq()
@@ -390,6 +440,7 @@ extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
390extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); 440extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
391extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); 441extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
392extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); 442extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
443extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
393extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); 444extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
394extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); 445extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
395extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 446extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h
index 6e4f77ef4d20..b31843075198 100644
--- a/include/linux/mfd/ab8500.h
+++ b/include/linux/mfd/ab8500.h
@@ -74,6 +74,45 @@
74#define AB8500_INT_ACC_DETECT_21DB_F 37 74#define AB8500_INT_ACC_DETECT_21DB_F 37
75#define AB8500_INT_ACC_DETECT_21DB_R 38 75#define AB8500_INT_ACC_DETECT_21DB_R 38
76#define AB8500_INT_GP_SW_ADC_CONV_END 39 76#define AB8500_INT_GP_SW_ADC_CONV_END 39
77#define AB8500_INT_ACC_DETECT_1DB_F 33
78#define AB8500_INT_ACC_DETECT_1DB_R 34
79#define AB8500_INT_ACC_DETECT_22DB_F 35
80#define AB8500_INT_ACC_DETECT_22DB_R 36
81#define AB8500_INT_ACC_DETECT_21DB_F 37
82#define AB8500_INT_ACC_DETECT_21DB_R 38
83#define AB8500_INT_GP_SW_ADC_CONV_END 39
84#define AB8500_INT_GPIO6R 40
85#define AB8500_INT_GPIO7R 41
86#define AB8500_INT_GPIO8R 42
87#define AB8500_INT_GPIO9R 43
88#define AB8500_INT_GPIO10R 44
89#define AB8500_INT_GPIO11R 45
90#define AB8500_INT_GPIO12R 46
91#define AB8500_INT_GPIO13R 47
92#define AB8500_INT_GPIO24R 48
93#define AB8500_INT_GPIO25R 49
94#define AB8500_INT_GPIO36R 50
95#define AB8500_INT_GPIO37R 51
96#define AB8500_INT_GPIO38R 52
97#define AB8500_INT_GPIO39R 53
98#define AB8500_INT_GPIO40R 54
99#define AB8500_INT_GPIO41R 55
100#define AB8500_INT_GPIO6F 56
101#define AB8500_INT_GPIO7F 57
102#define AB8500_INT_GPIO8F 58
103#define AB8500_INT_GPIO9F 59
104#define AB8500_INT_GPIO10F 60
105#define AB8500_INT_GPIO11F 61
106#define AB8500_INT_GPIO12F 62
107#define AB8500_INT_GPIO13F 63
108#define AB8500_INT_GPIO24F 64
109#define AB8500_INT_GPIO25F 65
110#define AB8500_INT_GPIO36F 66
111#define AB8500_INT_GPIO37F 67
112#define AB8500_INT_GPIO38F 68
113#define AB8500_INT_GPIO39F 69
114#define AB8500_INT_GPIO40F 70
115#define AB8500_INT_GPIO41F 71
77#define AB8500_INT_ADP_SOURCE_ERROR 72 116#define AB8500_INT_ADP_SOURCE_ERROR 72
78#define AB8500_INT_ADP_SINK_ERROR 73 117#define AB8500_INT_ADP_SINK_ERROR 73
79#define AB8500_INT_ADP_PROBE_PLUG 74 118#define AB8500_INT_ADP_PROBE_PLUG 74
@@ -141,6 +180,7 @@ struct ab8500 {
141 180
142struct regulator_reg_init; 181struct regulator_reg_init;
143struct regulator_init_data; 182struct regulator_init_data;
183struct ab8500_gpio_platform_data;
144 184
145/** 185/**
146 * struct ab8500_platform_data - AB8500 platform data 186 * struct ab8500_platform_data - AB8500 platform data
@@ -158,6 +198,7 @@ struct ab8500_platform_data {
158 struct ab8500_regulator_reg_init *regulator_reg_init; 198 struct ab8500_regulator_reg_init *regulator_reg_init;
159 int num_regulator; 199 int num_regulator;
160 struct regulator_init_data *regulator; 200 struct regulator_init_data *regulator;
201 struct ab8500_gpio_platform_data *gpio;
161}; 202};
162 203
163extern int __devinit ab8500_init(struct ab8500 *ab8500); 204extern int __devinit ab8500_init(struct ab8500 *ab8500);
diff --git a/include/linux/mfd/ab8500/gpio.h b/include/linux/mfd/ab8500/gpio.h
new file mode 100644
index 000000000000..488a8c920a29
--- /dev/null
+++ b/include/linux/mfd/ab8500/gpio.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright ST-Ericsson 2010.
3 *
4 * Author: Bibek Basu <bibek.basu@stericsson.com>
5 * Licensed under GPLv2.
6 */
7
8#ifndef _AB8500_GPIO_H
9#define _AB8500_GPIO_H
10
11/*
12 * Platform data to register a block: only the initial gpio/irq number.
13 */
14
15struct ab8500_gpio_platform_data {
16 int gpio_base;
17 u32 irq_base;
18 u8 config_reg[7];
19};
20
21#endif /* _AB8500_GPIO_H */
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index 0e6dc3891942..c0f87da78f8a 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -40,6 +40,7 @@
40 40
41/* events the user application reading /dev/sonypi can use */ 41/* events the user application reading /dev/sonypi can use */
42 42
43#define SONYPI_EVENT_IGNORE 0
43#define SONYPI_EVENT_JOGDIAL_DOWN 1 44#define SONYPI_EVENT_JOGDIAL_DOWN 1
44#define SONYPI_EVENT_JOGDIAL_UP 2 45#define SONYPI_EVENT_JOGDIAL_UP 2
45#define SONYPI_EVENT_JOGDIAL_DOWN_PRESSED 3 46#define SONYPI_EVENT_JOGDIAL_DOWN_PRESSED 3
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
new file mode 100644
index 000000000000..f445cff66ab7
--- /dev/null
+++ b/include/trace/events/btrfs.h
@@ -0,0 +1,667 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM btrfs
3
4#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BTRFS_H
6
7#include <linux/writeback.h>
8#include <linux/tracepoint.h>
9
10struct btrfs_root;
11struct btrfs_fs_info;
12struct btrfs_inode;
13struct extent_map;
14struct btrfs_ordered_extent;
15struct btrfs_delayed_ref_node;
16struct btrfs_delayed_tree_ref;
17struct btrfs_delayed_data_ref;
18struct btrfs_delayed_ref_head;
19struct map_lookup;
20struct extent_buffer;
21
22#define show_ref_type(type) \
23 __print_symbolic(type, \
24 { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
25 { BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
26 { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
27 { BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
28 { BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
29
30#define __show_root_type(obj) \
31 __print_symbolic(obj, \
32 { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
33 { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
34 { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
35 { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \
36 { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \
37 { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
38 { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
39 { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
40 { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
41 { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
42
43#define show_root_type(obj) \
44 obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
45 (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
46
47TRACE_EVENT(btrfs_transaction_commit,
48
49 TP_PROTO(struct btrfs_root *root),
50
51 TP_ARGS(root),
52
53 TP_STRUCT__entry(
54 __field( u64, generation )
55 __field( u64, root_objectid )
56 ),
57
58 TP_fast_assign(
59 __entry->generation = root->fs_info->generation;
60 __entry->root_objectid = root->root_key.objectid;
61 ),
62
63 TP_printk("root = %llu(%s), gen = %llu",
64 show_root_type(__entry->root_objectid),
65 (unsigned long long)__entry->generation)
66);
67
68DECLARE_EVENT_CLASS(btrfs__inode,
69
70 TP_PROTO(struct inode *inode),
71
72 TP_ARGS(inode),
73
74 TP_STRUCT__entry(
75 __field( ino_t, ino )
76 __field( blkcnt_t, blocks )
77 __field( u64, disk_i_size )
78 __field( u64, generation )
79 __field( u64, last_trans )
80 __field( u64, logged_trans )
81 __field( u64, root_objectid )
82 ),
83
84 TP_fast_assign(
85 __entry->ino = inode->i_ino;
86 __entry->blocks = inode->i_blocks;
87 __entry->disk_i_size = BTRFS_I(inode)->disk_i_size;
88 __entry->generation = BTRFS_I(inode)->generation;
89 __entry->last_trans = BTRFS_I(inode)->last_trans;
90 __entry->logged_trans = BTRFS_I(inode)->logged_trans;
91 __entry->root_objectid =
92 BTRFS_I(inode)->root->root_key.objectid;
93 ),
94
95 TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
96 "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
97 show_root_type(__entry->root_objectid),
98 (unsigned long long)__entry->generation,
99 (unsigned long)__entry->ino,
100 (unsigned long long)__entry->blocks,
101 (unsigned long long)__entry->disk_i_size,
102 (unsigned long long)__entry->last_trans,
103 (unsigned long long)__entry->logged_trans)
104);
105
106DEFINE_EVENT(btrfs__inode, btrfs_inode_new,
107
108 TP_PROTO(struct inode *inode),
109
110 TP_ARGS(inode)
111);
112
113DEFINE_EVENT(btrfs__inode, btrfs_inode_request,
114
115 TP_PROTO(struct inode *inode),
116
117 TP_ARGS(inode)
118);
119
120DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
121
122 TP_PROTO(struct inode *inode),
123
124 TP_ARGS(inode)
125);
126
127#define __show_map_type(type) \
128 __print_symbolic(type, \
129 { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
130 { EXTENT_MAP_HOLE, "HOLE" }, \
131 { EXTENT_MAP_INLINE, "INLINE" }, \
132 { EXTENT_MAP_DELALLOC, "DELALLOC" })
133
134#define show_map_type(type) \
135 type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
136
137#define show_map_flags(flag) \
138 __print_flags(flag, "|", \
139 { EXTENT_FLAG_PINNED, "PINNED" }, \
140 { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
141 { EXTENT_FLAG_VACANCY, "VACANCY" }, \
142 { EXTENT_FLAG_PREALLOC, "PREALLOC" })
143
144TRACE_EVENT(btrfs_get_extent,
145
146 TP_PROTO(struct btrfs_root *root, struct extent_map *map),
147
148 TP_ARGS(root, map),
149
150 TP_STRUCT__entry(
151 __field( u64, root_objectid )
152 __field( u64, start )
153 __field( u64, len )
154 __field( u64, orig_start )
155 __field( u64, block_start )
156 __field( u64, block_len )
157 __field( unsigned long, flags )
158 __field( int, refs )
159 __field( unsigned int, compress_type )
160 ),
161
162 TP_fast_assign(
163 __entry->root_objectid = root->root_key.objectid;
164 __entry->start = map->start;
165 __entry->len = map->len;
166 __entry->orig_start = map->orig_start;
167 __entry->block_start = map->block_start;
168 __entry->block_len = map->block_len;
169 __entry->flags = map->flags;
170 __entry->refs = atomic_read(&map->refs);
171 __entry->compress_type = map->compress_type;
172 ),
173
174 TP_printk("root = %llu(%s), start = %llu, len = %llu, "
175 "orig_start = %llu, block_start = %llu(%s), "
176 "block_len = %llu, flags = %s, refs = %u, "
177 "compress_type = %u",
178 show_root_type(__entry->root_objectid),
179 (unsigned long long)__entry->start,
180 (unsigned long long)__entry->len,
181 (unsigned long long)__entry->orig_start,
182 show_map_type(__entry->block_start),
183 (unsigned long long)__entry->block_len,
184 show_map_flags(__entry->flags),
185 __entry->refs, __entry->compress_type)
186);
187
188#define show_ordered_flags(flags) \
189 __print_symbolic(flags, \
190 { BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
191 { BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
192 { BTRFS_ORDERED_NOCOW, "NOCOW" }, \
193 { BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
194 { BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
195 { BTRFS_ORDERED_DIRECT, "DIRECT" })
196
197DECLARE_EVENT_CLASS(btrfs__ordered_extent,
198
199 TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
200
201 TP_ARGS(inode, ordered),
202
203 TP_STRUCT__entry(
204 __field( ino_t, ino )
205 __field( u64, file_offset )
206 __field( u64, start )
207 __field( u64, len )
208 __field( u64, disk_len )
209 __field( u64, bytes_left )
210 __field( unsigned long, flags )
211 __field( int, compress_type )
212 __field( int, refs )
213 __field( u64, root_objectid )
214 ),
215
216 TP_fast_assign(
217 __entry->ino = inode->i_ino;
218 __entry->file_offset = ordered->file_offset;
219 __entry->start = ordered->start;
220 __entry->len = ordered->len;
221 __entry->disk_len = ordered->disk_len;
222 __entry->bytes_left = ordered->bytes_left;
223 __entry->flags = ordered->flags;
224 __entry->compress_type = ordered->compress_type;
225 __entry->refs = atomic_read(&ordered->refs);
226 __entry->root_objectid =
227 BTRFS_I(inode)->root->root_key.objectid;
228 ),
229
230 TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, "
231 "start = %llu, len = %llu, disk_len = %llu, "
232 "bytes_left = %llu, flags = %s, compress_type = %d, "
233 "refs = %d",
234 show_root_type(__entry->root_objectid),
235 (unsigned long long)__entry->ino,
236 (unsigned long long)__entry->file_offset,
237 (unsigned long long)__entry->start,
238 (unsigned long long)__entry->len,
239 (unsigned long long)__entry->disk_len,
240 (unsigned long long)__entry->bytes_left,
241 show_ordered_flags(__entry->flags),
242 __entry->compress_type, __entry->refs)
243);
244
245DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
246
247 TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
248
249 TP_ARGS(inode, ordered)
250);
251
252DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
253
254 TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
255
256 TP_ARGS(inode, ordered)
257);
258
259DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
260
261 TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
262
263 TP_ARGS(inode, ordered)
264);
265
266DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
267
268 TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
269
270 TP_ARGS(inode, ordered)
271);
272
273DECLARE_EVENT_CLASS(btrfs__writepage,
274
275 TP_PROTO(struct page *page, struct inode *inode,
276 struct writeback_control *wbc),
277
278 TP_ARGS(page, inode, wbc),
279
280 TP_STRUCT__entry(
281 __field( ino_t, ino )
282 __field( pgoff_t, index )
283 __field( long, nr_to_write )
284 __field( long, pages_skipped )
285 __field( loff_t, range_start )
286 __field( loff_t, range_end )
287 __field( char, nonblocking )
288 __field( char, for_kupdate )
289 __field( char, for_reclaim )
290 __field( char, range_cyclic )
291 __field( pgoff_t, writeback_index )
292 __field( u64, root_objectid )
293 ),
294
295 TP_fast_assign(
296 __entry->ino = inode->i_ino;
297 __entry->index = page->index;
298 __entry->nr_to_write = wbc->nr_to_write;
299 __entry->pages_skipped = wbc->pages_skipped;
300 __entry->range_start = wbc->range_start;
301 __entry->range_end = wbc->range_end;
302 __entry->nonblocking = wbc->nonblocking;
303 __entry->for_kupdate = wbc->for_kupdate;
304 __entry->for_reclaim = wbc->for_reclaim;
305 __entry->range_cyclic = wbc->range_cyclic;
306 __entry->writeback_index = inode->i_mapping->writeback_index;
307 __entry->root_objectid =
308 BTRFS_I(inode)->root->root_key.objectid;
309 ),
310
311 TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
312 "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
313 "range_end = %llu, nonblocking = %d, for_kupdate = %d, "
314 "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
315 show_root_type(__entry->root_objectid),
316 (unsigned long)__entry->ino, __entry->index,
317 __entry->nr_to_write, __entry->pages_skipped,
318 __entry->range_start, __entry->range_end,
319 __entry->nonblocking, __entry->for_kupdate,
320 __entry->for_reclaim, __entry->range_cyclic,
321 (unsigned long)__entry->writeback_index)
322);
323
324DEFINE_EVENT(btrfs__writepage, __extent_writepage,
325
326 TP_PROTO(struct page *page, struct inode *inode,
327 struct writeback_control *wbc),
328
329 TP_ARGS(page, inode, wbc)
330);
331
332TRACE_EVENT(btrfs_writepage_end_io_hook,
333
334 TP_PROTO(struct page *page, u64 start, u64 end, int uptodate),
335
336 TP_ARGS(page, start, end, uptodate),
337
338 TP_STRUCT__entry(
339 __field( ino_t, ino )
340 __field( pgoff_t, index )
341 __field( u64, start )
342 __field( u64, end )
343 __field( int, uptodate )
344 __field( u64, root_objectid )
345 ),
346
347 TP_fast_assign(
348 __entry->ino = page->mapping->host->i_ino;
349 __entry->index = page->index;
350 __entry->start = start;
351 __entry->end = end;
352 __entry->uptodate = uptodate;
353 __entry->root_objectid =
354 BTRFS_I(page->mapping->host)->root->root_key.objectid;
355 ),
356
357 TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
358 "end = %llu, uptodate = %d",
359 show_root_type(__entry->root_objectid),
360 (unsigned long)__entry->ino, (unsigned long)__entry->index,
361 (unsigned long long)__entry->start,
362 (unsigned long long)__entry->end, __entry->uptodate)
363);
364
365TRACE_EVENT(btrfs_sync_file,
366
367 TP_PROTO(struct file *file, int datasync),
368
369 TP_ARGS(file, datasync),
370
371 TP_STRUCT__entry(
372 __field( ino_t, ino )
373 __field( ino_t, parent )
374 __field( int, datasync )
375 __field( u64, root_objectid )
376 ),
377
378 TP_fast_assign(
379 struct dentry *dentry = file->f_path.dentry;
380 struct inode *inode = dentry->d_inode;
381
382 __entry->ino = inode->i_ino;
383 __entry->parent = dentry->d_parent->d_inode->i_ino;
384 __entry->datasync = datasync;
385 __entry->root_objectid =
386 BTRFS_I(inode)->root->root_key.objectid;
387 ),
388
389 TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
390 show_root_type(__entry->root_objectid),
391 (unsigned long)__entry->ino, (unsigned long)__entry->parent,
392 __entry->datasync)
393);
394
395TRACE_EVENT(btrfs_sync_fs,
396
397 TP_PROTO(int wait),
398
399 TP_ARGS(wait),
400
401 TP_STRUCT__entry(
402 __field( int, wait )
403 ),
404
405 TP_fast_assign(
406 __entry->wait = wait;
407 ),
408
409 TP_printk("wait = %d", __entry->wait)
410);
411
412#define show_ref_action(action) \
413 __print_symbolic(action, \
414 { BTRFS_ADD_DELAYED_REF, "ADD_DELAYED_REF" }, \
415 { BTRFS_DROP_DELAYED_REF, "DROP_DELAYED_REF" }, \
416 { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" }, \
417 { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
418
419
420TRACE_EVENT(btrfs_delayed_tree_ref,
421
422 TP_PROTO(struct btrfs_delayed_ref_node *ref,
423 struct btrfs_delayed_tree_ref *full_ref,
424 int action),
425
426 TP_ARGS(ref, full_ref, action),
427
428 TP_STRUCT__entry(
429 __field( u64, bytenr )
430 __field( u64, num_bytes )
431 __field( int, action )
432 __field( u64, parent )
433 __field( u64, ref_root )
434 __field( int, level )
435 __field( int, type )
436 ),
437
438 TP_fast_assign(
439 __entry->bytenr = ref->bytenr;
440 __entry->num_bytes = ref->num_bytes;
441 __entry->action = action;
442 __entry->parent = full_ref->parent;
443 __entry->ref_root = full_ref->root;
444 __entry->level = full_ref->level;
445 __entry->type = ref->type;
446 ),
447
448 TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
449 "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
450 "type = %s",
451 (unsigned long long)__entry->bytenr,
452 (unsigned long long)__entry->num_bytes,
453 show_ref_action(__entry->action),
454 show_root_type(__entry->parent),
455 show_root_type(__entry->ref_root),
456 __entry->level, show_ref_type(__entry->type))
457);
458
459TRACE_EVENT(btrfs_delayed_data_ref,
460
461 TP_PROTO(struct btrfs_delayed_ref_node *ref,
462 struct btrfs_delayed_data_ref *full_ref,
463 int action),
464
465 TP_ARGS(ref, full_ref, action),
466
467 TP_STRUCT__entry(
468 __field( u64, bytenr )
469 __field( u64, num_bytes )
470 __field( int, action )
471 __field( u64, parent )
472 __field( u64, ref_root )
473 __field( u64, owner )
474 __field( u64, offset )
475 __field( int, type )
476 ),
477
478 TP_fast_assign(
479 __entry->bytenr = ref->bytenr;
480 __entry->num_bytes = ref->num_bytes;
481 __entry->action = action;
482 __entry->parent = full_ref->parent;
483 __entry->ref_root = full_ref->root;
484 __entry->owner = full_ref->objectid;
485 __entry->offset = full_ref->offset;
486 __entry->type = ref->type;
487 ),
488
489 TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
490 "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
491 "offset = %llu, type = %s",
492 (unsigned long long)__entry->bytenr,
493 (unsigned long long)__entry->num_bytes,
494 show_ref_action(__entry->action),
495 show_root_type(__entry->parent),
496 show_root_type(__entry->ref_root),
497 (unsigned long long)__entry->owner,
498 (unsigned long long)__entry->offset,
499 show_ref_type(__entry->type))
500);
501
502TRACE_EVENT(btrfs_delayed_ref_head,
503
504 TP_PROTO(struct btrfs_delayed_ref_node *ref,
505 struct btrfs_delayed_ref_head *head_ref,
506 int action),
507
508 TP_ARGS(ref, head_ref, action),
509
510 TP_STRUCT__entry(
511 __field( u64, bytenr )
512 __field( u64, num_bytes )
513 __field( int, action )
514 __field( int, is_data )
515 ),
516
517 TP_fast_assign(
518 __entry->bytenr = ref->bytenr;
519 __entry->num_bytes = ref->num_bytes;
520 __entry->action = action;
521 __entry->is_data = head_ref->is_data;
522 ),
523
524 TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
525 (unsigned long long)__entry->bytenr,
526 (unsigned long long)__entry->num_bytes,
527 show_ref_action(__entry->action),
528 __entry->is_data)
529);
530
531#define show_chunk_type(type) \
532 __print_flags(type, "|", \
533 { BTRFS_BLOCK_GROUP_DATA, "DATA" }, \
534 { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
535 { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
536 { BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \
537 { BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \
538 { BTRFS_BLOCK_GROUP_DUP, "DUP" }, \
539 { BTRFS_BLOCK_GROUP_RAID10, "RAID10"})
540
541DECLARE_EVENT_CLASS(btrfs__chunk,
542
543 TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
544 u64 offset, u64 size),
545
546 TP_ARGS(root, map, offset, size),
547
548 TP_STRUCT__entry(
549 __field( int, num_stripes )
550 __field( u64, type )
551 __field( int, sub_stripes )
552 __field( u64, offset )
553 __field( u64, size )
554 __field( u64, root_objectid )
555 ),
556
557 TP_fast_assign(
558 __entry->num_stripes = map->num_stripes;
559 __entry->type = map->type;
560 __entry->sub_stripes = map->sub_stripes;
561 __entry->offset = offset;
562 __entry->size = size;
563 __entry->root_objectid = root->root_key.objectid;
564 ),
565
566 TP_printk("root = %llu(%s), offset = %llu, size = %llu, "
567 "num_stripes = %d, sub_stripes = %d, type = %s",
568 show_root_type(__entry->root_objectid),
569 (unsigned long long)__entry->offset,
570 (unsigned long long)__entry->size,
571 __entry->num_stripes, __entry->sub_stripes,
572 show_chunk_type(__entry->type))
573);
574
575DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
576
577 TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
578 u64 offset, u64 size),
579
580 TP_ARGS(root, map, offset, size)
581);
582
583DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
584
585 TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
586 u64 offset, u64 size),
587
588 TP_ARGS(root, map, offset, size)
589);
590
591TRACE_EVENT(btrfs_cow_block,
592
593 TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf,
594 struct extent_buffer *cow),
595
596 TP_ARGS(root, buf, cow),
597
598 TP_STRUCT__entry(
599 __field( u64, root_objectid )
600 __field( u64, buf_start )
601 __field( int, refs )
602 __field( u64, cow_start )
603 __field( int, buf_level )
604 __field( int, cow_level )
605 ),
606
607 TP_fast_assign(
608 __entry->root_objectid = root->root_key.objectid;
609 __entry->buf_start = buf->start;
610 __entry->refs = atomic_read(&buf->refs);
611 __entry->cow_start = cow->start;
612 __entry->buf_level = btrfs_header_level(buf);
613 __entry->cow_level = btrfs_header_level(cow);
614 ),
615
616 TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu "
617 "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
618 show_root_type(__entry->root_objectid),
619 __entry->refs,
620 (unsigned long long)__entry->buf_start,
621 __entry->buf_level,
622 (unsigned long long)__entry->cow_start,
623 __entry->cow_level)
624);
625
626DECLARE_EVENT_CLASS(btrfs__reserved_extent,
627
628 TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
629
630 TP_ARGS(root, start, len),
631
632 TP_STRUCT__entry(
633 __field( u64, root_objectid )
634 __field( u64, start )
635 __field( u64, len )
636 ),
637
638 TP_fast_assign(
639 __entry->root_objectid = root->root_key.objectid;
640 __entry->start = start;
641 __entry->len = len;
642 ),
643
644 TP_printk("root = %llu(%s), start = %llu, len = %llu",
645 show_root_type(__entry->root_objectid),
646 (unsigned long long)__entry->start,
647 (unsigned long long)__entry->len)
648);
649
650DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
651
652 TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
653
654 TP_ARGS(root, start, len)
655);
656
657DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
658
659 TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
660
661 TP_ARGS(root, start, len)
662);
663
664#endif /* _TRACE_BTRFS_H */
665
666/* This part must be outside protection */
667#include <trace/define_trace.h>
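For context, each TRACE_EVENT()/DEFINE_EVENT() declaration in this header generates a trace_<name>() stub that the filesystem calls at the instrumented site; TP_fast_assign() fills the ring-buffer record and TP_printk() formats it when the trace is read. A minimal sketch of a caller, assuming CREATE_TRACE_POINTS has been defined in one compilation unit so the tracepoint bodies exist (example_sync_fs is illustrative, not part of the patch):

	#include <linux/fs.h>
	#include <trace/events/btrfs.h>

	/* hypothetical caller; btrfs's own sync_fs path is the intended user */
	static int example_sync_fs(struct super_block *sb, int wait)
	{
		trace_btrfs_sync_fs(wait);	/* records the single "wait" field */
		/* ... the real code would start the transaction commit here ... */
		return 0;
	}

Once built in, the events typically appear under /sys/kernel/debug/tracing/events/btrfs/ and can be enabled individually.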
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 00f2c037267a..72606ba10b14 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -51,6 +51,10 @@ config HARDIRQS_SW_RESEND
51config IRQ_PREFLOW_FASTEOI 51config IRQ_PREFLOW_FASTEOI
52 bool 52 bool
53 53
54# Edge style eoi based handler (cell)
55config IRQ_EDGE_EOI_HANDLER
56 bool
57
54# Support forced irq threading 58# Support forced irq threading
55config IRQ_FORCED_THREADING 59config IRQ_FORCED_THREADING
56 bool 60 bool
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c9c0601f0615..03099d521f5e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -37,6 +37,12 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)
37 irq_chip_set_defaults(chip); 37 irq_chip_set_defaults(chip);
38 desc->irq_data.chip = chip; 38 desc->irq_data.chip = chip;
39 irq_put_desc_unlock(desc, flags); 39 irq_put_desc_unlock(desc, flags);
40 /*
41 * For !CONFIG_SPARSE_IRQ make the irq show up in
42 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
43 * already marked, and this call is harmless.
44 */
45 irq_reserve_irq(irq);
40 return 0; 46 return 0;
41} 47}
42EXPORT_SYMBOL(irq_set_chip); 48EXPORT_SYMBOL(irq_set_chip);
@@ -134,25 +140,25 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data);
134 140
135static void irq_state_clr_disabled(struct irq_desc *desc) 141static void irq_state_clr_disabled(struct irq_desc *desc)
136{ 142{
137 desc->istate &= ~IRQS_DISABLED; 143 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
138 irq_compat_clr_disabled(desc); 144 irq_compat_clr_disabled(desc);
139} 145}
140 146
141static void irq_state_set_disabled(struct irq_desc *desc) 147static void irq_state_set_disabled(struct irq_desc *desc)
142{ 148{
143 desc->istate |= IRQS_DISABLED; 149 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
144 irq_compat_set_disabled(desc); 150 irq_compat_set_disabled(desc);
145} 151}
146 152
147static void irq_state_clr_masked(struct irq_desc *desc) 153static void irq_state_clr_masked(struct irq_desc *desc)
148{ 154{
149 desc->istate &= ~IRQS_MASKED; 155 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
150 irq_compat_clr_masked(desc); 156 irq_compat_clr_masked(desc);
151} 157}
152 158
153static void irq_state_set_masked(struct irq_desc *desc) 159static void irq_state_set_masked(struct irq_desc *desc)
154{ 160{
155 desc->istate |= IRQS_MASKED; 161 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
156 irq_compat_set_masked(desc); 162 irq_compat_set_masked(desc);
157} 163}
158 164
@@ -372,11 +378,11 @@ void handle_nested_irq(unsigned int irq)
372 kstat_incr_irqs_this_cpu(irq, desc); 378 kstat_incr_irqs_this_cpu(irq, desc);
373 379
374 action = desc->action; 380 action = desc->action;
375 if (unlikely(!action || (desc->istate & IRQS_DISABLED))) 381 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
376 goto out_unlock; 382 goto out_unlock;
377 383
378 irq_compat_set_progress(desc); 384 irq_compat_set_progress(desc);
379 desc->istate |= IRQS_INPROGRESS; 385 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
380 raw_spin_unlock_irq(&desc->lock); 386 raw_spin_unlock_irq(&desc->lock);
381 387
382 action_ret = action->thread_fn(action->irq, action->dev_id); 388 action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -384,7 +390,7 @@ void handle_nested_irq(unsigned int irq)
384 note_interrupt(irq, desc, action_ret); 390 note_interrupt(irq, desc, action_ret);
385 391
386 raw_spin_lock_irq(&desc->lock); 392 raw_spin_lock_irq(&desc->lock);
387 desc->istate &= ~IRQS_INPROGRESS; 393 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
388 irq_compat_clr_progress(desc); 394 irq_compat_clr_progress(desc);
389 395
390out_unlock: 396out_unlock:
@@ -416,14 +422,14 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
416{ 422{
417 raw_spin_lock(&desc->lock); 423 raw_spin_lock(&desc->lock);
418 424
419 if (unlikely(desc->istate & IRQS_INPROGRESS)) 425 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
420 if (!irq_check_poll(desc)) 426 if (!irq_check_poll(desc))
421 goto out_unlock; 427 goto out_unlock;
422 428
423 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 429 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
424 kstat_incr_irqs_this_cpu(irq, desc); 430 kstat_incr_irqs_this_cpu(irq, desc);
425 431
426 if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) 432 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
427 goto out_unlock; 433 goto out_unlock;
428 434
429 handle_irq_event(desc); 435 handle_irq_event(desc);
@@ -448,7 +454,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
448 raw_spin_lock(&desc->lock); 454 raw_spin_lock(&desc->lock);
449 mask_ack_irq(desc); 455 mask_ack_irq(desc);
450 456
451 if (unlikely(desc->istate & IRQS_INPROGRESS)) 457 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
452 if (!irq_check_poll(desc)) 458 if (!irq_check_poll(desc))
453 goto out_unlock; 459 goto out_unlock;
454 460
@@ -459,12 +465,12 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
459 * If its disabled or no action available 465 * If its disabled or no action available
460 * keep it masked and get out of here 466 * keep it masked and get out of here
461 */ 467 */
462 if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) 468 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
463 goto out_unlock; 469 goto out_unlock;
464 470
465 handle_irq_event(desc); 471 handle_irq_event(desc);
466 472
467 if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT))) 473 if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
468 unmask_irq(desc); 474 unmask_irq(desc);
469out_unlock: 475out_unlock:
470 raw_spin_unlock(&desc->lock); 476 raw_spin_unlock(&desc->lock);
@@ -496,7 +502,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
496{ 502{
497 raw_spin_lock(&desc->lock); 503 raw_spin_lock(&desc->lock);
498 504
499 if (unlikely(desc->istate & IRQS_INPROGRESS)) 505 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
500 if (!irq_check_poll(desc)) 506 if (!irq_check_poll(desc))
501 goto out; 507 goto out;
502 508
@@ -507,7 +513,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
507 * If its disabled or no action available 513 * If its disabled or no action available
508 * then mask it and get out of here: 514 * then mask it and get out of here:
509 */ 515 */
510 if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) { 516 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
511 irq_compat_set_pending(desc); 517 irq_compat_set_pending(desc);
512 desc->istate |= IRQS_PENDING; 518 desc->istate |= IRQS_PENDING;
513 mask_irq(desc); 519 mask_irq(desc);
@@ -558,8 +564,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
558 * we shouldn't process the IRQ. Mark it pending, handle 564 * we shouldn't process the IRQ. Mark it pending, handle
559 * the necessary masking and go out 565 * the necessary masking and go out
560 */ 566 */
561 if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) || 567 if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
562 !desc->action))) { 568 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
563 if (!irq_check_poll(desc)) { 569 if (!irq_check_poll(desc)) {
564 irq_compat_set_pending(desc); 570 irq_compat_set_pending(desc);
565 desc->istate |= IRQS_PENDING; 571 desc->istate |= IRQS_PENDING;
@@ -584,20 +590,65 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
584 * Renable it, if it was not disabled in meantime. 590 * Renable it, if it was not disabled in meantime.
585 */ 591 */
586 if (unlikely(desc->istate & IRQS_PENDING)) { 592 if (unlikely(desc->istate & IRQS_PENDING)) {
587 if (!(desc->istate & IRQS_DISABLED) && 593 if (!irqd_irq_disabled(&desc->irq_data) &&
588 (desc->istate & IRQS_MASKED)) 594 irqd_irq_masked(&desc->irq_data))
589 unmask_irq(desc); 595 unmask_irq(desc);
590 } 596 }
591 597
592 handle_irq_event(desc); 598 handle_irq_event(desc);
593 599
594 } while ((desc->istate & IRQS_PENDING) && 600 } while ((desc->istate & IRQS_PENDING) &&
595 !(desc->istate & IRQS_DISABLED)); 601 !irqd_irq_disabled(&desc->irq_data));
596 602
597out_unlock: 603out_unlock:
598 raw_spin_unlock(&desc->lock); 604 raw_spin_unlock(&desc->lock);
599} 605}
600 606
607#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
608/**
609 * handle_edge_eoi_irq - edge eoi type IRQ handler
610 * @irq: the interrupt number
611 * @desc: the interrupt description structure for this irq
612 *
613 * Similar as the above handle_edge_irq, but using eoi and w/o the
614 * mask/unmask logic.
615 */
616void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
617{
618 struct irq_chip *chip = irq_desc_get_chip(desc);
619
620 raw_spin_lock(&desc->lock);
621
622 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
623 /*
624 * If we're currently running this IRQ, or its disabled,
625 * we shouldn't process the IRQ. Mark it pending, handle
626 * the necessary masking and go out
627 */
628 if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
629 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
630 if (!irq_check_poll(desc)) {
631 desc->istate |= IRQS_PENDING;
632 goto out_eoi;
633 }
634 }
635 kstat_incr_irqs_this_cpu(irq, desc);
636
637 do {
638 if (unlikely(!desc->action))
639 goto out_eoi;
640
641 handle_irq_event(desc);
642
643 } while ((desc->istate & IRQS_PENDING) &&
644 !irqd_irq_disabled(&desc->irq_data));
645
 646out_eoi:
647 chip->irq_eoi(&desc->irq_data);
648 raw_spin_unlock(&desc->lock);
649}
650#endif
651
601/** 652/**
602 * handle_percpu_irq - Per CPU local irq handler 653 * handle_percpu_irq - Per CPU local irq handler
603 * @irq: the interrupt number 654 * @irq: the interrupt number
@@ -642,8 +693,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
642 if (handle == handle_bad_irq) { 693 if (handle == handle_bad_irq) {
643 if (desc->irq_data.chip != &no_irq_chip) 694 if (desc->irq_data.chip != &no_irq_chip)
644 mask_ack_irq(desc); 695 mask_ack_irq(desc);
645 irq_compat_set_disabled(desc); 696 irq_state_set_disabled(desc);
646 desc->istate |= IRQS_DISABLED;
647 desc->depth = 1; 697 desc->depth = 1;
648 } 698 }
649 desc->handle_irq = handle; 699 desc->handle_irq = handle;
@@ -684,8 +734,70 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
684 irqd_set(&desc->irq_data, IRQD_PER_CPU); 734 irqd_set(&desc->irq_data, IRQD_PER_CPU);
685 if (irq_settings_can_move_pcntxt(desc)) 735 if (irq_settings_can_move_pcntxt(desc))
686 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); 736 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
737 if (irq_settings_is_level(desc))
738 irqd_set(&desc->irq_data, IRQD_LEVEL);
687 739
688 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); 740 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
689 741
690 irq_put_desc_unlock(desc, flags); 742 irq_put_desc_unlock(desc, flags);
691} 743}
744
745/**
746 * irq_cpu_online - Invoke all irq_cpu_online functions.
747 *
748 * Iterate through all irqs and invoke the chip.irq_cpu_online()
749 * for each.
750 */
751void irq_cpu_online(void)
752{
753 struct irq_desc *desc;
754 struct irq_chip *chip;
755 unsigned long flags;
756 unsigned int irq;
757
758 for_each_active_irq(irq) {
759 desc = irq_to_desc(irq);
760 if (!desc)
761 continue;
762
763 raw_spin_lock_irqsave(&desc->lock, flags);
764
765 chip = irq_data_get_irq_chip(&desc->irq_data);
766 if (chip && chip->irq_cpu_online &&
767 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
768 !irqd_irq_disabled(&desc->irq_data)))
769 chip->irq_cpu_online(&desc->irq_data);
770
771 raw_spin_unlock_irqrestore(&desc->lock, flags);
772 }
773}
774
775/**
776 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
777 *
778 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
779 * for each.
780 */
781void irq_cpu_offline(void)
782{
783 struct irq_desc *desc;
784 struct irq_chip *chip;
785 unsigned long flags;
786 unsigned int irq;
787
788 for_each_active_irq(irq) {
789 desc = irq_to_desc(irq);
790 if (!desc)
791 continue;
792
793 raw_spin_lock_irqsave(&desc->lock, flags);
794
795 chip = irq_data_get_irq_chip(&desc->irq_data);
796 if (chip && chip->irq_cpu_offline &&
797 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
798 !irqd_irq_disabled(&desc->irq_data)))
799 chip->irq_cpu_offline(&desc->irq_data);
800
801 raw_spin_unlock_irqrestore(&desc->lock, flags);
802 }
803}
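To illustrate how the pieces added above fit together, here is a minimal sketch of a platform interrupt controller that selects IRQ_EDGE_EOI_HANDLER and uses the new flow handler, assuming the handler's declaration from linux/irq.h is visible; the example_* names are assumptions for illustration only, not taken from the patch:

	#include <linux/irq.h>

	/* assumed hardware hook: signal end-of-interrupt to the controller */
	static void example_eoi(struct irq_data *d)
	{
		/* write the controller's EOI register for d->irq here */
	}

	static struct irq_chip example_chip = {
		.name		= "EXAMPLE-PIC",
		.irq_eoi	= example_eoi,
		/* optional: .irq_cpu_online / .irq_cpu_offline, which the new
		 * irq_cpu_online()/irq_cpu_offline() iterators would invoke */
	};

	static void example_setup_irq(unsigned int virq)
	{
		/* edge interrupts completed by EOI, with no mask/unmask:
		 * install the flow handler added in this patch */
		irq_set_chip_and_handler(virq, &example_chip, handle_edge_eoi_irq);
	}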
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index d1a33b7fa61d..a0bd875ba3d5 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -6,6 +6,8 @@
6 6
7#define P(f) if (desc->status & f) printk("%14s set\n", #f) 7#define P(f) if (desc->status & f) printk("%14s set\n", #f)
8#define PS(f) if (desc->istate & f) printk("%14s set\n", #f) 8#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
9/* FIXME */
10#define PD(f) do { } while (0)
9 11
10static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) 12static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
11{ 13{
@@ -28,13 +30,15 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
28 P(IRQ_NOAUTOEN); 30 P(IRQ_NOAUTOEN);
29 31
30 PS(IRQS_AUTODETECT); 32 PS(IRQS_AUTODETECT);
31 PS(IRQS_INPROGRESS);
32 PS(IRQS_REPLAY); 33 PS(IRQS_REPLAY);
33 PS(IRQS_WAITING); 34 PS(IRQS_WAITING);
34 PS(IRQS_DISABLED);
35 PS(IRQS_PENDING); 35 PS(IRQS_PENDING);
36 PS(IRQS_MASKED); 36
37 PD(IRQS_INPROGRESS);
38 PD(IRQS_DISABLED);
39 PD(IRQS_MASKED);
37} 40}
38 41
39#undef P 42#undef P
40#undef PS 43#undef PS
44#undef PD
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 517561fc7317..1a2fb77f2fd6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -178,25 +178,13 @@ irqreturn_t handle_irq_event(struct irq_desc *desc)
178 irq_compat_clr_pending(desc); 178 irq_compat_clr_pending(desc);
179 desc->istate &= ~IRQS_PENDING; 179 desc->istate &= ~IRQS_PENDING;
180 irq_compat_set_progress(desc); 180 irq_compat_set_progress(desc);
181 desc->istate |= IRQS_INPROGRESS; 181 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
182 raw_spin_unlock(&desc->lock); 182 raw_spin_unlock(&desc->lock);
183 183
184 ret = handle_irq_event_percpu(desc, action); 184 ret = handle_irq_event_percpu(desc, action);
185 185
186 raw_spin_lock(&desc->lock); 186 raw_spin_lock(&desc->lock);
187 desc->istate &= ~IRQS_INPROGRESS; 187 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
188 irq_compat_clr_progress(desc); 188 irq_compat_clr_progress(desc);
189 return ret; 189 return ret;
190} 190}
191
192/**
193 * handle_IRQ_event - irq action chain handler
194 * @irq: the interrupt number
195 * @action: the interrupt action chain for this irq
196 *
197 * Handles the action chain of an irq event
198 */
199irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
200{
201 return handle_irq_event_percpu(irq_to_desc(irq), action);
202}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 6c6ec9a49027..6b8b9713e28d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -44,26 +44,20 @@ enum {
44 * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt 44 * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt
45 * detection 45 * detection
46 * IRQS_POLL_INPROGRESS - polling in progress 46 * IRQS_POLL_INPROGRESS - polling in progress
47 * IRQS_INPROGRESS - Interrupt in progress
48 * IRQS_ONESHOT - irq is not unmasked in primary handler 47 * IRQS_ONESHOT - irq is not unmasked in primary handler
49 * IRQS_REPLAY - irq is replayed 48 * IRQS_REPLAY - irq is replayed
50 * IRQS_WAITING - irq is waiting 49 * IRQS_WAITING - irq is waiting
51 * IRQS_DISABLED - irq is disabled
52 * IRQS_PENDING - irq is pending and replayed later 50 * IRQS_PENDING - irq is pending and replayed later
53 * IRQS_MASKED - irq is masked
54 * IRQS_SUSPENDED - irq is suspended 51 * IRQS_SUSPENDED - irq is suspended
55 */ 52 */
56enum { 53enum {
57 IRQS_AUTODETECT = 0x00000001, 54 IRQS_AUTODETECT = 0x00000001,
58 IRQS_SPURIOUS_DISABLED = 0x00000002, 55 IRQS_SPURIOUS_DISABLED = 0x00000002,
59 IRQS_POLL_INPROGRESS = 0x00000008, 56 IRQS_POLL_INPROGRESS = 0x00000008,
60 IRQS_INPROGRESS = 0x00000010,
61 IRQS_ONESHOT = 0x00000020, 57 IRQS_ONESHOT = 0x00000020,
62 IRQS_REPLAY = 0x00000040, 58 IRQS_REPLAY = 0x00000040,
63 IRQS_WAITING = 0x00000080, 59 IRQS_WAITING = 0x00000080,
64 IRQS_DISABLED = 0x00000100,
65 IRQS_PENDING = 0x00000200, 60 IRQS_PENDING = 0x00000200,
66 IRQS_MASKED = 0x00000400,
67 IRQS_SUSPENDED = 0x00000800, 61 IRQS_SUSPENDED = 0x00000800,
68}; 62};
69 63
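The removal of IRQS_INPROGRESS, IRQS_DISABLED and IRQS_MASKED above is the other half of the conversion visible in chip.c, handle.c, manage.c, migration.c and spurious.c: those bits now live in irq_data state and are touched only through accessors. A minimal sketch of the idiom inside the irq core, assuming the kernel/irq/internals.h helpers are in scope (example_flow is illustrative):

	#include <linux/irq.h>
	#include "internals.h"

	/* old style, removed by this series:
	 *	if (desc->istate & IRQS_DISABLED) return;
	 *	desc->istate |= IRQS_INPROGRESS;
	 */
	static void example_flow(struct irq_desc *desc)
	{
		if (irqd_irq_disabled(&desc->irq_data))
			return;

		irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
		/* ... invoke the action chain ... */
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	}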
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 6fb014f172f7..2c039c9b9383 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -80,7 +80,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
80 desc->irq_data.handler_data = NULL; 80 desc->irq_data.handler_data = NULL;
81 desc->irq_data.msi_desc = NULL; 81 desc->irq_data.msi_desc = NULL;
82 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); 82 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
83 desc->istate = IRQS_DISABLED; 83 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
84 desc->handle_irq = handle_bad_irq; 84 desc->handle_irq = handle_bad_irq;
85 desc->depth = 1; 85 desc->depth = 1;
86 desc->irq_count = 0; 86 desc->irq_count = 0;
@@ -238,7 +238,6 @@ int __init early_irq_init(void)
238 238
239struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 239struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
240 [0 ... NR_IRQS-1] = { 240 [0 ... NR_IRQS-1] = {
241 .istate = IRQS_DISABLED,
242 .handle_irq = handle_bad_irq, 241 .handle_irq = handle_bad_irq,
243 .depth = 1, 242 .depth = 1,
244 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), 243 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a2aa73e536c..acf540768b8f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -41,7 +41,7 @@ early_param("threadirqs", setup_forced_irqthreads);
41void synchronize_irq(unsigned int irq) 41void synchronize_irq(unsigned int irq)
42{ 42{
43 struct irq_desc *desc = irq_to_desc(irq); 43 struct irq_desc *desc = irq_to_desc(irq);
44 unsigned int state; 44 bool inprogress;
45 45
46 if (!desc) 46 if (!desc)
47 return; 47 return;
@@ -53,16 +53,16 @@ void synchronize_irq(unsigned int irq)
53 * Wait until we're out of the critical section. This might 53 * Wait until we're out of the critical section. This might
54 * give the wrong answer due to the lack of memory barriers. 54 * give the wrong answer due to the lack of memory barriers.
55 */ 55 */
56 while (desc->istate & IRQS_INPROGRESS) 56 while (irqd_irq_inprogress(&desc->irq_data))
57 cpu_relax(); 57 cpu_relax();
58 58
59 /* Ok, that indicated we're done: double-check carefully. */ 59 /* Ok, that indicated we're done: double-check carefully. */
60 raw_spin_lock_irqsave(&desc->lock, flags); 60 raw_spin_lock_irqsave(&desc->lock, flags);
61 state = desc->istate; 61 inprogress = irqd_irq_inprogress(&desc->irq_data);
62 raw_spin_unlock_irqrestore(&desc->lock, flags); 62 raw_spin_unlock_irqrestore(&desc->lock, flags);
63 63
64 /* Oops, that failed? */ 64 /* Oops, that failed? */
65 } while (state & IRQS_INPROGRESS); 65 } while (inprogress);
66 66
67 /* 67 /*
68 * We made sure that no hardirq handler is running. Now verify 68 * We made sure that no hardirq handler is running. Now verify
@@ -112,13 +112,13 @@ void irq_set_thread_affinity(struct irq_desc *desc)
112} 112}
113 113
114#ifdef CONFIG_GENERIC_PENDING_IRQ 114#ifdef CONFIG_GENERIC_PENDING_IRQ
115static inline bool irq_can_move_pcntxt(struct irq_desc *desc) 115static inline bool irq_can_move_pcntxt(struct irq_data *data)
116{ 116{
117 return irq_settings_can_move_pcntxt(desc); 117 return irqd_can_move_in_process_context(data);
118} 118}
119static inline bool irq_move_pending(struct irq_desc *desc) 119static inline bool irq_move_pending(struct irq_data *data)
120{ 120{
121 return irqd_is_setaffinity_pending(&desc->irq_data); 121 return irqd_is_setaffinity_pending(data);
122} 122}
123static inline void 123static inline void
124irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) 124irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
@@ -131,43 +131,34 @@ irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
131 cpumask_copy(mask, desc->pending_mask); 131 cpumask_copy(mask, desc->pending_mask);
132} 132}
133#else 133#else
134static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; } 134static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
135static inline bool irq_move_pending(struct irq_desc *desc) { return false; } 135static inline bool irq_move_pending(struct irq_data *data) { return false; }
136static inline void 136static inline void
137irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } 137irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
138static inline void 138static inline void
139irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } 139irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
140#endif 140#endif
141 141
142/** 142int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
143 * irq_set_affinity - Set the irq affinity of a given irq
144 * @irq: Interrupt to set affinity
145 * @cpumask: cpumask
146 *
147 */
148int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
149{ 143{
150 struct irq_desc *desc = irq_to_desc(irq); 144 struct irq_chip *chip = irq_data_get_irq_chip(data);
151 struct irq_chip *chip = desc->irq_data.chip; 145 struct irq_desc *desc = irq_data_to_desc(data);
152 unsigned long flags;
153 int ret = 0; 146 int ret = 0;
154 147
155 if (!chip->irq_set_affinity) 148 if (!chip || !chip->irq_set_affinity)
156 return -EINVAL; 149 return -EINVAL;
157 150
158 raw_spin_lock_irqsave(&desc->lock, flags); 151 if (irq_can_move_pcntxt(data)) {
159 152 ret = chip->irq_set_affinity(data, mask, false);
160 if (irq_can_move_pcntxt(desc)) {
161 ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
162 switch (ret) { 153 switch (ret) {
163 case IRQ_SET_MASK_OK: 154 case IRQ_SET_MASK_OK:
164 cpumask_copy(desc->irq_data.affinity, mask); 155 cpumask_copy(data->affinity, mask);
165 case IRQ_SET_MASK_OK_NOCOPY: 156 case IRQ_SET_MASK_OK_NOCOPY:
166 irq_set_thread_affinity(desc); 157 irq_set_thread_affinity(desc);
167 ret = 0; 158 ret = 0;
168 } 159 }
169 } else { 160 } else {
170 irqd_set_move_pending(&desc->irq_data); 161 irqd_set_move_pending(data);
171 irq_copy_pending(desc, mask); 162 irq_copy_pending(desc, mask);
172 } 163 }
173 164
@@ -176,7 +167,28 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
176 schedule_work(&desc->affinity_notify->work); 167 schedule_work(&desc->affinity_notify->work);
177 } 168 }
178 irq_compat_set_affinity(desc); 169 irq_compat_set_affinity(desc);
179 irqd_set(&desc->irq_data, IRQD_AFFINITY_SET); 170 irqd_set(data, IRQD_AFFINITY_SET);
171
172 return ret;
173}
174
175/**
176 * irq_set_affinity - Set the irq affinity of a given irq
177 * @irq: Interrupt to set affinity
178 * @mask: cpumask
179 *
180 */
181int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
182{
183 struct irq_desc *desc = irq_to_desc(irq);
184 unsigned long flags;
185 int ret;
186
187 if (!desc)
188 return -EINVAL;
189
190 raw_spin_lock_irqsave(&desc->lock, flags);
191 ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
180 raw_spin_unlock_irqrestore(&desc->lock, flags); 192 raw_spin_unlock_irqrestore(&desc->lock, flags);
181 return ret; 193 return ret;
182} 194}
@@ -206,7 +218,7 @@ static void irq_affinity_notify(struct work_struct *work)
206 goto out; 218 goto out;
207 219
208 raw_spin_lock_irqsave(&desc->lock, flags); 220 raw_spin_lock_irqsave(&desc->lock, flags);
209 if (irq_move_pending(desc)) 221 if (irq_move_pending(&desc->irq_data))
210 irq_get_pending(cpumask, desc); 222 irq_get_pending(cpumask, desc);
211 else 223 else
212 cpumask_copy(cpumask, desc->irq_data.affinity); 224 cpumask_copy(cpumask, desc->irq_data.affinity);
@@ -551,9 +563,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
551 flags &= IRQ_TYPE_SENSE_MASK; 563 flags &= IRQ_TYPE_SENSE_MASK;
552 564
553 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { 565 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
554 if (!(desc->istate & IRQS_MASKED)) 566 if (!irqd_irq_masked(&desc->irq_data))
555 mask_irq(desc); 567 mask_irq(desc);
556 if (!(desc->istate & IRQS_DISABLED)) 568 if (!irqd_irq_disabled(&desc->irq_data))
557 unmask = 1; 569 unmask = 1;
558 } 570 }
559 571
@@ -651,7 +663,7 @@ again:
651 * irq_wake_thread(). See the comment there which explains the 663 * irq_wake_thread(). See the comment there which explains the
652 * serialization. 664 * serialization.
653 */ 665 */
654 if (unlikely(desc->istate & IRQS_INPROGRESS)) { 666 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
655 raw_spin_unlock_irq(&desc->lock); 667 raw_spin_unlock_irq(&desc->lock);
656 chip_bus_sync_unlock(desc); 668 chip_bus_sync_unlock(desc);
657 cpu_relax(); 669 cpu_relax();
@@ -668,12 +680,10 @@ again:
668 680
669 desc->threads_oneshot &= ~action->thread_mask; 681 desc->threads_oneshot &= ~action->thread_mask;
670 682
671 if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) && 683 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
672 (desc->istate & IRQS_MASKED)) { 684 irqd_irq_masked(&desc->irq_data))
673 irq_compat_clr_masked(desc); 685 unmask_irq(desc);
674 desc->istate &= ~IRQS_MASKED; 686
675 desc->irq_data.chip->irq_unmask(&desc->irq_data);
676 }
677out_unlock: 687out_unlock:
678 raw_spin_unlock_irq(&desc->lock); 688 raw_spin_unlock_irq(&desc->lock);
679 chip_bus_sync_unlock(desc); 689 chip_bus_sync_unlock(desc);
@@ -767,7 +777,7 @@ static int irq_thread(void *data)
767 atomic_inc(&desc->threads_active); 777 atomic_inc(&desc->threads_active);
768 778
769 raw_spin_lock_irq(&desc->lock); 779 raw_spin_lock_irq(&desc->lock);
770 if (unlikely(desc->istate & IRQS_DISABLED)) { 780 if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
771 /* 781 /*
772 * CHECKME: We might need a dedicated 782 * CHECKME: We might need a dedicated
773 * IRQ_THREAD_PENDING flag here, which 783 * IRQ_THREAD_PENDING flag here, which
@@ -985,8 +995,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
985 } 995 }
986 996
987 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ 997 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
988 IRQS_INPROGRESS | IRQS_ONESHOT | \ 998 IRQS_ONESHOT | IRQS_WAITING);
989 IRQS_WAITING); 999 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
990 1000
991 if (new->flags & IRQF_PERCPU) { 1001 if (new->flags & IRQF_PERCPU) {
992 irqd_set(&desc->irq_data, IRQD_PER_CPU); 1002 irqd_set(&desc->irq_data, IRQD_PER_CPU);
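From a caller's point of view the split above changes nothing: drivers and arch code keep using irq_set_affinity(), while __irq_set_affinity_locked() exists for contexts that already hold desc->lock and have the irq_data at hand. A minimal usage sketch (example_pin_irq_to_cpu is illustrative):

	#include <linux/interrupt.h>
	#include <linux/cpumask.h>

	static int example_pin_irq_to_cpu(unsigned int irq, int cpu)
	{
		/* takes desc->lock internally and either applies the mask now
		 * or marks the move pending, exactly as before the refactor */
		return irq_set_affinity(irq, cpumask_of(cpu));
	}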
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index ec4806d4778b..e33d9c8d5089 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -60,13 +60,12 @@ void move_masked_irq(int irq)
60 60
61void irq_move_irq(struct irq_data *idata) 61void irq_move_irq(struct irq_data *idata)
62{ 62{
63 struct irq_desc *desc = irq_data_to_desc(idata);
64 bool masked; 63 bool masked;
65 64
66 if (likely(!irqd_is_setaffinity_pending(idata))) 65 if (likely(!irqd_is_setaffinity_pending(idata)))
67 return; 66 return;
68 67
69 if (unlikely(desc->istate & IRQS_DISABLED)) 68 if (unlikely(irqd_irq_disabled(idata)))
70 return; 69 return;
71 70
72 /* 71 /*
@@ -74,7 +73,7 @@ void irq_move_irq(struct irq_data *idata)
74 * threaded interrupt with ONESHOT set, we can end up with an 73 * threaded interrupt with ONESHOT set, we can end up with an
75 * interrupt storm. 74 * interrupt storm.
76 */ 75 */
77 masked = desc->istate & IRQS_MASKED; 76 masked = irqd_irq_masked(idata);
78 if (!masked) 77 if (!masked)
79 idata->chip->irq_mask(idata); 78 idata->chip->irq_mask(idata);
80 irq_move_masked_irq(idata); 79 irq_move_masked_irq(idata);
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd586ebf9c8c..83f4799f46be 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -45,12 +45,12 @@ bool irq_wait_for_poll(struct irq_desc *desc)
45#ifdef CONFIG_SMP 45#ifdef CONFIG_SMP
46 do { 46 do {
47 raw_spin_unlock(&desc->lock); 47 raw_spin_unlock(&desc->lock);
48 while (desc->istate & IRQS_INPROGRESS) 48 while (irqd_irq_inprogress(&desc->irq_data))
49 cpu_relax(); 49 cpu_relax();
50 raw_spin_lock(&desc->lock); 50 raw_spin_lock(&desc->lock);
51 } while (desc->istate & IRQS_INPROGRESS); 51 } while (irqd_irq_inprogress(&desc->irq_data));
52 /* Might have been disabled in meantime */ 52 /* Might have been disabled in meantime */
53 return !(desc->istate & IRQS_DISABLED) && desc->action; 53 return !irqd_irq_disabled(&desc->irq_data) && desc->action;
54#else 54#else
55 return false; 55 return false;
56#endif 56#endif
@@ -75,7 +75,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
75 * Do not poll disabled interrupts unless the spurious 75 * Do not poll disabled interrupts unless the spurious
76 * disabled poller asks explicitely. 76 * disabled poller asks explicitely.
77 */ 77 */
78 if ((desc->istate & IRQS_DISABLED) && !force) 78 if (irqd_irq_disabled(&desc->irq_data) && !force)
79 goto out; 79 goto out;
80 80
81 /* 81 /*
@@ -88,7 +88,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
88 goto out; 88 goto out;
89 89
90 /* Already running on another processor */ 90 /* Already running on another processor */
91 if (desc->istate & IRQS_INPROGRESS) { 91 if (irqd_irq_inprogress(&desc->irq_data)) {
92 /* 92 /*
93 * Already running: If it is shared get the other 93 * Already running: If it is shared get the other
94 * CPU to go looking for our mystery interrupt too 94 * CPU to go looking for our mystery interrupt too
diff --git a/kernel/signal.c b/kernel/signal.c
index 324eff5468ad..1186cf7fac77 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2437,7 +2437,7 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2437 /* Not even root can pretend to send signals from the kernel. 2437 /* Not even root can pretend to send signals from the kernel.
2438 * Nor can they impersonate a kill()/tgkill(), which adds source info. 2438 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2439 */ 2439 */
2440 if (info.si_code != SI_QUEUE) { 2440 if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2441 /* We used to allow any < 0 si_code */ 2441 /* We used to allow any < 0 si_code */
2442 WARN_ON_ONCE(info.si_code < 0); 2442 WARN_ON_ONCE(info.si_code < 0);
2443 return -EPERM; 2443 return -EPERM;
@@ -2457,7 +2457,7 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2457 /* Not even root can pretend to send signals from the kernel. 2457 /* Not even root can pretend to send signals from the kernel.
2458 * Nor can they impersonate a kill()/tgkill(), which adds source info. 2458 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2459 */ 2459 */
2460 if (info->si_code != SI_QUEUE) { 2460 if (info->si_code >= 0 || info->si_code == SI_TKILL) {
2461 /* We used to allow any < 0 si_code */ 2461 /* We used to allow any < 0 si_code */
2462 WARN_ON_ONCE(info->si_code < 0); 2462 WARN_ON_ONCE(info->si_code < 0);
2463 return -EPERM; 2463 return -EPERM;
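The tightened si_code check is only visible to callers of rt_sigqueueinfo()/rt_tgsigqueueinfo() that forge kernel-style codes: si_code values >= 0 and SI_TKILL now fail with EPERM, while other negative, user-defined codes are allowed again (as the "We used to allow any < 0 si_code" comment notes). The ordinary sigqueue(3) path keeps working. A minimal userspace sketch (example_queue_value is illustrative):

	#include <signal.h>

	/* glibc's sigqueue() issues rt_sigqueueinfo() with si_code = SI_QUEUE,
	 * a negative code that the check above still permits */
	static int example_queue_value(pid_t pid, int sig, int value)
	{
		union sigval sv = { .sival_int = value };

		return sigqueue(pid, sig, sv);
	}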