author | Ingo Molnar <mingo@elte.hu> | 2011-03-30 03:07:39 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-03-30 03:07:43 -0400 |
commit | 9f644c4ba86b76159d36747fda7da496f72a1872 (patch) | |
tree | 31e025a5f283aff691fb636bf07fd0b445cf07a3 | |
parent | 1b7155f7de119870f0d3fad89f125de2ff6c16be (diff) | |
parent | 0ce790e7d736cedc563e1fb4e998babf5a4dbc3d (diff) |
Merge commit 'v2.6.39-rc1' into perf/urgent
Merge reason: use the post-merge-window tree.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
671 files changed, 17319 insertions, 10380 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-samsung-laptop b/Documentation/ABI/testing/sysfs-driver-samsung-laptop new file mode 100644 index 000000000000..0a810231aad4 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-samsung-laptop | |||
@@ -0,0 +1,19 @@ | |||
1 | What: /sys/devices/platform/samsung/performance_level | ||
2 | Date: January 1, 2010 | ||
3 | KernelVersion: 2.6.33 | ||
4 | Contact: Greg Kroah-Hartman <gregkh@suse.de> | ||
5 | Description: Some Samsung laptops have different "performance levels" | ||
6 | that can be modified by a function key, and by this | ||
7 | sysfs file. These values don't always make a whole lot | ||
8 | of sense, but some users like to modify them to keep | ||
9 | their fans quiet at all costs. Reading from this file | ||
10 | will show the current performance level. Writing to the | ||
11 | file can change this value. | ||
12 | Valid options: | ||
13 | "silent" | ||
14 | "normal" | ||
15 | "overclock" | ||
16 | Note that not all laptops support all of these options. | ||
17 | Specifically, not all support the "overclock" option, | ||
18 | and it's still unknown if this value even changes | ||
19 | anything, other than making the user feel a bit better. | ||
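The attribute above is an ordinary sysfs file, so it can be exercised with plain open/read/write calls. Below is a minimal userspace sketch (not part of the patch) that selects the "silent" level and reads the attribute back; it assumes the samsung-laptop driver is loaded and has created the node at the path given in the description.

```c
/*
 * Minimal sketch for the performance_level attribute described above.
 * Assumes /sys/devices/platform/samsung/performance_level exists on a
 * supported Samsung laptop; error handling is reduced to perror().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define PERF_ATTR "/sys/devices/platform/samsung/performance_level"

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	/* Request the quietest profile ("silent", "normal" or "overclock"). */
	fd = open(PERF_ATTR, O_WRONLY);
	if (fd < 0) {
		perror("open for write");
		return 1;
	}
	if (write(fd, "silent", strlen("silent")) < 0)
		perror("write");
	close(fd);

	/* Read the attribute back to confirm the current level. */
	fd = open(PERF_ATTR, O_RDONLY);
	if (fd < 0) {
		perror("open for read");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	buf[n] = '\0';
	printf("performance_level: %s", buf);
	close(fd);
	return 0;
}
```

Any of the documented values ("silent", "normal", "overclock") can be written the same way, subject to what the particular laptop supports.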
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi new file mode 100644 index 000000000000..2e7df91620de --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi | |||
@@ -0,0 +1,31 @@ | |||
1 | What: /sys/devices/platform/<platform>/cpufv | ||
2 | Date: Oct 2010 | ||
3 | KernelVersion: 2.6.37 | ||
4 | Contact: "Corentin Chary" <corentincj@iksaif.net> | ||
5 | Description: | ||
6 | Change CPU clock configuration (write-only). | ||
7 | There are three available clock configurations: | ||
8 | * 0 -> Super Performance Mode | ||
9 | * 1 -> High Performance Mode | ||
10 | * 2 -> Power Saving Mode | ||
11 | |||
12 | What: /sys/devices/platform/<platform>/camera | ||
13 | Date: Jan 2010 | ||
14 | KernelVersion: 2.6.39 | ||
15 | Contact: "Corentin Chary" <corentincj@iksaif.net> | ||
16 | Description: | ||
17 | Control the camera. 1 means on, 0 means off. | ||
18 | |||
19 | What: /sys/devices/platform/<platform>/cardr | ||
20 | Date: Jan 2010 | ||
21 | KernelVersion: 2.6.39 | ||
22 | Contact: "Corentin Chary" <corentincj@iksaif.net> | ||
23 | Description: | ||
24 | Control the card reader. 1 means on, 0 means off. | ||
25 | |||
26 | What: /sys/devices/platform/<platform>/touchpad | ||
27 | Date: Jan 2010 | ||
28 | KernelVersion: 2.6.39 | ||
29 | Contact: "Corentin Chary" <corentincj@iksaif.net> | ||
30 | Description: | ||
31 | Control the touchpad. 1 means on, 0 means off. | ||
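The asus-wmi attributes follow the same sysfs pattern. A hedged sketch of driving them from userspace is shown below; the <platform> directory name is board specific, so it is taken from the command line rather than guessed, and the attribute names and values come straight from the entries above.

```c
/*
 * Sketch of driving the asus-wmi attributes documented above.  The
 * <platform> directory name is board specific, so it is supplied as
 * argv[1]; cpufv is write-only per the ABI text, and camera/cardr/
 * touchpad take "0" or "1".
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *platform, const char *attr, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), "/sys/devices/platform/%s/%s",
		 platform, attr);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
	return 0;
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <platform-dir-name>\n", argv[0]);
		return 1;
	}
	write_attr(argv[1], "cpufv", "2");	/* 2 -> Power Saving Mode */
	write_attr(argv[1], "touchpad", "1");	/* 1 -> touchpad on */
	return 0;
}
```

Usage would look like passing the platform directory name for the machine at hand, e.g. "eeepc-wmi" as in the entry being removed further below.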
diff --git a/Documentation/ABI/testing/sysfs-platform-eeepc-wmi b/Documentation/ABI/testing/sysfs-platform-eeepc-wmi deleted file mode 100644 index e4b5fef5fadd..000000000000 --- a/Documentation/ABI/testing/sysfs-platform-eeepc-wmi +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | What: /sys/devices/platform/eeepc-wmi/cpufv | ||
2 | Date: Oct 2010 | ||
3 | KernelVersion: 2.6.37 | ||
4 | Contact: "Corentin Chary" <corentincj@iksaif.net> | ||
5 | Description: | ||
6 | Change CPU clock configuration (write-only). | ||
7 | There are three available clock configuration: | ||
8 | * 0 -> Super Performance Mode | ||
9 | * 1 -> High Performance Mode | ||
10 | * 2 -> Power Saving Mode | ||
diff --git a/Documentation/laptops/sony-laptop.txt b/Documentation/laptops/sony-laptop.txt index 23ce7d350d1a..2bd4e82e5d9f 100644 --- a/Documentation/laptops/sony-laptop.txt +++ b/Documentation/laptops/sony-laptop.txt | |||
@@ -14,7 +14,8 @@ Some models report hotkeys through the SNC or SPIC devices, such events are | |||
14 | reported both through the ACPI subsystem as acpi events and through the INPUT | 14 | reported both through the ACPI subsystem as acpi events and through the INPUT |
15 | subsystem. See the logs of acpid or /proc/acpi/event and | 15 | subsystem. See the logs of acpid or /proc/acpi/event and |
16 | /proc/bus/input/devices to find out what those events are and which input | 16 | /proc/bus/input/devices to find out what those events are and which input |
17 | devices are created by the driver. | 17 | devices are created by the driver. Additionally, loading the driver with the |
18 | debug option will report all events in the kernel log. | ||
18 | 19 | ||
19 | Backlight control: | 20 | Backlight control: |
20 | ------------------ | 21 | ------------------ |
@@ -64,6 +65,16 @@ powers off the sound card, | |||
64 | # echo "1" > /sys/devices/platform/sony-laptop/audiopower | 65 | # echo "1" > /sys/devices/platform/sony-laptop/audiopower |
65 | powers on the sound card. | 66 | powers on the sound card. |
66 | 67 | ||
68 | |||
69 | RFkill control: | ||
70 | --------------- | ||
71 | More recent Vaio models expose a consistent set of ACPI methods to | ||
72 | control radio frequency emitting devices. If you are a lucky owner of | ||
73 | such a laptop you will find the necessary rfkill devices under | ||
74 | /sys/class/rfkill. Check those starting with sony-* in | ||
75 | # grep . /sys/class/rfkill/*/{state,name} | ||
76 | |||
77 | |||
67 | Development: | 78 | Development: |
68 | ------------ | 79 | ------------ |
69 | 80 | ||
@@ -75,8 +86,21 @@ pass the option 'debug=1'. | |||
75 | REPEAT: DON'T DO THIS IF YOU DON'T LIKE RISKY BUSINESS. | 86 | REPEAT: DON'T DO THIS IF YOU DON'T LIKE RISKY BUSINESS. |
76 | 87 | ||
77 | In your kernel logs you will find the list of all ACPI methods | 88 | In your kernel logs you will find the list of all ACPI methods |
78 | the SNC device has on your laptop. You can see the GCDP/GCDP methods | 89 | the SNC device has on your laptop. |
79 | used to pwer on/off the CD drive, but there are others. | 90 | |
91 | * For new models you will see a long list of meaningless method names, | ||
92 | reading the DSDT table source should reveal that: | ||
93 | (1) the SNC device uses an internal capability lookup table | ||
94 | (2) SN00 is used to find values in the lookup table | ||
95 | (3) SN06 and SN07 are used to call into the real methods based on | ||
96 | offsets you can obtain by iterating the table using SN00 | ||
97 | (4) SN02 is used to enable events. | ||
98 | Some values in the capability lookup table are more or less known, see | ||
99 | the code for all sony_call_snc_handle calls, others are more obscure. | ||
100 | |||
101 | * For old models you can see the GCDP/GCDP methods used to power on/off | ||
102 | the CD drive, but there are others and they are usually different from | ||
103 | model to model. | ||
80 | 104 | ||
81 | I HAVE NO IDEA WHAT THOSE METHODS DO. | 105 | I HAVE NO IDEA WHAT THOSE METHODS DO. |
82 | 106 | ||
@@ -108,9 +132,8 @@ Bugs/Limitations: | |||
108 | laptop, including permanent damage. | 132 | laptop, including permanent damage. |
109 | 133 | ||
110 | * The sony-laptop and sonypi drivers do not interact at all. In the | 134 | * The sony-laptop and sonypi drivers do not interact at all. In the |
111 | future, sonypi could use sony-laptop to do (part of) its business. | 135 | future, sonypi will be removed and replaced by sony-laptop. |
112 | 136 | ||
113 | * spicctrl, which is the userspace tool used to communicate with the | 137 | * spicctrl, which is the userspace tool used to communicate with the |
114 | sonypi driver (through /dev/sonypi) does not try to use the | 138 | sonypi driver (through /dev/sonypi) is deprecated as well since all |
115 | sony-laptop driver. In the future, spicctrl could try sonypi first, | 139 | its features are now available under the sysfs tree via sony-laptop. |
116 | and if it isn't present, try sony-laptop instead. | ||
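As a companion to the grep one-liner in the new RFkill section of this file, here is an illustrative C walk of /sys/class/rfkill that prints name and state for the sony-* switches; the "sony" prefix and the name/state attributes are taken from the documentation text, everything else is just a sketch.

```c
/*
 * Rough C equivalent of the grep shown in the RFkill section: walk
 * /sys/class/rfkill and print name/state for the sony-* entries.
 * Purely illustrative; the grep one-liner does the same job.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

static void read_attr(const char *rfdir, const char *attr, char *out, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/rfkill/%s/%s", rfdir, attr);
	out[0] = '\0';
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(out, (int)len, f))
		out[strcspn(out, "\n")] = '\0';	/* strip trailing newline */
	fclose(f);
}

int main(void)
{
	struct dirent *de;
	DIR *d = opendir("/sys/class/rfkill");

	if (!d) {
		perror("/sys/class/rfkill");
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		char name[64], state[16];

		if (de->d_name[0] == '.')
			continue;
		read_attr(de->d_name, "name", name, sizeof(name));
		read_attr(de->d_name, "state", state, sizeof(state));
		if (strncmp(name, "sony", 4) == 0)
			printf("%s: name=%s state=%s\n", de->d_name, name, state);
	}
	closedir(d);
	return 0;
}
```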
diff --git a/MAINTAINERS b/MAINTAINERS index 8aa1cacddbcc..6b4b9cdec370 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1157,14 +1157,14 @@ S: Maintained | |||
1157 | F: Documentation/hwmon/asc7621 | 1157 | F: Documentation/hwmon/asc7621 |
1158 | F: drivers/hwmon/asc7621.c | 1158 | F: drivers/hwmon/asc7621.c |
1159 | 1159 | ||
1160 | ASUS ACPI EXTRAS DRIVER | 1160 | ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS |
1161 | M: Corentin Chary <corentincj@iksaif.net> | 1161 | M: Corentin Chary <corentincj@iksaif.net> |
1162 | M: Karol Kozimor <sziwan@users.sourceforge.net> | ||
1163 | L: acpi4asus-user@lists.sourceforge.net | 1162 | L: acpi4asus-user@lists.sourceforge.net |
1164 | L: platform-driver-x86@vger.kernel.org | 1163 | L: platform-driver-x86@vger.kernel.org |
1165 | W: http://acpi4asus.sf.net | 1164 | W: http://acpi4asus.sf.net |
1166 | S: Maintained | 1165 | S: Maintained |
1167 | F: drivers/platform/x86/asus_acpi.c | 1166 | F: drivers/platform/x86/asus*.c |
1167 | F: drivers/platform/x86/eeepc*.c | ||
1168 | 1168 | ||
1169 | ASUS ASB100 HARDWARE MONITOR DRIVER | 1169 | ASUS ASB100 HARDWARE MONITOR DRIVER |
1170 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> | 1170 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> |
@@ -1172,14 +1172,6 @@ L: lm-sensors@lm-sensors.org | |||
1172 | S: Maintained | 1172 | S: Maintained |
1173 | F: drivers/hwmon/asb100.c | 1173 | F: drivers/hwmon/asb100.c |
1174 | 1174 | ||
1175 | ASUS LAPTOP EXTRAS DRIVER | ||
1176 | M: Corentin Chary <corentincj@iksaif.net> | ||
1177 | L: acpi4asus-user@lists.sourceforge.net | ||
1178 | L: platform-driver-x86@vger.kernel.org | ||
1179 | W: http://acpi4asus.sf.net | ||
1180 | S: Maintained | ||
1181 | F: drivers/platform/x86/asus-laptop.c | ||
1182 | |||
1183 | ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API | 1175 | ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API |
1184 | M: Dan Williams <dan.j.williams@intel.com> | 1176 | M: Dan Williams <dan.j.williams@intel.com> |
1185 | W: http://sourceforge.net/projects/xscaleiop | 1177 | W: http://sourceforge.net/projects/xscaleiop |
@@ -2414,22 +2406,6 @@ T: git git://git.alsa-project.org/alsa-kernel.git | |||
2414 | S: Maintained | 2406 | S: Maintained |
2415 | F: sound/usb/misc/ua101.c | 2407 | F: sound/usb/misc/ua101.c |
2416 | 2408 | ||
2417 | EEEPC LAPTOP EXTRAS DRIVER | ||
2418 | M: Corentin Chary <corentincj@iksaif.net> | ||
2419 | L: acpi4asus-user@lists.sourceforge.net | ||
2420 | L: platform-driver-x86@vger.kernel.org | ||
2421 | W: http://acpi4asus.sf.net | ||
2422 | S: Maintained | ||
2423 | F: drivers/platform/x86/eeepc-laptop.c | ||
2424 | |||
2425 | EEEPC WMI EXTRAS DRIVER | ||
2426 | M: Corentin Chary <corentincj@iksaif.net> | ||
2427 | L: acpi4asus-user@lists.sourceforge.net | ||
2428 | L: platform-driver-x86@vger.kernel.org | ||
2429 | W: http://acpi4asus.sf.net | ||
2430 | S: Maintained | ||
2431 | F: drivers/platform/x86/eeepc-wmi.c | ||
2432 | |||
2433 | EFIFB FRAMEBUFFER DRIVER | 2409 | EFIFB FRAMEBUFFER DRIVER |
2434 | L: linux-fbdev@vger.kernel.org | 2410 | L: linux-fbdev@vger.kernel.org |
2435 | M: Peter Jones <pjones@redhat.com> | 2411 | M: Peter Jones <pjones@redhat.com> |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 38 | 3 | SUBLEVEL = 39 |
4 | EXTRAVERSION = | 4 | EXTRAVERSION = -rc1 |
5 | NAME = Flesh-Eating Bats with Fangs | 5 | NAME = Flesh-Eating Bats with Fangs |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index cc31bec2e316..bd4160c57196 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig | |||
@@ -11,6 +11,7 @@ config ALPHA | |||
11 | select HAVE_GENERIC_HARDIRQS | 11 | select HAVE_GENERIC_HARDIRQS |
12 | select GENERIC_IRQ_PROBE | 12 | select GENERIC_IRQ_PROBE |
13 | select AUTO_IRQ_AFFINITY if SMP | 13 | select AUTO_IRQ_AFFINITY if SMP |
14 | select GENERIC_IRQ_SHOW | ||
14 | select GENERIC_HARDIRQS_NO_DEPRECATED | 15 | select GENERIC_HARDIRQS_NO_DEPRECATED |
15 | help | 16 | help |
16 | The Alpha is a 64-bit general-purpose processor designed and | 17 | The Alpha is a 64-bit general-purpose processor designed and |
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index a19d60082299..381431a2d6d9 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c | |||
@@ -67,68 +67,21 @@ int irq_select_affinity(unsigned int irq) | |||
67 | } | 67 | } |
68 | #endif /* CONFIG_SMP */ | 68 | #endif /* CONFIG_SMP */ |
69 | 69 | ||
70 | int | 70 | int arch_show_interrupts(struct seq_file *p, int prec) |
71 | show_interrupts(struct seq_file *p, void *v) | ||
72 | { | 71 | { |
73 | int j; | 72 | int j; |
74 | int irq = *(loff_t *) v; | ||
75 | struct irqaction * action; | ||
76 | struct irq_desc *desc; | ||
77 | unsigned long flags; | ||
78 | 73 | ||
79 | #ifdef CONFIG_SMP | 74 | #ifdef CONFIG_SMP |
80 | if (irq == 0) { | 75 | seq_puts(p, "IPI: "); |
81 | seq_puts(p, " "); | 76 | for_each_online_cpu(j) |
82 | for_each_online_cpu(j) | 77 | seq_printf(p, "%10lu ", cpu_data[j].ipi_count); |
83 | seq_printf(p, "CPU%d ", j); | 78 | seq_putc(p, '\n'); |
84 | seq_putc(p, '\n'); | ||
85 | } | ||
86 | #endif | ||
87 | |||
88 | if (irq < ACTUAL_NR_IRQS) { | ||
89 | desc = irq_to_desc(irq); | ||
90 | |||
91 | if (!desc) | ||
92 | return 0; | ||
93 | |||
94 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
95 | action = desc->action; | ||
96 | if (!action) | ||
97 | goto unlock; | ||
98 | seq_printf(p, "%3d: ", irq); | ||
99 | #ifndef CONFIG_SMP | ||
100 | seq_printf(p, "%10u ", kstat_irqs(irq)); | ||
101 | #else | ||
102 | for_each_online_cpu(j) | ||
103 | seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j)); | ||
104 | #endif | 79 | #endif |
105 | seq_printf(p, " %14s", get_irq_desc_chip(desc)->name); | 80 | seq_puts(p, "PMI: "); |
106 | seq_printf(p, " %c%s", | 81 | for_each_online_cpu(j) |
107 | (action->flags & IRQF_DISABLED)?'+':' ', | 82 | seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); |
108 | action->name); | 83 | seq_puts(p, " Performance Monitoring\n"); |
109 | 84 | seq_printf(p, "ERR: %10lu\n", irq_err_count); | |
110 | for (action=action->next; action; action = action->next) { | ||
111 | seq_printf(p, ", %c%s", | ||
112 | (action->flags & IRQF_DISABLED)?'+':' ', | ||
113 | action->name); | ||
114 | } | ||
115 | |||
116 | seq_putc(p, '\n'); | ||
117 | unlock: | ||
118 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
119 | } else if (irq == ACTUAL_NR_IRQS) { | ||
120 | #ifdef CONFIG_SMP | ||
121 | seq_puts(p, "IPI: "); | ||
122 | for_each_online_cpu(j) | ||
123 | seq_printf(p, "%10lu ", cpu_data[j].ipi_count); | ||
124 | seq_putc(p, '\n'); | ||
125 | #endif | ||
126 | seq_puts(p, "PMI: "); | ||
127 | for_each_online_cpu(j) | ||
128 | seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); | ||
129 | seq_puts(p, " Performance Monitoring\n"); | ||
130 | seq_printf(p, "ERR: %10lu\n", irq_err_count); | ||
131 | } | ||
132 | return 0; | 85 | return 0; |
133 | } | 86 | } |
134 | 87 | ||
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 411ca11d0a18..1479dc6ebd97 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c | |||
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = { | |||
228 | void __init | 228 | void __init |
229 | init_rtc_irq(void) | 229 | init_rtc_irq(void) |
230 | { | 230 | { |
231 | set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip, | 231 | irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip, |
232 | handle_simple_irq, "RTC"); | 232 | handle_simple_irq, "RTC"); |
233 | setup_irq(RTC_IRQ, &timer_irqaction); | 233 | setup_irq(RTC_IRQ, &timer_irqaction); |
234 | } | 234 | } |
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c index c7cc9813e45f..e1861c77dabc 100644 --- a/arch/alpha/kernel/irq_i8259.c +++ b/arch/alpha/kernel/irq_i8259.c | |||
@@ -92,7 +92,7 @@ init_i8259a_irqs(void) | |||
92 | outb(0xff, 0xA1); /* mask all of 8259A-2 */ | 92 | outb(0xff, 0xA1); /* mask all of 8259A-2 */ |
93 | 93 | ||
94 | for (i = 0; i < 16; i++) { | 94 | for (i = 0; i < 16; i++) { |
95 | set_irq_chip_and_handler(i, &i8259a_irq_type, handle_level_irq); | 95 | irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq); |
96 | } | 96 | } |
97 | 97 | ||
98 | setup_irq(2, &cascade); | 98 | setup_irq(2, &cascade); |
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c index b30227fa7f5f..13c97a5b31e8 100644 --- a/arch/alpha/kernel/irq_pyxis.c +++ b/arch/alpha/kernel/irq_pyxis.c | |||
@@ -102,7 +102,7 @@ init_pyxis_irqs(unsigned long ignore_mask) | |||
102 | for (i = 16; i < 48; ++i) { | 102 | for (i = 16; i < 48; ++i) { |
103 | if ((ignore_mask >> i) & 1) | 103 | if ((ignore_mask >> i) & 1) |
104 | continue; | 104 | continue; |
105 | set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); | 105 | irq_set_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); |
106 | irq_set_status_flags(i, IRQ_LEVEL); | 106 | irq_set_status_flags(i, IRQ_LEVEL); |
107 | } | 107 | } |
108 | 108 | ||
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c index 82a47bba41c4..a79fa30e7552 100644 --- a/arch/alpha/kernel/irq_srm.c +++ b/arch/alpha/kernel/irq_srm.c | |||
@@ -51,7 +51,7 @@ init_srm_irqs(long max, unsigned long ignore_mask) | |||
51 | for (i = 16; i < max; ++i) { | 51 | for (i = 16; i < max; ++i) { |
52 | if (i < 64 && ((ignore_mask >> i) & 1)) | 52 | if (i < 64 && ((ignore_mask >> i) & 1)) |
53 | continue; | 53 | continue; |
54 | set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); | 54 | irq_set_chip_and_handler(i, &srm_irq_type, handle_level_irq); |
55 | irq_set_status_flags(i, IRQ_LEVEL); | 55 | irq_set_status_flags(i, IRQ_LEVEL); |
56 | } | 56 | } |
57 | } | 57 | } |
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index 88d95e872f55..0e1439904cdb 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c | |||
@@ -125,7 +125,7 @@ alcor_init_irq(void) | |||
125 | on while IRQ probing. */ | 125 | on while IRQ probing. */ |
126 | if (i >= 16+20 && i <= 16+30) | 126 | if (i >= 16+20 && i <= 16+30) |
127 | continue; | 127 | continue; |
128 | set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); | 128 | irq_set_chip_and_handler(i, &alcor_irq_type, handle_level_irq); |
129 | irq_set_status_flags(i, IRQ_LEVEL); | 129 | irq_set_status_flags(i, IRQ_LEVEL); |
130 | } | 130 | } |
131 | i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq; | 131 | i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq; |
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index 57eb6307bc27..c8c112d51584 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c | |||
@@ -105,8 +105,8 @@ common_init_irq(void (*srm_dev_int)(unsigned long v)) | |||
105 | outb(0xff, 0x806); | 105 | outb(0xff, 0x806); |
106 | 106 | ||
107 | for (i = 16; i < 35; ++i) { | 107 | for (i = 16; i < 35; ++i) { |
108 | set_irq_chip_and_handler(i, &cabriolet_irq_type, | 108 | irq_set_chip_and_handler(i, &cabriolet_irq_type, |
109 | handle_level_irq); | 109 | handle_level_irq); |
110 | irq_set_status_flags(i, IRQ_LEVEL); | 110 | irq_set_status_flags(i, IRQ_LEVEL); |
111 | } | 111 | } |
112 | } | 112 | } |
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index 481df4ecb651..5ac00fd4cd0c 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c | |||
@@ -270,7 +270,7 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax) | |||
270 | { | 270 | { |
271 | long i; | 271 | long i; |
272 | for (i = imin; i <= imax; ++i) { | 272 | for (i = imin; i <= imax; ++i) { |
273 | set_irq_chip_and_handler(i, ops, handle_level_irq); | 273 | irq_set_chip_and_handler(i, ops, handle_level_irq); |
274 | irq_set_status_flags(i, IRQ_LEVEL); | 274 | irq_set_status_flags(i, IRQ_LEVEL); |
275 | } | 275 | } |
276 | } | 276 | } |
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index 402e908ffb3e..a7a23b40eec5 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c | |||
@@ -118,7 +118,7 @@ eb64p_init_irq(void) | |||
118 | init_i8259a_irqs(); | 118 | init_i8259a_irqs(); |
119 | 119 | ||
120 | for (i = 16; i < 32; ++i) { | 120 | for (i = 16; i < 32; ++i) { |
121 | set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); | 121 | irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); |
122 | irq_set_status_flags(i, IRQ_LEVEL); | 122 | irq_set_status_flags(i, IRQ_LEVEL); |
123 | } | 123 | } |
124 | 124 | ||
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index 0b44a54c1522..a60cd5b2621e 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c | |||
@@ -138,7 +138,7 @@ eiger_init_irq(void) | |||
138 | init_i8259a_irqs(); | 138 | init_i8259a_irqs(); |
139 | 139 | ||
140 | for (i = 16; i < 128; ++i) { | 140 | for (i = 16; i < 128; ++i) { |
141 | set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); | 141 | irq_set_chip_and_handler(i, &eiger_irq_type, handle_level_irq); |
142 | irq_set_status_flags(i, IRQ_LEVEL); | 142 | irq_set_status_flags(i, IRQ_LEVEL); |
143 | } | 143 | } |
144 | } | 144 | } |
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index 00341b75c8b2..7f1a87f176e2 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c | |||
@@ -171,11 +171,11 @@ jensen_init_irq(void) | |||
171 | { | 171 | { |
172 | init_i8259a_irqs(); | 172 | init_i8259a_irqs(); |
173 | 173 | ||
174 | set_irq_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq); | 174 | irq_set_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq); |
175 | set_irq_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq); | 175 | irq_set_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq); |
176 | set_irq_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq); | 176 | irq_set_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq); |
177 | set_irq_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq); | 177 | irq_set_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq); |
178 | set_irq_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq); | 178 | irq_set_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq); |
179 | 179 | ||
180 | common_init_isa_dma(); | 180 | common_init_isa_dma(); |
181 | } | 181 | } |
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index e61910734e41..388b99d1779d 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c | |||
@@ -276,7 +276,7 @@ init_io7_irqs(struct io7 *io7, | |||
276 | 276 | ||
277 | /* Set up the lsi irqs. */ | 277 | /* Set up the lsi irqs. */ |
278 | for (i = 0; i < 128; ++i) { | 278 | for (i = 0; i < 128; ++i) { |
279 | set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); | 279 | irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq); |
280 | irq_set_status_flags(i, IRQ_LEVEL); | 280 | irq_set_status_flags(i, IRQ_LEVEL); |
281 | } | 281 | } |
282 | 282 | ||
@@ -290,7 +290,7 @@ init_io7_irqs(struct io7 *io7, | |||
290 | 290 | ||
291 | /* Set up the msi irqs. */ | 291 | /* Set up the msi irqs. */ |
292 | for (i = 128; i < (128 + 512); ++i) { | 292 | for (i = 128; i < (128 + 512); ++i) { |
293 | set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); | 293 | irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq); |
294 | irq_set_status_flags(i, IRQ_LEVEL); | 294 | irq_set_status_flags(i, IRQ_LEVEL); |
295 | } | 295 | } |
296 | 296 | ||
@@ -308,8 +308,8 @@ marvel_init_irq(void) | |||
308 | 308 | ||
309 | /* Reserve the legacy irqs. */ | 309 | /* Reserve the legacy irqs. */ |
310 | for (i = 0; i < 16; ++i) { | 310 | for (i = 0; i < 16; ++i) { |
311 | set_irq_chip_and_handler(i, &marvel_legacy_irq_type, | 311 | irq_set_chip_and_handler(i, &marvel_legacy_irq_type, |
312 | handle_level_irq); | 312 | handle_level_irq); |
313 | } | 313 | } |
314 | 314 | ||
315 | /* Init the io7 irqs. */ | 315 | /* Init the io7 irqs. */ |
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c index cf7f43dd3147..0e6e4697a025 100644 --- a/arch/alpha/kernel/sys_mikasa.c +++ b/arch/alpha/kernel/sys_mikasa.c | |||
@@ -98,7 +98,8 @@ mikasa_init_irq(void) | |||
98 | mikasa_update_irq_hw(0); | 98 | mikasa_update_irq_hw(0); |
99 | 99 | ||
100 | for (i = 16; i < 32; ++i) { | 100 | for (i = 16; i < 32; ++i) { |
101 | set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); | 101 | irq_set_chip_and_handler(i, &mikasa_irq_type, |
102 | handle_level_irq); | ||
102 | irq_set_status_flags(i, IRQ_LEVEL); | 103 | irq_set_status_flags(i, IRQ_LEVEL); |
103 | } | 104 | } |
104 | 105 | ||
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index 92bc188e94a9..a00ac7087167 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c | |||
@@ -127,7 +127,8 @@ noritake_init_irq(void) | |||
127 | outw(0, 0x54c); | 127 | outw(0, 0x54c); |
128 | 128 | ||
129 | for (i = 16; i < 48; ++i) { | 129 | for (i = 16; i < 48; ++i) { |
130 | set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); | 130 | irq_set_chip_and_handler(i, &noritake_irq_type, |
131 | handle_level_irq); | ||
131 | irq_set_status_flags(i, IRQ_LEVEL); | 132 | irq_set_status_flags(i, IRQ_LEVEL); |
132 | } | 133 | } |
133 | 134 | ||
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index 936d4140ed5f..7f52161f3d88 100644 --- a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c | |||
@@ -180,7 +180,8 @@ rawhide_init_irq(void) | |||
180 | } | 180 | } |
181 | 181 | ||
182 | for (i = 16; i < 128; ++i) { | 182 | for (i = 16; i < 128; ++i) { |
183 | set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); | 183 | irq_set_chip_and_handler(i, &rawhide_irq_type, |
184 | handle_level_irq); | ||
184 | irq_set_status_flags(i, IRQ_LEVEL); | 185 | irq_set_status_flags(i, IRQ_LEVEL); |
185 | } | 186 | } |
186 | 187 | ||
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index cea22a62913b..216d94d9c0c1 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c | |||
@@ -99,7 +99,7 @@ rx164_init_irq(void) | |||
99 | 99 | ||
100 | rx164_update_irq_hw(0); | 100 | rx164_update_irq_hw(0); |
101 | for (i = 16; i < 40; ++i) { | 101 | for (i = 16; i < 40; ++i) { |
102 | set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); | 102 | irq_set_chip_and_handler(i, &rx164_irq_type, handle_level_irq); |
103 | irq_set_status_flags(i, IRQ_LEVEL); | 103 | irq_set_status_flags(i, IRQ_LEVEL); |
104 | } | 104 | } |
105 | 105 | ||
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index a349538aabc9..da714e427c5f 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c | |||
@@ -518,8 +518,8 @@ sable_lynx_init_irq(int nr_of_irqs) | |||
518 | long i; | 518 | long i; |
519 | 519 | ||
520 | for (i = 0; i < nr_of_irqs; ++i) { | 520 | for (i = 0; i < nr_of_irqs; ++i) { |
521 | set_irq_chip_and_handler(i, &sable_lynx_irq_type, | 521 | irq_set_chip_and_handler(i, &sable_lynx_irq_type, |
522 | handle_level_irq); | 522 | handle_level_irq); |
523 | irq_set_status_flags(i, IRQ_LEVEL); | 523 | irq_set_status_flags(i, IRQ_LEVEL); |
524 | } | 524 | } |
525 | 525 | ||
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index 42a5331f13c4..a31f8cd9bd6b 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c | |||
@@ -138,7 +138,8 @@ takara_init_irq(void) | |||
138 | takara_update_irq_hw(i, -1); | 138 | takara_update_irq_hw(i, -1); |
139 | 139 | ||
140 | for (i = 16; i < 128; ++i) { | 140 | for (i = 16; i < 128; ++i) { |
141 | set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); | 141 | irq_set_chip_and_handler(i, &takara_irq_type, |
142 | handle_level_irq); | ||
142 | irq_set_status_flags(i, IRQ_LEVEL); | 143 | irq_set_status_flags(i, IRQ_LEVEL); |
143 | } | 144 | } |
144 | 145 | ||
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 8c13a0c77830..fea0e4620994 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c | |||
@@ -179,7 +179,7 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax) | |||
179 | { | 179 | { |
180 | long i; | 180 | long i; |
181 | for (i = imin; i <= imax; ++i) { | 181 | for (i = imin; i <= imax; ++i) { |
182 | set_irq_chip_and_handler(i, ops, handle_level_irq); | 182 | irq_set_chip_and_handler(i, ops, handle_level_irq); |
183 | irq_set_status_flags(i, IRQ_LEVEL); | 183 | irq_set_status_flags(i, IRQ_LEVEL); |
184 | } | 184 | } |
185 | } | 185 | } |
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index ca60a387ef0a..d3cb28bb8eb0 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c | |||
@@ -183,17 +183,17 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) | |||
183 | for (i = 0; i < 16; ++i) { | 183 | for (i = 0; i < 16; ++i) { |
184 | if (i == 2) | 184 | if (i == 2) |
185 | continue; | 185 | continue; |
186 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, | 186 | irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type, |
187 | handle_level_irq); | 187 | handle_level_irq); |
188 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); | 188 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); |
189 | } | 189 | } |
190 | 190 | ||
191 | set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, | 191 | irq_set_chip_and_handler(36 + irq_bias, &wildfire_irq_type, |
192 | handle_level_irq); | 192 | handle_level_irq); |
193 | irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); | 193 | irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); |
194 | for (i = 40; i < 64; ++i) { | 194 | for (i = 40; i < 64; ++i) { |
195 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, | 195 | irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type, |
196 | handle_level_irq); | 196 | handle_level_irq); |
197 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); | 197 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); |
198 | } | 198 | } |
199 | 199 | ||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 93d595a7477a..7c0effb69fc7 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -28,6 +28,7 @@ config ARM | |||
28 | select HAVE_C_RECORDMCOUNT | 28 | select HAVE_C_RECORDMCOUNT |
29 | select HAVE_GENERIC_HARDIRQS | 29 | select HAVE_GENERIC_HARDIRQS |
30 | select HAVE_SPARSE_IRQ | 30 | select HAVE_SPARSE_IRQ |
31 | select GENERIC_IRQ_SHOW | ||
31 | help | 32 | help |
32 | The ARM series is a line of low-power-consumption RISC chip designs | 33 | The ARM series is a line of low-power-consumption RISC chip designs |
33 | licensed by ARM Ltd and targeted at embedded applications and | 34 | licensed by ARM Ltd and targeted at embedded applications and |
@@ -2009,6 +2010,7 @@ menu "Power management options" | |||
2009 | source "kernel/power/Kconfig" | 2010 | source "kernel/power/Kconfig" |
2010 | 2011 | ||
2011 | config ARCH_SUSPEND_POSSIBLE | 2012 | config ARCH_SUSPEND_POSSIBLE |
2013 | depends on !ARCH_S5P64X0 && !ARCH_S5P6442 | ||
2012 | def_bool y | 2014 | def_bool y |
2013 | 2015 | ||
2014 | endmenu | 2016 | endmenu |
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index cb6b041c39d2..f70ec7dadebb 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -213,8 +213,8 @@ static int gic_set_wake(struct irq_data *d, unsigned int on) | |||
213 | 213 | ||
214 | static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | 214 | static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) |
215 | { | 215 | { |
216 | struct gic_chip_data *chip_data = get_irq_data(irq); | 216 | struct gic_chip_data *chip_data = irq_get_handler_data(irq); |
217 | struct irq_chip *chip = get_irq_chip(irq); | 217 | struct irq_chip *chip = irq_get_chip(irq); |
218 | unsigned int cascade_irq, gic_irq; | 218 | unsigned int cascade_irq, gic_irq; |
219 | unsigned long status; | 219 | unsigned long status; |
220 | 220 | ||
@@ -257,9 +257,9 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) | |||
257 | { | 257 | { |
258 | if (gic_nr >= MAX_GIC_NR) | 258 | if (gic_nr >= MAX_GIC_NR) |
259 | BUG(); | 259 | BUG(); |
260 | if (set_irq_data(irq, &gic_data[gic_nr]) != 0) | 260 | if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) |
261 | BUG(); | 261 | BUG(); |
262 | set_irq_chained_handler(irq, gic_handle_cascade_irq); | 262 | irq_set_chained_handler(irq, gic_handle_cascade_irq); |
263 | } | 263 | } |
264 | 264 | ||
265 | static void __init gic_dist_init(struct gic_chip_data *gic, | 265 | static void __init gic_dist_init(struct gic_chip_data *gic, |
@@ -319,9 +319,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic, | |||
319 | * Setup the Linux IRQ subsystem. | 319 | * Setup the Linux IRQ subsystem. |
320 | */ | 320 | */ |
321 | for (i = irq_start; i < irq_limit; i++) { | 321 | for (i = irq_start; i < irq_limit; i++) { |
322 | set_irq_chip(i, &gic_chip); | 322 | irq_set_chip_and_handler(i, &gic_chip, handle_level_irq); |
323 | set_irq_chip_data(i, gic); | 323 | irq_set_chip_data(i, gic); |
324 | set_irq_handler(i, handle_level_irq); | ||
325 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 324 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
326 | } | 325 | } |
327 | 326 | ||
@@ -382,7 +381,7 @@ void __cpuinit gic_enable_ppi(unsigned int irq) | |||
382 | unsigned long flags; | 381 | unsigned long flags; |
383 | 382 | ||
384 | local_irq_save(flags); | 383 | local_irq_save(flags); |
385 | irq_to_desc(irq)->status |= IRQ_NOPROBE; | 384 | irq_set_status_flags(irq, IRQ_NOPROBE); |
386 | gic_unmask_irq(irq_get_irq_data(irq)); | 385 | gic_unmask_irq(irq_get_irq_data(irq)); |
387 | local_irq_restore(flags); | 386 | local_irq_restore(flags); |
388 | } | 387 | } |
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index fcddd48fe9da..7a21927c52e1 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c | |||
@@ -88,8 +88,8 @@ void it8152_init_irq(void) | |||
88 | __raw_writel((0), IT8152_INTC_LDCNIRR); | 88 | __raw_writel((0), IT8152_INTC_LDCNIRR); |
89 | 89 | ||
90 | for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) { | 90 | for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) { |
91 | set_irq_chip(irq, &it8152_irq_chip); | 91 | irq_set_chip_and_handler(irq, &it8152_irq_chip, |
92 | set_irq_handler(irq, handle_level_irq); | 92 | handle_level_irq); |
93 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 93 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
94 | } | 94 | } |
95 | } | 95 | } |
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index a026a6bf4892..b55c3625d7ee 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c | |||
@@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = { | |||
140 | 140 | ||
141 | static void locomo_handler(unsigned int irq, struct irq_desc *desc) | 141 | static void locomo_handler(unsigned int irq, struct irq_desc *desc) |
142 | { | 142 | { |
143 | struct locomo *lchip = get_irq_chip_data(irq); | 143 | struct locomo *lchip = irq_get_chip_data(irq); |
144 | int req, i; | 144 | int req, i; |
145 | 145 | ||
146 | /* Acknowledge the parent IRQ */ | 146 | /* Acknowledge the parent IRQ */ |
@@ -197,15 +197,14 @@ static void locomo_setup_irq(struct locomo *lchip) | |||
197 | /* | 197 | /* |
198 | * Install handler for IRQ_LOCOMO_HW. | 198 | * Install handler for IRQ_LOCOMO_HW. |
199 | */ | 199 | */ |
200 | set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); | 200 | irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); |
201 | set_irq_chip_data(lchip->irq, lchip); | 201 | irq_set_chip_data(lchip->irq, lchip); |
202 | set_irq_chained_handler(lchip->irq, locomo_handler); | 202 | irq_set_chained_handler(lchip->irq, locomo_handler); |
203 | 203 | ||
204 | /* Install handlers for IRQ_LOCOMO_* */ | 204 | /* Install handlers for IRQ_LOCOMO_* */ |
205 | for ( ; irq <= lchip->irq_base + 3; irq++) { | 205 | for ( ; irq <= lchip->irq_base + 3; irq++) { |
206 | set_irq_chip(irq, &locomo_chip); | 206 | irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); |
207 | set_irq_chip_data(irq, lchip); | 207 | irq_set_chip_data(irq, lchip); |
208 | set_irq_handler(irq, handle_level_irq); | ||
209 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 208 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
210 | } | 209 | } |
211 | } | 210 | } |
@@ -476,8 +475,8 @@ static void __locomo_remove(struct locomo *lchip) | |||
476 | device_for_each_child(lchip->dev, NULL, locomo_remove_child); | 475 | device_for_each_child(lchip->dev, NULL, locomo_remove_child); |
477 | 476 | ||
478 | if (lchip->irq != NO_IRQ) { | 477 | if (lchip->irq != NO_IRQ) { |
479 | set_irq_chained_handler(lchip->irq, NULL); | 478 | irq_set_chained_handler(lchip->irq, NULL); |
480 | set_irq_data(lchip->irq, NULL); | 479 | irq_set_handler_data(lchip->irq, NULL); |
481 | } | 480 | } |
482 | 481 | ||
483 | iounmap(lchip->base); | 482 | iounmap(lchip->base); |
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index eb9796b0dab2..a12b33c0dc42 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c | |||
@@ -202,7 +202,7 @@ static void | |||
202 | sa1111_irq_handler(unsigned int irq, struct irq_desc *desc) | 202 | sa1111_irq_handler(unsigned int irq, struct irq_desc *desc) |
203 | { | 203 | { |
204 | unsigned int stat0, stat1, i; | 204 | unsigned int stat0, stat1, i; |
205 | struct sa1111 *sachip = get_irq_data(irq); | 205 | struct sa1111 *sachip = irq_get_handler_data(irq); |
206 | void __iomem *mapbase = sachip->base + SA1111_INTC; | 206 | void __iomem *mapbase = sachip->base + SA1111_INTC; |
207 | 207 | ||
208 | stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0); | 208 | stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0); |
@@ -472,25 +472,25 @@ static void sa1111_setup_irq(struct sa1111 *sachip) | |||
472 | sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1); | 472 | sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1); |
473 | 473 | ||
474 | for (irq = IRQ_GPAIN0; irq <= SSPROR; irq++) { | 474 | for (irq = IRQ_GPAIN0; irq <= SSPROR; irq++) { |
475 | set_irq_chip(irq, &sa1111_low_chip); | 475 | irq_set_chip_and_handler(irq, &sa1111_low_chip, |
476 | set_irq_chip_data(irq, sachip); | 476 | handle_edge_irq); |
477 | set_irq_handler(irq, handle_edge_irq); | 477 | irq_set_chip_data(irq, sachip); |
478 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 478 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
479 | } | 479 | } |
480 | 480 | ||
481 | for (irq = AUDXMTDMADONEA; irq <= IRQ_S1_BVD1_STSCHG; irq++) { | 481 | for (irq = AUDXMTDMADONEA; irq <= IRQ_S1_BVD1_STSCHG; irq++) { |
482 | set_irq_chip(irq, &sa1111_high_chip); | 482 | irq_set_chip_and_handler(irq, &sa1111_high_chip, |
483 | set_irq_chip_data(irq, sachip); | 483 | handle_edge_irq); |
484 | set_irq_handler(irq, handle_edge_irq); | 484 | irq_set_chip_data(irq, sachip); |
485 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 485 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
486 | } | 486 | } |
487 | 487 | ||
488 | /* | 488 | /* |
489 | * Register SA1111 interrupt | 489 | * Register SA1111 interrupt |
490 | */ | 490 | */ |
491 | set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING); | 491 | irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING); |
492 | set_irq_data(sachip->irq, sachip); | 492 | irq_set_handler_data(sachip->irq, sachip); |
493 | set_irq_chained_handler(sachip->irq, sa1111_irq_handler); | 493 | irq_set_chained_handler(sachip->irq, sa1111_irq_handler); |
494 | } | 494 | } |
495 | 495 | ||
496 | /* | 496 | /* |
@@ -815,8 +815,8 @@ static void __sa1111_remove(struct sa1111 *sachip) | |||
815 | clk_disable(sachip->clk); | 815 | clk_disable(sachip->clk); |
816 | 816 | ||
817 | if (sachip->irq != NO_IRQ) { | 817 | if (sachip->irq != NO_IRQ) { |
818 | set_irq_chained_handler(sachip->irq, NULL); | 818 | irq_set_chained_handler(sachip->irq, NULL); |
819 | set_irq_data(sachip->irq, NULL); | 819 | irq_set_handler_data(sachip->irq, NULL); |
820 | 820 | ||
821 | release_mem_region(sachip->phys + SA1111_INTC, 512); | 821 | release_mem_region(sachip->phys + SA1111_INTC, 512); |
822 | } | 822 | } |
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index ae5fe7292e0d..113085a77123 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c | |||
@@ -305,9 +305,9 @@ static void __init vic_set_irq_sources(void __iomem *base, | |||
305 | if (vic_sources & (1 << i)) { | 305 | if (vic_sources & (1 << i)) { |
306 | unsigned int irq = irq_start + i; | 306 | unsigned int irq = irq_start + i; |
307 | 307 | ||
308 | set_irq_chip(irq, &vic_chip); | 308 | irq_set_chip_and_handler(irq, &vic_chip, |
309 | set_irq_chip_data(irq, base); | 309 | handle_level_irq); |
310 | set_irq_handler(irq, handle_level_irq); | 310 | irq_set_chip_data(irq, base); |
311 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 311 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
312 | } | 312 | } |
313 | } | 313 | } |
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index 5586b7c8ef6f..a71b417b1856 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h | |||
@@ -10,14 +10,6 @@ static inline void ack_bad_irq(int irq) | |||
10 | irq_err_count++; | 10 | irq_err_count++; |
11 | } | 11 | } |
12 | 12 | ||
13 | /* | ||
14 | * Obsolete inline function for calling irq descriptor handlers. | ||
15 | */ | ||
16 | static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc) | ||
17 | { | ||
18 | desc->handle_irq(irq, desc); | ||
19 | } | ||
20 | |||
21 | void set_irq_flags(unsigned int irq, unsigned int flags); | 13 | void set_irq_flags(unsigned int irq, unsigned int flags); |
22 | 14 | ||
23 | #define IRQF_VALID (1 << 0) | 15 | #define IRQF_VALID (1 << 0) |
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index d86fcd44b220..e4ee050aad7d 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c | |||
@@ -159,31 +159,6 @@ static void __devinit pci_fixup_dec21285(struct pci_dev *dev) | |||
159 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285); | 159 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285); |
160 | 160 | ||
161 | /* | 161 | /* |
162 | * Same as above. The PrPMC800 carrier board for the PrPMC1100 | ||
163 | * card maps the host-bridge @ 00:01:00 for some reason and it | ||
164 | * ends up getting scanned. Note that we only want to do this | ||
165 | * fixup when we find the IXP4xx on a PrPMC system, which is why | ||
166 | * we check the machine type. We could be running on a board | ||
167 | * with an IXP4xx target device and we don't want to kill the | ||
168 | * resources in that case. | ||
169 | */ | ||
170 | static void __devinit pci_fixup_prpmc1100(struct pci_dev *dev) | ||
171 | { | ||
172 | int i; | ||
173 | |||
174 | if (machine_is_prpmc1100()) { | ||
175 | dev->class &= 0xff; | ||
176 | dev->class |= PCI_CLASS_BRIDGE_HOST << 8; | ||
177 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
178 | dev->resource[i].start = 0; | ||
179 | dev->resource[i].end = 0; | ||
180 | dev->resource[i].flags = 0; | ||
181 | } | ||
182 | } | ||
183 | } | ||
184 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP4XX, pci_fixup_prpmc1100); | ||
185 | |||
186 | /* | ||
187 | * PCI IDE controllers use non-standard I/O port decoding, respect it. | 162 | * PCI IDE controllers use non-standard I/O port decoding, respect it. |
188 | */ | 163 | */ |
189 | static void __devinit pci_fixup_ide_bases(struct pci_dev *dev) | 164 | static void __devinit pci_fixup_ide_bases(struct pci_dev *dev) |
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c index 2ad62df37730..d16500110ee9 100644 --- a/arch/arm/kernel/ecard.c +++ b/arch/arm/kernel/ecard.c | |||
@@ -1043,8 +1043,8 @@ ecard_probe(int slot, card_type_t type) | |||
1043 | */ | 1043 | */ |
1044 | if (slot < 8) { | 1044 | if (slot < 8) { |
1045 | ec->irq = 32 + slot; | 1045 | ec->irq = 32 + slot; |
1046 | set_irq_chip(ec->irq, &ecard_chip); | 1046 | irq_set_chip_and_handler(ec->irq, &ecard_chip, |
1047 | set_irq_handler(ec->irq, handle_level_irq); | 1047 | handle_level_irq); |
1048 | set_irq_flags(ec->irq, IRQF_VALID); | 1048 | set_irq_flags(ec->irq, IRQF_VALID); |
1049 | } | 1049 | } |
1050 | 1050 | ||
@@ -1103,7 +1103,7 @@ static int __init ecard_init(void) | |||
1103 | 1103 | ||
1104 | irqhw = ecard_probeirqhw(); | 1104 | irqhw = ecard_probeirqhw(); |
1105 | 1105 | ||
1106 | set_irq_chained_handler(IRQ_EXPANSIONCARD, | 1106 | irq_set_chained_handler(IRQ_EXPANSIONCARD, |
1107 | irqhw ? ecard_irqexp_handler : ecard_irq_handler); | 1107 | irqhw ? ecard_irqexp_handler : ecard_irq_handler); |
1108 | 1108 | ||
1109 | ecard_proc_init(); | 1109 | ecard_proc_init(); |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 3535d3793e65..83bbad03fcc6 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -51,63 +51,18 @@ | |||
51 | 51 | ||
52 | unsigned long irq_err_count; | 52 | unsigned long irq_err_count; |
53 | 53 | ||
54 | int show_interrupts(struct seq_file *p, void *v) | 54 | int arch_show_interrupts(struct seq_file *p, int prec) |
55 | { | 55 | { |
56 | int i = *(loff_t *) v, cpu; | ||
57 | struct irq_desc *desc; | ||
58 | struct irqaction * action; | ||
59 | unsigned long flags; | ||
60 | int prec, n; | ||
61 | |||
62 | for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++) | ||
63 | n *= 10; | ||
64 | |||
65 | #ifdef CONFIG_SMP | ||
66 | if (prec < 4) | ||
67 | prec = 4; | ||
68 | #endif | ||
69 | |||
70 | if (i == 0) { | ||
71 | char cpuname[12]; | ||
72 | |||
73 | seq_printf(p, "%*s ", prec, ""); | ||
74 | for_each_present_cpu(cpu) { | ||
75 | sprintf(cpuname, "CPU%d", cpu); | ||
76 | seq_printf(p, " %10s", cpuname); | ||
77 | } | ||
78 | seq_putc(p, '\n'); | ||
79 | } | ||
80 | |||
81 | if (i < nr_irqs) { | ||
82 | desc = irq_to_desc(i); | ||
83 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
84 | action = desc->action; | ||
85 | if (!action) | ||
86 | goto unlock; | ||
87 | |||
88 | seq_printf(p, "%*d: ", prec, i); | ||
89 | for_each_present_cpu(cpu) | ||
90 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); | ||
91 | seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-"); | ||
92 | seq_printf(p, " %s", action->name); | ||
93 | for (action = action->next; action; action = action->next) | ||
94 | seq_printf(p, ", %s", action->name); | ||
95 | |||
96 | seq_putc(p, '\n'); | ||
97 | unlock: | ||
98 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
99 | } else if (i == nr_irqs) { | ||
100 | #ifdef CONFIG_FIQ | 56 | #ifdef CONFIG_FIQ |
101 | show_fiq_list(p, prec); | 57 | show_fiq_list(p, prec); |
102 | #endif | 58 | #endif |
103 | #ifdef CONFIG_SMP | 59 | #ifdef CONFIG_SMP |
104 | show_ipi_list(p, prec); | 60 | show_ipi_list(p, prec); |
105 | #endif | 61 | #endif |
106 | #ifdef CONFIG_LOCAL_TIMERS | 62 | #ifdef CONFIG_LOCAL_TIMERS |
107 | show_local_irqs(p, prec); | 63 | show_local_irqs(p, prec); |
108 | #endif | 64 | #endif |
109 | seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); | 65 | seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); |
110 | } | ||
111 | return 0; | 66 | return 0; |
112 | } | 67 | } |
113 | 68 | ||
@@ -144,24 +99,21 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs) | |||
144 | 99 | ||
145 | void set_irq_flags(unsigned int irq, unsigned int iflags) | 100 | void set_irq_flags(unsigned int irq, unsigned int iflags) |
146 | { | 101 | { |
147 | struct irq_desc *desc; | 102 | unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; |
148 | unsigned long flags; | ||
149 | 103 | ||
150 | if (irq >= nr_irqs) { | 104 | if (irq >= nr_irqs) { |
151 | printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); | 105 | printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); |
152 | return; | 106 | return; |
153 | } | 107 | } |
154 | 108 | ||
155 | desc = irq_to_desc(irq); | ||
156 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
157 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; | ||
158 | if (iflags & IRQF_VALID) | 109 | if (iflags & IRQF_VALID) |
159 | desc->status &= ~IRQ_NOREQUEST; | 110 | clr |= IRQ_NOREQUEST; |
160 | if (iflags & IRQF_PROBE) | 111 | if (iflags & IRQF_PROBE) |
161 | desc->status &= ~IRQ_NOPROBE; | 112 | clr |= IRQ_NOPROBE; |
162 | if (!(iflags & IRQF_NOAUTOEN)) | 113 | if (!(iflags & IRQF_NOAUTOEN)) |
163 | desc->status &= ~IRQ_NOAUTOEN; | 114 | clr |= IRQ_NOAUTOEN; |
164 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 115 | /* Order is clear bits in "clr" then set bits in "set" */ |
116 | irq_modify_status(irq, clr, set & ~clr); | ||
165 | } | 117 | } |
166 | 118 | ||
167 | void __init init_IRQ(void) | 119 | void __init init_IRQ(void) |
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c index d1f775e86353..9ffbf3a2dfea 100644 --- a/arch/arm/mach-at91/at91cap9_devices.c +++ b/arch/arm/mach-at91/at91cap9_devices.c | |||
@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) | |||
72 | return; | 72 | return; |
73 | 73 | ||
74 | if (cpu_is_at91cap9_revB()) | 74 | if (cpu_is_at91cap9_revB()) |
75 | set_irq_type(AT91CAP9_ID_UHP, IRQ_TYPE_LEVEL_HIGH); | 75 | irq_set_irq_type(AT91CAP9_ID_UHP, IRQ_TYPE_LEVEL_HIGH); |
76 | 76 | ||
77 | /* Enable VBus control for UHP ports */ | 77 | /* Enable VBus control for UHP ports */ |
78 | for (i = 0; i < data->ports; i++) { | 78 | for (i = 0; i < data->ports; i++) { |
@@ -157,7 +157,7 @@ static struct platform_device at91_usba_udc_device = { | |||
157 | void __init at91_add_device_usba(struct usba_platform_data *data) | 157 | void __init at91_add_device_usba(struct usba_platform_data *data) |
158 | { | 158 | { |
159 | if (cpu_is_at91cap9_revB()) { | 159 | if (cpu_is_at91cap9_revB()) { |
160 | set_irq_type(AT91CAP9_ID_UDPHS, IRQ_TYPE_LEVEL_HIGH); | 160 | irq_set_irq_type(AT91CAP9_ID_UDPHS, IRQ_TYPE_LEVEL_HIGH); |
161 | at91_sys_write(AT91_MATRIX_UDPHS, AT91_MATRIX_SELECT_UDPHS | | 161 | at91_sys_write(AT91_MATRIX_UDPHS, AT91_MATRIX_SELECT_UDPHS | |
162 | AT91_MATRIX_UDPHS_BYPASS_LOCK); | 162 | AT91_MATRIX_UDPHS_BYPASS_LOCK); |
163 | } | 163 | } |
@@ -861,7 +861,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) | |||
861 | return; | 861 | return; |
862 | 862 | ||
863 | if (cpu_is_at91cap9_revB()) | 863 | if (cpu_is_at91cap9_revB()) |
864 | set_irq_type(AT91CAP9_ID_LCDC, IRQ_TYPE_LEVEL_HIGH); | 864 | irq_set_irq_type(AT91CAP9_ID_LCDC, IRQ_TYPE_LEVEL_HIGH); |
865 | 865 | ||
866 | at91_set_A_periph(AT91_PIN_PC1, 0); /* LCDHSYNC */ | 866 | at91_set_A_periph(AT91_PIN_PC1, 0); /* LCDHSYNC */ |
867 | at91_set_A_periph(AT91_PIN_PC2, 0); /* LCDDOTCK */ | 867 | at91_set_A_periph(AT91_PIN_PC2, 0); /* LCDDOTCK */ |
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c index af818a21587c..4615528205c8 100644 --- a/arch/arm/mach-at91/gpio.c +++ b/arch/arm/mach-at91/gpio.c | |||
@@ -287,7 +287,7 @@ static int gpio_irq_set_wake(struct irq_data *d, unsigned state) | |||
287 | else | 287 | else |
288 | wakeups[bank] &= ~mask; | 288 | wakeups[bank] &= ~mask; |
289 | 289 | ||
290 | set_irq_wake(gpio_chip[bank].bank->id, state); | 290 | irq_set_irq_wake(gpio_chip[bank].bank->id, state); |
291 | 291 | ||
292 | return 0; | 292 | return 0; |
293 | } | 293 | } |
@@ -375,6 +375,7 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) | |||
375 | 375 | ||
376 | static struct irq_chip gpio_irqchip = { | 376 | static struct irq_chip gpio_irqchip = { |
377 | .name = "GPIO", | 377 | .name = "GPIO", |
378 | .irq_disable = gpio_irq_mask, | ||
378 | .irq_mask = gpio_irq_mask, | 379 | .irq_mask = gpio_irq_mask, |
379 | .irq_unmask = gpio_irq_unmask, | 380 | .irq_unmask = gpio_irq_unmask, |
380 | .irq_set_type = gpio_irq_type, | 381 | .irq_set_type = gpio_irq_type, |
@@ -384,16 +385,14 @@ static struct irq_chip gpio_irqchip = { | |||
384 | static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | 385 | static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) |
385 | { | 386 | { |
386 | unsigned pin; | 387 | unsigned pin; |
387 | struct irq_desc *gpio; | 388 | struct irq_data *idata = irq_desc_get_irq_data(desc); |
388 | struct at91_gpio_chip *at91_gpio; | 389 | struct irq_chip *chip = irq_data_get_irq_chip(idata); |
389 | void __iomem *pio; | 390 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata); |
391 | void __iomem *pio = at91_gpio->regbase; | ||
390 | u32 isr; | 392 | u32 isr; |
391 | 393 | ||
392 | at91_gpio = get_irq_chip_data(irq); | ||
393 | pio = at91_gpio->regbase; | ||
394 | |||
395 | /* temporarily mask (level sensitive) parent IRQ */ | 394 | /* temporarily mask (level sensitive) parent IRQ */ |
396 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 395 | chip->irq_ack(idata); |
397 | for (;;) { | 396 | for (;;) { |
398 | /* Reading ISR acks pending (edge triggered) GPIO interrupts. | 397 | /* Reading ISR acks pending (edge triggered) GPIO interrupts. |
399 | * When none are pending, we're finished unless we need | 398 | * When none are pending, we're finished unless we need
@@ -409,27 +408,15 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
409 | } | 408 | } |
410 | 409 | ||
411 | pin = at91_gpio->chip.base; | 410 | pin = at91_gpio->chip.base; |
412 | gpio = &irq_desc[pin]; | ||
413 | 411 | ||
414 | while (isr) { | 412 | while (isr) { |
415 | if (isr & 1) { | 413 | if (isr & 1) |
416 | if (unlikely(gpio->depth)) { | 414 | generic_handle_irq(pin); |
417 | /* | ||
418 | * The core ARM interrupt handler lazily disables IRQs so | ||
419 | * another IRQ must be generated before it actually gets | ||
420 | * here to be disabled on the GPIO controller. | ||
421 | */ | ||
422 | gpio_irq_mask(irq_get_irq_data(pin)); | ||
423 | } | ||
424 | else | ||
425 | generic_handle_irq(pin); | ||
426 | } | ||
427 | pin++; | 415 | pin++; |
428 | gpio++; | ||
429 | isr >>= 1; | 416 | isr >>= 1; |
430 | } | 417 | } |
431 | } | 418 | } |
432 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 419 | chip->irq_unmask(idata); |
433 | /* now it may re-trigger */ | 420 | /* now it may re-trigger */ |
434 | } | 421 | } |
435 | 422 | ||
@@ -518,14 +505,14 @@ void __init at91_gpio_irq_setup(void) | |||
518 | __raw_writel(~0, this->regbase + PIO_IDR); | 505 | __raw_writel(~0, this->regbase + PIO_IDR); |
519 | 506 | ||
520 | for (i = 0, pin = this->chip.base; i < 32; i++, pin++) { | 507 | for (i = 0, pin = this->chip.base; i < 32; i++, pin++) { |
521 | lockdep_set_class(&irq_desc[pin].lock, &gpio_lock_class); | 508 | irq_set_lockdep_class(pin, &gpio_lock_class); |
522 | 509 | ||
523 | /* | 510 | /* |
524 | * Can use the "simple" and not "edge" handler since it's | 511 | * Can use the "simple" and not "edge" handler since it's |
525 | * shorter, and the AIC handles interrupts sanely. | 512 | * shorter, and the AIC handles interrupts sanely. |
526 | */ | 513 | */ |
527 | set_irq_chip(pin, &gpio_irqchip); | 514 | irq_set_chip_and_handler(pin, &gpio_irqchip, |
528 | set_irq_handler(pin, handle_simple_irq); | 515 | handle_simple_irq); |
529 | set_irq_flags(pin, IRQF_VALID); | 516 | set_irq_flags(pin, IRQF_VALID); |
530 | } | 517 | } |
531 | 518 | ||
@@ -536,8 +523,8 @@ void __init at91_gpio_irq_setup(void) | |||
536 | if (prev && prev->next == this) | 523 | if (prev && prev->next == this) |
537 | continue; | 524 | continue; |
538 | 525 | ||
539 | set_irq_chip_data(id, this); | 526 | irq_set_chip_data(id, this); |
540 | set_irq_chained_handler(id, gpio_irq_handler); | 527 | irq_set_chained_handler(id, gpio_irq_handler); |
541 | } | 528 | } |
542 | pr_info("AT91: %d gpio irqs in %d banks\n", pin - PIN_BASE, gpio_banks); | 529 | pr_info("AT91: %d gpio irqs in %d banks\n", pin - PIN_BASE, gpio_banks); |
543 | } | 530 | } |
diff --git a/arch/arm/mach-at91/include/mach/at572d940hf.h b/arch/arm/mach-at91/include/mach/at572d940hf.h index 2d9b0af9c4d5..be510cfc56be 100644 --- a/arch/arm/mach-at91/include/mach/at572d940hf.h +++ b/arch/arm/mach-at91/include/mach/at572d940hf.h | |||
@@ -89,7 +89,7 @@ | |||
89 | /* | 89 | /* |
90 | * System Peripherals (offset from AT91_BASE_SYS) | 90 | * System Peripherals (offset from AT91_BASE_SYS) |
91 | */ | 91 | */ |
92 | #define AT91_SDRAMC (0xffffea00 - AT91_BASE_SYS) | 92 | #define AT91_SDRAMC0 (0xffffea00 - AT91_BASE_SYS) |
93 | #define AT91_SMC (0xffffec00 - AT91_BASE_SYS) | 93 | #define AT91_SMC (0xffffec00 - AT91_BASE_SYS) |
94 | #define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS) | 94 | #define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS) |
95 | #define AT91_AIC (0xfffff000 - AT91_BASE_SYS) | 95 | #define AT91_AIC (0xfffff000 - AT91_BASE_SYS) |
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c index b56d6b3a4087..9665265ec757 100644 --- a/arch/arm/mach-at91/irq.c +++ b/arch/arm/mach-at91/irq.c | |||
@@ -143,8 +143,7 @@ void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS]) | |||
143 | /* Active Low interrupt, with the specified priority */ | 143 | /* Active Low interrupt, with the specified priority */ |
144 | at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]); | 144 | at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]); |
145 | 145 | ||
146 | set_irq_chip(i, &at91_aic_chip); | 146 | irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq); |
147 | set_irq_handler(i, handle_level_irq); | ||
148 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 147 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
149 | 148 | ||
150 | /* Perform 8 End Of Interrupt Command to make sure AIC will not Lock out nIRQ */ | 149 | /* Perform 8 End Of Interrupt Command to make sure AIC will not Lock out nIRQ */ |
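
Aside: the at91_aic_init() change above is the most common conversion in these hunks: the back-to-back set_irq_chip()/set_irq_handler() calls collapse into a single irq_set_chip_and_handler(). A generic sketch, assuming a hypothetical controller (the my_intc_* name is a placeholder):

    #include <linux/irq.h>
    #include <linux/init.h>
    #include <asm/mach/irq.h>       /* set_irq_flags()/IRQF_VALID on ARM */

    static void __init my_intc_init_one(unsigned int irq, struct irq_chip *chip)
    {
            /* old style:
             *      set_irq_chip(irq, chip);
             *      set_irq_handler(irq, handle_level_irq);
             */
            irq_set_chip_and_handler(irq, chip, handle_level_irq);
            set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);    /* ARM-specific: mark the IRQ usable */
    }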
diff --git a/arch/arm/mach-bcmring/irq.c b/arch/arm/mach-bcmring/irq.c index 84dcda0d1d9a..c48feaf4e8e9 100644 --- a/arch/arm/mach-bcmring/irq.c +++ b/arch/arm/mach-bcmring/irq.c | |||
@@ -93,11 +93,11 @@ static void vic_init(void __iomem *base, struct irq_chip *chip, | |||
93 | unsigned int i; | 93 | unsigned int i; |
94 | for (i = 0; i < 32; i++) { | 94 | for (i = 0; i < 32; i++) { |
95 | unsigned int irq = irq_start + i; | 95 | unsigned int irq = irq_start + i; |
96 | set_irq_chip(irq, chip); | 96 | irq_set_chip(irq, chip); |
97 | set_irq_chip_data(irq, base); | 97 | irq_set_chip_data(irq, base); |
98 | 98 | ||
99 | if (vic_sources & (1 << i)) { | 99 | if (vic_sources & (1 << i)) { |
100 | set_irq_handler(irq, handle_level_irq); | 100 | irq_set_handler(irq, handle_level_irq); |
101 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 101 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
102 | } | 102 | } |
103 | } | 103 | } |
@@ -119,9 +119,9 @@ void __init bcmring_init_irq(void) | |||
119 | 119 | ||
120 | /* special cases */ | 120 | /* special cases */ |
121 | if (INTCHW_INTC1_GPIO0 & IRQ_INTC1_VALID_MASK) { | 121 | if (INTCHW_INTC1_GPIO0 & IRQ_INTC1_VALID_MASK) { |
122 | set_irq_handler(IRQ_GPIO0, handle_simple_irq); | 122 | irq_set_handler(IRQ_GPIO0, handle_simple_irq); |
123 | } | 123 | } |
124 | if (INTCHW_INTC1_GPIO1 & IRQ_INTC1_VALID_MASK) { | 124 | if (INTCHW_INTC1_GPIO1 & IRQ_INTC1_VALID_MASK) { |
125 | set_irq_handler(IRQ_GPIO1, handle_simple_irq); | 125 | irq_set_handler(IRQ_GPIO1, handle_simple_irq); |
126 | } | 126 | } |
127 | } | 127 | } |
diff --git a/arch/arm/mach-clps711x/irq.c b/arch/arm/mach-clps711x/irq.c index 86da7a1b2bbe..c2eceee645e3 100644 --- a/arch/arm/mach-clps711x/irq.c +++ b/arch/arm/mach-clps711x/irq.c | |||
@@ -112,13 +112,13 @@ void __init clps711x_init_irq(void) | |||
112 | 112 | ||
113 | for (i = 0; i < NR_IRQS; i++) { | 113 | for (i = 0; i < NR_IRQS; i++) { |
114 | if (INT1_IRQS & (1 << i)) { | 114 | if (INT1_IRQS & (1 << i)) { |
115 | set_irq_handler(i, handle_level_irq); | 115 | irq_set_chip_and_handler(i, &int1_chip, |
116 | set_irq_chip(i, &int1_chip); | 116 | handle_level_irq); |
117 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 117 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
118 | } | 118 | } |
119 | if (INT2_IRQS & (1 << i)) { | 119 | if (INT2_IRQS & (1 << i)) { |
120 | set_irq_handler(i, handle_level_irq); | 120 | irq_set_chip_and_handler(i, &int2_chip, |
121 | set_irq_chip(i, &int2_chip); | 121 | handle_level_irq); |
122 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 122 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
123 | } | 123 | } |
124 | } | 124 | } |
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c index 9abc80a86a22..f83152d643c5 100644 --- a/arch/arm/mach-davinci/cp_intc.c +++ b/arch/arm/mach-davinci/cp_intc.c | |||
@@ -167,9 +167,9 @@ void __init cp_intc_init(void) | |||
167 | 167 | ||
168 | /* Set up genirq dispatching for cp_intc */ | 168 | /* Set up genirq dispatching for cp_intc */ |
169 | for (i = 0; i < num_irq; i++) { | 169 | for (i = 0; i < num_irq; i++) { |
170 | set_irq_chip(i, &cp_intc_irq_chip); | 170 | irq_set_chip(i, &cp_intc_irq_chip); |
171 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 171 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
172 | set_irq_handler(i, handle_edge_irq); | 172 | irq_set_handler(i, handle_edge_irq); |
173 | } | 173 | } |
174 | 174 | ||
175 | /* Enable global interrupt */ | 175 | /* Enable global interrupt */ |
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c index 20d66e5e4663..a0b838894ac9 100644 --- a/arch/arm/mach-davinci/gpio.c +++ b/arch/arm/mach-davinci/gpio.c | |||
@@ -62,7 +62,7 @@ static inline struct davinci_gpio_regs __iomem *irq2regs(int irq) | |||
62 | { | 62 | { |
63 | struct davinci_gpio_regs __iomem *g; | 63 | struct davinci_gpio_regs __iomem *g; |
64 | 64 | ||
65 | g = (__force struct davinci_gpio_regs __iomem *)get_irq_chip_data(irq); | 65 | g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq); |
66 | 66 | ||
67 | return g; | 67 | return g; |
68 | } | 68 | } |
@@ -208,7 +208,7 @@ pure_initcall(davinci_gpio_setup); | |||
208 | static void gpio_irq_disable(struct irq_data *d) | 208 | static void gpio_irq_disable(struct irq_data *d) |
209 | { | 209 | { |
210 | struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); | 210 | struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); |
211 | u32 mask = (u32) irq_data_get_irq_data(d); | 211 | u32 mask = (u32) irq_data_get_irq_handler_data(d); |
212 | 212 | ||
213 | __raw_writel(mask, &g->clr_falling); | 213 | __raw_writel(mask, &g->clr_falling); |
214 | __raw_writel(mask, &g->clr_rising); | 214 | __raw_writel(mask, &g->clr_rising); |
@@ -217,8 +217,8 @@ static void gpio_irq_disable(struct irq_data *d) | |||
217 | static void gpio_irq_enable(struct irq_data *d) | 217 | static void gpio_irq_enable(struct irq_data *d) |
218 | { | 218 | { |
219 | struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); | 219 | struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); |
220 | u32 mask = (u32) irq_data_get_irq_data(d); | 220 | u32 mask = (u32) irq_data_get_irq_handler_data(d); |
221 | unsigned status = irq_desc[d->irq].status; | 221 | unsigned status = irqd_get_trigger_type(d); |
222 | 222 | ||
223 | status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING; | 223 | status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING; |
224 | if (!status) | 224 | if (!status) |
@@ -233,21 +233,11 @@ static void gpio_irq_enable(struct irq_data *d) | |||
233 | static int gpio_irq_type(struct irq_data *d, unsigned trigger) | 233 | static int gpio_irq_type(struct irq_data *d, unsigned trigger) |
234 | { | 234 | { |
235 | struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); | 235 | struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); |
236 | u32 mask = (u32) irq_data_get_irq_data(d); | 236 | u32 mask = (u32) irq_data_get_irq_handler_data(d); |
237 | 237 | ||
238 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 238 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
239 | return -EINVAL; | 239 | return -EINVAL; |
240 | 240 | ||
241 | irq_desc[d->irq].status &= ~IRQ_TYPE_SENSE_MASK; | ||
242 | irq_desc[d->irq].status |= trigger; | ||
243 | |||
244 | /* don't enable the IRQ if it's currently disabled */ | ||
245 | if (irq_desc[d->irq].depth == 0) { | ||
246 | __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING) | ||
247 | ? &g->set_falling : &g->clr_falling); | ||
248 | __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING) | ||
249 | ? &g->set_rising : &g->clr_rising); | ||
250 | } | ||
251 | return 0; | 241 | return 0; |
252 | } | 242 | } |
253 | 243 | ||
@@ -256,6 +246,7 @@ static struct irq_chip gpio_irqchip = { | |||
256 | .irq_enable = gpio_irq_enable, | 246 | .irq_enable = gpio_irq_enable, |
257 | .irq_disable = gpio_irq_disable, | 247 | .irq_disable = gpio_irq_disable, |
258 | .irq_set_type = gpio_irq_type, | 248 | .irq_set_type = gpio_irq_type, |
249 | .flags = IRQCHIP_SET_TYPE_MASKED, | ||
259 | }; | 250 | }; |
260 | 251 | ||
261 | static void | 252 | static void |
@@ -285,7 +276,7 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
285 | status >>= 16; | 276 | status >>= 16; |
286 | 277 | ||
287 | /* now demux them to the right lowlevel handler */ | 278 | /* now demux them to the right lowlevel handler */ |
288 | n = (int)get_irq_data(irq); | 279 | n = (int)irq_get_handler_data(irq); |
289 | while (status) { | 280 | while (status) { |
290 | res = ffs(status); | 281 | res = ffs(status); |
291 | n += res; | 282 | n += res; |
@@ -323,7 +314,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) | |||
323 | static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger) | 314 | static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger) |
324 | { | 315 | { |
325 | struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); | 316 | struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); |
326 | u32 mask = (u32) irq_data_get_irq_data(d); | 317 | u32 mask = (u32) irq_data_get_irq_handler_data(d); |
327 | 318 | ||
328 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 319 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
329 | return -EINVAL; | 320 | return -EINVAL; |
@@ -395,7 +386,7 @@ static int __init davinci_gpio_irq_setup(void) | |||
395 | 386 | ||
396 | /* AINTC handles mask/unmask; GPIO handles triggering */ | 387 | /* AINTC handles mask/unmask; GPIO handles triggering */ |
397 | irq = bank_irq; | 388 | irq = bank_irq; |
398 | gpio_irqchip_unbanked = *get_irq_desc_chip(irq_to_desc(irq)); | 389 | gpio_irqchip_unbanked = *irq_get_chip(irq); |
399 | gpio_irqchip_unbanked.name = "GPIO-AINTC"; | 390 | gpio_irqchip_unbanked.name = "GPIO-AINTC"; |
400 | gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked; | 391 | gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked; |
401 | 392 | ||
@@ -406,10 +397,10 @@ static int __init davinci_gpio_irq_setup(void) | |||
406 | 397 | ||
407 | /* set the direct IRQs up to use that irqchip */ | 398 | /* set the direct IRQs up to use that irqchip */ |
408 | for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) { | 399 | for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) { |
409 | set_irq_chip(irq, &gpio_irqchip_unbanked); | 400 | irq_set_chip(irq, &gpio_irqchip_unbanked); |
410 | set_irq_data(irq, (void *) __gpio_mask(gpio)); | 401 | irq_set_handler_data(irq, (void *)__gpio_mask(gpio)); |
411 | set_irq_chip_data(irq, (__force void *) g); | 402 | irq_set_chip_data(irq, (__force void *)g); |
412 | irq_desc[irq].status |= IRQ_TYPE_EDGE_BOTH; | 403 | irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH); |
413 | } | 404 | } |
414 | 405 | ||
415 | goto done; | 406 | goto done; |
@@ -430,15 +421,15 @@ static int __init davinci_gpio_irq_setup(void) | |||
430 | __raw_writel(~0, &g->clr_rising); | 421 | __raw_writel(~0, &g->clr_rising); |
431 | 422 | ||
432 | /* set up all irqs in this bank */ | 423 | /* set up all irqs in this bank */ |
433 | set_irq_chained_handler(bank_irq, gpio_irq_handler); | 424 | irq_set_chained_handler(bank_irq, gpio_irq_handler); |
434 | set_irq_chip_data(bank_irq, (__force void *) g); | 425 | irq_set_chip_data(bank_irq, (__force void *)g); |
435 | set_irq_data(bank_irq, (void *) irq); | 426 | irq_set_handler_data(bank_irq, (void *)irq); |
436 | 427 | ||
437 | for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { | 428 | for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { |
438 | set_irq_chip(irq, &gpio_irqchip); | 429 | irq_set_chip(irq, &gpio_irqchip); |
439 | set_irq_chip_data(irq, (__force void *) g); | 430 | irq_set_chip_data(irq, (__force void *)g); |
440 | set_irq_data(irq, (void *) __gpio_mask(gpio)); | 431 | irq_set_handler_data(irq, (void *)__gpio_mask(gpio)); |
441 | set_irq_handler(irq, handle_simple_irq); | 432 | irq_set_handler(irq, handle_simple_irq); |
442 | set_irq_flags(irq, IRQF_VALID); | 433 | set_irq_flags(irq, IRQF_VALID); |
443 | } | 434 | } |
444 | 435 | ||
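
Aside: the DaVinci GPIO hunks above bundle three related conversions: the per-pin cookie moves from set_irq_data()/irq_data_get_irq_data() to irq_set_handler_data()/irq_data_get_irq_handler_data(), the trigger type is read back with irqd_get_trigger_type() instead of irq_desc[irq].status, and IRQCHIP_SET_TYPE_MASKED tells the core to mask the line around .irq_set_type(), which is why the hand-rolled depth check could be deleted. A condensed sketch of the same idiom, using hypothetical my_* names and a placeholder register layout:

    #include <linux/irq.h>
    #include <linux/io.h>
    #include <linux/bitops.h>
    #include <linux/init.h>
    #include <asm/mach/irq.h>       /* set_irq_flags()/IRQF_VALID on ARM */

    /* placeholder register layout for a hypothetical GPIO bank */
    struct my_gpio_regs {
            u32 set_rising;
            u32 set_falling;
    };
    static struct my_gpio_regs __iomem *my_regs;    /* assumed to be ioremapped elsewhere */

    static void my_gpio_irq_enable(struct irq_data *d)
    {
            u32 mask = (u32)irq_data_get_irq_handler_data(d);       /* per-pin mask cookie */
            u32 type = irqd_get_trigger_type(d);    /* replaces irq_desc[irq].status & IRQ_TYPE_SENSE_MASK */

            if (type & IRQ_TYPE_EDGE_RISING)
                    __raw_writel(mask, &my_regs->set_rising);
            if (type & IRQ_TYPE_EDGE_FALLING)
                    __raw_writel(mask, &my_regs->set_falling);
    }

    /* a real chip also needs .irq_disable/.irq_set_type and friends; omitted in this sketch */
    static struct irq_chip my_gpio_irqchip = {
            .name           = "MY-GPIO",
            .irq_enable     = my_gpio_irq_enable,
            .flags          = IRQCHIP_SET_TYPE_MASKED,      /* core masks the line around set_type */
    };

    static void __init my_gpio_irq_setup(unsigned int irq, unsigned int gpio)
    {
            irq_set_chip(irq, &my_gpio_irqchip);
            irq_set_chip_data(irq, (__force void *)my_regs);
            irq_set_handler_data(irq, (void *)BIT(gpio & 31));      /* stash the per-pin mask */
            irq_set_handler(irq, handle_simple_irq);
            set_irq_flags(irq, IRQF_VALID);
    }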
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c index 5e05c9b64e1f..e6269a6e0014 100644 --- a/arch/arm/mach-davinci/irq.c +++ b/arch/arm/mach-davinci/irq.c | |||
@@ -154,11 +154,11 @@ void __init davinci_irq_init(void) | |||
154 | 154 | ||
155 | /* set up genirq dispatch for ARM INTC */ | 155 | /* set up genirq dispatch for ARM INTC */ |
156 | for (i = 0; i < davinci_soc_info.intc_irq_num; i++) { | 156 | for (i = 0; i < davinci_soc_info.intc_irq_num; i++) { |
157 | set_irq_chip(i, &davinci_irq_chip_0); | 157 | irq_set_chip(i, &davinci_irq_chip_0); |
158 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 158 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
159 | if (i != IRQ_TINT1_TINT34) | 159 | if (i != IRQ_TINT1_TINT34) |
160 | set_irq_handler(i, handle_edge_irq); | 160 | irq_set_handler(i, handle_edge_irq); |
161 | else | 161 | else |
162 | set_irq_handler(i, handle_level_irq); | 162 | irq_set_handler(i, handle_level_irq); |
163 | } | 163 | } |
164 | } | 164 | } |
diff --git a/arch/arm/mach-dove/include/mach/dove.h b/arch/arm/mach-dove/include/mach/dove.h index e5fcdd3f5bf5..b20ec9af7882 100644 --- a/arch/arm/mach-dove/include/mach/dove.h +++ b/arch/arm/mach-dove/include/mach/dove.h | |||
@@ -136,7 +136,7 @@ | |||
136 | #define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xe803c) | 136 | #define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xe803c) |
137 | #define DOVE_AU1_SPDIFO_GPIO_EN (1 << 1) | 137 | #define DOVE_AU1_SPDIFO_GPIO_EN (1 << 1) |
138 | #define DOVE_NAND_GPIO_EN (1 << 0) | 138 | #define DOVE_NAND_GPIO_EN (1 << 0) |
139 | #define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_VIRT_BASE + 0x40) | 139 | #define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_LO_VIRT_BASE + 0x40) |
140 | #define DOVE_SPI_GPIO_SEL (1 << 5) | 140 | #define DOVE_SPI_GPIO_SEL (1 << 5) |
141 | #define DOVE_UART1_GPIO_SEL (1 << 4) | 141 | #define DOVE_UART1_GPIO_SEL (1 << 4) |
142 | #define DOVE_AU1_GPIO_SEL (1 << 3) | 142 | #define DOVE_AU1_GPIO_SEL (1 << 3) |
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index 101707fa2e2c..f07fd16e0c9b 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c | |||
@@ -86,8 +86,7 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
86 | if (!(cause & (1 << irq))) | 86 | if (!(cause & (1 << irq))) |
87 | continue; | 87 | continue; |
88 | irq = pmu_to_irq(irq); | 88 | irq = pmu_to_irq(irq); |
89 | desc = irq_desc + irq; | 89 | generic_handle_irq(irq); |
90 | desc_handle_irq(irq, desc); | ||
91 | } | 90 | } |
92 | } | 91 | } |
93 | 92 | ||
@@ -103,14 +102,14 @@ void __init dove_init_irq(void) | |||
103 | */ | 102 | */ |
104 | orion_gpio_init(0, 32, DOVE_GPIO_LO_VIRT_BASE, 0, | 103 | orion_gpio_init(0, 32, DOVE_GPIO_LO_VIRT_BASE, 0, |
105 | IRQ_DOVE_GPIO_START); | 104 | IRQ_DOVE_GPIO_START); |
106 | set_irq_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler); | 105 | irq_set_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler); |
107 | set_irq_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler); | 106 | irq_set_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler); |
108 | set_irq_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler); | 107 | irq_set_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler); |
109 | set_irq_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler); | 108 | irq_set_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler); |
110 | 109 | ||
111 | orion_gpio_init(32, 32, DOVE_GPIO_HI_VIRT_BASE, 0, | 110 | orion_gpio_init(32, 32, DOVE_GPIO_HI_VIRT_BASE, 0, |
112 | IRQ_DOVE_GPIO_START + 32); | 111 | IRQ_DOVE_GPIO_START + 32); |
113 | set_irq_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler); | 112 | irq_set_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler); |
114 | 113 | ||
115 | orion_gpio_init(64, 8, DOVE_GPIO2_VIRT_BASE, 0, | 114 | orion_gpio_init(64, 8, DOVE_GPIO2_VIRT_BASE, 0, |
116 | IRQ_DOVE_GPIO_START + 64); | 115 | IRQ_DOVE_GPIO_START + 64); |
@@ -122,10 +121,9 @@ void __init dove_init_irq(void) | |||
122 | writel(0, PMU_INTERRUPT_CAUSE); | 121 | writel(0, PMU_INTERRUPT_CAUSE); |
123 | 122 | ||
124 | for (i = IRQ_DOVE_PMU_START; i < NR_IRQS; i++) { | 123 | for (i = IRQ_DOVE_PMU_START; i < NR_IRQS; i++) { |
125 | set_irq_chip(i, &pmu_irq_chip); | 124 | irq_set_chip_and_handler(i, &pmu_irq_chip, handle_level_irq); |
126 | set_irq_handler(i, handle_level_irq); | 125 | irq_set_status_flags(i, IRQ_LEVEL); |
127 | irq_desc[i].status |= IRQ_LEVEL; | ||
128 | set_irq_flags(i, IRQF_VALID); | 126 | set_irq_flags(i, IRQF_VALID); |
129 | } | 127 | } |
130 | set_irq_chained_handler(IRQ_DOVE_PMU, pmu_irq_handler); | 128 | irq_set_chained_handler(IRQ_DOVE_PMU, pmu_irq_handler); |
131 | } | 129 | } |
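
Aside: the Dove PMU hunk shows the replacement for direct status manipulation: flags such as IRQ_LEVEL are now set through irq_set_status_flags() rather than by writing irq_desc[i].status, and the demux loop simply calls generic_handle_irq(). A small sketch with a placeholder chip:

    #include <linux/irq.h>
    #include <linux/init.h>
    #include <asm/mach/irq.h>       /* set_irq_flags()/IRQF_VALID on ARM */

    static struct irq_chip my_pmu_irq_chip;         /* placeholder; callbacks omitted */

    static void __init my_pmu_irq_init(unsigned int first, unsigned int last)
    {
            unsigned int i;

            for (i = first; i <= last; i++) {
                    irq_set_chip_and_handler(i, &my_pmu_irq_chip, handle_level_irq);
                    /* old: irq_desc[i].status |= IRQ_LEVEL; */
                    irq_set_status_flags(i, IRQ_LEVEL);
                    set_irq_flags(i, IRQF_VALID);
            }
    }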
diff --git a/arch/arm/mach-dove/mpp.c b/arch/arm/mach-dove/mpp.c index 71db2bdf2f28..c66c76346904 100644 --- a/arch/arm/mach-dove/mpp.c +++ b/arch/arm/mach-dove/mpp.c | |||
@@ -147,9 +147,6 @@ void __init dove_mpp_conf(unsigned int *mpp_list) | |||
147 | u32 pmu_sig_ctrl[PMU_SIG_REGS]; | 147 | u32 pmu_sig_ctrl[PMU_SIG_REGS]; |
148 | int i; | 148 | int i; |
149 | 149 | ||
150 | /* Initialize gpiolib. */ | ||
151 | orion_gpio_init(); | ||
152 | |||
153 | for (i = 0; i < MPP_NR_REGS; i++) | 150 | for (i = 0; i < MPP_NR_REGS; i++) |
154 | mpp_ctrl[i] = readl(MPP_CTRL(i)); | 151 | mpp_ctrl[i] = readl(MPP_CTRL(i)); |
155 | 152 | ||
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index 7df083f37fa7..087bc771ac23 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c | |||
@@ -66,8 +66,8 @@ static void __init ebsa110_init_irq(void) | |||
66 | local_irq_restore(flags); | 66 | local_irq_restore(flags); |
67 | 67 | ||
68 | for (irq = 0; irq < NR_IRQS; irq++) { | 68 | for (irq = 0; irq < NR_IRQS; irq++) { |
69 | set_irq_chip(irq, &ebsa110_irq_chip); | 69 | irq_set_chip_and_handler(irq, &ebsa110_irq_chip, |
70 | set_irq_handler(irq, handle_level_irq); | 70 | handle_level_irq); |
71 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 71 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
72 | } | 72 | } |
73 | } | 73 | } |
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c index 34e071d79761..180b8a9d0d21 100644 --- a/arch/arm/mach-ep93xx/gpio.c +++ b/arch/arm/mach-ep93xx/gpio.c | |||
@@ -117,7 +117,7 @@ static void ep93xx_gpio_irq_ack(struct irq_data *d) | |||
117 | int port = line >> 3; | 117 | int port = line >> 3; |
118 | int port_mask = 1 << (line & 7); | 118 | int port_mask = 1 << (line & 7); |
119 | 119 | ||
120 | if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { | 120 | if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { |
121 | gpio_int_type2[port] ^= port_mask; /* switch edge direction */ | 121 | gpio_int_type2[port] ^= port_mask; /* switch edge direction */ |
122 | ep93xx_gpio_update_int_params(port); | 122 | ep93xx_gpio_update_int_params(port); |
123 | } | 123 | } |
@@ -131,7 +131,7 @@ static void ep93xx_gpio_irq_mask_ack(struct irq_data *d) | |||
131 | int port = line >> 3; | 131 | int port = line >> 3; |
132 | int port_mask = 1 << (line & 7); | 132 | int port_mask = 1 << (line & 7); |
133 | 133 | ||
134 | if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) | 134 | if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) |
135 | gpio_int_type2[port] ^= port_mask; /* switch edge direction */ | 135 | gpio_int_type2[port] ^= port_mask; /* switch edge direction */ |
136 | 136 | ||
137 | gpio_int_unmasked[port] &= ~port_mask; | 137 | gpio_int_unmasked[port] &= ~port_mask; |
@@ -165,10 +165,10 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d) | |||
165 | */ | 165 | */ |
166 | static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) | 166 | static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) |
167 | { | 167 | { |
168 | struct irq_desc *desc = irq_desc + d->irq; | ||
169 | const int gpio = irq_to_gpio(d->irq); | 168 | const int gpio = irq_to_gpio(d->irq); |
170 | const int port = gpio >> 3; | 169 | const int port = gpio >> 3; |
171 | const int port_mask = 1 << (gpio & 7); | 170 | const int port_mask = 1 << (gpio & 7); |
171 | irq_flow_handler_t handler; | ||
172 | 172 | ||
173 | gpio_direction_input(gpio); | 173 | gpio_direction_input(gpio); |
174 | 174 | ||
@@ -176,22 +176,22 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) | |||
176 | case IRQ_TYPE_EDGE_RISING: | 176 | case IRQ_TYPE_EDGE_RISING: |
177 | gpio_int_type1[port] |= port_mask; | 177 | gpio_int_type1[port] |= port_mask; |
178 | gpio_int_type2[port] |= port_mask; | 178 | gpio_int_type2[port] |= port_mask; |
179 | desc->handle_irq = handle_edge_irq; | 179 | handler = handle_edge_irq; |
180 | break; | 180 | break; |
181 | case IRQ_TYPE_EDGE_FALLING: | 181 | case IRQ_TYPE_EDGE_FALLING: |
182 | gpio_int_type1[port] |= port_mask; | 182 | gpio_int_type1[port] |= port_mask; |
183 | gpio_int_type2[port] &= ~port_mask; | 183 | gpio_int_type2[port] &= ~port_mask; |
184 | desc->handle_irq = handle_edge_irq; | 184 | handler = handle_edge_irq; |
185 | break; | 185 | break; |
186 | case IRQ_TYPE_LEVEL_HIGH: | 186 | case IRQ_TYPE_LEVEL_HIGH: |
187 | gpio_int_type1[port] &= ~port_mask; | 187 | gpio_int_type1[port] &= ~port_mask; |
188 | gpio_int_type2[port] |= port_mask; | 188 | gpio_int_type2[port] |= port_mask; |
189 | desc->handle_irq = handle_level_irq; | 189 | handler = handle_level_irq; |
190 | break; | 190 | break; |
191 | case IRQ_TYPE_LEVEL_LOW: | 191 | case IRQ_TYPE_LEVEL_LOW: |
192 | gpio_int_type1[port] &= ~port_mask; | 192 | gpio_int_type1[port] &= ~port_mask; |
193 | gpio_int_type2[port] &= ~port_mask; | 193 | gpio_int_type2[port] &= ~port_mask; |
194 | desc->handle_irq = handle_level_irq; | 194 | handler = handle_level_irq; |
195 | break; | 195 | break; |
196 | case IRQ_TYPE_EDGE_BOTH: | 196 | case IRQ_TYPE_EDGE_BOTH: |
197 | gpio_int_type1[port] |= port_mask; | 197 | gpio_int_type1[port] |= port_mask; |
@@ -200,17 +200,16 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) | |||
200 | gpio_int_type2[port] &= ~port_mask; /* falling */ | 200 | gpio_int_type2[port] &= ~port_mask; /* falling */ |
201 | else | 201 | else |
202 | gpio_int_type2[port] |= port_mask; /* rising */ | 202 | gpio_int_type2[port] |= port_mask; /* rising */ |
203 | desc->handle_irq = handle_edge_irq; | 203 | handler = handle_edge_irq; |
204 | break; | 204 | break; |
205 | default: | 205 | default: |
206 | pr_err("failed to set irq type %d for gpio %d\n", type, gpio); | 206 | pr_err("failed to set irq type %d for gpio %d\n", type, gpio); |
207 | return -EINVAL; | 207 | return -EINVAL; |
208 | } | 208 | } |
209 | 209 | ||
210 | gpio_int_enabled[port] |= port_mask; | 210 | __irq_set_handler_locked(d->irq, handler); |
211 | 211 | ||
212 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | 212 | gpio_int_enabled[port] |= port_mask; |
213 | desc->status |= type & IRQ_TYPE_SENSE_MASK; | ||
214 | 213 | ||
215 | ep93xx_gpio_update_int_params(port); | 214 | ep93xx_gpio_update_int_params(port); |
216 | 215 | ||
@@ -232,20 +231,29 @@ void __init ep93xx_gpio_init_irq(void) | |||
232 | 231 | ||
233 | for (gpio_irq = gpio_to_irq(0); | 232 | for (gpio_irq = gpio_to_irq(0); |
234 | gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) { | 233 | gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) { |
235 | set_irq_chip(gpio_irq, &ep93xx_gpio_irq_chip); | 234 | irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip, |
236 | set_irq_handler(gpio_irq, handle_level_irq); | 235 | handle_level_irq); |
237 | set_irq_flags(gpio_irq, IRQF_VALID); | 236 | set_irq_flags(gpio_irq, IRQF_VALID); |
238 | } | 237 | } |
239 | 238 | ||
240 | set_irq_chained_handler(IRQ_EP93XX_GPIO_AB, ep93xx_gpio_ab_irq_handler); | 239 | irq_set_chained_handler(IRQ_EP93XX_GPIO_AB, |
241 | set_irq_chained_handler(IRQ_EP93XX_GPIO0MUX, ep93xx_gpio_f_irq_handler); | 240 | ep93xx_gpio_ab_irq_handler); |
242 | set_irq_chained_handler(IRQ_EP93XX_GPIO1MUX, ep93xx_gpio_f_irq_handler); | 241 | irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX, |
243 | set_irq_chained_handler(IRQ_EP93XX_GPIO2MUX, ep93xx_gpio_f_irq_handler); | 242 | ep93xx_gpio_f_irq_handler); |
244 | set_irq_chained_handler(IRQ_EP93XX_GPIO3MUX, ep93xx_gpio_f_irq_handler); | 243 | irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX, |
245 | set_irq_chained_handler(IRQ_EP93XX_GPIO4MUX, ep93xx_gpio_f_irq_handler); | 244 | ep93xx_gpio_f_irq_handler); |
246 | set_irq_chained_handler(IRQ_EP93XX_GPIO5MUX, ep93xx_gpio_f_irq_handler); | 245 | irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX, |
247 | set_irq_chained_handler(IRQ_EP93XX_GPIO6MUX, ep93xx_gpio_f_irq_handler); | 246 | ep93xx_gpio_f_irq_handler); |
248 | set_irq_chained_handler(IRQ_EP93XX_GPIO7MUX, ep93xx_gpio_f_irq_handler); | 247 | irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX, |
248 | ep93xx_gpio_f_irq_handler); | ||
249 | irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX, | ||
250 | ep93xx_gpio_f_irq_handler); | ||
251 | irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX, | ||
252 | ep93xx_gpio_f_irq_handler); | ||
253 | irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX, | ||
254 | ep93xx_gpio_f_irq_handler); | ||
255 | irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX, | ||
256 | ep93xx_gpio_f_irq_handler); | ||
249 | } | 257 | } |
250 | 258 | ||
251 | 259 | ||
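
Aside: the ep93xx .irq_set_type() conversion above illustrates another recurring change: instead of assigning desc->handle_irq and updating desc->status by hand, the callback picks a flow handler and installs it with __irq_set_handler_locked(), while the genirq core keeps the trigger-type bookkeeping. Roughly, for a hypothetical chip:

    #include <linux/irq.h>
    #include <linux/errno.h>

    static int my_gpio_irq_set_type(struct irq_data *d, unsigned int type)
    {
            irq_flow_handler_t handler;

            switch (type) {
            case IRQ_TYPE_EDGE_RISING:
            case IRQ_TYPE_EDGE_FALLING:
            case IRQ_TYPE_EDGE_BOTH:
                    handler = handle_edge_irq;
                    break;
            case IRQ_TYPE_LEVEL_HIGH:
            case IRQ_TYPE_LEVEL_LOW:
                    handler = handle_level_irq;
                    break;
            default:
                    return -EINVAL;
            }

            /* trigger-register programming for the hardware would go here */

            /* old: desc->handle_irq = ...; desc->status |= type & IRQ_TYPE_SENSE_MASK; */
            __irq_set_handler_locked(d->irq, handler);

            return 0;
    }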
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig index a021b5240bba..e849f67be47d 100644 --- a/arch/arm/mach-exynos4/Kconfig +++ b/arch/arm/mach-exynos4/Kconfig | |||
@@ -20,6 +20,11 @@ config EXYNOS4_MCT | |||
20 | help | 20 | help |
21 | Use MCT (Multi Core Timer) as kernel timers | 21 | Use MCT (Multi Core Timer) as kernel timers |
22 | 22 | ||
23 | config EXYNOS4_DEV_AHCI | ||
24 | bool | ||
25 | help | ||
26 | Compile in platform device definitions for AHCI | ||
27 | |||
23 | config EXYNOS4_DEV_PD | 28 | config EXYNOS4_DEV_PD |
24 | bool | 29 | bool |
25 | help | 30 | help |
@@ -134,9 +139,9 @@ config MACH_ARMLEX4210 | |||
134 | select S3C_DEV_HSMMC | 139 | select S3C_DEV_HSMMC |
135 | select S3C_DEV_HSMMC2 | 140 | select S3C_DEV_HSMMC2 |
136 | select S3C_DEV_HSMMC3 | 141 | select S3C_DEV_HSMMC3 |
142 | select EXYNOS4_DEV_AHCI | ||
137 | select EXYNOS4_DEV_SYSMMU | 143 | select EXYNOS4_DEV_SYSMMU |
138 | select EXYNOS4_SETUP_SDHCI | 144 | select EXYNOS4_SETUP_SDHCI |
139 | select SATA_AHCI_PLATFORM | ||
140 | help | 145 | help |
141 | Machine support for Samsung ARMLEX4210 based on EXYNOS4210 | 146 | Machine support for Samsung ARMLEX4210 based on EXYNOS4210 |
142 | 147 | ||
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile index b8f0e7d82d7e..9be104f63c0b 100644 --- a/arch/arm/mach-exynos4/Makefile +++ b/arch/arm/mach-exynos4/Makefile | |||
@@ -39,6 +39,7 @@ obj-$(CONFIG_MACH_NURI) += mach-nuri.o | |||
39 | # device support | 39 | # device support |
40 | 40 | ||
41 | obj-y += dev-audio.o | 41 | obj-y += dev-audio.o |
42 | obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o | ||
42 | obj-$(CONFIG_EXYNOS4_DEV_PD) += dev-pd.o | 43 | obj-$(CONFIG_EXYNOS4_DEV_PD) += dev-pd.o |
43 | obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o | 44 | obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o |
44 | 45 | ||
@@ -53,4 +54,3 @@ obj-$(CONFIG_EXYNOS4_SETUP_I2C7) += setup-i2c7.o | |||
53 | obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o | 54 | obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o |
54 | obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o | 55 | obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o |
55 | obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o | 56 | obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o |
56 | obj-$(CONFIG_SATA_AHCI_PLATFORM) += dev-ahci.o | ||
diff --git a/arch/arm/mach-exynos4/include/mach/debug-macro.S b/arch/arm/mach-exynos4/include/mach/debug-macro.S index 58bbd049a6c4..a442ef861167 100644 --- a/arch/arm/mach-exynos4/include/mach/debug-macro.S +++ b/arch/arm/mach-exynos4/include/mach/debug-macro.S | |||
@@ -21,8 +21,8 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | .macro addruart, rp, rv | 23 | .macro addruart, rp, rv |
24 | ldreq \rp, = S3C_PA_UART | 24 | ldr \rp, = S3C_PA_UART |
25 | ldrne \rv, = S3C_VA_UART | 25 | ldr \rv, = S3C_VA_UART |
26 | #if CONFIG_DEBUG_S3C_UART != 0 | 26 | #if CONFIG_DEBUG_S3C_UART != 0 |
27 | add \rp, \rp, #(0x10000 * CONFIG_DEBUG_S3C_UART) | 27 | add \rp, \rp, #(0x10000 * CONFIG_DEBUG_S3C_UART) |
28 | add \rv, \rv, #(0x10000 * CONFIG_DEBUG_S3C_UART) | 28 | add \rv, \rv, #(0x10000 * CONFIG_DEBUG_S3C_UART) |
diff --git a/arch/arm/mach-exynos4/irq-combiner.c b/arch/arm/mach-exynos4/irq-combiner.c index 31618d91ce15..f488b66d6806 100644 --- a/arch/arm/mach-exynos4/irq-combiner.c +++ b/arch/arm/mach-exynos4/irq-combiner.c | |||
@@ -54,8 +54,8 @@ static void combiner_unmask_irq(struct irq_data *data) | |||
54 | 54 | ||
55 | static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | 55 | static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) |
56 | { | 56 | { |
57 | struct combiner_chip_data *chip_data = get_irq_data(irq); | 57 | struct combiner_chip_data *chip_data = irq_get_handler_data(irq); |
58 | struct irq_chip *chip = get_irq_chip(irq); | 58 | struct irq_chip *chip = irq_get_chip(irq); |
59 | unsigned int cascade_irq, combiner_irq; | 59 | unsigned int cascade_irq, combiner_irq; |
60 | unsigned long status; | 60 | unsigned long status; |
61 | 61 | ||
@@ -93,9 +93,9 @@ void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq) | |||
93 | { | 93 | { |
94 | if (combiner_nr >= MAX_COMBINER_NR) | 94 | if (combiner_nr >= MAX_COMBINER_NR) |
95 | BUG(); | 95 | BUG(); |
96 | if (set_irq_data(irq, &combiner_data[combiner_nr]) != 0) | 96 | if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0) |
97 | BUG(); | 97 | BUG(); |
98 | set_irq_chained_handler(irq, combiner_handle_cascade_irq); | 98 | irq_set_chained_handler(irq, combiner_handle_cascade_irq); |
99 | } | 99 | } |
100 | 100 | ||
101 | void __init combiner_init(unsigned int combiner_nr, void __iomem *base, | 101 | void __init combiner_init(unsigned int combiner_nr, void __iomem *base, |
@@ -119,9 +119,8 @@ void __init combiner_init(unsigned int combiner_nr, void __iomem *base, | |||
119 | 119 | ||
120 | for (i = irq_start; i < combiner_data[combiner_nr].irq_offset | 120 | for (i = irq_start; i < combiner_data[combiner_nr].irq_offset |
121 | + MAX_IRQ_IN_COMBINER; i++) { | 121 | + MAX_IRQ_IN_COMBINER; i++) { |
122 | set_irq_chip(i, &combiner_chip); | 122 | irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq); |
123 | set_irq_chip_data(i, &combiner_data[combiner_nr]); | 123 | irq_set_chip_data(i, &combiner_data[combiner_nr]); |
124 | set_irq_handler(i, handle_level_irq); | ||
125 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 124 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
126 | } | 125 | } |
127 | } | 126 | } |
diff --git a/arch/arm/mach-exynos4/irq-eint.c b/arch/arm/mach-exynos4/irq-eint.c index 4f7ad4a796e4..9d87d2ac7f68 100644 --- a/arch/arm/mach-exynos4/irq-eint.c +++ b/arch/arm/mach-exynos4/irq-eint.c | |||
@@ -190,8 +190,8 @@ static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) | |||
190 | 190 | ||
191 | static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc) | 191 | static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc) |
192 | { | 192 | { |
193 | u32 *irq_data = get_irq_data(irq); | 193 | u32 *irq_data = irq_get_handler_data(irq); |
194 | struct irq_chip *chip = get_irq_chip(irq); | 194 | struct irq_chip *chip = irq_get_chip(irq); |
195 | 195 | ||
196 | chip->irq_mask(&desc->irq_data); | 196 | chip->irq_mask(&desc->irq_data); |
197 | 197 | ||
@@ -208,18 +208,19 @@ int __init exynos4_init_irq_eint(void) | |||
208 | int irq; | 208 | int irq; |
209 | 209 | ||
210 | for (irq = 0 ; irq <= 31 ; irq++) { | 210 | for (irq = 0 ; irq <= 31 ; irq++) { |
211 | set_irq_chip(IRQ_EINT(irq), &exynos4_irq_eint); | 211 | irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint, |
212 | set_irq_handler(IRQ_EINT(irq), handle_level_irq); | 212 | handle_level_irq); |
213 | set_irq_flags(IRQ_EINT(irq), IRQF_VALID); | 213 | set_irq_flags(IRQ_EINT(irq), IRQF_VALID); |
214 | } | 214 | } |
215 | 215 | ||
216 | set_irq_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31); | 216 | irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31); |
217 | 217 | ||
218 | for (irq = 0 ; irq <= 15 ; irq++) { | 218 | for (irq = 0 ; irq <= 15 ; irq++) { |
219 | eint0_15_data[irq] = IRQ_EINT(irq); | 219 | eint0_15_data[irq] = IRQ_EINT(irq); |
220 | 220 | ||
221 | set_irq_data(exynos4_get_irq_nr(irq), &eint0_15_data[irq]); | 221 | irq_set_handler_data(exynos4_get_irq_nr(irq), |
222 | set_irq_chained_handler(exynos4_get_irq_nr(irq), | 222 | &eint0_15_data[irq]); |
223 | irq_set_chained_handler(exynos4_get_irq_nr(irq), | ||
223 | exynos4_irq_eint0_15); | 224 | exynos4_irq_eint0_15); |
224 | } | 225 | } |
225 | 226 | ||
diff --git a/arch/arm/mach-exynos4/mach-smdkc210.c b/arch/arm/mach-exynos4/mach-smdkc210.c index 25a256818122..e645f7a955f0 100644 --- a/arch/arm/mach-exynos4/mach-smdkc210.c +++ b/arch/arm/mach-exynos4/mach-smdkc210.c | |||
@@ -125,7 +125,7 @@ static struct resource smdkc210_smsc911x_resources[] = { | |||
125 | }; | 125 | }; |
126 | 126 | ||
127 | static struct smsc911x_platform_config smsc9215_config = { | 127 | static struct smsc911x_platform_config smsc9215_config = { |
128 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, | 128 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, |
129 | .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, | 129 | .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, |
130 | .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY, | 130 | .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY, |
131 | .phy_interface = PHY_INTERFACE_MODE_MII, | 131 | .phy_interface = PHY_INTERFACE_MODE_MII, |
diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c index 88e0275143be..152676471b67 100644 --- a/arch/arm/mach-exynos4/mach-smdkv310.c +++ b/arch/arm/mach-exynos4/mach-smdkv310.c | |||
@@ -127,7 +127,7 @@ static struct resource smdkv310_smsc911x_resources[] = { | |||
127 | }; | 127 | }; |
128 | 128 | ||
129 | static struct smsc911x_platform_config smsc9215_config = { | 129 | static struct smsc911x_platform_config smsc9215_config = { |
130 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, | 130 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, |
131 | .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, | 131 | .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, |
132 | .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY, | 132 | .flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY, |
133 | .phy_interface = PHY_INTERFACE_MODE_MII, | 133 | .phy_interface = PHY_INTERFACE_MODE_MII, |
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 84c5f258f2d8..38a44f9b9da2 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c | |||
@@ -102,8 +102,7 @@ static void __init __fb_init_irq(void) | |||
102 | *CSR_FIQ_DISABLE = -1; | 102 | *CSR_FIQ_DISABLE = -1; |
103 | 103 | ||
104 | for (irq = _DC21285_IRQ(0); irq < _DC21285_IRQ(20); irq++) { | 104 | for (irq = _DC21285_IRQ(0); irq < _DC21285_IRQ(20); irq++) { |
105 | set_irq_chip(irq, &fb_chip); | 105 | irq_set_chip_and_handler(irq, &fb_chip, handle_level_irq); |
106 | set_irq_handler(irq, handle_level_irq); | ||
107 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 106 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
108 | } | 107 | } |
109 | } | 108 | } |
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c index a921fe92b858..5f1f9867fc70 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c | |||
@@ -30,7 +30,7 @@ static int cksrc_dc21285_enable(struct clocksource *cs) | |||
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | static int cksrc_dc21285_disable(struct clocksource *cs) | 33 | static void cksrc_dc21285_disable(struct clocksource *cs) |
34 | { | 34 | { |
35 | *CSR_TIMER2_CNTL = 0; | 35 | *CSR_TIMER2_CNTL = 0; |
36 | } | 36 | } |
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c index de7a5cb5dbe1..c3a0abbc9049 100644 --- a/arch/arm/mach-footbridge/isa-irq.c +++ b/arch/arm/mach-footbridge/isa-irq.c | |||
@@ -151,14 +151,14 @@ void __init isa_init_irq(unsigned int host_irq) | |||
151 | 151 | ||
152 | if (host_irq != (unsigned int)-1) { | 152 | if (host_irq != (unsigned int)-1) { |
153 | for (irq = _ISA_IRQ(0); irq < _ISA_IRQ(8); irq++) { | 153 | for (irq = _ISA_IRQ(0); irq < _ISA_IRQ(8); irq++) { |
154 | set_irq_chip(irq, &isa_lo_chip); | 154 | irq_set_chip_and_handler(irq, &isa_lo_chip, |
155 | set_irq_handler(irq, handle_level_irq); | 155 | handle_level_irq); |
156 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 156 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
157 | } | 157 | } |
158 | 158 | ||
159 | for (irq = _ISA_IRQ(8); irq < _ISA_IRQ(16); irq++) { | 159 | for (irq = _ISA_IRQ(8); irq < _ISA_IRQ(16); irq++) { |
160 | set_irq_chip(irq, &isa_hi_chip); | 160 | irq_set_chip_and_handler(irq, &isa_hi_chip, |
161 | set_irq_handler(irq, handle_level_irq); | 161 | handle_level_irq); |
162 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 162 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
163 | } | 163 | } |
164 | 164 | ||
@@ -166,7 +166,7 @@ void __init isa_init_irq(unsigned int host_irq) | |||
166 | request_resource(&ioport_resource, &pic2_resource); | 166 | request_resource(&ioport_resource, &pic2_resource); |
167 | setup_irq(IRQ_ISA_CASCADE, &irq_cascade); | 167 | setup_irq(IRQ_ISA_CASCADE, &irq_cascade); |
168 | 168 | ||
169 | set_irq_chained_handler(host_irq, isa_irq_handler); | 169 | irq_set_chained_handler(host_irq, isa_irq_handler); |
170 | 170 | ||
171 | /* | 171 | /* |
172 | * On the NetWinder, don't automatically | 172 | * On the NetWinder, don't automatically |
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c index fa3d333f21e1..fdc7ef1391d3 100644 --- a/arch/arm/mach-gemini/gpio.c +++ b/arch/arm/mach-gemini/gpio.c | |||
@@ -127,8 +127,8 @@ static int gpio_set_irq_type(struct irq_data *d, unsigned int type) | |||
127 | 127 | ||
128 | static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 128 | static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) |
129 | { | 129 | { |
130 | unsigned int port = (unsigned int)irq_desc_get_handler_data(desc); | ||
130 | unsigned int gpio_irq_no, irq_stat; | 131 | unsigned int gpio_irq_no, irq_stat; |
131 | unsigned int port = (unsigned int)get_irq_data(irq); | ||
132 | 132 | ||
133 | irq_stat = __raw_readl(GPIO_BASE(port) + GPIO_INT_STAT); | 133 | irq_stat = __raw_readl(GPIO_BASE(port) + GPIO_INT_STAT); |
134 | 134 | ||
@@ -138,9 +138,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
138 | if ((irq_stat & 1) == 0) | 138 | if ((irq_stat & 1) == 0) |
139 | continue; | 139 | continue; |
140 | 140 | ||
141 | BUG_ON(!(irq_desc[gpio_irq_no].handle_irq)); | 141 | generic_handle_irq(gpio_irq_no); |
142 | irq_desc[gpio_irq_no].handle_irq(gpio_irq_no, | ||
143 | &irq_desc[gpio_irq_no]); | ||
144 | } | 142 | } |
145 | } | 143 | } |
146 | 144 | ||
@@ -219,13 +217,13 @@ void __init gemini_gpio_init(void) | |||
219 | 217 | ||
220 | for (j = GPIO_IRQ_BASE + i * 32; | 218 | for (j = GPIO_IRQ_BASE + i * 32; |
221 | j < GPIO_IRQ_BASE + (i + 1) * 32; j++) { | 219 | j < GPIO_IRQ_BASE + (i + 1) * 32; j++) { |
222 | set_irq_chip(j, &gpio_irq_chip); | 220 | irq_set_chip_and_handler(j, &gpio_irq_chip, |
223 | set_irq_handler(j, handle_edge_irq); | 221 | handle_edge_irq); |
224 | set_irq_flags(j, IRQF_VALID); | 222 | set_irq_flags(j, IRQF_VALID); |
225 | } | 223 | } |
226 | 224 | ||
227 | set_irq_chained_handler(IRQ_GPIO(i), gpio_irq_handler); | 225 | irq_set_chained_handler(IRQ_GPIO(i), gpio_irq_handler); |
228 | set_irq_data(IRQ_GPIO(i), (void *)i); | 226 | irq_set_handler_data(IRQ_GPIO(i), (void *)i); |
229 | } | 227 | } |
230 | 228 | ||
231 | BUG_ON(gpiochip_add(&gemini_gpio_chip)); | 229 | BUG_ON(gpiochip_add(&gemini_gpio_chip)); |
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c index 96bc227dd849..9485a8fdf851 100644 --- a/arch/arm/mach-gemini/irq.c +++ b/arch/arm/mach-gemini/irq.c | |||
@@ -81,13 +81,13 @@ void __init gemini_init_irq(void) | |||
81 | request_resource(&iomem_resource, &irq_resource); | 81 | request_resource(&iomem_resource, &irq_resource); |
82 | 82 | ||
83 | for (i = 0; i < NR_IRQS; i++) { | 83 | for (i = 0; i < NR_IRQS; i++) { |
84 | set_irq_chip(i, &gemini_irq_chip); | 84 | irq_set_chip(i, &gemini_irq_chip); |
85 | if((i >= IRQ_TIMER1 && i <= IRQ_TIMER3) || (i >= IRQ_SERIRQ0 && i <= IRQ_SERIRQ1)) { | 85 | if((i >= IRQ_TIMER1 && i <= IRQ_TIMER3) || (i >= IRQ_SERIRQ0 && i <= IRQ_SERIRQ1)) { |
86 | set_irq_handler(i, handle_edge_irq); | 86 | irq_set_handler(i, handle_edge_irq); |
87 | mode |= 1 << i; | 87 | mode |= 1 << i; |
88 | level |= 1 << i; | 88 | level |= 1 << i; |
89 | } else { | 89 | } else { |
90 | set_irq_handler(i, handle_level_irq); | 90 | irq_set_handler(i, handle_level_irq); |
91 | } | 91 | } |
92 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 92 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
93 | } | 93 | } |
diff --git a/arch/arm/mach-h720x/common.c b/arch/arm/mach-h720x/common.c index 1f28c90932c7..51d4e44ab973 100644 --- a/arch/arm/mach-h720x/common.c +++ b/arch/arm/mach-h720x/common.c | |||
@@ -199,29 +199,29 @@ void __init h720x_init_irq (void) | |||
199 | 199 | ||
200 | /* Initialize global IRQ's, fast path */ | 200 | /* Initialize global IRQ's, fast path */ |
201 | for (irq = 0; irq < NR_GLBL_IRQS; irq++) { | 201 | for (irq = 0; irq < NR_GLBL_IRQS; irq++) { |
202 | set_irq_chip(irq, &h720x_global_chip); | 202 | irq_set_chip_and_handler(irq, &h720x_global_chip, |
203 | set_irq_handler(irq, handle_level_irq); | 203 | handle_level_irq); |
204 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 204 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
205 | } | 205 | } |
206 | 206 | ||
207 | /* Initialize multiplexed IRQ's, slow path */ | 207 | /* Initialize multiplexed IRQ's, slow path */ |
208 | for (irq = IRQ_CHAINED_GPIOA(0) ; irq <= IRQ_CHAINED_GPIOD(31); irq++) { | 208 | for (irq = IRQ_CHAINED_GPIOA(0) ; irq <= IRQ_CHAINED_GPIOD(31); irq++) { |
209 | set_irq_chip(irq, &h720x_gpio_chip); | 209 | irq_set_chip_and_handler(irq, &h720x_gpio_chip, |
210 | set_irq_handler(irq, handle_edge_irq); | 210 | handle_edge_irq); |
211 | set_irq_flags(irq, IRQF_VALID ); | 211 | set_irq_flags(irq, IRQF_VALID ); |
212 | } | 212 | } |
213 | set_irq_chained_handler(IRQ_GPIOA, h720x_gpioa_demux_handler); | 213 | irq_set_chained_handler(IRQ_GPIOA, h720x_gpioa_demux_handler); |
214 | set_irq_chained_handler(IRQ_GPIOB, h720x_gpiob_demux_handler); | 214 | irq_set_chained_handler(IRQ_GPIOB, h720x_gpiob_demux_handler); |
215 | set_irq_chained_handler(IRQ_GPIOC, h720x_gpioc_demux_handler); | 215 | irq_set_chained_handler(IRQ_GPIOC, h720x_gpioc_demux_handler); |
216 | set_irq_chained_handler(IRQ_GPIOD, h720x_gpiod_demux_handler); | 216 | irq_set_chained_handler(IRQ_GPIOD, h720x_gpiod_demux_handler); |
217 | 217 | ||
218 | #ifdef CONFIG_CPU_H7202 | 218 | #ifdef CONFIG_CPU_H7202 |
219 | for (irq = IRQ_CHAINED_GPIOE(0) ; irq <= IRQ_CHAINED_GPIOE(31); irq++) { | 219 | for (irq = IRQ_CHAINED_GPIOE(0) ; irq <= IRQ_CHAINED_GPIOE(31); irq++) { |
220 | set_irq_chip(irq, &h720x_gpio_chip); | 220 | irq_set_chip_and_handler(irq, &h720x_gpio_chip, |
221 | set_irq_handler(irq, handle_edge_irq); | 221 | handle_edge_irq); |
222 | set_irq_flags(irq, IRQF_VALID ); | 222 | set_irq_flags(irq, IRQF_VALID ); |
223 | } | 223 | } |
224 | set_irq_chained_handler(IRQ_GPIOE, h720x_gpioe_demux_handler); | 224 | irq_set_chained_handler(IRQ_GPIOE, h720x_gpioe_demux_handler); |
225 | #endif | 225 | #endif |
226 | 226 | ||
227 | /* Enable multiplexed irq's */ | 227 | /* Enable multiplexed irq's */ |
diff --git a/arch/arm/mach-h720x/cpu-h7202.c b/arch/arm/mach-h720x/cpu-h7202.c index ac3f91442376..c37d570b852d 100644 --- a/arch/arm/mach-h720x/cpu-h7202.c +++ b/arch/arm/mach-h720x/cpu-h7202.c | |||
@@ -141,13 +141,18 @@ h7202_timer_interrupt(int irq, void *dev_id) | |||
141 | /* | 141 | /* |
142 | * mask multiplexed timer IRQs | 142 | * mask multiplexed timer IRQs |
143 | */ | 143 | */ |
144 | static void inline mask_timerx_irq(struct irq_data *d) | 144 | static void inline __mask_timerx_irq(unsigned int irq) |
145 | { | 145 | { |
146 | unsigned int bit; | 146 | unsigned int bit; |
147 | bit = 2 << ((d->irq == IRQ_TIMER64B) ? 4 : (d->irq - IRQ_TIMER1)); | 147 | bit = 2 << ((irq == IRQ_TIMER64B) ? 4 : (irq - IRQ_TIMER1)); |
148 | CPU_REG (TIMER_VIRT, TIMER_TOPCTRL) &= ~bit; | 148 | CPU_REG (TIMER_VIRT, TIMER_TOPCTRL) &= ~bit; |
149 | } | 149 | } |
150 | 150 | ||
151 | static void inline mask_timerx_irq(struct irq_data *d) | ||
152 | { | ||
153 | __mask_timerx_irq(d->irq); | ||
154 | } | ||
155 | |||
151 | /* | 156 | /* |
152 | * unmask multiplexed timer IRQs | 157 | * unmask multiplexed timer IRQs |
153 | */ | 158 | */ |
@@ -196,12 +201,12 @@ void __init h7202_init_irq (void) | |||
196 | 201 | ||
197 | for (irq = IRQ_TIMER1; | 202 | for (irq = IRQ_TIMER1; |
198 | irq < IRQ_CHAINED_TIMERX(NR_TIMERX_IRQS); irq++) { | 203 | irq < IRQ_CHAINED_TIMERX(NR_TIMERX_IRQS); irq++) { |
199 | mask_timerx_irq(irq); | 204 | __mask_timerx_irq(irq); |
200 | set_irq_chip(irq, &h7202_timerx_chip); | 205 | irq_set_chip_and_handler(irq, &h7202_timerx_chip, |
201 | set_irq_handler(irq, handle_edge_irq); | 206 | handle_edge_irq); |
202 | set_irq_flags(irq, IRQF_VALID ); | 207 | set_irq_flags(irq, IRQF_VALID ); |
203 | } | 208 | } |
204 | set_irq_chained_handler(IRQ_TIMERX, h7202_timerx_demux_handler); | 209 | irq_set_chained_handler(IRQ_TIMERX, h7202_timerx_demux_handler); |
205 | 210 | ||
206 | h720x_init_irq(); | 211 | h720x_init_irq(); |
207 | } | 212 | } |
diff --git a/arch/arm/mach-iop13xx/irq.c b/arch/arm/mach-iop13xx/irq.c index a233470dd10c..bc739701c301 100644 --- a/arch/arm/mach-iop13xx/irq.c +++ b/arch/arm/mach-iop13xx/irq.c | |||
@@ -224,15 +224,15 @@ void __init iop13xx_init_irq(void) | |||
224 | 224 | ||
225 | for(i = 0; i <= IRQ_IOP13XX_HPI; i++) { | 225 | for(i = 0; i <= IRQ_IOP13XX_HPI; i++) { |
226 | if (i < 32) | 226 | if (i < 32) |
227 | set_irq_chip(i, &iop13xx_irqchip1); | 227 | irq_set_chip(i, &iop13xx_irqchip1); |
228 | else if (i < 64) | 228 | else if (i < 64) |
229 | set_irq_chip(i, &iop13xx_irqchip2); | 229 | irq_set_chip(i, &iop13xx_irqchip2); |
230 | else if (i < 96) | 230 | else if (i < 96) |
231 | set_irq_chip(i, &iop13xx_irqchip3); | 231 | irq_set_chip(i, &iop13xx_irqchip3); |
232 | else | 232 | else |
233 | set_irq_chip(i, &iop13xx_irqchip4); | 233 | irq_set_chip(i, &iop13xx_irqchip4); |
234 | 234 | ||
235 | set_irq_handler(i, handle_level_irq); | 235 | irq_set_handler(i, handle_level_irq); |
236 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 236 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
237 | } | 237 | } |
238 | 238 | ||
diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c index c9c02e3698bc..560d5b2dec22 100644 --- a/arch/arm/mach-iop13xx/msi.c +++ b/arch/arm/mach-iop13xx/msi.c | |||
@@ -118,7 +118,7 @@ static void iop13xx_msi_handler(unsigned int irq, struct irq_desc *desc) | |||
118 | 118 | ||
119 | void __init iop13xx_msi_init(void) | 119 | void __init iop13xx_msi_init(void) |
120 | { | 120 | { |
121 | set_irq_chained_handler(IRQ_IOP13XX_INBD_MSI, iop13xx_msi_handler); | 121 | irq_set_chained_handler(IRQ_IOP13XX_INBD_MSI, iop13xx_msi_handler); |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
@@ -178,7 +178,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
178 | if (irq < 0) | 178 | if (irq < 0) |
179 | return irq; | 179 | return irq; |
180 | 180 | ||
181 | set_irq_msi(irq, desc); | 181 | irq_set_msi_desc(irq, desc); |
182 | 182 | ||
183 | msg.address_hi = 0x0; | 183 | msg.address_hi = 0x0; |
184 | msg.address_lo = IOP13XX_MU_MIMR_PCI; | 184 | msg.address_lo = IOP13XX_MU_MIMR_PCI; |
@@ -187,7 +187,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
187 | msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); | 187 | msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); |
188 | 188 | ||
189 | write_msi_msg(irq, &msg); | 189 | write_msi_msg(irq, &msg); |
190 | set_irq_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); | 190 | irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); |
191 | 191 | ||
192 | return 0; | 192 | return 0; |
193 | } | 193 | } |
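
Aside: the iop13xx MSI hunk shows the renamed MSI plumbing: irq_set_msi_desc() (formerly set_irq_msi) binds the descriptor to the vector before the message is written and the simple-irq flow is installed. A hedged sketch follows; my_msi_chip and MY_MSI_DOORBELL are placeholders, and the vector is assumed to come from a platform-specific allocator rather than being allocated here.

    #include <linux/irq.h>
    #include <linux/msi.h>
    #include <linux/pci.h>

    #define MY_MSI_DOORBELL 0xff000000      /* placeholder doorbell address */

    static struct irq_chip my_msi_chip;     /* placeholder; mask/unmask omitted */

    /* irq is assumed to have been picked by a platform-specific vector allocator */
    static int my_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc,
                                unsigned int irq)
    {
            struct msi_msg msg;

            irq_set_msi_desc(irq, desc);    /* formerly set_irq_msi() */

            msg.address_hi = 0;
            msg.address_lo = MY_MSI_DOORBELL;
            msg.data = irq & 0x7f;

            write_msi_msg(irq, &msg);
            irq_set_chip_and_handler(irq, &my_msi_chip, handle_simple_irq);

            return 0;
    }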
diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c index d3426a120599..d7ee2789d890 100644 --- a/arch/arm/mach-iop32x/irq.c +++ b/arch/arm/mach-iop32x/irq.c | |||
@@ -68,8 +68,7 @@ void __init iop32x_init_irq(void) | |||
68 | *IOP3XX_PCIIRSR = 0x0f; | 68 | *IOP3XX_PCIIRSR = 0x0f; |
69 | 69 | ||
70 | for (i = 0; i < NR_IRQS; i++) { | 70 | for (i = 0; i < NR_IRQS; i++) { |
71 | set_irq_chip(i, &ext_chip); | 71 | irq_set_chip_and_handler(i, &ext_chip, handle_level_irq); |
72 | set_irq_handler(i, handle_level_irq); | ||
73 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 72 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
74 | } | 73 | } |
75 | } | 74 | } |
diff --git a/arch/arm/mach-iop33x/irq.c b/arch/arm/mach-iop33x/irq.c index 0ff2f74363a5..f7f5d3e451c7 100644 --- a/arch/arm/mach-iop33x/irq.c +++ b/arch/arm/mach-iop33x/irq.c | |||
@@ -110,8 +110,9 @@ void __init iop33x_init_irq(void) | |||
110 | *IOP3XX_PCIIRSR = 0x0f; | 110 | *IOP3XX_PCIIRSR = 0x0f; |
111 | 111 | ||
112 | for (i = 0; i < NR_IRQS; i++) { | 112 | for (i = 0; i < NR_IRQS; i++) { |
113 | set_irq_chip(i, (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2); | 113 | irq_set_chip_and_handler(i, |
114 | set_irq_handler(i, handle_level_irq); | 114 | (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2, |
115 | handle_level_irq); | ||
115 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 116 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
116 | } | 117 | } |
117 | } | 118 | } |
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c index 5fc4e064b650..4068166c8993 100644 --- a/arch/arm/mach-ixp2000/core.c +++ b/arch/arm/mach-ixp2000/core.c | |||
@@ -476,8 +476,8 @@ void __init ixp2000_init_irq(void) | |||
476 | */ | 476 | */ |
477 | for (irq = IRQ_IXP2000_SOFT_INT; irq <= IRQ_IXP2000_THDB3; irq++) { | 477 | for (irq = IRQ_IXP2000_SOFT_INT; irq <= IRQ_IXP2000_THDB3; irq++) { |
478 | if ((1 << irq) & IXP2000_VALID_IRQ_MASK) { | 478 | if ((1 << irq) & IXP2000_VALID_IRQ_MASK) { |
479 | set_irq_chip(irq, &ixp2000_irq_chip); | 479 | irq_set_chip_and_handler(irq, &ixp2000_irq_chip, |
480 | set_irq_handler(irq, handle_level_irq); | 480 | handle_level_irq); |
481 | set_irq_flags(irq, IRQF_VALID); | 481 | set_irq_flags(irq, IRQF_VALID); |
482 | } else set_irq_flags(irq, 0); | 482 | } else set_irq_flags(irq, 0); |
483 | } | 483 | } |
@@ -485,21 +485,21 @@ void __init ixp2000_init_irq(void) | |||
485 | for (irq = IRQ_IXP2000_DRAM0_MIN_ERR; irq <= IRQ_IXP2000_SP_INT; irq++) { | 485 | for (irq = IRQ_IXP2000_DRAM0_MIN_ERR; irq <= IRQ_IXP2000_SP_INT; irq++) { |
486 | if((1 << (irq - IRQ_IXP2000_DRAM0_MIN_ERR)) & | 486 | if((1 << (irq - IRQ_IXP2000_DRAM0_MIN_ERR)) & |
487 | IXP2000_VALID_ERR_IRQ_MASK) { | 487 | IXP2000_VALID_ERR_IRQ_MASK) { |
488 | set_irq_chip(irq, &ixp2000_err_irq_chip); | 488 | irq_set_chip_and_handler(irq, &ixp2000_err_irq_chip, |
489 | set_irq_handler(irq, handle_level_irq); | 489 | handle_level_irq); |
490 | set_irq_flags(irq, IRQF_VALID); | 490 | set_irq_flags(irq, IRQF_VALID); |
491 | } | 491 | } |
492 | else | 492 | else |
493 | set_irq_flags(irq, 0); | 493 | set_irq_flags(irq, 0); |
494 | } | 494 | } |
495 | set_irq_chained_handler(IRQ_IXP2000_ERRSUM, ixp2000_err_irq_handler); | 495 | irq_set_chained_handler(IRQ_IXP2000_ERRSUM, ixp2000_err_irq_handler); |
496 | 496 | ||
497 | for (irq = IRQ_IXP2000_GPIO0; irq <= IRQ_IXP2000_GPIO7; irq++) { | 497 | for (irq = IRQ_IXP2000_GPIO0; irq <= IRQ_IXP2000_GPIO7; irq++) { |
498 | set_irq_chip(irq, &ixp2000_GPIO_irq_chip); | 498 | irq_set_chip_and_handler(irq, &ixp2000_GPIO_irq_chip, |
499 | set_irq_handler(irq, handle_level_irq); | 499 | handle_level_irq); |
500 | set_irq_flags(irq, IRQF_VALID); | 500 | set_irq_flags(irq, IRQF_VALID); |
501 | } | 501 | } |
502 | set_irq_chained_handler(IRQ_IXP2000_GPIO, ixp2000_GPIO_irq_handler); | 502 | irq_set_chained_handler(IRQ_IXP2000_GPIO, ixp2000_GPIO_irq_handler); |
503 | 503 | ||
504 | /* | 504 | /* |
505 | * Enable PCI irqs. The actual PCI[AB] decoding is done in | 505 | * Enable PCI irqs. The actual PCI[AB] decoding is done in |
@@ -508,8 +508,8 @@ void __init ixp2000_init_irq(void) | |||
508 | */ | 508 | */ |
509 | ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << IRQ_IXP2000_PCI)); | 509 | ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << IRQ_IXP2000_PCI)); |
510 | for (irq = IRQ_IXP2000_PCIA; irq <= IRQ_IXP2000_PCIB; irq++) { | 510 | for (irq = IRQ_IXP2000_PCIA; irq <= IRQ_IXP2000_PCIB; irq++) { |
511 | set_irq_chip(irq, &ixp2000_pci_irq_chip); | 511 | irq_set_chip_and_handler(irq, &ixp2000_pci_irq_chip, |
512 | set_irq_handler(irq, handle_level_irq); | 512 | handle_level_irq); |
513 | set_irq_flags(irq, IRQF_VALID); | 513 | set_irq_flags(irq, IRQF_VALID); |
514 | } | 514 | } |
515 | } | 515 | } |
diff --git a/arch/arm/mach-ixp2000/ixdp2x00.c b/arch/arm/mach-ixp2000/ixdp2x00.c index 7d90d3f13ee8..235638f800e5 100644 --- a/arch/arm/mach-ixp2000/ixdp2x00.c +++ b/arch/arm/mach-ixp2000/ixdp2x00.c | |||
@@ -158,13 +158,13 @@ void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigne | |||
158 | *board_irq_mask = 0xffffffff; | 158 | *board_irq_mask = 0xffffffff; |
159 | 159 | ||
160 | for(irq = IXP2000_BOARD_IRQ(0); irq < IXP2000_BOARD_IRQ(board_irq_count); irq++) { | 160 | for(irq = IXP2000_BOARD_IRQ(0); irq < IXP2000_BOARD_IRQ(board_irq_count); irq++) { |
161 | set_irq_chip(irq, &ixdp2x00_cpld_irq_chip); | 161 | irq_set_chip_and_handler(irq, &ixdp2x00_cpld_irq_chip, |
162 | set_irq_handler(irq, handle_level_irq); | 162 | handle_level_irq); |
163 | set_irq_flags(irq, IRQF_VALID); | 163 | set_irq_flags(irq, IRQF_VALID); |
164 | } | 164 | } |
165 | 165 | ||
166 | /* Hook into PCI interrupt */ | 166 | /* Hook into PCI interrupt */ |
167 | set_irq_chained_handler(IRQ_IXP2000_PCIB, ixdp2x00_irq_handler); | 167 | irq_set_chained_handler(IRQ_IXP2000_PCIB, ixdp2x00_irq_handler); |
168 | } | 168 | } |
169 | 169 | ||
170 | /************************************************************************* | 170 | /************************************************************************* |
diff --git a/arch/arm/mach-ixp2000/ixdp2x01.c b/arch/arm/mach-ixp2000/ixdp2x01.c index 34b1b2af37c8..84835b209557 100644 --- a/arch/arm/mach-ixp2000/ixdp2x01.c +++ b/arch/arm/mach-ixp2000/ixdp2x01.c | |||
@@ -115,8 +115,8 @@ void __init ixdp2x01_init_irq(void) | |||
115 | 115 | ||
116 | for (irq = NR_IXP2000_IRQS; irq < NR_IXDP2X01_IRQS; irq++) { | 116 | for (irq = NR_IXP2000_IRQS; irq < NR_IXDP2X01_IRQS; irq++) { |
117 | if (irq & valid_irq_mask) { | 117 | if (irq & valid_irq_mask) { |
118 | set_irq_chip(irq, &ixdp2x01_irq_chip); | 118 | irq_set_chip_and_handler(irq, &ixdp2x01_irq_chip, |
119 | set_irq_handler(irq, handle_level_irq); | 119 | handle_level_irq); |
120 | set_irq_flags(irq, IRQF_VALID); | 120 | set_irq_flags(irq, IRQF_VALID); |
121 | } else { | 121 | } else { |
122 | set_irq_flags(irq, 0); | 122 | set_irq_flags(irq, 0); |
@@ -124,7 +124,7 @@ void __init ixdp2x01_init_irq(void) | |||
124 | } | 124 | } |
125 | 125 | ||
126 | /* Hook into PCI interrupts */ | 126 | /* Hook into PCI interrupts */ |
127 | set_irq_chained_handler(IRQ_IXP2000_PCIB, ixdp2x01_irq_handler); | 127 | irq_set_chained_handler(IRQ_IXP2000_PCIB, ixdp2x01_irq_handler); |
128 | } | 128 | } |
129 | 129 | ||
130 | 130 | ||
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c index 9c8a33903216..a1bee33d183e 100644 --- a/arch/arm/mach-ixp23xx/core.c +++ b/arch/arm/mach-ixp23xx/core.c | |||
@@ -289,12 +289,12 @@ static void ixp23xx_config_irq(unsigned int irq, enum ixp23xx_irq_type type) | |||
289 | { | 289 | { |
290 | switch (type) { | 290 | switch (type) { |
291 | case IXP23XX_IRQ_LEVEL: | 291 | case IXP23XX_IRQ_LEVEL: |
292 | set_irq_chip(irq, &ixp23xx_irq_level_chip); | 292 | irq_set_chip_and_handler(irq, &ixp23xx_irq_level_chip, |
293 | set_irq_handler(irq, handle_level_irq); | 293 | handle_level_irq); |
294 | break; | 294 | break; |
295 | case IXP23XX_IRQ_EDGE: | 295 | case IXP23XX_IRQ_EDGE: |
296 | set_irq_chip(irq, &ixp23xx_irq_edge_chip); | 296 | irq_set_chip_and_handler(irq, &ixp23xx_irq_edge_chip, |
297 | set_irq_handler(irq, handle_edge_irq); | 297 | handle_edge_irq); |
298 | break; | 298 | break; |
299 | } | 299 | } |
300 | set_irq_flags(irq, IRQF_VALID); | 300 | set_irq_flags(irq, IRQF_VALID); |
@@ -324,12 +324,12 @@ void __init ixp23xx_init_irq(void) | |||
324 | } | 324 | } |
325 | 325 | ||
326 | for (irq = IRQ_IXP23XX_INTA; irq <= IRQ_IXP23XX_INTB; irq++) { | 326 | for (irq = IRQ_IXP23XX_INTA; irq <= IRQ_IXP23XX_INTB; irq++) { |
327 | set_irq_chip(irq, &ixp23xx_pci_irq_chip); | 327 | irq_set_chip_and_handler(irq, &ixp23xx_pci_irq_chip, |
328 | set_irq_handler(irq, handle_level_irq); | 328 | handle_level_irq); |
329 | set_irq_flags(irq, IRQF_VALID); | 329 | set_irq_flags(irq, IRQF_VALID); |
330 | } | 330 | } |
331 | 331 | ||
332 | set_irq_chained_handler(IRQ_IXP23XX_PCI_INT_RPH, pci_handler); | 332 | irq_set_chained_handler(IRQ_IXP23XX_PCI_INT_RPH, pci_handler); |
333 | } | 333 | } |
334 | 334 | ||
335 | 335 | ||
diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c index 181116aa6591..8dcba17c81e7 100644 --- a/arch/arm/mach-ixp23xx/ixdp2351.c +++ b/arch/arm/mach-ixp23xx/ixdp2351.c | |||
@@ -136,8 +136,8 @@ void __init ixdp2351_init_irq(void) | |||
136 | irq++) { | 136 | irq++) { |
137 | if (IXDP2351_INTA_IRQ_MASK(irq) & IXDP2351_INTA_IRQ_VALID) { | 137 | if (IXDP2351_INTA_IRQ_MASK(irq) & IXDP2351_INTA_IRQ_VALID) { |
138 | set_irq_flags(irq, IRQF_VALID); | 138 | set_irq_flags(irq, IRQF_VALID); |
139 | set_irq_handler(irq, handle_level_irq); | 139 | irq_set_chip_and_handler(irq, &ixdp2351_inta_chip, |
140 | set_irq_chip(irq, &ixdp2351_inta_chip); | 140 | handle_level_irq); |
141 | } | 141 | } |
142 | } | 142 | } |
143 | 143 | ||
@@ -147,13 +147,13 @@ void __init ixdp2351_init_irq(void) | |||
147 | irq++) { | 147 | irq++) { |
148 | if (IXDP2351_INTB_IRQ_MASK(irq) & IXDP2351_INTB_IRQ_VALID) { | 148 | if (IXDP2351_INTB_IRQ_MASK(irq) & IXDP2351_INTB_IRQ_VALID) { |
149 | set_irq_flags(irq, IRQF_VALID); | 149 | set_irq_flags(irq, IRQF_VALID); |
150 | set_irq_handler(irq, handle_level_irq); | 150 | irq_set_chip_and_handler(irq, &ixdp2351_intb_chip, |
151 | set_irq_chip(irq, &ixdp2351_intb_chip); | 151 | handle_level_irq); |
152 | } | 152 | } |
153 | } | 153 | } |
154 | 154 | ||
155 | set_irq_chained_handler(IRQ_IXP23XX_INTA, ixdp2351_inta_handler); | 155 | irq_set_chained_handler(IRQ_IXP23XX_INTA, ixdp2351_inta_handler); |
156 | set_irq_chained_handler(IRQ_IXP23XX_INTB, ixdp2351_intb_handler); | 156 | irq_set_chained_handler(IRQ_IXP23XX_INTB, ixdp2351_intb_handler); |
157 | } | 157 | } |
158 | 158 | ||
159 | /* | 159 | /* |
diff --git a/arch/arm/mach-ixp23xx/roadrunner.c b/arch/arm/mach-ixp23xx/roadrunner.c index 76c61ba73218..8fe0c6273262 100644 --- a/arch/arm/mach-ixp23xx/roadrunner.c +++ b/arch/arm/mach-ixp23xx/roadrunner.c | |||
@@ -110,8 +110,8 @@ static int __init roadrunner_map_irq(struct pci_dev *dev, u8 idsel, u8 pin) | |||
110 | 110 | ||
111 | static void __init roadrunner_pci_preinit(void) | 111 | static void __init roadrunner_pci_preinit(void) |
112 | { | 112 | { |
113 | set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQ_TYPE_LEVEL_LOW); | 113 | irq_set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQ_TYPE_LEVEL_LOW); |
114 | set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQ_TYPE_LEVEL_LOW); | 114 | irq_set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQ_TYPE_LEVEL_LOW); |
115 | 115 | ||
116 | ixp23xx_pci_preinit(); | 116 | ixp23xx_pci_preinit(); |
117 | } | 117 | } |
diff --git a/arch/arm/mach-ixp4xx/avila-pci.c b/arch/arm/mach-ixp4xx/avila-pci.c index 845e1b500548..162043ff29ff 100644 --- a/arch/arm/mach-ixp4xx/avila-pci.c +++ b/arch/arm/mach-ixp4xx/avila-pci.c | |||
@@ -39,10 +39,10 @@ | |||
39 | 39 | ||
40 | void __init avila_pci_preinit(void) | 40 | void __init avila_pci_preinit(void) |
41 | { | 41 | { |
42 | set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); | 42 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); |
43 | set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); | 43 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); |
44 | set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); | 44 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); |
45 | set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); | 45 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); |
46 | ixp4xx_pci_preinit(); | 46 | ixp4xx_pci_preinit(); |
47 | } | 47 | } |
48 | 48 | ||
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 9fd894271d5d..ed19bc314318 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c | |||
@@ -252,8 +252,8 @@ void __init ixp4xx_init_irq(void) | |||
252 | 252 | ||
253 | /* Default to all level triggered */ | 253 | /* Default to all level triggered */ |
254 | for(i = 0; i < NR_IRQS; i++) { | 254 | for(i = 0; i < NR_IRQS; i++) { |
255 | set_irq_chip(i, &ixp4xx_irq_chip); | 255 | irq_set_chip_and_handler(i, &ixp4xx_irq_chip, |
256 | set_irq_handler(i, handle_level_irq); | 256 | handle_level_irq); |
257 | set_irq_flags(i, IRQF_VALID); | 257 | set_irq_flags(i, IRQF_VALID); |
258 | } | 258 | } |
259 | } | 259 | } |
diff --git a/arch/arm/mach-ixp4xx/coyote-pci.c b/arch/arm/mach-ixp4xx/coyote-pci.c index b978ea8bd6f0..37fda7d6e83d 100644 --- a/arch/arm/mach-ixp4xx/coyote-pci.c +++ b/arch/arm/mach-ixp4xx/coyote-pci.c | |||
@@ -32,8 +32,8 @@ | |||
32 | 32 | ||
33 | void __init coyote_pci_preinit(void) | 33 | void __init coyote_pci_preinit(void) |
34 | { | 34 | { |
35 | set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW); | 35 | irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW); |
36 | set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW); | 36 | irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW); |
37 | ixp4xx_pci_preinit(); | 37 | ixp4xx_pci_preinit(); |
38 | } | 38 | } |
39 | 39 | ||
diff --git a/arch/arm/mach-ixp4xx/dsmg600-pci.c b/arch/arm/mach-ixp4xx/dsmg600-pci.c index fa70fed462ba..c7612010b3fc 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-pci.c +++ b/arch/arm/mach-ixp4xx/dsmg600-pci.c | |||
@@ -35,12 +35,12 @@ | |||
35 | 35 | ||
36 | void __init dsmg600_pci_preinit(void) | 36 | void __init dsmg600_pci_preinit(void) |
37 | { | 37 | { |
38 | set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); | 38 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); |
39 | set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); | 39 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); |
40 | set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); | 40 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); |
41 | set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); | 41 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); |
42 | set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW); | 42 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW); |
43 | set_irq_type(IXP4XX_GPIO_IRQ(INTF), IRQ_TYPE_LEVEL_LOW); | 43 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTF), IRQ_TYPE_LEVEL_LOW); |
44 | ixp4xx_pci_preinit(); | 44 | ixp4xx_pci_preinit(); |
45 | } | 45 | } |
46 | 46 | ||
diff --git a/arch/arm/mach-ixp4xx/fsg-pci.c b/arch/arm/mach-ixp4xx/fsg-pci.c index 5a810c930624..44ccde9d4879 100644 --- a/arch/arm/mach-ixp4xx/fsg-pci.c +++ b/arch/arm/mach-ixp4xx/fsg-pci.c | |||
@@ -32,9 +32,9 @@ | |||
32 | 32 | ||
33 | void __init fsg_pci_preinit(void) | 33 | void __init fsg_pci_preinit(void) |
34 | { | 34 | { |
35 | set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); | 35 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); |
36 | set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); | 36 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); |
37 | set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); | 37 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); |
38 | ixp4xx_pci_preinit(); | 38 | ixp4xx_pci_preinit(); |
39 | } | 39 | } |
40 | 40 | ||
diff --git a/arch/arm/mach-ixp4xx/gateway7001-pci.c b/arch/arm/mach-ixp4xx/gateway7001-pci.c index 7e93a0975c4d..fc1124168874 100644 --- a/arch/arm/mach-ixp4xx/gateway7001-pci.c +++ b/arch/arm/mach-ixp4xx/gateway7001-pci.c | |||
@@ -29,8 +29,8 @@ | |||
29 | 29 | ||
30 | void __init gateway7001_pci_preinit(void) | 30 | void __init gateway7001_pci_preinit(void) |
31 | { | 31 | { |
32 | set_irq_type(IRQ_IXP4XX_GPIO10, IRQ_TYPE_LEVEL_LOW); | 32 | irq_set_irq_type(IRQ_IXP4XX_GPIO10, IRQ_TYPE_LEVEL_LOW); |
33 | set_irq_type(IRQ_IXP4XX_GPIO11, IRQ_TYPE_LEVEL_LOW); | 33 | irq_set_irq_type(IRQ_IXP4XX_GPIO11, IRQ_TYPE_LEVEL_LOW); |
34 | 34 | ||
35 | ixp4xx_pci_preinit(); | 35 | ixp4xx_pci_preinit(); |
36 | } | 36 | } |
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c index d0e4861ac03d..3e8c0e33b59c 100644 --- a/arch/arm/mach-ixp4xx/goramo_mlr.c +++ b/arch/arm/mach-ixp4xx/goramo_mlr.c | |||
@@ -420,8 +420,8 @@ static void __init gmlr_init(void) | |||
420 | gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT); | 420 | gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT); |
421 | gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN); | 421 | gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN); |
422 | gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN); | 422 | gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN); |
423 | set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH); | 423 | irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH); |
424 | set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH); | 424 | irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH); |
425 | 425 | ||
426 | set_control(CONTROL_HSS0_DTR_N, 1); | 426 | set_control(CONTROL_HSS0_DTR_N, 1); |
427 | set_control(CONTROL_HSS1_DTR_N, 1); | 427 | set_control(CONTROL_HSS1_DTR_N, 1); |
@@ -441,10 +441,10 @@ static void __init gmlr_init(void) | |||
441 | #ifdef CONFIG_PCI | 441 | #ifdef CONFIG_PCI |
442 | static void __init gmlr_pci_preinit(void) | 442 | static void __init gmlr_pci_preinit(void) |
443 | { | 443 | { |
444 | set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW); | 444 | irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW); |
445 | set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW); | 445 | irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW); |
446 | set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW); | 446 | irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW); |
447 | set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW); | 447 | irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW); |
448 | ixp4xx_pci_preinit(); | 448 | ixp4xx_pci_preinit(); |
449 | } | 449 | } |
450 | 450 | ||
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-pci.c b/arch/arm/mach-ixp4xx/gtwx5715-pci.c index 25d2c333c204..38cc0725dbd8 100644 --- a/arch/arm/mach-ixp4xx/gtwx5715-pci.c +++ b/arch/arm/mach-ixp4xx/gtwx5715-pci.c | |||
@@ -43,8 +43,8 @@ | |||
43 | */ | 43 | */ |
44 | void __init gtwx5715_pci_preinit(void) | 44 | void __init gtwx5715_pci_preinit(void) |
45 | { | 45 | { |
46 | set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); | 46 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); |
47 | set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); | 47 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); |
48 | ixp4xx_pci_preinit(); | 48 | ixp4xx_pci_preinit(); |
49 | } | 49 | } |
50 | 50 | ||
diff --git a/arch/arm/mach-ixp4xx/ixdp425-pci.c b/arch/arm/mach-ixp4xx/ixdp425-pci.c index 1ba165a6edac..58f400417eaf 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-pci.c +++ b/arch/arm/mach-ixp4xx/ixdp425-pci.c | |||
@@ -36,10 +36,10 @@ | |||
36 | 36 | ||
37 | void __init ixdp425_pci_preinit(void) | 37 | void __init ixdp425_pci_preinit(void) |
38 | { | 38 | { |
39 | set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); | 39 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); |
40 | set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); | 40 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); |
41 | set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); | 41 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); |
42 | set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); | 42 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); |
43 | ixp4xx_pci_preinit(); | 43 | ixp4xx_pci_preinit(); |
44 | } | 44 | } |
45 | 45 | ||
diff --git a/arch/arm/mach-ixp4xx/ixdpg425-pci.c b/arch/arm/mach-ixp4xx/ixdpg425-pci.c index 4ed7ac614920..e64f6d041488 100644 --- a/arch/arm/mach-ixp4xx/ixdpg425-pci.c +++ b/arch/arm/mach-ixp4xx/ixdpg425-pci.c | |||
@@ -25,8 +25,8 @@ | |||
25 | 25 | ||
26 | void __init ixdpg425_pci_preinit(void) | 26 | void __init ixdpg425_pci_preinit(void) |
27 | { | 27 | { |
28 | set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW); | 28 | irq_set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW); |
29 | set_irq_type(IRQ_IXP4XX_GPIO7, IRQ_TYPE_LEVEL_LOW); | 29 | irq_set_irq_type(IRQ_IXP4XX_GPIO7, IRQ_TYPE_LEVEL_LOW); |
30 | 30 | ||
31 | ixp4xx_pci_preinit(); | 31 | ixp4xx_pci_preinit(); |
32 | } | 32 | } |
diff --git a/arch/arm/mach-ixp4xx/nas100d-pci.c b/arch/arm/mach-ixp4xx/nas100d-pci.c index d0cea34cf61e..428d1202b799 100644 --- a/arch/arm/mach-ixp4xx/nas100d-pci.c +++ b/arch/arm/mach-ixp4xx/nas100d-pci.c | |||
@@ -33,11 +33,11 @@ | |||
33 | 33 | ||
34 | void __init nas100d_pci_preinit(void) | 34 | void __init nas100d_pci_preinit(void) |
35 | { | 35 | { |
36 | set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); | 36 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); |
37 | set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); | 37 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); |
38 | set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); | 38 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); |
39 | set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); | 39 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); |
40 | set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW); | 40 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW); |
41 | ixp4xx_pci_preinit(); | 41 | ixp4xx_pci_preinit(); |
42 | } | 42 | } |
43 | 43 | ||
diff --git a/arch/arm/mach-ixp4xx/nslu2-pci.c b/arch/arm/mach-ixp4xx/nslu2-pci.c index 1eb5a90470bc..2e85f76b950d 100644 --- a/arch/arm/mach-ixp4xx/nslu2-pci.c +++ b/arch/arm/mach-ixp4xx/nslu2-pci.c | |||
@@ -32,9 +32,9 @@ | |||
32 | 32 | ||
33 | void __init nslu2_pci_preinit(void) | 33 | void __init nslu2_pci_preinit(void) |
34 | { | 34 | { |
35 | set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); | 35 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); |
36 | set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); | 36 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); |
37 | set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); | 37 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); |
38 | ixp4xx_pci_preinit(); | 38 | ixp4xx_pci_preinit(); |
39 | } | 39 | } |
40 | 40 | ||
diff --git a/arch/arm/mach-ixp4xx/vulcan-pci.c b/arch/arm/mach-ixp4xx/vulcan-pci.c index f3111c6840ef..03bdec5140a7 100644 --- a/arch/arm/mach-ixp4xx/vulcan-pci.c +++ b/arch/arm/mach-ixp4xx/vulcan-pci.c | |||
@@ -38,8 +38,8 @@ void __init vulcan_pci_preinit(void) | |||
38 | pr_info("Vulcan PCI: limiting CardBus memory size to %dMB\n", | 38 | pr_info("Vulcan PCI: limiting CardBus memory size to %dMB\n", |
39 | (int)(pci_cardbus_mem_size >> 20)); | 39 | (int)(pci_cardbus_mem_size >> 20)); |
40 | #endif | 40 | #endif |
41 | set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); | 41 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); |
42 | set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); | 42 | irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); |
43 | ixp4xx_pci_preinit(); | 43 | ixp4xx_pci_preinit(); |
44 | } | 44 | } |
45 | 45 | ||
diff --git a/arch/arm/mach-ixp4xx/wg302v2-pci.c b/arch/arm/mach-ixp4xx/wg302v2-pci.c index 9b59ed03b151..17f3cf59a31b 100644 --- a/arch/arm/mach-ixp4xx/wg302v2-pci.c +++ b/arch/arm/mach-ixp4xx/wg302v2-pci.c | |||
@@ -29,8 +29,8 @@ | |||
29 | 29 | ||
30 | void __init wg302v2_pci_preinit(void) | 30 | void __init wg302v2_pci_preinit(void) |
31 | { | 31 | { |
32 | set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW); | 32 | irq_set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW); |
33 | set_irq_type(IRQ_IXP4XX_GPIO9, IRQ_TYPE_LEVEL_LOW); | 33 | irq_set_irq_type(IRQ_IXP4XX_GPIO9, IRQ_TYPE_LEVEL_LOW); |
34 | 34 | ||
35 | ixp4xx_pci_preinit(); | 35 | ixp4xx_pci_preinit(); |
36 | } | 36 | } |
diff --git a/arch/arm/mach-kirkwood/irq.c b/arch/arm/mach-kirkwood/irq.c index cbdb5863d13b..05d193a25b25 100644 --- a/arch/arm/mach-kirkwood/irq.c +++ b/arch/arm/mach-kirkwood/irq.c | |||
@@ -35,14 +35,15 @@ void __init kirkwood_init_irq(void) | |||
35 | */ | 35 | */ |
36 | orion_gpio_init(0, 32, GPIO_LOW_VIRT_BASE, 0, | 36 | orion_gpio_init(0, 32, GPIO_LOW_VIRT_BASE, 0, |
37 | IRQ_KIRKWOOD_GPIO_START); | 37 | IRQ_KIRKWOOD_GPIO_START); |
38 | set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler); | 38 | irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler); |
39 | set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler); | 39 | irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler); |
40 | set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler); | 40 | irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler); |
41 | set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler); | 41 | irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler); |
42 | 42 | ||
43 | orion_gpio_init(32, 18, GPIO_HIGH_VIRT_BASE, 0, | 43 | orion_gpio_init(32, 18, GPIO_HIGH_VIRT_BASE, 0, |
44 | IRQ_KIRKWOOD_GPIO_START + 32); | 44 | IRQ_KIRKWOOD_GPIO_START + 32); |
45 | set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler); | 45 | irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler); |
46 | set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler); | 46 | irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler); |
47 | set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23, gpio_irq_handler); | 47 | irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23, |
48 | gpio_irq_handler); | ||
48 | } | 49 | } |
diff --git a/arch/arm/mach-ks8695/gpio.c b/arch/arm/mach-ks8695/gpio.c index 55fbf7111a5b..31e456508a6f 100644 --- a/arch/arm/mach-ks8695/gpio.c +++ b/arch/arm/mach-ks8695/gpio.c | |||
@@ -80,7 +80,7 @@ int ks8695_gpio_interrupt(unsigned int pin, unsigned int type) | |||
80 | local_irq_restore(flags); | 80 | local_irq_restore(flags); |
81 | 81 | ||
82 | /* Set IRQ triggering type */ | 82 | /* Set IRQ triggering type */ |
83 | set_irq_type(gpio_irq[pin], type); | 83 | irq_set_irq_type(gpio_irq[pin], type); |
84 | 84 | ||
85 | /* enable interrupt mode */ | 85 | /* enable interrupt mode */ |
86 | ks8695_gpio_mode(pin, 0); | 86 | ks8695_gpio_mode(pin, 0); |
diff --git a/arch/arm/mach-ks8695/irq.c b/arch/arm/mach-ks8695/irq.c index 7998ccaa6333..a78092dcd6fb 100644 --- a/arch/arm/mach-ks8695/irq.c +++ b/arch/arm/mach-ks8695/irq.c | |||
@@ -115,12 +115,12 @@ static int ks8695_irq_set_type(struct irq_data *d, unsigned int type) | |||
115 | } | 115 | } |
116 | 116 | ||
117 | if (level_triggered) { | 117 | if (level_triggered) { |
118 | set_irq_chip(d->irq, &ks8695_irq_level_chip); | 118 | irq_set_chip_and_handler(d->irq, &ks8695_irq_level_chip, |
119 | set_irq_handler(d->irq, handle_level_irq); | 119 | handle_level_irq); |
120 | } | 120 | } |
121 | else { | 121 | else { |
122 | set_irq_chip(d->irq, &ks8695_irq_edge_chip); | 122 | irq_set_chip_and_handler(d->irq, &ks8695_irq_edge_chip, |
123 | set_irq_handler(d->irq, handle_edge_irq); | 123 | handle_edge_irq); |
124 | } | 124 | } |
125 | 125 | ||
126 | __raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC); | 126 | __raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC); |
@@ -158,16 +158,18 @@ void __init ks8695_init_irq(void) | |||
158 | case KS8695_IRQ_UART_RX: | 158 | case KS8695_IRQ_UART_RX: |
159 | case KS8695_IRQ_COMM_TX: | 159 | case KS8695_IRQ_COMM_TX: |
160 | case KS8695_IRQ_COMM_RX: | 160 | case KS8695_IRQ_COMM_RX: |
161 | set_irq_chip(irq, &ks8695_irq_level_chip); | 161 | irq_set_chip_and_handler(irq, |
162 | set_irq_handler(irq, handle_level_irq); | 162 | &ks8695_irq_level_chip, |
163 | handle_level_irq); | ||
163 | break; | 164 | break; |
164 | 165 | ||
165 | /* Edge-triggered interrupts */ | 166 | /* Edge-triggered interrupts */ |
166 | default: | 167 | default: |
167 | /* clear pending bit */ | 168 | /* clear pending bit */ |
168 | ks8695_irq_ack(irq_get_irq_data(irq)); | 169 | ks8695_irq_ack(irq_get_irq_data(irq)); |
169 | set_irq_chip(irq, &ks8695_irq_edge_chip); | 170 | irq_set_chip_and_handler(irq, |
170 | set_irq_handler(irq, handle_edge_irq); | 171 | &ks8695_irq_edge_chip, |
172 | handle_edge_irq); | ||
171 | } | 173 | } |
172 | 174 | ||
173 | set_irq_flags(irq, IRQF_VALID); | 175 | set_irq_flags(irq, IRQF_VALID); |
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index 316ecbf6c586..4eae566dfdc7 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c | |||
@@ -290,7 +290,7 @@ static int lpc32xx_set_irq_type(struct irq_data *d, unsigned int type) | |||
290 | } | 290 | } |
291 | 291 | ||
292 | /* Ok to use the level handler for all types */ | 292 | /* Ok to use the level handler for all types */ |
293 | set_irq_handler(d->irq, handle_level_irq); | 293 | irq_set_handler(d->irq, handle_level_irq); |
294 | 294 | ||
295 | return 0; | 295 | return 0; |
296 | } | 296 | } |
@@ -390,8 +390,8 @@ void __init lpc32xx_init_irq(void) | |||
390 | 390 | ||
391 | /* Configure supported IRQ's */ | 391 | /* Configure supported IRQ's */ |
392 | for (i = 0; i < NR_IRQS; i++) { | 392 | for (i = 0; i < NR_IRQS; i++) { |
393 | set_irq_chip(i, &lpc32xx_irq_chip); | 393 | irq_set_chip_and_handler(i, &lpc32xx_irq_chip, |
394 | set_irq_handler(i, handle_level_irq); | 394 | handle_level_irq); |
395 | set_irq_flags(i, IRQF_VALID); | 395 | set_irq_flags(i, IRQF_VALID); |
396 | } | 396 | } |
397 | 397 | ||
@@ -406,8 +406,8 @@ void __init lpc32xx_init_irq(void) | |||
406 | __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE)); | 406 | __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE)); |
407 | 407 | ||
408 | /* MIC SUBIRQx interrupts will route handling to the chain handlers */ | 408 | /* MIC SUBIRQx interrupts will route handling to the chain handlers */ |
409 | set_irq_chained_handler(IRQ_LPC32XX_SUB1IRQ, lpc32xx_sic1_handler); | 409 | irq_set_chained_handler(IRQ_LPC32XX_SUB1IRQ, lpc32xx_sic1_handler); |
410 | set_irq_chained_handler(IRQ_LPC32XX_SUB2IRQ, lpc32xx_sic2_handler); | 410 | irq_set_chained_handler(IRQ_LPC32XX_SUB2IRQ, lpc32xx_sic2_handler); |
411 | 411 | ||
412 | /* Initially disable all wake events */ | 412 | /* Initially disable all wake events */ |
413 | __raw_writel(0, LPC32XX_CLKPWR_P01_ER); | 413 | __raw_writel(0, LPC32XX_CLKPWR_P01_ER); |
diff --git a/arch/arm/mach-mmp/irq-mmp2.c b/arch/arm/mach-mmp/irq-mmp2.c index fa037038e7b8..d21c5441a3d0 100644 --- a/arch/arm/mach-mmp/irq-mmp2.c +++ b/arch/arm/mach-mmp/irq-mmp2.c | |||
@@ -110,9 +110,9 @@ static void init_mux_irq(struct irq_chip *chip, int start, int num) | |||
110 | if (chip->irq_ack) | 110 | if (chip->irq_ack) |
111 | chip->irq_ack(d); | 111 | chip->irq_ack(d); |
112 | 112 | ||
113 | set_irq_chip(irq, chip); | 113 | irq_set_chip(irq, chip); |
114 | set_irq_flags(irq, IRQF_VALID); | 114 | set_irq_flags(irq, IRQF_VALID); |
115 | set_irq_handler(irq, handle_level_irq); | 115 | irq_set_handler(irq, handle_level_irq); |
116 | } | 116 | } |
117 | } | 117 | } |
118 | 118 | ||
@@ -122,7 +122,7 @@ void __init mmp2_init_icu(void) | |||
122 | 122 | ||
123 | for (irq = 0; irq < IRQ_MMP2_MUX_BASE; irq++) { | 123 | for (irq = 0; irq < IRQ_MMP2_MUX_BASE; irq++) { |
124 | icu_mask_irq(irq_get_irq_data(irq)); | 124 | icu_mask_irq(irq_get_irq_data(irq)); |
125 | set_irq_chip(irq, &icu_irq_chip); | 125 | irq_set_chip(irq, &icu_irq_chip); |
126 | set_irq_flags(irq, IRQF_VALID); | 126 | set_irq_flags(irq, IRQF_VALID); |
127 | 127 | ||
128 | switch (irq) { | 128 | switch (irq) { |
@@ -133,7 +133,7 @@ void __init mmp2_init_icu(void) | |||
133 | case IRQ_MMP2_SSP_MUX: | 133 | case IRQ_MMP2_SSP_MUX: |
134 | break; | 134 | break; |
135 | default: | 135 | default: |
136 | set_irq_handler(irq, handle_level_irq); | 136 | irq_set_handler(irq, handle_level_irq); |
137 | break; | 137 | break; |
138 | } | 138 | } |
139 | } | 139 | } |
@@ -149,9 +149,9 @@ void __init mmp2_init_icu(void) | |||
149 | init_mux_irq(&misc_irq_chip, IRQ_MMP2_MISC_BASE, 15); | 149 | init_mux_irq(&misc_irq_chip, IRQ_MMP2_MISC_BASE, 15); |
150 | init_mux_irq(&ssp_irq_chip, IRQ_MMP2_SSP_BASE, 2); | 150 | init_mux_irq(&ssp_irq_chip, IRQ_MMP2_SSP_BASE, 2); |
151 | 151 | ||
152 | set_irq_chained_handler(IRQ_MMP2_PMIC_MUX, pmic_irq_demux); | 152 | irq_set_chained_handler(IRQ_MMP2_PMIC_MUX, pmic_irq_demux); |
153 | set_irq_chained_handler(IRQ_MMP2_RTC_MUX, rtc_irq_demux); | 153 | irq_set_chained_handler(IRQ_MMP2_RTC_MUX, rtc_irq_demux); |
154 | set_irq_chained_handler(IRQ_MMP2_TWSI_MUX, twsi_irq_demux); | 154 | irq_set_chained_handler(IRQ_MMP2_TWSI_MUX, twsi_irq_demux); |
155 | set_irq_chained_handler(IRQ_MMP2_MISC_MUX, misc_irq_demux); | 155 | irq_set_chained_handler(IRQ_MMP2_MISC_MUX, misc_irq_demux); |
156 | set_irq_chained_handler(IRQ_MMP2_SSP_MUX, ssp_irq_demux); | 156 | irq_set_chained_handler(IRQ_MMP2_SSP_MUX, ssp_irq_demux); |
157 | } | 157 | } |
diff --git a/arch/arm/mach-mmp/irq-pxa168.c b/arch/arm/mach-mmp/irq-pxa168.c index f86b450cb93c..89706a0d08f1 100644 --- a/arch/arm/mach-mmp/irq-pxa168.c +++ b/arch/arm/mach-mmp/irq-pxa168.c | |||
@@ -48,8 +48,7 @@ void __init icu_init_irq(void) | |||
48 | 48 | ||
49 | for (irq = 0; irq < 64; irq++) { | 49 | for (irq = 0; irq < 64; irq++) { |
50 | icu_mask_irq(irq_get_irq_data(irq)); | 50 | icu_mask_irq(irq_get_irq_data(irq)); |
51 | set_irq_chip(irq, &icu_irq_chip); | 51 | irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq); |
52 | set_irq_handler(irq, handle_level_irq); | ||
53 | set_irq_flags(irq, IRQF_VALID); | 52 | set_irq_flags(irq, IRQF_VALID); |
54 | } | 53 | } |
55 | } | 54 | } |
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c index 1993721d472e..35c7ceeb3f29 100644 --- a/arch/arm/mach-msm/board-msm8960.c +++ b/arch/arm/mach-msm/board-msm8960.c | |||
@@ -53,7 +53,7 @@ static void __init msm8960_init_irq(void) | |||
53 | */ | 53 | */ |
54 | for (i = GIC_PPI_START; i < GIC_SPI_START; i++) { | 54 | for (i = GIC_PPI_START; i < GIC_SPI_START; i++) { |
55 | if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE) | 55 | if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE) |
56 | set_irq_handler(i, handle_percpu_irq); | 56 | irq_set_handler(i, handle_percpu_irq); |
57 | } | 57 | } |
58 | } | 58 | } |
59 | 59 | ||
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c index b3c55f138fce..1163b6fd05d2 100644 --- a/arch/arm/mach-msm/board-msm8x60.c +++ b/arch/arm/mach-msm/board-msm8x60.c | |||
@@ -56,7 +56,7 @@ static void __init msm8x60_init_irq(void) | |||
56 | */ | 56 | */ |
57 | for (i = GIC_PPI_START; i < GIC_SPI_START; i++) { | 57 | for (i = GIC_PPI_START; i < GIC_SPI_START; i++) { |
58 | if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE) | 58 | if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE) |
59 | set_irq_handler(i, handle_percpu_irq); | 59 | irq_set_handler(i, handle_percpu_irq); |
60 | } | 60 | } |
61 | } | 61 | } |
62 | 62 | ||
diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c index 31117a4499c4..87e1d01edecc 100644 --- a/arch/arm/mach-msm/board-trout-gpio.c +++ b/arch/arm/mach-msm/board-trout-gpio.c | |||
@@ -214,17 +214,17 @@ int __init trout_init_gpio(void) | |||
214 | { | 214 | { |
215 | int i; | 215 | int i; |
216 | for(i = TROUT_INT_START; i <= TROUT_INT_END; i++) { | 216 | for(i = TROUT_INT_START; i <= TROUT_INT_END; i++) { |
217 | set_irq_chip(i, &trout_gpio_irq_chip); | 217 | irq_set_chip_and_handler(i, &trout_gpio_irq_chip, |
218 | set_irq_handler(i, handle_edge_irq); | 218 | handle_edge_irq); |
219 | set_irq_flags(i, IRQF_VALID); | 219 | set_irq_flags(i, IRQF_VALID); |
220 | } | 220 | } |
221 | 221 | ||
222 | for (i = 0; i < ARRAY_SIZE(msm_gpio_banks); i++) | 222 | for (i = 0; i < ARRAY_SIZE(msm_gpio_banks); i++) |
223 | gpiochip_add(&msm_gpio_banks[i].chip); | 223 | gpiochip_add(&msm_gpio_banks[i].chip); |
224 | 224 | ||
225 | set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH); | 225 | irq_set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH); |
226 | set_irq_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler); | 226 | irq_set_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler); |
227 | set_irq_wake(MSM_GPIO_TO_INT(17), 1); | 227 | irq_set_irq_wake(MSM_GPIO_TO_INT(17), 1); |
228 | 228 | ||
229 | return 0; | 229 | return 0; |
230 | } | 230 | } |
diff --git a/arch/arm/mach-msm/board-trout-mmc.c b/arch/arm/mach-msm/board-trout-mmc.c index 44be8464657b..f7a9724788b0 100644 --- a/arch/arm/mach-msm/board-trout-mmc.c +++ b/arch/arm/mach-msm/board-trout-mmc.c | |||
@@ -174,7 +174,7 @@ int __init trout_init_mmc(unsigned int sys_rev) | |||
174 | if (IS_ERR(vreg_sdslot)) | 174 | if (IS_ERR(vreg_sdslot)) |
175 | return PTR_ERR(vreg_sdslot); | 175 | return PTR_ERR(vreg_sdslot); |
176 | 176 | ||
177 | set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1); | 177 | irq_set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1); |
178 | 178 | ||
179 | if (!opt_disable_sdcard) | 179 | if (!opt_disable_sdcard) |
180 | msm_add_sdcc(2, &trout_sdslot_data, | 180 | msm_add_sdcc(2, &trout_sdslot_data, |
diff --git a/arch/arm/mach-msm/gpio-v2.c b/arch/arm/mach-msm/gpio-v2.c index 0de19ec74e34..56a964e52ad3 100644 --- a/arch/arm/mach-msm/gpio-v2.c +++ b/arch/arm/mach-msm/gpio-v2.c | |||
@@ -230,18 +230,18 @@ static void msm_gpio_update_dual_edge_pos(unsigned gpio) | |||
230 | val, val2); | 230 | val, val2); |
231 | } | 231 | } |
232 | 232 | ||
233 | static void msm_gpio_irq_ack(unsigned int irq) | 233 | static void msm_gpio_irq_ack(struct irq_data *d) |
234 | { | 234 | { |
235 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); | 235 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); |
236 | 236 | ||
237 | writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio)); | 237 | writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio)); |
238 | if (test_bit(gpio, msm_gpio.dual_edge_irqs)) | 238 | if (test_bit(gpio, msm_gpio.dual_edge_irqs)) |
239 | msm_gpio_update_dual_edge_pos(gpio); | 239 | msm_gpio_update_dual_edge_pos(gpio); |
240 | } | 240 | } |
241 | 241 | ||
242 | static void msm_gpio_irq_mask(unsigned int irq) | 242 | static void msm_gpio_irq_mask(struct irq_data *d) |
243 | { | 243 | { |
244 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); | 244 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); |
245 | unsigned long irq_flags; | 245 | unsigned long irq_flags; |
246 | 246 | ||
247 | spin_lock_irqsave(&tlmm_lock, irq_flags); | 247 | spin_lock_irqsave(&tlmm_lock, irq_flags); |
@@ -251,9 +251,9 @@ static void msm_gpio_irq_mask(unsigned int irq) | |||
251 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | 251 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); |
252 | } | 252 | } |
253 | 253 | ||
254 | static void msm_gpio_irq_unmask(unsigned int irq) | 254 | static void msm_gpio_irq_unmask(struct irq_data *d) |
255 | { | 255 | { |
256 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); | 256 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); |
257 | unsigned long irq_flags; | 257 | unsigned long irq_flags; |
258 | 258 | ||
259 | spin_lock_irqsave(&tlmm_lock, irq_flags); | 259 | spin_lock_irqsave(&tlmm_lock, irq_flags); |
@@ -263,9 +263,9 @@ static void msm_gpio_irq_unmask(unsigned int irq) | |||
263 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); | 263 | spin_unlock_irqrestore(&tlmm_lock, irq_flags); |
264 | } | 264 | } |
265 | 265 | ||
266 | static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type) | 266 | static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) |
267 | { | 267 | { |
268 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); | 268 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); |
269 | unsigned long irq_flags; | 269 | unsigned long irq_flags; |
270 | uint32_t bits; | 270 | uint32_t bits; |
271 | 271 | ||
@@ -275,14 +275,14 @@ static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type) | |||
275 | 275 | ||
276 | if (flow_type & IRQ_TYPE_EDGE_BOTH) { | 276 | if (flow_type & IRQ_TYPE_EDGE_BOTH) { |
277 | bits |= BIT(INTR_DECT_CTL); | 277 | bits |= BIT(INTR_DECT_CTL); |
278 | irq_desc[irq].handle_irq = handle_edge_irq; | 278 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
279 | if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) | 279 | if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) |
280 | __set_bit(gpio, msm_gpio.dual_edge_irqs); | 280 | __set_bit(gpio, msm_gpio.dual_edge_irqs); |
281 | else | 281 | else |
282 | __clear_bit(gpio, msm_gpio.dual_edge_irqs); | 282 | __clear_bit(gpio, msm_gpio.dual_edge_irqs); |
283 | } else { | 283 | } else { |
284 | bits &= ~BIT(INTR_DECT_CTL); | 284 | bits &= ~BIT(INTR_DECT_CTL); |
285 | irq_desc[irq].handle_irq = handle_level_irq; | 285 | __irq_set_handler_locked(d->irq, handle_level_irq); |
286 | __clear_bit(gpio, msm_gpio.dual_edge_irqs); | 286 | __clear_bit(gpio, msm_gpio.dual_edge_irqs); |
287 | } | 287 | } |
288 | 288 | ||
@@ -309,6 +309,7 @@ static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type) | |||
309 | */ | 309 | */ |
310 | static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) | 310 | static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) |
311 | { | 311 | { |
312 | struct irq_data *data = irq_desc_get_irq_data(desc); | ||
312 | unsigned long i; | 313 | unsigned long i; |
313 | 314 | ||
314 | for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS); | 315 | for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS); |
@@ -318,21 +319,21 @@ static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
318 | generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip, | 319 | generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip, |
319 | i)); | 320 | i)); |
320 | } | 321 | } |
321 | desc->chip->ack(irq); | 322 | data->chip->irq_ack(data); |
322 | } | 323 | } |
323 | 324 | ||
324 | static int msm_gpio_irq_set_wake(unsigned int irq, unsigned int on) | 325 | static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) |
325 | { | 326 | { |
326 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq); | 327 | int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); |
327 | 328 | ||
328 | if (on) { | 329 | if (on) { |
329 | if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) | 330 | if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) |
330 | set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1); | 331 | irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1); |
331 | set_bit(gpio, msm_gpio.wake_irqs); | 332 | set_bit(gpio, msm_gpio.wake_irqs); |
332 | } else { | 333 | } else { |
333 | clear_bit(gpio, msm_gpio.wake_irqs); | 334 | clear_bit(gpio, msm_gpio.wake_irqs); |
334 | if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) | 335 | if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) |
335 | set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0); | 336 | irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0); |
336 | } | 337 | } |
337 | 338 | ||
338 | return 0; | 339 | return 0; |
@@ -340,11 +341,11 @@ static int msm_gpio_irq_set_wake(unsigned int irq, unsigned int on) | |||
340 | 341 | ||
341 | static struct irq_chip msm_gpio_irq_chip = { | 342 | static struct irq_chip msm_gpio_irq_chip = { |
342 | .name = "msmgpio", | 343 | .name = "msmgpio", |
343 | .mask = msm_gpio_irq_mask, | 344 | .irq_mask = msm_gpio_irq_mask, |
344 | .unmask = msm_gpio_irq_unmask, | 345 | .irq_unmask = msm_gpio_irq_unmask, |
345 | .ack = msm_gpio_irq_ack, | 346 | .irq_ack = msm_gpio_irq_ack, |
346 | .set_type = msm_gpio_irq_set_type, | 347 | .irq_set_type = msm_gpio_irq_set_type, |
347 | .set_wake = msm_gpio_irq_set_wake, | 348 | .irq_set_wake = msm_gpio_irq_set_wake, |
348 | }; | 349 | }; |
349 | 350 | ||
350 | static int __devinit msm_gpio_probe(struct platform_device *dev) | 351 | static int __devinit msm_gpio_probe(struct platform_device *dev) |
@@ -361,12 +362,12 @@ static int __devinit msm_gpio_probe(struct platform_device *dev) | |||
361 | 362 | ||
362 | for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) { | 363 | for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) { |
363 | irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i); | 364 | irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i); |
364 | set_irq_chip(irq, &msm_gpio_irq_chip); | 365 | irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, |
365 | set_irq_handler(irq, handle_level_irq); | 366 | handle_level_irq); |
366 | set_irq_flags(irq, IRQF_VALID); | 367 | set_irq_flags(irq, IRQF_VALID); |
367 | } | 368 | } |
368 | 369 | ||
369 | set_irq_chained_handler(TLMM_SCSS_SUMMARY_IRQ, | 370 | irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ, |
370 | msm_summary_irq_handler); | 371 | msm_summary_irq_handler); |
371 | return 0; | 372 | return 0; |
372 | } | 373 | } |
@@ -378,7 +379,7 @@ static int __devexit msm_gpio_remove(struct platform_device *dev) | |||
378 | if (ret < 0) | 379 | if (ret < 0) |
379 | return ret; | 380 | return ret; |
380 | 381 | ||
381 | set_irq_handler(TLMM_SCSS_SUMMARY_IRQ, NULL); | 382 | irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL); |
382 | 383 | ||
383 | return 0; | 384 | return 0; |
384 | } | 385 | } |
diff --git a/arch/arm/mach-msm/gpio.c b/arch/arm/mach-msm/gpio.c index 176af9dcb8ee..5ea273b00da8 100644 --- a/arch/arm/mach-msm/gpio.c +++ b/arch/arm/mach-msm/gpio.c | |||
@@ -293,10 +293,10 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) | |||
293 | val = readl(msm_chip->regs.int_edge); | 293 | val = readl(msm_chip->regs.int_edge); |
294 | if (flow_type & IRQ_TYPE_EDGE_BOTH) { | 294 | if (flow_type & IRQ_TYPE_EDGE_BOTH) { |
295 | writel(val | mask, msm_chip->regs.int_edge); | 295 | writel(val | mask, msm_chip->regs.int_edge); |
296 | irq_desc[d->irq].handle_irq = handle_edge_irq; | 296 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
297 | } else { | 297 | } else { |
298 | writel(val & ~mask, msm_chip->regs.int_edge); | 298 | writel(val & ~mask, msm_chip->regs.int_edge); |
299 | irq_desc[d->irq].handle_irq = handle_level_irq; | 299 | __irq_set_handler_locked(d->irq, handle_level_irq); |
300 | } | 300 | } |
301 | if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { | 301 | if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { |
302 | msm_chip->both_edge_detect |= mask; | 302 | msm_chip->both_edge_detect |= mask; |
@@ -354,9 +354,9 @@ static int __init msm_init_gpio(void) | |||
354 | msm_gpio_chips[j].chip.base + | 354 | msm_gpio_chips[j].chip.base + |
355 | msm_gpio_chips[j].chip.ngpio) | 355 | msm_gpio_chips[j].chip.ngpio) |
356 | j++; | 356 | j++; |
357 | set_irq_chip_data(i, &msm_gpio_chips[j]); | 357 | irq_set_chip_data(i, &msm_gpio_chips[j]); |
358 | set_irq_chip(i, &msm_gpio_irq_chip); | 358 | irq_set_chip_and_handler(i, &msm_gpio_irq_chip, |
359 | set_irq_handler(i, handle_edge_irq); | 359 | handle_edge_irq); |
360 | set_irq_flags(i, IRQF_VALID); | 360 | set_irq_flags(i, IRQF_VALID); |
361 | } | 361 | } |
362 | 362 | ||
@@ -366,10 +366,10 @@ static int __init msm_init_gpio(void) | |||
366 | gpiochip_add(&msm_gpio_chips[i].chip); | 366 | gpiochip_add(&msm_gpio_chips[i].chip); |
367 | } | 367 | } |
368 | 368 | ||
369 | set_irq_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler); | 369 | irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler); |
370 | set_irq_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler); | 370 | irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler); |
371 | set_irq_wake(INT_GPIO_GROUP1, 1); | 371 | irq_set_irq_wake(INT_GPIO_GROUP1, 1); |
372 | set_irq_wake(INT_GPIO_GROUP2, 2); | 372 | irq_set_irq_wake(INT_GPIO_GROUP2, 2); |
373 | return 0; | 373 | return 0; |
374 | } | 374 | } |
375 | 375 | ||
diff --git a/arch/arm/mach-msm/irq-vic.c b/arch/arm/mach-msm/irq-vic.c index 68c28bbdc969..1b54f807c2d0 100644 --- a/arch/arm/mach-msm/irq-vic.c +++ b/arch/arm/mach-msm/irq-vic.c | |||
@@ -313,11 +313,11 @@ static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type) | |||
313 | type = msm_irq_shadow_reg[index].int_type; | 313 | type = msm_irq_shadow_reg[index].int_type; |
314 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { | 314 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { |
315 | type |= b; | 315 | type |= b; |
316 | irq_desc[d->irq].handle_irq = handle_edge_irq; | 316 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
317 | } | 317 | } |
318 | if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { | 318 | if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { |
319 | type &= ~b; | 319 | type &= ~b; |
320 | irq_desc[d->irq].handle_irq = handle_level_irq; | 320 | __irq_set_handler_locked(d->irq, handle_level_irq); |
321 | } | 321 | } |
322 | writel(type, treg); | 322 | writel(type, treg); |
323 | msm_irq_shadow_reg[index].int_type = type; | 323 | msm_irq_shadow_reg[index].int_type = type; |
@@ -357,8 +357,7 @@ void __init msm_init_irq(void) | |||
357 | writel(3, VIC_INT_MASTEREN); | 357 | writel(3, VIC_INT_MASTEREN); |
358 | 358 | ||
359 | for (n = 0; n < NR_MSM_IRQS; n++) { | 359 | for (n = 0; n < NR_MSM_IRQS; n++) { |
360 | set_irq_chip(n, &msm_irq_chip); | 360 | irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq); |
361 | set_irq_handler(n, handle_level_irq); | ||
362 | set_irq_flags(n, IRQF_VALID); | 361 | set_irq_flags(n, IRQF_VALID); |
363 | } | 362 | } |
364 | } | 363 | } |
diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c index 0b27d899f40e..ea514be390c6 100644 --- a/arch/arm/mach-msm/irq.c +++ b/arch/arm/mach-msm/irq.c | |||
@@ -100,11 +100,11 @@ static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type) | |||
100 | 100 | ||
101 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { | 101 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { |
102 | writel(readl(treg) | b, treg); | 102 | writel(readl(treg) | b, treg); |
103 | irq_desc[d->irq].handle_irq = handle_edge_irq; | 103 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
104 | } | 104 | } |
105 | if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { | 105 | if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { |
106 | writel(readl(treg) & (~b), treg); | 106 | writel(readl(treg) & (~b), treg); |
107 | irq_desc[d->irq].handle_irq = handle_level_irq; | 107 | __irq_set_handler_locked(d->irq, handle_level_irq); |
108 | } | 108 | } |
109 | return 0; | 109 | return 0; |
110 | } | 110 | } |
@@ -145,8 +145,7 @@ void __init msm_init_irq(void) | |||
145 | writel(1, VIC_INT_MASTEREN); | 145 | writel(1, VIC_INT_MASTEREN); |
146 | 146 | ||
147 | for (n = 0; n < NR_MSM_IRQS; n++) { | 147 | for (n = 0; n < NR_MSM_IRQS; n++) { |
148 | set_irq_chip(n, &msm_irq_chip); | 148 | irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq); |
149 | set_irq_handler(n, handle_level_irq); | ||
150 | set_irq_flags(n, IRQF_VALID); | 149 | set_irq_flags(n, IRQF_VALID); |
151 | } | 150 | } |
152 | } | 151 | } |
diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c index 11b54c7aeb09..689e78c95f38 100644 --- a/arch/arm/mach-msm/sirc.c +++ b/arch/arm/mach-msm/sirc.c | |||
@@ -105,10 +105,10 @@ static int sirc_irq_set_type(struct irq_data *d, unsigned int flow_type) | |||
105 | val = readl(sirc_regs.int_type); | 105 | val = readl(sirc_regs.int_type); |
106 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { | 106 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { |
107 | val |= mask; | 107 | val |= mask; |
108 | irq_desc[d->irq].handle_irq = handle_edge_irq; | 108 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
109 | } else { | 109 | } else { |
110 | val &= ~mask; | 110 | val &= ~mask; |
111 | irq_desc[d->irq].handle_irq = handle_level_irq; | 111 | __irq_set_handler_locked(d->irq, handle_level_irq); |
112 | } | 112 | } |
113 | 113 | ||
114 | writel(val, sirc_regs.int_type); | 114 | writel(val, sirc_regs.int_type); |
@@ -158,15 +158,14 @@ void __init msm_init_sirc(void) | |||
158 | wake_enable = 0; | 158 | wake_enable = 0; |
159 | 159 | ||
160 | for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) { | 160 | for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) { |
161 | set_irq_chip(i, &sirc_irq_chip); | 161 | irq_set_chip_and_handler(i, &sirc_irq_chip, handle_edge_irq); |
162 | set_irq_handler(i, handle_edge_irq); | ||
163 | set_irq_flags(i, IRQF_VALID); | 162 | set_irq_flags(i, IRQF_VALID); |
164 | } | 163 | } |
165 | 164 | ||
166 | for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) { | 165 | for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) { |
167 | set_irq_chained_handler(sirc_reg_table[i].cascade_irq, | 166 | irq_set_chained_handler(sirc_reg_table[i].cascade_irq, |
168 | sirc_irq_handler); | 167 | sirc_irq_handler); |
169 | set_irq_wake(sirc_reg_table[i].cascade_irq, 1); | 168 | irq_set_irq_wake(sirc_reg_table[i].cascade_irq, 1); |
170 | } | 169 | } |
171 | return; | 170 | return; |
172 | } | 171 | } |
diff --git a/arch/arm/mach-mv78xx0/irq.c b/arch/arm/mach-mv78xx0/irq.c index 08da497c39c2..3e24431bb5ea 100644 --- a/arch/arm/mach-mv78xx0/irq.c +++ b/arch/arm/mach-mv78xx0/irq.c | |||
@@ -38,8 +38,8 @@ void __init mv78xx0_init_irq(void) | |||
38 | orion_gpio_init(0, 32, GPIO_VIRT_BASE, | 38 | orion_gpio_init(0, 32, GPIO_VIRT_BASE, |
39 | mv78xx0_core_index() ? 0x18 : 0, | 39 | mv78xx0_core_index() ? 0x18 : 0, |
40 | IRQ_MV78XX0_GPIO_START); | 40 | IRQ_MV78XX0_GPIO_START); |
41 | set_irq_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler); | 41 | irq_set_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler); |
42 | set_irq_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler); | 42 | irq_set_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler); |
43 | set_irq_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler); | 43 | irq_set_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler); |
44 | set_irq_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler); | 44 | irq_set_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler); |
45 | } | 45 | } |
diff --git a/arch/arm/mach-mx3/mach-mx31ads.c b/arch/arm/mach-mx3/mach-mx31ads.c index 4e4b780c481d..3d095d69bc68 100644 --- a/arch/arm/mach-mx3/mach-mx31ads.c +++ b/arch/arm/mach-mx3/mach-mx31ads.c | |||
@@ -199,12 +199,11 @@ static void __init mx31ads_init_expio(void) | |||
199 | __raw_writew(0xFFFF, PBC_INTSTATUS_REG); | 199 | __raw_writew(0xFFFF, PBC_INTSTATUS_REG); |
200 | for (i = MXC_EXP_IO_BASE; i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES); | 200 | for (i = MXC_EXP_IO_BASE; i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES); |
201 | i++) { | 201 | i++) { |
202 | set_irq_chip(i, &expio_irq_chip); | 202 | irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq); |
203 | set_irq_handler(i, handle_level_irq); | ||
204 | set_irq_flags(i, IRQF_VALID); | 203 | set_irq_flags(i, IRQF_VALID); |
205 | } | 204 | } |
206 | set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_HIGH); | 205 | irq_set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_HIGH); |
207 | set_irq_chained_handler(EXPIO_PARENT_INT, mx31ads_expio_irq_handler); | 206 | irq_set_chained_handler(EXPIO_PARENT_INT, mx31ads_expio_irq_handler); |
208 | } | 207 | } |
209 | 208 | ||
210 | #ifdef CONFIG_MACH_MX31ADS_WM1133_EV1 | 209 | #ifdef CONFIG_MACH_MX31ADS_WM1133_EV1 |
diff --git a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c b/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c index e83ffadb65f8..4a8550529b04 100644 --- a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c +++ b/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c | |||
@@ -212,7 +212,7 @@ void __init eukrea_mbimx51_baseboard_init(void) | |||
212 | 212 | ||
213 | gpio_request(MBIMX51_TSC2007_GPIO, "tsc2007_irq"); | 213 | gpio_request(MBIMX51_TSC2007_GPIO, "tsc2007_irq"); |
214 | gpio_direction_input(MBIMX51_TSC2007_GPIO); | 214 | gpio_direction_input(MBIMX51_TSC2007_GPIO); |
215 | set_irq_type(MBIMX51_TSC2007_IRQ, IRQF_TRIGGER_FALLING); | 215 | irq_set_irq_type(MBIMX51_TSC2007_IRQ, IRQF_TRIGGER_FALLING); |
216 | i2c_register_board_info(1, mbimx51_i2c_devices, | 216 | i2c_register_board_info(1, mbimx51_i2c_devices, |
217 | ARRAY_SIZE(mbimx51_i2c_devices)); | 217 | ARRAY_SIZE(mbimx51_i2c_devices)); |
218 | 218 | ||
diff --git a/arch/arm/mach-mx5/mx51_efika.c b/arch/arm/mach-mx5/mx51_efika.c index 51a67fc7f0ef..868af8f435fa 100644 --- a/arch/arm/mach-mx5/mx51_efika.c +++ b/arch/arm/mach-mx5/mx51_efika.c | |||
@@ -572,8 +572,10 @@ static struct mc13xxx_regulator_init_data mx51_efika_regulators[] = { | |||
572 | 572 | ||
573 | static struct mc13xxx_platform_data mx51_efika_mc13892_data = { | 573 | static struct mc13xxx_platform_data mx51_efika_mc13892_data = { |
574 | .flags = MC13XXX_USE_RTC | MC13XXX_USE_REGULATOR, | 574 | .flags = MC13XXX_USE_RTC | MC13XXX_USE_REGULATOR, |
575 | .num_regulators = ARRAY_SIZE(mx51_efika_regulators), | 575 | .regulators = { |
576 | .regulators = mx51_efika_regulators, | 576 | .num_regulators = ARRAY_SIZE(mx51_efika_regulators), |
577 | .regulators = mx51_efika_regulators, | ||
578 | }, | ||
577 | }; | 579 | }; |
578 | 580 | ||
579 | static struct spi_board_info mx51_efika_spi_board_info[] __initdata = { | 581 | static struct spi_board_info mx51_efika_spi_board_info[] __initdata = { |
diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c index 56fa2ed15222..2c950fef71a8 100644 --- a/arch/arm/mach-mxs/gpio.c +++ b/arch/arm/mach-mxs/gpio.c | |||
@@ -136,7 +136,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type) | |||
136 | static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) | 136 | static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) |
137 | { | 137 | { |
138 | u32 irq_stat; | 138 | u32 irq_stat; |
139 | struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq); | 139 | struct mxs_gpio_port *port = (struct mxs_gpio_port *)irq_get_handler_data(irq); |
140 | u32 gpio_irq_no_base = port->virtual_irq_start; | 140 | u32 gpio_irq_no_base = port->virtual_irq_start; |
141 | 141 | ||
142 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 142 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
@@ -265,14 +265,14 @@ int __init mxs_gpio_init(struct mxs_gpio_port *port, int cnt) | |||
265 | 265 | ||
266 | for (j = port[i].virtual_irq_start; | 266 | for (j = port[i].virtual_irq_start; |
267 | j < port[i].virtual_irq_start + 32; j++) { | 267 | j < port[i].virtual_irq_start + 32; j++) { |
268 | set_irq_chip(j, &gpio_irq_chip); | 268 | irq_set_chip_and_handler(j, &gpio_irq_chip, |
269 | set_irq_handler(j, handle_level_irq); | 269 | handle_level_irq); |
270 | set_irq_flags(j, IRQF_VALID); | 270 | set_irq_flags(j, IRQF_VALID); |
271 | } | 271 | } |
272 | 272 | ||
273 | /* setup one handler for each entry */ | 273 | /* setup one handler for each entry */ |
274 | set_irq_chained_handler(port[i].irq, mxs_gpio_irq_handler); | 274 | irq_set_chained_handler(port[i].irq, mxs_gpio_irq_handler); |
275 | set_irq_data(port[i].irq, &port[i]); | 275 | irq_set_handler_data(port[i].irq, &port[i]); |
276 | 276 | ||
277 | /* register gpio chip */ | 277 | /* register gpio chip */ |
278 | port[i].chip.direction_input = mxs_gpio_direction_input; | 278 | port[i].chip.direction_input = mxs_gpio_direction_input; |
diff --git a/arch/arm/mach-mxs/icoll.c b/arch/arm/mach-mxs/icoll.c index 0f4c120fc169..23ca9d083b2c 100644 --- a/arch/arm/mach-mxs/icoll.c +++ b/arch/arm/mach-mxs/icoll.c | |||
@@ -74,8 +74,7 @@ void __init icoll_init_irq(void) | |||
74 | mxs_reset_block(icoll_base + HW_ICOLL_CTRL); | 74 | mxs_reset_block(icoll_base + HW_ICOLL_CTRL); |
75 | 75 | ||
76 | for (i = 0; i < MXS_INTERNAL_IRQS; i++) { | 76 | for (i = 0; i < MXS_INTERNAL_IRQS; i++) { |
77 | set_irq_chip(i, &mxs_icoll_chip); | 77 | irq_set_chip_and_handler(i, &mxs_icoll_chip, handle_level_irq); |
78 | set_irq_handler(i, handle_level_irq); | ||
79 | set_irq_flags(i, IRQF_VALID); | 78 | set_irq_flags(i, IRQF_VALID); |
80 | } | 79 | } |
81 | } | 80 | } |
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index 29ffa750fbe6..00023b5cf12b 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c | |||
@@ -171,13 +171,13 @@ void __init netx_init_irq(void) | |||
171 | vic_init(__io(io_p2v(NETX_PA_VIC)), 0, ~0, 0); | 171 | vic_init(__io(io_p2v(NETX_PA_VIC)), 0, ~0, 0); |
172 | 172 | ||
173 | for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { | 173 | for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { |
174 | set_irq_chip(irq, &netx_hif_chip); | 174 | irq_set_chip_and_handler(irq, &netx_hif_chip, |
175 | set_irq_handler(irq, handle_level_irq); | 175 | handle_level_irq); |
176 | set_irq_flags(irq, IRQF_VALID); | 176 | set_irq_flags(irq, IRQF_VALID); |
177 | } | 177 | } |
178 | 178 | ||
179 | writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN); | 179 | writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN); |
180 | set_irq_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler); | 180 | irq_set_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler); |
181 | } | 181 | } |
182 | 182 | ||
183 | static int __init netx_init(void) | 183 | static int __init netx_init(void) |
diff --git a/arch/arm/mach-ns9xxx/board-a9m9750dev.c b/arch/arm/mach-ns9xxx/board-a9m9750dev.c index 0c0d5248c368..e27687d53504 100644 --- a/arch/arm/mach-ns9xxx/board-a9m9750dev.c +++ b/arch/arm/mach-ns9xxx/board-a9m9750dev.c | |||
@@ -107,8 +107,8 @@ void __init board_a9m9750dev_init_irq(void) | |||
107 | __func__); | 107 | __func__); |
108 | 108 | ||
109 | for (i = FPGA_IRQ(0); i <= FPGA_IRQ(7); ++i) { | 109 | for (i = FPGA_IRQ(0); i <= FPGA_IRQ(7); ++i) { |
110 | set_irq_chip(i, &a9m9750dev_fpga_chip); | 110 | irq_set_chip_and_handler(i, &a9m9750dev_fpga_chip, |
111 | set_irq_handler(i, handle_level_irq); | 111 | handle_level_irq); |
112 | set_irq_flags(i, IRQF_VALID); | 112 | set_irq_flags(i, IRQF_VALID); |
113 | } | 113 | } |
114 | 114 | ||
@@ -118,8 +118,8 @@ void __init board_a9m9750dev_init_irq(void) | |||
118 | REGSET(eic, SYS_EIC, LVEDG, LEVEL); | 118 | REGSET(eic, SYS_EIC, LVEDG, LEVEL); |
119 | __raw_writel(eic, SYS_EIC(2)); | 119 | __raw_writel(eic, SYS_EIC(2)); |
120 | 120 | ||
121 | set_irq_chained_handler(IRQ_NS9XXX_EXT2, | 121 | irq_set_chained_handler(IRQ_NS9XXX_EXT2, |
122 | a9m9750dev_fpga_demux_handler); | 122 | a9m9750dev_fpga_demux_handler); |
123 | } | 123 | } |
124 | 124 | ||
125 | void __init board_a9m9750dev_init_machine(void) | 125 | void __init board_a9m9750dev_init_machine(void) |
diff --git a/arch/arm/mach-ns9xxx/include/mach/board.h b/arch/arm/mach-ns9xxx/include/mach/board.h index f7e9196eb9ab..19ca6de46a45 100644 --- a/arch/arm/mach-ns9xxx/include/mach/board.h +++ b/arch/arm/mach-ns9xxx/include/mach/board.h | |||
@@ -14,12 +14,10 @@ | |||
14 | #include <asm/mach-types.h> | 14 | #include <asm/mach-types.h> |
15 | 15 | ||
16 | #define board_is_a9m9750dev() (0 \ | 16 | #define board_is_a9m9750dev() (0 \ |
17 | || machine_is_cc9p9360dev() \ | ||
18 | || machine_is_cc9p9750dev() \ | 17 | || machine_is_cc9p9750dev() \ |
19 | ) | 18 | ) |
20 | 19 | ||
21 | #define board_is_a9mvali() (0 \ | 20 | #define board_is_a9mvali() (0 \ |
22 | || machine_is_cc9p9360val() \ | ||
23 | || machine_is_cc9p9750val() \ | 21 | || machine_is_cc9p9750val() \ |
24 | ) | 22 | ) |
25 | 23 | ||
diff --git a/arch/arm/mach-ns9xxx/include/mach/module.h b/arch/arm/mach-ns9xxx/include/mach/module.h index f851a6b7da6c..628e9752589b 100644 --- a/arch/arm/mach-ns9xxx/include/mach/module.h +++ b/arch/arm/mach-ns9xxx/include/mach/module.h | |||
@@ -18,7 +18,6 @@ | |||
18 | ) | 18 | ) |
19 | 19 | ||
20 | #define module_is_cc9c() (0 \ | 20 | #define module_is_cc9c() (0 \ |
21 | || machine_is_cc9c() \ | ||
22 | ) | 21 | ) |
23 | 22 | ||
24 | #define module_is_cc9p9210() (0 \ | 23 | #define module_is_cc9p9210() (0 \ |
@@ -32,21 +31,17 @@ | |||
32 | ) | 31 | ) |
33 | 32 | ||
34 | #define module_is_cc9p9360() (0 \ | 33 | #define module_is_cc9p9360() (0 \ |
35 | || machine_is_a9m9360() \ | ||
36 | || machine_is_cc9p9360dev() \ | 34 | || machine_is_cc9p9360dev() \ |
37 | || machine_is_cc9p9360js() \ | 35 | || machine_is_cc9p9360js() \ |
38 | || machine_is_cc9p9360val() \ | ||
39 | ) | 36 | ) |
40 | 37 | ||
41 | #define module_is_cc9p9750() (0 \ | 38 | #define module_is_cc9p9750() (0 \ |
42 | || machine_is_a9m9750() \ | 39 | || machine_is_a9m9750() \ |
43 | || machine_is_cc9p9750dev() \ | ||
44 | || machine_is_cc9p9750js() \ | 40 | || machine_is_cc9p9750js() \ |
45 | || machine_is_cc9p9750val() \ | 41 | || machine_is_cc9p9750val() \ |
46 | ) | 42 | ) |
47 | 43 | ||
48 | #define module_is_ccw9c() (0 \ | 44 | #define module_is_ccw9c() (0 \ |
49 | || machine_is_ccw9c() \ | ||
50 | ) | 45 | ) |
51 | 46 | ||
52 | #define module_is_inc20otter() (0 \ | 47 | #define module_is_inc20otter() (0 \ |
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c index 389fa5c669de..37ab0a2b83ad 100644 --- a/arch/arm/mach-ns9xxx/irq.c +++ b/arch/arm/mach-ns9xxx/irq.c | |||
@@ -31,17 +31,11 @@ static void ns9xxx_mask_irq(struct irq_data *d) | |||
31 | __raw_writel(ic, SYS_IC(prio / 4)); | 31 | __raw_writel(ic, SYS_IC(prio / 4)); |
32 | } | 32 | } |
33 | 33 | ||
34 | static void ns9xxx_ack_irq(struct irq_data *d) | 34 | static void ns9xxx_eoi_irq(struct irq_data *d) |
35 | { | 35 | { |
36 | __raw_writel(0, SYS_ISRADDR); | 36 | __raw_writel(0, SYS_ISRADDR); |
37 | } | 37 | } |
38 | 38 | ||
39 | static void ns9xxx_maskack_irq(struct irq_data *d) | ||
40 | { | ||
41 | ns9xxx_mask_irq(d); | ||
42 | ns9xxx_ack_irq(d); | ||
43 | } | ||
44 | |||
45 | static void ns9xxx_unmask_irq(struct irq_data *d) | 39 | static void ns9xxx_unmask_irq(struct irq_data *d) |
46 | { | 40 | { |
47 | /* XXX: better use cpp symbols */ | 41 | /* XXX: better use cpp symbols */ |
@@ -52,56 +46,11 @@ static void ns9xxx_unmask_irq(struct irq_data *d) | |||
52 | } | 46 | } |
53 | 47 | ||
54 | static struct irq_chip ns9xxx_chip = { | 48 | static struct irq_chip ns9xxx_chip = { |
55 | .irq_ack = ns9xxx_ack_irq, | 49 | .irq_eoi = ns9xxx_eoi_irq, |
56 | .irq_mask = ns9xxx_mask_irq, | 50 | .irq_mask = ns9xxx_mask_irq, |
57 | .irq_mask_ack = ns9xxx_maskack_irq, | ||
58 | .irq_unmask = ns9xxx_unmask_irq, | 51 | .irq_unmask = ns9xxx_unmask_irq, |
59 | }; | 52 | }; |
60 | 53 | ||
61 | #if 0 | ||
62 | #define handle_irq handle_level_irq | ||
63 | #else | ||
64 | static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) | ||
65 | { | ||
66 | struct irqaction *action; | ||
67 | irqreturn_t action_ret; | ||
68 | |||
69 | raw_spin_lock(&desc->lock); | ||
70 | |||
71 | BUG_ON(desc->status & IRQ_INPROGRESS); | ||
72 | |||
73 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | ||
74 | kstat_incr_irqs_this_cpu(irq, desc); | ||
75 | |||
76 | action = desc->action; | ||
77 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) | ||
78 | goto out_mask; | ||
79 | |||
80 | desc->status |= IRQ_INPROGRESS; | ||
81 | raw_spin_unlock(&desc->lock); | ||
82 | |||
83 | action_ret = handle_IRQ_event(irq, action); | ||
84 | |||
85 | /* XXX: There is no direct way to access noirqdebug, so check | ||
86 | * unconditionally for spurious irqs... | ||
87 | * Maybe this function should go to kernel/irq/chip.c? */ | ||
88 | note_interrupt(irq, desc, action_ret); | ||
89 | |||
90 | raw_spin_lock(&desc->lock); | ||
91 | desc->status &= ~IRQ_INPROGRESS; | ||
92 | |||
93 | if (desc->status & IRQ_DISABLED) | ||
94 | out_mask: | ||
95 | desc->irq_data.chip->irq_mask(&desc->irq_data); | ||
96 | |||
97 | /* ack unconditionally to unmask lower prio irqs */ | ||
98 | desc->irq_data.chip->irq_ack(&desc->irq_data); | ||
99 | |||
100 | raw_spin_unlock(&desc->lock); | ||
101 | } | ||
102 | #define handle_irq handle_prio_irq | ||
103 | #endif | ||
104 | |||
105 | void __init ns9xxx_init_irq(void) | 54 | void __init ns9xxx_init_irq(void) |
106 | { | 55 | { |
107 | int i; | 56 | int i; |
@@ -118,8 +67,8 @@ void __init ns9xxx_init_irq(void) | |||
118 | __raw_writel(prio2irq(i), SYS_IVA(i)); | 67 | __raw_writel(prio2irq(i), SYS_IVA(i)); |
119 | 68 | ||
120 | for (i = 0; i <= 31; ++i) { | 69 | for (i = 0; i <= 31; ++i) { |
121 | set_irq_chip(i, &ns9xxx_chip); | 70 | irq_set_chip_and_handler(i, &ns9xxx_chip, handle_fasteoi_irq); |
122 | set_irq_handler(i, handle_irq); | ||
123 | set_irq_flags(i, IRQF_VALID); | 71 | set_irq_flags(i, IRQF_VALID); |
72 | irq_set_status_flags(i, IRQ_LEVEL); | ||
124 | } | 73 | } |
125 | } | 74 | } |
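Beyond the plain renames, the ns9xxx conversion above also drops the home-grown handle_prio_irq flow and the ack/mask_ack callbacks: the chip now exposes only .irq_eoi (the write to SYS_ISRADDR), and every interrupt is registered with the generic handle_fasteoi_irq flow and marked IRQ_LEVEL. A sketch of that registration style, with hypothetical register and callback names (not the tree's own identifiers):

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>

#define EXAMPLE_EOI_REG	((void __iomem *)0xf9000000)	/* placeholder mapped address */

static void example_mask(struct irq_data *d)   { /* mask the source in hardware */ }
static void example_unmask(struct irq_data *d) { /* unmask the source in hardware */ }

static void example_eoi(struct irq_data *d)
{
	/* signal end-of-interrupt to the controller */
	__raw_writel(0, EXAMPLE_EOI_REG);
}

static struct irq_chip example_fasteoi_chip = {
	.irq_eoi	= example_eoi,
	.irq_mask	= example_mask,
	.irq_unmask	= example_unmask,
};

static void __init example_init_irq(void)
{
	int i;

	for (i = 0; i <= 31; ++i) {
		irq_set_chip_and_handler(i, &example_fasteoi_chip,
					 handle_fasteoi_irq);
		set_irq_flags(i, IRQF_VALID);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}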
diff --git a/arch/arm/mach-nuc93x/irq.c b/arch/arm/mach-nuc93x/irq.c index 1f8a05a22834..aa279f23e342 100644 --- a/arch/arm/mach-nuc93x/irq.c +++ b/arch/arm/mach-nuc93x/irq.c | |||
@@ -59,8 +59,8 @@ void __init nuc93x_init_irq(void) | |||
59 | __raw_writel(0xFFFFFFFE, REG_AIC_MDCR); | 59 | __raw_writel(0xFFFFFFFE, REG_AIC_MDCR); |
60 | 60 | ||
61 | for (irqno = IRQ_WDT; irqno <= NR_IRQS; irqno++) { | 61 | for (irqno = IRQ_WDT; irqno <= NR_IRQS; irqno++) { |
62 | set_irq_chip(irqno, &nuc93x_irq_chip); | 62 | irq_set_chip_and_handler(irqno, &nuc93x_irq_chip, |
63 | set_irq_handler(irqno, handle_level_irq); | 63 | handle_level_irq); |
64 | set_irq_flags(irqno, IRQF_VALID); | 64 | set_irq_flags(irqno, IRQF_VALID); |
65 | } | 65 | } |
66 | } | 66 | } |
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c index 7c5e2112c776..e68dfde1918e 100644 --- a/arch/arm/mach-omap1/board-osk.c +++ b/arch/arm/mach-omap1/board-osk.c | |||
@@ -276,7 +276,7 @@ static void __init osk_init_cf(void) | |||
276 | return; | 276 | return; |
277 | } | 277 | } |
278 | /* the CF I/O IRQ is really active-low */ | 278 | /* the CF I/O IRQ is really active-low */ |
279 | set_irq_type(gpio_to_irq(62), IRQ_TYPE_EDGE_FALLING); | 279 | irq_set_irq_type(gpio_to_irq(62), IRQ_TYPE_EDGE_FALLING); |
280 | } | 280 | } |
281 | 281 | ||
282 | static void __init osk_init_irq(void) | 282 | static void __init osk_init_irq(void) |
@@ -482,7 +482,7 @@ static void __init osk_mistral_init(void) | |||
482 | omap_cfg_reg(P20_1610_GPIO4); /* PENIRQ */ | 482 | omap_cfg_reg(P20_1610_GPIO4); /* PENIRQ */ |
483 | gpio_request(4, "ts_int"); | 483 | gpio_request(4, "ts_int"); |
484 | gpio_direction_input(4); | 484 | gpio_direction_input(4); |
485 | set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING); | 485 | irq_set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING); |
486 | 486 | ||
487 | spi_register_board_info(mistral_boardinfo, | 487 | spi_register_board_info(mistral_boardinfo, |
488 | ARRAY_SIZE(mistral_boardinfo)); | 488 | ARRAY_SIZE(mistral_boardinfo)); |
@@ -500,7 +500,7 @@ static void __init osk_mistral_init(void) | |||
500 | int irq = gpio_to_irq(OMAP_MPUIO(2)); | 500 | int irq = gpio_to_irq(OMAP_MPUIO(2)); |
501 | 501 | ||
502 | gpio_direction_input(OMAP_MPUIO(2)); | 502 | gpio_direction_input(OMAP_MPUIO(2)); |
503 | set_irq_type(irq, IRQ_TYPE_EDGE_RISING); | 503 | irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); |
504 | #ifdef CONFIG_PM | 504 | #ifdef CONFIG_PM |
505 | /* share the IRQ in case someone wants to use the | 505 | /* share the IRQ in case someone wants to use the |
506 | * button for more than wakeup from system sleep. | 506 | * button for more than wakeup from system sleep. |
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c index d7bbbe721a75..45f01d2c3a7a 100644 --- a/arch/arm/mach-omap1/board-palmz71.c +++ b/arch/arm/mach-omap1/board-palmz71.c | |||
@@ -256,12 +256,12 @@ palmz71_powercable(int irq, void *dev_id) | |||
256 | { | 256 | { |
257 | if (gpio_get_value(PALMZ71_USBDETECT_GPIO)) { | 257 | if (gpio_get_value(PALMZ71_USBDETECT_GPIO)) { |
258 | printk(KERN_INFO "PM: Power cable connected\n"); | 258 | printk(KERN_INFO "PM: Power cable connected\n"); |
259 | set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), | 259 | irq_set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), |
260 | IRQ_TYPE_EDGE_FALLING); | 260 | IRQ_TYPE_EDGE_FALLING); |
261 | } else { | 261 | } else { |
262 | printk(KERN_INFO "PM: Power cable disconnected\n"); | 262 | printk(KERN_INFO "PM: Power cable disconnected\n"); |
263 | set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), | 263 | irq_set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO), |
264 | IRQ_TYPE_EDGE_RISING); | 264 | IRQ_TYPE_EDGE_RISING); |
265 | } | 265 | } |
266 | return IRQ_HANDLED; | 266 | return IRQ_HANDLED; |
267 | } | 267 | } |
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c index bdc0ac8dc21f..65d24204937a 100644 --- a/arch/arm/mach-omap1/board-voiceblue.c +++ b/arch/arm/mach-omap1/board-voiceblue.c | |||
@@ -279,10 +279,10 @@ static void __init voiceblue_init(void) | |||
279 | gpio_request(13, "16C554 irq"); | 279 | gpio_request(13, "16C554 irq"); |
280 | gpio_request(14, "16C554 irq"); | 280 | gpio_request(14, "16C554 irq"); |
281 | gpio_request(15, "16C554 irq"); | 281 | gpio_request(15, "16C554 irq"); |
282 | set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING); | 282 | irq_set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING); |
283 | set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING); | 283 | irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING); |
284 | set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING); | 284 | irq_set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING); |
285 | set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING); | 285 | irq_set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING); |
286 | 286 | ||
287 | platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices)); | 287 | platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices)); |
288 | omap_board_config = voiceblue_config; | 288 | omap_board_config = voiceblue_config; |
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c index 0ace7998aaa5..cddbf8b089ce 100644 --- a/arch/arm/mach-omap1/fpga.c +++ b/arch/arm/mach-omap1/fpga.c | |||
@@ -156,17 +156,17 @@ void omap1510_fpga_init_irq(void) | |||
156 | * The touchscreen interrupt is level-sensitive, so | 156 | * The touchscreen interrupt is level-sensitive, so |
157 | * we'll use the regular mask_ack routine for it. | 157 | * we'll use the regular mask_ack routine for it. |
158 | */ | 158 | */ |
159 | set_irq_chip(i, &omap_fpga_irq_ack); | 159 | irq_set_chip(i, &omap_fpga_irq_ack); |
160 | } | 160 | } |
161 | else { | 161 | else { |
162 | /* | 162 | /* |
163 | * All FPGA interrupts except the touchscreen are | 163 | * All FPGA interrupts except the touchscreen are |
164 | * edge-sensitive, so we won't mask them. | 164 | * edge-sensitive, so we won't mask them. |
165 | */ | 165 | */ |
166 | set_irq_chip(i, &omap_fpga_irq); | 166 | irq_set_chip(i, &omap_fpga_irq); |
167 | } | 167 | } |
168 | 168 | ||
169 | set_irq_handler(i, handle_edge_irq); | 169 | irq_set_handler(i, handle_edge_irq); |
170 | set_irq_flags(i, IRQF_VALID); | 170 | set_irq_flags(i, IRQF_VALID); |
171 | } | 171 | } |
172 | 172 | ||
@@ -183,6 +183,6 @@ void omap1510_fpga_init_irq(void) | |||
183 | return; | 183 | return; |
184 | } | 184 | } |
185 | gpio_direction_input(13); | 185 | gpio_direction_input(13); |
186 | set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING); | 186 | irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING); |
187 | set_irq_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux); | 187 | irq_set_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux); |
188 | } | 188 | } |
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c index 731dd33bff51..5d3da7a63af3 100644 --- a/arch/arm/mach-omap1/irq.c +++ b/arch/arm/mach-omap1/irq.c | |||
@@ -230,8 +230,8 @@ void __init omap_init_irq(void) | |||
230 | irq_trigger = irq_banks[i].trigger_map >> IRQ_BIT(j); | 230 | irq_trigger = irq_banks[i].trigger_map >> IRQ_BIT(j); |
231 | omap_irq_set_cfg(j, 0, 0, irq_trigger); | 231 | omap_irq_set_cfg(j, 0, 0, irq_trigger); |
232 | 232 | ||
233 | set_irq_chip(j, &omap_irq_chip); | 233 | irq_set_chip_and_handler(j, &omap_irq_chip, |
234 | set_irq_handler(j, handle_level_irq); | 234 | handle_level_irq); |
235 | set_irq_flags(j, IRQF_VALID); | 235 | set_irq_flags(j, IRQF_VALID); |
236 | } | 236 | } |
237 | } | 237 | } |
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 493505c3b2f5..130034bf01d5 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c | |||
@@ -743,7 +743,7 @@ static int __init gpmc_init(void) | |||
743 | /* initalize the irq_chained */ | 743 | /* initalize the irq_chained */ |
744 | irq = OMAP_GPMC_IRQ_BASE; | 744 | irq = OMAP_GPMC_IRQ_BASE; |
745 | for (cs = 0; cs < GPMC_CS_NUM; cs++) { | 745 | for (cs = 0; cs < GPMC_CS_NUM; cs++) { |
746 | set_irq_chip_and_handler(irq, &dummy_irq_chip, | 746 | irq_set_chip_and_handler(irq, &dummy_irq_chip, |
747 | handle_simple_irq); | 747 | handle_simple_irq); |
748 | set_irq_flags(irq, IRQF_VALID); | 748 | set_irq_flags(irq, IRQF_VALID); |
749 | irq++; | 749 | irq++; |
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c index bc524b94fd59..237e4530abf2 100644 --- a/arch/arm/mach-omap2/irq.c +++ b/arch/arm/mach-omap2/irq.c | |||
@@ -223,8 +223,7 @@ void __init omap_init_irq(void) | |||
223 | nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : ""); | 223 | nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : ""); |
224 | 224 | ||
225 | for (i = 0; i < nr_of_irqs; i++) { | 225 | for (i = 0; i < nr_of_irqs; i++) { |
226 | set_irq_chip(i, &omap_irq_chip); | 226 | irq_set_chip_and_handler(i, &omap_irq_chip, handle_level_irq); |
227 | set_irq_handler(i, handle_level_irq); | ||
228 | set_irq_flags(i, IRQF_VALID); | 227 | set_irq_flags(i, IRQF_VALID); |
229 | } | 228 | } |
230 | } | 229 | } |
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c index c10a11715376..b7d4591214e0 100644 --- a/arch/arm/mach-orion5x/db88f5281-setup.c +++ b/arch/arm/mach-orion5x/db88f5281-setup.c | |||
@@ -213,7 +213,7 @@ void __init db88f5281_pci_preinit(void) | |||
213 | pin = DB88F5281_PCI_SLOT0_IRQ_PIN; | 213 | pin = DB88F5281_PCI_SLOT0_IRQ_PIN; |
214 | if (gpio_request(pin, "PCI Int1") == 0) { | 214 | if (gpio_request(pin, "PCI Int1") == 0) { |
215 | if (gpio_direction_input(pin) == 0) { | 215 | if (gpio_direction_input(pin) == 0) { |
216 | set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 216 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
217 | } else { | 217 | } else { |
218 | printk(KERN_ERR "db88f5281_pci_preinit faield to " | 218 | printk(KERN_ERR "db88f5281_pci_preinit faield to " |
219 | "set_irq_type pin %d\n", pin); | 219 | "set_irq_type pin %d\n", pin); |
@@ -226,7 +226,7 @@ void __init db88f5281_pci_preinit(void) | |||
226 | pin = DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN; | 226 | pin = DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN; |
227 | if (gpio_request(pin, "PCI Int2") == 0) { | 227 | if (gpio_request(pin, "PCI Int2") == 0) { |
228 | if (gpio_direction_input(pin) == 0) { | 228 | if (gpio_direction_input(pin) == 0) { |
229 | set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 229 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
230 | } else { | 230 | } else { |
231 | printk(KERN_ERR "db88f5281_pci_preinit faield " | 231 | printk(KERN_ERR "db88f5281_pci_preinit faield " |
232 | "to set_irq_type pin %d\n", pin); | 232 | "to set_irq_type pin %d\n", pin); |
diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c index ed85891f8699..43cf8bc9767b 100644 --- a/arch/arm/mach-orion5x/irq.c +++ b/arch/arm/mach-orion5x/irq.c | |||
@@ -34,8 +34,8 @@ void __init orion5x_init_irq(void) | |||
34 | * Initialize gpiolib for GPIOs 0-31. | 34 | * Initialize gpiolib for GPIOs 0-31. |
35 | */ | 35 | */ |
36 | orion_gpio_init(0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START); | 36 | orion_gpio_init(0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START); |
37 | set_irq_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler); | 37 | irq_set_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler); |
38 | set_irq_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler); | 38 | irq_set_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler); |
39 | set_irq_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler); | 39 | irq_set_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler); |
40 | set_irq_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler); | 40 | irq_set_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler); |
41 | } | 41 | } |
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c index 67ec6959b267..4fc46772a087 100644 --- a/arch/arm/mach-orion5x/rd88f5182-setup.c +++ b/arch/arm/mach-orion5x/rd88f5182-setup.c | |||
@@ -148,7 +148,7 @@ void __init rd88f5182_pci_preinit(void) | |||
148 | pin = RD88F5182_PCI_SLOT0_IRQ_A_PIN; | 148 | pin = RD88F5182_PCI_SLOT0_IRQ_A_PIN; |
149 | if (gpio_request(pin, "PCI IntA") == 0) { | 149 | if (gpio_request(pin, "PCI IntA") == 0) { |
150 | if (gpio_direction_input(pin) == 0) { | 150 | if (gpio_direction_input(pin) == 0) { |
151 | set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 151 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
152 | } else { | 152 | } else { |
153 | printk(KERN_ERR "rd88f5182_pci_preinit faield to " | 153 | printk(KERN_ERR "rd88f5182_pci_preinit faield to " |
154 | "set_irq_type pin %d\n", pin); | 154 | "set_irq_type pin %d\n", pin); |
@@ -161,7 +161,7 @@ void __init rd88f5182_pci_preinit(void) | |||
161 | pin = RD88F5182_PCI_SLOT0_IRQ_B_PIN; | 161 | pin = RD88F5182_PCI_SLOT0_IRQ_B_PIN; |
162 | if (gpio_request(pin, "PCI IntB") == 0) { | 162 | if (gpio_request(pin, "PCI IntB") == 0) { |
163 | if (gpio_direction_input(pin) == 0) { | 163 | if (gpio_direction_input(pin) == 0) { |
164 | set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 164 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
165 | } else { | 165 | } else { |
166 | printk(KERN_ERR "rd88f5182_pci_preinit faield to " | 166 | printk(KERN_ERR "rd88f5182_pci_preinit faield to " |
167 | "set_irq_type pin %d\n", pin); | 167 | "set_irq_type pin %d\n", pin); |
diff --git a/arch/arm/mach-orion5x/terastation_pro2-setup.c b/arch/arm/mach-orion5x/terastation_pro2-setup.c index 5653ee6c71d8..616004143912 100644 --- a/arch/arm/mach-orion5x/terastation_pro2-setup.c +++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c | |||
@@ -88,7 +88,7 @@ void __init tsp2_pci_preinit(void) | |||
88 | pin = TSP2_PCI_SLOT0_IRQ_PIN; | 88 | pin = TSP2_PCI_SLOT0_IRQ_PIN; |
89 | if (gpio_request(pin, "PCI Int1") == 0) { | 89 | if (gpio_request(pin, "PCI Int1") == 0) { |
90 | if (gpio_direction_input(pin) == 0) { | 90 | if (gpio_direction_input(pin) == 0) { |
91 | set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 91 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
92 | } else { | 92 | } else { |
93 | printk(KERN_ERR "tsp2_pci_preinit failed " | 93 | printk(KERN_ERR "tsp2_pci_preinit failed " |
94 | "to set_irq_type pin %d\n", pin); | 94 | "to set_irq_type pin %d\n", pin); |
diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c index 8bbd27ea6735..f0f43e13ac87 100644 --- a/arch/arm/mach-orion5x/ts209-setup.c +++ b/arch/arm/mach-orion5x/ts209-setup.c | |||
@@ -117,7 +117,7 @@ void __init qnap_ts209_pci_preinit(void) | |||
117 | pin = QNAP_TS209_PCI_SLOT0_IRQ_PIN; | 117 | pin = QNAP_TS209_PCI_SLOT0_IRQ_PIN; |
118 | if (gpio_request(pin, "PCI Int1") == 0) { | 118 | if (gpio_request(pin, "PCI Int1") == 0) { |
119 | if (gpio_direction_input(pin) == 0) { | 119 | if (gpio_direction_input(pin) == 0) { |
120 | set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 120 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
121 | } else { | 121 | } else { |
122 | printk(KERN_ERR "qnap_ts209_pci_preinit failed to " | 122 | printk(KERN_ERR "qnap_ts209_pci_preinit failed to " |
123 | "set_irq_type pin %d\n", pin); | 123 | "set_irq_type pin %d\n", pin); |
@@ -131,7 +131,7 @@ void __init qnap_ts209_pci_preinit(void) | |||
131 | pin = QNAP_TS209_PCI_SLOT1_IRQ_PIN; | 131 | pin = QNAP_TS209_PCI_SLOT1_IRQ_PIN; |
132 | if (gpio_request(pin, "PCI Int2") == 0) { | 132 | if (gpio_request(pin, "PCI Int2") == 0) { |
133 | if (gpio_direction_input(pin) == 0) { | 133 | if (gpio_direction_input(pin) == 0) { |
134 | set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 134 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
135 | } else { | 135 | } else { |
136 | printk(KERN_ERR "qnap_ts209_pci_preinit failed " | 136 | printk(KERN_ERR "qnap_ts209_pci_preinit failed " |
137 | "to set_irq_type pin %d\n", pin); | 137 | "to set_irq_type pin %d\n", pin); |
diff --git a/arch/arm/mach-pnx4008/irq.c b/arch/arm/mach-pnx4008/irq.c index c69c180aec76..7608c7a288cf 100644 --- a/arch/arm/mach-pnx4008/irq.c +++ b/arch/arm/mach-pnx4008/irq.c | |||
@@ -58,22 +58,22 @@ static int pnx4008_set_irq_type(struct irq_data *d, unsigned int type) | |||
58 | case IRQ_TYPE_EDGE_RISING: | 58 | case IRQ_TYPE_EDGE_RISING: |
59 | __raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */ | 59 | __raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */ |
60 | __raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /*rising edge */ | 60 | __raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /*rising edge */ |
61 | set_irq_handler(d->irq, handle_edge_irq); | 61 | irq_set_handler(d->irq, handle_edge_irq); |
62 | break; | 62 | break; |
63 | case IRQ_TYPE_EDGE_FALLING: | 63 | case IRQ_TYPE_EDGE_FALLING: |
64 | __raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */ | 64 | __raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */ |
65 | __raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*falling edge */ | 65 | __raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*falling edge */ |
66 | set_irq_handler(d->irq, handle_edge_irq); | 66 | irq_set_handler(d->irq, handle_edge_irq); |
67 | break; | 67 | break; |
68 | case IRQ_TYPE_LEVEL_LOW: | 68 | case IRQ_TYPE_LEVEL_LOW: |
69 | __raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */ | 69 | __raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */ |
70 | __raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*low level */ | 70 | __raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*low level */ |
71 | set_irq_handler(d->irq, handle_level_irq); | 71 | irq_set_handler(d->irq, handle_level_irq); |
72 | break; | 72 | break; |
73 | case IRQ_TYPE_LEVEL_HIGH: | 73 | case IRQ_TYPE_LEVEL_HIGH: |
74 | __raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */ | 74 | __raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */ |
75 | __raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /* high level */ | 75 | __raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /* high level */ |
76 | set_irq_handler(d->irq, handle_level_irq); | 76 | irq_set_handler(d->irq, handle_level_irq); |
77 | break; | 77 | break; |
78 | 78 | ||
79 | /* IRQ_TYPE_EDGE_BOTH is not supported */ | 79 | /* IRQ_TYPE_EDGE_BOTH is not supported */ |
@@ -98,7 +98,7 @@ void __init pnx4008_init_irq(void) | |||
98 | /* configure IRQ's */ | 98 | /* configure IRQ's */ |
99 | for (i = 0; i < NR_IRQS; i++) { | 99 | for (i = 0; i < NR_IRQS; i++) { |
100 | set_irq_flags(i, IRQF_VALID); | 100 | set_irq_flags(i, IRQF_VALID); |
101 | set_irq_chip(i, &pnx4008_irq_chip); | 101 | irq_set_chip(i, &pnx4008_irq_chip); |
102 | pnx4008_set_irq_type(irq_get_irq_data(i), pnx4008_irq_type[i]); | 102 | pnx4008_set_irq_type(irq_get_irq_data(i), pnx4008_irq_type[i]); |
103 | } | 103 | } |
104 | 104 | ||
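The pnx4008 hunk keeps the long-standing pattern of switching the flow handler from within the .irq_set_type callback, now spelled with the new irq_set_handler() name: edge triggers get handle_edge_irq and level triggers get handle_level_irq once the sensitivity and polarity registers have been programmed. A stripped-down sketch of that callback shape (hardware writes elided, names hypothetical):

#include <linux/errno.h>
#include <linux/irq.h>

static int example_set_irq_type(struct irq_data *d, unsigned int type)
{
	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_EDGE_FALLING:
		/* ... program edge sensitivity and polarity here ... */
		irq_set_handler(d->irq, handle_edge_irq);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	case IRQ_TYPE_LEVEL_LOW:
		/* ... program level sensitivity and polarity here ... */
		irq_set_handler(d->irq, handle_level_irq);
		break;
	default:
		/* IRQ_TYPE_EDGE_BOTH and others are not handled in this sketch */
		return -EINVAL;
	}
	return 0;
}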
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index d2af73321dae..38dea05df7f8 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c | |||
@@ -527,13 +527,13 @@ static void __init balloon3_init_irq(void) | |||
527 | pxa27x_init_irq(); | 527 | pxa27x_init_irq(); |
528 | /* setup extra Balloon3 irqs */ | 528 | /* setup extra Balloon3 irqs */ |
529 | for (irq = BALLOON3_IRQ(0); irq <= BALLOON3_IRQ(7); irq++) { | 529 | for (irq = BALLOON3_IRQ(0); irq <= BALLOON3_IRQ(7); irq++) { |
530 | set_irq_chip(irq, &balloon3_irq_chip); | 530 | irq_set_chip_and_handler(irq, &balloon3_irq_chip, |
531 | set_irq_handler(irq, handle_level_irq); | 531 | handle_level_irq); |
532 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 532 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
533 | } | 533 | } |
534 | 534 | ||
535 | set_irq_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler); | 535 | irq_set_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler); |
536 | set_irq_type(BALLOON3_AUX_NIRQ, IRQ_TYPE_EDGE_FALLING); | 536 | irq_set_irq_type(BALLOON3_AUX_NIRQ, IRQ_TYPE_EDGE_FALLING); |
537 | 537 | ||
538 | pr_debug("%s: chained handler installed - irq %d automatically " | 538 | pr_debug("%s: chained handler installed - irq %d automatically " |
539 | "enabled\n", __func__, BALLOON3_AUX_NIRQ); | 539 | "enabled\n", __func__, BALLOON3_AUX_NIRQ); |
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c index a2380cd76f80..8b1a30959fae 100644 --- a/arch/arm/mach-pxa/cm-x2xx-pci.c +++ b/arch/arm/mach-pxa/cm-x2xx-pci.c | |||
@@ -70,9 +70,10 @@ void __cmx2xx_pci_init_irq(int irq_gpio) | |||
70 | 70 | ||
71 | cmx2xx_it8152_irq_gpio = irq_gpio; | 71 | cmx2xx_it8152_irq_gpio = irq_gpio; |
72 | 72 | ||
73 | set_irq_type(gpio_to_irq(irq_gpio), IRQ_TYPE_EDGE_RISING); | 73 | irq_set_irq_type(gpio_to_irq(irq_gpio), IRQ_TYPE_EDGE_RISING); |
74 | 74 | ||
75 | set_irq_chained_handler(gpio_to_irq(irq_gpio), cmx2xx_it8152_irq_demux); | 75 | irq_set_chained_handler(gpio_to_irq(irq_gpio), |
76 | cmx2xx_it8152_irq_demux); | ||
76 | } | 77 | } |
77 | 78 | ||
78 | #ifdef CONFIG_PM | 79 | #ifdef CONFIG_PM |
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index bfca7ed2fea3..06d0a03f462d 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c | |||
@@ -765,7 +765,7 @@ static void __init cm_x300_init_da9030(void) | |||
765 | { | 765 | { |
766 | pxa3xx_set_i2c_power_info(&cm_x300_pwr_i2c_info); | 766 | pxa3xx_set_i2c_power_info(&cm_x300_pwr_i2c_info); |
767 | i2c_register_board_info(1, &cm_x300_pmic_info, 1); | 767 | i2c_register_board_info(1, &cm_x300_pmic_info, 1); |
768 | set_irq_wake(IRQ_WAKEUP0, 1); | 768 | irq_set_irq_wake(IRQ_WAKEUP0, 1); |
769 | } | 769 | } |
770 | 770 | ||
771 | static void __init cm_x300_init_wi2wi(void) | 771 | static void __init cm_x300_init_wi2wi(void) |
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 2693e3c3776f..6251e3f5c62c 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c | |||
@@ -137,9 +137,9 @@ static void __init pxa_init_low_gpio_irq(set_wake_t fn) | |||
137 | GEDR0 = 0x3; | 137 | GEDR0 = 0x3; |
138 | 138 | ||
139 | for (irq = IRQ_GPIO0; irq <= IRQ_GPIO1; irq++) { | 139 | for (irq = IRQ_GPIO0; irq <= IRQ_GPIO1; irq++) { |
140 | set_irq_chip(irq, &pxa_low_gpio_chip); | 140 | irq_set_chip_and_handler(irq, &pxa_low_gpio_chip, |
141 | set_irq_chip_data(irq, irq_base(0)); | 141 | handle_edge_irq); |
142 | set_irq_handler(irq, handle_edge_irq); | 142 | irq_set_chip_data(irq, irq_base(0)); |
143 | set_irq_flags(irq, IRQF_VALID); | 143 | set_irq_flags(irq, IRQF_VALID); |
144 | } | 144 | } |
145 | 145 | ||
@@ -165,9 +165,9 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn) | |||
165 | __raw_writel(i | IPR_VALID, IRQ_BASE + IPR(i)); | 165 | __raw_writel(i | IPR_VALID, IRQ_BASE + IPR(i)); |
166 | 166 | ||
167 | irq = PXA_IRQ(i); | 167 | irq = PXA_IRQ(i); |
168 | set_irq_chip(irq, &pxa_internal_irq_chip); | 168 | irq_set_chip_and_handler(irq, &pxa_internal_irq_chip, |
169 | set_irq_chip_data(irq, base); | 169 | handle_level_irq); |
170 | set_irq_handler(irq, handle_level_irq); | 170 | irq_set_chip_data(irq, base); |
171 | set_irq_flags(irq, IRQF_VALID); | 171 | set_irq_flags(irq, IRQF_VALID); |
172 | } | 172 | } |
173 | } | 173 | } |
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index c9a3e775c2de..6307f70ae22a 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c | |||
@@ -149,12 +149,12 @@ static void __init lpd270_init_irq(void) | |||
149 | 149 | ||
150 | /* setup extra LogicPD PXA270 irqs */ | 150 | /* setup extra LogicPD PXA270 irqs */ |
151 | for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) { | 151 | for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) { |
152 | set_irq_chip(irq, &lpd270_irq_chip); | 152 | irq_set_chip_and_handler(irq, &lpd270_irq_chip, |
153 | set_irq_handler(irq, handle_level_irq); | 153 | handle_level_irq); |
154 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 154 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
155 | } | 155 | } |
156 | set_irq_chained_handler(IRQ_GPIO(0), lpd270_irq_handler); | 156 | irq_set_chained_handler(IRQ_GPIO(0), lpd270_irq_handler); |
157 | set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); | 157 | irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); |
158 | } | 158 | } |
159 | 159 | ||
160 | 160 | ||
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index dca20de306bb..0fea945dd6f2 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c | |||
@@ -165,13 +165,13 @@ static void __init lubbock_init_irq(void) | |||
165 | 165 | ||
166 | /* setup extra lubbock irqs */ | 166 | /* setup extra lubbock irqs */ |
167 | for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) { | 167 | for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) { |
168 | set_irq_chip(irq, &lubbock_irq_chip); | 168 | irq_set_chip_and_handler(irq, &lubbock_irq_chip, |
169 | set_irq_handler(irq, handle_level_irq); | 169 | handle_level_irq); |
170 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 170 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
171 | } | 171 | } |
172 | 172 | ||
173 | set_irq_chained_handler(IRQ_GPIO(0), lubbock_irq_handler); | 173 | irq_set_chained_handler(IRQ_GPIO(0), lubbock_irq_handler); |
174 | set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); | 174 | irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); |
175 | } | 175 | } |
176 | 176 | ||
177 | #ifdef CONFIG_PM | 177 | #ifdef CONFIG_PM |
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index f9542220595a..29b6e7a94e11 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c | |||
@@ -166,8 +166,8 @@ static void __init mainstone_init_irq(void) | |||
166 | 166 | ||
167 | /* setup extra Mainstone irqs */ | 167 | /* setup extra Mainstone irqs */ |
168 | for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { | 168 | for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { |
169 | set_irq_chip(irq, &mainstone_irq_chip); | 169 | irq_set_chip_and_handler(irq, &mainstone_irq_chip, |
170 | set_irq_handler(irq, handle_level_irq); | 170 | handle_level_irq); |
171 | if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) | 171 | if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) |
172 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); | 172 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); |
173 | else | 173 | else |
@@ -179,8 +179,8 @@ static void __init mainstone_init_irq(void) | |||
179 | MST_INTMSKENA = 0; | 179 | MST_INTMSKENA = 0; |
180 | MST_INTSETCLR = 0; | 180 | MST_INTSETCLR = 0; |
181 | 181 | ||
182 | set_irq_chained_handler(IRQ_GPIO(0), mainstone_irq_handler); | 182 | irq_set_chained_handler(IRQ_GPIO(0), mainstone_irq_handler); |
183 | set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); | 183 | irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING); |
184 | } | 184 | } |
185 | 185 | ||
186 | #ifdef CONFIG_PM | 186 | #ifdef CONFIG_PM |
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 9dbf3ccd4150..4d0120540124 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c | |||
@@ -281,16 +281,16 @@ static void __init pcm990_init_irq(void) | |||
281 | 281 | ||
282 | /* setup extra PCM990 irqs */ | 282 | /* setup extra PCM990 irqs */ |
283 | for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) { | 283 | for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) { |
284 | set_irq_chip(irq, &pcm990_irq_chip); | 284 | irq_set_chip_and_handler(irq, &pcm990_irq_chip, |
285 | set_irq_handler(irq, handle_level_irq); | 285 | handle_level_irq); |
286 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 286 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
287 | } | 287 | } |
288 | 288 | ||
289 | PCM990_INTMSKENA = 0x00; /* disable all Interrupts */ | 289 | PCM990_INTMSKENA = 0x00; /* disable all Interrupts */ |
290 | PCM990_INTSETCLR = 0xFF; | 290 | PCM990_INTSETCLR = 0xFF; |
291 | 291 | ||
292 | set_irq_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler); | 292 | irq_set_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler); |
293 | set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE); | 293 | irq_set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE); |
294 | } | 294 | } |
295 | 295 | ||
296 | static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int, | 296 | static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int, |
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index f374247b8466..8dd107391157 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c | |||
@@ -362,8 +362,8 @@ static void __init pxa_init_ext_wakeup_irq(set_wake_t fn) | |||
362 | int irq; | 362 | int irq; |
363 | 363 | ||
364 | for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) { | 364 | for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) { |
365 | set_irq_chip(irq, &pxa_ext_wakeup_chip); | 365 | irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip, |
366 | set_irq_handler(irq, handle_edge_irq); | 366 | handle_edge_irq); |
367 | set_irq_flags(irq, IRQF_VALID); | 367 | set_irq_flags(irq, IRQF_VALID); |
368 | } | 368 | } |
369 | 369 | ||
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 12279214c875..aa70331c0805 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c | |||
@@ -310,14 +310,14 @@ static void __init viper_init_irq(void) | |||
310 | /* setup ISA IRQs */ | 310 | /* setup ISA IRQs */ |
311 | for (level = 0; level < ARRAY_SIZE(viper_isa_irqs); level++) { | 311 | for (level = 0; level < ARRAY_SIZE(viper_isa_irqs); level++) { |
312 | isa_irq = viper_bit_to_irq(level); | 312 | isa_irq = viper_bit_to_irq(level); |
313 | set_irq_chip(isa_irq, &viper_irq_chip); | 313 | irq_set_chip_and_handler(isa_irq, &viper_irq_chip, |
314 | set_irq_handler(isa_irq, handle_edge_irq); | 314 | handle_edge_irq); |
315 | set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); | 315 | set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); |
316 | } | 316 | } |
317 | 317 | ||
318 | set_irq_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO), | 318 | irq_set_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO), |
319 | viper_irq_handler); | 319 | viper_irq_handler); |
320 | set_irq_type(gpio_to_irq(VIPER_CPLD_GPIO), IRQ_TYPE_EDGE_BOTH); | 320 | irq_set_irq_type(gpio_to_irq(VIPER_CPLD_GPIO), IRQ_TYPE_EDGE_BOTH); |
321 | } | 321 | } |
322 | 322 | ||
323 | /* Flat Panel */ | 323 | /* Flat Panel */ |
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 730f51e57c17..139aa7f2ed90 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c | |||
@@ -136,22 +136,23 @@ static void __init zeus_init_irq(void) | |||
136 | 136 | ||
137 | /* Peripheral IRQs. It would be nice to move those inside driver | 137 | /* Peripheral IRQs. It would be nice to move those inside driver |
138 | configuration, but it is not supported at the moment. */ | 138 | configuration, but it is not supported at the moment. */ |
139 | set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING); | 139 | irq_set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING); |
140 | set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING); | 140 | irq_set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING); |
141 | set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING); | 141 | irq_set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING); |
142 | set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO), IRQ_TYPE_EDGE_FALLING); | 142 | irq_set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO), |
143 | set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING); | 143 | IRQ_TYPE_EDGE_FALLING); |
144 | irq_set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING); | ||
144 | 145 | ||
145 | /* Setup ISA IRQs */ | 146 | /* Setup ISA IRQs */ |
146 | for (level = 0; level < ARRAY_SIZE(zeus_isa_irqs); level++) { | 147 | for (level = 0; level < ARRAY_SIZE(zeus_isa_irqs); level++) { |
147 | isa_irq = zeus_bit_to_irq(level); | 148 | isa_irq = zeus_bit_to_irq(level); |
148 | set_irq_chip(isa_irq, &zeus_irq_chip); | 149 | irq_set_chip_and_handler(isa_irq, &zeus_irq_chip, |
149 | set_irq_handler(isa_irq, handle_edge_irq); | 150 | handle_edge_irq); |
150 | set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); | 151 | set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); |
151 | } | 152 | } |
152 | 153 | ||
153 | set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING); | 154 | irq_set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING); |
154 | set_irq_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler); | 155 | irq_set_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler); |
155 | } | 156 | } |
156 | 157 | ||
157 | 158 | ||
diff --git a/arch/arm/mach-rpc/irq.c b/arch/arm/mach-rpc/irq.c index d29cd9b737fc..2e1b5309fbab 100644 --- a/arch/arm/mach-rpc/irq.c +++ b/arch/arm/mach-rpc/irq.c | |||
@@ -133,25 +133,25 @@ void __init rpc_init_irq(void) | |||
133 | 133 | ||
134 | switch (irq) { | 134 | switch (irq) { |
135 | case 0 ... 7: | 135 | case 0 ... 7: |
136 | set_irq_chip(irq, &iomd_a_chip); | 136 | irq_set_chip_and_handler(irq, &iomd_a_chip, |
137 | set_irq_handler(irq, handle_level_irq); | 137 | handle_level_irq); |
138 | set_irq_flags(irq, flags); | 138 | set_irq_flags(irq, flags); |
139 | break; | 139 | break; |
140 | 140 | ||
141 | case 8 ... 15: | 141 | case 8 ... 15: |
142 | set_irq_chip(irq, &iomd_b_chip); | 142 | irq_set_chip_and_handler(irq, &iomd_b_chip, |
143 | set_irq_handler(irq, handle_level_irq); | 143 | handle_level_irq); |
144 | set_irq_flags(irq, flags); | 144 | set_irq_flags(irq, flags); |
145 | break; | 145 | break; |
146 | 146 | ||
147 | case 16 ... 21: | 147 | case 16 ... 21: |
148 | set_irq_chip(irq, &iomd_dma_chip); | 148 | irq_set_chip_and_handler(irq, &iomd_dma_chip, |
149 | set_irq_handler(irq, handle_level_irq); | 149 | handle_level_irq); |
150 | set_irq_flags(irq, flags); | 150 | set_irq_flags(irq, flags); |
151 | break; | 151 | break; |
152 | 152 | ||
153 | case 64 ... 71: | 153 | case 64 ... 71: |
154 | set_irq_chip(irq, &iomd_fiq_chip); | 154 | irq_set_chip(irq, &iomd_fiq_chip); |
155 | set_irq_flags(irq, IRQF_VALID); | 155 | set_irq_flags(irq, IRQF_VALID); |
156 | break; | 156 | break; |
157 | } | 157 | } |
diff --git a/arch/arm/mach-s3c2410/bast-irq.c b/arch/arm/mach-s3c2410/bast-irq.c index 606cb6b1cc47..bc53d2d16d1a 100644 --- a/arch/arm/mach-s3c2410/bast-irq.c +++ b/arch/arm/mach-s3c2410/bast-irq.c | |||
@@ -147,15 +147,15 @@ static __init int bast_irq_init(void) | |||
147 | 147 | ||
148 | __raw_writeb(0x0, BAST_VA_PC104_IRQMASK); | 148 | __raw_writeb(0x0, BAST_VA_PC104_IRQMASK); |
149 | 149 | ||
150 | set_irq_chained_handler(IRQ_ISA, bast_irq_pc104_demux); | 150 | irq_set_chained_handler(IRQ_ISA, bast_irq_pc104_demux); |
151 | 151 | ||
152 | /* register our IRQs */ | 152 | /* register our IRQs */ |
153 | 153 | ||
154 | for (i = 0; i < 4; i++) { | 154 | for (i = 0; i < 4; i++) { |
155 | unsigned int irqno = bast_pc104_irqs[i]; | 155 | unsigned int irqno = bast_pc104_irqs[i]; |
156 | 156 | ||
157 | set_irq_chip(irqno, &bast_pc104_chip); | 157 | irq_set_chip_and_handler(irqno, &bast_pc104_chip, |
158 | set_irq_handler(irqno, handle_level_irq); | 158 | handle_level_irq); |
159 | set_irq_flags(irqno, IRQF_VALID); | 159 | set_irq_flags(irqno, IRQF_VALID); |
160 | } | 160 | } |
161 | } | 161 | } |
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c index eddb52ba5b65..f3355d2ec634 100644 --- a/arch/arm/mach-s3c2412/irq.c +++ b/arch/arm/mach-s3c2412/irq.c | |||
@@ -175,18 +175,18 @@ static int s3c2412_irq_add(struct sys_device *sysdev) | |||
175 | unsigned int irqno; | 175 | unsigned int irqno; |
176 | 176 | ||
177 | for (irqno = IRQ_EINT0; irqno <= IRQ_EINT3; irqno++) { | 177 | for (irqno = IRQ_EINT0; irqno <= IRQ_EINT3; irqno++) { |
178 | set_irq_chip(irqno, &s3c2412_irq_eint0t4); | 178 | irq_set_chip_and_handler(irqno, &s3c2412_irq_eint0t4, |
179 | set_irq_handler(irqno, handle_edge_irq); | 179 | handle_edge_irq); |
180 | set_irq_flags(irqno, IRQF_VALID); | 180 | set_irq_flags(irqno, IRQF_VALID); |
181 | } | 181 | } |
182 | 182 | ||
183 | /* add demux support for CF/SDI */ | 183 | /* add demux support for CF/SDI */ |
184 | 184 | ||
185 | set_irq_chained_handler(IRQ_S3C2412_CFSDI, s3c2412_irq_demux_cfsdi); | 185 | irq_set_chained_handler(IRQ_S3C2412_CFSDI, s3c2412_irq_demux_cfsdi); |
186 | 186 | ||
187 | for (irqno = IRQ_S3C2412_SDI; irqno <= IRQ_S3C2412_CF; irqno++) { | 187 | for (irqno = IRQ_S3C2412_SDI; irqno <= IRQ_S3C2412_CF; irqno++) { |
188 | set_irq_chip(irqno, &s3c2412_irq_cfsdi); | 188 | irq_set_chip_and_handler(irqno, &s3c2412_irq_cfsdi, |
189 | set_irq_handler(irqno, handle_level_irq); | 189 | handle_level_irq); |
190 | set_irq_flags(irqno, IRQF_VALID); | 190 | set_irq_flags(irqno, IRQF_VALID); |
191 | } | 191 | } |
192 | 192 | ||
@@ -195,7 +195,7 @@ static int s3c2412_irq_add(struct sys_device *sysdev) | |||
195 | s3c2412_irq_rtc_chip = s3c_irq_chip; | 195 | s3c2412_irq_rtc_chip = s3c_irq_chip; |
196 | s3c2412_irq_rtc_chip.irq_set_wake = s3c2412_irq_rtc_wake; | 196 | s3c2412_irq_rtc_chip.irq_set_wake = s3c2412_irq_rtc_wake; |
197 | 197 | ||
198 | set_irq_chip(IRQ_RTC, &s3c2412_irq_rtc_chip); | 198 | irq_set_chip(IRQ_RTC, &s3c2412_irq_rtc_chip); |
199 | 199 | ||
200 | return 0; | 200 | return 0; |
201 | } | 201 | } |
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c index 680fe386aca5..77b38f2381c1 100644 --- a/arch/arm/mach-s3c2416/irq.c +++ b/arch/arm/mach-s3c2416/irq.c | |||
@@ -202,13 +202,11 @@ static int __init s3c2416_add_sub(unsigned int base, | |||
202 | { | 202 | { |
203 | unsigned int irqno; | 203 | unsigned int irqno; |
204 | 204 | ||
205 | set_irq_chip(base, &s3c_irq_level_chip); | 205 | irq_set_chip_and_handler(base, &s3c_irq_level_chip, handle_level_irq); |
206 | set_irq_handler(base, handle_level_irq); | 206 | irq_set_chained_handler(base, demux); |
207 | set_irq_chained_handler(base, demux); | ||
208 | 207 | ||
209 | for (irqno = start; irqno <= end; irqno++) { | 208 | for (irqno = start; irqno <= end; irqno++) { |
210 | set_irq_chip(irqno, chip); | 209 | irq_set_chip_and_handler(irqno, chip, handle_level_irq); |
211 | set_irq_handler(irqno, handle_level_irq); | ||
212 | set_irq_flags(irqno, IRQF_VALID); | 210 | set_irq_flags(irqno, IRQF_VALID); |
213 | } | 211 | } |
214 | 212 | ||
diff --git a/arch/arm/mach-s3c2440/irq.c b/arch/arm/mach-s3c2440/irq.c index acad4428bef0..eb1cc0f0705e 100644 --- a/arch/arm/mach-s3c2440/irq.c +++ b/arch/arm/mach-s3c2440/irq.c | |||
@@ -100,13 +100,13 @@ static int s3c2440_irq_add(struct sys_device *sysdev) | |||
100 | 100 | ||
101 | /* add new chained handler for wdt, ac7 */ | 101 | /* add new chained handler for wdt, ac7 */ |
102 | 102 | ||
103 | set_irq_chip(IRQ_WDT, &s3c_irq_level_chip); | 103 | irq_set_chip_and_handler(IRQ_WDT, &s3c_irq_level_chip, |
104 | set_irq_handler(IRQ_WDT, handle_level_irq); | 104 | handle_level_irq); |
105 | set_irq_chained_handler(IRQ_WDT, s3c_irq_demux_wdtac97); | 105 | irq_set_chained_handler(IRQ_WDT, s3c_irq_demux_wdtac97); |
106 | 106 | ||
107 | for (irqno = IRQ_S3C2440_WDT; irqno <= IRQ_S3C2440_AC97; irqno++) { | 107 | for (irqno = IRQ_S3C2440_WDT; irqno <= IRQ_S3C2440_AC97; irqno++) { |
108 | set_irq_chip(irqno, &s3c_irq_wdtac97); | 108 | irq_set_chip_and_handler(irqno, &s3c_irq_wdtac97, |
109 | set_irq_handler(irqno, handle_level_irq); | 109 | handle_level_irq); |
110 | set_irq_flags(irqno, IRQF_VALID); | 110 | set_irq_flags(irqno, IRQF_VALID); |
111 | } | 111 | } |
112 | 112 | ||
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c index 83daf4ece764..de07c2feaa32 100644 --- a/arch/arm/mach-s3c2440/s3c244x-irq.c +++ b/arch/arm/mach-s3c2440/s3c244x-irq.c | |||
@@ -95,19 +95,19 @@ static int s3c244x_irq_add(struct sys_device *sysdev) | |||
95 | { | 95 | { |
96 | unsigned int irqno; | 96 | unsigned int irqno; |
97 | 97 | ||
98 | set_irq_chip(IRQ_NFCON, &s3c_irq_level_chip); | 98 | irq_set_chip_and_handler(IRQ_NFCON, &s3c_irq_level_chip, |
99 | set_irq_handler(IRQ_NFCON, handle_level_irq); | 99 | handle_level_irq); |
100 | set_irq_flags(IRQ_NFCON, IRQF_VALID); | 100 | set_irq_flags(IRQ_NFCON, IRQF_VALID); |
101 | 101 | ||
102 | /* add chained handler for camera */ | 102 | /* add chained handler for camera */ |
103 | 103 | ||
104 | set_irq_chip(IRQ_CAM, &s3c_irq_level_chip); | 104 | irq_set_chip_and_handler(IRQ_CAM, &s3c_irq_level_chip, |
105 | set_irq_handler(IRQ_CAM, handle_level_irq); | 105 | handle_level_irq); |
106 | set_irq_chained_handler(IRQ_CAM, s3c_irq_demux_cam); | 106 | irq_set_chained_handler(IRQ_CAM, s3c_irq_demux_cam); |
107 | 107 | ||
108 | for (irqno = IRQ_S3C2440_CAM_C; irqno <= IRQ_S3C2440_CAM_P; irqno++) { | 108 | for (irqno = IRQ_S3C2440_CAM_C; irqno <= IRQ_S3C2440_CAM_P; irqno++) { |
109 | set_irq_chip(irqno, &s3c_irq_cam); | 109 | irq_set_chip_and_handler(irqno, &s3c_irq_cam, |
110 | set_irq_handler(irqno, handle_level_irq); | 110 | handle_level_irq); |
111 | set_irq_flags(irqno, IRQF_VALID); | 111 | set_irq_flags(irqno, IRQF_VALID); |
112 | } | 112 | } |
113 | 113 | ||
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c index c7820f9c1352..83ecb1173fb1 100644 --- a/arch/arm/mach-s3c2443/irq.c +++ b/arch/arm/mach-s3c2443/irq.c | |||
@@ -230,13 +230,11 @@ static int __init s3c2443_add_sub(unsigned int base, | |||
230 | { | 230 | { |
231 | unsigned int irqno; | 231 | unsigned int irqno; |
232 | 232 | ||
233 | set_irq_chip(base, &s3c_irq_level_chip); | 233 | irq_set_chip_and_handler(base, &s3c_irq_level_chip, handle_level_irq); |
234 | set_irq_handler(base, handle_level_irq); | 234 | irq_set_chained_handler(base, demux); |
235 | set_irq_chained_handler(base, demux); | ||
236 | 235 | ||
237 | for (irqno = start; irqno <= end; irqno++) { | 236 | for (irqno = start; irqno <= end; irqno++) { |
238 | set_irq_chip(irqno, chip); | 237 | irq_set_chip_and_handler(irqno, chip, handle_level_irq); |
239 | set_irq_handler(irqno, handle_level_irq); | ||
240 | set_irq_flags(irqno, IRQF_VALID); | 238 | set_irq_flags(irqno, IRQF_VALID); |
241 | } | 239 | } |
242 | 240 | ||
diff --git a/arch/arm/mach-s3c64xx/irq-eint.c b/arch/arm/mach-s3c64xx/irq-eint.c index 2ead8189da74..4d203be1f4c3 100644 --- a/arch/arm/mach-s3c64xx/irq-eint.c +++ b/arch/arm/mach-s3c64xx/irq-eint.c | |||
@@ -197,16 +197,15 @@ static int __init s3c64xx_init_irq_eint(void) | |||
197 | int irq; | 197 | int irq; |
198 | 198 | ||
199 | for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) { | 199 | for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) { |
200 | set_irq_chip(irq, &s3c_irq_eint); | 200 | irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq); |
201 | set_irq_chip_data(irq, (void *)eint_irq_to_bit(irq)); | 201 | irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq)); |
202 | set_irq_handler(irq, handle_level_irq); | ||
203 | set_irq_flags(irq, IRQF_VALID); | 202 | set_irq_flags(irq, IRQF_VALID); |
204 | } | 203 | } |
205 | 204 | ||
206 | set_irq_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3); | 205 | irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3); |
207 | set_irq_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11); | 206 | irq_set_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11); |
208 | set_irq_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19); | 207 | irq_set_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19); |
209 | set_irq_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27); | 208 | irq_set_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27); |
210 | 209 | ||
211 | return 0; | 210 | return 0; |
212 | } | 211 | } |
diff --git a/arch/arm/mach-s5p64x0/cpu.c b/arch/arm/mach-s5p64x0/cpu.c index b8d02eb4cf30..a5c00952ea35 100644 --- a/arch/arm/mach-s5p64x0/cpu.c +++ b/arch/arm/mach-s5p64x0/cpu.c | |||
@@ -119,7 +119,7 @@ void __init s5p6450_map_io(void) | |||
119 | s3c_adc_setname("s3c64xx-adc"); | 119 | s3c_adc_setname("s3c64xx-adc"); |
120 | 120 | ||
121 | iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc)); | 121 | iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc)); |
122 | iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6440_iodesc)); | 122 | iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc)); |
123 | } | 123 | } |
124 | 124 | ||
125 | /* | 125 | /* |
diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h index 26710b35ef87..b9f9ec33384d 100644 --- a/arch/arm/mach-s5pv210/include/mach/irqs.h +++ b/arch/arm/mach-s5pv210/include/mach/irqs.h | |||
@@ -99,9 +99,9 @@ | |||
99 | #define IRQ_TC IRQ_PENDN | 99 | #define IRQ_TC IRQ_PENDN |
100 | #define IRQ_KEYPAD S5P_IRQ_VIC2(25) | 100 | #define IRQ_KEYPAD S5P_IRQ_VIC2(25) |
101 | #define IRQ_CG S5P_IRQ_VIC2(26) | 101 | #define IRQ_CG S5P_IRQ_VIC2(26) |
102 | #define IRQ_SEC S5P_IRQ_VIC2(27) | 102 | #define IRQ_SSS_INT S5P_IRQ_VIC2(27) |
103 | #define IRQ_SECRX S5P_IRQ_VIC2(28) | 103 | #define IRQ_SSS_HASH S5P_IRQ_VIC2(28) |
104 | #define IRQ_SECTX S5P_IRQ_VIC2(29) | 104 | #define IRQ_PCM2 S5P_IRQ_VIC2(29) |
105 | #define IRQ_SDMIRQ S5P_IRQ_VIC2(30) | 105 | #define IRQ_SDMIRQ S5P_IRQ_VIC2(30) |
106 | #define IRQ_SDMFIQ S5P_IRQ_VIC2(31) | 106 | #define IRQ_SDMFIQ S5P_IRQ_VIC2(31) |
107 | 107 | ||
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c index bc08ac42e7cc..c6a9e86c2d5c 100644 --- a/arch/arm/mach-s5pv210/mach-smdkv210.c +++ b/arch/arm/mach-s5pv210/mach-smdkv210.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <plat/keypad.h> | 44 | #include <plat/keypad.h> |
45 | #include <plat/pm.h> | 45 | #include <plat/pm.h> |
46 | #include <plat/fb.h> | 46 | #include <plat/fb.h> |
47 | #include <plat/gpio-cfg.h> | ||
48 | #include <plat/s5p-time.h> | 47 | #include <plat/s5p-time.h> |
49 | 48 | ||
50 | /* Following are default values for UCON, ULCON and UFCON UART registers */ | 49 | /* Following are default values for UCON, ULCON and UFCON UART registers */ |
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c index 98d780608c7e..7f3da4b11ec9 100644 --- a/arch/arm/mach-sa1100/cerf.c +++ b/arch/arm/mach-sa1100/cerf.c | |||
@@ -96,7 +96,7 @@ static struct resource cerf_flash_resource = { | |||
96 | static void __init cerf_init_irq(void) | 96 | static void __init cerf_init_irq(void) |
97 | { | 97 | { |
98 | sa1100_init_irq(); | 98 | sa1100_init_irq(); |
99 | set_irq_type(CERF_ETH_IRQ, IRQ_TYPE_EDGE_RISING); | 99 | irq_set_irq_type(CERF_ETH_IRQ, IRQ_TYPE_EDGE_RISING); |
100 | } | 100 | } |
101 | 101 | ||
102 | static struct map_desc cerf_io_desc[] __initdata = { | 102 | static struct map_desc cerf_io_desc[] __initdata = { |
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c index 3d85dfad9c1f..423ddb3d65e9 100644 --- a/arch/arm/mach-sa1100/irq.c +++ b/arch/arm/mach-sa1100/irq.c | |||
@@ -323,28 +323,28 @@ void __init sa1100_init_irq(void) | |||
323 | ICCR = 1; | 323 | ICCR = 1; |
324 | 324 | ||
325 | for (irq = 0; irq <= 10; irq++) { | 325 | for (irq = 0; irq <= 10; irq++) { |
326 | set_irq_chip(irq, &sa1100_low_gpio_chip); | 326 | irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip, |
327 | set_irq_handler(irq, handle_edge_irq); | 327 | handle_edge_irq); |
328 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 328 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
329 | } | 329 | } |
330 | 330 | ||
331 | for (irq = 12; irq <= 31; irq++) { | 331 | for (irq = 12; irq <= 31; irq++) { |
332 | set_irq_chip(irq, &sa1100_normal_chip); | 332 | irq_set_chip_and_handler(irq, &sa1100_normal_chip, |
333 | set_irq_handler(irq, handle_level_irq); | 333 | handle_level_irq); |
334 | set_irq_flags(irq, IRQF_VALID); | 334 | set_irq_flags(irq, IRQF_VALID); |
335 | } | 335 | } |
336 | 336 | ||
337 | for (irq = 32; irq <= 48; irq++) { | 337 | for (irq = 32; irq <= 48; irq++) { |
338 | set_irq_chip(irq, &sa1100_high_gpio_chip); | 338 | irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip, |
339 | set_irq_handler(irq, handle_edge_irq); | 339 | handle_edge_irq); |
340 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 340 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
341 | } | 341 | } |
342 | 342 | ||
343 | /* | 343 | /* |
344 | * Install handler for GPIO 11-27 edge detect interrupts | 344 | * Install handler for GPIO 11-27 edge detect interrupts |
345 | */ | 345 | */ |
346 | set_irq_chip(IRQ_GPIO11_27, &sa1100_normal_chip); | 346 | irq_set_chip(IRQ_GPIO11_27, &sa1100_normal_chip); |
347 | set_irq_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler); | 347 | irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler); |
348 | 348 | ||
349 | sa1100_init_gpio(); | 349 | sa1100_init_gpio(); |
350 | } | 350 | } |
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 4aad01f73660..b4fa53a1427e 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c | |||
@@ -145,8 +145,8 @@ static int __devinit neponset_probe(struct platform_device *dev) | |||
145 | /* | 145 | /* |
146 | * Install handler for GPIO25. | 146 | * Install handler for GPIO25. |
147 | */ | 147 | */ |
148 | set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING); | 148 | irq_set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING); |
149 | set_irq_chained_handler(IRQ_GPIO25, neponset_irq_handler); | 149 | irq_set_chained_handler(IRQ_GPIO25, neponset_irq_handler); |
150 | 150 | ||
151 | /* | 151 | /* |
152 | * We would set IRQ_GPIO25 to be a wake-up IRQ, but | 152 | * We would set IRQ_GPIO25 to be a wake-up IRQ, but |
@@ -161,9 +161,9 @@ static int __devinit neponset_probe(struct platform_device *dev) | |||
161 | * Setup other Neponset IRQs. SA1111 will be done by the | 161 | * Setup other Neponset IRQs. SA1111 will be done by the |
162 | * generic SA1111 code. | 162 | * generic SA1111 code. |
163 | */ | 163 | */ |
164 | set_irq_handler(IRQ_NEPONSET_SMC9196, handle_simple_irq); | 164 | irq_set_handler(IRQ_NEPONSET_SMC9196, handle_simple_irq); |
165 | set_irq_flags(IRQ_NEPONSET_SMC9196, IRQF_VALID | IRQF_PROBE); | 165 | set_irq_flags(IRQ_NEPONSET_SMC9196, IRQF_VALID | IRQF_PROBE); |
166 | set_irq_handler(IRQ_NEPONSET_USAR, handle_simple_irq); | 166 | irq_set_handler(IRQ_NEPONSET_USAR, handle_simple_irq); |
167 | set_irq_flags(IRQ_NEPONSET_USAR, IRQF_VALID | IRQF_PROBE); | 167 | set_irq_flags(IRQ_NEPONSET_USAR, IRQF_VALID | IRQF_PROBE); |
168 | 168 | ||
169 | /* | 169 | /* |
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 42b80400c100..65161f2bea29 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c | |||
@@ -142,7 +142,7 @@ static void __init pleb_map_io(void) | |||
142 | 142 | ||
143 | GPDR &= ~GPIO_ETH0_IRQ; | 143 | GPDR &= ~GPIO_ETH0_IRQ; |
144 | 144 | ||
145 | set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING); | 145 | irq_set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING); |
146 | } | 146 | } |
147 | 147 | ||
148 | MACHINE_START(PLEB, "PLEB") | 148 | MACHINE_START(PLEB, "PLEB") |
diff --git a/arch/arm/mach-shark/irq.c b/arch/arm/mach-shark/irq.c index 831fc66dfa4d..5dce13e429f3 100644 --- a/arch/arm/mach-shark/irq.c +++ b/arch/arm/mach-shark/irq.c | |||
@@ -80,8 +80,7 @@ void __init shark_init_irq(void) | |||
80 | int irq; | 80 | int irq; |
81 | 81 | ||
82 | for (irq = 0; irq < NR_IRQS; irq++) { | 82 | for (irq = 0; irq < NR_IRQS; irq++) { |
83 | set_irq_chip(irq, &fb_chip); | 83 | irq_set_chip_and_handler(irq, &fb_chip, handle_edge_irq); |
84 | set_irq_handler(irq, handle_edge_irq); | ||
85 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 84 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
86 | } | 85 | } |
87 | 86 | ||
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index a94f29da5d30..783b66fa95fb 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c | |||
@@ -24,9 +24,9 @@ | |||
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
28 | #include <linux/mfd/tmio.h> | 27 | #include <linux/mfd/tmio.h> |
29 | #include <linux/mmc/host.h> | 28 | #include <linux/mmc/host.h> |
29 | #include <linux/mmc/sh_mobile_sdhi.h> | ||
30 | #include <linux/mtd/mtd.h> | 30 | #include <linux/mtd/mtd.h> |
31 | #include <linux/mtd/partitions.h> | 31 | #include <linux/mtd/partitions.h> |
32 | #include <linux/mtd/physmap.h> | 32 | #include <linux/mtd/physmap.h> |
@@ -312,7 +312,7 @@ static struct resource sdhi0_resources[] = { | |||
312 | [0] = { | 312 | [0] = { |
313 | .name = "SDHI0", | 313 | .name = "SDHI0", |
314 | .start = 0xe6850000, | 314 | .start = 0xe6850000, |
315 | .end = 0xe68501ff, | 315 | .end = 0xe68500ff, |
316 | .flags = IORESOURCE_MEM, | 316 | .flags = IORESOURCE_MEM, |
317 | }, | 317 | }, |
318 | [1] = { | 318 | [1] = { |
@@ -345,7 +345,7 @@ static struct resource sdhi1_resources[] = { | |||
345 | [0] = { | 345 | [0] = { |
346 | .name = "SDHI1", | 346 | .name = "SDHI1", |
347 | .start = 0xe6860000, | 347 | .start = 0xe6860000, |
348 | .end = 0xe68601ff, | 348 | .end = 0xe68600ff, |
349 | .flags = IORESOURCE_MEM, | 349 | .flags = IORESOURCE_MEM, |
350 | }, | 350 | }, |
351 | [1] = { | 351 | [1] = { |
@@ -1255,7 +1255,7 @@ static void __init ap4evb_init(void) | |||
1255 | gpio_request(GPIO_FN_KEYIN4, NULL); | 1255 | gpio_request(GPIO_FN_KEYIN4, NULL); |
1256 | 1256 | ||
1257 | /* enable TouchScreen */ | 1257 | /* enable TouchScreen */ |
1258 | set_irq_type(IRQ28, IRQ_TYPE_LEVEL_LOW); | 1258 | irq_set_irq_type(IRQ28, IRQ_TYPE_LEVEL_LOW); |
1259 | 1259 | ||
1260 | tsc_device.irq = IRQ28; | 1260 | tsc_device.irq = IRQ28; |
1261 | i2c_register_board_info(1, &tsc_device, 1); | 1261 | i2c_register_board_info(1, &tsc_device, 1); |
@@ -1311,7 +1311,7 @@ static void __init ap4evb_init(void) | |||
1311 | lcdc_info.ch[0].lcd_size_cfg.height = 91; | 1311 | lcdc_info.ch[0].lcd_size_cfg.height = 91; |
1312 | 1312 | ||
1313 | /* enable TouchScreen */ | 1313 | /* enable TouchScreen */ |
1314 | set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); | 1314 | irq_set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); |
1315 | 1315 | ||
1316 | tsc_device.irq = IRQ7; | 1316 | tsc_device.irq = IRQ7; |
1317 | i2c_register_board_info(0, &tsc_device, 1); | 1317 | i2c_register_board_info(0, &tsc_device, 1); |
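
Several board files, ap4evb included, only need the mechanical set_irq_type() to irq_set_irq_type() rename; the (irq, IRQ_TYPE_*) arguments are unchanged. A small sketch of that board-level trigger setup for an I2C peripheral, with the IRQ number, device name and address being hypothetical placeholders rather than values from this diff:

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/irq.h>

#define MY_TSC_IRQ	28	/* placeholder interrupt line for the touch controller */

static struct i2c_board_info my_tsc_info = {
	I2C_BOARD_INFO("my_tsc", 0x48),		/* hypothetical device name and address */
};

static void __init my_board_init_tsc(void)
{
	/* was set_irq_type(); only the function name changes */
	irq_set_irq_type(MY_TSC_IRQ, IRQ_TYPE_LEVEL_LOW);

	my_tsc_info.irq = MY_TSC_IRQ;
	i2c_register_board_info(1, &my_tsc_info, 1);
}
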
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c index dee3e9231fb9..c87a7b7c5832 100644 --- a/arch/arm/mach-shmobile/board-g4evm.c +++ b/arch/arm/mach-shmobile/board-g4evm.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <linux/input.h> | 31 | #include <linux/input.h> |
32 | #include <linux/input/sh_keysc.h> | 32 | #include <linux/input/sh_keysc.h> |
33 | #include <linux/mmc/host.h> | 33 | #include <linux/mmc/host.h> |
34 | #include <linux/mfd/sh_mobile_sdhi.h> | 34 | #include <linux/mmc/sh_mobile_sdhi.h> |
35 | #include <linux/gpio.h> | 35 | #include <linux/gpio.h> |
36 | #include <mach/sh7377.h> | 36 | #include <mach/sh7377.h> |
37 | #include <mach/common.h> | 37 | #include <mach/common.h> |
@@ -205,7 +205,7 @@ static struct resource sdhi0_resources[] = { | |||
205 | [0] = { | 205 | [0] = { |
206 | .name = "SDHI0", | 206 | .name = "SDHI0", |
207 | .start = 0xe6d50000, | 207 | .start = 0xe6d50000, |
208 | .end = 0xe6d501ff, | 208 | .end = 0xe6d500ff, |

209 | .flags = IORESOURCE_MEM, | 209 | .flags = IORESOURCE_MEM, |
210 | }, | 210 | }, |
211 | [1] = { | 211 | [1] = { |
@@ -232,7 +232,7 @@ static struct resource sdhi1_resources[] = { | |||
232 | [0] = { | 232 | [0] = { |
233 | .name = "SDHI1", | 233 | .name = "SDHI1", |
234 | .start = 0xe6d60000, | 234 | .start = 0xe6d60000, |
235 | .end = 0xe6d601ff, | 235 | .end = 0xe6d600ff, |
236 | .flags = IORESOURCE_MEM, | 236 | .flags = IORESOURCE_MEM, |
237 | }, | 237 | }, |
238 | [1] = { | 238 | [1] = { |
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 49bc07482179..8184d4d4f234 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c | |||
@@ -32,10 +32,10 @@ | |||
32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
33 | #include <linux/i2c.h> | 33 | #include <linux/i2c.h> |
34 | #include <linux/leds.h> | 34 | #include <linux/leds.h> |
35 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
36 | #include <linux/mfd/tmio.h> | 35 | #include <linux/mfd/tmio.h> |
37 | #include <linux/mmc/host.h> | 36 | #include <linux/mmc/host.h> |
38 | #include <linux/mmc/sh_mmcif.h> | 37 | #include <linux/mmc/sh_mmcif.h> |
38 | #include <linux/mmc/sh_mobile_sdhi.h> | ||
39 | #include <linux/mtd/mtd.h> | 39 | #include <linux/mtd/mtd.h> |
40 | #include <linux/mtd/partitions.h> | 40 | #include <linux/mtd/partitions.h> |
41 | #include <linux/mtd/physmap.h> | 41 | #include <linux/mtd/physmap.h> |
@@ -690,7 +690,7 @@ static struct resource sdhi0_resources[] = { | |||
690 | [0] = { | 690 | [0] = { |
691 | .name = "SDHI0", | 691 | .name = "SDHI0", |
692 | .start = 0xe6850000, | 692 | .start = 0xe6850000, |
693 | .end = 0xe68501ff, | 693 | .end = 0xe68500ff, |
694 | .flags = IORESOURCE_MEM, | 694 | .flags = IORESOURCE_MEM, |
695 | }, | 695 | }, |
696 | [1] = { | 696 | [1] = { |
@@ -725,7 +725,7 @@ static struct resource sdhi1_resources[] = { | |||
725 | [0] = { | 725 | [0] = { |
726 | .name = "SDHI1", | 726 | .name = "SDHI1", |
727 | .start = 0xe6860000, | 727 | .start = 0xe6860000, |
728 | .end = 0xe68601ff, | 728 | .end = 0xe68600ff, |
729 | .flags = IORESOURCE_MEM, | 729 | .flags = IORESOURCE_MEM, |
730 | }, | 730 | }, |
731 | [1] = { | 731 | [1] = { |
@@ -768,7 +768,7 @@ static struct resource sdhi2_resources[] = { | |||
768 | [0] = { | 768 | [0] = { |
769 | .name = "SDHI2", | 769 | .name = "SDHI2", |
770 | .start = 0xe6870000, | 770 | .start = 0xe6870000, |
771 | .end = 0xe68701ff, | 771 | .end = 0xe68700ff, |
772 | .flags = IORESOURCE_MEM, | 772 | .flags = IORESOURCE_MEM, |
773 | }, | 773 | }, |
774 | [1] = { | 774 | [1] = { |
@@ -1124,15 +1124,15 @@ static void __init mackerel_init(void) | |||
1124 | 1124 | ||
1125 | /* enable Keypad */ | 1125 | /* enable Keypad */ |
1126 | gpio_request(GPIO_FN_IRQ9_42, NULL); | 1126 | gpio_request(GPIO_FN_IRQ9_42, NULL); |
1127 | set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH); | 1127 | irq_set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH); |
1128 | 1128 | ||
1129 | /* enable Touchscreen */ | 1129 | /* enable Touchscreen */ |
1130 | gpio_request(GPIO_FN_IRQ7_40, NULL); | 1130 | gpio_request(GPIO_FN_IRQ7_40, NULL); |
1131 | set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); | 1131 | irq_set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW); |
1132 | 1132 | ||
1133 | /* enable Accelerometer */ | 1133 | /* enable Accelerometer */ |
1134 | gpio_request(GPIO_FN_IRQ21, NULL); | 1134 | gpio_request(GPIO_FN_IRQ21, NULL); |
1135 | set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); | 1135 | irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); |
1136 | 1136 | ||
1137 | /* enable SDHI0 */ | 1137 | /* enable SDHI0 */ |
1138 | gpio_request(GPIO_FN_SDHICD0, NULL); | 1138 | gpio_request(GPIO_FN_SDHICD0, NULL); |
diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c index 2fe9704d5ea1..cc442d198cdc 100644 --- a/arch/arm/mach-shmobile/intc-sh7367.c +++ b/arch/arm/mach-shmobile/intc-sh7367.c | |||
@@ -421,7 +421,7 @@ static struct intc_desc intcs_desc __initdata = { | |||
421 | 421 | ||
422 | static void intcs_demux(unsigned int irq, struct irq_desc *desc) | 422 | static void intcs_demux(unsigned int irq, struct irq_desc *desc) |
423 | { | 423 | { |
424 | void __iomem *reg = (void *)get_irq_data(irq); | 424 | void __iomem *reg = (void *)irq_get_handler_data(irq); |
425 | unsigned int evtcodeas = ioread32(reg); | 425 | unsigned int evtcodeas = ioread32(reg); |
426 | 426 | ||
427 | generic_handle_irq(intcs_evt2irq(evtcodeas)); | 427 | generic_handle_irq(intcs_evt2irq(evtcodeas)); |
@@ -435,6 +435,6 @@ void __init sh7367_init_irq(void) | |||
435 | register_intc_controller(&intcs_desc); | 435 | register_intc_controller(&intcs_desc); |
436 | 436 | ||
437 | /* demux using INTEVTSA */ | 437 | /* demux using INTEVTSA */ |
438 | set_irq_data(evt2irq(0xf80), (void *)intevtsa); | 438 | irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa); |
439 | set_irq_chained_handler(evt2irq(0xf80), intcs_demux); | 439 | irq_set_chained_handler(evt2irq(0xf80), intcs_demux); |
440 | } | 440 | } |
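
The sh7367, sh7372 and sh7377 hunks all follow one demux pattern: stash a register pointer with irq_set_handler_data(), install a chained flow handler, and read the pointer back with irq_get_handler_data() when the parent interrupt fires. A condensed sketch of that idiom, assuming the 2.6.39 API; the IRQ numbers and the event-to-IRQ mapping are illustrative:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

#define MY_PARENT_IRQ		100	/* placeholder parent (cascade) IRQ */
#define MY_CHILD_IRQ_BASE	200	/* placeholder base of the demuxed range */

static void my_demux(unsigned int irq, struct irq_desc *desc)
{
	/* the register pointer stashed at init time; was get_irq_data() */
	void __iomem *reg = irq_get_handler_data(irq);
	unsigned int evt = ioread32(reg);

	/* hand the decoded child interrupt to its own flow handler */
	generic_handle_irq(MY_CHILD_IRQ_BASE + (evt >> 5));	/* mapping is illustrative */
}

static void __init my_init_demux(void __iomem *intevt_reg)
{
	irq_set_handler_data(MY_PARENT_IRQ, (void *)intevt_reg);	/* was set_irq_data() */
	irq_set_chained_handler(MY_PARENT_IRQ, my_demux);		/* was set_irq_chained_handler() */
}
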
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c index ca5f9d17b39a..7a4960f9c1e3 100644 --- a/arch/arm/mach-shmobile/intc-sh7372.c +++ b/arch/arm/mach-shmobile/intc-sh7372.c | |||
@@ -601,7 +601,7 @@ static struct intc_desc intcs_desc __initdata = { | |||
601 | 601 | ||
602 | static void intcs_demux(unsigned int irq, struct irq_desc *desc) | 602 | static void intcs_demux(unsigned int irq, struct irq_desc *desc) |
603 | { | 603 | { |
604 | void __iomem *reg = (void *)get_irq_data(irq); | 604 | void __iomem *reg = (void *)irq_get_handler_data(irq); |
605 | unsigned int evtcodeas = ioread32(reg); | 605 | unsigned int evtcodeas = ioread32(reg); |
606 | 606 | ||
607 | generic_handle_irq(intcs_evt2irq(evtcodeas)); | 607 | generic_handle_irq(intcs_evt2irq(evtcodeas)); |
@@ -615,6 +615,6 @@ void __init sh7372_init_irq(void) | |||
615 | register_intc_controller(&intcs_desc); | 615 | register_intc_controller(&intcs_desc); |
616 | 616 | ||
617 | /* demux using INTEVTSA */ | 617 | /* demux using INTEVTSA */ |
618 | set_irq_data(evt2irq(0xf80), (void *)intevtsa); | 618 | irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa); |
619 | set_irq_chained_handler(evt2irq(0xf80), intcs_demux); | 619 | irq_set_chained_handler(evt2irq(0xf80), intcs_demux); |
620 | } | 620 | } |
diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c index dd568382cc9f..fe45154ce660 100644 --- a/arch/arm/mach-shmobile/intc-sh7377.c +++ b/arch/arm/mach-shmobile/intc-sh7377.c | |||
@@ -626,7 +626,7 @@ static struct intc_desc intcs_desc __initdata = { | |||
626 | 626 | ||
627 | static void intcs_demux(unsigned int irq, struct irq_desc *desc) | 627 | static void intcs_demux(unsigned int irq, struct irq_desc *desc) |
628 | { | 628 | { |
629 | void __iomem *reg = (void *)get_irq_data(irq); | 629 | void __iomem *reg = (void *)irq_get_handler_data(irq); |
630 | unsigned int evtcodeas = ioread32(reg); | 630 | unsigned int evtcodeas = ioread32(reg); |
631 | 631 | ||
632 | generic_handle_irq(intcs_evt2irq(evtcodeas)); | 632 | generic_handle_irq(intcs_evt2irq(evtcodeas)); |
@@ -641,6 +641,6 @@ void __init sh7377_init_irq(void) | |||
641 | register_intc_controller(&intcs_desc); | 641 | register_intc_controller(&intcs_desc); |
642 | 642 | ||
643 | /* demux using INTEVTSA */ | 643 | /* demux using INTEVTSA */ |
644 | set_irq_data(evt2irq(INTCS_INTVECT), (void *)intevtsa); | 644 | irq_set_handler_data(evt2irq(INTCS_INTVECT), (void *)intevtsa); |
645 | set_irq_chained_handler(evt2irq(INTCS_INTVECT), intcs_demux); | 645 | irq_set_chained_handler(evt2irq(INTCS_INTVECT), intcs_demux); |
646 | } | 646 | } |
diff --git a/arch/arm/mach-tcc8k/irq.c b/arch/arm/mach-tcc8k/irq.c index aa9231f4fc6e..209fa5c65d4c 100644 --- a/arch/arm/mach-tcc8k/irq.c +++ b/arch/arm/mach-tcc8k/irq.c | |||
@@ -102,10 +102,10 @@ void __init tcc8k_init_irq(void) | |||
102 | 102 | ||
103 | for (irqno = 0; irqno < NR_IRQS; irqno++) { | 103 | for (irqno = 0; irqno < NR_IRQS; irqno++) { |
104 | if (irqno < 32) | 104 | if (irqno < 32) |
105 | set_irq_chip(irqno, &tcc8000_irq_chip0); | 105 | irq_set_chip(irqno, &tcc8000_irq_chip0); |
106 | else | 106 | else |
107 | set_irq_chip(irqno, &tcc8000_irq_chip1); | 107 | irq_set_chip(irqno, &tcc8000_irq_chip1); |
108 | set_irq_handler(irqno, handle_level_irq); | 108 | irq_set_handler(irqno, handle_level_irq); |
109 | set_irq_flags(irqno, IRQF_VALID); | 109 | set_irq_flags(irqno, IRQF_VALID); |
110 | } | 110 | } |
111 | } | 111 | } |
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c index 12090a2cf3e0..76a3f654220f 100644 --- a/arch/arm/mach-tegra/gpio.c +++ b/arch/arm/mach-tegra/gpio.c | |||
@@ -208,9 +208,9 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type) | |||
208 | spin_unlock_irqrestore(&bank->lvl_lock[port], flags); | 208 | spin_unlock_irqrestore(&bank->lvl_lock[port], flags); |
209 | 209 | ||
210 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 210 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
211 | __set_irq_handler_unlocked(d->irq, handle_level_irq); | 211 | __irq_set_handler_locked(d->irq, handle_level_irq); |
212 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 212 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
213 | __set_irq_handler_unlocked(d->irq, handle_edge_irq); | 213 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
214 | 214 | ||
215 | return 0; | 215 | return 0; |
216 | } | 216 | } |
@@ -224,7 +224,7 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
224 | 224 | ||
225 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 225 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
226 | 226 | ||
227 | bank = get_irq_data(irq); | 227 | bank = irq_get_handler_data(irq); |
228 | 228 | ||
229 | for (port = 0; port < 4; port++) { | 229 | for (port = 0; port < 4; port++) { |
230 | int gpio = tegra_gpio_compose(bank->bank, port, 0); | 230 | int gpio = tegra_gpio_compose(bank->bank, port, 0); |
@@ -275,13 +275,6 @@ void tegra_gpio_resume(void) | |||
275 | } | 275 | } |
276 | 276 | ||
277 | local_irq_restore(flags); | 277 | local_irq_restore(flags); |
278 | |||
279 | for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { | ||
280 | struct irq_desc *desc = irq_to_desc(i); | ||
281 | if (!desc || (desc->status & IRQ_WAKEUP)) | ||
282 | continue; | ||
283 | enable_irq(i); | ||
284 | } | ||
285 | } | 278 | } |
286 | 279 | ||
287 | void tegra_gpio_suspend(void) | 280 | void tegra_gpio_suspend(void) |
@@ -289,18 +282,6 @@ void tegra_gpio_suspend(void) | |||
289 | unsigned long flags; | 282 | unsigned long flags; |
290 | int b, p, i; | 283 | int b, p, i; |
291 | 284 | ||
292 | for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { | ||
293 | struct irq_desc *desc = irq_to_desc(i); | ||
294 | if (!desc) | ||
295 | continue; | ||
296 | if (desc->status & IRQ_WAKEUP) { | ||
297 | int gpio = i - INT_GPIO_BASE; | ||
298 | pr_debug("gpio %d.%d is wakeup\n", gpio/8, gpio&7); | ||
299 | continue; | ||
300 | } | ||
301 | disable_irq(i); | ||
302 | } | ||
303 | |||
304 | local_irq_save(flags); | 285 | local_irq_save(flags); |
305 | for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { | 286 | for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { |
306 | struct tegra_gpio_bank *bank = &tegra_gpio_banks[b]; | 287 | struct tegra_gpio_bank *bank = &tegra_gpio_banks[b]; |
@@ -320,7 +301,7 @@ void tegra_gpio_suspend(void) | |||
320 | static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable) | 301 | static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable) |
321 | { | 302 | { |
322 | struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d); | 303 | struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d); |
323 | return set_irq_wake(bank->irq, enable); | 304 | return irq_set_irq_wake(bank->irq, enable); |
324 | } | 305 | } |
325 | #endif | 306 | #endif |
326 | 307 | ||
@@ -359,18 +340,18 @@ static int __init tegra_gpio_init(void) | |||
359 | for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { | 340 | for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { |
360 | bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))]; | 341 | bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))]; |
361 | 342 | ||
362 | lockdep_set_class(&irq_desc[i].lock, &gpio_lock_class); | 343 | irq_set_lockdep_class(i, &gpio_lock_class); |
363 | set_irq_chip_data(i, bank); | 344 | irq_set_chip_data(i, bank); |
364 | set_irq_chip(i, &tegra_gpio_irq_chip); | 345 | irq_set_chip_and_handler(i, &tegra_gpio_irq_chip, |
365 | set_irq_handler(i, handle_simple_irq); | 346 | handle_simple_irq); |
366 | set_irq_flags(i, IRQF_VALID); | 347 | set_irq_flags(i, IRQF_VALID); |
367 | } | 348 | } |
368 | 349 | ||
369 | for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) { | 350 | for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) { |
370 | bank = &tegra_gpio_banks[i]; | 351 | bank = &tegra_gpio_banks[i]; |
371 | 352 | ||
372 | set_irq_chained_handler(bank->irq, tegra_gpio_irq_handler); | 353 | irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler); |
373 | set_irq_data(bank->irq, bank); | 354 | irq_set_handler_data(bank->irq, bank); |
374 | 355 | ||
375 | for (j = 0; j < 4; j++) | 356 | for (j = 0; j < 4; j++) |
376 | spin_lock_init(&bank->lvl_lock[j]); | 357 | spin_lock_init(&bank->lvl_lock[j]); |
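
The Tegra GPIO hunks bundle several conversions: the per-IRQ lockdep class and chip data now go through irq_set_lockdep_class() and irq_set_chip_data(), the bank pointer travels via irq_set_handler_data() to the chained handler, and wake requests are forwarded with irq_set_irq_wake() instead of set_irq_wake(). A compressed sketch of that setup under the same 2.6.39 API; the bank structure, IRQ range and chip are placeholders standing in for the Tegra-specific symbols:

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/lockdep.h>

#define MY_GPIO_IRQ_BASE	64	/* placeholder virtual IRQ base */
#define MY_GPIO_NR_IRQS		32	/* placeholder number of GPIO IRQs */

struct my_gpio_bank {			/* stands in for tegra_gpio_bank */
	int irq;			/* upstream (chained) interrupt of this bank */
};

static struct irq_chip my_gpio_irq_chip;		/* callbacks filled in elsewhere */
static struct lock_class_key my_gpio_lock_class;

static void my_gpio_demux(unsigned int irq, struct irq_desc *desc) { }	/* placeholder */

static void __init my_gpio_irq_init(struct my_gpio_bank *bank)
{
	int i;

	for (i = MY_GPIO_IRQ_BASE; i < MY_GPIO_IRQ_BASE + MY_GPIO_NR_IRQS; i++) {
		/* was: lockdep_set_class(&irq_desc[i].lock, ...) */
		irq_set_lockdep_class(i, &my_gpio_lock_class);
		irq_set_chip_data(i, bank);
		irq_set_chip_and_handler(i, &my_gpio_irq_chip, handle_simple_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	irq_set_chained_handler(bank->irq, my_gpio_demux);
	irq_set_handler_data(bank->irq, bank);
}

/* Wake requests are simply forwarded to the bank's upstream interrupt. */
static int my_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct my_gpio_bank *bank = irq_data_get_irq_chip_data(d);

	return irq_set_irq_wake(bank->irq, on);		/* was set_irq_wake() */
}
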
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c index dfbc219ea492..4330d8995b27 100644 --- a/arch/arm/mach-tegra/irq.c +++ b/arch/arm/mach-tegra/irq.c | |||
@@ -144,7 +144,7 @@ void __init tegra_init_irq(void) | |||
144 | gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), | 144 | gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), |
145 | IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100)); | 145 | IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100)); |
146 | 146 | ||
147 | gic = get_irq_chip(29); | 147 | gic = irq_get_chip(29); |
148 | tegra_gic_unmask_irq = gic->irq_unmask; | 148 | tegra_gic_unmask_irq = gic->irq_unmask; |
149 | tegra_gic_mask_irq = gic->irq_mask; | 149 | tegra_gic_mask_irq = gic->irq_mask; |
150 | tegra_gic_ack_irq = gic->irq_ack; | 150 | tegra_gic_ack_irq = gic->irq_ack; |
@@ -154,8 +154,7 @@ void __init tegra_init_irq(void) | |||
154 | 154 | ||
155 | for (i = 0; i < INT_MAIN_NR; i++) { | 155 | for (i = 0; i < INT_MAIN_NR; i++) { |
156 | irq = INT_PRI_BASE + i; | 156 | irq = INT_PRI_BASE + i; |
157 | set_irq_chip(irq, &tegra_irq); | 157 | irq_set_chip_and_handler(irq, &tegra_irq, handle_level_irq); |
158 | set_irq_handler(irq, handle_level_irq); | ||
159 | set_irq_flags(irq, IRQF_VALID); | 158 | set_irq_flags(irq, IRQF_VALID); |
160 | } | 159 | } |
161 | } | 160 | } |
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 203b986280f5..58626013aa32 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig | |||
@@ -23,6 +23,7 @@ menu "Ux500 target platform" | |||
23 | config MACH_U8500 | 23 | config MACH_U8500 |
24 | bool "U8500 Development platform" | 24 | bool "U8500 Development platform" |
25 | depends on UX500_SOC_DB8500 | 25 | depends on UX500_SOC_DB8500 |
26 | select TPS6105X | ||
26 | help | 27 | help |
27 | Include support for the mop500 development platform. | 28 | Include support for the mop500 development platform. |
28 | 29 | ||
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c index 875c91b2f8a4..9ed0f90cfe23 100644 --- a/arch/arm/mach-ux500/board-mop500-regulators.c +++ b/arch/arm/mach-ux500/board-mop500-regulators.c | |||
@@ -13,6 +13,30 @@ | |||
13 | #include <linux/regulator/ab8500.h> | 13 | #include <linux/regulator/ab8500.h> |
14 | #include "board-mop500-regulators.h" | 14 | #include "board-mop500-regulators.h" |
15 | 15 | ||
16 | /* | ||
17 | * TPS61052 regulator | ||
18 | */ | ||
19 | static struct regulator_consumer_supply tps61052_vaudio_consumers[] = { | ||
20 | /* | ||
21 | * Boost converter supply to raise voltage on audio speaker, this | ||
22 | * is actually connected to three pins, VInVhfL (left amplifier) | ||
23 | * VInVhfR (right amplifier) and VIntDClassInt - all three must | ||
24 | * be connected to the same voltage. | ||
25 | */ | ||
26 | REGULATOR_SUPPLY("vintdclassint", "ab8500-codec.0"), | ||
27 | }; | ||
28 | |||
29 | struct regulator_init_data tps61052_regulator = { | ||
30 | .constraints = { | ||
31 | .name = "vaudio-hf", | ||
32 | .min_uV = 4500000, | ||
33 | .max_uV = 4500000, | ||
34 | .valid_ops_mask = REGULATOR_CHANGE_STATUS, | ||
35 | }, | ||
36 | .num_consumer_supplies = ARRAY_SIZE(tps61052_vaudio_consumers), | ||
37 | .consumer_supplies = tps61052_vaudio_consumers, | ||
38 | }; | ||
39 | |||
16 | static struct regulator_consumer_supply ab8500_vaux1_consumers[] = { | 40 | static struct regulator_consumer_supply ab8500_vaux1_consumers[] = { |
17 | /* External displays, connector on board 2v5 power supply */ | 41 | /* External displays, connector on board 2v5 power supply */ |
18 | REGULATOR_SUPPLY("vaux12v5", "mcde.0"), | 42 | REGULATOR_SUPPLY("vaux12v5", "mcde.0"), |
@@ -62,6 +86,182 @@ static struct regulator_consumer_supply ab8500_vana_consumers[] = { | |||
62 | REGULATOR_SUPPLY("vsmps2", "mcde.0"), | 86 | REGULATOR_SUPPLY("vsmps2", "mcde.0"), |
63 | }; | 87 | }; |
64 | 88 | ||
89 | /* ab8500 regulator register initialization */ | ||
90 | struct ab8500_regulator_reg_init | ||
91 | ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = { | ||
92 | /* | ||
93 | * VanaRequestCtrl = HP/LP depending on VxRequest | ||
94 | * VextSupply1RequestCtrl = HP/LP depending on VxRequest | ||
95 | */ | ||
96 | INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00), | ||
97 | /* | ||
98 | * VextSupply2RequestCtrl = HP/LP depending on VxRequest | ||
99 | * VextSupply3RequestCtrl = HP/LP depending on VxRequest | ||
100 | * Vaux1RequestCtrl = HP/LP depending on VxRequest | ||
101 | * Vaux2RequestCtrl = HP/LP depending on VxRequest | ||
102 | */ | ||
103 | INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00), | ||
104 | /* | ||
105 | * Vaux3RequestCtrl = HP/LP depending on VxRequest | ||
106 | * SwHPReq = Control through SWValid disabled | ||
107 | */ | ||
108 | INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00), | ||
109 | /* | ||
110 | * VanaSysClkReq1HPValid = disabled | ||
111 | * Vaux1SysClkReq1HPValid = disabled | ||
112 | * Vaux2SysClkReq1HPValid = disabled | ||
113 | * Vaux3SysClkReq1HPValid = disabled | ||
114 | */ | ||
115 | INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00), | ||
116 | /* | ||
117 | * VextSupply1SysClkReq1HPValid = disabled | ||
118 | * VextSupply2SysClkReq1HPValid = disabled | ||
119 | * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled | ||
120 | */ | ||
121 | INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40), | ||
122 | /* | ||
123 | * VanaHwHPReq1Valid = disabled | ||
124 | * Vaux1HwHPreq1Valid = disabled | ||
125 | * Vaux2HwHPReq1Valid = disabled | ||
126 | * Vaux3HwHPReqValid = disabled | ||
127 | */ | ||
128 | INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00), | ||
129 | /* | ||
130 | * VextSupply1HwHPReq1Valid = disabled | ||
131 | * VextSupply2HwHPReq1Valid = disabled | ||
132 | * VextSupply3HwHPReq1Valid = disabled | ||
133 | */ | ||
134 | INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00), | ||
135 | /* | ||
136 | * VanaHwHPReq2Valid = disabled | ||
137 | * Vaux1HwHPReq2Valid = disabled | ||
138 | * Vaux2HwHPReq2Valid = disabled | ||
139 | * Vaux3HwHPReq2Valid = disabled | ||
140 | */ | ||
141 | INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00), | ||
142 | /* | ||
143 | * VextSupply1HwHPReq2Valid = disabled | ||
144 | * VextSupply2HwHPReq2Valid = disabled | ||
145 | * VextSupply3HwHPReq2Valid = HWReq2 controlled | ||
146 | */ | ||
147 | INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04), | ||
148 | /* | ||
149 | * VanaSwHPReqValid = disabled | ||
150 | * Vaux1SwHPReqValid = disabled | ||
151 | */ | ||
152 | INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00), | ||
153 | /* | ||
154 | * Vaux2SwHPReqValid = disabled | ||
155 | * Vaux3SwHPReqValid = disabled | ||
156 | * VextSupply1SwHPReqValid = disabled | ||
157 | * VextSupply2SwHPReqValid = disabled | ||
158 | * VextSupply3SwHPReqValid = disabled | ||
159 | */ | ||
160 | INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00), | ||
161 | /* | ||
162 | * SysClkReq2Valid1 = SysClkReq2 controlled | ||
163 | * SysClkReq3Valid1 = disabled | ||
164 | * SysClkReq4Valid1 = SysClkReq4 controlled | ||
165 | * SysClkReq5Valid1 = disabled | ||
166 | * SysClkReq6Valid1 = SysClkReq6 controlled | ||
167 | * SysClkReq7Valid1 = disabled | ||
168 | * SysClkReq8Valid1 = disabled | ||
169 | */ | ||
170 | INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a), | ||
171 | /* | ||
172 | * SysClkReq2Valid2 = disabled | ||
173 | * SysClkReq3Valid2 = disabled | ||
174 | * SysClkReq4Valid2 = disabled | ||
175 | * SysClkReq5Valid2 = disabled | ||
176 | * SysClkReq6Valid2 = SysClkReq6 controlled | ||
177 | * SysClkReq7Valid2 = disabled | ||
178 | * SysClkReq8Valid2 = disabled | ||
179 | */ | ||
180 | INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20), | ||
181 | /* | ||
182 | * VTVoutEna = disabled | ||
183 | * Vintcore12Ena = disabled | ||
184 | * Vintcore12Sel = 1.25 V | ||
185 | * Vintcore12LP = inactive (HP) | ||
186 | * VTVoutLP = inactive (HP) | ||
187 | */ | ||
188 | INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10), | ||
189 | /* | ||
190 | * VaudioEna = disabled | ||
191 | * VdmicEna = disabled | ||
192 | * Vamic1Ena = disabled | ||
193 | * Vamic2Ena = disabled | ||
194 | */ | ||
195 | INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00), | ||
196 | /* | ||
197 | * Vamic1_dzout = high-Z when Vamic1 is disabled | ||
198 | * Vamic2_dzout = high-Z when Vamic2 is disabled | ||
199 | */ | ||
200 | INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00), | ||
201 | /* | ||
202 | * VPll = Hw controlled | ||
203 | * VanaRegu = force off | ||
204 | */ | ||
205 | INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02), | ||
206 | /* | ||
207 | * VrefDDREna = disabled | ||
208 | * VrefDDRSleepMode = inactive (no pulldown) | ||
209 | */ | ||
210 | INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00), | ||
211 | /* | ||
212 | * VextSupply1Regu = HW control | ||
213 | * VextSupply2Regu = HW control | ||
214 | * VextSupply3Regu = HW control | ||
215 | * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0 | ||
216 | * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0 | ||
217 | */ | ||
218 | INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a), | ||
219 | /* | ||
220 | * Vaux1Regu = force HP | ||
221 | * Vaux2Regu = force off | ||
222 | */ | ||
223 | INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01), | ||
224 | /* | ||
225 | * Vaux3regu = force off | ||
226 | */ | ||
227 | INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00), | ||
228 | /* | ||
229 | * Vsmps1 = 1.15V | ||
230 | */ | ||
231 | INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24), | ||
232 | /* | ||
233 | * Vaux1Sel = 2.5 V | ||
234 | */ | ||
235 | INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08), | ||
236 | /* | ||
237 | * Vaux2Sel = 2.9 V | ||
238 | */ | ||
239 | INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d), | ||
240 | /* | ||
241 | * Vaux3Sel = 2.91 V | ||
242 | */ | ||
243 | INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07), | ||
244 | /* | ||
245 | * VextSupply12LP = disabled (no LP) | ||
246 | */ | ||
247 | INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00), | ||
248 | /* | ||
249 | * Vaux1Disch = short discharge time | ||
250 | * Vaux2Disch = short discharge time | ||
251 | * Vaux3Disch = short discharge time | ||
252 | * Vintcore12Disch = short discharge time | ||
253 | * VTVoutDisch = short discharge time | ||
254 | * VaudioDisch = short discharge time | ||
255 | */ | ||
256 | INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00), | ||
257 | /* | ||
258 | * VanaDisch = short discharge time | ||
259 | * VdmicPullDownEna = pulldown disabled when Vdmic is disabled | ||
260 | * VdmicDisch = short discharge time | ||
261 | */ | ||
262 | INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00), | ||
263 | }; | ||
264 | |||
65 | /* AB8500 regulators */ | 265 | /* AB8500 regulators */ |
66 | struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { | 266 | struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { |
67 | /* supplies to the display/camera */ | 267 | /* supplies to the display/camera */ |
@@ -72,6 +272,7 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { | |||
72 | .max_uV = 2900000, | 272 | .max_uV = 2900000, |
73 | .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | | 273 | .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | |
74 | REGULATOR_CHANGE_STATUS, | 274 | REGULATOR_CHANGE_STATUS, |
275 | .boot_on = 1, /* must be on for display */ | ||
75 | }, | 276 | }, |
76 | .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), | 277 | .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), |
77 | .consumer_supplies = ab8500_vaux1_consumers, | 278 | .consumer_supplies = ab8500_vaux1_consumers, |
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h index f979b892e4fa..94992158d962 100644 --- a/arch/arm/mach-ux500/board-mop500-regulators.h +++ b/arch/arm/mach-ux500/board-mop500-regulators.h | |||
@@ -17,5 +17,6 @@ | |||
17 | extern struct ab8500_regulator_reg_init | 17 | extern struct ab8500_regulator_reg_init |
18 | ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS]; | 18 | ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS]; |
19 | extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS]; | 19 | extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS]; |
20 | extern struct regulator_init_data tps61052_regulator; | ||
20 | 21 | ||
21 | #endif | 22 | #endif |
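
The new tps61052_regulator data above only declares the boost supply and its constraints; a consumer driver would then look the supply up by the name given in REGULATOR_SUPPLY(). A hedged sketch of that consumer side, assuming the standard regulator consumer API; the function and device here are hypothetical, not code from this diff:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* my_codec_enable_boost() is a hypothetical consumer of the supply above. */
static int my_codec_enable_boost(struct device *dev)
{
	struct regulator *vhf;
	int ret;

	/* "vintdclassint" is the supply name given in REGULATOR_SUPPLY() above */
	vhf = regulator_get(dev, "vintdclassint");
	if (IS_ERR(vhf))
		return PTR_ERR(vhf);

	/* enabling is permitted because the constraints set REGULATOR_CHANGE_STATUS */
	ret = regulator_enable(vhf);
	if (ret)
		regulator_put(vhf);

	return ret;
}
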
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index d0076453d7ff..dc8746d7826e 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/mfd/ab8500.h> | 22 | #include <linux/mfd/ab8500.h> |
23 | #include <linux/regulator/ab8500.h> | 23 | #include <linux/regulator/ab8500.h> |
24 | #include <linux/mfd/tc3589x.h> | 24 | #include <linux/mfd/tc3589x.h> |
25 | #include <linux/mfd/tps6105x.h> | ||
26 | #include <linux/mfd/ab8500/gpio.h> | ||
25 | #include <linux/leds-lp5521.h> | 27 | #include <linux/leds-lp5521.h> |
26 | #include <linux/input.h> | 28 | #include <linux/input.h> |
27 | #include <linux/gpio_keys.h> | 29 | #include <linux/gpio_keys.h> |
@@ -42,10 +44,35 @@ | |||
42 | #include "board-mop500.h" | 44 | #include "board-mop500.h" |
43 | #include "board-mop500-regulators.h" | 45 | #include "board-mop500-regulators.h" |
44 | 46 | ||
47 | static struct ab8500_gpio_platform_data ab8500_gpio_pdata = { | ||
48 | .gpio_base = MOP500_AB8500_GPIO(0), | ||
49 | .irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE, | ||
50 | /* config_reg is the initial configuration of ab8500 pins. | ||
51 | * The pins can be configured as GPIO or alt functions based | ||
52 | * on value present in GpioSel1 to GpioSel6 and AlternatFunction | ||
53 | * register. This is the array of 7 configuration settings. | ||
54 | * These settings have to be decided at compile time. Below is the | ||
55 | * explanation of these settings: | ||
56 | * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO | ||
57 | * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO | ||
58 | * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO | ||
59 | * GpioSel4 = 0x01 => Pin GPIO25 is configured as GPIO | ||
60 | * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are configured as GPIO | ||
61 | * GpioSel6 = 0x00 => Pins GPIO41 & GPIO42 are not configured as GPIO | ||
62 | * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured | ||
63 | * as GPIO then this register selects the alternate functions | ||
64 | */ | ||
65 | .config_reg = {0x00, 0x1E, 0x80, 0x01, | ||
66 | 0x7A, 0x00, 0x00}, | ||
67 | }; | ||
68 | |||
45 | static struct ab8500_platform_data ab8500_platdata = { | 69 | static struct ab8500_platform_data ab8500_platdata = { |
46 | .irq_base = MOP500_AB8500_IRQ_BASE, | 70 | .irq_base = MOP500_AB8500_IRQ_BASE, |
71 | .regulator_reg_init = ab8500_regulator_reg_init, | ||
72 | .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init), | ||
47 | .regulator = ab8500_regulators, | 73 | .regulator = ab8500_regulators, |
48 | .num_regulator = ARRAY_SIZE(ab8500_regulators), | 74 | .num_regulator = ARRAY_SIZE(ab8500_regulators), |
75 | .gpio = &ab8500_gpio_pdata, | ||
49 | }; | 76 | }; |
50 | 77 | ||
51 | static struct resource ab8500_resources[] = { | 78 | static struct resource ab8500_resources[] = { |
@@ -67,6 +94,15 @@ struct platform_device ab8500_device = { | |||
67 | }; | 94 | }; |
68 | 95 | ||
69 | /* | 96 | /* |
97 | * TPS61052 | ||
98 | */ | ||
99 | |||
100 | static struct tps6105x_platform_data mop500_tps61052_data = { | ||
101 | .mode = TPS6105X_MODE_VOLTAGE, | ||
102 | .regulator_data = &tps61052_regulator, | ||
103 | }; | ||
104 | |||
105 | /* | ||
70 | * TC35892 | 106 | * TC35892 |
71 | */ | 107 | */ |
72 | 108 | ||
@@ -136,7 +172,7 @@ static struct lp5521_platform_data __initdata lp5521_sec_data = { | |||
136 | .clock_mode = LP5521_CLOCK_EXT, | 172 | .clock_mode = LP5521_CLOCK_EXT, |
137 | }; | 173 | }; |
138 | 174 | ||
139 | static struct i2c_board_info mop500_i2c0_devices[] = { | 175 | static struct i2c_board_info __initdata mop500_i2c0_devices[] = { |
140 | { | 176 | { |
141 | I2C_BOARD_INFO("tc3589x", 0x42), | 177 | I2C_BOARD_INFO("tc3589x", 0x42), |
142 | .irq = NOMADIK_GPIO_TO_IRQ(217), | 178 | .irq = NOMADIK_GPIO_TO_IRQ(217), |
@@ -144,6 +180,14 @@ static struct i2c_board_info mop500_i2c0_devices[] = { | |||
144 | }, | 180 | }, |
145 | }; | 181 | }; |
146 | 182 | ||
183 | /* I2C0 devices only available prior to HREFv60 */ | ||
184 | static struct i2c_board_info __initdata mop500_i2c0_old_devices[] = { | ||
185 | { | ||
186 | I2C_BOARD_INFO("tps61052", 0x33), | ||
187 | .platform_data = &mop500_tps61052_data, | ||
188 | }, | ||
189 | }; | ||
190 | |||
147 | static struct i2c_board_info __initdata mop500_i2c2_devices[] = { | 191 | static struct i2c_board_info __initdata mop500_i2c2_devices[] = { |
148 | { | 192 | { |
149 | /* lp5521 LED driver, 1st device */ | 193 | /* lp5521 LED driver, 1st device */ |
@@ -406,6 +450,9 @@ static void __init mop500_init_machine(void) | |||
406 | 450 | ||
407 | i2c_register_board_info(0, mop500_i2c0_devices, | 451 | i2c_register_board_info(0, mop500_i2c0_devices, |
408 | ARRAY_SIZE(mop500_i2c0_devices)); | 452 | ARRAY_SIZE(mop500_i2c0_devices)); |
453 | if (!machine_is_hrefv60()) | ||
454 | i2c_register_board_info(0, mop500_i2c0_old_devices, | ||
455 | ARRAY_SIZE(mop500_i2c0_old_devices)); | ||
409 | i2c_register_board_info(2, mop500_i2c2_devices, | 456 | i2c_register_board_info(2, mop500_i2c2_devices, |
410 | ARRAY_SIZE(mop500_i2c2_devices)); | 457 | ARRAY_SIZE(mop500_i2c2_devices)); |
411 | } | 458 | } |
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index 56722f4be71b..03a31cc9b084 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h | |||
@@ -27,6 +27,10 @@ | |||
27 | #define GPIO_BU21013_CS MOP500_EGPIO(13) | 27 | #define GPIO_BU21013_CS MOP500_EGPIO(13) |
28 | #define GPIO_SDMMC_EN MOP500_EGPIO(17) | 28 | #define GPIO_SDMMC_EN MOP500_EGPIO(17) |
29 | #define GPIO_SDMMC_1V8_3V_SEL MOP500_EGPIO(18) | 29 | #define GPIO_SDMMC_1V8_3V_SEL MOP500_EGPIO(18) |
30 | #define MOP500_EGPIO_END MOP500_EGPIO(24) | ||
31 | |||
32 | /* GPIOs on the AB8500 mixed-signals circuit */ | ||
33 | #define MOP500_AB8500_GPIO(x) (MOP500_EGPIO_END + (x)) | ||
30 | 34 | ||
31 | struct i2c_board_info; | 35 | struct i2c_board_info; |
32 | 36 | ||
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h index 7cdeb2af0ebb..97ef55f84934 100644 --- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h +++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h | |||
@@ -35,9 +35,20 @@ | |||
35 | #define MOP500_STMPE1601_IRQBASE MOP500_EGPIO_IRQ_END | 35 | #define MOP500_STMPE1601_IRQBASE MOP500_EGPIO_IRQ_END |
36 | #define MOP500_STMPE1601_IRQ(x) (MOP500_STMPE1601_IRQBASE + (x)) | 36 | #define MOP500_STMPE1601_IRQ(x) (MOP500_STMPE1601_IRQBASE + (x)) |
37 | 37 | ||
38 | #define MOP500_NR_IRQS MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS) | 38 | #define MOP500_STMPE1601_IRQ_END \ |
39 | MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS) | ||
39 | 40 | ||
40 | #define MOP500_IRQ_END MOP500_NR_IRQS | 41 | /* AB8500 virtual gpio IRQ */ |
42 | #define AB8500_VIR_GPIO_NR_IRQS 16 | ||
43 | |||
44 | #define MOP500_AB8500_VIR_GPIO_IRQ_BASE \ | ||
45 | MOP500_STMPE1601_IRQ_END | ||
46 | #define MOP500_AB8500_VIR_GPIO_IRQ_END \ | ||
47 | (MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS) | ||
48 | |||
49 | #define MOP500_NR_IRQS MOP500_AB8500_VIR_GPIO_IRQ_END | ||
50 | |||
51 | #define MOP500_IRQ_END MOP500_NR_IRQS | ||
41 | 52 | ||
42 | #if MOP500_IRQ_END > IRQ_BOARD_END | 53 | #if MOP500_IRQ_END > IRQ_BOARD_END |
43 | #undef IRQ_BOARD_END | 54 | #undef IRQ_BOARD_END |
diff --git a/arch/arm/mach-ux500/modem-irq-db5500.c b/arch/arm/mach-ux500/modem-irq-db5500.c index e1296a7447c8..6b86416c94c9 100644 --- a/arch/arm/mach-ux500/modem-irq-db5500.c +++ b/arch/arm/mach-ux500/modem-irq-db5500.c | |||
@@ -90,8 +90,7 @@ static irqreturn_t modem_cpu_irq_handler(int irq, void *data) | |||
90 | 90 | ||
91 | static void create_virtual_irq(int irq, struct irq_chip *modem_irq_chip) | 91 | static void create_virtual_irq(int irq, struct irq_chip *modem_irq_chip) |
92 | { | 92 | { |
93 | set_irq_chip(irq, modem_irq_chip); | 93 | irq_set_chip_and_handler(irq, modem_irq_chip, handle_simple_irq); |
94 | set_irq_handler(irq, handle_simple_irq); | ||
95 | set_irq_flags(irq, IRQF_VALID); | 94 | set_irq_flags(irq, IRQF_VALID); |
96 | 95 | ||
97 | pr_debug("modem_irq: Created virtual IRQ %d\n", irq); | 96 | pr_debug("modem_irq: Created virtual IRQ %d\n", irq); |
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index eb7ffa0ee8b5..96e59e3ee4f5 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c | |||
@@ -314,7 +314,7 @@ static struct mmci_platform_data mmc0_plat_data = { | |||
314 | .gpio_cd = -1, | 314 | .gpio_cd = -1, |
315 | }; | 315 | }; |
316 | 316 | ||
317 | static struct resource char_lcd_resources[] = { | 317 | static struct resource chalcd_resources[] = { |
318 | { | 318 | { |
319 | .start = VERSATILE_CHAR_LCD_BASE, | 319 | .start = VERSATILE_CHAR_LCD_BASE, |
320 | .end = (VERSATILE_CHAR_LCD_BASE + SZ_4K - 1), | 320 | .end = (VERSATILE_CHAR_LCD_BASE + SZ_4K - 1), |
diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c index 5f4ddde4f02a..245140c0df10 100644 --- a/arch/arm/mach-vt8500/irq.c +++ b/arch/arm/mach-vt8500/irq.c | |||
@@ -97,15 +97,15 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) | |||
97 | return -EINVAL; | 97 | return -EINVAL; |
98 | case IRQF_TRIGGER_HIGH: | 98 | case IRQF_TRIGGER_HIGH: |
99 | dctr |= VT8500_TRIGGER_HIGH; | 99 | dctr |= VT8500_TRIGGER_HIGH; |
100 | irq_desc[orig_irq].handle_irq = handle_level_irq; | 100 | __irq_set_handler_locked(orig_irq, handle_level_irq); |
101 | break; | 101 | break; |
102 | case IRQF_TRIGGER_FALLING: | 102 | case IRQF_TRIGGER_FALLING: |
103 | dctr |= VT8500_TRIGGER_FALLING; | 103 | dctr |= VT8500_TRIGGER_FALLING; |
104 | irq_desc[orig_irq].handle_irq = handle_edge_irq; | 104 | __irq_set_handler_locked(orig_irq, handle_edge_irq); |
105 | break; | 105 | break; |
106 | case IRQF_TRIGGER_RISING: | 106 | case IRQF_TRIGGER_RISING: |
107 | dctr |= VT8500_TRIGGER_RISING; | 107 | dctr |= VT8500_TRIGGER_RISING; |
108 | irq_desc[orig_irq].handle_irq = handle_edge_irq; | 108 | __irq_set_handler_locked(orig_irq, handle_edge_irq); |
109 | break; | 109 | break; |
110 | } | 110 | } |
111 | writeb(dctr, base + VT8500_IC_DCTR + irq); | 111 | writeb(dctr, base + VT8500_IC_DCTR + irq); |
@@ -136,8 +136,8 @@ void __init vt8500_init_irq(void) | |||
136 | /* Disable all interrupts and route them to IRQ */ | 136 | /* Disable all interrupts and route them to IRQ */ |
137 | writeb(0x00, ic_regbase + VT8500_IC_DCTR + i); | 137 | writeb(0x00, ic_regbase + VT8500_IC_DCTR + i); |
138 | 138 | ||
139 | set_irq_chip(i, &vt8500_irq_chip); | 139 | irq_set_chip_and_handler(i, &vt8500_irq_chip, |
140 | set_irq_handler(i, handle_level_irq); | 140 | handle_level_irq); |
141 | set_irq_flags(i, IRQF_VALID); | 141 | set_irq_flags(i, IRQF_VALID); |
142 | } | 142 | } |
143 | } else { | 143 | } else { |
@@ -167,8 +167,8 @@ void __init wm8505_init_irq(void) | |||
167 | writeb(0x00, sic_regbase + VT8500_IC_DCTR | 167 | writeb(0x00, sic_regbase + VT8500_IC_DCTR |
168 | + i - 64); | 168 | + i - 64); |
169 | 169 | ||
170 | set_irq_chip(i, &vt8500_irq_chip); | 170 | irq_set_chip_and_handler(i, &vt8500_irq_chip, |
171 | set_irq_handler(i, handle_level_irq); | 171 | handle_level_irq); |
172 | set_irq_flags(i, IRQF_VALID); | 172 | set_irq_flags(i, IRQF_VALID); |
173 | } | 173 | } |
174 | } else { | 174 | } else { |
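
The vt8500 change is about where the flow handler gets switched: instead of writing irq_desc[].handle_irq directly, the chip's set_type hook calls __irq_set_handler_locked(), which expects the descriptor lock already held by the genirq core. A minimal sketch of such a set_type hook in the same pre-irq_data style used by vt8500 here; the hardware trigger write is a placeholder:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* my_hw_set_trigger() stands in for the DCTR register write done by vt8500. */
static void my_hw_set_trigger(unsigned int irq, unsigned int flow_type) { }

static int my_irq_set_type(unsigned int irq, unsigned int flow_type)
{
	switch (flow_type) {
	case IRQF_TRIGGER_HIGH:
		/* level-triggered lines need the level flow handler */
		__irq_set_handler_locked(irq, handle_level_irq);
		break;
	case IRQF_TRIGGER_RISING:
	case IRQF_TRIGGER_FALLING:
		__irq_set_handler_locked(irq, handle_edge_irq);
		break;
	default:
		return -EINVAL;
	}

	my_hw_set_trigger(irq, flow_type);
	return 0;
}
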
diff --git a/arch/arm/mach-w90x900/irq.c b/arch/arm/mach-w90x900/irq.c index 9c350103dcda..7bf143c443f1 100644 --- a/arch/arm/mach-w90x900/irq.c +++ b/arch/arm/mach-w90x900/irq.c | |||
@@ -207,8 +207,8 @@ void __init nuc900_init_irq(void) | |||
207 | __raw_writel(0xFFFFFFFE, REG_AIC_MDCR); | 207 | __raw_writel(0xFFFFFFFE, REG_AIC_MDCR); |
208 | 208 | ||
209 | for (irqno = IRQ_WDT; irqno <= IRQ_ADC; irqno++) { | 209 | for (irqno = IRQ_WDT; irqno <= IRQ_ADC; irqno++) { |
210 | set_irq_chip(irqno, &nuc900_irq_chip); | 210 | irq_set_chip_and_handler(irqno, &nuc900_irq_chip, |
211 | set_irq_handler(irqno, handle_level_irq); | 211 | handle_level_irq); |
212 | set_irq_flags(irqno, IRQF_VALID); | 212 | set_irq_flags(irqno, IRQF_VALID); |
213 | } | 213 | } |
214 | } | 214 | } |
diff --git a/arch/arm/plat-mxc/3ds_debugboard.c b/arch/arm/plat-mxc/3ds_debugboard.c index c856fa397606..f0ba0726306c 100644 --- a/arch/arm/plat-mxc/3ds_debugboard.c +++ b/arch/arm/plat-mxc/3ds_debugboard.c | |||
@@ -100,14 +100,9 @@ static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc) | |||
100 | 100 | ||
101 | expio_irq = MXC_BOARD_IRQ_START; | 101 | expio_irq = MXC_BOARD_IRQ_START; |
102 | for (; int_valid != 0; int_valid >>= 1, expio_irq++) { | 102 | for (; int_valid != 0; int_valid >>= 1, expio_irq++) { |
103 | struct irq_desc *d; | ||
104 | if ((int_valid & 1) == 0) | 103 | if ((int_valid & 1) == 0) |
105 | continue; | 104 | continue; |
106 | d = irq_desc + expio_irq; | 105 | generic_handle_irq(expio_irq); |
107 | if (unlikely(!(d->handle_irq))) | ||
108 | pr_err("\nEXPIO irq: %d unhandled\n", expio_irq); | ||
109 | else | ||
110 | d->handle_irq(expio_irq, d); | ||
111 | } | 106 | } |
112 | 107 | ||
113 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 108 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
@@ -186,12 +181,11 @@ int __init mxc_expio_init(u32 base, u32 p_irq) | |||
186 | __raw_writew(0x1F, brd_io + INTR_MASK_REG); | 181 | __raw_writew(0x1F, brd_io + INTR_MASK_REG); |
187 | for (i = MXC_EXP_IO_BASE; | 182 | for (i = MXC_EXP_IO_BASE; |
188 | i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES); i++) { | 183 | i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES); i++) { |
189 | set_irq_chip(i, &expio_irq_chip); | 184 | irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq); |
190 | set_irq_handler(i, handle_level_irq); | ||
191 | set_irq_flags(i, IRQF_VALID); | 185 | set_irq_flags(i, IRQF_VALID); |
192 | } | 186 | } |
193 | set_irq_type(p_irq, IRQF_TRIGGER_LOW); | 187 | irq_set_irq_type(p_irq, IRQF_TRIGGER_LOW); |
194 | set_irq_chained_handler(p_irq, mxc_expio_irq_handler); | 188 | irq_set_chained_handler(p_irq, mxc_expio_irq_handler); |
195 | 189 | ||
196 | /* Register Lan device on the debugboard */ | 190 | /* Register Lan device on the debugboard */ |
197 | smsc911x_resources[0].start = LAN9217_BASE_ADDR(base); | 191 | smsc911x_resources[0].start = LAN9217_BASE_ADDR(base); |
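
The 3ds_debugboard hunk replaces the open-coded walk over irq_desc[] with generic_handle_irq(), which performs the descriptor lookup and handles the "no handler installed" case itself. A condensed sketch of the resulting demux loop; the pending-status read and the IRQ base are placeholders, not the real CPLD accessors:

#include <linux/irq.h>
#include <linux/types.h>

#define MY_BOARD_IRQ_START	256	/* placeholder base of the expander IRQ range */

static u32 my_read_pending(void)	/* placeholder for the CPLD status-register read */
{
	return 0;
}

static void my_expio_demux(u32 irq, struct irq_desc *desc)
{
	u32 int_valid = my_read_pending();
	u32 expio_irq = MY_BOARD_IRQ_START;

	for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
		if ((int_valid & 1) == 0)
			continue;
		/* was: fetch irq_desc[expio_irq] and call ->handle_irq by hand */
		generic_handle_irq(expio_irq);
	}

	/* acknowledge the upstream (chained) interrupt */
	desc->irq_data.chip->irq_ack(&desc->irq_data);
}
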
diff --git a/arch/arm/plat-mxc/avic.c b/arch/arm/plat-mxc/avic.c index deb284bc7c4b..09e2bd0fcdca 100644 --- a/arch/arm/plat-mxc/avic.c +++ b/arch/arm/plat-mxc/avic.c | |||
@@ -139,8 +139,8 @@ void __init mxc_init_irq(void __iomem *irqbase) | |||
139 | __raw_writel(0, avic_base + AVIC_INTTYPEH); | 139 | __raw_writel(0, avic_base + AVIC_INTTYPEH); |
140 | __raw_writel(0, avic_base + AVIC_INTTYPEL); | 140 | __raw_writel(0, avic_base + AVIC_INTTYPEL); |
141 | for (i = 0; i < MXC_INTERNAL_IRQS; i++) { | 141 | for (i = 0; i < MXC_INTERNAL_IRQS; i++) { |
142 | set_irq_chip(i, &mxc_avic_chip.base); | 142 | irq_set_chip_and_handler(i, &mxc_avic_chip.base, |
143 | set_irq_handler(i, handle_level_irq); | 143 | handle_level_irq); |
144 | set_irq_flags(i, IRQF_VALID); | 144 | set_irq_flags(i, IRQF_VALID); |
145 | } | 145 | } |
146 | 146 | ||
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c index 57d59855f9ec..7a107246fd98 100644 --- a/arch/arm/plat-mxc/gpio.c +++ b/arch/arm/plat-mxc/gpio.c | |||
@@ -175,7 +175,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) | |||
175 | static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) | 175 | static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) |
176 | { | 176 | { |
177 | u32 irq_stat; | 177 | u32 irq_stat; |
178 | struct mxc_gpio_port *port = get_irq_data(irq); | 178 | struct mxc_gpio_port *port = irq_get_handler_data(irq); |
179 | 179 | ||
180 | irq_stat = __raw_readl(port->base + GPIO_ISR) & | 180 | irq_stat = __raw_readl(port->base + GPIO_ISR) & |
181 | __raw_readl(port->base + GPIO_IMR); | 181 | __raw_readl(port->base + GPIO_IMR); |
@@ -188,7 +188,7 @@ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) | |||
188 | { | 188 | { |
189 | int i; | 189 | int i; |
190 | u32 irq_msk, irq_stat; | 190 | u32 irq_msk, irq_stat; |
191 | struct mxc_gpio_port *port = get_irq_data(irq); | 191 | struct mxc_gpio_port *port = irq_get_handler_data(irq); |
192 | 192 | ||
193 | /* walk through all interrupt status registers */ | 193 | /* walk through all interrupt status registers */ |
194 | for (i = 0; i < gpio_table_size; i++) { | 194 | for (i = 0; i < gpio_table_size; i++) { |
@@ -311,8 +311,8 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) | |||
311 | __raw_writel(~0, port[i].base + GPIO_ISR); | 311 | __raw_writel(~0, port[i].base + GPIO_ISR); |
312 | for (j = port[i].virtual_irq_start; | 312 | for (j = port[i].virtual_irq_start; |
313 | j < port[i].virtual_irq_start + 32; j++) { | 313 | j < port[i].virtual_irq_start + 32; j++) { |
314 | set_irq_chip(j, &gpio_irq_chip); | 314 | irq_set_chip_and_handler(j, &gpio_irq_chip, |
315 | set_irq_handler(j, handle_level_irq); | 315 | handle_level_irq); |
316 | set_irq_flags(j, IRQF_VALID); | 316 | set_irq_flags(j, IRQF_VALID); |
317 | } | 317 | } |
318 | 318 | ||
@@ -331,21 +331,23 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) | |||
331 | 331 | ||
332 | if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) { | 332 | if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) { |
333 | /* setup one handler for each entry */ | 333 | /* setup one handler for each entry */ |
334 | set_irq_chained_handler(port[i].irq, mx3_gpio_irq_handler); | 334 | irq_set_chained_handler(port[i].irq, |
335 | set_irq_data(port[i].irq, &port[i]); | 335 | mx3_gpio_irq_handler); |
336 | irq_set_handler_data(port[i].irq, &port[i]); | ||
336 | if (port[i].irq_high) { | 337 | if (port[i].irq_high) { |
337 | /* setup handler for GPIO 16 to 31 */ | 338 | /* setup handler for GPIO 16 to 31 */ |
338 | set_irq_chained_handler(port[i].irq_high, | 339 | irq_set_chained_handler(port[i].irq_high, |
339 | mx3_gpio_irq_handler); | 340 | mx3_gpio_irq_handler); |
340 | set_irq_data(port[i].irq_high, &port[i]); | 341 | irq_set_handler_data(port[i].irq_high, |
342 | &port[i]); | ||
341 | } | 343 | } |
342 | } | 344 | } |
343 | } | 345 | } |
344 | 346 | ||
345 | if (cpu_is_mx2()) { | 347 | if (cpu_is_mx2()) { |
346 | /* setup one handler for all GPIO interrupts */ | 348 | /* setup one handler for all GPIO interrupts */ |
347 | set_irq_chained_handler(port[0].irq, mx2_gpio_irq_handler); | 349 | irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler); |
348 | set_irq_data(port[0].irq, port); | 350 | irq_set_handler_data(port[0].irq, port); |
349 | } | 351 | } |
350 | 352 | ||
351 | return 0; | 353 | return 0; |
diff --git a/arch/arm/plat-mxc/irq-common.c b/arch/arm/plat-mxc/irq-common.c index 0c799ac27730..e1c6eff7258a 100644 --- a/arch/arm/plat-mxc/irq-common.c +++ b/arch/arm/plat-mxc/irq-common.c | |||
@@ -29,7 +29,7 @@ int imx_irq_set_priority(unsigned char irq, unsigned char prio) | |||
29 | 29 | ||
30 | ret = -ENOSYS; | 30 | ret = -ENOSYS; |
31 | 31 | ||
32 | base = get_irq_chip(irq); | 32 | base = irq_get_chip(irq); |
33 | if (base) { | 33 | if (base) { |
34 | chip = container_of(base, struct mxc_irq_chip, base); | 34 | chip = container_of(base, struct mxc_irq_chip, base); |
35 | if (chip->set_priority) | 35 | if (chip->set_priority) |
@@ -48,7 +48,7 @@ int mxc_set_irq_fiq(unsigned int irq, unsigned int type) | |||
48 | 48 | ||
49 | ret = -ENOSYS; | 49 | ret = -ENOSYS; |
50 | 50 | ||
51 | base = get_irq_chip(irq); | 51 | base = irq_get_chip(irq); |
52 | if (base) { | 52 | if (base) { |
53 | chip = container_of(base, struct mxc_irq_chip, base); | 53 | chip = container_of(base, struct mxc_irq_chip, base); |
54 | if (chip->set_irq_fiq) | 54 | if (chip->set_irq_fiq) |
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c index bc3a6be8a27f..57f9395f87ce 100644 --- a/arch/arm/plat-mxc/tzic.c +++ b/arch/arm/plat-mxc/tzic.c | |||
@@ -167,8 +167,8 @@ void __init tzic_init_irq(void __iomem *irqbase) | |||
167 | /* all IRQ no FIQ Warning :: No selection */ | 167 | /* all IRQ no FIQ Warning :: No selection */ |
168 | 168 | ||
169 | for (i = 0; i < MXC_INTERNAL_IRQS; i++) { | 169 | for (i = 0; i < MXC_INTERNAL_IRQS; i++) { |
170 | set_irq_chip(i, &mxc_tzic_chip.base); | 170 | irq_set_chip_and_handler(i, &mxc_tzic_chip.base, |
171 | set_irq_handler(i, handle_level_irq); | 171 | handle_level_irq); |
172 | set_irq_flags(i, IRQF_VALID); | 172 | set_irq_flags(i, IRQF_VALID); |
173 | } | 173 | } |
174 | 174 | ||
diff --git a/arch/arm/plat-nomadik/gpio.c b/arch/arm/plat-nomadik/gpio.c index 80643bc38e10..f49748eca1a3 100644 --- a/arch/arm/plat-nomadik/gpio.c +++ b/arch/arm/plat-nomadik/gpio.c | |||
@@ -54,6 +54,7 @@ struct nmk_gpio_chip { | |||
54 | u32 rwimsc; | 54 | u32 rwimsc; |
55 | u32 fwimsc; | 55 | u32 fwimsc; |
56 | u32 slpm; | 56 | u32 slpm; |
57 | u32 enabled; | ||
57 | }; | 58 | }; |
58 | 59 | ||
59 | static struct nmk_gpio_chip * | 60 | static struct nmk_gpio_chip * |
@@ -318,7 +319,7 @@ static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep) | |||
318 | struct nmk_gpio_chip *nmk_chip; | 319 | struct nmk_gpio_chip *nmk_chip; |
319 | int pin = PIN_NUM(cfgs[i]); | 320 | int pin = PIN_NUM(cfgs[i]); |
320 | 321 | ||
321 | nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(pin)); | 322 | nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(pin)); |
322 | if (!nmk_chip) { | 323 | if (!nmk_chip) { |
323 | ret = -EINVAL; | 324 | ret = -EINVAL; |
324 | break; | 325 | break; |
@@ -397,7 +398,7 @@ int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode) | |||
397 | struct nmk_gpio_chip *nmk_chip; | 398 | struct nmk_gpio_chip *nmk_chip; |
398 | unsigned long flags; | 399 | unsigned long flags; |
399 | 400 | ||
400 | nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); | 401 | nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); |
401 | if (!nmk_chip) | 402 | if (!nmk_chip) |
402 | return -EINVAL; | 403 | return -EINVAL; |
403 | 404 | ||
@@ -430,7 +431,7 @@ int nmk_gpio_set_pull(int gpio, enum nmk_gpio_pull pull) | |||
430 | struct nmk_gpio_chip *nmk_chip; | 431 | struct nmk_gpio_chip *nmk_chip; |
431 | unsigned long flags; | 432 | unsigned long flags; |
432 | 433 | ||
433 | nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); | 434 | nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); |
434 | if (!nmk_chip) | 435 | if (!nmk_chip) |
435 | return -EINVAL; | 436 | return -EINVAL; |
436 | 437 | ||
@@ -456,7 +457,7 @@ int nmk_gpio_set_mode(int gpio, int gpio_mode) | |||
456 | struct nmk_gpio_chip *nmk_chip; | 457 | struct nmk_gpio_chip *nmk_chip; |
457 | unsigned long flags; | 458 | unsigned long flags; |
458 | 459 | ||
459 | nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); | 460 | nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); |
460 | if (!nmk_chip) | 461 | if (!nmk_chip) |
461 | return -EINVAL; | 462 | return -EINVAL; |
462 | 463 | ||
@@ -473,7 +474,7 @@ int nmk_gpio_get_mode(int gpio) | |||
473 | struct nmk_gpio_chip *nmk_chip; | 474 | struct nmk_gpio_chip *nmk_chip; |
474 | u32 afunc, bfunc, bit; | 475 | u32 afunc, bfunc, bit; |
475 | 476 | ||
476 | nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); | 477 | nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio)); |
477 | if (!nmk_chip) | 478 | if (!nmk_chip) |
478 | return -EINVAL; | 479 | return -EINVAL; |
479 | 480 | ||
@@ -541,13 +542,6 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip, | |||
541 | static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, | 542 | static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, |
542 | int gpio, bool on) | 543 | int gpio, bool on) |
543 | { | 544 | { |
544 | #ifdef CONFIG_ARCH_U8500 | ||
545 | if (cpu_is_u8500v2()) { | ||
546 | __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base, | ||
547 | on ? NMK_GPIO_SLPM_WAKEUP_ENABLE | ||
548 | : NMK_GPIO_SLPM_WAKEUP_DISABLE); | ||
549 | } | ||
550 | #endif | ||
551 | __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on); | 545 | __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on); |
552 | } | 546 | } |
553 | 547 | ||
@@ -564,6 +558,11 @@ static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable) | |||
564 | if (!nmk_chip) | 558 | if (!nmk_chip) |
565 | return -EINVAL; | 559 | return -EINVAL; |
566 | 560 | ||
561 | if (enable) | ||
562 | nmk_chip->enabled |= bitmask; | ||
563 | else | ||
564 | nmk_chip->enabled &= ~bitmask; | ||
565 | |||
567 | spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); | 566 | spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); |
568 | spin_lock(&nmk_chip->lock); | 567 | spin_lock(&nmk_chip->lock); |
569 | 568 | ||
@@ -590,8 +589,6 @@ static void nmk_gpio_irq_unmask(struct irq_data *d) | |||
590 | 589 | ||
591 | static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on) | 590 | static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on) |
592 | { | 591 | { |
593 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
594 | bool enabled = !(desc->status & IRQ_DISABLED); | ||
595 | struct nmk_gpio_chip *nmk_chip; | 592 | struct nmk_gpio_chip *nmk_chip; |
596 | unsigned long flags; | 593 | unsigned long flags; |
597 | u32 bitmask; | 594 | u32 bitmask; |
@@ -606,7 +603,7 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on) | |||
606 | spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); | 603 | spin_lock_irqsave(&nmk_gpio_slpm_lock, flags); |
607 | spin_lock(&nmk_chip->lock); | 604 | spin_lock(&nmk_chip->lock); |
608 | 605 | ||
609 | if (!enabled) | 606 | if (!(nmk_chip->enabled & bitmask)) |
610 | __nmk_gpio_set_wake(nmk_chip, gpio, on); | 607 | __nmk_gpio_set_wake(nmk_chip, gpio, on); |
611 | 608 | ||
612 | if (on) | 609 | if (on) |
@@ -622,9 +619,7 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on) | |||
622 | 619 | ||
623 | static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type) | 620 | static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type) |
624 | { | 621 | { |
625 | struct irq_desc *desc = irq_to_desc(d->irq); | 622 | bool enabled, wake = irqd_is_wakeup_set(d); |
626 | bool enabled = !(desc->status & IRQ_DISABLED); | ||
627 | bool wake = desc->wake_depth; | ||
628 | int gpio; | 623 | int gpio; |
629 | struct nmk_gpio_chip *nmk_chip; | 624 | struct nmk_gpio_chip *nmk_chip; |
630 | unsigned long flags; | 625 | unsigned long flags; |
@@ -641,6 +636,8 @@ static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type) | |||
641 | if (type & IRQ_TYPE_LEVEL_LOW) | 636 | if (type & IRQ_TYPE_LEVEL_LOW) |
642 | return -EINVAL; | 637 | return -EINVAL; |
643 | 638 | ||
639 | enabled = nmk_chip->enabled & bitmask; | ||
640 | |||
644 | spin_lock_irqsave(&nmk_chip->lock, flags); | 641 | spin_lock_irqsave(&nmk_chip->lock, flags); |
645 | 642 | ||
646 | if (enabled) | 643 | if (enabled) |
@@ -681,7 +678,7 @@ static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc, | |||
681 | u32 status) | 678 | u32 status) |
682 | { | 679 | { |
683 | struct nmk_gpio_chip *nmk_chip; | 680 | struct nmk_gpio_chip *nmk_chip; |
684 | struct irq_chip *host_chip = get_irq_chip(irq); | 681 | struct irq_chip *host_chip = irq_get_chip(irq); |
685 | unsigned int first_irq; | 682 | unsigned int first_irq; |
686 | 683 | ||
687 | if (host_chip->irq_mask_ack) | 684 | if (host_chip->irq_mask_ack) |
@@ -692,7 +689,7 @@ static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc, | |||
692 | host_chip->irq_ack(&desc->irq_data); | 689 | host_chip->irq_ack(&desc->irq_data); |
693 | } | 690 | } |
694 | 691 | ||
695 | nmk_chip = get_irq_data(irq); | 692 | nmk_chip = irq_get_handler_data(irq); |
696 | first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base); | 693 | first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base); |
697 | while (status) { | 694 | while (status) { |
698 | int bit = __ffs(status); | 695 | int bit = __ffs(status); |
@@ -706,7 +703,7 @@ static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc, | |||
706 | 703 | ||
707 | static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | 704 | static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) |
708 | { | 705 | { |
709 | struct nmk_gpio_chip *nmk_chip = get_irq_data(irq); | 706 | struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq); |
710 | u32 status = readl(nmk_chip->addr + NMK_GPIO_IS); | 707 | u32 status = readl(nmk_chip->addr + NMK_GPIO_IS); |
711 | 708 | ||
712 | __nmk_gpio_irq_handler(irq, desc, status); | 709 | __nmk_gpio_irq_handler(irq, desc, status); |
@@ -715,7 +712,7 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
715 | static void nmk_gpio_secondary_irq_handler(unsigned int irq, | 712 | static void nmk_gpio_secondary_irq_handler(unsigned int irq, |
716 | struct irq_desc *desc) | 713 | struct irq_desc *desc) |
717 | { | 714 | { |
718 | struct nmk_gpio_chip *nmk_chip = get_irq_data(irq); | 715 | struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq); |
719 | u32 status = nmk_chip->get_secondary_status(nmk_chip->bank); | 716 | u32 status = nmk_chip->get_secondary_status(nmk_chip->bank); |
720 | 717 | ||
721 | __nmk_gpio_irq_handler(irq, desc, status); | 718 | __nmk_gpio_irq_handler(irq, desc, status); |
@@ -728,20 +725,20 @@ static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip) | |||
728 | 725 | ||
729 | first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base); | 726 | first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base); |
730 | for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) { | 727 | for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) { |
731 | set_irq_chip(i, &nmk_gpio_irq_chip); | 728 | irq_set_chip_and_handler(i, &nmk_gpio_irq_chip, |
732 | set_irq_handler(i, handle_edge_irq); | 729 | handle_edge_irq); |
733 | set_irq_flags(i, IRQF_VALID); | 730 | set_irq_flags(i, IRQF_VALID); |
734 | set_irq_chip_data(i, nmk_chip); | 731 | irq_set_chip_data(i, nmk_chip); |
735 | set_irq_type(i, IRQ_TYPE_EDGE_FALLING); | 732 | irq_set_irq_type(i, IRQ_TYPE_EDGE_FALLING); |
736 | } | 733 | } |
737 | 734 | ||
738 | set_irq_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler); | 735 | irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler); |
739 | set_irq_data(nmk_chip->parent_irq, nmk_chip); | 736 | irq_set_handler_data(nmk_chip->parent_irq, nmk_chip); |
740 | 737 | ||
741 | if (nmk_chip->secondary_parent_irq >= 0) { | 738 | if (nmk_chip->secondary_parent_irq >= 0) { |
742 | set_irq_chained_handler(nmk_chip->secondary_parent_irq, | 739 | irq_set_chained_handler(nmk_chip->secondary_parent_irq, |
743 | nmk_gpio_secondary_irq_handler); | 740 | nmk_gpio_secondary_irq_handler); |
744 | set_irq_data(nmk_chip->secondary_parent_irq, nmk_chip); | 741 | irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip); |
745 | } | 742 | } |
746 | 743 | ||
747 | return 0; | 744 | return 0; |
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 971d18636942..d2adcdda23cf 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c | |||
@@ -755,18 +755,12 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) | |||
755 | bank = irq_data_get_irq_chip_data(d); | 755 | bank = irq_data_get_irq_chip_data(d); |
756 | spin_lock_irqsave(&bank->lock, flags); | 756 | spin_lock_irqsave(&bank->lock, flags); |
757 | retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type); | 757 | retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type); |
758 | if (retval == 0) { | ||
759 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
760 | |||
761 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | ||
762 | desc->status |= type; | ||
763 | } | ||
764 | spin_unlock_irqrestore(&bank->lock, flags); | 758 | spin_unlock_irqrestore(&bank->lock, flags); |
765 | 759 | ||
766 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 760 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
767 | __set_irq_handler_unlocked(d->irq, handle_level_irq); | 761 | __irq_set_handler_locked(d->irq, handle_level_irq); |
768 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 762 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
769 | __set_irq_handler_unlocked(d->irq, handle_edge_irq); | 763 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
770 | 764 | ||
771 | return retval; | 765 | return retval; |
772 | } | 766 | } |
@@ -1146,7 +1140,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
1146 | 1140 | ||
1147 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 1141 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
1148 | 1142 | ||
1149 | bank = get_irq_data(irq); | 1143 | bank = irq_get_handler_data(irq); |
1150 | #ifdef CONFIG_ARCH_OMAP1 | 1144 | #ifdef CONFIG_ARCH_OMAP1 |
1151 | if (bank->method == METHOD_MPUIO) | 1145 | if (bank->method == METHOD_MPUIO) |
1152 | isr_reg = bank->base + | 1146 | isr_reg = bank->base + |
@@ -1270,8 +1264,7 @@ static void gpio_unmask_irq(struct irq_data *d) | |||
1270 | unsigned int gpio = d->irq - IH_GPIO_BASE; | 1264 | unsigned int gpio = d->irq - IH_GPIO_BASE; |
1271 | struct gpio_bank *bank = irq_data_get_irq_chip_data(d); | 1265 | struct gpio_bank *bank = irq_data_get_irq_chip_data(d); |
1272 | unsigned int irq_mask = 1 << get_gpio_index(gpio); | 1266 | unsigned int irq_mask = 1 << get_gpio_index(gpio); |
1273 | struct irq_desc *desc = irq_to_desc(d->irq); | 1267 | u32 trigger = irqd_get_trigger_type(d); |
1274 | u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK; | ||
1275 | 1268 | ||
1276 | if (trigger) | 1269 | if (trigger) |
1277 | _set_gpio_triggering(bank, get_gpio_index(gpio), trigger); | 1270 | _set_gpio_triggering(bank, get_gpio_index(gpio), trigger); |
@@ -1672,19 +1665,17 @@ static void __init omap_gpio_chip_init(struct gpio_bank *bank) | |||
1672 | 1665 | ||
1673 | for (j = bank->virtual_irq_start; | 1666 | for (j = bank->virtual_irq_start; |
1674 | j < bank->virtual_irq_start + bank_width; j++) { | 1667 | j < bank->virtual_irq_start + bank_width; j++) { |
1675 | struct irq_desc *d = irq_to_desc(j); | 1668 | irq_set_lockdep_class(j, &gpio_lock_class); |
1676 | 1669 | irq_set_chip_data(j, bank); | |
1677 | lockdep_set_class(&d->lock, &gpio_lock_class); | ||
1678 | set_irq_chip_data(j, bank); | ||
1679 | if (bank_is_mpuio(bank)) | 1670 | if (bank_is_mpuio(bank)) |
1680 | set_irq_chip(j, &mpuio_irq_chip); | 1671 | irq_set_chip(j, &mpuio_irq_chip); |
1681 | else | 1672 | else |
1682 | set_irq_chip(j, &gpio_irq_chip); | 1673 | irq_set_chip(j, &gpio_irq_chip); |
1683 | set_irq_handler(j, handle_simple_irq); | 1674 | irq_set_handler(j, handle_simple_irq); |
1684 | set_irq_flags(j, IRQF_VALID); | 1675 | set_irq_flags(j, IRQF_VALID); |
1685 | } | 1676 | } |
1686 | set_irq_chained_handler(bank->irq, gpio_irq_handler); | 1677 | irq_set_chained_handler(bank->irq, gpio_irq_handler); |
1687 | set_irq_data(bank->irq, bank); | 1678 | irq_set_handler_data(bank->irq, bank); |
1688 | } | 1679 | } |
1689 | 1680 | ||
1690 | static int __devinit omap_gpio_probe(struct platform_device *pdev) | 1681 | static int __devinit omap_gpio_probe(struct platform_device *pdev) |
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 078894bc3b9a..a431a138f402 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c | |||
@@ -324,9 +324,8 @@ EXPORT_SYMBOL(orion_gpio_set_blink); | |||
324 | static void gpio_irq_ack(struct irq_data *d) | 324 | static void gpio_irq_ack(struct irq_data *d) |
325 | { | 325 | { |
326 | struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); | 326 | struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); |
327 | int type; | 327 | int type = irqd_get_trigger_type(d); |
328 | 328 | ||
329 | type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK; | ||
330 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { | 329 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { |
331 | int pin = d->irq - ochip->secondary_irq_base; | 330 | int pin = d->irq - ochip->secondary_irq_base; |
332 | 331 | ||
@@ -337,11 +336,10 @@ static void gpio_irq_ack(struct irq_data *d) | |||
337 | static void gpio_irq_mask(struct irq_data *d) | 336 | static void gpio_irq_mask(struct irq_data *d) |
338 | { | 337 | { |
339 | struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); | 338 | struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); |
340 | int type; | 339 | int type = irqd_get_trigger_type(d); |
341 | void __iomem *reg; | 340 | void __iomem *reg; |
342 | int pin; | 341 | int pin; |
343 | 342 | ||
344 | type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK; | ||
345 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) | 343 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) |
346 | reg = GPIO_EDGE_MASK(ochip); | 344 | reg = GPIO_EDGE_MASK(ochip); |
347 | else | 345 | else |
@@ -355,11 +353,10 @@ static void gpio_irq_mask(struct irq_data *d) | |||
355 | static void gpio_irq_unmask(struct irq_data *d) | 353 | static void gpio_irq_unmask(struct irq_data *d) |
356 | { | 354 | { |
357 | struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); | 355 | struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d); |
358 | int type; | 356 | int type = irqd_get_trigger_type(d); |
359 | void __iomem *reg; | 357 | void __iomem *reg; |
360 | int pin; | 358 | int pin; |
361 | 359 | ||
362 | type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK; | ||
363 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) | 360 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) |
364 | reg = GPIO_EDGE_MASK(ochip); | 361 | reg = GPIO_EDGE_MASK(ochip); |
365 | else | 362 | else |
@@ -389,9 +386,9 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) | |||
389 | * Set edge/level type. | 386 | * Set edge/level type. |
390 | */ | 387 | */ |
391 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { | 388 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { |
392 | set_irq_handler(d->irq, handle_edge_irq); | 389 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
393 | } else if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { | 390 | } else if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { |
394 | set_irq_handler(d->irq, handle_level_irq); | 391 | __irq_set_handler_locked(d->irq, handle_level_irq); |
395 | } else { | 392 | } else { |
396 | printk(KERN_ERR "failed to set irq=%d (type=%d)\n", | 393 | printk(KERN_ERR "failed to set irq=%d (type=%d)\n", |
397 | d->irq, type); | 394 | d->irq, type); |
@@ -477,10 +474,10 @@ void __init orion_gpio_init(int gpio_base, int ngpio, | |||
477 | for (i = 0; i < ngpio; i++) { | 474 | for (i = 0; i < ngpio; i++) { |
478 | unsigned int irq = secondary_irq_base + i; | 475 | unsigned int irq = secondary_irq_base + i; |
479 | 476 | ||
480 | set_irq_chip(irq, &orion_gpio_irq_chip); | 477 | irq_set_chip_and_handler(irq, &orion_gpio_irq_chip, |
481 | set_irq_handler(irq, handle_level_irq); | 478 | handle_level_irq); |
482 | set_irq_chip_data(irq, ochip); | 479 | irq_set_chip_data(irq, ochip); |
483 | irq_desc[irq].status |= IRQ_LEVEL; | 480 | irq_set_status_flags(irq, IRQ_LEVEL); |
484 | set_irq_flags(irq, IRQF_VALID); | 481 | set_irq_flags(irq, IRQF_VALID); |
485 | } | 482 | } |
486 | } | 483 | } |
@@ -488,7 +485,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio, | |||
488 | void orion_gpio_irq_handler(int pinoff) | 485 | void orion_gpio_irq_handler(int pinoff) |
489 | { | 486 | { |
490 | struct orion_gpio_chip *ochip; | 487 | struct orion_gpio_chip *ochip; |
491 | u32 cause; | 488 | u32 cause, type; |
492 | int i; | 489 | int i; |
493 | 490 | ||
494 | ochip = orion_gpio_chip_find(pinoff); | 491 | ochip = orion_gpio_chip_find(pinoff); |
@@ -500,15 +497,14 @@ void orion_gpio_irq_handler(int pinoff) | |||
500 | 497 | ||
501 | for (i = 0; i < ochip->chip.ngpio; i++) { | 498 | for (i = 0; i < ochip->chip.ngpio; i++) { |
502 | int irq; | 499 | int irq; |
503 | struct irq_desc *desc; | ||
504 | 500 | ||
505 | irq = ochip->secondary_irq_base + i; | 501 | irq = ochip->secondary_irq_base + i; |
506 | 502 | ||
507 | if (!(cause & (1 << i))) | 503 | if (!(cause & (1 << i))) |
508 | continue; | 504 | continue; |
509 | 505 | ||
510 | desc = irq_desc + irq; | 506 | type = irqd_get_trigger_type(irq_get_irq_data(irq)); |
511 | if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { | 507 | if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { |
512 | /* Swap polarity (race with GPIO line) */ | 508 | /* Swap polarity (race with GPIO line) */ |
513 | u32 polarity; | 509 | u32 polarity; |
514 | 510 | ||
@@ -516,7 +512,6 @@ void orion_gpio_irq_handler(int pinoff) | |||
516 | polarity ^= 1 << i; | 512 | polarity ^= 1 << i; |
517 | writel(polarity, GPIO_IN_POL(ochip)); | 513 | writel(polarity, GPIO_IN_POL(ochip)); |
518 | } | 514 | } |
519 | 515 | generic_handle_irq(irq); | |
520 | desc_handle_irq(irq, desc); | ||
521 | } | 516 | } |
522 | } | 517 | } |
diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c index 7d0c7eb59f09..d8d638e09f8f 100644 --- a/arch/arm/plat-orion/irq.c +++ b/arch/arm/plat-orion/irq.c | |||
@@ -56,10 +56,10 @@ void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr) | |||
56 | for (i = 0; i < 32; i++) { | 56 | for (i = 0; i < 32; i++) { |
57 | unsigned int irq = irq_start + i; | 57 | unsigned int irq = irq_start + i; |
58 | 58 | ||
59 | set_irq_chip(irq, &orion_irq_chip); | 59 | irq_set_chip_and_handler(irq, &orion_irq_chip, |
60 | set_irq_chip_data(irq, maskaddr); | 60 | handle_level_irq); |
61 | set_irq_handler(irq, handle_level_irq); | 61 | irq_set_chip_data(irq, maskaddr); |
62 | irq_desc[irq].status |= IRQ_LEVEL; | 62 | irq_set_status_flags(irq, IRQ_LEVEL); |
63 | set_irq_flags(irq, IRQF_VALID); | 63 | set_irq_flags(irq, IRQF_VALID); |
64 | } | 64 | } |
65 | } | 65 | } |
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c index e7de6ae2a1e8..dce088f45678 100644 --- a/arch/arm/plat-pxa/gpio.c +++ b/arch/arm/plat-pxa/gpio.c | |||
@@ -284,13 +284,13 @@ void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn) | |||
284 | } | 284 | } |
285 | 285 | ||
286 | for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) { | 286 | for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) { |
287 | set_irq_chip(irq, &pxa_muxed_gpio_chip); | 287 | irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip, |
288 | set_irq_handler(irq, handle_edge_irq); | 288 | handle_edge_irq); |
289 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 289 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
290 | } | 290 | } |
291 | 291 | ||
292 | /* Install handler for GPIO>=2 edge detect interrupts */ | 292 | /* Install handler for GPIO>=2 edge detect interrupts */ |
293 | set_irq_chained_handler(mux_irq, pxa_gpio_demux_handler); | 293 | irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler); |
294 | pxa_muxed_gpio_chip.irq_set_wake = fn; | 294 | pxa_muxed_gpio_chip.irq_set_wake = fn; |
295 | } | 295 | } |
296 | 296 | ||
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c index 4434cb56bd9a..9aee7e1668b1 100644 --- a/arch/arm/plat-s3c24xx/irq.c +++ b/arch/arm/plat-s3c24xx/irq.c | |||
@@ -592,8 +592,8 @@ void __init s3c24xx_init_irq(void) | |||
592 | case IRQ_UART1: | 592 | case IRQ_UART1: |
593 | case IRQ_UART2: | 593 | case IRQ_UART2: |
594 | case IRQ_ADCPARENT: | 594 | case IRQ_ADCPARENT: |
595 | set_irq_chip(irqno, &s3c_irq_level_chip); | 595 | irq_set_chip_and_handler(irqno, &s3c_irq_level_chip, |
596 | set_irq_handler(irqno, handle_level_irq); | 596 | handle_level_irq); |
597 | break; | 597 | break; |
598 | 598 | ||
599 | case IRQ_RESERVED6: | 599 | case IRQ_RESERVED6: |
@@ -603,35 +603,35 @@ void __init s3c24xx_init_irq(void) | |||
603 | 603 | ||
604 | default: | 604 | default: |
605 | //irqdbf("registering irq %d (s3c irq)\n", irqno); | 605 | //irqdbf("registering irq %d (s3c irq)\n", irqno); |
606 | set_irq_chip(irqno, &s3c_irq_chip); | 606 | irq_set_chip_and_handler(irqno, &s3c_irq_chip, |
607 | set_irq_handler(irqno, handle_edge_irq); | 607 | handle_edge_irq); |
608 | set_irq_flags(irqno, IRQF_VALID); | 608 | set_irq_flags(irqno, IRQF_VALID); |
609 | } | 609 | } |
610 | } | 610 | } |
611 | 611 | ||
612 | /* setup the cascade irq handlers */ | 612 | /* setup the cascade irq handlers */ |
613 | 613 | ||
614 | set_irq_chained_handler(IRQ_EINT4t7, s3c_irq_demux_extint4t7); | 614 | irq_set_chained_handler(IRQ_EINT4t7, s3c_irq_demux_extint4t7); |
615 | set_irq_chained_handler(IRQ_EINT8t23, s3c_irq_demux_extint8); | 615 | irq_set_chained_handler(IRQ_EINT8t23, s3c_irq_demux_extint8); |
616 | 616 | ||
617 | set_irq_chained_handler(IRQ_UART0, s3c_irq_demux_uart0); | 617 | irq_set_chained_handler(IRQ_UART0, s3c_irq_demux_uart0); |
618 | set_irq_chained_handler(IRQ_UART1, s3c_irq_demux_uart1); | 618 | irq_set_chained_handler(IRQ_UART1, s3c_irq_demux_uart1); |
619 | set_irq_chained_handler(IRQ_UART2, s3c_irq_demux_uart2); | 619 | irq_set_chained_handler(IRQ_UART2, s3c_irq_demux_uart2); |
620 | set_irq_chained_handler(IRQ_ADCPARENT, s3c_irq_demux_adc); | 620 | irq_set_chained_handler(IRQ_ADCPARENT, s3c_irq_demux_adc); |
621 | 621 | ||
622 | /* external interrupts */ | 622 | /* external interrupts */ |
623 | 623 | ||
624 | for (irqno = IRQ_EINT0; irqno <= IRQ_EINT3; irqno++) { | 624 | for (irqno = IRQ_EINT0; irqno <= IRQ_EINT3; irqno++) { |
625 | irqdbf("registering irq %d (ext int)\n", irqno); | 625 | irqdbf("registering irq %d (ext int)\n", irqno); |
626 | set_irq_chip(irqno, &s3c_irq_eint0t4); | 626 | irq_set_chip_and_handler(irqno, &s3c_irq_eint0t4, |
627 | set_irq_handler(irqno, handle_edge_irq); | 627 | handle_edge_irq); |
628 | set_irq_flags(irqno, IRQF_VALID); | 628 | set_irq_flags(irqno, IRQF_VALID); |
629 | } | 629 | } |
630 | 630 | ||
631 | for (irqno = IRQ_EINT4; irqno <= IRQ_EINT23; irqno++) { | 631 | for (irqno = IRQ_EINT4; irqno <= IRQ_EINT23; irqno++) { |
632 | irqdbf("registering irq %d (extended s3c irq)\n", irqno); | 632 | irqdbf("registering irq %d (extended s3c irq)\n", irqno); |
633 | set_irq_chip(irqno, &s3c_irqext_chip); | 633 | irq_set_chip_and_handler(irqno, &s3c_irqext_chip, |
634 | set_irq_handler(irqno, handle_edge_irq); | 634 | handle_edge_irq); |
635 | set_irq_flags(irqno, IRQF_VALID); | 635 | set_irq_flags(irqno, IRQF_VALID); |
636 | } | 636 | } |
637 | 637 | ||
@@ -641,29 +641,28 @@ void __init s3c24xx_init_irq(void) | |||
641 | 641 | ||
642 | for (irqno = IRQ_S3CUART_RX0; irqno <= IRQ_S3CUART_ERR0; irqno++) { | 642 | for (irqno = IRQ_S3CUART_RX0; irqno <= IRQ_S3CUART_ERR0; irqno++) { |
643 | irqdbf("registering irq %d (s3c uart0 irq)\n", irqno); | 643 | irqdbf("registering irq %d (s3c uart0 irq)\n", irqno); |
644 | set_irq_chip(irqno, &s3c_irq_uart0); | 644 | irq_set_chip_and_handler(irqno, &s3c_irq_uart0, |
645 | set_irq_handler(irqno, handle_level_irq); | 645 | handle_level_irq); |
646 | set_irq_flags(irqno, IRQF_VALID); | 646 | set_irq_flags(irqno, IRQF_VALID); |
647 | } | 647 | } |
648 | 648 | ||
649 | for (irqno = IRQ_S3CUART_RX1; irqno <= IRQ_S3CUART_ERR1; irqno++) { | 649 | for (irqno = IRQ_S3CUART_RX1; irqno <= IRQ_S3CUART_ERR1; irqno++) { |
650 | irqdbf("registering irq %d (s3c uart1 irq)\n", irqno); | 650 | irqdbf("registering irq %d (s3c uart1 irq)\n", irqno); |
651 | set_irq_chip(irqno, &s3c_irq_uart1); | 651 | irq_set_chip_and_handler(irqno, &s3c_irq_uart1, |
652 | set_irq_handler(irqno, handle_level_irq); | 652 | handle_level_irq); |
653 | set_irq_flags(irqno, IRQF_VALID); | 653 | set_irq_flags(irqno, IRQF_VALID); |
654 | } | 654 | } |
655 | 655 | ||
656 | for (irqno = IRQ_S3CUART_RX2; irqno <= IRQ_S3CUART_ERR2; irqno++) { | 656 | for (irqno = IRQ_S3CUART_RX2; irqno <= IRQ_S3CUART_ERR2; irqno++) { |
657 | irqdbf("registering irq %d (s3c uart2 irq)\n", irqno); | 657 | irqdbf("registering irq %d (s3c uart2 irq)\n", irqno); |
658 | set_irq_chip(irqno, &s3c_irq_uart2); | 658 | irq_set_chip_and_handler(irqno, &s3c_irq_uart2, |
659 | set_irq_handler(irqno, handle_level_irq); | 659 | handle_level_irq); |
660 | set_irq_flags(irqno, IRQF_VALID); | 660 | set_irq_flags(irqno, IRQF_VALID); |
661 | } | 661 | } |
662 | 662 | ||
663 | for (irqno = IRQ_TC; irqno <= IRQ_ADC; irqno++) { | 663 | for (irqno = IRQ_TC; irqno <= IRQ_ADC; irqno++) { |
664 | irqdbf("registering irq %d (s3c adc irq)\n", irqno); | 664 | irqdbf("registering irq %d (s3c adc irq)\n", irqno); |
665 | set_irq_chip(irqno, &s3c_irq_adc); | 665 | irq_set_chip_and_handler(irqno, &s3c_irq_adc, handle_edge_irq); |
666 | set_irq_handler(irqno, handle_edge_irq); | ||
667 | set_irq_flags(irqno, IRQF_VALID); | 666 | set_irq_flags(irqno, IRQF_VALID); |
668 | } | 667 | } |
669 | 668 | ||
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c index c3bfe9b13acf..5cf5e721e6ca 100644 --- a/arch/arm/plat-s5p/cpu.c +++ b/arch/arm/plat-s5p/cpu.c | |||
@@ -39,7 +39,7 @@ static const char name_exynos4210[] = "EXYNOS4210"; | |||
39 | static struct cpu_table cpu_ids[] __initdata = { | 39 | static struct cpu_table cpu_ids[] __initdata = { |
40 | { | 40 | { |
41 | .idcode = 0x56440100, | 41 | .idcode = 0x56440100, |
42 | .idmask = 0xffffff00, | 42 | .idmask = 0xfffff000, |
43 | .map_io = s5p6440_map_io, | 43 | .map_io = s5p6440_map_io, |
44 | .init_clocks = s5p6440_init_clocks, | 44 | .init_clocks = s5p6440_init_clocks, |
45 | .init_uarts = s5p6440_init_uarts, | 45 | .init_uarts = s5p6440_init_uarts, |
@@ -47,7 +47,7 @@ static struct cpu_table cpu_ids[] __initdata = { | |||
47 | .name = name_s5p6440, | 47 | .name = name_s5p6440, |
48 | }, { | 48 | }, { |
49 | .idcode = 0x36442000, | 49 | .idcode = 0x36442000, |
50 | .idmask = 0xffffff00, | 50 | .idmask = 0xfffff000, |
51 | .map_io = s5p6442_map_io, | 51 | .map_io = s5p6442_map_io, |
52 | .init_clocks = s5p6442_init_clocks, | 52 | .init_clocks = s5p6442_init_clocks, |
53 | .init_uarts = s5p6442_init_uarts, | 53 | .init_uarts = s5p6442_init_uarts, |
@@ -55,7 +55,7 @@ static struct cpu_table cpu_ids[] __initdata = { | |||
55 | .name = name_s5p6442, | 55 | .name = name_s5p6442, |
56 | }, { | 56 | }, { |
57 | .idcode = 0x36450000, | 57 | .idcode = 0x36450000, |
58 | .idmask = 0xffffff00, | 58 | .idmask = 0xfffff000, |
59 | .map_io = s5p6450_map_io, | 59 | .map_io = s5p6450_map_io, |
60 | .init_clocks = s5p6450_init_clocks, | 60 | .init_clocks = s5p6450_init_clocks, |
61 | .init_uarts = s5p6450_init_uarts, | 61 | .init_uarts = s5p6450_init_uarts, |
@@ -79,7 +79,7 @@ static struct cpu_table cpu_ids[] __initdata = { | |||
79 | .name = name_s5pv210, | 79 | .name = name_s5pv210, |
80 | }, { | 80 | }, { |
81 | .idcode = 0x43210000, | 81 | .idcode = 0x43210000, |
82 | .idmask = 0xfffff000, | 82 | .idmask = 0xfffe0000, |
83 | .map_io = exynos4_map_io, | 83 | .map_io = exynos4_map_io, |
84 | .init_clocks = exynos4_init_clocks, | 84 | .init_clocks = exynos4_init_clocks, |
85 | .init_uarts = exynos4_init_uarts, | 85 | .init_uarts = exynos4_init_uarts, |
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c index 225aa25405db..b5bb774985b0 100644 --- a/arch/arm/plat-s5p/irq-eint.c +++ b/arch/arm/plat-s5p/irq-eint.c | |||
@@ -205,15 +205,14 @@ int __init s5p_init_irq_eint(void) | |||
205 | int irq; | 205 | int irq; |
206 | 206 | ||
207 | for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++) | 207 | for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++) |
208 | set_irq_chip(irq, &s5p_irq_vic_eint); | 208 | irq_set_chip(irq, &s5p_irq_vic_eint); |
209 | 209 | ||
210 | for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) { | 210 | for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) { |
211 | set_irq_chip(irq, &s5p_irq_eint); | 211 | irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq); |
212 | set_irq_handler(irq, handle_level_irq); | ||
213 | set_irq_flags(irq, IRQF_VALID); | 212 | set_irq_flags(irq, IRQF_VALID); |
214 | } | 213 | } |
215 | 214 | ||
216 | set_irq_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31); | 215 | irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31); |
217 | return 0; | 216 | return 0; |
218 | } | 217 | } |
219 | 218 | ||
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c index cd87d3256e03..46dd078147d8 100644 --- a/arch/arm/plat-s5p/irq-gpioint.c +++ b/arch/arm/plat-s5p/irq-gpioint.c | |||
@@ -43,13 +43,13 @@ LIST_HEAD(banks); | |||
43 | 43 | ||
44 | static int s5p_gpioint_get_offset(struct irq_data *data) | 44 | static int s5p_gpioint_get_offset(struct irq_data *data) |
45 | { | 45 | { |
46 | struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); | 46 | struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); |
47 | return data->irq - chip->irq_base; | 47 | return data->irq - chip->irq_base; |
48 | } | 48 | } |
49 | 49 | ||
50 | static void s5p_gpioint_ack(struct irq_data *data) | 50 | static void s5p_gpioint_ack(struct irq_data *data) |
51 | { | 51 | { |
52 | struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); | 52 | struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); |
53 | int group, offset, pend_offset; | 53 | int group, offset, pend_offset; |
54 | unsigned int value; | 54 | unsigned int value; |
55 | 55 | ||
@@ -64,7 +64,7 @@ static void s5p_gpioint_ack(struct irq_data *data) | |||
64 | 64 | ||
65 | static void s5p_gpioint_mask(struct irq_data *data) | 65 | static void s5p_gpioint_mask(struct irq_data *data) |
66 | { | 66 | { |
67 | struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); | 67 | struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); |
68 | int group, offset, mask_offset; | 68 | int group, offset, mask_offset; |
69 | unsigned int value; | 69 | unsigned int value; |
70 | 70 | ||
@@ -79,7 +79,7 @@ static void s5p_gpioint_mask(struct irq_data *data) | |||
79 | 79 | ||
80 | static void s5p_gpioint_unmask(struct irq_data *data) | 80 | static void s5p_gpioint_unmask(struct irq_data *data) |
81 | { | 81 | { |
82 | struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); | 82 | struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); |
83 | int group, offset, mask_offset; | 83 | int group, offset, mask_offset; |
84 | unsigned int value; | 84 | unsigned int value; |
85 | 85 | ||
@@ -100,7 +100,7 @@ static void s5p_gpioint_mask_ack(struct irq_data *data) | |||
100 | 100 | ||
101 | static int s5p_gpioint_set_type(struct irq_data *data, unsigned int type) | 101 | static int s5p_gpioint_set_type(struct irq_data *data, unsigned int type) |
102 | { | 102 | { |
103 | struct s3c_gpio_chip *chip = irq_data_get_irq_data(data); | 103 | struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data); |
104 | int group, offset, con_offset; | 104 | int group, offset, con_offset; |
105 | unsigned int value; | 105 | unsigned int value; |
106 | 106 | ||
@@ -149,7 +149,7 @@ static struct irq_chip s5p_gpioint = { | |||
149 | 149 | ||
150 | static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc) | 150 | static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc) |
151 | { | 151 | { |
152 | struct s5p_gpioint_bank *bank = get_irq_data(irq); | 152 | struct s5p_gpioint_bank *bank = irq_get_handler_data(irq); |
153 | int group, pend_offset, mask_offset; | 153 | int group, pend_offset, mask_offset; |
154 | unsigned int pend, mask; | 154 | unsigned int pend, mask; |
155 | 155 | ||
@@ -200,8 +200,8 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) | |||
200 | if (!bank->chips) | 200 | if (!bank->chips) |
201 | return -ENOMEM; | 201 | return -ENOMEM; |
202 | 202 | ||
203 | set_irq_chained_handler(bank->irq, s5p_gpioint_handler); | 203 | irq_set_chained_handler(bank->irq, s5p_gpioint_handler); |
204 | set_irq_data(bank->irq, bank); | 204 | irq_set_handler_data(bank->irq, bank); |
205 | bank->handler = s5p_gpioint_handler; | 205 | bank->handler = s5p_gpioint_handler; |
206 | printk(KERN_INFO "Registered chained gpio int handler for interrupt %d.\n", | 206 | printk(KERN_INFO "Registered chained gpio int handler for interrupt %d.\n", |
207 | bank->irq); | 207 | bank->irq); |
@@ -219,9 +219,9 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) | |||
219 | bank->chips[group - bank->start] = chip; | 219 | bank->chips[group - bank->start] = chip; |
220 | for (i = 0; i < chip->chip.ngpio; i++) { | 220 | for (i = 0; i < chip->chip.ngpio; i++) { |
221 | irq = chip->irq_base + i; | 221 | irq = chip->irq_base + i; |
222 | set_irq_chip(irq, &s5p_gpioint); | 222 | irq_set_chip(irq, &s5p_gpioint); |
223 | set_irq_data(irq, chip); | 223 | irq_set_handler_data(irq, chip); |
224 | set_irq_handler(irq, handle_level_irq); | 224 | irq_set_handler(irq, handle_level_irq); |
225 | set_irq_flags(irq, IRQF_VALID); | 225 | set_irq_flags(irq, IRQF_VALID); |
226 | } | 226 | } |
227 | return 0; | 227 | return 0; |
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c index 6790edfaca6f..79d10fca9090 100644 --- a/arch/arm/plat-samsung/init.c +++ b/arch/arm/plat-samsung/init.c | |||
@@ -36,7 +36,7 @@ static struct cpu_table * __init s3c_lookup_cpu(unsigned long idcode, | |||
36 | unsigned int count) | 36 | unsigned int count) |
37 | { | 37 | { |
38 | for (; count != 0; count--, tab++) { | 38 | for (; count != 0; count--, tab++) { |
39 | if ((idcode & tab->idmask) == tab->idcode) | 39 | if ((idcode & tab->idmask) == (tab->idcode & tab->idmask)) |
40 | return tab; | 40 | return tab; |
41 | } | 41 | } |
42 | 42 | ||
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c index 4e770355ccbc..4d4e571af553 100644 --- a/arch/arm/plat-samsung/irq-uart.c +++ b/arch/arm/plat-samsung/irq-uart.c | |||
@@ -107,7 +107,6 @@ static struct irq_chip s3c_irq_uart = { | |||
107 | 107 | ||
108 | static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) | 108 | static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) |
109 | { | 109 | { |
110 | struct irq_desc *desc = irq_to_desc(uirq->parent_irq); | ||
111 | void __iomem *reg_base = uirq->regs; | 110 | void __iomem *reg_base = uirq->regs; |
112 | unsigned int irq; | 111 | unsigned int irq; |
113 | int offs; | 112 | int offs; |
@@ -118,14 +117,13 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) | |||
118 | for (offs = 0; offs < 3; offs++) { | 117 | for (offs = 0; offs < 3; offs++) { |
119 | irq = uirq->base_irq + offs; | 118 | irq = uirq->base_irq + offs; |
120 | 119 | ||
121 | set_irq_chip(irq, &s3c_irq_uart); | 120 | irq_set_chip_and_handler(irq, &s3c_irq_uart, handle_level_irq); |
122 | set_irq_chip_data(irq, uirq); | 121 | irq_set_chip_data(irq, uirq); |
123 | set_irq_handler(irq, handle_level_irq); | ||
124 | set_irq_flags(irq, IRQF_VALID); | 122 | set_irq_flags(irq, IRQF_VALID); |
125 | } | 123 | } |
126 | 124 | ||
127 | desc->irq_data.handler_data = uirq; | 125 | irq_set_handler_data(uirq->parent_irq, uirq); |
128 | set_irq_chained_handler(uirq->parent_irq, s3c_irq_demux_uart); | 126 | irq_set_chained_handler(uirq->parent_irq, s3c_irq_demux_uart); |
129 | } | 127 | } |
130 | 128 | ||
131 | /** | 129 | /** |
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c index dd8692ae5c4c..d6ad66ab9290 100644 --- a/arch/arm/plat-samsung/irq-vic-timer.c +++ b/arch/arm/plat-samsung/irq-vic-timer.c | |||
@@ -77,14 +77,11 @@ static struct irq_chip s3c_irq_timer = { | |||
77 | void __init s3c_init_vic_timer_irq(unsigned int parent_irq, | 77 | void __init s3c_init_vic_timer_irq(unsigned int parent_irq, |
78 | unsigned int timer_irq) | 78 | unsigned int timer_irq) |
79 | { | 79 | { |
80 | struct irq_desc *desc = irq_to_desc(parent_irq); | ||
81 | 80 | ||
82 | set_irq_chained_handler(parent_irq, s3c_irq_demux_vic_timer); | 81 | irq_set_chained_handler(parent_irq, s3c_irq_demux_vic_timer); |
82 | irq_set_handler_data(parent_irq, (void *)timer_irq); | ||
83 | 83 | ||
84 | set_irq_chip(timer_irq, &s3c_irq_timer); | 84 | irq_set_chip_and_handler(timer_irq, &s3c_irq_timer, handle_level_irq); |
85 | set_irq_chip_data(timer_irq, (void *)(1 << (timer_irq - IRQ_TIMER0))); | 85 | irq_set_chip_data(timer_irq, (void *)(1 << (timer_irq - IRQ_TIMER0))); |
86 | set_irq_handler(timer_irq, handle_level_irq); | ||
87 | set_irq_flags(timer_irq, IRQF_VALID); | 86 | set_irq_flags(timer_irq, IRQF_VALID); |
88 | |||
89 | desc->irq_data.handler_data = (void *)timer_irq; | ||
90 | } | 87 | } |
diff --git a/arch/arm/plat-samsung/wakeup-mask.c b/arch/arm/plat-samsung/wakeup-mask.c index 2e09b6ad84ca..dc814037297b 100644 --- a/arch/arm/plat-samsung/wakeup-mask.c +++ b/arch/arm/plat-samsung/wakeup-mask.c | |||
@@ -22,7 +22,7 @@ | |||
22 | void samsung_sync_wakemask(void __iomem *reg, | 22 | void samsung_sync_wakemask(void __iomem *reg, |
23 | struct samsung_wakeup_mask *mask, int nr_mask) | 23 | struct samsung_wakeup_mask *mask, int nr_mask) |
24 | { | 24 | { |
25 | struct irq_desc *desc; | 25 | struct irq_data *data; |
26 | u32 val; | 26 | u32 val; |
27 | 27 | ||
28 | val = __raw_readl(reg); | 28 | val = __raw_readl(reg); |
@@ -33,10 +33,10 @@ void samsung_sync_wakemask(void __iomem *reg, | |||
33 | continue; | 33 | continue; |
34 | } | 34 | } |
35 | 35 | ||
36 | desc = irq_to_desc(mask->irq); | 36 | data = irq_get_irq_data(mask->irq); |
37 | 37 | ||
38 | /* bit of a liberty to read this directly from irq_desc. */ | 38 | /* bit of a liberty to read this directly from irq_data. */ |
39 | if (desc->wake_depth > 0) | 39 | if (irqd_is_wakeup_set(data)) |
40 | val &= ~mask->bit; | 40 | val &= ~mask->bit; |
41 | else | 41 | else |
42 | val |= mask->bit; | 42 | val |= mask->bit; |
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c index 78189035e7f1..961fb7261243 100644 --- a/arch/arm/plat-spear/shirq.c +++ b/arch/arm/plat-spear/shirq.c | |||
@@ -68,7 +68,7 @@ static struct irq_chip shirq_chip = { | |||
68 | static void shirq_handler(unsigned irq, struct irq_desc *desc) | 68 | static void shirq_handler(unsigned irq, struct irq_desc *desc) |
69 | { | 69 | { |
70 | u32 i, val, mask; | 70 | u32 i, val, mask; |
71 | struct spear_shirq *shirq = get_irq_data(irq); | 71 | struct spear_shirq *shirq = irq_get_handler_data(irq); |
72 | 72 | ||
73 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 73 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
74 | while ((val = readl(shirq->regs.base + shirq->regs.status_reg) & | 74 | while ((val = readl(shirq->regs.base + shirq->regs.status_reg) & |
@@ -105,14 +105,14 @@ int spear_shirq_register(struct spear_shirq *shirq) | |||
105 | if (!shirq->dev_count) | 105 | if (!shirq->dev_count) |
106 | return -EINVAL; | 106 | return -EINVAL; |
107 | 107 | ||
108 | set_irq_chained_handler(shirq->irq, shirq_handler); | 108 | irq_set_chained_handler(shirq->irq, shirq_handler); |
109 | for (i = 0; i < shirq->dev_count; i++) { | 109 | for (i = 0; i < shirq->dev_count; i++) { |
110 | set_irq_chip(shirq->dev_config[i].virq, &shirq_chip); | 110 | irq_set_chip_and_handler(shirq->dev_config[i].virq, |
111 | set_irq_handler(shirq->dev_config[i].virq, handle_simple_irq); | 111 | &shirq_chip, handle_simple_irq); |
112 | set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID); | 112 | set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID); |
113 | set_irq_chip_data(shirq->dev_config[i].virq, shirq); | 113 | irq_set_chip_data(shirq->dev_config[i].virq, shirq); |
114 | } | 114 | } |
115 | 115 | ||
116 | set_irq_data(shirq->irq, shirq); | 116 | irq_set_handler_data(shirq->irq, shirq); |
117 | return 0; | 117 | return 0; |
118 | } | 118 | } |
diff --git a/arch/arm/plat-stmp3xxx/irq.c b/arch/arm/plat-stmp3xxx/irq.c index aaa168683d4e..6fdf9acf82ed 100644 --- a/arch/arm/plat-stmp3xxx/irq.c +++ b/arch/arm/plat-stmp3xxx/irq.c | |||
@@ -35,8 +35,7 @@ void __init stmp3xxx_init_irq(struct irq_chip *chip) | |||
35 | /* Disable all interrupts initially */ | 35 | /* Disable all interrupts initially */ |
36 | for (i = 0; i < NR_REAL_IRQS; i++) { | 36 | for (i = 0; i < NR_REAL_IRQS; i++) { |
37 | chip->irq_mask(irq_get_irq_data(i)); | 37 | chip->irq_mask(irq_get_irq_data(i)); |
38 | set_irq_chip(i, chip); | 38 | irq_set_chip_and_handler(i, chip, handle_level_irq); |
39 | set_irq_handler(i, handle_level_irq); | ||
40 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 39 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); |
41 | } | 40 | } |
42 | 41 | ||
diff --git a/arch/arm/plat-stmp3xxx/pinmux.c b/arch/arm/plat-stmp3xxx/pinmux.c index 66d5bac3ace2..3def03b3217d 100644 --- a/arch/arm/plat-stmp3xxx/pinmux.c +++ b/arch/arm/plat-stmp3xxx/pinmux.c | |||
@@ -489,14 +489,13 @@ static void stmp3xxx_gpio_free(struct gpio_chip *chip, unsigned offset) | |||
489 | 489 | ||
490 | static void stmp3xxx_gpio_irq(u32 irq, struct irq_desc *desc) | 490 | static void stmp3xxx_gpio_irq(u32 irq, struct irq_desc *desc) |
491 | { | 491 | { |
492 | struct stmp3xxx_pinmux_bank *pm = get_irq_data(irq); | 492 | struct stmp3xxx_pinmux_bank *pm = irq_get_handler_data(irq); |
493 | int gpio_irq = pm->virq; | 493 | int gpio_irq = pm->virq; |
494 | u32 stat = __raw_readl(pm->irqstat); | 494 | u32 stat = __raw_readl(pm->irqstat); |
495 | 495 | ||
496 | while (stat) { | 496 | while (stat) { |
497 | if (stat & 1) | 497 | if (stat & 1) |
498 | irq_desc[gpio_irq].handle_irq(gpio_irq, | 498 | generic_handle_irq(gpio_irq); |
499 | &irq_desc[gpio_irq]); | ||
500 | gpio_irq++; | 499 | gpio_irq++; |
501 | stat >>= 1; | 500 | stat >>= 1; |
502 | } | 501 | } |
@@ -534,15 +533,15 @@ int __init stmp3xxx_pinmux_init(int virtual_irq_start) | |||
534 | 533 | ||
535 | for (virq = pm->virq; virq < pm->virq; virq++) { | 534 | for (virq = pm->virq; virq < pm->virq; virq++) { |
536 | gpio_irq_chip.irq_mask(irq_get_irq_data(virq)); | 535 | gpio_irq_chip.irq_mask(irq_get_irq_data(virq)); |
537 | set_irq_chip(virq, &gpio_irq_chip); | 536 | irq_set_chip_and_handler(virq, &gpio_irq_chip, |
538 | set_irq_handler(virq, handle_level_irq); | 537 | handle_level_irq); |
539 | set_irq_flags(virq, IRQF_VALID); | 538 | set_irq_flags(virq, IRQF_VALID); |
540 | } | 539 | } |
541 | r = gpiochip_add(&pm->chip); | 540 | r = gpiochip_add(&pm->chip); |
542 | if (r < 0) | 541 | if (r < 0) |
543 | break; | 542 | break; |
544 | set_irq_chained_handler(pm->irq, stmp3xxx_gpio_irq); | 543 | irq_set_chained_handler(pm->irq, stmp3xxx_gpio_irq); |
545 | set_irq_data(pm->irq, pm); | 544 | irq_set_handler_data(pm->irq, pm); |
546 | } | 545 | } |
547 | return r; | 546 | return r; |
548 | } | 547 | } |
diff --git a/arch/arm/plat-versatile/fpga-irq.c b/arch/arm/plat-versatile/fpga-irq.c index 31d945d37e4f..f0cc8e19b094 100644 --- a/arch/arm/plat-versatile/fpga-irq.c +++ b/arch/arm/plat-versatile/fpga-irq.c | |||
@@ -30,7 +30,7 @@ static void fpga_irq_unmask(struct irq_data *d) | |||
30 | 30 | ||
31 | static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc) | 31 | static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc) |
32 | { | 32 | { |
33 | struct fpga_irq_data *f = get_irq_desc_data(desc); | 33 | struct fpga_irq_data *f = irq_desc_get_handler_data(desc); |
34 | u32 status = readl(f->base + IRQ_STATUS); | 34 | u32 status = readl(f->base + IRQ_STATUS); |
35 | 35 | ||
36 | if (status == 0) { | 36 | if (status == 0) { |
@@ -55,17 +55,17 @@ void __init fpga_irq_init(int parent_irq, u32 valid, struct fpga_irq_data *f) | |||
55 | f->chip.irq_unmask = fpga_irq_unmask; | 55 | f->chip.irq_unmask = fpga_irq_unmask; |
56 | 56 | ||
57 | if (parent_irq != -1) { | 57 | if (parent_irq != -1) { |
58 | set_irq_data(parent_irq, f); | 58 | irq_set_handler_data(parent_irq, f); |
59 | set_irq_chained_handler(parent_irq, fpga_irq_handle); | 59 | irq_set_chained_handler(parent_irq, fpga_irq_handle); |
60 | } | 60 | } |
61 | 61 | ||
62 | for (i = 0; i < 32; i++) { | 62 | for (i = 0; i < 32; i++) { |
63 | if (valid & (1 << i)) { | 63 | if (valid & (1 << i)) { |
64 | unsigned int irq = f->irq_start + i; | 64 | unsigned int irq = f->irq_start + i; |
65 | 65 | ||
66 | set_irq_chip_data(irq, f); | 66 | irq_set_chip_data(irq, f); |
67 | set_irq_chip(irq, &f->chip); | 67 | irq_set_chip_and_handler(irq, &f->chip, |
68 | set_irq_handler(irq, handle_level_irq); | 68 | handle_level_irq); |
69 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 69 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
70 | } | 70 | } |
71 | } | 71 | } |
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index 8f079392aff0..1696d34f51c2 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c | |||
@@ -48,7 +48,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
48 | seq_printf(p, "%3d: ", i); | 48 | seq_printf(p, "%3d: ", i); |
49 | for_each_online_cpu(j) | 49 | for_each_online_cpu(j) |
50 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | 50 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
51 | seq_printf(p, " %8s", get_irq_desc_chip(desc)->name); | 51 | seq_printf(p, " %8s", irq_desc_get_chip(desc)->name); |
52 | seq_printf(p, " %s", action->name); | 52 | seq_printf(p, " %s", action->name); |
53 | for (action = action->next; action; action = action->next) | 53 | for (action = action->next; action; action = action->next) |
54 | seq_printf(p, " %s", action->name); | 54 | seq_printf(p, " %s", action->name); |
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c index 05b550891ce5..050db44fe919 100644 --- a/arch/blackfin/kernel/trace.c +++ b/arch/blackfin/kernel/trace.c | |||
@@ -912,10 +912,11 @@ void show_regs(struct pt_regs *fp) | |||
912 | /* if no interrupts are going off, don't print this out */ | 912 | /* if no interrupts are going off, don't print this out */ |
913 | if (fp->ipend & ~0x3F) { | 913 | if (fp->ipend & ~0x3F) { |
914 | for (i = 0; i < (NR_IRQS - 1); i++) { | 914 | for (i = 0; i < (NR_IRQS - 1); i++) { |
915 | struct irq_desc *desc = irq_to_desc(i); | ||
915 | if (!in_atomic) | 916 | if (!in_atomic) |
916 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | 917 | raw_spin_lock_irqsave(&desc->lock, flags); |
917 | 918 | ||
918 | action = irq_desc[i].action; | 919 | action = desc->action; |
919 | if (!action) | 920 | if (!action) |
920 | goto unlock; | 921 | goto unlock; |
921 | 922 | ||
@@ -928,7 +929,7 @@ void show_regs(struct pt_regs *fp) | |||
928 | pr_cont("\n"); | 929 | pr_cont("\n"); |
929 | unlock: | 930 | unlock: |
930 | if (!in_atomic) | 931 | if (!in_atomic) |
931 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 932 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
932 | } | 933 | } |
933 | } | 934 | } |
934 | 935 | ||
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c index 5d68bf613b0b..7b07740cf68c 100644 --- a/arch/blackfin/mach-bf561/smp.c +++ b/arch/blackfin/mach-bf561/smp.c | |||
@@ -154,13 +154,13 @@ void platform_clear_ipi(unsigned int cpu, int irq) | |||
154 | void __cpuinit bfin_local_timer_setup(void) | 154 | void __cpuinit bfin_local_timer_setup(void) |
155 | { | 155 | { |
156 | #if defined(CONFIG_TICKSOURCE_CORETMR) | 156 | #if defined(CONFIG_TICKSOURCE_CORETMR) |
157 | struct irq_chip *chip = get_irq_chip(IRQ_CORETMR); | 157 | struct irq_data *data = irq_get_irq_data(IRQ_CORETMR); |
158 | struct irq_desc *desc = irq_to_desc(IRQ_CORETMR); | 158 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
159 | 159 | ||
160 | bfin_coretmr_init(); | 160 | bfin_coretmr_init(); |
161 | bfin_coretmr_clockevent_init(); | 161 | bfin_coretmr_clockevent_init(); |
162 | 162 | ||
163 | chip->irq_unmask(&desc->irq_data); | 163 | chip->irq_unmask(data); |
164 | #else | 164 | #else |
165 | /* Power down the core timer, just to play safe. */ | 165 | /* Power down the core timer, just to play safe. */ |
166 | bfin_write_TCNTL(0); | 166 | bfin_write_TCNTL(0); |
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 6cd52395a999..43d9fb195c1e 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c | |||
@@ -559,7 +559,7 @@ static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle) | |||
559 | #ifdef CONFIG_IPIPE | 559 | #ifdef CONFIG_IPIPE |
560 | handle = handle_level_irq; | 560 | handle = handle_level_irq; |
561 | #endif | 561 | #endif |
562 | __set_irq_handler_unlocked(irq, handle); | 562 | __irq_set_handler_locked(irq, handle); |
563 | } | 563 | } |
564 | 564 | ||
565 | static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS); | 565 | static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS); |
@@ -578,10 +578,9 @@ static void bfin_gpio_ack_irq(struct irq_data *d) | |||
578 | static void bfin_gpio_mask_ack_irq(struct irq_data *d) | 578 | static void bfin_gpio_mask_ack_irq(struct irq_data *d) |
579 | { | 579 | { |
580 | unsigned int irq = d->irq; | 580 | unsigned int irq = d->irq; |
581 | struct irq_desc *desc = irq_to_desc(irq); | ||
582 | u32 gpionr = irq_to_gpio(irq); | 581 | u32 gpionr = irq_to_gpio(irq); |
583 | 582 | ||
584 | if (desc->handle_irq == handle_edge_irq) | 583 | if (!irqd_is_level_type(d)) |
585 | set_gpio_data(gpionr, 0); | 584 | set_gpio_data(gpionr, 0); |
586 | 585 | ||
587 | set_gpio_maska(gpionr, 0); | 586 | set_gpio_maska(gpionr, 0); |
@@ -837,12 +836,11 @@ void init_pint_lut(void) | |||
837 | 836 | ||
838 | static void bfin_gpio_ack_irq(struct irq_data *d) | 837 | static void bfin_gpio_ack_irq(struct irq_data *d) |
839 | { | 838 | { |
840 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
841 | u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; | 839 | u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; |
842 | u32 pintbit = PINT_BIT(pint_val); | 840 | u32 pintbit = PINT_BIT(pint_val); |
843 | u32 bank = PINT_2_BANK(pint_val); | 841 | u32 bank = PINT_2_BANK(pint_val); |
844 | 842 | ||
845 | if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { | 843 | if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { |
846 | if (pint[bank]->invert_set & pintbit) | 844 | if (pint[bank]->invert_set & pintbit) |
847 | pint[bank]->invert_clear = pintbit; | 845 | pint[bank]->invert_clear = pintbit; |
848 | else | 846 | else |
@@ -854,12 +852,11 @@ static void bfin_gpio_ack_irq(struct irq_data *d) | |||
854 | 852 | ||
855 | static void bfin_gpio_mask_ack_irq(struct irq_data *d) | 853 | static void bfin_gpio_mask_ack_irq(struct irq_data *d) |
856 | { | 854 | { |
857 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
858 | u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; | 855 | u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; |
859 | u32 pintbit = PINT_BIT(pint_val); | 856 | u32 pintbit = PINT_BIT(pint_val); |
860 | u32 bank = PINT_2_BANK(pint_val); | 857 | u32 bank = PINT_2_BANK(pint_val); |
861 | 858 | ||
862 | if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { | 859 | if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { |
863 | if (pint[bank]->invert_set & pintbit) | 860 | if (pint[bank]->invert_set & pintbit) |
864 | pint[bank]->invert_clear = pintbit; | 861 | pint[bank]->invert_clear = pintbit; |
865 | else | 862 | else |
@@ -1166,9 +1163,9 @@ int __init init_arch_irq(void) | |||
1166 | 1163 | ||
1167 | for (irq = 0; irq <= SYS_IRQS; irq++) { | 1164 | for (irq = 0; irq <= SYS_IRQS; irq++) { |
1168 | if (irq <= IRQ_CORETMR) | 1165 | if (irq <= IRQ_CORETMR) |
1169 | set_irq_chip(irq, &bfin_core_irqchip); | 1166 | irq_set_chip(irq, &bfin_core_irqchip); |
1170 | else | 1167 | else |
1171 | set_irq_chip(irq, &bfin_internal_irqchip); | 1168 | irq_set_chip(irq, &bfin_internal_irqchip); |
1172 | 1169 | ||
1173 | switch (irq) { | 1170 | switch (irq) { |
1174 | #if defined(CONFIG_BF53x) | 1171 | #if defined(CONFIG_BF53x) |
@@ -1192,50 +1189,50 @@ int __init init_arch_irq(void) | |||
1192 | #elif defined(CONFIG_BF538) || defined(CONFIG_BF539) | 1189 | #elif defined(CONFIG_BF538) || defined(CONFIG_BF539) |
1193 | case IRQ_PORTF_INTA: | 1190 | case IRQ_PORTF_INTA: |
1194 | #endif | 1191 | #endif |
1195 | set_irq_chained_handler(irq, | 1192 | irq_set_chained_handler(irq, bfin_demux_gpio_irq); |
1196 | bfin_demux_gpio_irq); | ||
1197 | break; | 1193 | break; |
1198 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX | 1194 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX |
1199 | case IRQ_GENERIC_ERROR: | 1195 | case IRQ_GENERIC_ERROR: |
1200 | set_irq_chained_handler(irq, bfin_demux_error_irq); | 1196 | irq_set_chained_handler(irq, bfin_demux_error_irq); |
1201 | break; | 1197 | break; |
1202 | #endif | 1198 | #endif |
1203 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) | 1199 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) |
1204 | case IRQ_MAC_ERROR: | 1200 | case IRQ_MAC_ERROR: |
1205 | set_irq_chained_handler(irq, bfin_demux_mac_status_irq); | 1201 | irq_set_chained_handler(irq, |
1202 | bfin_demux_mac_status_irq); | ||
1206 | break; | 1203 | break; |
1207 | #endif | 1204 | #endif |
1208 | #ifdef CONFIG_SMP | 1205 | #ifdef CONFIG_SMP |
1209 | case IRQ_SUPPLE_0: | 1206 | case IRQ_SUPPLE_0: |
1210 | case IRQ_SUPPLE_1: | 1207 | case IRQ_SUPPLE_1: |
1211 | set_irq_handler(irq, handle_percpu_irq); | 1208 | irq_set_handler(irq, handle_percpu_irq); |
1212 | break; | 1209 | break; |
1213 | #endif | 1210 | #endif |
1214 | 1211 | ||
1215 | #ifdef CONFIG_TICKSOURCE_CORETMR | 1212 | #ifdef CONFIG_TICKSOURCE_CORETMR |
1216 | case IRQ_CORETMR: | 1213 | case IRQ_CORETMR: |
1217 | # ifdef CONFIG_SMP | 1214 | # ifdef CONFIG_SMP |
1218 | set_irq_handler(irq, handle_percpu_irq); | 1215 | irq_set_handler(irq, handle_percpu_irq); |
1219 | break; | 1216 | break; |
1220 | # else | 1217 | # else |
1221 | set_irq_handler(irq, handle_simple_irq); | 1218 | irq_set_handler(irq, handle_simple_irq); |
1222 | break; | 1219 | break; |
1223 | # endif | 1220 | # endif |
1224 | #endif | 1221 | #endif |
1225 | 1222 | ||
1226 | #ifdef CONFIG_TICKSOURCE_GPTMR0 | 1223 | #ifdef CONFIG_TICKSOURCE_GPTMR0 |
1227 | case IRQ_TIMER0: | 1224 | case IRQ_TIMER0: |
1228 | set_irq_handler(irq, handle_simple_irq); | 1225 | irq_set_handler(irq, handle_simple_irq); |
1229 | break; | 1226 | break; |
1230 | #endif | 1227 | #endif |
1231 | 1228 | ||
1232 | #ifdef CONFIG_IPIPE | 1229 | #ifdef CONFIG_IPIPE |
1233 | default: | 1230 | default: |
1234 | set_irq_handler(irq, handle_level_irq); | 1231 | irq_set_handler(irq, handle_level_irq); |
1235 | break; | 1232 | break; |
1236 | #else /* !CONFIG_IPIPE */ | 1233 | #else /* !CONFIG_IPIPE */ |
1237 | default: | 1234 | default: |
1238 | set_irq_handler(irq, handle_simple_irq); | 1235 | irq_set_handler(irq, handle_simple_irq); |
1239 | break; | 1236 | break; |
1240 | #endif /* !CONFIG_IPIPE */ | 1237 | #endif /* !CONFIG_IPIPE */ |
1241 | } | 1238 | } |
@@ -1243,22 +1240,22 @@ int __init init_arch_irq(void) | |||
1243 | 1240 | ||
1244 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX | 1241 | #ifdef BF537_GENERIC_ERROR_INT_DEMUX |
1245 | for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) | 1242 | for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) |
1246 | set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip, | 1243 | irq_set_chip_and_handler(irq, &bfin_generic_error_irqchip, |
1247 | handle_level_irq); | 1244 | handle_level_irq); |
1248 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) | 1245 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) |
1249 | set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq); | 1246 | irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq); |
1250 | #endif | 1247 | #endif |
1251 | #endif | 1248 | #endif |
1252 | 1249 | ||
1253 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) | 1250 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) |
1254 | for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) | 1251 | for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) |
1255 | set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip, | 1252 | irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip, |
1256 | handle_level_irq); | 1253 | handle_level_irq); |
1257 | #endif | 1254 | #endif |
1258 | /* if configured as edge, then will be changed to do_edge_IRQ */ | 1255 | /* if configured as edge, then will be changed to do_edge_IRQ */ |
1259 | for (irq = GPIO_IRQ_BASE; | 1256 | for (irq = GPIO_IRQ_BASE; |
1260 | irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) | 1257 | irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) |
1261 | set_irq_chip_and_handler(irq, &bfin_gpio_irqchip, | 1258 | irq_set_chip_and_handler(irq, &bfin_gpio_irqchip, |
1262 | handle_level_irq); | 1259 | handle_level_irq); |
1263 | 1260 | ||
1264 | bfin_write_IMASK(0); | 1261 | bfin_write_IMASK(0); |
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 04a7fc5eaf46..617925ddd142 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig | |||
@@ -296,8 +296,7 @@ config ETRAX_RTC | |||
296 | choice | 296 | choice |
297 | prompt "RTC chip" | 297 | prompt "RTC chip" |
298 | depends on ETRAX_RTC | 298 | depends on ETRAX_RTC |
299 | default ETRAX_PCF8563 if ETRAX_ARCH_V32 | 299 | default ETRAX_DS1302 |
300 | default ETRAX_DS1302 if ETRAX_ARCH_V10 | ||
301 | 300 | ||
302 | config ETRAX_DS1302 | 301 | config ETRAX_DS1302 |
303 | depends on ETRAX_ARCH_V10 | 302 | depends on ETRAX_ARCH_V10 |
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c index ea69faba9b62..1391b731ad1c 100644 --- a/arch/cris/arch-v10/drivers/pcf8563.c +++ b/arch/cris/arch-v10/drivers/pcf8563.c | |||
@@ -345,7 +345,7 @@ static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned | |||
345 | int ret; | 345 | int ret; |
346 | 346 | ||
347 | mutex_lock(&pcf8563_mutex); | 347 | mutex_lock(&pcf8563_mutex); |
348 | return pcf8563_ioctl(filp, cmd, arg); | 348 | ret = pcf8563_ioctl(filp, cmd, arg); |
349 | mutex_unlock(&pcf8563_mutex); | 349 | mutex_unlock(&pcf8563_mutex); |
350 | 350 | ||
351 | return ret; | 351 | return ret; |
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c index b6be705c2a3e..e78fe49a9849 100644 --- a/arch/cris/arch-v10/kernel/signal.c +++ b/arch/cris/arch-v10/kernel/signal.c | |||
@@ -537,7 +537,7 @@ void do_signal(int canrestart, struct pt_regs *regs) | |||
537 | RESTART_CRIS_SYS(regs); | 537 | RESTART_CRIS_SYS(regs); |
538 | } | 538 | } |
539 | if (regs->r10 == -ERESTART_RESTARTBLOCK) { | 539 | if (regs->r10 == -ERESTART_RESTARTBLOCK) { |
540 | regs->r10 = __NR_restart_syscall; | 540 | regs->r9 = __NR_restart_syscall; |
541 | regs->irp -= 2; | 541 | regs->irp -= 2; |
542 | } | 542 | } |
543 | } | 543 | } |
diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile index e8c02437edaf..39aa3c117a86 100644 --- a/arch/cris/arch-v32/drivers/Makefile +++ b/arch/cris/arch-v32/drivers/Makefile | |||
@@ -7,7 +7,6 @@ obj-$(CONFIG_ETRAX_AXISFLASHMAP) += axisflashmap.o | |||
7 | obj-$(CONFIG_ETRAXFS) += mach-fs/ | 7 | obj-$(CONFIG_ETRAXFS) += mach-fs/ |
8 | obj-$(CONFIG_CRIS_MACH_ARTPEC3) += mach-a3/ | 8 | obj-$(CONFIG_CRIS_MACH_ARTPEC3) += mach-a3/ |
9 | obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o | 9 | obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o |
10 | obj-$(CONFIG_ETRAX_PCF8563) += pcf8563.o | ||
11 | obj-$(CONFIG_ETRAX_I2C) += i2c.o | 10 | obj-$(CONFIG_ETRAX_I2C) += i2c.o |
12 | obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o | 11 | obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o |
13 | obj-$(CONFIG_PCI) += pci/ | 12 | obj-$(CONFIG_PCI) += pci/ |
diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c deleted file mode 100644 index b6e4fc0aad42..000000000000 --- a/arch/cris/arch-v32/drivers/pcf8563.c +++ /dev/null | |||
@@ -1,377 +0,0 @@ | |||
1 | /* | ||
2 | * PCF8563 RTC | ||
3 | * | ||
4 | * From Phillips' datasheet: | ||
5 | * | ||
6 | * The PCF8563 is a CMOS real-time clock/calendar optimized for low power | ||
7 | * consumption. A programmable clock output, interrupt output and voltage | ||
8 | * low detector are also provided. All address and data are transferred | ||
9 | * serially via two-line bidirectional I2C-bus. Maximum bus speed is | ||
10 | * 400 kbits/s. The built-in word address register is incremented | ||
11 | * automatically after each written or read byte. | ||
12 | * | ||
13 | * Copyright (c) 2002-2007, Axis Communications AB | ||
14 | * All rights reserved. | ||
15 | * | ||
16 | * Author: Tobias Anderberg <tobiasa@axis.com>. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/fs.h> | ||
26 | #include <linux/ioctl.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/bcd.h> | ||
29 | #include <linux/mutex.h> | ||
30 | |||
31 | #include <asm/uaccess.h> | ||
32 | #include <asm/system.h> | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/rtc.h> | ||
35 | |||
36 | #include "i2c.h" | ||
37 | |||
38 | #define PCF8563_MAJOR 121 /* Local major number. */ | ||
39 | #define DEVICE_NAME "rtc" /* Name which is registered in /proc/devices. */ | ||
40 | #define PCF8563_NAME "PCF8563" | ||
41 | #define DRIVER_VERSION "$Revision: 1.17 $" | ||
42 | |||
43 | /* Two simple wrapper macros, saves a few keystrokes. */ | ||
44 | #define rtc_read(x) i2c_readreg(RTC_I2C_READ, x) | ||
45 | #define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y) | ||
46 | |||
47 | static DEFINE_MUTEX(pcf8563_mutex); | ||
48 | static DEFINE_MUTEX(rtc_lock); /* Protect state etc */ | ||
49 | |||
50 | static const unsigned char days_in_month[] = | ||
51 | { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; | ||
52 | |||
53 | static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); | ||
54 | |||
55 | /* Cache VL bit value read at driver init since writing the RTC_SECOND | ||
56 | * register clears the VL status. | ||
57 | */ | ||
58 | static int voltage_low; | ||
59 | |||
60 | static const struct file_operations pcf8563_fops = { | ||
61 | .owner = THIS_MODULE, | ||
62 | .unlocked_ioctl = pcf8563_unlocked_ioctl, | ||
63 | .llseek = noop_llseek, | ||
64 | }; | ||
65 | |||
66 | unsigned char | ||
67 | pcf8563_readreg(int reg) | ||
68 | { | ||
69 | unsigned char res = rtc_read(reg); | ||
70 | |||
71 | /* The PCF8563 does not return 0 for unimplemented bits. */ | ||
72 | switch (reg) { | ||
73 | case RTC_SECONDS: | ||
74 | case RTC_MINUTES: | ||
75 | res &= 0x7F; | ||
76 | break; | ||
77 | case RTC_HOURS: | ||
78 | case RTC_DAY_OF_MONTH: | ||
79 | res &= 0x3F; | ||
80 | break; | ||
81 | case RTC_WEEKDAY: | ||
82 | res &= 0x07; | ||
83 | break; | ||
84 | case RTC_MONTH: | ||
85 | res &= 0x1F; | ||
86 | break; | ||
87 | case RTC_CONTROL1: | ||
88 | res &= 0xA8; | ||
89 | break; | ||
90 | case RTC_CONTROL2: | ||
91 | res &= 0x1F; | ||
92 | break; | ||
93 | case RTC_CLOCKOUT_FREQ: | ||
94 | case RTC_TIMER_CONTROL: | ||
95 | res &= 0x83; | ||
96 | break; | ||
97 | } | ||
98 | return res; | ||
99 | } | ||
100 | |||
101 | void | ||
102 | pcf8563_writereg(int reg, unsigned char val) | ||
103 | { | ||
104 | rtc_write(reg, val); | ||
105 | } | ||
106 | |||
107 | void | ||
108 | get_rtc_time(struct rtc_time *tm) | ||
109 | { | ||
110 | tm->tm_sec = rtc_read(RTC_SECONDS); | ||
111 | tm->tm_min = rtc_read(RTC_MINUTES); | ||
112 | tm->tm_hour = rtc_read(RTC_HOURS); | ||
113 | tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH); | ||
114 | tm->tm_wday = rtc_read(RTC_WEEKDAY); | ||
115 | tm->tm_mon = rtc_read(RTC_MONTH); | ||
116 | tm->tm_year = rtc_read(RTC_YEAR); | ||
117 | |||
118 | if (tm->tm_sec & 0x80) { | ||
119 | printk(KERN_ERR "%s: RTC Voltage Low - reliable date/time " | ||
120 | "information is no longer guaranteed!\n", PCF8563_NAME); | ||
121 | } | ||
122 | |||
123 | tm->tm_year = bcd2bin(tm->tm_year) + | ||
124 | ((tm->tm_mon & 0x80) ? 100 : 0); | ||
125 | tm->tm_sec &= 0x7F; | ||
126 | tm->tm_min &= 0x7F; | ||
127 | tm->tm_hour &= 0x3F; | ||
128 | tm->tm_mday &= 0x3F; | ||
129 | tm->tm_wday &= 0x07; /* Not coded in BCD. */ | ||
130 | tm->tm_mon &= 0x1F; | ||
131 | |||
132 | tm->tm_sec = bcd2bin(tm->tm_sec); | ||
133 | tm->tm_min = bcd2bin(tm->tm_min); | ||
134 | tm->tm_hour = bcd2bin(tm->tm_hour); | ||
135 | tm->tm_mday = bcd2bin(tm->tm_mday); | ||
136 | tm->tm_mon = bcd2bin(tm->tm_mon); | ||
137 | tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */ | ||
138 | } | ||
139 | |||
140 | int __init | ||
141 | pcf8563_init(void) | ||
142 | { | ||
143 | static int res; | ||
144 | static int first = 1; | ||
145 | |||
146 | if (!first) | ||
147 | return res; | ||
148 | first = 0; | ||
149 | |||
150 | /* Initiate the i2c protocol. */ | ||
151 | res = i2c_init(); | ||
152 | if (res < 0) { | ||
153 | printk(KERN_CRIT "pcf8563_init: Failed to init i2c.\n"); | ||
154 | return res; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * First of all we need to reset the chip. This is done by | ||
159 | * clearing control1, control2 and clk freq and resetting | ||
160 | * all alarms. | ||
161 | */ | ||
162 | if (rtc_write(RTC_CONTROL1, 0x00) < 0) | ||
163 | goto err; | ||
164 | |||
165 | if (rtc_write(RTC_CONTROL2, 0x00) < 0) | ||
166 | goto err; | ||
167 | |||
168 | if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0) | ||
169 | goto err; | ||
170 | |||
171 | if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0) | ||
172 | goto err; | ||
173 | |||
174 | /* Reset the alarms. */ | ||
175 | if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0) | ||
176 | goto err; | ||
177 | |||
178 | if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0) | ||
179 | goto err; | ||
180 | |||
181 | if (rtc_write(RTC_DAY_ALARM, 0x80) < 0) | ||
182 | goto err; | ||
183 | |||
184 | if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0) | ||
185 | goto err; | ||
186 | |||
187 | /* Check for low voltage, and warn about it. */ | ||
188 | if (rtc_read(RTC_SECONDS) & 0x80) { | ||
189 | voltage_low = 1; | ||
190 | printk(KERN_WARNING "%s: RTC Voltage Low - reliable " | ||
191 | "date/time information is no longer guaranteed!\n", | ||
192 | PCF8563_NAME); | ||
193 | } | ||
194 | |||
195 | return res; | ||
196 | |||
197 | err: | ||
198 | printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME); | ||
199 | res = -1; | ||
200 | return res; | ||
201 | } | ||
202 | |||
203 | void __exit | ||
204 | pcf8563_exit(void) | ||
205 | { | ||
206 | unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME); | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * ioctl calls for this driver. Why return -ENOTTY upon error? Because | ||
211 | * POSIX says so! | ||
212 | */ | ||
213 | static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
214 | { | ||
215 | /* Some sanity checks. */ | ||
216 | if (_IOC_TYPE(cmd) != RTC_MAGIC) | ||
217 | return -ENOTTY; | ||
218 | |||
219 | if (_IOC_NR(cmd) > RTC_MAX_IOCTL) | ||
220 | return -ENOTTY; | ||
221 | |||
222 | switch (cmd) { | ||
223 | case RTC_RD_TIME: | ||
224 | { | ||
225 | struct rtc_time tm; | ||
226 | |||
227 | mutex_lock(&rtc_lock); | ||
228 | memset(&tm, 0, sizeof tm); | ||
229 | get_rtc_time(&tm); | ||
230 | |||
231 | if (copy_to_user((struct rtc_time *) arg, &tm, | ||
232 | sizeof tm)) { | ||
233 | mutex_unlock(&rtc_lock); | ||
234 | return -EFAULT; | ||
235 | } | ||
236 | |||
237 | mutex_unlock(&rtc_lock); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | case RTC_SET_TIME: | ||
242 | { | ||
243 | int leap; | ||
244 | int year; | ||
245 | int century; | ||
246 | struct rtc_time tm; | ||
247 | |||
248 | memset(&tm, 0, sizeof tm); | ||
249 | if (!capable(CAP_SYS_TIME)) | ||
250 | return -EPERM; | ||
251 | |||
252 | if (copy_from_user(&tm, (struct rtc_time *) arg, | ||
253 | sizeof tm)) | ||
254 | return -EFAULT; | ||
255 | |||
256 | /* Convert from struct tm to struct rtc_time. */ | ||
257 | tm.tm_year += 1900; | ||
258 | tm.tm_mon += 1; | ||
259 | |||
260 | /* | ||
261 | * Check if tm.tm_year is a leap year. A year is a leap | ||
262 | * year if it is divisible by 4 but not 100, except | ||
263 | * that years divisible by 400 _are_ leap years. | ||
264 | */ | ||
265 | year = tm.tm_year; | ||
266 | leap = (tm.tm_mon == 2) && | ||
267 | ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0); | ||
268 | |||
269 | /* Perform some sanity checks. */ | ||
270 | if ((tm.tm_year < 1970) || | ||
271 | (tm.tm_mon > 12) || | ||
272 | (tm.tm_mday == 0) || | ||
273 | (tm.tm_mday > days_in_month[tm.tm_mon] + leap) || | ||
274 | (tm.tm_wday >= 7) || | ||
275 | (tm.tm_hour >= 24) || | ||
276 | (tm.tm_min >= 60) || | ||
277 | (tm.tm_sec >= 60)) | ||
278 | return -EINVAL; | ||
279 | |||
280 | century = (tm.tm_year >= 2000) ? 0x80 : 0; | ||
281 | tm.tm_year = tm.tm_year % 100; | ||
282 | |||
283 | tm.tm_year = bin2bcd(tm.tm_year); | ||
284 | tm.tm_mon = bin2bcd(tm.tm_mon); | ||
285 | tm.tm_mday = bin2bcd(tm.tm_mday); | ||
286 | tm.tm_hour = bin2bcd(tm.tm_hour); | ||
287 | tm.tm_min = bin2bcd(tm.tm_min); | ||
288 | tm.tm_sec = bin2bcd(tm.tm_sec); | ||
289 | tm.tm_mon |= century; | ||
290 | |||
291 | mutex_lock(&rtc_lock); | ||
292 | |||
293 | rtc_write(RTC_YEAR, tm.tm_year); | ||
294 | rtc_write(RTC_MONTH, tm.tm_mon); | ||
295 | rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */ | ||
296 | rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday); | ||
297 | rtc_write(RTC_HOURS, tm.tm_hour); | ||
298 | rtc_write(RTC_MINUTES, tm.tm_min); | ||
299 | rtc_write(RTC_SECONDS, tm.tm_sec); | ||
300 | |||
301 | mutex_unlock(&rtc_lock); | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | case RTC_VL_READ: | ||
306 | if (voltage_low) | ||
307 | printk(KERN_ERR "%s: RTC Voltage Low - " | ||
308 | "reliable date/time information is no " | ||
309 | "longer guaranteed!\n", PCF8563_NAME); | ||
310 | |||
311 | if (copy_to_user((int *) arg, &voltage_low, sizeof(int))) | ||
312 | return -EFAULT; | ||
313 | return 0; | ||
314 | |||
315 | case RTC_VL_CLR: | ||
316 | { | ||
317 | /* Clear the VL bit in the seconds register in case | ||
318 | * the time has not been set already (which would | ||
319 | * have cleared it). This does not really matter | ||
320 | * because of the cached voltage_low value but do it | ||
321 | * anyway for consistency. */ | ||
322 | |||
323 | int ret = rtc_read(RTC_SECONDS); | ||
324 | |||
325 | rtc_write(RTC_SECONDS, (ret & 0x7F)); | ||
326 | |||
327 | /* Clear the cached value. */ | ||
328 | voltage_low = 0; | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | default: | ||
333 | return -ENOTTY; | ||
334 | } | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
340 | { | ||
341 | int ret; | ||
342 | |||
343 | mutex_lock(&pcf8563_mutex); | ||
344 | return pcf8563_ioctl(filp, cmd, arg); | ||
345 | mutex_unlock(&pcf8563_mutex); | ||
346 | |||
347 | return ret; | ||
348 | } | ||
349 | |||
350 | static int __init pcf8563_register(void) | ||
351 | { | ||
352 | if (pcf8563_init() < 0) { | ||
353 | printk(KERN_INFO "%s: Unable to initialize Real-Time Clock " | ||
354 | "Driver, %s\n", PCF8563_NAME, DRIVER_VERSION); | ||
355 | return -1; | ||
356 | } | ||
357 | |||
358 | if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) { | ||
359 | printk(KERN_INFO "%s: Unable to get major number %d for RTC " | ||
360 | "device.\n", PCF8563_NAME, PCF8563_MAJOR); | ||
361 | return -1; | ||
362 | } | ||
363 | |||
364 | printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME, | ||
365 | DRIVER_VERSION); | ||
366 | |||
367 | /* Check for low voltage, and warn about it. */ | ||
368 | if (voltage_low) { | ||
369 | printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time " | ||
370 | "information is no longer guaranteed!\n", PCF8563_NAME); | ||
371 | } | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | module_init(pcf8563_register); | ||
377 | module_exit(pcf8563_exit); | ||
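The deleted driver keeps every time field in BCD and converts with bcd2bin()/bin2bcd() from <linux/bcd.h>. A standalone illustration of what those conversions amount to (the helper names here are local to the example, not kernel API):

    #include <stdio.h>

    /* 59 decimal <-> 0x59 BCD, matching what bin2bcd()/bcd2bin() do */
    static unsigned char to_bcd(unsigned char val)
    {
            return (unsigned char)(((val / 10) << 4) | (val % 10));
    }

    static unsigned char from_bcd(unsigned char val)
    {
            return (unsigned char)((val >> 4) * 10 + (val & 0x0f));
    }

    int main(void)
    {
            printf("59 -> 0x%02x -> %u\n", to_bcd(59), from_bcd(0x59));
            return 0;
    }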
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index f6037b2da25e..6db8aea5667f 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig | |||
@@ -6,6 +6,8 @@ config FRV | |||
6 | select HAVE_IRQ_WORK | 6 | select HAVE_IRQ_WORK |
7 | select HAVE_PERF_EVENTS | 7 | select HAVE_PERF_EVENTS |
8 | select HAVE_GENERIC_HARDIRQS | 8 | select HAVE_GENERIC_HARDIRQS |
9 | select GENERIC_IRQ_SHOW | ||
10 | select GENERIC_HARDIRQS_NO_DEPRECATED | ||
9 | 11 | ||
10 | config ZONE_DMA | 12 | config ZONE_DMA |
11 | bool | 13 | bool |
@@ -361,7 +363,6 @@ menu "Power management options" | |||
361 | 363 | ||
362 | config ARCH_SUSPEND_POSSIBLE | 364 | config ARCH_SUSPEND_POSSIBLE |
363 | def_bool y | 365 | def_bool y |
364 | depends on !SMP | ||
365 | 366 | ||
366 | source kernel/power/Kconfig | 367 | source kernel/power/Kconfig |
367 | endmenu | 368 | endmenu |
diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h index 0a6d8d9ca45b..6c10fd2c626d 100644 --- a/arch/frv/include/asm/system.h +++ b/arch/frv/include/asm/system.h | |||
@@ -45,21 +45,12 @@ do { \ | |||
45 | #define wmb() asm volatile ("membar" : : :"memory") | 45 | #define wmb() asm volatile ("membar" : : :"memory") |
46 | #define read_barrier_depends() do { } while (0) | 46 | #define read_barrier_depends() do { } while (0) |
47 | 47 | ||
48 | #ifdef CONFIG_SMP | ||
49 | #define smp_mb() mb() | ||
50 | #define smp_rmb() rmb() | ||
51 | #define smp_wmb() wmb() | ||
52 | #define smp_read_barrier_depends() read_barrier_depends() | ||
53 | #define set_mb(var, value) \ | ||
54 | do { xchg(&var, (value)); } while (0) | ||
55 | #else | ||
56 | #define smp_mb() barrier() | 48 | #define smp_mb() barrier() |
57 | #define smp_rmb() barrier() | 49 | #define smp_rmb() barrier() |
58 | #define smp_wmb() barrier() | 50 | #define smp_wmb() barrier() |
59 | #define smp_read_barrier_depends() do {} while(0) | 51 | #define smp_read_barrier_depends() do {} while(0) |
60 | #define set_mb(var, value) \ | 52 | #define set_mb(var, value) \ |
61 | do { var = (value); barrier(); } while (0) | 53 | do { var = (value); barrier(); } while (0) |
62 | #endif | ||
63 | 54 | ||
64 | extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2))); | 55 | extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2))); |
65 | extern void free_initmem(void); | 56 | extern void free_initmem(void); |
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h index 8582e9c7531c..cefbe73dc119 100644 --- a/arch/frv/include/asm/thread_info.h +++ b/arch/frv/include/asm/thread_info.h | |||
@@ -21,6 +21,8 @@ | |||
21 | 21 | ||
22 | #define THREAD_SIZE 8192 | 22 | #define THREAD_SIZE 8192 |
23 | 23 | ||
24 | #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR | ||
25 | |||
24 | /* | 26 | /* |
25 | * low level task data that entry.S needs immediate access to | 27 | * low level task data that entry.S needs immediate access to |
26 | * - this struct should fit entirely inside of one cache line | 28 | * - this struct should fit entirely inside of one cache line |
@@ -87,7 +89,7 @@ register struct thread_info *__current_thread_info asm("gr15"); | |||
87 | #define alloc_thread_info_node(tsk, node) \ | 89 | #define alloc_thread_info_node(tsk, node) \ |
88 | kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) | 90 | kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) |
89 | #else | 91 | #else |
90 | #define alloc_thread_info_node(tsk) \ | 92 | #define alloc_thread_info_node(tsk, node) \ |
91 | kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) | 93 | kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) |
92 | #endif | 94 | #endif |
93 | 95 | ||
diff --git a/arch/frv/kernel/irq-mb93091.c b/arch/frv/kernel/irq-mb93091.c index 4dd9adaf115a..9afc2ea400dc 100644 --- a/arch/frv/kernel/irq-mb93091.c +++ b/arch/frv/kernel/irq-mb93091.c | |||
@@ -36,45 +36,45 @@ | |||
36 | /* | 36 | /* |
37 | * on-motherboard FPGA PIC operations | 37 | * on-motherboard FPGA PIC operations |
38 | */ | 38 | */ |
39 | static void frv_fpga_mask(unsigned int irq) | 39 | static void frv_fpga_mask(struct irq_data *d) |
40 | { | 40 | { |
41 | uint16_t imr = __get_IMR(); | 41 | uint16_t imr = __get_IMR(); |
42 | 42 | ||
43 | imr |= 1 << (irq - IRQ_BASE_FPGA); | 43 | imr |= 1 << (d->irq - IRQ_BASE_FPGA); |
44 | 44 | ||
45 | __set_IMR(imr); | 45 | __set_IMR(imr); |
46 | } | 46 | } |
47 | 47 | ||
48 | static void frv_fpga_ack(unsigned int irq) | 48 | static void frv_fpga_ack(struct irq_data *d) |
49 | { | 49 | { |
50 | __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); | 50 | __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); |
51 | } | 51 | } |
52 | 52 | ||
53 | static void frv_fpga_mask_ack(unsigned int irq) | 53 | static void frv_fpga_mask_ack(struct irq_data *d) |
54 | { | 54 | { |
55 | uint16_t imr = __get_IMR(); | 55 | uint16_t imr = __get_IMR(); |
56 | 56 | ||
57 | imr |= 1 << (irq - IRQ_BASE_FPGA); | 57 | imr |= 1 << (d->irq - IRQ_BASE_FPGA); |
58 | __set_IMR(imr); | 58 | __set_IMR(imr); |
59 | 59 | ||
60 | __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); | 60 | __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); |
61 | } | 61 | } |
62 | 62 | ||
63 | static void frv_fpga_unmask(unsigned int irq) | 63 | static void frv_fpga_unmask(struct irq_data *d) |
64 | { | 64 | { |
65 | uint16_t imr = __get_IMR(); | 65 | uint16_t imr = __get_IMR(); |
66 | 66 | ||
67 | imr &= ~(1 << (irq - IRQ_BASE_FPGA)); | 67 | imr &= ~(1 << (d->irq - IRQ_BASE_FPGA)); |
68 | 68 | ||
69 | __set_IMR(imr); | 69 | __set_IMR(imr); |
70 | } | 70 | } |
71 | 71 | ||
72 | static struct irq_chip frv_fpga_pic = { | 72 | static struct irq_chip frv_fpga_pic = { |
73 | .name = "mb93091", | 73 | .name = "mb93091", |
74 | .ack = frv_fpga_ack, | 74 | .irq_ack = frv_fpga_ack, |
75 | .mask = frv_fpga_mask, | 75 | .irq_mask = frv_fpga_mask, |
76 | .mask_ack = frv_fpga_mask_ack, | 76 | .irq_mask_ack = frv_fpga_mask_ack, |
77 | .unmask = frv_fpga_unmask, | 77 | .irq_unmask = frv_fpga_unmask, |
78 | }; | 78 | }; |
79 | 79 | ||
80 | /* | 80 | /* |
@@ -146,9 +146,9 @@ void __init fpga_init(void) | |||
146 | __clr_IFR(0x0000); | 146 | __clr_IFR(0x0000); |
147 | 147 | ||
148 | for (irq = IRQ_BASE_FPGA + 1; irq <= IRQ_BASE_FPGA + 14; irq++) | 148 | for (irq = IRQ_BASE_FPGA + 1; irq <= IRQ_BASE_FPGA + 14; irq++) |
149 | set_irq_chip_and_handler(irq, &frv_fpga_pic, handle_level_irq); | 149 | irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_level_irq); |
150 | 150 | ||
151 | set_irq_chip_and_handler(IRQ_FPGA_NMI, &frv_fpga_pic, handle_edge_irq); | 151 | irq_set_chip_and_handler(IRQ_FPGA_NMI, &frv_fpga_pic, handle_edge_irq); |
152 | 152 | ||
153 | /* the FPGA drives the first four external IRQ inputs on the CPU PIC */ | 153 | /* the FPGA drives the first four external IRQ inputs on the CPU PIC */ |
154 | setup_irq(IRQ_CPU_EXTERNAL0, &fpga_irq[0]); | 154 | setup_irq(IRQ_CPU_EXTERNAL0, &fpga_irq[0]); |
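These FRV hunks are part of the tree-wide move to irq_data-based irq_chip callbacks: each handler now takes struct irq_data * and reads the line number from d->irq, the .ack/.mask/.unmask members become .irq_ack/.irq_mask/.irq_unmask, and registration goes through irq_set_chip_and_handler(). A skeleton of that shape, assuming <linux/irq.h> of this kernel era; hw_mask_line()/hw_unmask_line() and IRQ_BASE_EXAMPLE are placeholders:

    static void example_mask(struct irq_data *d)
    {
            hw_mask_line(d->irq - IRQ_BASE_EXAMPLE);        /* placeholder register write */
    }

    static void example_unmask(struct irq_data *d)
    {
            hw_unmask_line(d->irq - IRQ_BASE_EXAMPLE);      /* placeholder register write */
    }

    static struct irq_chip example_pic = {
            .name           = "example",
            .irq_mask       = example_mask,
            .irq_unmask     = example_unmask,
    };

    static void __init example_pic_init(unsigned int irq)
    {
            /* per-line setup, as in fpga_init() above */
            irq_set_chip_and_handler(irq, &example_pic, handle_level_irq);
    }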
diff --git a/arch/frv/kernel/irq-mb93093.c b/arch/frv/kernel/irq-mb93093.c index e45209031873..4d4ad09d3c91 100644 --- a/arch/frv/kernel/irq-mb93093.c +++ b/arch/frv/kernel/irq-mb93093.c | |||
@@ -35,45 +35,44 @@ | |||
35 | /* | 35 | /* |
36 | * off-CPU FPGA PIC operations | 36 | * off-CPU FPGA PIC operations |
37 | */ | 37 | */ |
38 | static void frv_fpga_mask(unsigned int irq) | 38 | static void frv_fpga_mask(struct irq_data *d) |
39 | { | 39 | { |
40 | uint16_t imr = __get_IMR(); | 40 | uint16_t imr = __get_IMR(); |
41 | 41 | ||
42 | imr |= 1 << (irq - IRQ_BASE_FPGA); | 42 | imr |= 1 << (d->irq - IRQ_BASE_FPGA); |
43 | __set_IMR(imr); | 43 | __set_IMR(imr); |
44 | } | 44 | } |
45 | 45 | ||
46 | static void frv_fpga_ack(unsigned int irq) | 46 | static void frv_fpga_ack(struct irq_data *d) |
47 | { | 47 | { |
48 | __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); | 48 | __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); |
49 | } | 49 | } |
50 | 50 | ||
51 | static void frv_fpga_mask_ack(unsigned int irq) | 51 | static void frv_fpga_mask_ack(struct irq_data *d) |
52 | { | 52 | { |
53 | uint16_t imr = __get_IMR(); | 53 | uint16_t imr = __get_IMR(); |
54 | 54 | ||
55 | imr |= 1 << (irq - IRQ_BASE_FPGA); | 55 | imr |= 1 << (d->irq - IRQ_BASE_FPGA); |
56 | __set_IMR(imr); | 56 | __set_IMR(imr); |
57 | 57 | ||
58 | __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); | 58 | __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); |
59 | } | 59 | } |
60 | 60 | ||
61 | static void frv_fpga_unmask(unsigned int irq) | 61 | static void frv_fpga_unmask(struct irq_data *d) |
62 | { | 62 | { |
63 | uint16_t imr = __get_IMR(); | 63 | uint16_t imr = __get_IMR(); |
64 | 64 | ||
65 | imr &= ~(1 << (irq - IRQ_BASE_FPGA)); | 65 | imr &= ~(1 << (d->irq - IRQ_BASE_FPGA)); |
66 | 66 | ||
67 | __set_IMR(imr); | 67 | __set_IMR(imr); |
68 | } | 68 | } |
69 | 69 | ||
70 | static struct irq_chip frv_fpga_pic = { | 70 | static struct irq_chip frv_fpga_pic = { |
71 | .name = "mb93093", | 71 | .name = "mb93093", |
72 | .ack = frv_fpga_ack, | 72 | .irq_ack = frv_fpga_ack, |
73 | .mask = frv_fpga_mask, | 73 | .irq_mask = frv_fpga_mask, |
74 | .mask_ack = frv_fpga_mask_ack, | 74 | .irq_mask_ack = frv_fpga_mask_ack, |
75 | .unmask = frv_fpga_unmask, | 75 | .irq_unmask = frv_fpga_unmask, |
76 | .end = frv_fpga_end, | ||
77 | }; | 76 | }; |
78 | 77 | ||
79 | /* | 78 | /* |
@@ -94,7 +93,7 @@ static irqreturn_t fpga_interrupt(int irq, void *_mask) | |||
94 | irq = 31 - irq; | 93 | irq = 31 - irq; |
95 | mask &= ~(1 << irq); | 94 | mask &= ~(1 << irq); |
96 | 95 | ||
97 | generic_irq_handle(IRQ_BASE_FPGA + irq); | 96 | generic_handle_irq(IRQ_BASE_FPGA + irq); |
98 | } | 97 | } |
99 | 98 | ||
100 | return IRQ_HANDLED; | 99 | return IRQ_HANDLED; |
@@ -125,7 +124,7 @@ void __init fpga_init(void) | |||
125 | __clr_IFR(0x0000); | 124 | __clr_IFR(0x0000); |
126 | 125 | ||
127 | for (irq = IRQ_BASE_FPGA + 8; irq <= IRQ_BASE_FPGA + 10; irq++) | 126 | for (irq = IRQ_BASE_FPGA + 8; irq <= IRQ_BASE_FPGA + 10; irq++) |
128 | set_irq_chip_and_handler(irq, &frv_fpga_pic, handle_edge_irq); | 127 | irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_edge_irq); |
129 | 128 | ||
130 | /* the FPGA drives external IRQ input #2 on the CPU PIC */ | 129 | /* the FPGA drives external IRQ input #2 on the CPU PIC */ |
131 | setup_irq(IRQ_CPU_EXTERNAL2, &fpga_irq[0]); | 130 | setup_irq(IRQ_CPU_EXTERNAL2, &fpga_irq[0]); |
diff --git a/arch/frv/kernel/irq-mb93493.c b/arch/frv/kernel/irq-mb93493.c index ba55ecdfb245..4d034c7840c9 100644 --- a/arch/frv/kernel/irq-mb93493.c +++ b/arch/frv/kernel/irq-mb93493.c | |||
@@ -45,46 +45,46 @@ | |||
45 | * daughter board PIC operations | 45 | * daughter board PIC operations |
46 | * - there is no way to ACK interrupts in the MB93493 chip | 46 | * - there is no way to ACK interrupts in the MB93493 chip |
47 | */ | 47 | */ |
48 | static void frv_mb93493_mask(unsigned int irq) | 48 | static void frv_mb93493_mask(struct irq_data *d) |
49 | { | 49 | { |
50 | uint32_t iqsr; | 50 | uint32_t iqsr; |
51 | volatile void *piqsr; | 51 | volatile void *piqsr; |
52 | 52 | ||
53 | if (IRQ_ROUTING & (1 << (irq - IRQ_BASE_MB93493))) | 53 | if (IRQ_ROUTING & (1 << (d->irq - IRQ_BASE_MB93493))) |
54 | piqsr = __addr_MB93493_IQSR(1); | 54 | piqsr = __addr_MB93493_IQSR(1); |
55 | else | 55 | else |
56 | piqsr = __addr_MB93493_IQSR(0); | 56 | piqsr = __addr_MB93493_IQSR(0); |
57 | 57 | ||
58 | iqsr = readl(piqsr); | 58 | iqsr = readl(piqsr); |
59 | iqsr &= ~(1 << (irq - IRQ_BASE_MB93493 + 16)); | 59 | iqsr &= ~(1 << (d->irq - IRQ_BASE_MB93493 + 16)); |
60 | writel(iqsr, piqsr); | 60 | writel(iqsr, piqsr); |
61 | } | 61 | } |
62 | 62 | ||
63 | static void frv_mb93493_ack(unsigned int irq) | 63 | static void frv_mb93493_ack(struct irq_data *d) |
64 | { | 64 | { |
65 | } | 65 | } |
66 | 66 | ||
67 | static void frv_mb93493_unmask(unsigned int irq) | 67 | static void frv_mb93493_unmask(struct irq_data *d) |
68 | { | 68 | { |
69 | uint32_t iqsr; | 69 | uint32_t iqsr; |
70 | volatile void *piqsr; | 70 | volatile void *piqsr; |
71 | 71 | ||
72 | if (IRQ_ROUTING & (1 << (irq - IRQ_BASE_MB93493))) | 72 | if (IRQ_ROUTING & (1 << (d->irq - IRQ_BASE_MB93493))) |
73 | piqsr = __addr_MB93493_IQSR(1); | 73 | piqsr = __addr_MB93493_IQSR(1); |
74 | else | 74 | else |
75 | piqsr = __addr_MB93493_IQSR(0); | 75 | piqsr = __addr_MB93493_IQSR(0); |
76 | 76 | ||
77 | iqsr = readl(piqsr); | 77 | iqsr = readl(piqsr); |
78 | iqsr |= 1 << (irq - IRQ_BASE_MB93493 + 16); | 78 | iqsr |= 1 << (d->irq - IRQ_BASE_MB93493 + 16); |
79 | writel(iqsr, piqsr); | 79 | writel(iqsr, piqsr); |
80 | } | 80 | } |
81 | 81 | ||
82 | static struct irq_chip frv_mb93493_pic = { | 82 | static struct irq_chip frv_mb93493_pic = { |
83 | .name = "mb93093", | 83 | .name = "mb93093", |
84 | .ack = frv_mb93493_ack, | 84 | .irq_ack = frv_mb93493_ack, |
85 | .mask = frv_mb93493_mask, | 85 | .irq_mask = frv_mb93493_mask, |
86 | .mask_ack = frv_mb93493_mask, | 86 | .irq_mask_ack = frv_mb93493_mask, |
87 | .unmask = frv_mb93493_unmask, | 87 | .irq_unmask = frv_mb93493_unmask, |
88 | }; | 88 | }; |
89 | 89 | ||
90 | /* | 90 | /* |
@@ -139,7 +139,8 @@ void __init mb93493_init(void) | |||
139 | int irq; | 139 | int irq; |
140 | 140 | ||
141 | for (irq = IRQ_BASE_MB93493 + 0; irq <= IRQ_BASE_MB93493 + 10; irq++) | 141 | for (irq = IRQ_BASE_MB93493 + 0; irq <= IRQ_BASE_MB93493 + 10; irq++) |
142 | set_irq_chip_and_handler(irq, &frv_mb93493_pic, handle_edge_irq); | 142 | irq_set_chip_and_handler(irq, &frv_mb93493_pic, |
143 | handle_edge_irq); | ||
143 | 144 | ||
144 | /* the MB93493 drives external IRQ inputs on the CPU PIC */ | 145 | /* the MB93493 drives external IRQ inputs on the CPU PIC */ |
145 | setup_irq(IRQ_CPU_MB93493_0, &mb93493_irq[0]); | 146 | setup_irq(IRQ_CPU_MB93493_0, &mb93493_irq[0]); |
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index 625136625a7f..a5f624a9f559 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c | |||
@@ -47,89 +47,45 @@ extern void __init mb93493_init(void); | |||
47 | 47 | ||
48 | atomic_t irq_err_count; | 48 | atomic_t irq_err_count; |
49 | 49 | ||
50 | /* | 50 | int arch_show_interrupts(struct seq_file *p, int prec) |
51 | * Generic, controller-independent functions: | ||
52 | */ | ||
53 | int show_interrupts(struct seq_file *p, void *v) | ||
54 | { | 51 | { |
55 | int i = *(loff_t *) v, cpu; | 52 | seq_printf(p, "%*s: ", prec, "ERR"); |
56 | struct irqaction * action; | 53 | seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); |
57 | unsigned long flags; | ||
58 | |||
59 | if (i == 0) { | ||
60 | char cpuname[12]; | ||
61 | |||
62 | seq_printf(p, " "); | ||
63 | for_each_present_cpu(cpu) { | ||
64 | sprintf(cpuname, "CPU%d", cpu); | ||
65 | seq_printf(p, " %10s", cpuname); | ||
66 | } | ||
67 | seq_putc(p, '\n'); | ||
68 | } | ||
69 | |||
70 | if (i < NR_IRQS) { | ||
71 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
72 | action = irq_desc[i].action; | ||
73 | if (action) { | ||
74 | seq_printf(p, "%3d: ", i); | ||
75 | for_each_present_cpu(cpu) | ||
76 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); | ||
77 | seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); | ||
78 | seq_printf(p, " %s", action->name); | ||
79 | for (action = action->next; | ||
80 | action; | ||
81 | action = action->next) | ||
82 | seq_printf(p, ", %s", action->name); | ||
83 | |||
84 | seq_putc(p, '\n'); | ||
85 | } | ||
86 | |||
87 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
88 | } else if (i == NR_IRQS) { | ||
89 | seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); | ||
90 | } | ||
91 | |||
92 | return 0; | 54 | return 0; |
93 | } | 55 | } |
94 | 56 | ||
95 | /* | 57 | /* |
96 | * on-CPU PIC operations | 58 | * on-CPU PIC operations |
97 | */ | 59 | */ |
98 | static void frv_cpupic_ack(unsigned int irqlevel) | 60 | static void frv_cpupic_ack(struct irq_data *d) |
99 | { | 61 | { |
100 | __clr_RC(irqlevel); | 62 | __clr_RC(d->irq); |
101 | __clr_IRL(); | 63 | __clr_IRL(); |
102 | } | 64 | } |
103 | 65 | ||
104 | static void frv_cpupic_mask(unsigned int irqlevel) | 66 | static void frv_cpupic_mask(struct irq_data *d) |
105 | { | 67 | { |
106 | __set_MASK(irqlevel); | 68 | __set_MASK(d->irq); |
107 | } | 69 | } |
108 | 70 | ||
109 | static void frv_cpupic_mask_ack(unsigned int irqlevel) | 71 | static void frv_cpupic_mask_ack(struct irq_data *d) |
110 | { | 72 | { |
111 | __set_MASK(irqlevel); | 73 | __set_MASK(d->irq); |
112 | __clr_RC(irqlevel); | 74 | __clr_RC(d->irq); |
113 | __clr_IRL(); | 75 | __clr_IRL(); |
114 | } | 76 | } |
115 | 77 | ||
116 | static void frv_cpupic_unmask(unsigned int irqlevel) | 78 | static void frv_cpupic_unmask(struct irq_data *d) |
117 | { | ||
118 | __clr_MASK(irqlevel); | ||
119 | } | ||
120 | |||
121 | static void frv_cpupic_end(unsigned int irqlevel) | ||
122 | { | 79 | { |
123 | __clr_MASK(irqlevel); | 80 | __clr_MASK(d->irq); |
124 | } | 81 | } |
125 | 82 | ||
126 | static struct irq_chip frv_cpu_pic = { | 83 | static struct irq_chip frv_cpu_pic = { |
127 | .name = "cpu", | 84 | .name = "cpu", |
128 | .ack = frv_cpupic_ack, | 85 | .irq_ack = frv_cpupic_ack, |
129 | .mask = frv_cpupic_mask, | 86 | .irq_mask = frv_cpupic_mask, |
130 | .mask_ack = frv_cpupic_mask_ack, | 87 | .irq_mask_ack = frv_cpupic_mask_ack, |
131 | .unmask = frv_cpupic_unmask, | 88 | .irq_unmask = frv_cpupic_unmask, |
132 | .end = frv_cpupic_end, | ||
133 | }; | 89 | }; |
134 | 90 | ||
135 | /* | 91 | /* |
@@ -161,10 +117,10 @@ void __init init_IRQ(void) | |||
161 | int level; | 117 | int level; |
162 | 118 | ||
163 | for (level = 1; level <= 14; level++) | 119 | for (level = 1; level <= 14; level++) |
164 | set_irq_chip_and_handler(level, &frv_cpu_pic, | 120 | irq_set_chip_and_handler(level, &frv_cpu_pic, |
165 | handle_level_irq); | 121 | handle_level_irq); |
166 | 122 | ||
167 | set_irq_handler(IRQ_CPU_TIMER0, handle_edge_irq); | 123 | irq_set_handler(IRQ_CPU_TIMER0, handle_edge_irq); |
168 | 124 | ||
169 | /* set the trigger levels for internal interrupt sources | 125 | /* set the trigger levels for internal interrupt sources |
170 | * - timers all falling-edge | 126 | * - timers all falling-edge |
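With GENERIC_IRQ_SHOW selected (see the Kconfig hunk above), the generic code prints /proc/interrupts itself and the architecture only contributes its extra rows through arch_show_interrupts(); prec is the column width the core has already computed for the IRQ-number field. Assuming the usual seq_file and atomic headers, the whole hook can be as small as the one in this hunk:

    int arch_show_interrupts(struct seq_file *p, int prec)
    {
            /* one extra row: the spurious/error interrupt counter */
            seq_printf(p, "%*s: %10u\n", prec, "ERR",
                       atomic_read(&irq_err_count));
            return 0;
    }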
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index fcf3b437a2d9..c4ea0925cdbd 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -26,6 +26,7 @@ config IA64 | |||
26 | select GENERIC_IRQ_PROBE | 26 | select GENERIC_IRQ_PROBE |
27 | select GENERIC_PENDING_IRQ if SMP | 27 | select GENERIC_PENDING_IRQ if SMP |
28 | select IRQ_PER_CPU | 28 | select IRQ_PER_CPU |
29 | select GENERIC_IRQ_SHOW | ||
29 | default y | 30 | default y |
30 | help | 31 | help |
31 | The Itanium Processor Family is Intel's 64-bit successor to | 32 | The Itanium Processor Family is Intel's 64-bit successor to |
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c index b272261d77cc..4bd9a63260ee 100644 --- a/arch/ia64/hp/sim/hpsim_irq.c +++ b/arch/ia64/hp/sim/hpsim_irq.c | |||
@@ -11,42 +11,41 @@ | |||
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
12 | 12 | ||
13 | static unsigned int | 13 | static unsigned int |
14 | hpsim_irq_startup (unsigned int irq) | 14 | hpsim_irq_startup(struct irq_data *data) |
15 | { | 15 | { |
16 | return 0; | 16 | return 0; |
17 | } | 17 | } |
18 | 18 | ||
19 | static void | 19 | static void |
20 | hpsim_irq_noop (unsigned int irq) | 20 | hpsim_irq_noop(struct irq_data *data) |
21 | { | 21 | { |
22 | } | 22 | } |
23 | 23 | ||
24 | static int | 24 | static int |
25 | hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b) | 25 | hpsim_set_affinity_noop(struct irq_data *d, const struct cpumask *b, bool f) |
26 | { | 26 | { |
27 | return 0; | 27 | return 0; |
28 | } | 28 | } |
29 | 29 | ||
30 | static struct irq_chip irq_type_hp_sim = { | 30 | static struct irq_chip irq_type_hp_sim = { |
31 | .name = "hpsim", | 31 | .name = "hpsim", |
32 | .startup = hpsim_irq_startup, | 32 | .irq_startup = hpsim_irq_startup, |
33 | .shutdown = hpsim_irq_noop, | 33 | .irq_shutdown = hpsim_irq_noop, |
34 | .enable = hpsim_irq_noop, | 34 | .irq_enable = hpsim_irq_noop, |
35 | .disable = hpsim_irq_noop, | 35 | .irq_disable = hpsim_irq_noop, |
36 | .ack = hpsim_irq_noop, | 36 | .irq_ack = hpsim_irq_noop, |
37 | .end = hpsim_irq_noop, | 37 | .irq_set_affinity = hpsim_set_affinity_noop, |
38 | .set_affinity = hpsim_set_affinity_noop, | ||
39 | }; | 38 | }; |
40 | 39 | ||
41 | void __init | 40 | void __init |
42 | hpsim_irq_init (void) | 41 | hpsim_irq_init (void) |
43 | { | 42 | { |
44 | struct irq_desc *idesc; | ||
45 | int i; | 43 | int i; |
46 | 44 | ||
47 | for (i = 0; i < NR_IRQS; ++i) { | 45 | for_each_active_irq(i) { |
48 | idesc = irq_desc + i; | 46 | struct irq_chip *chip = irq_get_chip(i); |
49 | if (idesc->chip == &no_irq_chip) | 47 | |
50 | idesc->chip = &irq_type_hp_sim; | 48 | if (chip == &no_irq_chip) |
49 | irq_set_chip(i, &irq_type_hp_sim); | ||
51 | } | 50 | } |
52 | } | 51 | } |
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h index bf2e37493e04..a681d02cb324 100644 --- a/arch/ia64/include/asm/hw_irq.h +++ b/arch/ia64/include/asm/hw_irq.h | |||
@@ -151,9 +151,6 @@ static inline void ia64_native_resend_irq(unsigned int vector) | |||
151 | /* | 151 | /* |
152 | * Default implementations for the irq-descriptor API: | 152 | * Default implementations for the irq-descriptor API: |
153 | */ | 153 | */ |
154 | |||
155 | extern struct irq_desc irq_desc[NR_IRQS]; | ||
156 | |||
157 | #ifndef CONFIG_IA64_GENERIC | 154 | #ifndef CONFIG_IA64_GENERIC |
158 | static inline ia64_vector __ia64_irq_to_vector(int irq) | 155 | static inline ia64_vector __ia64_irq_to_vector(int irq) |
159 | { | 156 | { |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 22c38404f539..b0f9afebb146 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -257,7 +257,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask) | |||
257 | } | 257 | } |
258 | 258 | ||
259 | static void | 259 | static void |
260 | nop (unsigned int irq) | 260 | nop (struct irq_data *data) |
261 | { | 261 | { |
262 | /* do nothing... */ | 262 | /* do nothing... */ |
263 | } | 263 | } |
@@ -287,8 +287,9 @@ kexec_disable_iosapic(void) | |||
287 | #endif | 287 | #endif |
288 | 288 | ||
289 | static void | 289 | static void |
290 | mask_irq (unsigned int irq) | 290 | mask_irq (struct irq_data *data) |
291 | { | 291 | { |
292 | unsigned int irq = data->irq; | ||
292 | u32 low32; | 293 | u32 low32; |
293 | int rte_index; | 294 | int rte_index; |
294 | struct iosapic_rte_info *rte; | 295 | struct iosapic_rte_info *rte; |
@@ -305,8 +306,9 @@ mask_irq (unsigned int irq) | |||
305 | } | 306 | } |
306 | 307 | ||
307 | static void | 308 | static void |
308 | unmask_irq (unsigned int irq) | 309 | unmask_irq (struct irq_data *data) |
309 | { | 310 | { |
311 | unsigned int irq = data->irq; | ||
310 | u32 low32; | 312 | u32 low32; |
311 | int rte_index; | 313 | int rte_index; |
312 | struct iosapic_rte_info *rte; | 314 | struct iosapic_rte_info *rte; |
@@ -323,9 +325,11 @@ unmask_irq (unsigned int irq) | |||
323 | 325 | ||
324 | 326 | ||
325 | static int | 327 | static int |
326 | iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) | 328 | iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask, |
329 | bool force) | ||
327 | { | 330 | { |
328 | #ifdef CONFIG_SMP | 331 | #ifdef CONFIG_SMP |
332 | unsigned int irq = data->irq; | ||
329 | u32 high32, low32; | 333 | u32 high32, low32; |
330 | int cpu, dest, rte_index; | 334 | int cpu, dest, rte_index; |
331 | int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; | 335 | int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; |
@@ -379,32 +383,33 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
379 | */ | 383 | */ |
380 | 384 | ||
381 | static unsigned int | 385 | static unsigned int |
382 | iosapic_startup_level_irq (unsigned int irq) | 386 | iosapic_startup_level_irq (struct irq_data *data) |
383 | { | 387 | { |
384 | unmask_irq(irq); | 388 | unmask_irq(data); |
385 | return 0; | 389 | return 0; |
386 | } | 390 | } |
387 | 391 | ||
388 | static void | 392 | static void |
389 | iosapic_unmask_level_irq (unsigned int irq) | 393 | iosapic_unmask_level_irq (struct irq_data *data) |
390 | { | 394 | { |
395 | unsigned int irq = data->irq; | ||
391 | ia64_vector vec = irq_to_vector(irq); | 396 | ia64_vector vec = irq_to_vector(irq); |
392 | struct iosapic_rte_info *rte; | 397 | struct iosapic_rte_info *rte; |
393 | int do_unmask_irq = 0; | 398 | int do_unmask_irq = 0; |
394 | 399 | ||
395 | irq_complete_move(irq); | 400 | irq_complete_move(irq); |
396 | if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { | 401 | if (unlikely(irqd_is_setaffinity_pending(data))) { |
397 | do_unmask_irq = 1; | 402 | do_unmask_irq = 1; |
398 | mask_irq(irq); | 403 | mask_irq(data); |
399 | } else | 404 | } else |
400 | unmask_irq(irq); | 405 | unmask_irq(data); |
401 | 406 | ||
402 | list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) | 407 | list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) |
403 | iosapic_eoi(rte->iosapic->addr, vec); | 408 | iosapic_eoi(rte->iosapic->addr, vec); |
404 | 409 | ||
405 | if (unlikely(do_unmask_irq)) { | 410 | if (unlikely(do_unmask_irq)) { |
406 | move_masked_irq(irq); | 411 | irq_move_masked_irq(data); |
407 | unmask_irq(irq); | 412 | unmask_irq(data); |
408 | } | 413 | } |
409 | } | 414 | } |
410 | 415 | ||
@@ -414,15 +419,15 @@ iosapic_unmask_level_irq (unsigned int irq) | |||
414 | #define iosapic_ack_level_irq nop | 419 | #define iosapic_ack_level_irq nop |
415 | 420 | ||
416 | static struct irq_chip irq_type_iosapic_level = { | 421 | static struct irq_chip irq_type_iosapic_level = { |
417 | .name = "IO-SAPIC-level", | 422 | .name = "IO-SAPIC-level", |
418 | .startup = iosapic_startup_level_irq, | 423 | .irq_startup = iosapic_startup_level_irq, |
419 | .shutdown = iosapic_shutdown_level_irq, | 424 | .irq_shutdown = iosapic_shutdown_level_irq, |
420 | .enable = iosapic_enable_level_irq, | 425 | .irq_enable = iosapic_enable_level_irq, |
421 | .disable = iosapic_disable_level_irq, | 426 | .irq_disable = iosapic_disable_level_irq, |
422 | .ack = iosapic_ack_level_irq, | 427 | .irq_ack = iosapic_ack_level_irq, |
423 | .mask = mask_irq, | 428 | .irq_mask = mask_irq, |
424 | .unmask = iosapic_unmask_level_irq, | 429 | .irq_unmask = iosapic_unmask_level_irq, |
425 | .set_affinity = iosapic_set_affinity | 430 | .irq_set_affinity = iosapic_set_affinity |
426 | }; | 431 | }; |
427 | 432 | ||
428 | /* | 433 | /* |
@@ -430,9 +435,9 @@ static struct irq_chip irq_type_iosapic_level = { | |||
430 | */ | 435 | */ |
431 | 436 | ||
432 | static unsigned int | 437 | static unsigned int |
433 | iosapic_startup_edge_irq (unsigned int irq) | 438 | iosapic_startup_edge_irq (struct irq_data *data) |
434 | { | 439 | { |
435 | unmask_irq(irq); | 440 | unmask_irq(data); |
436 | /* | 441 | /* |
437 | * IOSAPIC simply drops interrupts pended while the | 442 | * IOSAPIC simply drops interrupts pended while the |
438 | * corresponding pin was masked, so we can't know if an | 443 | * corresponding pin was masked, so we can't know if an |
@@ -442,37 +447,25 @@ iosapic_startup_edge_irq (unsigned int irq) | |||
442 | } | 447 | } |
443 | 448 | ||
444 | static void | 449 | static void |
445 | iosapic_ack_edge_irq (unsigned int irq) | 450 | iosapic_ack_edge_irq (struct irq_data *data) |
446 | { | 451 | { |
447 | struct irq_desc *idesc = irq_desc + irq; | 452 | irq_complete_move(data->irq); |
448 | 453 | irq_move_irq(data); | |
449 | irq_complete_move(irq); | ||
450 | move_native_irq(irq); | ||
451 | /* | ||
452 | * Once we have recorded IRQ_PENDING already, we can mask the | ||
453 | * interrupt for real. This prevents IRQ storms from unhandled | ||
454 | * devices. | ||
455 | */ | ||
456 | if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) == | ||
457 | (IRQ_PENDING|IRQ_DISABLED)) | ||
458 | mask_irq(irq); | ||
459 | } | 454 | } |
460 | 455 | ||
461 | #define iosapic_enable_edge_irq unmask_irq | 456 | #define iosapic_enable_edge_irq unmask_irq |
462 | #define iosapic_disable_edge_irq nop | 457 | #define iosapic_disable_edge_irq nop |
463 | #define iosapic_end_edge_irq nop | ||
464 | 458 | ||
465 | static struct irq_chip irq_type_iosapic_edge = { | 459 | static struct irq_chip irq_type_iosapic_edge = { |
466 | .name = "IO-SAPIC-edge", | 460 | .name = "IO-SAPIC-edge", |
467 | .startup = iosapic_startup_edge_irq, | 461 | .irq_startup = iosapic_startup_edge_irq, |
468 | .shutdown = iosapic_disable_edge_irq, | 462 | .irq_shutdown = iosapic_disable_edge_irq, |
469 | .enable = iosapic_enable_edge_irq, | 463 | .irq_enable = iosapic_enable_edge_irq, |
470 | .disable = iosapic_disable_edge_irq, | 464 | .irq_disable = iosapic_disable_edge_irq, |
471 | .ack = iosapic_ack_edge_irq, | 465 | .irq_ack = iosapic_ack_edge_irq, |
472 | .end = iosapic_end_edge_irq, | 466 | .irq_mask = mask_irq, |
473 | .mask = mask_irq, | 467 | .irq_unmask = unmask_irq, |
474 | .unmask = unmask_irq, | 468 | .irq_set_affinity = iosapic_set_affinity |
475 | .set_affinity = iosapic_set_affinity | ||
476 | }; | 469 | }; |
477 | 470 | ||
478 | static unsigned int | 471 | static unsigned int |
@@ -562,8 +555,7 @@ static int | |||
562 | register_intr (unsigned int gsi, int irq, unsigned char delivery, | 555 | register_intr (unsigned int gsi, int irq, unsigned char delivery, |
563 | unsigned long polarity, unsigned long trigger) | 556 | unsigned long polarity, unsigned long trigger) |
564 | { | 557 | { |
565 | struct irq_desc *idesc; | 558 | struct irq_chip *chip, *irq_type; |
566 | struct irq_chip *irq_type; | ||
567 | int index; | 559 | int index; |
568 | struct iosapic_rte_info *rte; | 560 | struct iosapic_rte_info *rte; |
569 | 561 | ||
@@ -610,19 +602,18 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, | |||
610 | 602 | ||
611 | irq_type = iosapic_get_irq_chip(trigger); | 603 | irq_type = iosapic_get_irq_chip(trigger); |
612 | 604 | ||
613 | idesc = irq_desc + irq; | 605 | chip = irq_get_chip(irq); |
614 | if (irq_type != NULL && idesc->chip != irq_type) { | 606 | if (irq_type != NULL && chip != irq_type) { |
615 | if (idesc->chip != &no_irq_chip) | 607 | if (chip != &no_irq_chip) |
616 | printk(KERN_WARNING | 608 | printk(KERN_WARNING |
617 | "%s: changing vector %d from %s to %s\n", | 609 | "%s: changing vector %d from %s to %s\n", |
618 | __func__, irq_to_vector(irq), | 610 | __func__, irq_to_vector(irq), |
619 | idesc->chip->name, irq_type->name); | 611 | chip->name, irq_type->name); |
620 | idesc->chip = irq_type; | 612 | chip = irq_type; |
621 | } | 613 | } |
622 | if (trigger == IOSAPIC_EDGE) | 614 | __irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ? |
623 | __set_irq_handler_unlocked(irq, handle_edge_irq); | 615 | handle_edge_irq : handle_level_irq, |
624 | else | 616 | NULL); |
625 | __set_irq_handler_unlocked(irq, handle_level_irq); | ||
626 | return 0; | 617 | return 0; |
627 | } | 618 | } |
628 | 619 | ||
@@ -732,6 +723,7 @@ iosapic_register_intr (unsigned int gsi, | |||
732 | struct iosapic_rte_info *rte; | 723 | struct iosapic_rte_info *rte; |
733 | u32 low32; | 724 | u32 low32; |
734 | unsigned char dmode; | 725 | unsigned char dmode; |
726 | struct irq_desc *desc; | ||
735 | 727 | ||
736 | /* | 728 | /* |
737 | * If this GSI has already been registered (i.e., it's a | 729 | * If this GSI has already been registered (i.e., it's a |
@@ -759,12 +751,13 @@ iosapic_register_intr (unsigned int gsi, | |||
759 | goto unlock_iosapic_lock; | 751 | goto unlock_iosapic_lock; |
760 | } | 752 | } |
761 | 753 | ||
762 | raw_spin_lock(&irq_desc[irq].lock); | 754 | desc = irq_to_desc(irq); |
755 | raw_spin_lock(&desc->lock); | ||
763 | dest = get_target_cpu(gsi, irq); | 756 | dest = get_target_cpu(gsi, irq); |
764 | dmode = choose_dmode(); | 757 | dmode = choose_dmode(); |
765 | err = register_intr(gsi, irq, dmode, polarity, trigger); | 758 | err = register_intr(gsi, irq, dmode, polarity, trigger); |
766 | if (err < 0) { | 759 | if (err < 0) { |
767 | raw_spin_unlock(&irq_desc[irq].lock); | 760 | raw_spin_unlock(&desc->lock); |
768 | irq = err; | 761 | irq = err; |
769 | goto unlock_iosapic_lock; | 762 | goto unlock_iosapic_lock; |
770 | } | 763 | } |
@@ -783,7 +776,7 @@ iosapic_register_intr (unsigned int gsi, | |||
783 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), | 776 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), |
784 | cpu_logical_id(dest), dest, irq_to_vector(irq)); | 777 | cpu_logical_id(dest), dest, irq_to_vector(irq)); |
785 | 778 | ||
786 | raw_spin_unlock(&irq_desc[irq].lock); | 779 | raw_spin_unlock(&desc->lock); |
787 | unlock_iosapic_lock: | 780 | unlock_iosapic_lock: |
788 | spin_unlock_irqrestore(&iosapic_lock, flags); | 781 | spin_unlock_irqrestore(&iosapic_lock, flags); |
789 | return irq; | 782 | return irq; |
@@ -794,7 +787,6 @@ iosapic_unregister_intr (unsigned int gsi) | |||
794 | { | 787 | { |
795 | unsigned long flags; | 788 | unsigned long flags; |
796 | int irq, index; | 789 | int irq, index; |
797 | struct irq_desc *idesc; | ||
798 | u32 low32; | 790 | u32 low32; |
799 | unsigned long trigger, polarity; | 791 | unsigned long trigger, polarity; |
800 | unsigned int dest; | 792 | unsigned int dest; |
@@ -824,7 +816,6 @@ iosapic_unregister_intr (unsigned int gsi) | |||
824 | if (--rte->refcnt > 0) | 816 | if (--rte->refcnt > 0) |
825 | goto out; | 817 | goto out; |
826 | 818 | ||
827 | idesc = irq_desc + irq; | ||
828 | rte->refcnt = NO_REF_RTE; | 819 | rte->refcnt = NO_REF_RTE; |
829 | 820 | ||
830 | /* Mask the interrupt */ | 821 | /* Mask the interrupt */ |
@@ -848,7 +839,7 @@ iosapic_unregister_intr (unsigned int gsi) | |||
848 | if (iosapic_intr_info[irq].count == 0) { | 839 | if (iosapic_intr_info[irq].count == 0) { |
849 | #ifdef CONFIG_SMP | 840 | #ifdef CONFIG_SMP |
850 | /* Clear affinity */ | 841 | /* Clear affinity */ |
851 | cpumask_setall(idesc->affinity); | 842 | cpumask_setall(irq_get_irq_data(irq)->affinity); |
852 | #endif | 843 | #endif |
853 | /* Clear the interrupt information */ | 844 | /* Clear the interrupt information */ |
854 | iosapic_intr_info[irq].dest = 0; | 845 | iosapic_intr_info[irq].dest = 0; |
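One detail worth calling out in the iosapic conversion above: for level-triggered lines, iosapic_unmask_level_irq() keeps the pin masked across the EOI whenever an affinity change is pending (irqd_is_setaffinity_pending()), and only then calls irq_move_masked_irq() followed by the real unmask. A condensed sketch of that ordering; iosapic_eoi_all_rtes() is a placeholder for the per-RTE EOI loop:

    static void example_unmask_level(struct irq_data *data)
    {
            int do_unmask_irq = 0;

            if (unlikely(irqd_is_setaffinity_pending(data))) {
                    do_unmask_irq = 1;
                    mask_irq(data);         /* stay masked while the move is pending */
            } else
                    unmask_irq(data);

            iosapic_eoi_all_rtes(data);     /* placeholder for the EOI writes */

            if (unlikely(do_unmask_irq)) {
                    irq_move_masked_irq(data);      /* safe: the line is still masked */
                    unmask_irq(data);
            }
    }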
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 94ee9d067cbd..ad69606613eb 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -53,47 +53,9 @@ atomic_t irq_err_count; | |||
53 | /* | 53 | /* |
54 | * /proc/interrupts printing: | 54 | * /proc/interrupts printing: |
55 | */ | 55 | */ |
56 | 56 | int arch_show_interrupts(struct seq_file *p, int prec) | |
57 | int show_interrupts(struct seq_file *p, void *v) | ||
58 | { | 57 | { |
59 | int i = *(loff_t *) v, j; | 58 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
60 | struct irqaction * action; | ||
61 | unsigned long flags; | ||
62 | |||
63 | if (i == 0) { | ||
64 | char cpuname[16]; | ||
65 | seq_printf(p, " "); | ||
66 | for_each_online_cpu(j) { | ||
67 | snprintf(cpuname, 10, "CPU%d", j); | ||
68 | seq_printf(p, "%10s ", cpuname); | ||
69 | } | ||
70 | seq_putc(p, '\n'); | ||
71 | } | ||
72 | |||
73 | if (i < NR_IRQS) { | ||
74 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
75 | action = irq_desc[i].action; | ||
76 | if (!action) | ||
77 | goto skip; | ||
78 | seq_printf(p, "%3d: ",i); | ||
79 | #ifndef CONFIG_SMP | ||
80 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
81 | #else | ||
82 | for_each_online_cpu(j) { | ||
83 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
84 | } | ||
85 | #endif | ||
86 | seq_printf(p, " %14s", irq_desc[i].chip->name); | ||
87 | seq_printf(p, " %s", action->name); | ||
88 | |||
89 | for (action=action->next; action; action = action->next) | ||
90 | seq_printf(p, ", %s", action->name); | ||
91 | |||
92 | seq_putc(p, '\n'); | ||
93 | skip: | ||
94 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
95 | } else if (i == NR_IRQS) | ||
96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | ||
97 | return 0; | 59 | return 0; |
98 | } | 60 | } |
99 | 61 | ||
@@ -103,7 +65,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; | |||
103 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | 65 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) |
104 | { | 66 | { |
105 | if (irq < NR_IRQS) { | 67 | if (irq < NR_IRQS) { |
106 | cpumask_copy(irq_desc[irq].affinity, | 68 | cpumask_copy(irq_get_irq_data(irq)->affinity, |
107 | cpumask_of(cpu_logical_id(hwid))); | 69 | cpumask_of(cpu_logical_id(hwid))); |
108 | irq_redir[irq] = (char) (redir & 0xff); | 70 | irq_redir[irq] = (char) (redir & 0xff); |
109 | } | 71 | } |
@@ -130,13 +92,14 @@ unsigned int vectors_in_migration[NR_IRQS]; | |||
130 | */ | 92 | */ |
131 | static void migrate_irqs(void) | 93 | static void migrate_irqs(void) |
132 | { | 94 | { |
133 | struct irq_desc *desc; | ||
134 | int irq, new_cpu; | 95 | int irq, new_cpu; |
135 | 96 | ||
136 | for (irq=0; irq < NR_IRQS; irq++) { | 97 | for (irq=0; irq < NR_IRQS; irq++) { |
137 | desc = irq_desc + irq; | 98 | struct irq_desc *desc = irq_to_desc(irq); |
99 | struct irq_data *data = irq_desc_get_irq_data(desc); | ||
100 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
138 | 101 | ||
139 | if (desc->status == IRQ_DISABLED) | 102 | if (irqd_irq_disabled(data)) |
140 | continue; | 103 | continue; |
141 | 104 | ||
142 | /* | 105 | /* |
@@ -145,10 +108,10 @@ static void migrate_irqs(void) | |||
145 | * tell CPU not to respond to these local intr sources. | 108 | * tell CPU not to respond to these local intr sources. |
146 | * such as ITV,CPEI,MCA etc. | 109 | * such as ITV,CPEI,MCA etc. |
147 | */ | 110 | */ |
148 | if (desc->status == IRQ_PER_CPU) | 111 | if (irqd_is_per_cpu(data)) |
149 | continue; | 112 | continue; |
150 | 113 | ||
151 | if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask) | 114 | if (cpumask_any_and(data->affinity, cpu_online_mask) |
152 | >= nr_cpu_ids) { | 115 | >= nr_cpu_ids) { |
153 | /* | 116 | /* |
154 | * Save it for phase 2 processing | 117 | * Save it for phase 2 processing |
@@ -160,16 +123,16 @@ static void migrate_irqs(void) | |||
160 | /* | 123 | /* |
161 | * All three are essential, currently WARN_ON.. maybe panic? | 124 | * All three are essential, currently WARN_ON.. maybe panic? |
162 | */ | 125 | */ |
163 | if (desc->chip && desc->chip->disable && | 126 | if (chip && chip->irq_disable && |
164 | desc->chip->enable && desc->chip->set_affinity) { | 127 | chip->irq_enable && chip->irq_set_affinity) { |
165 | desc->chip->disable(irq); | 128 | chip->irq_disable(data); |
166 | desc->chip->set_affinity(irq, | 129 | chip->irq_set_affinity(data, |
167 | cpumask_of(new_cpu)); | 130 | cpumask_of(new_cpu), false); |
168 | desc->chip->enable(irq); | 131 | chip->irq_enable(data); |
169 | } else { | 132 | } else { |
170 | WARN_ON((!(desc->chip) || !(desc->chip->disable) || | 133 | WARN_ON((!chip || !chip->irq_disable || |
171 | !(desc->chip->enable) || | 134 | !chip->irq_enable || |
172 | !(desc->chip->set_affinity))); | 135 | !chip->irq_set_affinity)); |
173 | } | 136 | } |
174 | } | 137 | } |
175 | } | 138 | } |
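The ia64 hunks above replace direct irq_desc[] indexing with the accessor chain of the reworked genirq core: state tests move from desc->status bits to the irqd_*() helpers and the chip callbacks take irq_data. A compact sketch of the lookup-and-retarget pattern migrate_irqs() now uses; new_cpu is illustrative:

    static void example_retarget(unsigned int irq, int new_cpu)
    {
            struct irq_desc *desc = irq_to_desc(irq);
            struct irq_data *data = irq_desc_get_irq_data(desc);
            struct irq_chip *chip = irq_data_get_irq_chip(data);

            if (irqd_irq_disabled(data) || irqd_is_per_cpu(data))
                    return;         /* nothing to move for disabled or per-cpu irqs */

            if (chip && chip->irq_disable && chip->irq_enable &&
                chip->irq_set_affinity) {
                    chip->irq_disable(data);
                    chip->irq_set_affinity(data, cpumask_of(new_cpu), false);
                    chip->irq_enable(data);
            }
    }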
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 38c07b866901..5b704740f160 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c | |||
@@ -343,7 +343,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) | |||
343 | if (irq < 0) | 343 | if (irq < 0) |
344 | continue; | 344 | continue; |
345 | 345 | ||
346 | desc = irq_desc + irq; | 346 | desc = irq_to_desc(irq); |
347 | cfg = irq_cfg + irq; | 347 | cfg = irq_cfg + irq; |
348 | raw_spin_lock(&desc->lock); | 348 | raw_spin_lock(&desc->lock); |
349 | if (!cfg->move_cleanup_count) | 349 | if (!cfg->move_cleanup_count) |
@@ -626,17 +626,15 @@ static struct irqaction tlb_irqaction = { | |||
626 | void | 626 | void |
627 | ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action) | 627 | ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action) |
628 | { | 628 | { |
629 | struct irq_desc *desc; | ||
630 | unsigned int irq; | 629 | unsigned int irq; |
631 | 630 | ||
632 | irq = vec; | 631 | irq = vec; |
633 | BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL)); | 632 | BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL)); |
634 | desc = irq_desc + irq; | 633 | irq_set_status_flags(irq, IRQ_PER_CPU); |
635 | desc->status |= IRQ_PER_CPU; | 634 | irq_set_chip(irq, &irq_type_ia64_lsapic); |
636 | set_irq_chip(irq, &irq_type_ia64_lsapic); | ||
637 | if (action) | 635 | if (action) |
638 | setup_irq(irq, action); | 636 | setup_irq(irq, action); |
639 | set_irq_handler(irq, handle_percpu_irq); | 637 | irq_set_handler(irq, handle_percpu_irq); |
640 | } | 638 | } |
641 | 639 | ||
642 | void __init | 640 | void __init |
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c index fc1549d4564d..1b3a776e5161 100644 --- a/arch/ia64/kernel/irq_lsapic.c +++ b/arch/ia64/kernel/irq_lsapic.c | |||
@@ -15,31 +15,30 @@ | |||
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | 16 | ||
17 | static unsigned int | 17 | static unsigned int |
18 | lsapic_noop_startup (unsigned int irq) | 18 | lsapic_noop_startup (struct irq_data *data) |
19 | { | 19 | { |
20 | return 0; | 20 | return 0; |
21 | } | 21 | } |
22 | 22 | ||
23 | static void | 23 | static void |
24 | lsapic_noop (unsigned int irq) | 24 | lsapic_noop (struct irq_data *data) |
25 | { | 25 | { |
26 | /* nothing to do... */ | 26 | /* nothing to do... */ |
27 | } | 27 | } |
28 | 28 | ||
29 | static int lsapic_retrigger(unsigned int irq) | 29 | static int lsapic_retrigger(struct irq_data *data) |
30 | { | 30 | { |
31 | ia64_resend_irq(irq); | 31 | ia64_resend_irq(data->irq); |
32 | 32 | ||
33 | return 1; | 33 | return 1; |
34 | } | 34 | } |
35 | 35 | ||
36 | struct irq_chip irq_type_ia64_lsapic = { | 36 | struct irq_chip irq_type_ia64_lsapic = { |
37 | .name = "LSAPIC", | 37 | .name = "LSAPIC", |
38 | .startup = lsapic_noop_startup, | 38 | .irq_startup = lsapic_noop_startup, |
39 | .shutdown = lsapic_noop, | 39 | .irq_shutdown = lsapic_noop, |
40 | .enable = lsapic_noop, | 40 | .irq_enable = lsapic_noop, |
41 | .disable = lsapic_noop, | 41 | .irq_disable = lsapic_noop, |
42 | .ack = lsapic_noop, | 42 | .irq_ack = lsapic_noop, |
43 | .end = lsapic_noop, | 43 | .irq_retrigger = lsapic_retrigger, |
44 | .retrigger = lsapic_retrigger, | ||
45 | }; | 44 | }; |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 80d50b83d419..84fb405eee87 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -2125,7 +2125,6 @@ ia64_mca_late_init(void) | |||
2125 | cpe_poll_timer.function = ia64_mca_cpe_poll; | 2125 | cpe_poll_timer.function = ia64_mca_cpe_poll; |
2126 | 2126 | ||
2127 | { | 2127 | { |
2128 | struct irq_desc *desc; | ||
2129 | unsigned int irq; | 2128 | unsigned int irq; |
2130 | 2129 | ||
2131 | if (cpe_vector >= 0) { | 2130 | if (cpe_vector >= 0) { |
@@ -2133,8 +2132,7 @@ ia64_mca_late_init(void) | |||
2133 | irq = local_vector_to_irq(cpe_vector); | 2132 | irq = local_vector_to_irq(cpe_vector); |
2134 | if (irq > 0) { | 2133 | if (irq > 0) { |
2135 | cpe_poll_enabled = 0; | 2134 | cpe_poll_enabled = 0; |
2136 | desc = irq_desc + irq; | 2135 | irq_set_status_flags(irq, IRQ_PER_CPU); |
2137 | desc->status |= IRQ_PER_CPU; | ||
2138 | setup_irq(irq, &mca_cpe_irqaction); | 2136 | setup_irq(irq, &mca_cpe_irqaction); |
2139 | ia64_cpe_irq = irq; | 2137 | ia64_cpe_irq = irq; |
2140 | ia64_mca_register_cpev(cpe_vector); | 2138 | ia64_mca_register_cpev(cpe_vector); |
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 00b19a416eab..009df5434a7a 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c | |||
@@ -12,12 +12,13 @@ | |||
12 | static struct irq_chip ia64_msi_chip; | 12 | static struct irq_chip ia64_msi_chip; |
13 | 13 | ||
14 | #ifdef CONFIG_SMP | 14 | #ifdef CONFIG_SMP |
15 | static int ia64_set_msi_irq_affinity(unsigned int irq, | 15 | static int ia64_set_msi_irq_affinity(struct irq_data *idata, |
16 | const cpumask_t *cpu_mask) | 16 | const cpumask_t *cpu_mask, bool force) |
17 | { | 17 | { |
18 | struct msi_msg msg; | 18 | struct msi_msg msg; |
19 | u32 addr, data; | 19 | u32 addr, data; |
20 | int cpu = first_cpu(*cpu_mask); | 20 | int cpu = first_cpu(*cpu_mask); |
21 | unsigned int irq = idata->irq; | ||
21 | 22 | ||
22 | if (!cpu_online(cpu)) | 23 | if (!cpu_online(cpu)) |
23 | return -1; | 24 | return -1; |
@@ -38,7 +39,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq, | |||
38 | msg.data = data; | 39 | msg.data = data; |
39 | 40 | ||
40 | write_msi_msg(irq, &msg); | 41 | write_msi_msg(irq, &msg); |
41 | cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); | 42 | cpumask_copy(idata->affinity, cpumask_of(cpu)); |
42 | 43 | ||
43 | return 0; | 44 | return 0; |
44 | } | 45 | } |
@@ -55,7 +56,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
55 | if (irq < 0) | 56 | if (irq < 0) |
56 | return irq; | 57 | return irq; |
57 | 58 | ||
58 | set_irq_msi(irq, desc); | 59 | irq_set_msi_desc(irq, desc); |
59 | cpus_and(mask, irq_to_domain(irq), cpu_online_map); | 60 | cpus_and(mask, irq_to_domain(irq), cpu_online_map); |
60 | dest_phys_id = cpu_physical_id(first_cpu(mask)); | 61 | dest_phys_id = cpu_physical_id(first_cpu(mask)); |
61 | vector = irq_to_vector(irq); | 62 | vector = irq_to_vector(irq); |
@@ -74,7 +75,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
74 | MSI_DATA_VECTOR(vector); | 75 | MSI_DATA_VECTOR(vector); |
75 | 76 | ||
76 | write_msi_msg(irq, &msg); | 77 | write_msi_msg(irq, &msg); |
77 | set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); | 78 | irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); |
78 | 79 | ||
79 | return 0; | 80 | return 0; |
80 | } | 81 | } |
@@ -84,16 +85,16 @@ void ia64_teardown_msi_irq(unsigned int irq) | |||
84 | destroy_irq(irq); | 85 | destroy_irq(irq); |
85 | } | 86 | } |
86 | 87 | ||
87 | static void ia64_ack_msi_irq(unsigned int irq) | 88 | static void ia64_ack_msi_irq(struct irq_data *data) |
88 | { | 89 | { |
89 | irq_complete_move(irq); | 90 | irq_complete_move(data->irq); |
90 | move_native_irq(irq); | 91 | irq_move_irq(data); |
91 | ia64_eoi(); | 92 | ia64_eoi(); |
92 | } | 93 | } |
93 | 94 | ||
94 | static int ia64_msi_retrigger_irq(unsigned int irq) | 95 | static int ia64_msi_retrigger_irq(struct irq_data *data) |
95 | { | 96 | { |
96 | unsigned int vector = irq_to_vector(irq); | 97 | unsigned int vector = irq_to_vector(data->irq); |
97 | ia64_resend_irq(vector); | 98 | ia64_resend_irq(vector); |
98 | 99 | ||
99 | return 1; | 100 | return 1; |
@@ -103,14 +104,14 @@ static int ia64_msi_retrigger_irq(unsigned int irq) | |||
103 | * Generic ops used on most IA64 platforms. | 104 | * Generic ops used on most IA64 platforms. |
104 | */ | 105 | */ |
105 | static struct irq_chip ia64_msi_chip = { | 106 | static struct irq_chip ia64_msi_chip = { |
106 | .name = "PCI-MSI", | 107 | .name = "PCI-MSI", |
107 | .irq_mask = mask_msi_irq, | 108 | .irq_mask = mask_msi_irq, |
108 | .irq_unmask = unmask_msi_irq, | 109 | .irq_unmask = unmask_msi_irq, |
109 | .ack = ia64_ack_msi_irq, | 110 | .irq_ack = ia64_ack_msi_irq, |
110 | #ifdef CONFIG_SMP | 111 | #ifdef CONFIG_SMP |
111 | .set_affinity = ia64_set_msi_irq_affinity, | 112 | .irq_set_affinity = ia64_set_msi_irq_affinity, |
112 | #endif | 113 | #endif |
113 | .retrigger = ia64_msi_retrigger_irq, | 114 | .irq_retrigger = ia64_msi_retrigger_irq, |
114 | }; | 115 | }; |
115 | 116 | ||
116 | 117 | ||
@@ -132,8 +133,10 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
132 | 133 | ||
133 | #ifdef CONFIG_DMAR | 134 | #ifdef CONFIG_DMAR |
134 | #ifdef CONFIG_SMP | 135 | #ifdef CONFIG_SMP |
135 | static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | 136 | static int dmar_msi_set_affinity(struct irq_data *data, |
137 | const struct cpumask *mask, bool force) | ||
136 | { | 138 | { |
139 | unsigned int irq = data->irq; | ||
137 | struct irq_cfg *cfg = irq_cfg + irq; | 140 | struct irq_cfg *cfg = irq_cfg + irq; |
138 | struct msi_msg msg; | 141 | struct msi_msg msg; |
139 | int cpu = cpumask_first(mask); | 142 | int cpu = cpumask_first(mask); |
@@ -152,7 +155,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
152 | msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); | 155 | msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); |
153 | 156 | ||
154 | dmar_msi_write(irq, &msg); | 157 | dmar_msi_write(irq, &msg); |
155 | cpumask_copy(irq_desc[irq].affinity, mask); | 158 | cpumask_copy(data->affinity, mask); |
156 | 159 | ||
157 | return 0; | 160 | return 0; |
158 | } | 161 | } |
@@ -162,11 +165,11 @@ static struct irq_chip dmar_msi_type = { | |||
162 | .name = "DMAR_MSI", | 165 | .name = "DMAR_MSI", |
163 | .irq_unmask = dmar_msi_unmask, | 166 | .irq_unmask = dmar_msi_unmask, |
164 | .irq_mask = dmar_msi_mask, | 167 | .irq_mask = dmar_msi_mask, |
165 | .ack = ia64_ack_msi_irq, | 168 | .irq_ack = ia64_ack_msi_irq, |
166 | #ifdef CONFIG_SMP | 169 | #ifdef CONFIG_SMP |
167 | .set_affinity = dmar_msi_set_affinity, | 170 | .irq_set_affinity = dmar_msi_set_affinity, |
168 | #endif | 171 | #endif |
169 | .retrigger = ia64_msi_retrigger_irq, | 172 | .irq_retrigger = ia64_msi_retrigger_irq, |
170 | }; | 173 | }; |
171 | 174 | ||
172 | static int | 175 | static int |
@@ -203,8 +206,8 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
203 | if (ret < 0) | 206 | if (ret < 0) |
204 | return ret; | 207 | return ret; |
205 | dmar_msi_write(irq, &msg); | 208 | dmar_msi_write(irq, &msg); |
206 | set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, | 209 | irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, |
207 | "edge"); | 210 | "edge"); |
208 | return 0; | 211 | return 0; |
209 | } | 212 | } |
210 | #endif /* CONFIG_DMAR */ | 213 | #endif /* CONFIG_DMAR */ |
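Both MSI affinity handlers in msi_ia64.c now follow the irq_set_affinity prototype: they receive the irq_data, the requested mask and a force flag, and record the result in data->affinity instead of indexing irq_desc[]. A reduced sketch, with the hardware reprogramming elided and the function name invented for illustration:

#include <linux/irq.h>
#include <linux/cpumask.h>

static int example_set_affinity(struct irq_data *data,
				const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return -1;

	/* ...retarget the interrupt to 'cpu' in hardware here... */

	cpumask_copy(data->affinity, cpumask_of(cpu));	/* SMP-only field */
	return 0;
}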
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index d003b502a432..44f11ee411c0 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -677,7 +677,7 @@ extern void fixup_irqs(void); | |||
677 | int migrate_platform_irqs(unsigned int cpu) | 677 | int migrate_platform_irqs(unsigned int cpu) |
678 | { | 678 | { |
679 | int new_cpei_cpu; | 679 | int new_cpei_cpu; |
680 | struct irq_desc *desc = NULL; | 680 | struct irq_data *data = NULL; |
681 | const struct cpumask *mask; | 681 | const struct cpumask *mask; |
682 | int retval = 0; | 682 | int retval = 0; |
683 | 683 | ||
@@ -693,20 +693,20 @@ int migrate_platform_irqs(unsigned int cpu) | |||
693 | new_cpei_cpu = any_online_cpu(cpu_online_map); | 693 | new_cpei_cpu = any_online_cpu(cpu_online_map); |
694 | mask = cpumask_of(new_cpei_cpu); | 694 | mask = cpumask_of(new_cpei_cpu); |
695 | set_cpei_target_cpu(new_cpei_cpu); | 695 | set_cpei_target_cpu(new_cpei_cpu); |
696 | desc = irq_desc + ia64_cpe_irq; | 696 | data = irq_get_irq_data(ia64_cpe_irq); |
697 | /* | 697 | /* |
698 | * Switch for now, immediately, we need to do fake intr | 698 | * Switch for now, immediately, we need to do fake intr |
699 | * as other interrupts, but need to study CPEI behaviour with | 699 | * as other interrupts, but need to study CPEI behaviour with |
700 | * polling before making changes. | 700 | * polling before making changes. |
701 | */ | 701 | */ |
702 | if (desc) { | 702 | if (data && data->chip) { |
703 | desc->chip->disable(ia64_cpe_irq); | 703 | data->chip->irq_disable(data); |
704 | desc->chip->set_affinity(ia64_cpe_irq, mask); | 704 | data->chip->irq_set_affinity(data, mask, false); |
705 | desc->chip->enable(ia64_cpe_irq); | 705 | data->chip->irq_enable(data); |
706 | printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu); | 706 | printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu); |
707 | } | 707 | } |
708 | } | 708 | } |
709 | if (!desc) { | 709 | if (!data) { |
710 | printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); | 710 | printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); |
711 | retval = -EBUSY; | 711 | retval = -EBUSY; |
712 | } | 712 | } |
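migrate_platform_irqs() shows the replacement for poking irq_desc[] directly: look up the irq_data and go through the chip's irq_* methods. A condensed sketch (the function name is a placeholder; a real caller should also cope with chips that leave irq_disable or irq_enable NULL):

#include <linux/irq.h>
#include <linux/cpumask.h>

static void example_retarget(unsigned int irq, const struct cpumask *mask)
{
	struct irq_data *data = irq_get_irq_data(irq);

	if (data && data->chip) {
		data->chip->irq_disable(data);
		data->chip->irq_set_affinity(data, mask, false);
		data->chip->irq_enable(data);
	}
}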
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 13c15d968098..7f399f9d99c7 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c | |||
@@ -23,11 +23,9 @@ | |||
23 | #include <asm/sn/sn_sal.h> | 23 | #include <asm/sn/sn_sal.h> |
24 | #include <asm/sn/sn_feature_sets.h> | 24 | #include <asm/sn/sn_feature_sets.h> |
25 | 25 | ||
26 | static void force_interrupt(int irq); | ||
27 | static void register_intr_pda(struct sn_irq_info *sn_irq_info); | 26 | static void register_intr_pda(struct sn_irq_info *sn_irq_info); |
28 | static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); | 27 | static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); |
29 | 28 | ||
30 | int sn_force_interrupt_flag = 1; | ||
31 | extern int sn_ioif_inited; | 29 | extern int sn_ioif_inited; |
32 | struct list_head **sn_irq_lh; | 30 | struct list_head **sn_irq_lh; |
33 | static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */ | 31 | static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */ |
@@ -78,62 +76,40 @@ u64 sn_intr_redirect(nasid_t local_nasid, int local_widget, | |||
78 | return ret_stuff.status; | 76 | return ret_stuff.status; |
79 | } | 77 | } |
80 | 78 | ||
81 | static unsigned int sn_startup_irq(unsigned int irq) | 79 | static unsigned int sn_startup_irq(struct irq_data *data) |
82 | { | 80 | { |
83 | return 0; | 81 | return 0; |
84 | } | 82 | } |
85 | 83 | ||
86 | static void sn_shutdown_irq(unsigned int irq) | 84 | static void sn_shutdown_irq(struct irq_data *data) |
87 | { | 85 | { |
88 | } | 86 | } |
89 | 87 | ||
90 | extern void ia64_mca_register_cpev(int); | 88 | extern void ia64_mca_register_cpev(int); |
91 | 89 | ||
92 | static void sn_disable_irq(unsigned int irq) | 90 | static void sn_disable_irq(struct irq_data *data) |
93 | { | 91 | { |
94 | if (irq == local_vector_to_irq(IA64_CPE_VECTOR)) | 92 | if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR)) |
95 | ia64_mca_register_cpev(0); | 93 | ia64_mca_register_cpev(0); |
96 | } | 94 | } |
97 | 95 | ||
98 | static void sn_enable_irq(unsigned int irq) | 96 | static void sn_enable_irq(struct irq_data *data) |
99 | { | 97 | { |
100 | if (irq == local_vector_to_irq(IA64_CPE_VECTOR)) | 98 | if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR)) |
101 | ia64_mca_register_cpev(irq); | 99 | ia64_mca_register_cpev(data->irq); |
102 | } | 100 | } |
103 | 101 | ||
104 | static void sn_ack_irq(unsigned int irq) | 102 | static void sn_ack_irq(struct irq_data *data) |
105 | { | 103 | { |
106 | u64 event_occurred, mask; | 104 | u64 event_occurred, mask; |
105 | unsigned int irq = data->irq & 0xff; | ||
107 | 106 | ||
108 | irq = irq & 0xff; | ||
109 | event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)); | 107 | event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)); |
110 | mask = event_occurred & SH_ALL_INT_MASK; | 108 | mask = event_occurred & SH_ALL_INT_MASK; |
111 | HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask); | 109 | HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask); |
112 | __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); | 110 | __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); |
113 | 111 | ||
114 | move_native_irq(irq); | 112 | irq_move_irq(data); |
115 | } | ||
116 | |||
117 | static void sn_end_irq(unsigned int irq) | ||
118 | { | ||
119 | int ivec; | ||
120 | u64 event_occurred; | ||
121 | |||
122 | ivec = irq & 0xff; | ||
123 | if (ivec == SGI_UART_VECTOR) { | ||
124 | event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED)); | ||
125 | /* If the UART bit is set here, we may have received an | ||
126 | * interrupt from the UART that the driver missed. To | ||
127 | * make sure, we IPI ourselves to force us to look again. | ||
128 | */ | ||
129 | if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { | ||
130 | platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, | ||
131 | IA64_IPI_DM_INT, 0); | ||
132 | } | ||
133 | } | ||
134 | __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs); | ||
135 | if (sn_force_interrupt_flag) | ||
136 | force_interrupt(irq); | ||
137 | } | 113 | } |
138 | 114 | ||
139 | static void sn_irq_info_free(struct rcu_head *head); | 115 | static void sn_irq_info_free(struct rcu_head *head); |
@@ -228,9 +204,11 @@ finish_up: | |||
228 | return new_irq_info; | 204 | return new_irq_info; |
229 | } | 205 | } |
230 | 206 | ||
231 | static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) | 207 | static int sn_set_affinity_irq(struct irq_data *data, |
208 | const struct cpumask *mask, bool force) | ||
232 | { | 209 | { |
233 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; | 210 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; |
211 | unsigned int irq = data->irq; | ||
234 | nasid_t nasid; | 212 | nasid_t nasid; |
235 | int slice; | 213 | int slice; |
236 | 214 | ||
@@ -259,26 +237,25 @@ void sn_set_err_irq_affinity(unsigned int irq) { } | |||
259 | #endif | 237 | #endif |
260 | 238 | ||
261 | static void | 239 | static void |
262 | sn_mask_irq(unsigned int irq) | 240 | sn_mask_irq(struct irq_data *data) |
263 | { | 241 | { |
264 | } | 242 | } |
265 | 243 | ||
266 | static void | 244 | static void |
267 | sn_unmask_irq(unsigned int irq) | 245 | sn_unmask_irq(struct irq_data *data) |
268 | { | 246 | { |
269 | } | 247 | } |
270 | 248 | ||
271 | struct irq_chip irq_type_sn = { | 249 | struct irq_chip irq_type_sn = { |
272 | .name = "SN hub", | 250 | .name = "SN hub", |
273 | .startup = sn_startup_irq, | 251 | .irq_startup = sn_startup_irq, |
274 | .shutdown = sn_shutdown_irq, | 252 | .irq_shutdown = sn_shutdown_irq, |
275 | .enable = sn_enable_irq, | 253 | .irq_enable = sn_enable_irq, |
276 | .disable = sn_disable_irq, | 254 | .irq_disable = sn_disable_irq, |
277 | .ack = sn_ack_irq, | 255 | .irq_ack = sn_ack_irq, |
278 | .end = sn_end_irq, | 256 | .irq_mask = sn_mask_irq, |
279 | .mask = sn_mask_irq, | 257 | .irq_unmask = sn_unmask_irq, |
280 | .unmask = sn_unmask_irq, | 258 | .irq_set_affinity = sn_set_affinity_irq |
281 | .set_affinity = sn_set_affinity_irq | ||
282 | }; | 259 | }; |
283 | 260 | ||
284 | ia64_vector sn_irq_to_vector(int irq) | 261 | ia64_vector sn_irq_to_vector(int irq) |
@@ -296,15 +273,13 @@ unsigned int sn_local_vector_to_irq(u8 vector) | |||
296 | void sn_irq_init(void) | 273 | void sn_irq_init(void) |
297 | { | 274 | { |
298 | int i; | 275 | int i; |
299 | struct irq_desc *base_desc = irq_desc; | ||
300 | 276 | ||
301 | ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR; | 277 | ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR; |
302 | ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR; | 278 | ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR; |
303 | 279 | ||
304 | for (i = 0; i < NR_IRQS; i++) { | 280 | for (i = 0; i < NR_IRQS; i++) { |
305 | if (base_desc[i].chip == &no_irq_chip) { | 281 | if (irq_get_chip(i) == &no_irq_chip) |
306 | base_desc[i].chip = &irq_type_sn; | 282 | irq_set_chip(i, &irq_type_sn); |
307 | } | ||
308 | } | 283 | } |
309 | } | 284 | } |
310 | 285 | ||
@@ -378,7 +353,6 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) | |||
378 | int cpu = nasid_slice_to_cpuid(nasid, slice); | 353 | int cpu = nasid_slice_to_cpuid(nasid, slice); |
379 | #ifdef CONFIG_SMP | 354 | #ifdef CONFIG_SMP |
380 | int cpuphys; | 355 | int cpuphys; |
381 | struct irq_desc *desc; | ||
382 | #endif | 356 | #endif |
383 | 357 | ||
384 | pci_dev_get(pci_dev); | 358 | pci_dev_get(pci_dev); |
@@ -395,12 +369,11 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) | |||
395 | #ifdef CONFIG_SMP | 369 | #ifdef CONFIG_SMP |
396 | cpuphys = cpu_physical_id(cpu); | 370 | cpuphys = cpu_physical_id(cpu); |
397 | set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0); | 371 | set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0); |
398 | desc = irq_to_desc(sn_irq_info->irq_irq); | ||
399 | /* | 372 | /* |
400 | * Affinity was set by the PROM, prevent it from | 373 | * Affinity was set by the PROM, prevent it from |
401 | * being reset by the request_irq() path. | 374 | * being reset by the request_irq() path. |
402 | */ | 375 | */ |
403 | desc->status |= IRQ_AFFINITY_SET; | 376 | irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq)); |
404 | #endif | 377 | #endif |
405 | } | 378 | } |
406 | 379 | ||
@@ -439,25 +412,11 @@ sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info) | |||
439 | pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type]; | 412 | pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type]; |
440 | 413 | ||
441 | /* Don't force an interrupt if the irq has been disabled */ | 414 | /* Don't force an interrupt if the irq has been disabled */ |
442 | if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) && | 415 | if (!irqd_irq_disabled(sn_irq_info->irq_irq) && |
443 | pci_provider && pci_provider->force_interrupt) | 416 | pci_provider && pci_provider->force_interrupt) |
444 | (*pci_provider->force_interrupt)(sn_irq_info); | 417 | (*pci_provider->force_interrupt)(sn_irq_info); |
445 | } | 418 | } |
446 | 419 | ||
447 | static void force_interrupt(int irq) | ||
448 | { | ||
449 | struct sn_irq_info *sn_irq_info; | ||
450 | |||
451 | if (!sn_ioif_inited) | ||
452 | return; | ||
453 | |||
454 | rcu_read_lock(); | ||
455 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) | ||
456 | sn_call_force_intr_provider(sn_irq_info); | ||
457 | |||
458 | rcu_read_unlock(); | ||
459 | } | ||
460 | |||
461 | /* | 420 | /* |
462 | * Check for lost interrupts. If the PIC int_status reg. says that | 421 | * Check for lost interrupts. If the PIC int_status reg. says that |
463 | * an interrupt has been sent, but not handled, and the interrupt | 422 | * an interrupt has been sent, but not handled, and the interrupt |
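In the SN2 code the .end callback, and the force-interrupt machinery it drove, is removed; the ack path now ends with irq_move_irq(data), and sn_irq_init() probes and installs the chip through accessors rather than walking irq_desc[]. A sketch of that init loop, with an invented function name:

#include <linux/irq.h>

static void __init example_claim_unused_irqs(struct irq_chip *chip)
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		if (irq_get_chip(i) == &no_irq_chip)
			irq_set_chip(i, chip);
}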
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index a5e500f02853..2b98b9e088de 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c | |||
@@ -144,16 +144,16 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) | |||
144 | */ | 144 | */ |
145 | msg.data = 0x100 + irq; | 145 | msg.data = 0x100 + irq; |
146 | 146 | ||
147 | set_irq_msi(irq, entry); | 147 | irq_set_msi_desc(irq, entry); |
148 | write_msi_msg(irq, &msg); | 148 | write_msi_msg(irq, &msg); |
149 | set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); | 149 | irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); |
150 | 150 | ||
151 | return 0; | 151 | return 0; |
152 | } | 152 | } |
153 | 153 | ||
154 | #ifdef CONFIG_SMP | 154 | #ifdef CONFIG_SMP |
155 | static int sn_set_msi_irq_affinity(unsigned int irq, | 155 | static int sn_set_msi_irq_affinity(struct irq_data *data, |
156 | const struct cpumask *cpu_mask) | 156 | const struct cpumask *cpu_mask, bool force) |
157 | { | 157 | { |
158 | struct msi_msg msg; | 158 | struct msi_msg msg; |
159 | int slice; | 159 | int slice; |
@@ -164,7 +164,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq, | |||
164 | struct sn_irq_info *sn_irq_info; | 164 | struct sn_irq_info *sn_irq_info; |
165 | struct sn_irq_info *new_irq_info; | 165 | struct sn_irq_info *new_irq_info; |
166 | struct sn_pcibus_provider *provider; | 166 | struct sn_pcibus_provider *provider; |
167 | unsigned int cpu; | 167 | unsigned int cpu, irq = data->irq; |
168 | 168 | ||
169 | cpu = cpumask_first(cpu_mask); | 169 | cpu = cpumask_first(cpu_mask); |
170 | sn_irq_info = sn_msi_info[irq].sn_irq_info; | 170 | sn_irq_info = sn_msi_info[irq].sn_irq_info; |
@@ -206,33 +206,33 @@ static int sn_set_msi_irq_affinity(unsigned int irq, | |||
206 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); | 206 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); |
207 | 207 | ||
208 | write_msi_msg(irq, &msg); | 208 | write_msi_msg(irq, &msg); |
209 | cpumask_copy(irq_desc[irq].affinity, cpu_mask); | 209 | cpumask_copy(data->affinity, cpu_mask); |
210 | 210 | ||
211 | return 0; | 211 | return 0; |
212 | } | 212 | } |
213 | #endif /* CONFIG_SMP */ | 213 | #endif /* CONFIG_SMP */ |
214 | 214 | ||
215 | static void sn_ack_msi_irq(unsigned int irq) | 215 | static void sn_ack_msi_irq(struct irq_data *data) |
216 | { | 216 | { |
217 | move_native_irq(irq); | 217 | irq_move_irq(data); |
218 | ia64_eoi(); | 218 | ia64_eoi(); |
219 | } | 219 | } |
220 | 220 | ||
221 | static int sn_msi_retrigger_irq(unsigned int irq) | 221 | static int sn_msi_retrigger_irq(struct irq_data *data) |
222 | { | 222 | { |
223 | unsigned int vector = irq; | 223 | unsigned int vector = data->irq; |
224 | ia64_resend_irq(vector); | 224 | ia64_resend_irq(vector); |
225 | 225 | ||
226 | return 1; | 226 | return 1; |
227 | } | 227 | } |
228 | 228 | ||
229 | static struct irq_chip sn_msi_chip = { | 229 | static struct irq_chip sn_msi_chip = { |
230 | .name = "PCI-MSI", | 230 | .name = "PCI-MSI", |
231 | .irq_mask = mask_msi_irq, | 231 | .irq_mask = mask_msi_irq, |
232 | .irq_unmask = unmask_msi_irq, | 232 | .irq_unmask = unmask_msi_irq, |
233 | .ack = sn_ack_msi_irq, | 233 | .irq_ack = sn_ack_msi_irq, |
234 | #ifdef CONFIG_SMP | 234 | #ifdef CONFIG_SMP |
235 | .set_affinity = sn_set_msi_irq_affinity, | 235 | .irq_set_affinity = sn_set_msi_irq_affinity, |
236 | #endif | 236 | #endif |
237 | .retrigger = sn_msi_retrigger_irq, | 237 | .irq_retrigger = sn_msi_retrigger_irq, |
238 | }; | 238 | }; |
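The SN MSI setup mirrors the ia64 one: set_irq_msi and set_irq_chip_and_handler become irq_set_msi_desc and irq_set_chip_and_handler, with the MSI message written in between. A compressed sketch of that sequence (message construction omitted; names other than the kernel APIs are placeholders):

#include <linux/irq.h>
#include <linux/msi.h>

static int example_finish_msi_setup(unsigned int irq, struct msi_desc *entry,
				    struct irq_chip *chip, struct msi_msg *msg)
{
	irq_set_msi_desc(irq, entry);
	write_msi_msg(irq, msg);
	irq_set_chip_and_handler(irq, chip, handle_edge_irq);
	return 0;
}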
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c index a3fb7cf9ae1d..108bb858acf2 100644 --- a/arch/ia64/xen/irq_xen.c +++ b/arch/ia64/xen/irq_xen.c | |||
@@ -138,7 +138,6 @@ static void | |||
138 | __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, | 138 | __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, |
139 | struct irqaction *action, int save) | 139 | struct irqaction *action, int save) |
140 | { | 140 | { |
141 | struct irq_desc *desc; | ||
142 | int irq = 0; | 141 | int irq = 0; |
143 | 142 | ||
144 | if (xen_slab_ready) { | 143 | if (xen_slab_ready) { |
@@ -223,8 +222,7 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, | |||
223 | * mark the interrupt for migrations and trigger it | 222 | * mark the interrupt for migrations and trigger it |
224 | * on cpu hotplug. | 223 | * on cpu hotplug. |
225 | */ | 224 | */ |
226 | desc = irq_desc + irq; | 225 | irq_set_status_flags(irq, IRQ_PER_CPU); |
227 | desc->status |= IRQ_PER_CPU; | ||
228 | } | 226 | } |
229 | } | 227 | } |
230 | 228 | ||
diff --git a/arch/m68k/kernel/irq.c b/arch/m68k/kernel/irq.c index c7dd48f37bee..15dbc3e9d20c 100644 --- a/arch/m68k/kernel/irq.c +++ b/arch/m68k/kernel/irq.c | |||
@@ -44,7 +44,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
44 | if (ap) { | 44 | if (ap) { |
45 | seq_printf(p, "%3d: ", irq); | 45 | seq_printf(p, "%3d: ", irq); |
46 | seq_printf(p, "%10u ", kstat_irqs(irq)); | 46 | seq_printf(p, "%10u ", kstat_irqs(irq)); |
47 | seq_printf(p, "%14s ", get_irq_desc_chip(desc)->name); | 47 | seq_printf(p, "%14s ", irq_desc_get_chip(desc)->name); |
48 | 48 | ||
49 | seq_printf(p, "%s", ap->name); | 49 | seq_printf(p, "%s", ap->name); |
50 | for (ap = ap->next; ap; ap = ap->next) | 50 | for (ap = ap->next; ap; ap = ap->next) |
diff --git a/arch/m68k/platform/5249/intc2.c b/arch/m68k/platform/5249/intc2.c index 8f4b63e17366..f343bf7bf5b0 100644 --- a/arch/m68k/platform/5249/intc2.c +++ b/arch/m68k/platform/5249/intc2.c | |||
@@ -51,8 +51,8 @@ static int __init mcf_intc2_init(void) | |||
51 | 51 | ||
52 | /* GPIO interrupt sources */ | 52 | /* GPIO interrupt sources */ |
53 | for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) { | 53 | for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) { |
54 | set_irq_chip(irq, &intc2_irq_gpio_chip); | 54 | irq_set_chip(irq, &intc2_irq_gpio_chip); |
55 | set_irq_handler(irq, handle_edge_irq); | 55 | irq_set_handler(irq, handle_edge_irq); |
56 | } | 56 | } |
57 | 57 | ||
58 | return 0; | 58 | return 0; |
diff --git a/arch/m68k/platform/5272/intc.c b/arch/m68k/platform/5272/intc.c index 969ff0a467c6..43e6e96f087f 100644 --- a/arch/m68k/platform/5272/intc.c +++ b/arch/m68k/platform/5272/intc.c | |||
@@ -145,7 +145,7 @@ static int intc_irq_set_type(struct irq_data *d, unsigned int type) | |||
145 | */ | 145 | */ |
146 | static void intc_external_irq(unsigned int irq, struct irq_desc *desc) | 146 | static void intc_external_irq(unsigned int irq, struct irq_desc *desc) |
147 | { | 147 | { |
148 | get_irq_desc_chip(desc)->irq_ack(&desc->irq_data); | 148 | irq_desc_get_chip(desc)->irq_ack(&desc->irq_data); |
149 | handle_simple_irq(irq, desc); | 149 | handle_simple_irq(irq, desc); |
150 | } | 150 | } |
151 | 151 | ||
@@ -171,16 +171,16 @@ void __init init_IRQ(void) | |||
171 | writel(0x88888888, MCF_MBAR + MCFSIM_ICR4); | 171 | writel(0x88888888, MCF_MBAR + MCFSIM_ICR4); |
172 | 172 | ||
173 | for (irq = 0; (irq < NR_IRQS); irq++) { | 173 | for (irq = 0; (irq < NR_IRQS); irq++) { |
174 | set_irq_chip(irq, &intc_irq_chip); | 174 | irq_set_chip(irq, &intc_irq_chip); |
175 | edge = 0; | 175 | edge = 0; |
176 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) | 176 | if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) |
177 | edge = intc_irqmap[irq - MCFINT_VECBASE].ack; | 177 | edge = intc_irqmap[irq - MCFINT_VECBASE].ack; |
178 | if (edge) { | 178 | if (edge) { |
179 | set_irq_type(irq, IRQ_TYPE_EDGE_RISING); | 179 | irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); |
180 | set_irq_handler(irq, intc_external_irq); | 180 | irq_set_handler(irq, intc_external_irq); |
181 | } else { | 181 | } else { |
182 | set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); | 182 | irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); |
183 | set_irq_handler(irq, handle_level_irq); | 183 | irq_set_handler(irq, handle_level_irq); |
184 | } | 184 | } |
185 | } | 185 | } |
186 | } | 186 | } |
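The m68k/ColdFire init loops are straight renames: set_irq_chip, set_irq_type and set_irq_handler become irq_set_chip, irq_set_irq_type and irq_set_handler with unchanged semantics. The same loop in isolation (range and chip are parameters purely for illustration):

#include <linux/irq.h>

static void __init example_init_level_irqs(struct irq_chip *chip,
					   unsigned int first, unsigned int last)
{
	unsigned int irq;

	for (irq = first; irq <= last; irq++) {
		irq_set_chip(irq, chip);
		irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
		irq_set_handler(irq, handle_level_irq);
	}
}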
diff --git a/arch/m68k/platform/68328/ints.c b/arch/m68k/platform/68328/ints.c index e5631831a200..a90288cf7446 100644 --- a/arch/m68k/platform/68328/ints.c +++ b/arch/m68k/platform/68328/ints.c | |||
@@ -179,8 +179,8 @@ void __init init_IRQ(void) | |||
179 | IMR = ~0; | 179 | IMR = ~0; |
180 | 180 | ||
181 | for (i = 0; (i < NR_IRQS); i++) { | 181 | for (i = 0; (i < NR_IRQS); i++) { |
182 | set_irq_chip(i, &intc_irq_chip); | 182 | irq_set_chip(i, &intc_irq_chip); |
183 | set_irq_handler(i, handle_level_irq); | 183 | irq_set_handler(i, handle_level_irq); |
184 | } | 184 | } |
185 | } | 185 | } |
186 | 186 | ||
diff --git a/arch/m68k/platform/68360/ints.c b/arch/m68k/platform/68360/ints.c index 8de3feb568c6..4af0f4e30f74 100644 --- a/arch/m68k/platform/68360/ints.c +++ b/arch/m68k/platform/68360/ints.c | |||
@@ -132,8 +132,8 @@ void init_IRQ(void) | |||
132 | pquicc->intr_cimr = 0x00000000; | 132 | pquicc->intr_cimr = 0x00000000; |
133 | 133 | ||
134 | for (i = 0; (i < NR_IRQS); i++) { | 134 | for (i = 0; (i < NR_IRQS); i++) { |
135 | set_irq_chip(i, &intc_irq_chip); | 135 | irq_set_chip(i, &intc_irq_chip); |
136 | set_irq_handler(i, handle_level_irq); | 136 | irq_set_handler(i, handle_level_irq); |
137 | } | 137 | } |
138 | } | 138 | } |
139 | 139 | ||
diff --git a/arch/m68k/platform/coldfire/intc-2.c b/arch/m68k/platform/coldfire/intc-2.c index 2cbfbf035db9..74b55cfbc3cb 100644 --- a/arch/m68k/platform/coldfire/intc-2.c +++ b/arch/m68k/platform/coldfire/intc-2.c | |||
@@ -164,7 +164,7 @@ static int intc_irq_set_type(struct irq_data *d, unsigned int type) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | if (tb) | 166 | if (tb) |
167 | set_irq_handler(irq, handle_edge_irq); | 167 | irq_set_handler(irq, handle_edge_irq); |
168 | 168 | ||
169 | irq -= EINT0; | 169 | irq -= EINT0; |
170 | pa = __raw_readw(MCFEPORT_EPPAR); | 170 | pa = __raw_readw(MCFEPORT_EPPAR); |
@@ -204,11 +204,11 @@ void __init init_IRQ(void) | |||
204 | 204 | ||
205 | for (irq = MCFINT_VECBASE; (irq < MCFINT_VECBASE + NR_VECS); irq++) { | 205 | for (irq = MCFINT_VECBASE; (irq < MCFINT_VECBASE + NR_VECS); irq++) { |
206 | if ((irq >= EINT1) && (irq <=EINT7)) | 206 | if ((irq >= EINT1) && (irq <=EINT7)) |
207 | set_irq_chip(irq, &intc_irq_chip_edge_port); | 207 | irq_set_chip(irq, &intc_irq_chip_edge_port); |
208 | else | 208 | else |
209 | set_irq_chip(irq, &intc_irq_chip); | 209 | irq_set_chip(irq, &intc_irq_chip); |
210 | set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); | 210 | irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); |
211 | set_irq_handler(irq, handle_level_irq); | 211 | irq_set_handler(irq, handle_level_irq); |
212 | } | 212 | } |
213 | } | 213 | } |
214 | 214 | ||
diff --git a/arch/m68k/platform/coldfire/intc-simr.c b/arch/m68k/platform/coldfire/intc-simr.c index e642b24ab729..d6a4d9d53e42 100644 --- a/arch/m68k/platform/coldfire/intc-simr.c +++ b/arch/m68k/platform/coldfire/intc-simr.c | |||
@@ -141,7 +141,7 @@ static int intc_irq_set_type(struct irq_data *d, unsigned int type) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | if (tb) | 143 | if (tb) |
144 | set_irq_handler(irq, handle_edge_irq); | 144 | irq_set_handler(irq, handle_edge_irq); |
145 | 145 | ||
146 | ebit = irq2ebit(irq) * 2; | 146 | ebit = irq2ebit(irq) * 2; |
147 | pa = __raw_readw(MCFEPORT_EPPAR); | 147 | pa = __raw_readw(MCFEPORT_EPPAR); |
@@ -181,11 +181,11 @@ void __init init_IRQ(void) | |||
181 | eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0); | 181 | eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0); |
182 | for (irq = MCFINT_VECBASE; (irq < eirq); irq++) { | 182 | for (irq = MCFINT_VECBASE; (irq < eirq); irq++) { |
183 | if ((irq >= EINT1) && (irq <= EINT7)) | 183 | if ((irq >= EINT1) && (irq <= EINT7)) |
184 | set_irq_chip(irq, &intc_irq_chip_edge_port); | 184 | irq_set_chip(irq, &intc_irq_chip_edge_port); |
185 | else | 185 | else |
186 | set_irq_chip(irq, &intc_irq_chip); | 186 | irq_set_chip(irq, &intc_irq_chip); |
187 | set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); | 187 | irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); |
188 | set_irq_handler(irq, handle_level_irq); | 188 | irq_set_handler(irq, handle_level_irq); |
189 | } | 189 | } |
190 | } | 190 | } |
191 | 191 | ||
diff --git a/arch/m68k/platform/coldfire/intc.c b/arch/m68k/platform/coldfire/intc.c index d648081a63f6..c28a6ed6cb23 100644 --- a/arch/m68k/platform/coldfire/intc.c +++ b/arch/m68k/platform/coldfire/intc.c | |||
@@ -143,9 +143,9 @@ void __init init_IRQ(void) | |||
143 | mcf_maskimr(0xffffffff); | 143 | mcf_maskimr(0xffffffff); |
144 | 144 | ||
145 | for (irq = 0; (irq < NR_IRQS); irq++) { | 145 | for (irq = 0; (irq < NR_IRQS); irq++) { |
146 | set_irq_chip(irq, &intc_irq_chip); | 146 | irq_set_chip(irq, &intc_irq_chip); |
147 | set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); | 147 | irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); |
148 | set_irq_handler(irq, handle_level_irq); | 148 | irq_set_handler(irq, handle_level_irq); |
149 | } | 149 | } |
150 | } | 150 | } |
151 | 151 | ||
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 5f0cf0e32653..c49c326e7af1 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -18,6 +18,7 @@ config MICROBLAZE | |||
18 | select HAVE_GENERIC_HARDIRQS | 18 | select HAVE_GENERIC_HARDIRQS |
19 | select GENERIC_IRQ_PROBE | 19 | select GENERIC_IRQ_PROBE |
20 | select GENERIC_HARDIRQS_NO_DEPRECATED | 20 | select GENERIC_HARDIRQS_NO_DEPRECATED |
21 | select GENERIC_IRQ_SHOW | ||
21 | 22 | ||
22 | config SWAP | 23 | config SWAP |
23 | def_bool n | 24 | def_bool n |
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index e4661285118e..5ba7e162833b 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c | |||
@@ -50,7 +50,7 @@ static void intc_enable_or_unmask(struct irq_data *d) | |||
50 | * ack function since the handle_level_irq function | 50 | * ack function since the handle_level_irq function |
51 | * acks the irq before calling the interrupt handler | 51 | * acks the irq before calling the interrupt handler |
52 | */ | 52 | */ |
53 | if (irq_to_desc(d->irq)->status & IRQ_LEVEL) | 53 | if (irqd_is_level_type(d)) |
54 | out_be32(INTC_BASE + IAR, mask); | 54 | out_be32(INTC_BASE + IAR, mask); |
55 | } | 55 | } |
56 | 56 | ||
@@ -157,11 +157,11 @@ void __init init_IRQ(void) | |||
157 | 157 | ||
158 | for (i = 0; i < nr_irq; ++i) { | 158 | for (i = 0; i < nr_irq; ++i) { |
159 | if (intr_type & (0x00000001 << i)) { | 159 | if (intr_type & (0x00000001 << i)) { |
160 | set_irq_chip_and_handler_name(i, &intc_dev, | 160 | irq_set_chip_and_handler_name(i, &intc_dev, |
161 | handle_edge_irq, intc_dev.name); | 161 | handle_edge_irq, intc_dev.name); |
162 | irq_clear_status_flags(i, IRQ_LEVEL); | 162 | irq_clear_status_flags(i, IRQ_LEVEL); |
163 | } else { | 163 | } else { |
164 | set_irq_chip_and_handler_name(i, &intc_dev, | 164 | irq_set_chip_and_handler_name(i, &intc_dev, |
165 | handle_level_irq, intc_dev.name); | 165 | handle_level_irq, intc_dev.name); |
166 | irq_set_status_flags(i, IRQ_LEVEL); | 166 | irq_set_status_flags(i, IRQ_LEVEL); |
167 | } | 167 | } |
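In the MicroBlaze intc the level/edge test no longer peeks at desc->status; the irqd_* helpers answer it from the irq_data. A tiny sketch (the helper name and the ack callback parameter are invented):

#include <linux/irq.h>

static void example_ack_if_level(struct irq_data *d,
				 void (*ack)(struct irq_data *))
{
	/* Replaces: if (irq_to_desc(d->irq)->status & IRQ_LEVEL) */
	if (irqd_is_level_type(d))
		ack(d);
}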
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 098822413729..ce7ac8435d5c 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c | |||
@@ -47,48 +47,6 @@ next_irq: | |||
47 | trace_hardirqs_on(); | 47 | trace_hardirqs_on(); |
48 | } | 48 | } |
49 | 49 | ||
50 | int show_interrupts(struct seq_file *p, void *v) | ||
51 | { | ||
52 | int i = *(loff_t *) v, j; | ||
53 | struct irq_desc *desc; | ||
54 | struct irqaction *action; | ||
55 | unsigned long flags; | ||
56 | |||
57 | if (i == 0) { | ||
58 | seq_printf(p, " "); | ||
59 | for_each_online_cpu(j) | ||
60 | seq_printf(p, "CPU%-8d", j); | ||
61 | seq_putc(p, '\n'); | ||
62 | } | ||
63 | |||
64 | if (i < nr_irq) { | ||
65 | desc = irq_to_desc(i); | ||
66 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
67 | action = desc->action; | ||
68 | if (!action) | ||
69 | goto skip; | ||
70 | seq_printf(p, "%3d: ", i); | ||
71 | #ifndef CONFIG_SMP | ||
72 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
73 | #else | ||
74 | for_each_online_cpu(j) | ||
75 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | ||
76 | #endif | ||
77 | seq_printf(p, " %8s", desc->status & | ||
78 | IRQ_LEVEL ? "level" : "edge"); | ||
79 | seq_printf(p, " %8s", desc->irq_data.chip->name); | ||
80 | seq_printf(p, " %s", action->name); | ||
81 | |||
82 | for (action = action->next; action; action = action->next) | ||
83 | seq_printf(p, ", %s", action->name); | ||
84 | |||
85 | seq_putc(p, '\n'); | ||
86 | skip: | ||
87 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
88 | } | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | /* MS: There is no any advance mapping mechanism. We are using simple 32bit | 50 | /* MS: There is no any advance mapping mechanism. We are using simple 32bit |
93 | intc without any cascades or any connection that's why mapping is 1:1 */ | 51 | intc without any cascades or any connection that's why mapping is 1:1 */ |
94 | unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq) | 52 | unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq) |
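The hand-rolled show_interrupts() can be deleted because the Kconfig hunk above selects GENERIC_IRQ_SHOW, which lets the core print /proc/interrupts. If an arch wants extra rows it can, assuming the weak hook provided by the generic code, override arch_show_interrupts(); shown here doing nothing:

#include <linux/seq_file.h>
#include <linux/interrupt.h>

/* Optional override of the generic code's weak hook; prints nothing extra. */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}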
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 1e01a1253631..53599067d2f9 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c | |||
@@ -237,7 +237,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev) | |||
237 | 237 | ||
238 | virq = irq_create_mapping(NULL, line); | 238 | virq = irq_create_mapping(NULL, line); |
239 | if (virq != NO_IRQ) | 239 | if (virq != NO_IRQ) |
240 | set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); | 240 | irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); |
241 | } else { | 241 | } else { |
242 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", | 242 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", |
243 | oirq.size, oirq.specifier[0], oirq.specifier[1], | 243 | oirq.size, oirq.specifier[0], oirq.specifier[1], |
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c index f91c43a7d5dc..596ad00e7f05 100644 --- a/arch/mips/alchemy/devboards/bcsr.c +++ b/arch/mips/alchemy/devboards/bcsr.c | |||
@@ -142,8 +142,8 @@ void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq) | |||
142 | bcsr_csc_base = csc_start; | 142 | bcsr_csc_base = csc_start; |
143 | 143 | ||
144 | for (irq = csc_start; irq <= csc_end; irq++) | 144 | for (irq = csc_start; irq <= csc_end; irq++) |
145 | set_irq_chip_and_handler_name(irq, &bcsr_irq_type, | 145 | irq_set_chip_and_handler_name(irq, &bcsr_irq_type, |
146 | handle_level_irq, "level"); | 146 | handle_level_irq, "level"); |
147 | 147 | ||
148 | set_irq_chained_handler(hook_irq, bcsr_csc_handler); | 148 | irq_set_chained_handler(hook_irq, bcsr_csc_handler); |
149 | } | 149 | } |
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c index 887619547553..4a8980027ecf 100644 --- a/arch/mips/alchemy/devboards/db1200/setup.c +++ b/arch/mips/alchemy/devboards/db1200/setup.c | |||
@@ -63,20 +63,19 @@ void __init board_setup(void) | |||
63 | static int __init db1200_arch_init(void) | 63 | static int __init db1200_arch_init(void) |
64 | { | 64 | { |
65 | /* GPIO7 is low-level triggered CPLD cascade */ | 65 | /* GPIO7 is low-level triggered CPLD cascade */ |
66 | set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); | 66 | irq_set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); |
67 | bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT); | 67 | bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT); |
68 | 68 | ||
69 | /* insert/eject pairs: one of both is always screaming. To avoid | 69 | /* insert/eject pairs: one of both is always screaming. To avoid |
70 | * issues they must not be automatically enabled when initially | 70 | * issues they must not be automatically enabled when initially |
71 | * requested. | 71 | * requested. |
72 | */ | 72 | */ |
73 | irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN; | 73 | irq_set_status_flags(DB1200_SD0_INSERT_INT, IRQ_NOAUTOEN); |
74 | irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN; | 74 | irq_set_status_flags(DB1200_SD0_EJECT_INT, IRQ_NOAUTOEN); |
75 | irq_to_desc(DB1200_PC0_INSERT_INT)->status |= IRQ_NOAUTOEN; | 75 | irq_set_status_flags(DB1200_PC0_INSERT_INT, IRQ_NOAUTOEN); |
76 | irq_to_desc(DB1200_PC0_EJECT_INT)->status |= IRQ_NOAUTOEN; | 76 | irq_set_status_flags(DB1200_PC0_EJECT_INT, IRQ_NOAUTOEN); |
77 | irq_to_desc(DB1200_PC1_INSERT_INT)->status |= IRQ_NOAUTOEN; | 77 | irq_set_status_flags(DB1200_PC1_INSERT_INT, IRQ_NOAUTOEN); |
78 | irq_to_desc(DB1200_PC1_EJECT_INT)->status |= IRQ_NOAUTOEN; | 78 | irq_set_status_flags(DB1200_PC1_EJECT_INT, IRQ_NOAUTOEN); |
79 | |||
80 | return 0; | 79 | return 0; |
81 | } | 80 | } |
82 | arch_initcall(db1200_arch_init); | 81 | arch_initcall(db1200_arch_init); |
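The DB1200 change swaps direct status manipulation for irq_set_status_flags(..., IRQ_NOAUTOEN), which keeps a line masked across request_irq() until the driver enables it explicitly. A self-contained sketch of that pattern (irq number, handler and name are placeholders):

#include <linux/irq.h>
#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_claim_irq(unsigned int irq)
{
	int ret;

	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	ret = request_irq(irq, example_handler, 0, "example", NULL);
	if (ret)
		return ret;

	enable_irq(irq);	/* unmask only once the device is ready */
	return 0;
}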
diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c index 9e45971343ed..05f120ff90f9 100644 --- a/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c | |||
@@ -215,35 +215,35 @@ void __init board_setup(void) | |||
215 | static int __init db1x00_init_irq(void) | 215 | static int __init db1x00_init_irq(void) |
216 | { | 216 | { |
217 | #if defined(CONFIG_MIPS_MIRAGE) | 217 | #if defined(CONFIG_MIPS_MIRAGE) |
218 | set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */ | 218 | irq_set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */ |
219 | #elif defined(CONFIG_MIPS_DB1550) | 219 | #elif defined(CONFIG_MIPS_DB1550) |
220 | set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ | 220 | irq_set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ |
221 | set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */ | 221 | irq_set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */ |
222 | set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */ | 222 | irq_set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */ |
223 | set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ | 223 | irq_set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ |
224 | set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ | 224 | irq_set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ |
225 | set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ | 225 | irq_set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ |
226 | #elif defined(CONFIG_MIPS_DB1500) | 226 | #elif defined(CONFIG_MIPS_DB1500) |
227 | set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ | 227 | irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ |
228 | set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ | 228 | irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ |
229 | set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ | 229 | irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ |
230 | set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ | 230 | irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ |
231 | set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ | 231 | irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ |
232 | set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ | 232 | irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ |
233 | #elif defined(CONFIG_MIPS_DB1100) | 233 | #elif defined(CONFIG_MIPS_DB1100) |
234 | set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ | 234 | irq_set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ |
235 | set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ | 235 | irq_set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ |
236 | set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ | 236 | irq_set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ |
237 | set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ | 237 | irq_set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ |
238 | set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ | 238 | irq_set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ |
239 | set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ | 239 | irq_set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ |
240 | #elif defined(CONFIG_MIPS_DB1000) | 240 | #elif defined(CONFIG_MIPS_DB1000) |
241 | set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ | 241 | irq_set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */ |
242 | set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ | 242 | irq_set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */ |
243 | set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ | 243 | irq_set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */ |
244 | set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ | 244 | irq_set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */ |
245 | set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ | 245 | irq_set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ |
246 | set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ | 246 | irq_set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */ |
247 | #endif | 247 | #endif |
248 | return 0; | 248 | return 0; |
249 | } | 249 | } |
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c index f6540ec47a64..2d85c4b5be09 100644 --- a/arch/mips/alchemy/devboards/pb1000/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c | |||
@@ -197,7 +197,7 @@ void __init board_setup(void) | |||
197 | 197 | ||
198 | static int __init pb1000_init_irq(void) | 198 | static int __init pb1000_init_irq(void) |
199 | { | 199 | { |
200 | set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW); | 200 | irq_set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW); |
201 | return 0; | 201 | return 0; |
202 | } | 202 | } |
203 | arch_initcall(pb1000_init_irq); | 203 | arch_initcall(pb1000_init_irq); |
diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c index 90dda5f3ecc5..d108fd573aaf 100644 --- a/arch/mips/alchemy/devboards/pb1100/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c | |||
@@ -117,10 +117,10 @@ void __init board_setup(void) | |||
117 | 117 | ||
118 | static int __init pb1100_init_irq(void) | 118 | static int __init pb1100_init_irq(void) |
119 | { | 119 | { |
120 | set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */ | 120 | irq_set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */ |
121 | set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */ | 121 | irq_set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */ |
122 | set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */ | 122 | irq_set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */ |
123 | set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */ | 123 | irq_set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */ |
124 | 124 | ||
125 | return 0; | 125 | return 0; |
126 | } | 126 | } |
diff --git a/arch/mips/alchemy/devboards/pb1200/board_setup.c b/arch/mips/alchemy/devboards/pb1200/board_setup.c index 8b4466f2d44a..6d06b07c2381 100644 --- a/arch/mips/alchemy/devboards/pb1200/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1200/board_setup.c | |||
@@ -142,7 +142,7 @@ static int __init pb1200_init_irq(void) | |||
142 | panic("Game over. Your score is 0."); | 142 | panic("Game over. Your score is 0."); |
143 | } | 143 | } |
144 | 144 | ||
145 | set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); | 145 | irq_set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW); |
146 | bcsr_init_irq(PB1200_INT_BEGIN, PB1200_INT_END, AU1200_GPIO7_INT); | 146 | bcsr_init_irq(PB1200_INT_BEGIN, PB1200_INT_END, AU1200_GPIO7_INT); |
147 | 147 | ||
148 | return 0; | 148 | return 0; |
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c index 9cd9dfa698e7..83f46215eb0c 100644 --- a/arch/mips/alchemy/devboards/pb1500/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c | |||
@@ -134,14 +134,14 @@ void __init board_setup(void) | |||
134 | 134 | ||
135 | static int __init pb1500_init_irq(void) | 135 | static int __init pb1500_init_irq(void) |
136 | { | 136 | { |
137 | set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */ | 137 | irq_set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */ |
138 | set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */ | 138 | irq_set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */ |
139 | set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ | 139 | irq_set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ |
140 | set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); | 140 | irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); |
141 | set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); | 141 | irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); |
142 | set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); | 142 | irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); |
143 | set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); | 143 | irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); |
144 | set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); | 144 | irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); |
145 | 145 | ||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
diff --git a/arch/mips/alchemy/devboards/pb1550/board_setup.c b/arch/mips/alchemy/devboards/pb1550/board_setup.c index 9d7d6edafa8d..b790213848bd 100644 --- a/arch/mips/alchemy/devboards/pb1550/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1550/board_setup.c | |||
@@ -73,9 +73,9 @@ void __init board_setup(void) | |||
73 | 73 | ||
74 | static int __init pb1550_init_irq(void) | 74 | static int __init pb1550_init_irq(void) |
75 | { | 75 | { |
76 | set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); | 76 | irq_set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); |
77 | set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); | 77 | irq_set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); |
78 | set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH); | 78 | irq_set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH); |
79 | 79 | ||
80 | /* enable both PCMCIA card irqs in the shared line */ | 80 | /* enable both PCMCIA card irqs in the shared line */ |
81 | alchemy_gpio2_enable_int(201); | 81 | alchemy_gpio2_enable_int(201); |
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index 40b84b991191..cf436ab679ae 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c | |||
@@ -123,11 +123,11 @@ mtx1_pci_idsel(unsigned int devsel, int assert) | |||
123 | 123 | ||
124 | static int __init mtx1_init_irq(void) | 124 | static int __init mtx1_init_irq(void) |
125 | { | 125 | { |
126 | set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); | 126 | irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); |
127 | set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); | 127 | irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); |
128 | set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); | 128 | irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); |
129 | set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); | 129 | irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); |
130 | set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); | 130 | irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); |
131 | 131 | ||
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index 80c521e5290d..febfb0fb0896 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c | |||
@@ -85,19 +85,19 @@ void __init board_setup(void) | |||
85 | 85 | ||
86 | static int __init xxs1500_init_irq(void) | 86 | static int __init xxs1500_init_irq(void) |
87 | { | 87 | { |
88 | set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); | 88 | irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); |
89 | set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); | 89 | irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); |
90 | set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); | 90 | irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); |
91 | set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); | 91 | irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); |
92 | set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); | 92 | irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); |
93 | set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW); | 93 | irq_set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW); |
94 | 94 | ||
95 | set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); | 95 | irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); |
96 | set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); | 96 | irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); |
97 | set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); | 97 | irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); |
98 | set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); | 98 | irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); |
99 | set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */ | 99 | irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */ |
100 | set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); | 100 | irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); |
101 | 101 | ||
102 | return 0; | 102 | return 0; |
103 | } | 103 | } |
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c index a6484b60642f..03db3daadbd8 100644 --- a/arch/mips/ar7/irq.c +++ b/arch/mips/ar7/irq.c | |||
@@ -119,11 +119,11 @@ static void __init ar7_irq_init(int base) | |||
119 | for (i = 0; i < 40; i++) { | 119 | for (i = 0; i < 40; i++) { |
120 | writel(i, REG(CHNL_OFFSET(i))); | 120 | writel(i, REG(CHNL_OFFSET(i))); |
121 | /* Primary IRQ's */ | 121 | /* Primary IRQ's */ |
122 | set_irq_chip_and_handler(base + i, &ar7_irq_type, | 122 | irq_set_chip_and_handler(base + i, &ar7_irq_type, |
123 | handle_level_irq); | 123 | handle_level_irq); |
124 | /* Secondary IRQ's */ | 124 | /* Secondary IRQ's */ |
125 | if (i < 32) | 125 | if (i < 32) |
126 | set_irq_chip_and_handler(base + i + 40, | 126 | irq_set_chip_and_handler(base + i + 40, |
127 | &ar7_sec_irq_type, | 127 | &ar7_sec_irq_type, |
128 | handle_level_irq); | 128 | handle_level_irq); |
129 | } | 129 | } |
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 7c02bc948a31..ac610d5fe3ba 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c | |||
@@ -124,11 +124,11 @@ static void __init ath79_misc_irq_init(void) | |||
124 | 124 | ||
125 | for (i = ATH79_MISC_IRQ_BASE; | 125 | for (i = ATH79_MISC_IRQ_BASE; |
126 | i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) { | 126 | i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) { |
127 | set_irq_chip_and_handler(i, &ath79_misc_irq_chip, | 127 | irq_set_chip_and_handler(i, &ath79_misc_irq_chip, |
128 | handle_level_irq); | 128 | handle_level_irq); |
129 | } | 129 | } |
130 | 130 | ||
131 | set_irq_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler); | 131 | irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler); |
132 | } | 132 | } |
133 | 133 | ||
134 | asmlinkage void plat_irq_dispatch(void) | 134 | asmlinkage void plat_irq_dispatch(void) |
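The ath79 (and ar7/bcm63xx) hunks keep the cascade structure and only rename the setters: per-source chips and flow handlers via irq_set_chip_and_handler(), and the demultiplexing parent line via irq_set_chained_handler(). A skeleton of that arrangement, with the demux body left as a comment and all names invented:

#include <linux/irq.h>

static void example_demux(unsigned int irq, struct irq_desc *desc)
{
	/* read the controller's cause register, generic_handle_irq() each set bit */
}

static void __init example_init_cascade(struct irq_chip *chip, unsigned int base,
					unsigned int count, unsigned int parent)
{
	unsigned int i;

	for (i = base; i < base + count; i++)
		irq_set_chip_and_handler(i, chip, handle_level_irq);

	irq_set_chained_handler(parent, example_demux);
}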
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index 1691531aa34d..cea6021cb8d7 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c | |||
@@ -230,11 +230,11 @@ void __init arch_init_irq(void) | |||
230 | 230 | ||
231 | mips_cpu_irq_init(); | 231 | mips_cpu_irq_init(); |
232 | for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i) | 232 | for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i) |
233 | set_irq_chip_and_handler(i, &bcm63xx_internal_irq_chip, | 233 | irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip, |
234 | handle_level_irq); | 234 | handle_level_irq); |
235 | 235 | ||
236 | for (i = IRQ_EXT_BASE; i < IRQ_EXT_BASE + 4; ++i) | 236 | for (i = IRQ_EXT_BASE; i < IRQ_EXT_BASE + 4; ++i) |
237 | set_irq_chip_and_handler(i, &bcm63xx_external_irq_chip, | 237 | irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip, |
238 | handle_edge_irq); | 238 | handle_edge_irq); |
239 | 239 | ||
240 | setup_irq(IRQ_MIPS_BASE + 2, &cpu_ip2_cascade_action); | 240 | setup_irq(IRQ_MIPS_BASE + 2, &cpu_ip2_cascade_action); |
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index ce7500cdf5b7..ffd4ae660f79 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
@@ -3,10 +3,13 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks | 6 | * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks |
7 | */ | 7 | */ |
8 | #include <linux/irq.h> | 8 | |
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/bitops.h> | ||
11 | #include <linux/percpu.h> | ||
12 | #include <linux/irq.h> | ||
10 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
11 | 14 | ||
12 | #include <asm/octeon/octeon.h> | 15 | #include <asm/octeon/octeon.h> |
@@ -14,6 +17,47 @@ | |||
14 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); | 17 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); |
15 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); | 18 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); |
16 | 19 | ||
20 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); | ||
21 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); | ||
22 | |||
23 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; | ||
24 | |||
25 | union octeon_ciu_chip_data { | ||
26 | void *p; | ||
27 | unsigned long l; | ||
28 | struct { | ||
29 | unsigned int line:6; | ||
30 | unsigned int bit:6; | ||
31 | } s; | ||
32 | }; | ||
33 | |||
34 | struct octeon_core_chip_data { | ||
35 | struct mutex core_irq_mutex; | ||
36 | bool current_en; | ||
37 | bool desired_en; | ||
38 | u8 bit; | ||
39 | }; | ||
40 | |||
41 | #define MIPS_CORE_IRQ_LINES 8 | ||
42 | |||
43 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; | ||
44 | |||
45 | static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit, | ||
46 | struct irq_chip *chip, | ||
47 | irq_flow_handler_t handler) | ||
48 | { | ||
49 | union octeon_ciu_chip_data cd; | ||
50 | |||
51 | irq_set_chip_and_handler(irq, chip, handler); | ||
52 | |||
53 | cd.l = 0; | ||
54 | cd.s.line = line; | ||
55 | cd.s.bit = bit; | ||
56 | |||
57 | irq_set_chip_data(irq, cd.p); | ||
58 | octeon_irq_ciu_to_irq[line][bit] = irq; | ||
59 | } | ||
60 | |||
17 | static int octeon_coreid_for_cpu(int cpu) | 61 | static int octeon_coreid_for_cpu(int cpu) |
18 | { | 62 | { |
19 | #ifdef CONFIG_SMP | 63 | #ifdef CONFIG_SMP |
@@ -23,9 +67,20 @@ static int octeon_coreid_for_cpu(int cpu) | |||
23 | #endif | 67 | #endif |
24 | } | 68 | } |
25 | 69 | ||
26 | static void octeon_irq_core_ack(unsigned int irq) | 70 | static int octeon_cpu_for_coreid(int coreid) |
71 | { | ||
72 | #ifdef CONFIG_SMP | ||
73 | return cpu_number_map(coreid); | ||
74 | #else | ||
75 | return smp_processor_id(); | ||
76 | #endif | ||
77 | } | ||
78 | |||
79 | static void octeon_irq_core_ack(struct irq_data *data) | ||
27 | { | 80 | { |
28 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 81 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
82 | unsigned int bit = cd->bit; | ||
83 | |||
29 | /* | 84 | /* |
30 | * We don't need to disable IRQs to make these atomic since | 85 | * We don't need to disable IRQs to make these atomic since |
31 | * they are already disabled earlier in the low level | 86 | * they are already disabled earlier in the low level |
@@ -37,131 +92,121 @@ static void octeon_irq_core_ack(unsigned int irq) | |||
37 | clear_c0_cause(0x100 << bit); | 92 | clear_c0_cause(0x100 << bit); |
38 | } | 93 | } |
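A side note (editor's sketch, not part of the patch): this is the conversion pattern repeated through the rest of the file -- the handler no longer computes its bit from a bare irq number (irq - OCTEON_IRQ_SW0) but reads it from the chip data carried by struct irq_data. The stand-alone model below uses invented fake_* types in place of the kernel structures and an ordinary variable in place of the CP0 Cause register.

#include <stdio.h>

struct core_chip_data {
    unsigned char bit;          /* which of the 8 core interrupt lines */
};

struct fake_irq_data {
    void *chip_data;            /* stands in for irq_data_get_irq_chip_data() */
};

static unsigned long fake_c0_cause = 0xff00;    /* pretend IP0..IP7 are all pending */

/* New-style ack: everything the handler needs travels with its argument. */
static void core_ack(struct fake_irq_data *data)
{
    struct core_chip_data *cd = data->chip_data;

    fake_c0_cause &= ~(0x100UL << cd->bit);     /* models clear_c0_cause(0x100 << bit) */
}

int main(void)
{
    struct core_chip_data cd = { .bit = 3 };
    struct fake_irq_data d = { .chip_data = &cd };

    core_ack(&d);
    printf("cause after ack: %#lx\n", fake_c0_cause);   /* 0xf700: bit 11 cleared */
    return 0;
}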
39 | 94 | ||
40 | static void octeon_irq_core_eoi(unsigned int irq) | 95 | static void octeon_irq_core_eoi(struct irq_data *data) |
41 | { | 96 | { |
42 | struct irq_desc *desc = irq_to_desc(irq); | 97 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
43 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 98 | |
44 | /* | ||
45 | * If an IRQ is being processed while we are disabling it the | ||
46 | * handler will attempt to unmask the interrupt after it has | ||
47 | * been disabled. | ||
48 | */ | ||
49 | if ((unlikely(desc->status & IRQ_DISABLED))) | ||
50 | return; | ||
51 | /* | 99 | /* |
52 | * We don't need to disable IRQs to make these atomic since | 100 | * We don't need to disable IRQs to make these atomic since |
53 | * they are already disabled earlier in the low level | 101 | * they are already disabled earlier in the low level |
54 | * interrupt code. | 102 | * interrupt code. |
55 | */ | 103 | */ |
56 | set_c0_status(0x100 << bit); | 104 | set_c0_status(0x100 << cd->bit); |
57 | } | 105 | } |
58 | 106 | ||
59 | static void octeon_irq_core_enable(unsigned int irq) | 107 | static void octeon_irq_core_set_enable_local(void *arg) |
60 | { | 108 | { |
61 | unsigned long flags; | 109 | struct irq_data *data = arg; |
62 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 110 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
111 | unsigned int mask = 0x100 << cd->bit; | ||
63 | 112 | ||
64 | /* | 113 | /* |
65 | * We need to disable interrupts to make sure our updates are | 114 | * Interrupts are already disabled, so these are atomic. |
66 | * atomic. | ||
67 | */ | 115 | */ |
68 | local_irq_save(flags); | 116 | if (cd->desired_en) |
69 | set_c0_status(0x100 << bit); | 117 | set_c0_status(mask); |
70 | local_irq_restore(flags); | 118 | else |
119 | clear_c0_status(mask); | ||
120 | |||
71 | } | 121 | } |
72 | 122 | ||
73 | static void octeon_irq_core_disable_local(unsigned int irq) | 123 | static void octeon_irq_core_disable(struct irq_data *data) |
74 | { | 124 | { |
75 | unsigned long flags; | 125 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
76 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 126 | cd->desired_en = false; |
77 | /* | ||
78 | * We need to disable interrupts to make sure our updates are | ||
79 | * atomic. | ||
80 | */ | ||
81 | local_irq_save(flags); | ||
82 | clear_c0_status(0x100 << bit); | ||
83 | local_irq_restore(flags); | ||
84 | } | 127 | } |
85 | 128 | ||
86 | static void octeon_irq_core_disable(unsigned int irq) | 129 | static void octeon_irq_core_enable(struct irq_data *data) |
87 | { | 130 | { |
88 | #ifdef CONFIG_SMP | 131 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
89 | on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local, | 132 | cd->desired_en = true; |
90 | (void *) (long) irq, 1); | ||
91 | #else | ||
92 | octeon_irq_core_disable_local(irq); | ||
93 | #endif | ||
94 | } | 133 | } |
95 | 134 | ||
96 | static struct irq_chip octeon_irq_chip_core = { | 135 | static void octeon_irq_core_bus_lock(struct irq_data *data) |
97 | .name = "Core", | 136 | { |
98 | .enable = octeon_irq_core_enable, | 137 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
99 | .disable = octeon_irq_core_disable, | ||
100 | .ack = octeon_irq_core_ack, | ||
101 | .eoi = octeon_irq_core_eoi, | ||
102 | }; | ||
103 | 138 | ||
139 | mutex_lock(&cd->core_irq_mutex); | ||
140 | } | ||
104 | 141 | ||
105 | static void octeon_irq_ciu0_ack(unsigned int irq) | 142 | static void octeon_irq_core_bus_sync_unlock(struct irq_data *data) |
106 | { | 143 | { |
107 | switch (irq) { | 144 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
108 | case OCTEON_IRQ_GMX_DRP0: | 145 | |
109 | case OCTEON_IRQ_GMX_DRP1: | 146 | if (cd->desired_en != cd->current_en) { |
110 | case OCTEON_IRQ_IPD_DRP: | 147 | on_each_cpu(octeon_irq_core_set_enable_local, data, 1); |
111 | case OCTEON_IRQ_KEY_ZERO: | 148 | |
112 | case OCTEON_IRQ_TIMER0: | 149 | cd->current_en = cd->desired_en; |
113 | case OCTEON_IRQ_TIMER1: | ||
114 | case OCTEON_IRQ_TIMER2: | ||
115 | case OCTEON_IRQ_TIMER3: | ||
116 | { | ||
117 | int index = cvmx_get_core_num() * 2; | ||
118 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | ||
119 | /* | ||
120 | * CIU timer type interrupts must be acknowledged by | |||
121 | * writing a '1' bit to their sum0 bit. | ||
122 | */ | ||
123 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | ||
124 | break; | ||
125 | } | ||
126 | default: | ||
127 | break; | ||
128 | } | 150 | } |
129 | 151 | ||
130 | /* | 152 | mutex_unlock(&cd->core_irq_mutex); |
131 | * In order to avoid any locking when accessing the CIU, we | |||
132 | * acknowledge CIU interrupts by disabling all of them. This | ||
133 | * way we can use a per core register and avoid any out of | ||
134 | * core locking requirements. This has the side effect that | |||
135 | * CIU interrupts can't be processed recursively. | ||
136 | * | ||
137 | * We don't need to disable IRQs to make these atomic since | ||
138 | * they are already disabled earlier in the low level | ||
139 | * interrupt code. | ||
140 | */ | ||
141 | clear_c0_status(0x100 << 2); | ||
142 | } | 153 | } |
143 | 154 | ||
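Editor's sketch (illustrative only, not part of the patch): the irq_bus_lock/irq_bus_sync_unlock pair added above lets irq_enable/irq_disable merely record the wanted state; the on_each_cpu() broadcast is issued once, at unlock time, and only when the recorded state differs from what is already programmed. A user-space model of that batching, with a pthread mutex in place of core_irq_mutex and a counter in place of the cross-CPU call (build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct core_chip_data {
    pthread_mutex_t lock;   /* plays the role of core_irq_mutex */
    bool current_en;        /* state actually programmed into the hardware */
    bool desired_en;        /* state requested since bus_lock() */
};

static int broadcasts;      /* counts the stand-in for on_each_cpu() */

static void bus_lock(struct core_chip_data *cd)    { pthread_mutex_lock(&cd->lock); }
static void irq_enable(struct core_chip_data *cd)  { cd->desired_en = true; }
static void irq_disable(struct core_chip_data *cd) { cd->desired_en = false; }

static void bus_sync_unlock(struct core_chip_data *cd)
{
    if (cd->desired_en != cd->current_en) {
        broadcasts++;                   /* would be the on_each_cpu(...) call */
        cd->current_en = cd->desired_en;
    }
    pthread_mutex_unlock(&cd->lock);
}

int main(void)
{
    struct core_chip_data cd = { .lock = PTHREAD_MUTEX_INITIALIZER };

    bus_lock(&cd); irq_enable(&cd); irq_disable(&cd); bus_sync_unlock(&cd);  /* no net change */
    bus_lock(&cd); irq_enable(&cd); bus_sync_unlock(&cd);                    /* one broadcast */

    printf("broadcasts issued: %d\n", broadcasts);      /* prints 1 */
    return 0;
}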
144 | static void octeon_irq_ciu0_eoi(unsigned int irq) | 155 | static struct irq_chip octeon_irq_chip_core = { |
156 | .name = "Core", | ||
157 | .irq_enable = octeon_irq_core_enable, | ||
158 | .irq_disable = octeon_irq_core_disable, | ||
159 | .irq_ack = octeon_irq_core_ack, | ||
160 | .irq_eoi = octeon_irq_core_eoi, | ||
161 | .irq_bus_lock = octeon_irq_core_bus_lock, | ||
162 | .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock, | ||
163 | |||
164 | .irq_cpu_online = octeon_irq_core_eoi, | ||
165 | .irq_cpu_offline = octeon_irq_core_ack, | ||
166 | .flags = IRQCHIP_ONOFFLINE_ENABLED, | ||
167 | }; | ||
168 | |||
169 | static void __init octeon_irq_init_core(void) | ||
145 | { | 170 | { |
146 | /* | 171 | int i; |
147 | * Enable all CIU interrupts again. We don't need to disable | 172 | int irq; |
148 | * IRQs to make these atomic since they are already disabled | 173 | struct octeon_core_chip_data *cd; |
149 | * earlier in the low level interrupt code. | 174 | |
150 | */ | 175 | for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) { |
151 | set_c0_status(0x100 << 2); | 176 | cd = &octeon_irq_core_chip_data[i]; |
177 | cd->current_en = false; | ||
178 | cd->desired_en = false; | ||
179 | cd->bit = i; | ||
180 | mutex_init(&cd->core_irq_mutex); | ||
181 | |||
182 | irq = OCTEON_IRQ_SW0 + i; | ||
183 | switch (irq) { | ||
184 | case OCTEON_IRQ_TIMER: | ||
185 | case OCTEON_IRQ_SW0: | ||
186 | case OCTEON_IRQ_SW1: | ||
187 | case OCTEON_IRQ_5: | ||
188 | case OCTEON_IRQ_PERF: | ||
189 | irq_set_chip_data(irq, cd); | ||
190 | irq_set_chip_and_handler(irq, &octeon_irq_chip_core, | ||
191 | handle_percpu_irq); | ||
192 | break; | ||
193 | default: | ||
194 | break; | ||
195 | } | ||
196 | } | ||
152 | } | 197 | } |
153 | 198 | ||
154 | static int next_coreid_for_irq(struct irq_desc *desc) | 199 | static int next_cpu_for_irq(struct irq_data *data) |
155 | { | 200 | { |
156 | 201 | ||
157 | #ifdef CONFIG_SMP | 202 | #ifdef CONFIG_SMP |
158 | int coreid; | 203 | int cpu; |
159 | int weight = cpumask_weight(desc->affinity); | 204 | int weight = cpumask_weight(data->affinity); |
160 | 205 | ||
161 | if (weight > 1) { | 206 | if (weight > 1) { |
162 | int cpu = smp_processor_id(); | 207 | cpu = smp_processor_id(); |
163 | for (;;) { | 208 | for (;;) { |
164 | cpu = cpumask_next(cpu, desc->affinity); | 209 | cpu = cpumask_next(cpu, data->affinity); |
165 | if (cpu >= nr_cpu_ids) { | 210 | if (cpu >= nr_cpu_ids) { |
166 | cpu = -1; | 211 | cpu = -1; |
167 | continue; | 212 | continue; |
@@ -169,83 +214,175 @@ static int next_coreid_for_irq(struct irq_desc *desc) | |||
169 | break; | 214 | break; |
170 | } | 215 | } |
171 | } | 216 | } |
172 | coreid = octeon_coreid_for_cpu(cpu); | ||
173 | } else if (weight == 1) { | 217 | } else if (weight == 1) { |
174 | coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity)); | 218 | cpu = cpumask_first(data->affinity); |
175 | } else { | 219 | } else { |
176 | coreid = cvmx_get_core_num(); | 220 | cpu = smp_processor_id(); |
177 | } | 221 | } |
178 | return coreid; | 222 | return cpu; |
179 | #else | 223 | #else |
180 | return cvmx_get_core_num(); | 224 | return smp_processor_id(); |
181 | #endif | 225 | #endif |
182 | } | 226 | } |
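A note from the editor (sketch only, not part of the patch): when the affinity mask holds more than one CPU, next_cpu_for_irq() above rotates delivery across the mask by calling cpumask_next() and wrapping the scan when it runs off the end, by resetting the cursor to -1. The stand-alone model below keeps only that wrap-around walk; NR_CPUS, the bitmask "cpumask" and the starting CPU are invented for the example, and the single-CPU and empty-mask fallbacks of the real function are left out.

#include <stdio.h>

#define NR_CPUS 4

/* Next set bit strictly above 'cpu', or NR_CPUS if none: a stand-in for cpumask_next(). */
static int mask_next(int cpu, unsigned int mask)
{
    for (int i = cpu + 1; i < NR_CPUS; i++)
        if (mask & (1u << i))
            return i;
    return NR_CPUS;
}

/* Advance to the next CPU in the mask, wrapping by restarting from -1.
 * Assumes a non-empty mask, as the weight > 1 check in the patch guarantees. */
static int next_cpu(int cpu, unsigned int affinity)
{
    for (;;) {
        cpu = mask_next(cpu, affinity);
        if (cpu >= NR_CPUS) {
            cpu = -1;       /* wrap: rescan from CPU 0 */
            continue;
        }
        break;
    }
    return cpu;
}

int main(void)
{
    unsigned int affinity = 0xD;    /* CPUs 0, 2 and 3 allowed */
    int cpu = 2;

    for (int i = 0; i < 4; i++) {
        cpu = next_cpu(cpu, affinity);
        printf("deliver on cpu %d\n", cpu);     /* 3, 0, 2, 3 */
    }
    return 0;
}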
183 | 227 | ||
184 | static void octeon_irq_ciu0_enable(unsigned int irq) | 228 | static void octeon_irq_ciu_enable(struct irq_data *data) |
185 | { | 229 | { |
186 | struct irq_desc *desc = irq_to_desc(irq); | 230 | int cpu = next_cpu_for_irq(data); |
187 | int coreid = next_coreid_for_irq(desc); | 231 | int coreid = octeon_coreid_for_cpu(cpu); |
232 | unsigned long *pen; | ||
188 | unsigned long flags; | 233 | unsigned long flags; |
189 | uint64_t en0; | 234 | union octeon_ciu_chip_data cd; |
190 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 235 | |
236 | cd.p = irq_data_get_irq_chip_data(data); | ||
191 | 237 | ||
192 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 238 | if (cd.s.line == 0) { |
193 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 239 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
194 | en0 |= 1ull << bit; | 240 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
195 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 241 | set_bit(cd.s.bit, pen); |
196 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 242 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
197 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | 243 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
244 | } else { | ||
245 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
246 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
247 | set_bit(cd.s.bit, pen); | ||
248 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | ||
249 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
250 | } | ||
198 | } | 251 | } |
199 | 252 | ||
200 | static void octeon_irq_ciu0_enable_mbox(unsigned int irq) | 253 | static void octeon_irq_ciu_enable_local(struct irq_data *data) |
201 | { | 254 | { |
202 | int coreid = cvmx_get_core_num(); | 255 | unsigned long *pen; |
256 | unsigned long flags; | ||
257 | union octeon_ciu_chip_data cd; | ||
258 | |||
259 | cd.p = irq_data_get_irq_chip_data(data); | ||
260 | |||
261 | if (cd.s.line == 0) { | ||
262 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | ||
263 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); | ||
264 | set_bit(cd.s.bit, pen); | ||
265 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | ||
266 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
267 | } else { | ||
268 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
269 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); | ||
270 | set_bit(cd.s.bit, pen); | ||
271 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); | ||
272 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static void octeon_irq_ciu_disable_local(struct irq_data *data) | ||
277 | { | ||
278 | unsigned long *pen; | ||
203 | unsigned long flags; | 279 | unsigned long flags; |
204 | uint64_t en0; | 280 | union octeon_ciu_chip_data cd; |
205 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 281 | |
282 | cd.p = irq_data_get_irq_chip_data(data); | ||
206 | 283 | ||
207 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 284 | if (cd.s.line == 0) { |
208 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 285 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
209 | en0 |= 1ull << bit; | 286 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); |
210 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 287 | clear_bit(cd.s.bit, pen); |
211 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 288 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
212 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | 289 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
290 | } else { | ||
291 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
292 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); | ||
293 | clear_bit(cd.s.bit, pen); | ||
294 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); | ||
295 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
296 | } | ||
213 | } | 297 | } |
214 | 298 | ||
215 | static void octeon_irq_ciu0_disable(unsigned int irq) | 299 | static void octeon_irq_ciu_disable_all(struct irq_data *data) |
216 | { | 300 | { |
217 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | ||
218 | unsigned long flags; | 301 | unsigned long flags; |
219 | uint64_t en0; | 302 | unsigned long *pen; |
220 | int cpu; | 303 | int cpu; |
221 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 304 | union octeon_ciu_chip_data cd; |
222 | for_each_online_cpu(cpu) { | 305 | |
223 | int coreid = octeon_coreid_for_cpu(cpu); | 306 | wmb(); /* Make sure flag changes arrive before register updates. */ |
224 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 307 | |
225 | en0 &= ~(1ull << bit); | 308 | cd.p = irq_data_get_irq_chip_data(data); |
226 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 309 | |
310 | if (cd.s.line == 0) { | ||
311 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | ||
312 | for_each_online_cpu(cpu) { | ||
313 | int coreid = octeon_coreid_for_cpu(cpu); | ||
314 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | ||
315 | clear_bit(cd.s.bit, pen); | ||
316 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | ||
317 | } | ||
318 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
319 | } else { | ||
320 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
321 | for_each_online_cpu(cpu) { | ||
322 | int coreid = octeon_coreid_for_cpu(cpu); | ||
323 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
324 | clear_bit(cd.s.bit, pen); | ||
325 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | ||
326 | } | ||
327 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
328 | } | ||
329 | } | ||
330 | |||
331 | static void octeon_irq_ciu_enable_all(struct irq_data *data) | ||
332 | { | ||
333 | unsigned long flags; | ||
334 | unsigned long *pen; | ||
335 | int cpu; | ||
336 | union octeon_ciu_chip_data cd; | ||
337 | |||
338 | cd.p = irq_data_get_irq_chip_data(data); | ||
339 | |||
340 | if (cd.s.line == 0) { | ||
341 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | ||
342 | for_each_online_cpu(cpu) { | ||
343 | int coreid = octeon_coreid_for_cpu(cpu); | ||
344 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | ||
345 | set_bit(cd.s.bit, pen); | ||
346 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | ||
347 | } | ||
348 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
349 | } else { | ||
350 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
351 | for_each_online_cpu(cpu) { | ||
352 | int coreid = octeon_coreid_for_cpu(cpu); | ||
353 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
354 | set_bit(cd.s.bit, pen); | ||
355 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | ||
356 | } | ||
357 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
227 | } | 358 | } |
228 | /* | ||
229 | * We need to do a read after the last update to make sure all | ||
230 | * of them are done. | ||
231 | */ | ||
232 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); | ||
233 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
234 | } | 359 | } |
235 | 360 | ||
236 | /* | 361 | /* |
237 | * Enable the irq on the next core in the affinity set for chips that | 362 | * Enable the irq on the next core in the affinity set for chips that |
238 | * have the EN*_W1{S,C} registers. | 363 | * have the EN*_W1{S,C} registers. |
239 | */ | 364 | */ |
240 | static void octeon_irq_ciu0_enable_v2(unsigned int irq) | 365 | static void octeon_irq_ciu_enable_v2(struct irq_data *data) |
241 | { | 366 | { |
242 | int index; | 367 | u64 mask; |
243 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 368 | int cpu = next_cpu_for_irq(data); |
244 | struct irq_desc *desc = irq_to_desc(irq); | 369 | union octeon_ciu_chip_data cd; |
370 | |||
371 | cd.p = irq_data_get_irq_chip_data(data); | ||
372 | mask = 1ull << (cd.s.bit); | ||
245 | 373 | ||
246 | if ((desc->status & IRQ_DISABLED) == 0) { | 374 | /* |
247 | index = next_coreid_for_irq(desc) * 2; | 375 | * Called under the desc lock, so these should never get out |
376 | * of sync. | ||
377 | */ | ||
378 | if (cd.s.line == 0) { | ||
379 | int index = octeon_coreid_for_cpu(cpu) * 2; | ||
380 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
248 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 381 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
382 | } else { | ||
383 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
384 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
385 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
249 | } | 386 | } |
250 | } | 387 | } |
251 | 388 | ||
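Editor's sketch (not part of the patch): the octeon_irq_ciu_enable/disable family above stops doing read-modify-write on the CIU enable CSRs; instead it flips one bit in a per-CPU software mirror (under the existing raw spinlock) and writes the whole mirror word to the register. A stand-alone model of that bookkeeping, in which plain arrays stand in for the per_cpu variables and for the CSRs; the locking and the sum0/sum1 split are omitted.

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

static uint64_t en_mirror[NCPUS];   /* plays per_cpu(octeon_irq_ciu0_en_mirror, cpu) */
static uint64_t fake_en_csr[NCPUS]; /* plays CVMX_CIU_INTX_EN0(coreid * 2) */

static void ciu_enable(int cpu, int bit)
{
    en_mirror[cpu] |= 1ull << bit;      /* set_bit(bit, pen) */
    fake_en_csr[cpu] = en_mirror[cpu];  /* cvmx_write_csr(..., *pen) */
}

static void ciu_disable_all(int bit)
{
    for (int cpu = 0; cpu < NCPUS; cpu++) {
        en_mirror[cpu] &= ~(1ull << bit);
        fake_en_csr[cpu] = en_mirror[cpu];
    }
}

int main(void)
{
    ciu_enable(2, 36);      /* e.g. PCI_INT0 (line 0, bit 36) on cpu 2 */
    printf("cpu2 en = %#llx\n", (unsigned long long)fake_en_csr[2]);
    ciu_disable_all(36);
    printf("cpu2 en = %#llx\n", (unsigned long long)fake_en_csr[2]);
    return 0;
}

Keeping the mirror means the hot paths never read a CSR, and it is the same mirror that the new IP2/IP3 dispatchers later AND against the SUM register instead of reading the EN register.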
@@ -253,83 +390,155 @@ static void octeon_irq_ciu0_enable_v2(unsigned int irq) | |||
253 | * Enable the irq on the current CPU for chips that | 390 | * Enable the irq on the current CPU for chips that |
254 | * have the EN*_W1{S,C} registers. | 391 | * have the EN*_W1{S,C} registers. |
255 | */ | 392 | */ |
256 | static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq) | 393 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) |
394 | { | ||
395 | u64 mask; | ||
396 | union octeon_ciu_chip_data cd; | ||
397 | |||
398 | cd.p = irq_data_get_irq_chip_data(data); | ||
399 | mask = 1ull << (cd.s.bit); | ||
400 | |||
401 | if (cd.s.line == 0) { | ||
402 | int index = cvmx_get_core_num() * 2; | ||
403 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); | ||
404 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | ||
405 | } else { | ||
406 | int index = cvmx_get_core_num() * 2 + 1; | ||
407 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); | ||
408 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
409 | } | ||
410 | } | ||
411 | |||
412 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) | ||
257 | { | 413 | { |
258 | int index; | 414 | u64 mask; |
259 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 415 | union octeon_ciu_chip_data cd; |
260 | 416 | ||
261 | index = cvmx_get_core_num() * 2; | 417 | cd.p = irq_data_get_irq_chip_data(data); |
262 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 418 | mask = 1ull << (cd.s.bit); |
419 | |||
420 | if (cd.s.line == 0) { | ||
421 | int index = cvmx_get_core_num() * 2; | ||
422 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); | ||
423 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | ||
424 | } else { | ||
425 | int index = cvmx_get_core_num() * 2 + 1; | ||
426 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); | ||
427 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
428 | } | ||
263 | } | 429 | } |
264 | 430 | ||
265 | /* | 431 | /* |
266 | * Disable the irq on the current core for chips that have the EN*_W1{S,C} | 432 | * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq. |
267 | * registers. | ||
268 | */ | 433 | */ |
269 | static void octeon_irq_ciu0_ack_v2(unsigned int irq) | 434 | static void octeon_irq_ciu_ack(struct irq_data *data) |
270 | { | 435 | { |
271 | int index = cvmx_get_core_num() * 2; | 436 | u64 mask; |
272 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 437 | union octeon_ciu_chip_data cd; |
273 | 438 | ||
274 | switch (irq) { | 439 | cd.p = data->chip_data; |
275 | case OCTEON_IRQ_GMX_DRP0: | 440 | mask = 1ull << (cd.s.bit); |
276 | case OCTEON_IRQ_GMX_DRP1: | 441 | |
277 | case OCTEON_IRQ_IPD_DRP: | 442 | if (cd.s.line == 0) { |
278 | case OCTEON_IRQ_KEY_ZERO: | 443 | int index = cvmx_get_core_num() * 2; |
279 | case OCTEON_IRQ_TIMER0: | ||
280 | case OCTEON_IRQ_TIMER1: | ||
281 | case OCTEON_IRQ_TIMER2: | ||
282 | case OCTEON_IRQ_TIMER3: | ||
283 | /* | ||
284 | * CIU timer type interrupts must be acknowledged by | |||
285 | * writing a '1' bit to their sum0 bit. | ||
286 | */ | ||
287 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | 444 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); |
288 | break; | 445 | } else { |
289 | default: | 446 | cvmx_write_csr(CVMX_CIU_INT_SUM1, mask); |
290 | break; | ||
291 | } | 447 | } |
292 | |||
293 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | ||
294 | } | 448 | } |
295 | 449 | ||
296 | /* | 450 | /* |
297 | * Enable the irq on the current core for chips that have the EN*_W1{S,C} | 451 | * Disable the irq on the all cores for chips that have the EN*_W1{S,C} |
298 | * registers. | 452 | * registers. |
299 | */ | 453 | */ |
300 | static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq) | 454 | static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) |
301 | { | 455 | { |
302 | struct irq_desc *desc = irq_to_desc(irq); | 456 | int cpu; |
303 | int index = cvmx_get_core_num() * 2; | 457 | u64 mask; |
304 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 458 | union octeon_ciu_chip_data cd; |
305 | 459 | ||
306 | if (likely((desc->status & IRQ_DISABLED) == 0)) | 460 | wmb(); /* Make sure flag changes arrive before register updates. */ |
307 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 461 | |
462 | cd.p = data->chip_data; | ||
463 | mask = 1ull << (cd.s.bit); | ||
464 | |||
465 | if (cd.s.line == 0) { | ||
466 | for_each_online_cpu(cpu) { | ||
467 | int index = octeon_coreid_for_cpu(cpu) * 2; | ||
468 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
469 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | ||
470 | } | ||
471 | } else { | ||
472 | for_each_online_cpu(cpu) { | ||
473 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
474 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
475 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
476 | } | ||
477 | } | ||
308 | } | 478 | } |
309 | 479 | ||
310 | /* | 480 | /* |
311 | * Disable the irq on the all cores for chips that have the EN*_W1{S,C} | 481 | * Enable the irq on the all cores for chips that have the EN*_W1{S,C} |
312 | * registers. | 482 | * registers. |
313 | */ | 483 | */ |
314 | static void octeon_irq_ciu0_disable_all_v2(unsigned int irq) | 484 | static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) |
315 | { | 485 | { |
316 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | ||
317 | int index; | ||
318 | int cpu; | 486 | int cpu; |
319 | for_each_online_cpu(cpu) { | 487 | u64 mask; |
320 | index = octeon_coreid_for_cpu(cpu) * 2; | 488 | union octeon_ciu_chip_data cd; |
321 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 489 | |
490 | cd.p = data->chip_data; | ||
491 | mask = 1ull << (cd.s.bit); | ||
492 | |||
493 | if (cd.s.line == 0) { | ||
494 | for_each_online_cpu(cpu) { | ||
495 | int index = octeon_coreid_for_cpu(cpu) * 2; | ||
496 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
497 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | ||
498 | } | ||
499 | } else { | ||
500 | for_each_online_cpu(cpu) { | ||
501 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
502 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
503 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
504 | } | ||
322 | } | 505 | } |
323 | } | 506 | } |
324 | 507 | ||
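An editor's aside (sketch only, not part of the patch): the *_v2 variants above and below exploit the EN*_W1S/EN*_W1C register pairs -- writing a mask to the W1S address sets exactly those bits and writing it to the W1C address clears exactly those bits, leaving everything else untouched, so concurrent per-bit updates from different CPUs cannot clobber each other and no spinlock is taken. A tiny stand-alone model of the write-one-to-set/clear semantics; the bit numbers are just examples borrowed from the mapping table later in the patch.

#include <stdint.h>
#include <stdio.h>

static uint64_t en0;    /* the real enable state behind both pseudo-registers */

static void write_w1s(uint64_t mask) { en0 |= mask;  }  /* set-only address */
static void write_w1c(uint64_t mask) { en0 &= ~mask; }  /* clear-only address */

int main(void)
{
    write_w1s(1ull << 52);  /* one CPU enables TIMER0's bit */
    write_w1s(1ull << 36);  /* another CPU enables PCI_INT0's bit: no read-modify-write race */
    write_w1c(1ull << 52);  /* the first CPU masks TIMER0 again */

    printf("en0 = %#llx\n", (unsigned long long)en0);   /* only bit 36 remains set */
    return 0;
}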
325 | #ifdef CONFIG_SMP | 508 | #ifdef CONFIG_SMP |
326 | static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) | 509 | |
510 | static void octeon_irq_cpu_offline_ciu(struct irq_data *data) | ||
511 | { | ||
512 | int cpu = smp_processor_id(); | ||
513 | cpumask_t new_affinity; | ||
514 | |||
515 | if (!cpumask_test_cpu(cpu, data->affinity)) | ||
516 | return; | ||
517 | |||
518 | if (cpumask_weight(data->affinity) > 1) { | ||
519 | /* | ||
520 | * It has multi CPU affinity, just remove this CPU | ||
521 | * from the affinity set. | ||
522 | */ | ||
523 | cpumask_copy(&new_affinity, data->affinity); | ||
524 | cpumask_clear_cpu(cpu, &new_affinity); | ||
525 | } else { | ||
526 | /* Otherwise, put it on the lowest numbered online CPU. */ | ||
527 | cpumask_clear(&new_affinity); | ||
528 | cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); | ||
529 | } | ||
530 | __irq_set_affinity_locked(data, &new_affinity); | ||
531 | } | ||
532 | |||
533 | static int octeon_irq_ciu_set_affinity(struct irq_data *data, | ||
534 | const struct cpumask *dest, bool force) | ||
327 | { | 535 | { |
328 | int cpu; | 536 | int cpu; |
329 | struct irq_desc *desc = irq_to_desc(irq); | 537 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
330 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
331 | unsigned long flags; | 538 | unsigned long flags; |
332 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 539 | union octeon_ciu_chip_data cd; |
540 | |||
541 | cd.p = data->chip_data; | ||
333 | 542 | ||
334 | /* | 543 | /* |
335 | * For non-v2 CIU, we will allow only single CPU affinity. | 544 | * For non-v2 CIU, we will allow only single CPU affinity. |
@@ -339,26 +548,40 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask * | |||
339 | if (cpumask_weight(dest) != 1) | 548 | if (cpumask_weight(dest) != 1) |
340 | return -EINVAL; | 549 | return -EINVAL; |
341 | 550 | ||
342 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 551 | if (!enable_one) |
343 | for_each_online_cpu(cpu) { | 552 | return 0; |
344 | int coreid = octeon_coreid_for_cpu(cpu); | 553 | |
345 | uint64_t en0 = | 554 | if (cd.s.line == 0) { |
346 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 555 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
347 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 556 | for_each_online_cpu(cpu) { |
348 | enable_one = 0; | 557 | int coreid = octeon_coreid_for_cpu(cpu); |
349 | en0 |= 1ull << bit; | 558 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
350 | } else { | 559 | |
351 | en0 &= ~(1ull << bit); | 560 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
561 | enable_one = false; | ||
562 | set_bit(cd.s.bit, pen); | ||
563 | } else { | ||
564 | clear_bit(cd.s.bit, pen); | ||
565 | } | ||
566 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | ||
352 | } | 567 | } |
353 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 568 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
569 | } else { | ||
570 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
571 | for_each_online_cpu(cpu) { | ||
572 | int coreid = octeon_coreid_for_cpu(cpu); | ||
573 | unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
574 | |||
575 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
576 | enable_one = false; | ||
577 | set_bit(cd.s.bit, pen); | ||
578 | } else { | ||
579 | clear_bit(cd.s.bit, pen); | ||
580 | } | ||
581 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | ||
582 | } | ||
583 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
354 | } | 584 | } |
355 | /* | ||
356 | * We need to do a read after the last update to make sure all | ||
357 | * of them are done. | ||
358 | */ | ||
359 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); | ||
360 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
361 | |||
362 | return 0; | 585 | return 0; |
363 | } | 586 | } |
364 | 587 | ||
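Editor's sketch (not part of the patch): the set_affinity implementations walk every online CPU and arm the interrupt on exactly one member of the destination mask (the first one found), clearing the bit everywhere else; the enable_one flag is what enforces "exactly one". A stand-alone sketch of that loop shape, with a small bitmask playing the cpumask and an invented CPU count.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

int main(void)
{
    uint64_t en_mirror[NCPUS] = { 0 };
    unsigned int dest = 0x6;    /* destination cpumask: CPUs 1 and 2 requested */
    int bit = 46;               /* e.g. the RML line (line 0, bit 46) */
    bool enable_one = true;     /* the irq is currently unmasked */

    for (int cpu = 0; cpu < NCPUS; cpu++) {
        if ((dest & (1u << cpu)) && enable_one) {
            enable_one = false;             /* first CPU in the mask wins */
            en_mirror[cpu] |= 1ull << bit;
        } else {
            en_mirror[cpu] &= ~(1ull << bit);
        }
    }

    for (int cpu = 0; cpu < NCPUS; cpu++)
        printf("cpu%d: %s\n", cpu,
               (en_mirror[cpu] >> bit) & 1 ? "enabled" : "masked");
    return 0;
}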
@@ -366,22 +589,46 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask * | |||
366 | * Set affinity for the irq for chips that have the EN*_W1{S,C} | 589 | * Set affinity for the irq for chips that have the EN*_W1{S,C} |
367 | * registers. | 590 | * registers. |
368 | */ | 591 | */ |
369 | static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq, | 592 | static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, |
370 | const struct cpumask *dest) | 593 | const struct cpumask *dest, |
594 | bool force) | ||
371 | { | 595 | { |
372 | int cpu; | 596 | int cpu; |
373 | int index; | 597 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
374 | struct irq_desc *desc = irq_to_desc(irq); | 598 | u64 mask; |
375 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | 599 | union octeon_ciu_chip_data cd; |
376 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 600 | |
377 | 601 | if (!enable_one) | |
378 | for_each_online_cpu(cpu) { | 602 | return 0; |
379 | index = octeon_coreid_for_cpu(cpu) * 2; | 603 | |
380 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 604 | cd.p = data->chip_data; |
381 | enable_one = 0; | 605 | mask = 1ull << cd.s.bit; |
382 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 606 | |
383 | } else { | 607 | if (cd.s.line == 0) { |
384 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 608 | for_each_online_cpu(cpu) { |
609 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | ||
610 | int index = octeon_coreid_for_cpu(cpu) * 2; | ||
611 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
612 | enable_one = false; | ||
613 | set_bit(cd.s.bit, pen); | ||
614 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | ||
615 | } else { | ||
616 | clear_bit(cd.s.bit, pen); | ||
617 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | ||
618 | } | ||
619 | } | ||
620 | } else { | ||
621 | for_each_online_cpu(cpu) { | ||
622 | unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
623 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
624 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
625 | enable_one = false; | ||
626 | set_bit(cd.s.bit, pen); | ||
627 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
628 | } else { | ||
629 | clear_bit(cd.s.bit, pen); | ||
630 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
631 | } | ||
385 | } | 632 | } |
386 | } | 633 | } |
387 | return 0; | 634 | return 0; |
@@ -389,80 +636,102 @@ static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq, | |||
389 | #endif | 636 | #endif |
390 | 637 | ||
391 | /* | 638 | /* |
639 | * The v1 CIU code already masks things, so supply a dummy version to | ||
640 | * the core chip code. | ||
641 | */ | ||
642 | static void octeon_irq_dummy_mask(struct irq_data *data) | ||
643 | { | ||
644 | } | ||
645 | |||
646 | /* | ||
392 | * Newer octeon chips have support for lockless CIU operation. | 647 | * Newer octeon chips have support for lockless CIU operation. |
393 | */ | 648 | */ |
394 | static struct irq_chip octeon_irq_chip_ciu0_v2 = { | 649 | static struct irq_chip octeon_irq_chip_ciu_v2 = { |
395 | .name = "CIU0", | 650 | .name = "CIU", |
396 | .enable = octeon_irq_ciu0_enable_v2, | 651 | .irq_enable = octeon_irq_ciu_enable_v2, |
397 | .disable = octeon_irq_ciu0_disable_all_v2, | 652 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
398 | .eoi = octeon_irq_ciu0_enable_v2, | 653 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
654 | .irq_unmask = octeon_irq_ciu_enable_v2, | ||
399 | #ifdef CONFIG_SMP | 655 | #ifdef CONFIG_SMP |
400 | .set_affinity = octeon_irq_ciu0_set_affinity_v2, | 656 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
657 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
401 | #endif | 658 | #endif |
402 | }; | 659 | }; |
403 | 660 | ||
404 | static struct irq_chip octeon_irq_chip_ciu0 = { | 661 | static struct irq_chip octeon_irq_chip_ciu_edge_v2 = { |
405 | .name = "CIU0", | 662 | .name = "CIU-E", |
406 | .enable = octeon_irq_ciu0_enable, | 663 | .irq_enable = octeon_irq_ciu_enable_v2, |
407 | .disable = octeon_irq_ciu0_disable, | 664 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
408 | .eoi = octeon_irq_ciu0_eoi, | 665 | .irq_ack = octeon_irq_ciu_ack, |
666 | .irq_mask = octeon_irq_ciu_disable_local_v2, | ||
667 | .irq_unmask = octeon_irq_ciu_enable_v2, | ||
409 | #ifdef CONFIG_SMP | 668 | #ifdef CONFIG_SMP |
410 | .set_affinity = octeon_irq_ciu0_set_affinity, | 669 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
670 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
411 | #endif | 671 | #endif |
412 | }; | 672 | }; |
413 | 673 | ||
414 | /* The mbox versions don't do any affinity or round-robin. */ | 674 | static struct irq_chip octeon_irq_chip_ciu = { |
415 | static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = { | 675 | .name = "CIU", |
416 | .name = "CIU0-M", | 676 | .irq_enable = octeon_irq_ciu_enable, |
417 | .enable = octeon_irq_ciu0_enable_mbox_v2, | 677 | .irq_disable = octeon_irq_ciu_disable_all, |
418 | .disable = octeon_irq_ciu0_disable, | 678 | .irq_mask = octeon_irq_dummy_mask, |
419 | .eoi = octeon_irq_ciu0_eoi_mbox_v2, | 679 | #ifdef CONFIG_SMP |
680 | .irq_set_affinity = octeon_irq_ciu_set_affinity, | ||
681 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
682 | #endif | ||
420 | }; | 683 | }; |
421 | 684 | ||
422 | static struct irq_chip octeon_irq_chip_ciu0_mbox = { | 685 | static struct irq_chip octeon_irq_chip_ciu_edge = { |
423 | .name = "CIU0-M", | 686 | .name = "CIU-E", |
424 | .enable = octeon_irq_ciu0_enable_mbox, | 687 | .irq_enable = octeon_irq_ciu_enable, |
425 | .disable = octeon_irq_ciu0_disable, | 688 | .irq_disable = octeon_irq_ciu_disable_all, |
426 | .eoi = octeon_irq_ciu0_eoi, | 689 | .irq_mask = octeon_irq_dummy_mask, |
690 | .irq_ack = octeon_irq_ciu_ack, | ||
691 | #ifdef CONFIG_SMP | ||
692 | .irq_set_affinity = octeon_irq_ciu_set_affinity, | ||
693 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
694 | #endif | ||
427 | }; | 695 | }; |
428 | 696 | ||
429 | static void octeon_irq_ciu1_ack(unsigned int irq) | 697 | /* The mbox versions don't do any affinity or round-robin. */ |
430 | { | 698 | static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = { |
431 | /* | 699 | .name = "CIU-M", |
432 | * In order to avoid any locking accessing the CIU, we | 700 | .irq_enable = octeon_irq_ciu_enable_all_v2, |
433 | * acknowledge CIU interrupts by disabling all of them. This | 701 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
434 | * way we can use a per core register and avoid any out of | 702 | .irq_ack = octeon_irq_ciu_disable_local_v2, |
435 | * core locking requirements. This has the side effect that | 703 | .irq_eoi = octeon_irq_ciu_enable_local_v2, |
436 | * CIU interrupts can't be processed recursively. We don't | 704 | |
437 | * need to disable IRQs to make these atomic since they are | 705 | .irq_cpu_online = octeon_irq_ciu_enable_local_v2, |
438 | * already disabled earlier in the low level interrupt code. | 706 | .irq_cpu_offline = octeon_irq_ciu_disable_local_v2, |
439 | */ | 707 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
440 | clear_c0_status(0x100 << 3); | 708 | }; |
441 | } | ||
442 | 709 | ||
443 | static void octeon_irq_ciu1_eoi(unsigned int irq) | 710 | static struct irq_chip octeon_irq_chip_ciu_mbox = { |
444 | { | 711 | .name = "CIU-M", |
445 | /* | 712 | .irq_enable = octeon_irq_ciu_enable_all, |
446 | * Enable all CIU interrupts again. We don't need to disable | 713 | .irq_disable = octeon_irq_ciu_disable_all, |
447 | * IRQs to make these atomic since they are already disabled | 714 | |
448 | * earlier in the low level interrupt code. | 715 | .irq_cpu_online = octeon_irq_ciu_enable_local, |
449 | */ | 716 | .irq_cpu_offline = octeon_irq_ciu_disable_local, |
450 | set_c0_status(0x100 << 3); | 717 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
451 | } | 718 | }; |
452 | 719 | ||
453 | static void octeon_irq_ciu1_enable(unsigned int irq) | 720 | /* |
721 | * Watchdog interrupts are special. They are associated with a single | ||
722 | * core, so we hardwire the affinity to that core. | ||
723 | */ | ||
724 | static void octeon_irq_ciu_wd_enable(struct irq_data *data) | ||
454 | { | 725 | { |
455 | struct irq_desc *desc = irq_to_desc(irq); | ||
456 | int coreid = next_coreid_for_irq(desc); | ||
457 | unsigned long flags; | 726 | unsigned long flags; |
458 | uint64_t en1; | 727 | unsigned long *pen; |
459 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 728 | int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
729 | int cpu = octeon_cpu_for_coreid(coreid); | ||
460 | 730 | ||
461 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 731 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); |
462 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 732 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
463 | en1 |= 1ull << bit; | 733 | set_bit(coreid, pen); |
464 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | 734 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
465 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
466 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | 735 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); |
467 | } | 736 | } |
468 | 737 | ||
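A brief editor's sketch, not part of the patch: the irq_chip structures defined above and below are plain tables of callbacks, and the patch provides two parallel families -- a spinlocked read-modify-write family and a lock-free W1S/W1C family -- then picks one table per chip model at init time. The sketch below shows only that select-a-callback-table idea; its struct and function names are invented, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct fake_irq_chip {
    const char *name;
    void (*irq_enable)(int bit);
    void (*irq_disable)(int bit);
};

static void enable_locked(int bit)  { printf("v1: lock, set bit %d in mirror, write EN\n", bit); }
static void disable_locked(int bit) { printf("v1: lock, clear bit %d in mirror, write EN\n", bit); }
static void enable_w1s(int bit)     { printf("v2: write 1<<%d to EN_W1S\n", bit); }
static void disable_w1c(int bit)    { printf("v2: write 1<<%d to EN_W1C\n", bit); }

static const struct fake_irq_chip chip_ciu    = { "CIU", enable_locked, disable_locked };
static const struct fake_irq_chip chip_ciu_v2 = { "CIU", enable_w1s,    disable_w1c    };

int main(void)
{
    bool has_w1s_w1c = true;    /* what the OCTEON_IS_MODEL() checks decide at boot */
    const struct fake_irq_chip *chip = has_w1s_w1c ? &chip_ciu_v2 : &chip_ciu;

    chip->irq_enable(46);       /* identical call sites, different behaviour */
    chip->irq_disable(46);
    return 0;
}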
@@ -470,286 +739,281 @@ static void octeon_irq_ciu1_enable(unsigned int irq) | |||
470 | * Watchdog interrupts are special. They are associated with a single | 739 | * Watchdog interrupts are special. They are associated with a single |
471 | * core, so we hardwire the affinity to that core. | 740 | * core, so we hardwire the affinity to that core. |
472 | */ | 741 | */ |
473 | static void octeon_irq_ciu1_wd_enable(unsigned int irq) | 742 | static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) |
474 | { | 743 | { |
475 | unsigned long flags; | 744 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
476 | uint64_t en1; | 745 | int cpu = octeon_cpu_for_coreid(coreid); |
477 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | ||
478 | int coreid = bit; | ||
479 | 746 | ||
480 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 747 | set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
481 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 748 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); |
482 | en1 |= 1ull << bit; | ||
483 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
484 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
485 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
486 | } | 749 | } |
487 | 750 | ||
488 | static void octeon_irq_ciu1_disable(unsigned int irq) | 751 | |
752 | static struct irq_chip octeon_irq_chip_ciu_wd_v2 = { | ||
753 | .name = "CIU-W", | ||
754 | .irq_enable = octeon_irq_ciu1_wd_enable_v2, | ||
755 | .irq_disable = octeon_irq_ciu_disable_all_v2, | ||
756 | .irq_mask = octeon_irq_ciu_disable_local_v2, | ||
757 | .irq_unmask = octeon_irq_ciu_enable_local_v2, | ||
758 | }; | ||
759 | |||
760 | static struct irq_chip octeon_irq_chip_ciu_wd = { | ||
761 | .name = "CIU-W", | ||
762 | .irq_enable = octeon_irq_ciu_wd_enable, | ||
763 | .irq_disable = octeon_irq_ciu_disable_all, | ||
764 | .irq_mask = octeon_irq_dummy_mask, | ||
765 | }; | ||
766 | |||
767 | static void octeon_irq_ip2_v1(void) | ||
489 | { | 768 | { |
490 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 769 | const unsigned long core_id = cvmx_get_core_num(); |
491 | unsigned long flags; | 770 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); |
492 | uint64_t en1; | 771 | |
493 | int cpu; | 772 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); |
494 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 773 | clear_c0_status(STATUSF_IP2); |
495 | for_each_online_cpu(cpu) { | 774 | if (likely(ciu_sum)) { |
496 | int coreid = octeon_coreid_for_cpu(cpu); | 775 | int bit = fls64(ciu_sum) - 1; |
497 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 776 | int irq = octeon_irq_ciu_to_irq[0][bit]; |
498 | en1 &= ~(1ull << bit); | 777 | if (likely(irq)) |
499 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | 778 | do_IRQ(irq); |
779 | else | ||
780 | spurious_interrupt(); | ||
781 | } else { | ||
782 | spurious_interrupt(); | ||
500 | } | 783 | } |
501 | /* | 784 | set_c0_status(STATUSF_IP2); |
502 | * We need to do a read after the last update to make sure all | ||
503 | * of them are done. | ||
504 | */ | ||
505 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); | ||
506 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
507 | } | 785 | } |
508 | 786 | ||
509 | /* | 787 | static void octeon_irq_ip2_v2(void) |
510 | * Enable the irq on the current core for chips that have the EN*_W1{S,C} | ||
511 | * registers. | ||
512 | */ | ||
513 | static void octeon_irq_ciu1_enable_v2(unsigned int irq) | ||
514 | { | 788 | { |
515 | int index; | 789 | const unsigned long core_id = cvmx_get_core_num(); |
516 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 790 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); |
517 | struct irq_desc *desc = irq_to_desc(irq); | 791 | |
518 | 792 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); | |
519 | if ((desc->status & IRQ_DISABLED) == 0) { | 793 | if (likely(ciu_sum)) { |
520 | index = next_coreid_for_irq(desc) * 2 + 1; | 794 | int bit = fls64(ciu_sum) - 1; |
521 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 795 | int irq = octeon_irq_ciu_to_irq[0][bit]; |
796 | if (likely(irq)) | ||
797 | do_IRQ(irq); | ||
798 | else | ||
799 | spurious_interrupt(); | ||
800 | } else { | ||
801 | spurious_interrupt(); | ||
522 | } | 802 | } |
523 | } | 803 | } |
524 | 804 | static void octeon_irq_ip3_v1(void) | |
525 | /* | ||
526 | * Watchdog interrupts are special. They are associated with a single | ||
527 | * core, so we hardwire the affinity to that core. | ||
528 | */ | ||
529 | static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq) | ||
530 | { | 805 | { |
531 | int index; | 806 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); |
532 | int coreid = irq - OCTEON_IRQ_WDOG0; | 807 | |
533 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 808 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); |
534 | struct irq_desc *desc = irq_to_desc(irq); | 809 | clear_c0_status(STATUSF_IP3); |
535 | 810 | if (likely(ciu_sum)) { | |
536 | if ((desc->status & IRQ_DISABLED) == 0) { | 811 | int bit = fls64(ciu_sum) - 1; |
537 | index = coreid * 2 + 1; | 812 | int irq = octeon_irq_ciu_to_irq[1][bit]; |
538 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 813 | if (likely(irq)) |
814 | do_IRQ(irq); | ||
815 | else | ||
816 | spurious_interrupt(); | ||
817 | } else { | ||
818 | spurious_interrupt(); | ||
539 | } | 819 | } |
820 | set_c0_status(STATUSF_IP3); | ||
540 | } | 821 | } |
541 | 822 | ||
542 | /* | 823 | static void octeon_irq_ip3_v2(void) |
543 | * Disable the irq on the current core for chips that have the EN*_W1{S,C} | ||
544 | * registers. | ||
545 | */ | ||
546 | static void octeon_irq_ciu1_ack_v2(unsigned int irq) | ||
547 | { | 824 | { |
548 | int index = cvmx_get_core_num() * 2 + 1; | 825 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); |
549 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 826 | |
550 | 827 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); | |
551 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 828 | if (likely(ciu_sum)) { |
829 | int bit = fls64(ciu_sum) - 1; | ||
830 | int irq = octeon_irq_ciu_to_irq[1][bit]; | ||
831 | if (likely(irq)) | ||
832 | do_IRQ(irq); | ||
833 | else | ||
834 | spurious_interrupt(); | ||
835 | } else { | ||
836 | spurious_interrupt(); | ||
837 | } | ||
552 | } | 838 | } |
553 | 839 | ||
554 | /* | 840 | static void octeon_irq_ip4_mask(void) |
555 | * Disable the irq on the all cores for chips that have the EN*_W1{S,C} | ||
556 | * registers. | ||
557 | */ | ||
558 | static void octeon_irq_ciu1_disable_all_v2(unsigned int irq) | ||
559 | { | 841 | { |
560 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 842 | clear_c0_status(STATUSF_IP4); |
561 | int index; | 843 | spurious_interrupt(); |
562 | int cpu; | ||
563 | for_each_online_cpu(cpu) { | ||
564 | index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
565 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
566 | } | ||
567 | } | 844 | } |
568 | 845 | ||
569 | #ifdef CONFIG_SMP | 846 | static void (*octeon_irq_ip2)(void); |
570 | static int octeon_irq_ciu1_set_affinity(unsigned int irq, | 847 | static void (*octeon_irq_ip3)(void); |
571 | const struct cpumask *dest) | 848 | static void (*octeon_irq_ip4)(void); |
572 | { | ||
573 | int cpu; | ||
574 | struct irq_desc *desc = irq_to_desc(irq); | ||
575 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
576 | unsigned long flags; | ||
577 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | ||
578 | 849 | ||
579 | /* | 850 | void __cpuinitdata (*octeon_irq_setup_secondary)(void); |
580 | * For non-v2 CIU, we will allow only single CPU affinity. | ||
581 | * This removes the need to do locking in the .ack/.eoi | ||
582 | * functions. | ||
583 | */ | ||
584 | if (cpumask_weight(dest) != 1) | ||
585 | return -EINVAL; | ||
586 | 851 | ||
587 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 852 | static void __cpuinit octeon_irq_percpu_enable(void) |
588 | for_each_online_cpu(cpu) { | 853 | { |
589 | int coreid = octeon_coreid_for_cpu(cpu); | 854 | irq_cpu_online(); |
590 | uint64_t en1 = | 855 | } |
591 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 856 | |
592 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 857 | static void __cpuinit octeon_irq_init_ciu_percpu(void) |
593 | enable_one = 0; | 858 | { |
594 | en1 |= 1ull << bit; | 859 | int coreid = cvmx_get_core_num(); |
595 | } else { | ||
596 | en1 &= ~(1ull << bit); | ||
597 | } | ||
598 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
599 | } | ||
600 | /* | 860 | /* |
601 | * We need to do a read after the last update to make sure all | 861 | * Disable All CIU Interrupts. The ones we need will be |
602 | * of them are done. | 862 | * enabled later. Read the SUM register so we know the write |
863 | * completed. | ||
603 | */ | 864 | */ |
604 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); | 865 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); |
605 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | 866 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); |
606 | 867 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | |
607 | return 0; | 868 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); |
869 | cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); | ||
608 | } | 870 | } |
609 | 871 | ||
610 | /* | 872 | static void __cpuinit octeon_irq_setup_secondary_ciu(void) |
611 | * Set affinity for the irq for chips that have the EN*_W1{S,C} | ||
612 | * registers. | ||
613 | */ | ||
614 | static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq, | ||
615 | const struct cpumask *dest) | ||
616 | { | 873 | { |
617 | int cpu; | ||
618 | int index; | ||
619 | struct irq_desc *desc = irq_to_desc(irq); | ||
620 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
621 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | ||
622 | for_each_online_cpu(cpu) { | ||
623 | index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
624 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
625 | enable_one = 0; | ||
626 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
627 | } else { | ||
628 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
629 | } | ||
630 | } | ||
631 | return 0; | ||
632 | } | ||
633 | #endif | ||
634 | 874 | ||
635 | /* | 875 | __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; |
636 | * Newer octeon chips have support for lockless CIU operation. | 876 | __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; |
637 | */ | ||
638 | static struct irq_chip octeon_irq_chip_ciu1_v2 = { | ||
639 | .name = "CIU1", | ||
640 | .enable = octeon_irq_ciu1_enable_v2, | ||
641 | .disable = octeon_irq_ciu1_disable_all_v2, | ||
642 | .eoi = octeon_irq_ciu1_enable_v2, | ||
643 | #ifdef CONFIG_SMP | ||
644 | .set_affinity = octeon_irq_ciu1_set_affinity_v2, | ||
645 | #endif | ||
646 | }; | ||
647 | 877 | ||
648 | static struct irq_chip octeon_irq_chip_ciu1 = { | 878 | octeon_irq_init_ciu_percpu(); |
649 | .name = "CIU1", | 879 | octeon_irq_percpu_enable(); |
650 | .enable = octeon_irq_ciu1_enable, | ||
651 | .disable = octeon_irq_ciu1_disable, | ||
652 | .eoi = octeon_irq_ciu1_eoi, | ||
653 | #ifdef CONFIG_SMP | ||
654 | .set_affinity = octeon_irq_ciu1_set_affinity, | ||
655 | #endif | ||
656 | }; | ||
657 | 880 | ||
658 | static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = { | 881 | /* Enable the CIU lines */ |
659 | .name = "CIU1-W", | 882 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
660 | .enable = octeon_irq_ciu1_wd_enable_v2, | 883 | clear_c0_status(STATUSF_IP4); |
661 | .disable = octeon_irq_ciu1_disable_all_v2, | 884 | } |
662 | .eoi = octeon_irq_ciu1_wd_enable_v2, | ||
663 | }; | ||
664 | 885 | ||
665 | static struct irq_chip octeon_irq_chip_ciu1_wd = { | 886 | static void __init octeon_irq_init_ciu(void) |
666 | .name = "CIU1-W", | 887 | { |
667 | .enable = octeon_irq_ciu1_wd_enable, | 888 | unsigned int i; |
668 | .disable = octeon_irq_ciu1_disable, | 889 | struct irq_chip *chip; |
669 | .eoi = octeon_irq_ciu1_eoi, | 890 | struct irq_chip *chip_edge; |
670 | }; | 891 | struct irq_chip *chip_mbox; |
892 | struct irq_chip *chip_wd; | ||
893 | |||
894 | octeon_irq_init_ciu_percpu(); | ||
895 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; | ||
671 | 896 | ||
672 | static void (*octeon_ciu0_ack)(unsigned int); | 897 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || |
673 | static void (*octeon_ciu1_ack)(unsigned int); | 898 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || |
899 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || | ||
900 | OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | ||
901 | octeon_irq_ip2 = octeon_irq_ip2_v2; | ||
902 | octeon_irq_ip3 = octeon_irq_ip3_v2; | ||
903 | chip = &octeon_irq_chip_ciu_v2; | ||
904 | chip_edge = &octeon_irq_chip_ciu_edge_v2; | ||
905 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; | ||
906 | chip_wd = &octeon_irq_chip_ciu_wd_v2; | ||
907 | } else { | ||
908 | octeon_irq_ip2 = octeon_irq_ip2_v1; | ||
909 | octeon_irq_ip3 = octeon_irq_ip3_v1; | ||
910 | chip = &octeon_irq_chip_ciu; | ||
911 | chip_edge = &octeon_irq_chip_ciu_edge; | ||
912 | chip_mbox = &octeon_irq_chip_ciu_mbox; | ||
913 | chip_wd = &octeon_irq_chip_ciu_wd; | ||
914 | } | ||
915 | octeon_irq_ip4 = octeon_irq_ip4_mask; | ||
916 | |||
917 | /* Mips internal */ | ||
918 | octeon_irq_init_core(); | ||
919 | |||
920 | /* CIU_0 */ | ||
921 | for (i = 0; i < 16; i++) | ||
922 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq); | ||
923 | for (i = 0; i < 16; i++) | ||
924 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq); | ||
925 | |||
926 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); | ||
927 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); | ||
928 | |||
929 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq); | ||
930 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq); | ||
931 | |||
932 | for (i = 0; i < 4; i++) | ||
933 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq); | ||
934 | for (i = 0; i < 4; i++) | ||
935 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq); | ||
936 | |||
937 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq); | ||
938 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq); | ||
939 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq); | ||
940 | |||
941 | for (i = 0; i < 2; i++) | ||
942 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq); | ||
943 | |||
944 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq); | ||
945 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq); | ||
946 | |||
947 | for (i = 0; i < 4; i++) | ||
948 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq); | ||
949 | |||
950 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq); | ||
951 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq); | ||
952 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq); | ||
953 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq); | ||
954 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq); | ||
955 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq); | ||
956 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq); | ||
957 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq); | ||
958 | |||
959 | /* CIU_1 */ | ||
960 | for (i = 0; i < 16; i++) | ||
961 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); | ||
962 | |||
963 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq); | ||
964 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq); | ||
965 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq); | ||
966 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq); | ||
967 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq); | ||
968 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq); | ||
969 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq); | ||
970 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq); | ||
971 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq); | ||
972 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq); | ||
973 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq); | ||
974 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq); | ||
975 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq); | ||
976 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq); | ||
977 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq); | ||
978 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq); | ||
979 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq); | ||
980 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq); | ||
981 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq); | ||
982 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq); | ||
983 | |||
984 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq); | ||
985 | |||
986 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq); | ||
987 | |||
988 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq); | ||
989 | |||
990 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq); | ||
991 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq); | ||
992 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq); | ||
993 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq); | ||
994 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq); | ||
995 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq); | ||
996 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq); | ||
997 | |||
998 | /* Enable the CIU lines */ | ||
999 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | ||
1000 | clear_c0_status(STATUSF_IP4); | ||
1001 | } | ||
674 | 1002 | ||
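Editor's sketch, not part of the patch: octeon_irq_init_ciu() above is essentially one long table -- every Linux irq gets a CIU line, a bit, a chip and a flow handler, and octeon_irq_set_ciu_mapping() also records the reverse map used by the dispatchers. Below is a stand-alone, table-driven sketch of the same idea with a handful of entries; the Linux irq numbers (40-43) are invented, while the line/bit pairs follow the hunk above.

#include <stdio.h>

enum flow { LEVEL, EDGE, PERCPU };

struct ciu_map {
    int irq;            /* hypothetical Linux irq number */
    int line;           /* CIU sum register: 0 or 1 */
    int bit;            /* bit within that register */
    enum flow flow;     /* which flow handler the entry would get */
};

static const struct ciu_map mappings[] = {
    { 40, 0, 32, PERCPU },  /* mailbox 0 */
    { 41, 0, 34, LEVEL  },  /* UART0 */
    { 42, 0, 52, EDGE   },  /* TIMER0 */
    { 43, 1,  0, LEVEL  },  /* watchdog for core 0 */
};

static int ciu_to_irq[2][64];   /* the reverse map the dispatchers consult */

int main(void)
{
    for (unsigned int i = 0; i < sizeof(mappings) / sizeof(mappings[0]); i++) {
        const struct ciu_map *m = &mappings[i];

        ciu_to_irq[m->line][m->bit] = m->irq;
        printf("irq %d -> line %d bit %d (%s)\n", m->irq, m->line, m->bit,
               m->flow == EDGE ? "edge" : m->flow == PERCPU ? "percpu" : "level");
    }
    return 0;
}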
675 | void __init arch_init_irq(void) | 1003 | void __init arch_init_irq(void) |
676 | { | 1004 | { |
677 | unsigned int irq; | ||
678 | struct irq_chip *chip0; | ||
679 | struct irq_chip *chip0_mbox; | ||
680 | struct irq_chip *chip1; | ||
681 | struct irq_chip *chip1_wd; | ||
682 | |||
683 | #ifdef CONFIG_SMP | 1005 | #ifdef CONFIG_SMP |
684 | /* Set the default affinity to the boot cpu. */ | 1006 | /* Set the default affinity to the boot cpu. */ |
685 | cpumask_clear(irq_default_affinity); | 1007 | cpumask_clear(irq_default_affinity); |
686 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); | 1008 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); |
687 | #endif | 1009 | #endif |
688 | 1010 | octeon_irq_init_ciu(); | |
689 | if (NR_IRQS < OCTEON_IRQ_LAST) | ||
690 | pr_err("octeon_irq_init: NR_IRQS is set too low\n"); | ||
691 | |||
692 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || | ||
693 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || | ||
694 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) { | ||
695 | octeon_ciu0_ack = octeon_irq_ciu0_ack_v2; | ||
696 | octeon_ciu1_ack = octeon_irq_ciu1_ack_v2; | ||
697 | chip0 = &octeon_irq_chip_ciu0_v2; | ||
698 | chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2; | ||
699 | chip1 = &octeon_irq_chip_ciu1_v2; | ||
700 | chip1_wd = &octeon_irq_chip_ciu1_wd_v2; | ||
701 | } else { | ||
702 | octeon_ciu0_ack = octeon_irq_ciu0_ack; | ||
703 | octeon_ciu1_ack = octeon_irq_ciu1_ack; | ||
704 | chip0 = &octeon_irq_chip_ciu0; | ||
705 | chip0_mbox = &octeon_irq_chip_ciu0_mbox; | ||
706 | chip1 = &octeon_irq_chip_ciu1; | ||
707 | chip1_wd = &octeon_irq_chip_ciu1_wd; | ||
708 | } | ||
709 | |||
710 | /* 0 - 15 reserved for i8259 master and slave controller. */ | ||
711 | |||
712 | /* 17 - 23 Mips internal */ | ||
713 | for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) { | ||
714 | set_irq_chip_and_handler(irq, &octeon_irq_chip_core, | ||
715 | handle_percpu_irq); | ||
716 | } | ||
717 | |||
718 | /* 24 - 87 CIU_INT_SUM0 */ | ||
719 | for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) { | ||
720 | switch (irq) { | ||
721 | case OCTEON_IRQ_MBOX0: | ||
722 | case OCTEON_IRQ_MBOX1: | ||
723 | set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq); | ||
724 | break; | ||
725 | default: | ||
726 | set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq); | ||
727 | break; | ||
728 | } | ||
729 | } | ||
730 | |||
731 | /* 88 - 151 CIU_INT_SUM1 */ | ||
732 | for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++) | ||
733 | set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq); | ||
734 | |||
735 | for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++) | ||
736 | set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq); | ||
737 | |||
738 | set_c0_status(0x300 << 2); | ||
739 | } | 1011 | } |
740 | 1012 | ||
741 | asmlinkage void plat_irq_dispatch(void) | 1013 | asmlinkage void plat_irq_dispatch(void) |
742 | { | 1014 | { |
743 | const unsigned long core_id = cvmx_get_core_num(); | ||
744 | const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2); | ||
745 | const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2); | ||
746 | const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1; | ||
747 | const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1); | ||
748 | unsigned long cop0_cause; | 1015 | unsigned long cop0_cause; |
749 | unsigned long cop0_status; | 1016 | unsigned long cop0_status; |
750 | uint64_t ciu_en; | ||
751 | uint64_t ciu_sum; | ||
752 | unsigned int irq; | ||
753 | 1017 | ||
754 | while (1) { | 1018 | while (1) { |
755 | cop0_cause = read_c0_cause(); | 1019 | cop0_cause = read_c0_cause(); |
@@ -757,33 +1021,16 @@ asmlinkage void plat_irq_dispatch(void) | |||
757 | cop0_cause &= cop0_status; | 1021 | cop0_cause &= cop0_status; |
758 | cop0_cause &= ST0_IM; | 1022 | cop0_cause &= ST0_IM; |
759 | 1023 | ||
760 | if (unlikely(cop0_cause & STATUSF_IP2)) { | 1024 | if (unlikely(cop0_cause & STATUSF_IP2)) |
761 | ciu_sum = cvmx_read_csr(ciu_sum0_address); | 1025 | octeon_irq_ip2(); |
762 | ciu_en = cvmx_read_csr(ciu_en0_address); | 1026 | else if (unlikely(cop0_cause & STATUSF_IP3)) |
763 | ciu_sum &= ciu_en; | 1027 | octeon_irq_ip3(); |
764 | if (likely(ciu_sum)) { | 1028 | else if (unlikely(cop0_cause & STATUSF_IP4)) |
765 | irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1; | 1029 | octeon_irq_ip4(); |
766 | octeon_ciu0_ack(irq); | 1030 | else if (likely(cop0_cause)) |
767 | do_IRQ(irq); | ||
768 | } else { | ||
769 | spurious_interrupt(); | ||
770 | } | ||
771 | } else if (unlikely(cop0_cause & STATUSF_IP3)) { | ||
772 | ciu_sum = cvmx_read_csr(ciu_sum1_address); | ||
773 | ciu_en = cvmx_read_csr(ciu_en1_address); | ||
774 | ciu_sum &= ciu_en; | ||
775 | if (likely(ciu_sum)) { | ||
776 | irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1; | ||
777 | octeon_ciu1_ack(irq); | ||
778 | do_IRQ(irq); | ||
779 | } else { | ||
780 | spurious_interrupt(); | ||
781 | } | ||
782 | } else if (likely(cop0_cause)) { | ||
783 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); | 1031 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); |
784 | } else { | 1032 | else |
785 | break; | 1033 | break; |
786 | } | ||
787 | } | 1034 | } |
788 | } | 1035 | } |
789 | 1036 | ||
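The rewritten dispatcher masks Cause with Status and ST0_IM, then hands each pending line to a dedicated routine (octeon_irq_ip2/ip3/ip4); only the generic MIPS lines fall through to the fls() arithmetic. For reference, the Cause IP0..IP7 bits occupy bits 8..15, so fls() on the masked value returns 9..16 and subtracting 9 yields the 0-based IP index. A tiny illustration, not code from the patch:

	/* With IP6 pending (bit 14 set), fls() returns 15, so the
	 * dispatched irq is MIPS_CPU_IRQ_BASE + 6. */
	static inline unsigned int cause_to_irq(unsigned long pending)
	{
		return fls(pending) - 9 + MIPS_CPU_IRQ_BASE;
	}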
@@ -791,83 +1038,7 @@ asmlinkage void plat_irq_dispatch(void) | |||
791 | 1038 | ||
792 | void fixup_irqs(void) | 1039 | void fixup_irqs(void) |
793 | { | 1040 | { |
794 | int irq; | 1041 | irq_cpu_offline(); |
795 | struct irq_desc *desc; | ||
796 | cpumask_t new_affinity; | ||
797 | unsigned long flags; | ||
798 | int do_set_affinity; | ||
799 | int cpu; | ||
800 | |||
801 | cpu = smp_processor_id(); | ||
802 | |||
803 | for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) | ||
804 | octeon_irq_core_disable_local(irq); | ||
805 | |||
806 | for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) { | ||
807 | desc = irq_to_desc(irq); | ||
808 | switch (irq) { | ||
809 | case OCTEON_IRQ_MBOX0: | ||
810 | case OCTEON_IRQ_MBOX1: | ||
811 | /* The eoi function will disable them on this CPU. */ | ||
812 | desc->chip->eoi(irq); | ||
813 | break; | ||
814 | case OCTEON_IRQ_WDOG0: | ||
815 | case OCTEON_IRQ_WDOG1: | ||
816 | case OCTEON_IRQ_WDOG2: | ||
817 | case OCTEON_IRQ_WDOG3: | ||
818 | case OCTEON_IRQ_WDOG4: | ||
819 | case OCTEON_IRQ_WDOG5: | ||
820 | case OCTEON_IRQ_WDOG6: | ||
821 | case OCTEON_IRQ_WDOG7: | ||
822 | case OCTEON_IRQ_WDOG8: | ||
823 | case OCTEON_IRQ_WDOG9: | ||
824 | case OCTEON_IRQ_WDOG10: | ||
825 | case OCTEON_IRQ_WDOG11: | ||
826 | case OCTEON_IRQ_WDOG12: | ||
827 | case OCTEON_IRQ_WDOG13: | ||
828 | case OCTEON_IRQ_WDOG14: | ||
829 | case OCTEON_IRQ_WDOG15: | ||
830 | /* | ||
831 | * These have special per CPU semantics and | ||
832 | * are handled in the watchdog driver. | ||
833 | */ | ||
834 | break; | ||
835 | default: | ||
836 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
837 | /* | ||
838 | * If this irq has an action, it is in use and | ||
839 | * must be migrated if it has affinity to this | ||
840 | * cpu. | ||
841 | */ | ||
842 | if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) { | ||
843 | if (cpumask_weight(desc->affinity) > 1) { | ||
844 | /* | ||
845 | * It has multi CPU affinity, | ||
846 | * just remove this CPU from | ||
847 | * the affinity set. | ||
848 | */ | ||
849 | cpumask_copy(&new_affinity, desc->affinity); | ||
850 | cpumask_clear_cpu(cpu, &new_affinity); | ||
851 | } else { | ||
852 | /* | ||
853 | * Otherwise, put it on lowest | ||
854 | * numbered online CPU. | ||
855 | */ | ||
856 | cpumask_clear(&new_affinity); | ||
857 | cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); | ||
858 | } | ||
859 | do_set_affinity = 1; | ||
860 | } else { | ||
861 | do_set_affinity = 0; | ||
862 | } | ||
863 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
864 | |||
865 | if (do_set_affinity) | ||
866 | irq_set_affinity(irq, &new_affinity); | ||
867 | |||
868 | break; | ||
869 | } | ||
870 | } | ||
871 | } | 1042 | } |
872 | 1043 | ||
873 | #endif /* CONFIG_HOTPLUG_CPU */ | 1044 | #endif /* CONFIG_HOTPLUG_CPU */ |
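With per-chip CIU handling in place, the open-coded affinity migration in fixup_irqs() collapses to the generic irq_cpu_offline() helper, which walks the active descriptors and invokes each chip's irq_cpu_offline callback on the CPU that is going down. A sketch of the shape such a callback usually takes (not the CIU implementation itself):

	static void example_irq_cpu_offline(struct irq_data *data)
	{
		int cpu = smp_processor_id();
		cpumask_t new_affinity;

		if (!cpumask_test_cpu(cpu, data->affinity))
			return;	/* not routed to this CPU, nothing to do */

		cpumask_copy(&new_affinity, data->affinity);
		if (cpumask_weight(&new_affinity) > 1)
			cpumask_clear_cpu(cpu, &new_affinity);
		else
			cpumask_copy(&new_affinity,
				     cpumask_of(cpumask_first(cpu_online_mask)));

		irq_set_affinity(data->irq, &new_affinity);
	}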
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index b0c3686c96dd..8b139bf4a1b5 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -420,7 +420,6 @@ void octeon_user_io_init(void) | |||
420 | void __init prom_init(void) | 420 | void __init prom_init(void) |
421 | { | 421 | { |
422 | struct cvmx_sysinfo *sysinfo; | 422 | struct cvmx_sysinfo *sysinfo; |
423 | const int coreid = cvmx_get_core_num(); | ||
424 | int i; | 423 | int i; |
425 | int argc; | 424 | int argc; |
426 | #ifdef CONFIG_CAVIUM_RESERVE32 | 425 | #ifdef CONFIG_CAVIUM_RESERVE32 |
@@ -537,17 +536,6 @@ void __init prom_init(void) | |||
537 | 536 | ||
538 | octeon_uart = octeon_get_boot_uart(); | 537 | octeon_uart = octeon_get_boot_uart(); |
539 | 538 | ||
540 | /* | ||
541 | * Disable All CIU Interrupts. The ones we need will be | ||
542 | * enabled later. Read the SUM register so we know the write | ||
543 | * completed. | ||
544 | */ | ||
545 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); | ||
546 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); | ||
547 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | ||
548 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); | ||
549 | cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); | ||
550 | |||
551 | #ifdef CONFIG_SMP | 539 | #ifdef CONFIG_SMP |
552 | octeon_write_lcd("LinuxSMP"); | 540 | octeon_write_lcd("LinuxSMP"); |
553 | #else | 541 | #else |
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 391cefe556b3..ba78b21cc8d0 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c | |||
@@ -171,41 +171,19 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle) | |||
171 | * After we've done initial boot, this function is called to allow the | 171 | * After we've done initial boot, this function is called to allow the |
172 | * board code to clean up state, if needed | 172 | * board code to clean up state, if needed |
173 | */ | 173 | */ |
174 | static void octeon_init_secondary(void) | 174 | static void __cpuinit octeon_init_secondary(void) |
175 | { | 175 | { |
176 | const int coreid = cvmx_get_core_num(); | ||
177 | union cvmx_ciu_intx_sum0 interrupt_enable; | ||
178 | unsigned int sr; | 176 | unsigned int sr; |
179 | 177 | ||
180 | #ifdef CONFIG_HOTPLUG_CPU | ||
181 | struct linux_app_boot_info *labi; | ||
182 | |||
183 | labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); | ||
184 | |||
185 | if (labi->labi_signature != LABI_SIGNATURE) | ||
186 | panic("The bootloader version on this board is incorrect."); | ||
187 | #endif | ||
188 | |||
189 | sr = set_c0_status(ST0_BEV); | 178 | sr = set_c0_status(ST0_BEV); |
190 | write_c0_ebase((u32)ebase); | 179 | write_c0_ebase((u32)ebase); |
191 | write_c0_status(sr); | 180 | write_c0_status(sr); |
192 | 181 | ||
193 | octeon_check_cpu_bist(); | 182 | octeon_check_cpu_bist(); |
194 | octeon_init_cvmcount(); | 183 | octeon_init_cvmcount(); |
195 | /* | 184 | |
196 | pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid); | 185 | octeon_irq_setup_secondary(); |
197 | */ | 186 | raw_local_irq_enable(); |
198 | /* Enable Mailbox interrupts to this core. These are the only | ||
199 | interrupts allowed on line 3 */ | ||
200 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff); | ||
201 | interrupt_enable.u64 = 0; | ||
202 | interrupt_enable.s.mbox = 0x3; | ||
203 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64); | ||
204 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); | ||
205 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | ||
206 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); | ||
207 | /* Enable core interrupt processing for 2,3 and 7 */ | ||
208 | set_c0_status(0x8c01); | ||
209 | } | 187 | } |
210 | 188 | ||
211 | /** | 189 | /** |
@@ -214,6 +192,15 @@ static void octeon_init_secondary(void) | |||
214 | */ | 192 | */ |
215 | void octeon_prepare_cpus(unsigned int max_cpus) | 193 | void octeon_prepare_cpus(unsigned int max_cpus) |
216 | { | 194 | { |
195 | #ifdef CONFIG_HOTPLUG_CPU | ||
196 | struct linux_app_boot_info *labi; | ||
197 | |||
198 | labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); | ||
199 | |||
200 | if (labi->labi_signature != LABI_SIGNATURE) | ||
201 | panic("The bootloader version on this board is incorrect."); | ||
202 | #endif | ||
203 | |||
217 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); | 204 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); |
218 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, | 205 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, |
219 | "mailbox0", mailbox_interrupt)) { | 206 | "mailbox0", mailbox_interrupt)) { |
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c index 8d9a5fc607e4..824e08c73798 100644 --- a/arch/mips/dec/ioasic-irq.c +++ b/arch/mips/dec/ioasic-irq.c | |||
@@ -68,10 +68,10 @@ void __init init_ioasic_irqs(int base) | |||
68 | fast_iob(); | 68 | fast_iob(); |
69 | 69 | ||
70 | for (i = base; i < base + IO_INR_DMA; i++) | 70 | for (i = base; i < base + IO_INR_DMA; i++) |
71 | set_irq_chip_and_handler(i, &ioasic_irq_type, | 71 | irq_set_chip_and_handler(i, &ioasic_irq_type, |
72 | handle_level_irq); | 72 | handle_level_irq); |
73 | for (; i < base + IO_IRQ_LINES; i++) | 73 | for (; i < base + IO_IRQ_LINES; i++) |
74 | set_irq_chip(i, &ioasic_dma_irq_type); | 74 | irq_set_chip(i, &ioasic_dma_irq_type); |
75 | 75 | ||
76 | ioasic_irq_base = base; | 76 | ioasic_irq_base = base; |
77 | } | 77 | } |
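This hunk and most of the remaining MIPS hunks below are a mechanical rename from the old set_irq_*() helpers to the irq_set_*() names of the reworked genirq core; the semantics are unchanged. A typical converted init loop looks like the sketch below, where every example_* name is a placeholder rather than a symbol from any driver in this diff:

	#include <linux/irq.h>

	#define EXAMPLE_IRQ_BASE	64		/* hypothetical base */

	static struct irq_chip example_irq_chip;	/* hypothetical chip */

	static void __init example_board_irq_init(void)
	{
		int i;

		for (i = EXAMPLE_IRQ_BASE; i < EXAMPLE_IRQ_BASE + 16; i++) {
			irq_set_chip_and_handler(i, &example_irq_chip,
						 handle_level_irq);
			irq_set_chip_data(i, (void *)(long)(i - EXAMPLE_IRQ_BASE));
		}
	}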
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c index ef31d98c4fb8..37199f742c45 100644 --- a/arch/mips/dec/kn02-irq.c +++ b/arch/mips/dec/kn02-irq.c | |||
@@ -73,7 +73,7 @@ void __init init_kn02_irqs(int base) | |||
73 | iob(); | 73 | iob(); |
74 | 74 | ||
75 | for (i = base; i < base + KN02_IRQ_LINES; i++) | 75 | for (i = base; i < base + KN02_IRQ_LINES; i++) |
76 | set_irq_chip_and_handler(i, &kn02_irq_type, handle_level_irq); | 76 | irq_set_chip_and_handler(i, &kn02_irq_type, handle_level_irq); |
77 | 77 | ||
78 | kn02_irq_base = base; | 78 | kn02_irq_base = base; |
79 | } | 79 | } |
diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c index 9b1207ae2256..3dbd7a5a6ad3 100644 --- a/arch/mips/emma/markeins/irq.c +++ b/arch/mips/emma/markeins/irq.c | |||
@@ -69,7 +69,7 @@ void emma2rh_irq_init(void) | |||
69 | u32 i; | 69 | u32 i; |
70 | 70 | ||
71 | for (i = 0; i < NUM_EMMA2RH_IRQ; i++) | 71 | for (i = 0; i < NUM_EMMA2RH_IRQ; i++) |
72 | set_irq_chip_and_handler_name(EMMA2RH_IRQ_BASE + i, | 72 | irq_set_chip_and_handler_name(EMMA2RH_IRQ_BASE + i, |
73 | &emma2rh_irq_controller, | 73 | &emma2rh_irq_controller, |
74 | handle_level_irq, "level"); | 74 | handle_level_irq, "level"); |
75 | } | 75 | } |
@@ -105,7 +105,7 @@ void emma2rh_sw_irq_init(void) | |||
105 | u32 i; | 105 | u32 i; |
106 | 106 | ||
107 | for (i = 0; i < NUM_EMMA2RH_IRQ_SW; i++) | 107 | for (i = 0; i < NUM_EMMA2RH_IRQ_SW; i++) |
108 | set_irq_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i, | 108 | irq_set_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i, |
109 | &emma2rh_sw_irq_controller, | 109 | &emma2rh_sw_irq_controller, |
110 | handle_level_irq, "level"); | 110 | handle_level_irq, "level"); |
111 | } | 111 | } |
@@ -162,7 +162,7 @@ void emma2rh_gpio_irq_init(void) | |||
162 | u32 i; | 162 | u32 i; |
163 | 163 | ||
164 | for (i = 0; i < NUM_EMMA2RH_IRQ_GPIO; i++) | 164 | for (i = 0; i < NUM_EMMA2RH_IRQ_GPIO; i++) |
165 | set_irq_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i, | 165 | irq_set_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i, |
166 | &emma2rh_gpio_irq_controller, | 166 | &emma2rh_gpio_irq_controller, |
167 | handle_edge_irq, "edge"); | 167 | handle_edge_irq, "edge"); |
168 | } | 168 | } |
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h index 6ddab8aef644..5b05f186e395 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/irq.h +++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h | |||
@@ -11,172 +11,91 @@ | |||
11 | #define NR_IRQS OCTEON_IRQ_LAST | 11 | #define NR_IRQS OCTEON_IRQ_LAST |
12 | #define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0 | 12 | #define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0 |
13 | 13 | ||
14 | /* 0 - 7 represent the i8259 master */ | 14 | enum octeon_irq { |
15 | #define OCTEON_IRQ_I8259M0 0 | 15 | /* 1 - 8 represent the 8 MIPS standard interrupt sources */ |
16 | #define OCTEON_IRQ_I8259M1 1 | 16 | OCTEON_IRQ_SW0 = 1, |
17 | #define OCTEON_IRQ_I8259M2 2 | 17 | OCTEON_IRQ_SW1, |
18 | #define OCTEON_IRQ_I8259M3 3 | 18 | /* CIU0, CIU2, CIU4 are 3, 4, 5 */ |
19 | #define OCTEON_IRQ_I8259M4 4 | 19 | OCTEON_IRQ_5 = 6, |
20 | #define OCTEON_IRQ_I8259M5 5 | 20 | OCTEON_IRQ_PERF, |
21 | #define OCTEON_IRQ_I8259M6 6 | 21 | OCTEON_IRQ_TIMER, |
22 | #define OCTEON_IRQ_I8259M7 7 | 22 | /* sources in CIU_INTX_EN0 */ |
23 | /* 8 - 15 represent the i8259 slave */ | 23 | OCTEON_IRQ_WORKQ0, |
24 | #define OCTEON_IRQ_I8259S0 8 | 24 | OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16, |
25 | #define OCTEON_IRQ_I8259S1 9 | 25 | OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16, |
26 | #define OCTEON_IRQ_I8259S2 10 | 26 | OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15, |
27 | #define OCTEON_IRQ_I8259S3 11 | 27 | OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16, |
28 | #define OCTEON_IRQ_I8259S4 12 | 28 | OCTEON_IRQ_MBOX1, |
29 | #define OCTEON_IRQ_I8259S5 13 | 29 | OCTEON_IRQ_UART0, |
30 | #define OCTEON_IRQ_I8259S6 14 | 30 | OCTEON_IRQ_UART1, |
31 | #define OCTEON_IRQ_I8259S7 15 | 31 | OCTEON_IRQ_UART2, |
32 | /* 16 - 23 represent the 8 MIPS standard interrupt sources */ | 32 | OCTEON_IRQ_PCI_INT0, |
33 | #define OCTEON_IRQ_SW0 16 | 33 | OCTEON_IRQ_PCI_INT1, |
34 | #define OCTEON_IRQ_SW1 17 | 34 | OCTEON_IRQ_PCI_INT2, |
35 | #define OCTEON_IRQ_CIU0 18 | 35 | OCTEON_IRQ_PCI_INT3, |
36 | #define OCTEON_IRQ_CIU1 19 | 36 | OCTEON_IRQ_PCI_MSI0, |
37 | #define OCTEON_IRQ_CIU4 20 | 37 | OCTEON_IRQ_PCI_MSI1, |
38 | #define OCTEON_IRQ_5 21 | 38 | OCTEON_IRQ_PCI_MSI2, |
39 | #define OCTEON_IRQ_PERF 22 | 39 | OCTEON_IRQ_PCI_MSI3, |
40 | #define OCTEON_IRQ_TIMER 23 | 40 | |
41 | /* 24 - 87 represent the sources in CIU_INTX_EN0 */ | 41 | OCTEON_IRQ_TWSI, |
42 | #define OCTEON_IRQ_WORKQ0 24 | 42 | OCTEON_IRQ_TWSI2, |
43 | #define OCTEON_IRQ_WORKQ1 25 | 43 | OCTEON_IRQ_RML, |
44 | #define OCTEON_IRQ_WORKQ2 26 | 44 | OCTEON_IRQ_TRACE0, |
45 | #define OCTEON_IRQ_WORKQ3 27 | 45 | OCTEON_IRQ_GMX_DRP0 = OCTEON_IRQ_TRACE0 + 4, |
46 | #define OCTEON_IRQ_WORKQ4 28 | 46 | OCTEON_IRQ_IPD_DRP = OCTEON_IRQ_GMX_DRP0 + 5, |
47 | #define OCTEON_IRQ_WORKQ5 29 | 47 | OCTEON_IRQ_KEY_ZERO, |
48 | #define OCTEON_IRQ_WORKQ6 30 | 48 | OCTEON_IRQ_TIMER0, |
49 | #define OCTEON_IRQ_WORKQ7 31 | 49 | OCTEON_IRQ_TIMER1, |
50 | #define OCTEON_IRQ_WORKQ8 32 | 50 | OCTEON_IRQ_TIMER2, |
51 | #define OCTEON_IRQ_WORKQ9 33 | 51 | OCTEON_IRQ_TIMER3, |
52 | #define OCTEON_IRQ_WORKQ10 34 | 52 | OCTEON_IRQ_USB0, |
53 | #define OCTEON_IRQ_WORKQ11 35 | 53 | OCTEON_IRQ_USB1, |
54 | #define OCTEON_IRQ_WORKQ12 36 | 54 | OCTEON_IRQ_PCM, |
55 | #define OCTEON_IRQ_WORKQ13 37 | 55 | OCTEON_IRQ_MPI, |
56 | #define OCTEON_IRQ_WORKQ14 38 | 56 | OCTEON_IRQ_POWIQ, |
57 | #define OCTEON_IRQ_WORKQ15 39 | 57 | OCTEON_IRQ_IPDPPTHR, |
58 | #define OCTEON_IRQ_GPIO0 40 | 58 | OCTEON_IRQ_MII0, |
59 | #define OCTEON_IRQ_GPIO1 41 | 59 | OCTEON_IRQ_MII1, |
60 | #define OCTEON_IRQ_GPIO2 42 | 60 | OCTEON_IRQ_BOOTDMA, |
61 | #define OCTEON_IRQ_GPIO3 43 | 61 | |
62 | #define OCTEON_IRQ_GPIO4 44 | 62 | OCTEON_IRQ_NAND, |
63 | #define OCTEON_IRQ_GPIO5 45 | 63 | OCTEON_IRQ_MIO, /* Summary of MIO_BOOT_ERR */ |
64 | #define OCTEON_IRQ_GPIO6 46 | 64 | OCTEON_IRQ_IOB, /* Summary of IOB_INT_SUM */ |
65 | #define OCTEON_IRQ_GPIO7 47 | 65 | OCTEON_IRQ_FPA, /* Summary of FPA_INT_SUM */ |
66 | #define OCTEON_IRQ_GPIO8 48 | 66 | OCTEON_IRQ_POW, /* Summary of POW_ECC_ERR */ |
67 | #define OCTEON_IRQ_GPIO9 49 | 67 | OCTEON_IRQ_L2C, /* Summary of L2C_INT_STAT */ |
68 | #define OCTEON_IRQ_GPIO10 50 | 68 | OCTEON_IRQ_IPD, /* Summary of IPD_INT_SUM */ |
69 | #define OCTEON_IRQ_GPIO11 51 | 69 | OCTEON_IRQ_PIP, /* Summary of PIP_INT_REG */ |
70 | #define OCTEON_IRQ_GPIO12 52 | 70 | OCTEON_IRQ_PKO, /* Summary of PKO_REG_ERROR */ |
71 | #define OCTEON_IRQ_GPIO13 53 | 71 | OCTEON_IRQ_ZIP, /* Summary of ZIP_ERROR */ |
72 | #define OCTEON_IRQ_GPIO14 54 | 72 | OCTEON_IRQ_TIM, /* Summary of TIM_REG_ERROR */ |
73 | #define OCTEON_IRQ_GPIO15 55 | 73 | OCTEON_IRQ_RAD, /* Summary of RAD_REG_ERROR */ |
74 | #define OCTEON_IRQ_MBOX0 56 | 74 | OCTEON_IRQ_KEY, /* Summary of KEY_INT_SUM */ |
75 | #define OCTEON_IRQ_MBOX1 57 | 75 | OCTEON_IRQ_DFA, /* Summary of DFA */ |
76 | #define OCTEON_IRQ_UART0 58 | 76 | OCTEON_IRQ_USBCTL, /* Summary of USBN0_INT_SUM */ |
77 | #define OCTEON_IRQ_UART1 59 | 77 | OCTEON_IRQ_SLI, /* Summary of SLI_INT_SUM */ |
78 | #define OCTEON_IRQ_PCI_INT0 60 | 78 | OCTEON_IRQ_DPI, /* Summary of DPI_INT_SUM */ |
79 | #define OCTEON_IRQ_PCI_INT1 61 | 79 | OCTEON_IRQ_AGX0, /* Summary of GMX0*+PCS0_INT*_REG */ |
80 | #define OCTEON_IRQ_PCI_INT2 62 | 80 | OCTEON_IRQ_AGL = OCTEON_IRQ_AGX0 + 5, |
81 | #define OCTEON_IRQ_PCI_INT3 63 | 81 | OCTEON_IRQ_PTP, |
82 | #define OCTEON_IRQ_PCI_MSI0 64 | 82 | OCTEON_IRQ_PEM0, |
83 | #define OCTEON_IRQ_PCI_MSI1 65 | 83 | OCTEON_IRQ_PEM1, |
84 | #define OCTEON_IRQ_PCI_MSI2 66 | 84 | OCTEON_IRQ_SRIO0, |
85 | #define OCTEON_IRQ_PCI_MSI3 67 | 85 | OCTEON_IRQ_SRIO1, |
86 | #define OCTEON_IRQ_RESERVED68 68 /* Summary of CIU_INT_SUM1 */ | 86 | OCTEON_IRQ_LMC0, |
87 | #define OCTEON_IRQ_TWSI 69 | 87 | OCTEON_IRQ_DFM = OCTEON_IRQ_LMC0 + 4, /* Summary of DFM */ |
88 | #define OCTEON_IRQ_RML 70 | 88 | OCTEON_IRQ_RST, |
89 | #define OCTEON_IRQ_TRACE 71 | 89 | }; |
90 | #define OCTEON_IRQ_GMX_DRP0 72 | ||
91 | #define OCTEON_IRQ_GMX_DRP1 73 | ||
92 | #define OCTEON_IRQ_IPD_DRP 74 | ||
93 | #define OCTEON_IRQ_KEY_ZERO 75 | ||
94 | #define OCTEON_IRQ_TIMER0 76 | ||
95 | #define OCTEON_IRQ_TIMER1 77 | ||
96 | #define OCTEON_IRQ_TIMER2 78 | ||
97 | #define OCTEON_IRQ_TIMER3 79 | ||
98 | #define OCTEON_IRQ_USB0 80 | ||
99 | #define OCTEON_IRQ_PCM 81 | ||
100 | #define OCTEON_IRQ_MPI 82 | ||
101 | #define OCTEON_IRQ_TWSI2 83 | ||
102 | #define OCTEON_IRQ_POWIQ 84 | ||
103 | #define OCTEON_IRQ_IPDPPTHR 85 | ||
104 | #define OCTEON_IRQ_MII0 86 | ||
105 | #define OCTEON_IRQ_BOOTDMA 87 | ||
106 | /* 88 - 151 represent the sources in CIU_INTX_EN1 */ | ||
107 | #define OCTEON_IRQ_WDOG0 88 | ||
108 | #define OCTEON_IRQ_WDOG1 89 | ||
109 | #define OCTEON_IRQ_WDOG2 90 | ||
110 | #define OCTEON_IRQ_WDOG3 91 | ||
111 | #define OCTEON_IRQ_WDOG4 92 | ||
112 | #define OCTEON_IRQ_WDOG5 93 | ||
113 | #define OCTEON_IRQ_WDOG6 94 | ||
114 | #define OCTEON_IRQ_WDOG7 95 | ||
115 | #define OCTEON_IRQ_WDOG8 96 | ||
116 | #define OCTEON_IRQ_WDOG9 97 | ||
117 | #define OCTEON_IRQ_WDOG10 98 | ||
118 | #define OCTEON_IRQ_WDOG11 99 | ||
119 | #define OCTEON_IRQ_WDOG12 100 | ||
120 | #define OCTEON_IRQ_WDOG13 101 | ||
121 | #define OCTEON_IRQ_WDOG14 102 | ||
122 | #define OCTEON_IRQ_WDOG15 103 | ||
123 | #define OCTEON_IRQ_UART2 104 | ||
124 | #define OCTEON_IRQ_USB1 105 | ||
125 | #define OCTEON_IRQ_MII1 106 | ||
126 | #define OCTEON_IRQ_RESERVED107 107 | ||
127 | #define OCTEON_IRQ_RESERVED108 108 | ||
128 | #define OCTEON_IRQ_RESERVED109 109 | ||
129 | #define OCTEON_IRQ_RESERVED110 110 | ||
130 | #define OCTEON_IRQ_RESERVED111 111 | ||
131 | #define OCTEON_IRQ_RESERVED112 112 | ||
132 | #define OCTEON_IRQ_RESERVED113 113 | ||
133 | #define OCTEON_IRQ_RESERVED114 114 | ||
134 | #define OCTEON_IRQ_RESERVED115 115 | ||
135 | #define OCTEON_IRQ_RESERVED116 116 | ||
136 | #define OCTEON_IRQ_RESERVED117 117 | ||
137 | #define OCTEON_IRQ_RESERVED118 118 | ||
138 | #define OCTEON_IRQ_RESERVED119 119 | ||
139 | #define OCTEON_IRQ_RESERVED120 120 | ||
140 | #define OCTEON_IRQ_RESERVED121 121 | ||
141 | #define OCTEON_IRQ_RESERVED122 122 | ||
142 | #define OCTEON_IRQ_RESERVED123 123 | ||
143 | #define OCTEON_IRQ_RESERVED124 124 | ||
144 | #define OCTEON_IRQ_RESERVED125 125 | ||
145 | #define OCTEON_IRQ_RESERVED126 126 | ||
146 | #define OCTEON_IRQ_RESERVED127 127 | ||
147 | #define OCTEON_IRQ_RESERVED128 128 | ||
148 | #define OCTEON_IRQ_RESERVED129 129 | ||
149 | #define OCTEON_IRQ_RESERVED130 130 | ||
150 | #define OCTEON_IRQ_RESERVED131 131 | ||
151 | #define OCTEON_IRQ_RESERVED132 132 | ||
152 | #define OCTEON_IRQ_RESERVED133 133 | ||
153 | #define OCTEON_IRQ_RESERVED134 134 | ||
154 | #define OCTEON_IRQ_RESERVED135 135 | ||
155 | #define OCTEON_IRQ_RESERVED136 136 | ||
156 | #define OCTEON_IRQ_RESERVED137 137 | ||
157 | #define OCTEON_IRQ_RESERVED138 138 | ||
158 | #define OCTEON_IRQ_RESERVED139 139 | ||
159 | #define OCTEON_IRQ_RESERVED140 140 | ||
160 | #define OCTEON_IRQ_RESERVED141 141 | ||
161 | #define OCTEON_IRQ_RESERVED142 142 | ||
162 | #define OCTEON_IRQ_RESERVED143 143 | ||
163 | #define OCTEON_IRQ_RESERVED144 144 | ||
164 | #define OCTEON_IRQ_RESERVED145 145 | ||
165 | #define OCTEON_IRQ_RESERVED146 146 | ||
166 | #define OCTEON_IRQ_RESERVED147 147 | ||
167 | #define OCTEON_IRQ_RESERVED148 148 | ||
168 | #define OCTEON_IRQ_RESERVED149 149 | ||
169 | #define OCTEON_IRQ_RESERVED150 150 | ||
170 | #define OCTEON_IRQ_RESERVED151 151 | ||
171 | 90 | ||
172 | #ifdef CONFIG_PCI_MSI | 91 | #ifdef CONFIG_PCI_MSI |
173 | /* 152 - 215 represent the MSI interrupts 0-63 */ | 92 | /* 152 - 407 represent the MSI interrupts 0-255 */ |
174 | #define OCTEON_IRQ_MSI_BIT0 152 | 93 | #define OCTEON_IRQ_MSI_BIT0 (OCTEON_IRQ_RST + 1) |
175 | #define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255) | ||
176 | 94 | ||
177 | #define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1) | 95 | #define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255) |
96 | #define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1) | ||
178 | #else | 97 | #else |
179 | #define OCTEON_IRQ_LAST 152 | 98 | #define OCTEON_IRQ_LAST (OCTEON_IRQ_RST + 1) |
180 | #endif | 99 | #endif |
181 | 100 | ||
182 | #endif | 101 | #endif |
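Because the i8259 placeholders are gone, the new enum packs the MIPS core interrupts and the CIU sources much more tightly. The numeric values implied by the initializers above, assuming OCTEON_IRQ_SW0 == 1, work out as in this illustrative mirror of the pattern:

	enum example_layout {
		EX_SW0    = 1,			/* first MIPS core source      */
		EX_WORKQ0 = 9,			/* first CIU_INTX_EN0 source   */
		EX_GPIO0  = EX_WORKQ0 + 16,	/* 25                          */
		EX_WDOG0  = EX_GPIO0 + 16,	/* 41, first CIU_INTX_EN1 source */
		EX_MBOX0  = EX_WDOG0 + 16,	/* 57                          */
	};

so OCTEON_IRQ_LAST shrinks accordingly, and the MSI block now starts right after OCTEON_IRQ_RST instead of at a fixed 152.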
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index 6b34afd0d4e7..f72f768cd3a4 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h | |||
@@ -257,4 +257,6 @@ extern struct cvmx_bootinfo *octeon_bootinfo; | |||
257 | 257 | ||
258 | extern uint64_t octeon_bootloader_entry_addr; | 258 | extern uint64_t octeon_bootloader_entry_addr; |
259 | 259 | ||
260 | extern void (*octeon_irq_setup_secondary)(void); | ||
261 | |||
260 | #endif /* __ASM_OCTEON_OCTEON_H */ | 262 | #endif /* __ASM_OCTEON_OCTEON_H */ |
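The new extern above is the hook that smp.c now calls from octeon_init_secondary(). The pattern is a single function pointer that the boot-time irq setup points at the model-specific routine; everything except the exported pointer name in the sketch below is illustrative:

	/* in the irq setup code */
	void (*octeon_irq_setup_secondary)(void);

	static void example_setup_secondary_ciu(void)
	{
		/* per-core unmasking, e.g. the CP0 lines used by the CIU */
		set_c0_status(STATUSF_IP3 | STATUSF_IP2);
		clear_c0_status(STATUSF_IP4);
	}

	static void __init example_irq_init(void)
	{
		octeon_irq_setup_secondary = example_setup_secondary_ciu;
	}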
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index dae22c1d2c82..fa2e37ea2be1 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h | |||
@@ -1005,7 +1005,7 @@ | |||
1005 | #define __NR_name_to_handle_at (__NR_Linux + 303) | 1005 | #define __NR_name_to_handle_at (__NR_Linux + 303) |
1006 | #define __NR_open_by_handle_at (__NR_Linux + 304) | 1006 | #define __NR_open_by_handle_at (__NR_Linux + 304) |
1007 | #define __NR_clock_adjtime (__NR_Linux + 305) | 1007 | #define __NR_clock_adjtime (__NR_Linux + 305) |
1008 | #define __NR_clock_adjtime (__NR_Linux + 306) | 1008 | #define __NR_syncfs (__NR_Linux + 306) |
1009 | 1009 | ||
1010 | /* | 1010 | /* |
1011 | * Offset of the last N32 flavoured syscall | 1011 | * Offset of the last N32 flavoured syscall |
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c index 40f7c6b1e260..260df4750949 100644 --- a/arch/mips/jazz/irq.c +++ b/arch/mips/jazz/irq.c | |||
@@ -56,7 +56,7 @@ void __init init_r4030_ints(void) | |||
56 | int i; | 56 | int i; |
57 | 57 | ||
58 | for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++) | 58 | for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++) |
59 | set_irq_chip_and_handler(i, &r4030_irq_type, handle_level_irq); | 59 | irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq); |
60 | 60 | ||
61 | r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0); | 61 | r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0); |
62 | r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */ | 62 | r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */ |
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index bd2fc29b95e0..73031f7fc827 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c | |||
@@ -306,7 +306,7 @@ static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc) | |||
306 | uint32_t flag; | 306 | uint32_t flag; |
307 | unsigned int gpio_irq; | 307 | unsigned int gpio_irq; |
308 | unsigned int gpio_bank; | 308 | unsigned int gpio_bank; |
309 | struct jz_gpio_chip *chip = get_irq_desc_data(desc); | 309 | struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc); |
310 | 310 | ||
311 | gpio_bank = JZ4740_IRQ_GPIO0 - irq; | 311 | gpio_bank = JZ4740_IRQ_GPIO0 - irq; |
312 | 312 | ||
@@ -416,7 +416,7 @@ static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on) | |||
416 | chip->wakeup &= ~IRQ_TO_BIT(data->irq); | 416 | chip->wakeup &= ~IRQ_TO_BIT(data->irq); |
417 | spin_unlock(&chip->lock); | 417 | spin_unlock(&chip->lock); |
418 | 418 | ||
419 | set_irq_wake(chip->irq, on); | 419 | irq_set_irq_wake(chip->irq, on); |
420 | return 0; | 420 | return 0; |
421 | } | 421 | } |
422 | 422 | ||
@@ -510,14 +510,14 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) | |||
510 | gpiochip_add(&chip->gpio_chip); | 510 | gpiochip_add(&chip->gpio_chip); |
511 | 511 | ||
512 | chip->irq = JZ4740_IRQ_INTC_GPIO(id); | 512 | chip->irq = JZ4740_IRQ_INTC_GPIO(id); |
513 | set_irq_data(chip->irq, chip); | 513 | irq_set_handler_data(chip->irq, chip); |
514 | set_irq_chained_handler(chip->irq, jz_gpio_irq_demux_handler); | 514 | irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler); |
515 | 515 | ||
516 | for (irq = chip->irq_base; irq < chip->irq_base + chip->gpio_chip.ngpio; ++irq) { | 516 | for (irq = chip->irq_base; irq < chip->irq_base + chip->gpio_chip.ngpio; ++irq) { |
517 | irq_set_lockdep_class(irq, &gpio_lock_class); | 517 | irq_set_lockdep_class(irq, &gpio_lock_class); |
518 | set_irq_chip_data(irq, chip); | 518 | irq_set_chip_data(irq, chip); |
519 | set_irq_chip_and_handler(irq, &jz_gpio_irq_chip, | 519 | irq_set_chip_and_handler(irq, &jz_gpio_irq_chip, |
520 | handle_level_irq); | 520 | handle_level_irq); |
521 | } | 521 | } |
522 | 522 | ||
523 | return 0; | 523 | return 0; |
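Beyond the rename, the jz4740 hunk shows the standard chained-GPIO pattern: the parent interrupt carries a pointer to the bank via irq_set_handler_data(), and the demux handler fetches it back with irq_desc_get_handler_data() (previously get_irq_desc_data()). A generic sketch of that pattern, with a hypothetical register layout:

	struct example_gpio_bank {
		void __iomem *status;		/* hypothetical pending-status register */
		unsigned int irq_base;
	};

	static void example_gpio_demux(unsigned int irq, struct irq_desc *desc)
	{
		struct example_gpio_bank *bank = irq_desc_get_handler_data(desc);
		unsigned long pending = readl(bank->status);
		int bit;

		for_each_set_bit(bit, &pending, 32)
			generic_handle_irq(bank->irq_base + bit);
	}

	static void example_gpio_irq_init(struct example_gpio_bank *bank,
					  unsigned int parent_irq)
	{
		irq_set_handler_data(parent_irq, bank);
		irq_set_chained_handler(parent_irq, example_gpio_demux);
	}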
diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c index dcc5593a9389..d82c0c430e03 100644 --- a/arch/mips/jz4740/irq.c +++ b/arch/mips/jz4740/irq.c | |||
@@ -104,8 +104,8 @@ void __init arch_init_irq(void) | |||
104 | writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK); | 104 | writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK); |
105 | 105 | ||
106 | for (i = JZ4740_IRQ_BASE; i < JZ4740_IRQ_BASE + 32; i++) { | 106 | for (i = JZ4740_IRQ_BASE; i < JZ4740_IRQ_BASE + 32; i++) { |
107 | set_irq_chip_data(i, (void *)IRQ_BIT(i)); | 107 | irq_set_chip_data(i, (void *)IRQ_BIT(i)); |
108 | set_irq_chip_and_handler(i, &intc_irq_type, handle_level_irq); | 108 | irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq); |
109 | } | 109 | } |
110 | 110 | ||
111 | setup_irq(2, &jz4740_cascade_action); | 111 | setup_irq(2, &jz4740_cascade_action); |
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index e221662bb80c..c018696765d4 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c | |||
@@ -110,7 +110,7 @@ int i8259A_irq_pending(unsigned int irq) | |||
110 | void make_8259A_irq(unsigned int irq) | 110 | void make_8259A_irq(unsigned int irq) |
111 | { | 111 | { |
112 | disable_irq_nosync(irq); | 112 | disable_irq_nosync(irq); |
113 | set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); | 113 | irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq); |
114 | enable_irq(irq); | 114 | enable_irq(irq); |
115 | } | 115 | } |
116 | 116 | ||
@@ -336,8 +336,8 @@ void __init init_i8259_irqs(void) | |||
336 | init_8259A(0); | 336 | init_8259A(0); |
337 | 337 | ||
338 | for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { | 338 | for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { |
339 | set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); | 339 | irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq); |
340 | set_irq_probe(i); | 340 | irq_set_probe(i); |
341 | } | 341 | } |
342 | 342 | ||
343 | setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); | 343 | setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); |
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 43cd9628251a..0c527f652196 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
@@ -229,7 +229,7 @@ static void __init gic_basic_init(int numintrs, int numvpes, | |||
229 | vpe_local_setup(numvpes); | 229 | vpe_local_setup(numvpes); |
230 | 230 | ||
231 | for (i = _irqbase; i < (_irqbase + numintrs); i++) | 231 | for (i = _irqbase; i < (_irqbase + numintrs); i++) |
232 | set_irq_chip(i, &gic_irq_controller); | 232 | irq_set_chip(i, &gic_irq_controller); |
233 | } | 233 | } |
234 | 234 | ||
235 | void __init gic_init(unsigned long gic_base_addr, | 235 | void __init gic_init(unsigned long gic_base_addr, |
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c index 7fd176fa367a..883fc6cead36 100644 --- a/arch/mips/kernel/irq-gt641xx.c +++ b/arch/mips/kernel/irq-gt641xx.c | |||
@@ -126,6 +126,6 @@ void __init gt641xx_irq_init(void) | |||
126 | * bit31: logical or of bits[25:1]. | 126 | * bit31: logical or of bits[25:1]. |
127 | */ | 127 | */ |
128 | for (i = 1; i < 30; i++) | 128 | for (i = 1; i < 30; i++) |
129 | set_irq_chip_and_handler(GT641XX_IRQ_BASE + i, | 129 | irq_set_chip_and_handler(GT641XX_IRQ_BASE + i, |
130 | &gt641xx_irq_chip, handle_level_irq); | 130 | &gt641xx_irq_chip, handle_level_irq); |
131 | } | 131 | } |
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index fc800cd9947e..0c6afeed89d2 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c | |||
@@ -137,16 +137,20 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma | |||
137 | 137 | ||
138 | switch (imp->im_type) { | 138 | switch (imp->im_type) { |
139 | case MSC01_IRQ_EDGE: | 139 | case MSC01_IRQ_EDGE: |
140 | set_irq_chip_and_handler_name(irqbase + n, | 140 | irq_set_chip_and_handler_name(irqbase + n, |
141 | &msc_edgeirq_type, handle_edge_irq, "edge"); | 141 | &msc_edgeirq_type, |
142 | handle_edge_irq, | ||
143 | "edge"); | ||
142 | if (cpu_has_veic) | 144 | if (cpu_has_veic) |
143 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); | 145 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); |
144 | else | 146 | else |
145 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); | 147 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); |
146 | break; | 148 | break; |
147 | case MSC01_IRQ_LEVEL: | 149 | case MSC01_IRQ_LEVEL: |
148 | set_irq_chip_and_handler_name(irqbase+n, | 150 | irq_set_chip_and_handler_name(irqbase + n, |
149 | &msc_levelirq_type, handle_level_irq, "level"); | 151 | &msc_levelirq_type, |
152 | handle_level_irq, | ||
153 | "level"); | ||
150 | if (cpu_has_veic) | 154 | if (cpu_has_veic) |
151 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); | 155 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); |
152 | else | 156 | else |
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index fd24fd98b041..a8a8977d5887 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c | |||
@@ -45,6 +45,6 @@ void __init rm7k_cpu_irq_init(void) | |||
45 | clear_c0_intcontrol(0x00000f00); /* Mask all */ | 45 | clear_c0_intcontrol(0x00000f00); /* Mask all */ |
46 | 46 | ||
47 | for (i = base; i < base + 4; i++) | 47 | for (i = base; i < base + 4; i++) |
48 | set_irq_chip_and_handler(i, &rm7k_irq_controller, | 48 | irq_set_chip_and_handler(i, &rm7k_irq_controller, |
49 | handle_percpu_irq); | 49 | handle_percpu_irq); |
50 | } | 50 | } |
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index ca463ec9bad5..38874a4b9255 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c | |||
@@ -98,10 +98,10 @@ void __init rm9k_cpu_irq_init(void) | |||
98 | clear_c0_intcontrol(0x0000f000); /* Mask all */ | 98 | clear_c0_intcontrol(0x0000f000); /* Mask all */ |
99 | 99 | ||
100 | for (i = base; i < base + 4; i++) | 100 | for (i = base; i < base + 4; i++) |
101 | set_irq_chip_and_handler(i, &rm9k_irq_controller, | 101 | irq_set_chip_and_handler(i, &rm9k_irq_controller, |
102 | handle_level_irq); | 102 | handle_level_irq); |
103 | 103 | ||
104 | rm9000_perfcount_irq = base + 1; | 104 | rm9000_perfcount_irq = base + 1; |
105 | set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, | 105 | irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, |
106 | handle_percpu_irq); | 106 | handle_percpu_irq); |
107 | } | 107 | } |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 1b68ebe1b458..9b734d74ae8e 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
@@ -102,7 +102,7 @@ void __init init_IRQ(void) | |||
102 | #endif | 102 | #endif |
103 | 103 | ||
104 | for (i = 0; i < NR_IRQS; i++) | 104 | for (i = 0; i < NR_IRQS; i++) |
105 | set_irq_noprobe(i); | 105 | irq_set_noprobe(i); |
106 | 106 | ||
107 | arch_init_irq(); | 107 | arch_init_irq(); |
108 | 108 | ||
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index fd945c56bc33..6e71b284f6c9 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c | |||
@@ -109,10 +109,10 @@ void __init mips_cpu_irq_init(void) | |||
109 | */ | 109 | */ |
110 | if (cpu_has_mipsmt) | 110 | if (cpu_has_mipsmt) |
111 | for (i = irq_base; i < irq_base + 2; i++) | 111 | for (i = irq_base; i < irq_base + 2; i++) |
112 | set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller, | 112 | irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller, |
113 | handle_percpu_irq); | 113 | handle_percpu_irq); |
114 | 114 | ||
115 | for (i = irq_base + 2; i < irq_base + 8; i++) | 115 | for (i = irq_base + 2; i < irq_base + 8; i++) |
116 | set_irq_chip_and_handler(i, &mips_cpu_irq_controller, | 116 | irq_set_chip_and_handler(i, &mips_cpu_irq_controller, |
117 | handle_percpu_irq); | 117 | handle_percpu_irq); |
118 | } | 118 | } |
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c index 526e1581549a..b0c55b50218e 100644 --- a/arch/mips/kernel/irq_txx9.c +++ b/arch/mips/kernel/irq_txx9.c | |||
@@ -154,8 +154,8 @@ void __init txx9_irq_init(unsigned long baseaddr) | |||
154 | for (i = 0; i < TXx9_MAX_IR; i++) { | 154 | for (i = 0; i < TXx9_MAX_IR; i++) { |
155 | txx9irq[i].level = 4; /* middle level */ | 155 | txx9irq[i].level = 4; /* middle level */ |
156 | txx9irq[i].mode = TXx9_IRCR_LOW; | 156 | txx9irq[i].mode = TXx9_IRCR_LOW; |
157 | set_irq_chip_and_handler(TXX9_IRQ_BASE + i, | 157 | irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip, |
158 | &txx9_irq_chip, handle_level_irq); | 158 | handle_level_irq); |
159 | } | 159 | } |
160 | 160 | ||
161 | /* mask all IRC interrupts */ | 161 | /* mask all IRC interrupts */ |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index f7e2c7807d7b..5a88cc4ccd5a 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -1146,7 +1146,7 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) | |||
1146 | 1146 | ||
1147 | setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); | 1147 | setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); |
1148 | 1148 | ||
1149 | set_irq_handler(cpu_ipi_irq, handle_percpu_irq); | 1149 | irq_set_handler(cpu_ipi_irq, handle_percpu_irq); |
1150 | } | 1150 | } |
1151 | 1151 | ||
1152 | /* | 1152 | /* |
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c index 670e3e70d198..de4c165515d7 100644 --- a/arch/mips/lasat/interrupt.c +++ b/arch/mips/lasat/interrupt.c | |||
@@ -128,7 +128,7 @@ void __init arch_init_irq(void) | |||
128 | mips_cpu_irq_init(); | 128 | mips_cpu_irq_init(); |
129 | 129 | ||
130 | for (i = LASAT_IRQ_BASE; i <= LASAT_IRQ_END; i++) | 130 | for (i = LASAT_IRQ_BASE; i <= LASAT_IRQ_END; i++) |
131 | set_irq_chip_and_handler(i, &lasat_irq_type, handle_level_irq); | 131 | irq_set_chip_and_handler(i, &lasat_irq_type, handle_level_irq); |
132 | 132 | ||
133 | setup_irq(LASAT_CASCADE_IRQ, &cascade); | 133 | setup_irq(LASAT_CASCADE_IRQ, &cascade); |
134 | } | 134 | } |
diff --git a/arch/mips/loongson/common/bonito-irq.c b/arch/mips/loongson/common/bonito-irq.c index 1549361696ad..f27d7ccca92a 100644 --- a/arch/mips/loongson/common/bonito-irq.c +++ b/arch/mips/loongson/common/bonito-irq.c | |||
@@ -44,7 +44,8 @@ void bonito_irq_init(void) | |||
44 | u32 i; | 44 | u32 i; |
45 | 45 | ||
46 | for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++) | 46 | for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++) |
47 | set_irq_chip_and_handler(i, &bonito_irq_type, handle_level_irq); | 47 | irq_set_chip_and_handler(i, &bonito_irq_type, |
48 | handle_level_irq); | ||
48 | 49 | ||
49 | #ifdef CONFIG_CPU_LOONGSON2E | 50 | #ifdef CONFIG_CPU_LOONGSON2E |
50 | setup_irq(LOONGSON_IRQ_BASE + 10, &dma_timeout_irqaction); | 51 | setup_irq(LOONGSON_IRQ_BASE + 10, &dma_timeout_irqaction); |
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index b79b24afe3a2..9027061f0ead 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -472,7 +472,7 @@ static void __init fill_ipi_map(void) | |||
472 | void __init arch_init_ipiirq(int irq, struct irqaction *action) | 472 | void __init arch_init_ipiirq(int irq, struct irqaction *action) |
473 | { | 473 | { |
474 | setup_irq(irq, action); | 474 | setup_irq(irq, action); |
475 | set_irq_handler(irq, handle_percpu_irq); | 475 | irq_set_handler(irq, handle_percpu_irq); |
476 | } | 476 | } |
477 | 477 | ||
478 | void __init arch_init_irq(void) | 478 | void __init arch_init_irq(void) |
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 3c6f190aa61c..1620b83cd13e 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c | |||
@@ -119,7 +119,7 @@ static void __init plat_perf_setup(void) | |||
119 | set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch); | 119 | set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch); |
120 | mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; | 120 | mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; |
121 | #ifdef CONFIG_SMP | 121 | #ifdef CONFIG_SMP |
122 | set_irq_handler(mips_cpu_perf_irq, handle_percpu_irq); | 122 | irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq); |
123 | #endif | 123 | #endif |
124 | } | 124 | } |
125 | } | 125 | } |
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index d8080499872a..5d530f89d872 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c | |||
@@ -172,7 +172,7 @@ msi_irq_allocated: | |||
172 | pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, | 172 | pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, |
173 | control); | 173 | control); |
174 | 174 | ||
175 | set_irq_msi(irq, desc); | 175 | irq_set_msi_desc(irq, desc); |
176 | write_msi_msg(irq, &msg); | 176 | write_msi_msg(irq, &msg); |
177 | return 0; | 177 | return 0; |
178 | } | 178 | } |
@@ -259,11 +259,11 @@ static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock); | |||
259 | static u64 msi_rcv_reg[4]; | 259 | static u64 msi_rcv_reg[4]; |
260 | static u64 mis_ena_reg[4]; | 260 | static u64 mis_ena_reg[4]; |
261 | 261 | ||
262 | static void octeon_irq_msi_enable_pcie(unsigned int irq) | 262 | static void octeon_irq_msi_enable_pcie(struct irq_data *data) |
263 | { | 263 | { |
264 | u64 en; | 264 | u64 en; |
265 | unsigned long flags; | 265 | unsigned long flags; |
266 | int msi_number = irq - OCTEON_IRQ_MSI_BIT0; | 266 | int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; |
267 | int irq_index = msi_number >> 6; | 267 | int irq_index = msi_number >> 6; |
268 | int irq_bit = msi_number & 0x3f; | 268 | int irq_bit = msi_number & 0x3f; |
269 | 269 | ||
@@ -275,11 +275,11 @@ static void octeon_irq_msi_enable_pcie(unsigned int irq) | |||
275 | raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); | 275 | raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); |
276 | } | 276 | } |
277 | 277 | ||
278 | static void octeon_irq_msi_disable_pcie(unsigned int irq) | 278 | static void octeon_irq_msi_disable_pcie(struct irq_data *data) |
279 | { | 279 | { |
280 | u64 en; | 280 | u64 en; |
281 | unsigned long flags; | 281 | unsigned long flags; |
282 | int msi_number = irq - OCTEON_IRQ_MSI_BIT0; | 282 | int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; |
283 | int irq_index = msi_number >> 6; | 283 | int irq_index = msi_number >> 6; |
284 | int irq_bit = msi_number & 0x3f; | 284 | int irq_bit = msi_number & 0x3f; |
285 | 285 | ||
@@ -293,11 +293,11 @@ static void octeon_irq_msi_disable_pcie(unsigned int irq) | |||
293 | 293 | ||
294 | static struct irq_chip octeon_irq_chip_msi_pcie = { | 294 | static struct irq_chip octeon_irq_chip_msi_pcie = { |
295 | .name = "MSI", | 295 | .name = "MSI", |
296 | .enable = octeon_irq_msi_enable_pcie, | 296 | .irq_enable = octeon_irq_msi_enable_pcie, |
297 | .disable = octeon_irq_msi_disable_pcie, | 297 | .irq_disable = octeon_irq_msi_disable_pcie, |
298 | }; | 298 | }; |
299 | 299 | ||
300 | static void octeon_irq_msi_enable_pci(unsigned int irq) | 300 | static void octeon_irq_msi_enable_pci(struct irq_data *data) |
301 | { | 301 | { |
302 | /* | 302 | /* |
303 | * Octeon PCI doesn't have the ability to mask/unmask MSI | 303 | * Octeon PCI doesn't have the ability to mask/unmask MSI |
@@ -308,15 +308,15 @@ static void octeon_irq_msi_enable_pci(unsigned int irq) | |||
308 | */ | 308 | */ |
309 | } | 309 | } |
310 | 310 | ||
311 | static void octeon_irq_msi_disable_pci(unsigned int irq) | 311 | static void octeon_irq_msi_disable_pci(struct irq_data *data) |
312 | { | 312 | { |
313 | /* See comment in enable */ | 313 | /* See comment in enable */ |
314 | } | 314 | } |
315 | 315 | ||
316 | static struct irq_chip octeon_irq_chip_msi_pci = { | 316 | static struct irq_chip octeon_irq_chip_msi_pci = { |
317 | .name = "MSI", | 317 | .name = "MSI", |
318 | .enable = octeon_irq_msi_enable_pci, | 318 | .irq_enable = octeon_irq_msi_enable_pci, |
319 | .disable = octeon_irq_msi_disable_pci, | 319 | .irq_disable = octeon_irq_msi_disable_pci, |
320 | }; | 320 | }; |
321 | 321 | ||
322 | /* | 322 | /* |
@@ -388,7 +388,7 @@ int __init octeon_msi_initialize(void) | |||
388 | } | 388 | } |
389 | 389 | ||
390 | for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++) | 390 | for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++) |
391 | set_irq_chip_and_handler(irq, msi, handle_simple_irq); | 391 | irq_set_chip_and_handler(irq, msi, handle_simple_irq); |
392 | 392 | ||
393 | if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { | 393 | if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { |
394 | if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, | 394 | if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, |
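The msi-octeon.c hunk also shows the second half of the genirq conversion: irq_chip callbacks now take a struct irq_data instead of a bare irq number, and the members gain an irq_ prefix (.enable becomes .irq_enable, and so on). The general before/after shape, with placeholder names rather than symbols from this diff:

	#define EXAMPLE_IRQ_BASE	64		/* hypothetical */

	static void example_hw_set_enable(unsigned int bit, int on)
	{
		/* hardware poke elided in this sketch */
	}

	/* old style: void (*enable)(unsigned int irq)            */
	/* new style: void (*irq_enable)(struct irq_data *data)   */
	static void example_irq_enable(struct irq_data *data)
	{
		example_hw_set_enable(data->irq - EXAMPLE_IRQ_BASE, 1);
	}

	static void example_irq_disable(struct irq_data *data)
	{
		example_hw_set_enable(data->irq - EXAMPLE_IRQ_BASE, 0);
	}

	static struct irq_chip example_msi_chip = {
		.name		= "MSI",
		.irq_enable	= example_irq_enable,
		.irq_disable	= example_irq_disable,
	};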
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c index 352f29d9226f..c4fa2d775d8b 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c | |||
@@ -182,7 +182,7 @@ void __init msp_cic_irq_init(void) | |||
182 | 182 | ||
183 | /* initialize all the IRQ descriptors */ | 183 | /* initialize all the IRQ descriptors */ |
184 | for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { | 184 | for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { |
185 | set_irq_chip_and_handler(i, &msp_cic_irq_controller, | 185 | irq_set_chip_and_handler(i, &msp_cic_irq_controller, |
186 | handle_level_irq); | 186 | handle_level_irq); |
187 | #ifdef CONFIG_MIPS_MT_SMTC | 187 | #ifdef CONFIG_MIPS_MT_SMTC |
188 | /* Mask of CIC interrupt */ | 188 | /* Mask of CIC interrupt */ |
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c index 8f51e4adc438..5bbcc47da6b9 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c | |||
@@ -77,7 +77,7 @@ void __init msp_slp_irq_init(void) | |||
77 | 77 | ||
78 | /* initialize all the IRQ descriptors */ | 78 | /* initialize all the IRQ descriptors */ |
79 | for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++) | 79 | for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++) |
80 | set_irq_chip_and_handler(i, &msp_slp_irq_controller, | 80 | irq_set_chip_and_handler(i, &msp_slp_irq_controller, |
81 | handle_level_irq); | 81 | handle_level_irq); |
82 | } | 82 | } |
83 | 83 | ||
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smp.c b/arch/mips/pmc-sierra/msp71xx/msp_smp.c index 43a9e26e1c69..bec17901ff03 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_smp.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_smp.c | |||
@@ -64,7 +64,7 @@ static struct irqaction irq_call = { | |||
64 | void __init arch_init_ipiirq(int irq, struct irqaction *action) | 64 | void __init arch_init_ipiirq(int irq, struct irqaction *action) |
65 | { | 65 | { |
66 | setup_irq(irq, action); | 66 | setup_irq(irq, action); |
67 | set_irq_handler(irq, handle_percpu_irq); | 67 | irq_set_handler(irq, handle_percpu_irq); |
68 | } | 68 | } |
69 | 69 | ||
70 | void __init msp_vsmp_int_init(void) | 70 | void __init msp_vsmp_int_init(void) |
diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c index b226bcb0a2f4..adc171c8846f 100644 --- a/arch/mips/pnx833x/common/interrupts.c +++ b/arch/mips/pnx833x/common/interrupts.c | |||
@@ -259,11 +259,13 @@ void __init arch_init_irq(void) | |||
259 | /* Set IRQ information in irq_desc */ | 259 | /* Set IRQ information in irq_desc */ |
260 | for (irq = PNX833X_PIC_IRQ_BASE; irq < (PNX833X_PIC_IRQ_BASE + PNX833X_PIC_NUM_IRQ); irq++) { | 260 | for (irq = PNX833X_PIC_IRQ_BASE; irq < (PNX833X_PIC_IRQ_BASE + PNX833X_PIC_NUM_IRQ); irq++) { |
261 | pnx833x_hard_disable_pic_irq(irq); | 261 | pnx833x_hard_disable_pic_irq(irq); |
262 | set_irq_chip_and_handler(irq, &pnx833x_pic_irq_type, handle_simple_irq); | 262 | irq_set_chip_and_handler(irq, &pnx833x_pic_irq_type, |
263 | handle_simple_irq); | ||
263 | } | 264 | } |
264 | 265 | ||
265 | for (irq = PNX833X_GPIO_IRQ_BASE; irq < (PNX833X_GPIO_IRQ_BASE + PNX833X_GPIO_NUM_IRQ); irq++) | 266 | for (irq = PNX833X_GPIO_IRQ_BASE; irq < (PNX833X_GPIO_IRQ_BASE + PNX833X_GPIO_NUM_IRQ); irq++) |
266 | set_irq_chip_and_handler(irq, &pnx833x_gpio_irq_type, handle_simple_irq); | 267 | irq_set_chip_and_handler(irq, &pnx833x_gpio_irq_type, |
268 | handle_simple_irq); | ||
267 | 269 | ||
268 | /* Set PIC priority limiter register to 0 */ | 270 | /* Set PIC priority limiter register to 0 */ |
269 | PNX833X_PIC_INT_PRIORITY = 0; | 271 | PNX833X_PIC_INT_PRIORITY = 0; |
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c index dbdc35c3531d..6b93c81779c1 100644 --- a/arch/mips/pnx8550/common/int.c +++ b/arch/mips/pnx8550/common/int.c | |||
@@ -183,7 +183,7 @@ void __init arch_init_irq(void) | |||
183 | int configPR; | 183 | int configPR; |
184 | 184 | ||
185 | for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) | 185 | for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) |
186 | set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); | 186 | irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq); |
187 | 187 | ||
188 | /* init of GIC/IPC interrupts */ | 188 | /* init of GIC/IPC interrupts */ |
189 | /* should be done before cp0 since cp0 init enables the GIC int */ | 189 | /* should be done before cp0 since cp0 init enables the GIC int */ |
@@ -206,7 +206,7 @@ void __init arch_init_irq(void) | |||
206 | /* mask/priority is still 0 so we will not get any | 206 | /* mask/priority is still 0 so we will not get any |
207 | * interrupts until it is unmasked */ | 207 | * interrupts until it is unmasked */ |
208 | 208 | ||
209 | set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); | 209 | irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq); |
210 | } | 210 | } |
211 | 211 | ||
212 | /* Priority level 0 */ | 212 | /* Priority level 0 */ |
@@ -215,20 +215,20 @@ void __init arch_init_irq(void) | |||
215 | /* Set int vector table address */ | 215 | /* Set int vector table address */ |
216 | PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0; | 216 | PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0; |
217 | 217 | ||
218 | set_irq_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type, | 218 | irq_set_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type, |
219 | handle_level_irq); | 219 | handle_level_irq); |
220 | setup_irq(MIPS_CPU_GIC_IRQ, &gic_action); | 220 | setup_irq(MIPS_CPU_GIC_IRQ, &gic_action); |
221 | 221 | ||
222 | /* init of Timer interrupts */ | 222 | /* init of Timer interrupts */ |
223 | for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++) | 223 | for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++) |
224 | set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq); | 224 | irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq); |
225 | 225 | ||
226 | /* Stop Timer 1-3 */ | 226 | /* Stop Timer 1-3 */ |
227 | configPR = read_c0_config7(); | 227 | configPR = read_c0_config7(); |
228 | configPR |= 0x00000038; | 228 | configPR |= 0x00000038; |
229 | write_c0_config7(configPR); | 229 | write_c0_config7(configPR); |
230 | 230 | ||
231 | set_irq_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type, | 231 | irq_set_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type, |
232 | handle_level_irq); | 232 | handle_level_irq); |
233 | setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action); | 233 | setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action); |
234 | } | 234 | } |
diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c index 6f1c8ef6a719..7fb97fb0931e 100644 --- a/arch/mips/powertv/asic/irq_asic.c +++ b/arch/mips/powertv/asic/irq_asic.c | |||
@@ -112,5 +112,5 @@ void __init asic_irq_init(void) | |||
112 | * Initialize interrupt handlers. | 112 | * Initialize interrupt handlers. |
113 | */ | 113 | */ |
114 | for (i = 0; i < NR_IRQS; i++) | 114 | for (i = 0; i < NR_IRQS; i++) |
115 | set_irq_chip_and_handler(i, &asic_irq_chip, handle_level_irq); | 115 | irq_set_chip_and_handler(i, &asic_irq_chip, handle_level_irq); |
116 | } | 116 | } |
diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c index b32a768da894..7c6db74e3fad 100644 --- a/arch/mips/rb532/irq.c +++ b/arch/mips/rb532/irq.c | |||
@@ -207,8 +207,8 @@ void __init arch_init_irq(void) | |||
207 | pr_info("Initializing IRQ's: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS); | 207 | pr_info("Initializing IRQ's: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS); |
208 | 208 | ||
209 | for (i = 0; i < RC32434_NR_IRQS; i++) | 209 | for (i = 0; i < RC32434_NR_IRQS; i++) |
210 | set_irq_chip_and_handler(i, &rc32434_irq_type, | 210 | irq_set_chip_and_handler(i, &rc32434_irq_type, |
211 | handle_level_irq); | 211 | handle_level_irq); |
212 | } | 212 | } |
213 | 213 | ||
214 | /* Main Interrupt dispatcher */ | 214 | /* Main Interrupt dispatcher */ |
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c index e6e64750e90a..476423a01296 100644 --- a/arch/mips/sgi-ip22/ip22-int.c +++ b/arch/mips/sgi-ip22/ip22-int.c | |||
@@ -312,7 +312,7 @@ void __init arch_init_irq(void) | |||
312 | else | 312 | else |
313 | handler = &ip22_local3_irq_type; | 313 | handler = &ip22_local3_irq_type; |
314 | 314 | ||
315 | set_irq_chip_and_handler(i, handler, handle_level_irq); | 315 | irq_set_chip_and_handler(i, handler, handle_level_irq); |
316 | } | 316 | } |
317 | 317 | ||
318 | /* vector handler. this register the IRQ as non-sharable */ | 318 | /* vector handler. this register the IRQ as non-sharable */ |
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index f2d09d7700dd..11488719dd97 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c | |||
@@ -337,7 +337,7 @@ static struct irq_chip bridge_irq_type = { | |||
337 | 337 | ||
338 | void __devinit register_bridge_irq(unsigned int irq) | 338 | void __devinit register_bridge_irq(unsigned int irq) |
339 | { | 339 | { |
340 | set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); | 340 | irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); |
341 | } | 341 | } |
342 | 342 | ||
343 | int __devinit request_bridge_irq(struct bridge_controller *bc) | 343 | int __devinit request_bridge_irq(struct bridge_controller *bc) |
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index c01f558a2a09..a152538d3c97 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c | |||
@@ -153,7 +153,7 @@ static void __init hub_rt_clock_event_global_init(void) | |||
153 | panic("Allocation of irq number for timer failed"); | 153 | panic("Allocation of irq number for timer failed"); |
154 | } while (xchg(&rt_timer_irq, irq)); | 154 | } while (xchg(&rt_timer_irq, irq)); |
155 | 155 | ||
156 | set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); | 156 | irq_set_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); |
157 | setup_irq(irq, &hub_rt_irqaction); | 157 | setup_irq(irq, &hub_rt_irqaction); |
158 | } | 158 | } |
159 | 159 | ||
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c index e0a3ce4a8d48..c65ea76d56c7 100644 --- a/arch/mips/sgi-ip32/ip32-irq.c +++ b/arch/mips/sgi-ip32/ip32-irq.c | |||
@@ -451,43 +451,51 @@ void __init arch_init_irq(void) | |||
451 | for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) { | 451 | for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) { |
452 | switch (irq) { | 452 | switch (irq) { |
453 | case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ: | 453 | case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ: |
454 | set_irq_chip_and_handler_name(irq,&ip32_mace_interrupt, | 454 | irq_set_chip_and_handler_name(irq, |
455 | handle_level_irq, "level"); | 455 | &ip32_mace_interrupt, |
456 | handle_level_irq, | ||
457 | "level"); | ||
456 | break; | 458 | break; |
457 | 459 | ||
458 | case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ: | 460 | case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ: |
459 | set_irq_chip_and_handler_name(irq, | 461 | irq_set_chip_and_handler_name(irq, |
460 | &ip32_macepci_interrupt, handle_level_irq, | 462 | &ip32_macepci_interrupt, |
461 | "level"); | 463 | handle_level_irq, |
464 | "level"); | ||
462 | break; | 465 | break; |
463 | 466 | ||
464 | case CRIME_CPUERR_IRQ: | 467 | case CRIME_CPUERR_IRQ: |
465 | case CRIME_MEMERR_IRQ: | 468 | case CRIME_MEMERR_IRQ: |
466 | set_irq_chip_and_handler_name(irq, | 469 | irq_set_chip_and_handler_name(irq, |
467 | &crime_level_interrupt, handle_level_irq, | 470 | &crime_level_interrupt, |
468 | "level"); | 471 | handle_level_irq, |
472 | "level"); | ||
469 | break; | 473 | break; |
470 | 474 | ||
471 | case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ: | 475 | case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ: |
472 | case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ: | 476 | case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ: |
473 | case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ: | 477 | case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ: |
474 | case CRIME_VICE_IRQ: | 478 | case CRIME_VICE_IRQ: |
475 | set_irq_chip_and_handler_name(irq, | 479 | irq_set_chip_and_handler_name(irq, |
476 | &crime_edge_interrupt, handle_edge_irq, "edge"); | 480 | &crime_edge_interrupt, |
481 | handle_edge_irq, | ||
482 | "edge"); | ||
477 | break; | 483 | break; |
478 | 484 | ||
479 | case MACEISA_PARALLEL_IRQ: | 485 | case MACEISA_PARALLEL_IRQ: |
480 | case MACEISA_SERIAL1_TDMAPR_IRQ: | 486 | case MACEISA_SERIAL1_TDMAPR_IRQ: |
481 | case MACEISA_SERIAL2_TDMAPR_IRQ: | 487 | case MACEISA_SERIAL2_TDMAPR_IRQ: |
482 | set_irq_chip_and_handler_name(irq, | 488 | irq_set_chip_and_handler_name(irq, |
483 | &ip32_maceisa_edge_interrupt, handle_edge_irq, | 489 | &ip32_maceisa_edge_interrupt, |
484 | "edge"); | 490 | handle_edge_irq, |
491 | "edge"); | ||
485 | break; | 492 | break; |
486 | 493 | ||
487 | default: | 494 | default: |
488 | set_irq_chip_and_handler_name(irq, | 495 | irq_set_chip_and_handler_name(irq, |
489 | &ip32_maceisa_level_interrupt, handle_level_irq, | 496 | &ip32_maceisa_level_interrupt, |
490 | "level"); | 497 | handle_level_irq, |
498 | "level"); | ||
491 | break; | 499 | break; |
492 | } | 500 | } |
493 | } | 501 | } |
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c index 89e8188a4665..09740d60e187 100644 --- a/arch/mips/sibyte/bcm1480/irq.c +++ b/arch/mips/sibyte/bcm1480/irq.c | |||
@@ -216,7 +216,8 @@ void __init init_bcm1480_irqs(void) | |||
216 | int i; | 216 | int i; |
217 | 217 | ||
218 | for (i = 0; i < BCM1480_NR_IRQS; i++) { | 218 | for (i = 0; i < BCM1480_NR_IRQS; i++) { |
219 | set_irq_chip_and_handler(i, &bcm1480_irq_type, handle_level_irq); | 219 | irq_set_chip_and_handler(i, &bcm1480_irq_type, |
220 | handle_level_irq); | ||
220 | bcm1480_irq_owner[i] = 0; | 221 | bcm1480_irq_owner[i] = 0; |
221 | } | 222 | } |
222 | } | 223 | } |
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index fd269ea8d8a8..be4460a5f6a8 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c | |||
@@ -190,7 +190,8 @@ void __init init_sb1250_irqs(void) | |||
190 | int i; | 190 | int i; |
191 | 191 | ||
192 | for (i = 0; i < SB1250_NR_IRQS; i++) { | 192 | for (i = 0; i < SB1250_NR_IRQS; i++) { |
193 | set_irq_chip_and_handler(i, &sb1250_irq_type, handle_level_irq); | 193 | irq_set_chip_and_handler(i, &sb1250_irq_type, |
194 | handle_level_irq); | ||
194 | sb1250_irq_owner[i] = 0; | 195 | sb1250_irq_owner[i] = 0; |
195 | } | 196 | } |
196 | } | 197 | } |
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c index 72b94155778d..c48194c3073b 100644 --- a/arch/mips/sni/a20r.c +++ b/arch/mips/sni/a20r.c | |||
@@ -209,7 +209,7 @@ void __init sni_a20r_irq_init(void) | |||
209 | int i; | 209 | int i; |
210 | 210 | ||
211 | for (i = SNI_A20R_IRQ_BASE + 2 ; i < SNI_A20R_IRQ_BASE + 8; i++) | 211 | for (i = SNI_A20R_IRQ_BASE + 2 ; i < SNI_A20R_IRQ_BASE + 8; i++) |
212 | set_irq_chip_and_handler(i, &a20r_irq_type, handle_level_irq); | 212 | irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq); |
213 | sni_hwint = a20r_hwint; | 213 | sni_hwint = a20r_hwint; |
214 | change_c0_status(ST0_IM, IE_IRQ0); | 214 | change_c0_status(ST0_IM, IE_IRQ0); |
215 | setup_irq(SNI_A20R_IRQ_BASE + 3, &sni_isa_irq); | 215 | setup_irq(SNI_A20R_IRQ_BASE + 3, &sni_isa_irq); |
diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c index cfcc68abc5b2..ed3b3d317358 100644 --- a/arch/mips/sni/pcimt.c +++ b/arch/mips/sni/pcimt.c | |||
@@ -296,7 +296,7 @@ void __init sni_pcimt_irq_init(void) | |||
296 | mips_cpu_irq_init(); | 296 | mips_cpu_irq_init(); |
297 | /* Actually we've got more interrupts to handle ... */ | 297 | /* Actually we've got more interrupts to handle ... */ |
298 | for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_SCSI; i++) | 298 | for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_SCSI; i++) |
299 | set_irq_chip_and_handler(i, &pcimt_irq_type, handle_level_irq); | 299 | irq_set_chip_and_handler(i, &pcimt_irq_type, handle_level_irq); |
300 | sni_hwint = sni_pcimt_hwint; | 300 | sni_hwint = sni_pcimt_hwint; |
301 | change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ3); | 301 | change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ3); |
302 | } | 302 | } |
diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c index 0846e99a6efe..b5246373d16b 100644 --- a/arch/mips/sni/pcit.c +++ b/arch/mips/sni/pcit.c | |||
@@ -238,7 +238,7 @@ void __init sni_pcit_irq_init(void) | |||
238 | 238 | ||
239 | mips_cpu_irq_init(); | 239 | mips_cpu_irq_init(); |
240 | for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++) | 240 | for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++) |
241 | set_irq_chip_and_handler(i, &pcit_irq_type, handle_level_irq); | 241 | irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq); |
242 | *(volatile u32 *)SNI_PCIT_INT_REG = 0; | 242 | *(volatile u32 *)SNI_PCIT_INT_REG = 0; |
243 | sni_hwint = sni_pcit_hwint; | 243 | sni_hwint = sni_pcit_hwint; |
244 | change_c0_status(ST0_IM, IE_IRQ1); | 244 | change_c0_status(ST0_IM, IE_IRQ1); |
@@ -251,7 +251,7 @@ void __init sni_pcit_cplus_irq_init(void) | |||
251 | 251 | ||
252 | mips_cpu_irq_init(); | 252 | mips_cpu_irq_init(); |
253 | for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++) | 253 | for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++) |
254 | set_irq_chip_and_handler(i, &pcit_irq_type, handle_level_irq); | 254 | irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq); |
255 | *(volatile u32 *)SNI_PCIT_INT_REG = 0x40000000; | 255 | *(volatile u32 *)SNI_PCIT_INT_REG = 0x40000000; |
256 | sni_hwint = sni_pcit_hwint_cplus; | 256 | sni_hwint = sni_pcit_hwint_cplus; |
257 | change_c0_status(ST0_IM, IE_IRQ0); | 257 | change_c0_status(ST0_IM, IE_IRQ0); |
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c index f05d8e593300..a7e5a6d917b1 100644 --- a/arch/mips/sni/rm200.c +++ b/arch/mips/sni/rm200.c | |||
@@ -413,7 +413,7 @@ void __init sni_rm200_i8259_irqs(void) | |||
413 | sni_rm200_init_8259A(); | 413 | sni_rm200_init_8259A(); |
414 | 414 | ||
415 | for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++) | 415 | for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++) |
416 | set_irq_chip_and_handler(i, &sni_rm200_i8259A_chip, | 416 | irq_set_chip_and_handler(i, &sni_rm200_i8259A_chip, |
417 | handle_level_irq); | 417 | handle_level_irq); |
418 | 418 | ||
419 | setup_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, &sni_rm200_irq2); | 419 | setup_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, &sni_rm200_irq2); |
@@ -477,7 +477,7 @@ void __init sni_rm200_irq_init(void) | |||
477 | mips_cpu_irq_init(); | 477 | mips_cpu_irq_init(); |
478 | /* Actually we've got more interrupts to handle ... */ | 478 | /* Actually we've got more interrupts to handle ... */ |
479 | for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++) | 479 | for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++) |
480 | set_irq_chip_and_handler(i, &rm200_irq_type, handle_level_irq); | 480 | irq_set_chip_and_handler(i, &rm200_irq_type, handle_level_irq); |
481 | sni_hwint = sni_rm200_hwint; | 481 | sni_hwint = sni_rm200_hwint; |
482 | change_c0_status(ST0_IM, IE_IRQ0); | 482 | change_c0_status(ST0_IM, IE_IRQ0); |
483 | setup_irq(SNI_RM200_INT_START + 0, &sni_rm200_i8259A_irq); | 483 | setup_irq(SNI_RM200_INT_START + 0, &sni_rm200_i8259A_irq); |
diff --git a/arch/mips/txx9/generic/irq_tx4927.c b/arch/mips/txx9/generic/irq_tx4927.c index e1828e8bcaef..7e3ac5782da4 100644 --- a/arch/mips/txx9/generic/irq_tx4927.c +++ b/arch/mips/txx9/generic/irq_tx4927.c | |||
@@ -35,7 +35,7 @@ void __init tx4927_irq_init(void) | |||
35 | 35 | ||
36 | mips_cpu_irq_init(); | 36 | mips_cpu_irq_init(); |
37 | txx9_irq_init(TX4927_IRC_REG & 0xfffffffffULL); | 37 | txx9_irq_init(TX4927_IRC_REG & 0xfffffffffULL); |
38 | set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT, | 38 | irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT, |
39 | handle_simple_irq); | 39 | handle_simple_irq); |
40 | /* raise priority for errors, timers, SIO */ | 40 | /* raise priority for errors, timers, SIO */ |
41 | txx9_irq_set_pri(TX4927_IR_ECCERR, 7); | 41 | txx9_irq_set_pri(TX4927_IR_ECCERR, 7); |
diff --git a/arch/mips/txx9/generic/irq_tx4938.c b/arch/mips/txx9/generic/irq_tx4938.c index a6e6e805097a..aace85653329 100644 --- a/arch/mips/txx9/generic/irq_tx4938.c +++ b/arch/mips/txx9/generic/irq_tx4938.c | |||
@@ -23,7 +23,7 @@ void __init tx4938_irq_init(void) | |||
23 | 23 | ||
24 | mips_cpu_irq_init(); | 24 | mips_cpu_irq_init(); |
25 | txx9_irq_init(TX4938_IRC_REG & 0xfffffffffULL); | 25 | txx9_irq_init(TX4938_IRC_REG & 0xfffffffffULL); |
26 | set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT, | 26 | irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT, |
27 | handle_simple_irq); | 27 | handle_simple_irq); |
28 | /* raise priority for errors, timers, SIO */ | 28 | /* raise priority for errors, timers, SIO */ |
29 | txx9_irq_set_pri(TX4938_IR_ECCERR, 7); | 29 | txx9_irq_set_pri(TX4938_IR_ECCERR, 7); |
diff --git a/arch/mips/txx9/generic/irq_tx4939.c b/arch/mips/txx9/generic/irq_tx4939.c index 93b6edbedd64..6b067dbd2ae1 100644 --- a/arch/mips/txx9/generic/irq_tx4939.c +++ b/arch/mips/txx9/generic/irq_tx4939.c | |||
@@ -176,8 +176,8 @@ void __init tx4939_irq_init(void) | |||
176 | for (i = 1; i < TX4939_NUM_IR; i++) { | 176 | for (i = 1; i < TX4939_NUM_IR; i++) { |
177 | tx4939irq[i].level = 4; /* middle level */ | 177 | tx4939irq[i].level = 4; /* middle level */ |
178 | tx4939irq[i].mode = TXx9_IRCR_LOW; | 178 | tx4939irq[i].mode = TXx9_IRCR_LOW; |
179 | set_irq_chip_and_handler(TXX9_IRQ_BASE + i, | 179 | irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &tx4939_irq_chip, |
180 | &tx4939_irq_chip, handle_level_irq); | 180 | handle_level_irq); |
181 | } | 181 | } |
182 | 182 | ||
183 | /* mask all IRC interrupts */ | 183 | /* mask all IRC interrupts */ |
@@ -193,7 +193,7 @@ void __init tx4939_irq_init(void) | |||
193 | __raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r); | 193 | __raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r); |
194 | __raw_writel(irc_elevel, &tx4939_ircptr->msk.r); | 194 | __raw_writel(irc_elevel, &tx4939_ircptr->msk.r); |
195 | 195 | ||
196 | set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT, | 196 | irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT, |
197 | handle_simple_irq); | 197 | handle_simple_irq); |
198 | 198 | ||
199 | /* raise priority for errors, timers, sio */ | 199 | /* raise priority for errors, timers, sio */ |
diff --git a/arch/mips/txx9/jmr3927/irq.c b/arch/mips/txx9/jmr3927/irq.c index 92a5c1b400f0..c22c859a2c49 100644 --- a/arch/mips/txx9/jmr3927/irq.c +++ b/arch/mips/txx9/jmr3927/irq.c | |||
@@ -120,8 +120,9 @@ void __init jmr3927_irq_setup(void) | |||
120 | 120 | ||
121 | tx3927_irq_init(); | 121 | tx3927_irq_init(); |
122 | for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++) | 122 | for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++) |
123 | set_irq_chip_and_handler(i, &jmr3927_irq_ioc, handle_level_irq); | 123 | irq_set_chip_and_handler(i, &jmr3927_irq_ioc, |
124 | handle_level_irq); | ||
124 | 125 | ||
125 | /* setup IOC interrupt 1 (PCI, MODEM) */ | 126 | /* setup IOC interrupt 1 (PCI, MODEM) */ |
126 | set_irq_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq); | 127 | irq_set_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq); |
127 | } | 128 | } |
diff --git a/arch/mips/txx9/rbtx4927/irq.c b/arch/mips/txx9/rbtx4927/irq.c index 7c0a048b307c..6c22c496090b 100644 --- a/arch/mips/txx9/rbtx4927/irq.c +++ b/arch/mips/txx9/rbtx4927/irq.c | |||
@@ -164,9 +164,9 @@ static void __init toshiba_rbtx4927_irq_ioc_init(void) | |||
164 | 164 | ||
165 | for (i = RBTX4927_IRQ_IOC; | 165 | for (i = RBTX4927_IRQ_IOC; |
166 | i < RBTX4927_IRQ_IOC + RBTX4927_NR_IRQ_IOC; i++) | 166 | i < RBTX4927_IRQ_IOC + RBTX4927_NR_IRQ_IOC; i++) |
167 | set_irq_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type, | 167 | irq_set_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type, |
168 | handle_level_irq); | 168 | handle_level_irq); |
169 | set_irq_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq); | 169 | irq_set_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq); |
170 | } | 170 | } |
171 | 171 | ||
172 | static int rbtx4927_irq_dispatch(int pending) | 172 | static int rbtx4927_irq_dispatch(int pending) |
@@ -194,5 +194,5 @@ void __init rbtx4927_irq_setup(void) | |||
194 | tx4927_irq_init(); | 194 | tx4927_irq_init(); |
195 | toshiba_rbtx4927_irq_ioc_init(); | 195 | toshiba_rbtx4927_irq_ioc_init(); |
196 | /* Onboard 10M Ether: High Active */ | 196 | /* Onboard 10M Ether: High Active */ |
197 | set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH); | 197 | irq_set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH); |
198 | } | 198 | } |
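Editor's note: the TXx9 board hunks above combine two of the renames: set_irq_chained_handler() becomes irq_set_chained_handler() for the cascade input, and set_irq_type() becomes irq_set_irq_type() for the onboard Ethernet line. A hedged sketch of that pairing (FOO_IRQ_CASCADE and FOO_IRQ_ETHER are illustrative numbers, not from the commit):

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/irq.h>

	#define FOO_IRQ_CASCADE	17	/* arbitrary numbers for the sketch */
	#define FOO_IRQ_ETHER	27

	static void __init foo_board_irq_setup(void)
	{
		/* was: set_irq_chained_handler(); the flow handler is unchanged */
		irq_set_chained_handler(FOO_IRQ_CASCADE, handle_simple_irq);

		/* was: set_irq_type(); the onboard NIC is active-high, as above */
		irq_set_irq_type(FOO_IRQ_ETHER, IRQF_TRIGGER_HIGH);
	}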
diff --git a/arch/mips/txx9/rbtx4938/irq.c b/arch/mips/txx9/rbtx4938/irq.c index 2ec4fe1b1670..58cd7a9272cc 100644 --- a/arch/mips/txx9/rbtx4938/irq.c +++ b/arch/mips/txx9/rbtx4938/irq.c | |||
@@ -132,10 +132,10 @@ static void __init toshiba_rbtx4938_irq_ioc_init(void) | |||
132 | 132 | ||
133 | for (i = RBTX4938_IRQ_IOC; | 133 | for (i = RBTX4938_IRQ_IOC; |
134 | i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++) | 134 | i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++) |
135 | set_irq_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type, | 135 | irq_set_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type, |
136 | handle_level_irq); | 136 | handle_level_irq); |
137 | 137 | ||
138 | set_irq_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq); | 138 | irq_set_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq); |
139 | } | 139 | } |
140 | 140 | ||
141 | void __init rbtx4938_irq_setup(void) | 141 | void __init rbtx4938_irq_setup(void) |
@@ -153,5 +153,5 @@ void __init rbtx4938_irq_setup(void) | |||
153 | tx4938_irq_init(); | 153 | tx4938_irq_init(); |
154 | toshiba_rbtx4938_irq_ioc_init(); | 154 | toshiba_rbtx4938_irq_ioc_init(); |
155 | /* Onboard 10M Ether: High Active */ | 155 | /* Onboard 10M Ether: High Active */ |
156 | set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH); | 156 | irq_set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH); |
157 | } | 157 | } |
diff --git a/arch/mips/txx9/rbtx4939/irq.c b/arch/mips/txx9/rbtx4939/irq.c index 70074632fb99..69a80616f0c9 100644 --- a/arch/mips/txx9/rbtx4939/irq.c +++ b/arch/mips/txx9/rbtx4939/irq.c | |||
@@ -88,8 +88,8 @@ void __init rbtx4939_irq_setup(void) | |||
88 | tx4939_irq_init(); | 88 | tx4939_irq_init(); |
89 | for (i = RBTX4939_IRQ_IOC; | 89 | for (i = RBTX4939_IRQ_IOC; |
90 | i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++) | 90 | i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++) |
91 | set_irq_chip_and_handler(i, &rbtx4939_ioc_irq_chip, | 91 | irq_set_chip_and_handler(i, &rbtx4939_ioc_irq_chip, |
92 | handle_level_irq); | 92 | handle_level_irq); |
93 | 93 | ||
94 | set_irq_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq); | 94 | irq_set_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq); |
95 | } | 95 | } |
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index f53156bb9aa8..a39ef3207d71 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c | |||
@@ -710,11 +710,11 @@ static int __init vr41xx_icu_init(void) | |||
710 | icu2_write(MGIUINTHREG, 0xffff); | 710 | icu2_write(MGIUINTHREG, 0xffff); |
711 | 711 | ||
712 | for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++) | 712 | for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++) |
713 | set_irq_chip_and_handler(i, &sysint1_irq_type, | 713 | irq_set_chip_and_handler(i, &sysint1_irq_type, |
714 | handle_level_irq); | 714 | handle_level_irq); |
715 | 715 | ||
716 | for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++) | 716 | for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++) |
717 | set_irq_chip_and_handler(i, &sysint2_irq_type, | 717 | irq_set_chip_and_handler(i, &sysint2_irq_type, |
718 | handle_level_irq); | 718 | handle_level_irq); |
719 | 719 | ||
720 | cascade_irq(INT0_IRQ, icu_get_irq); | 720 | cascade_irq(INT0_IRQ, icu_get_irq); |
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c index 9ff7f397c0e1..70a3b85f3757 100644 --- a/arch/mips/vr41xx/common/irq.c +++ b/arch/mips/vr41xx/common/irq.c | |||
@@ -87,7 +87,7 @@ static void irq_dispatch(unsigned int irq) | |||
87 | atomic_inc(&irq_err_count); | 87 | atomic_inc(&irq_err_count); |
88 | else | 88 | else |
89 | irq_dispatch(irq); | 89 | irq_dispatch(irq); |
90 | if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask) | 90 | if (!irqd_irq_disabled(idata) && chip->irq_unmask) |
91 | chip->irq_unmask(idata); | 91 | chip->irq_unmask(idata); |
92 | } else | 92 | } else |
93 | do_IRQ(irq); | 93 | do_IRQ(irq); |
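Editor's note: the vr41xx hunk is not a pure rename. The open-coded `desc->status & IRQ_DISABLED` test is replaced by the irqd_irq_disabled() accessor on the irq_data the dispatcher already holds, because the state bits are no longer public descriptor fields. A sketch of a cascade-style dispatcher in the new idiom (foo_* names are invented; the pending-IRQ decode is stubbed):

	#include <linux/irq.h>

	/* Board-specific decode of the pending sub-interrupt; stubbed out here. */
	static int foo_read_pending(void)
	{
		return -1;
	}

	static void foo_cascade(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_data *idata = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_desc_get_chip(desc);
		int sub_irq;

		if (chip->irq_ack)
			chip->irq_ack(idata);

		sub_irq = foo_read_pending();
		if (sub_irq >= 0)
			generic_handle_irq(sub_irq);

		/* was: if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask) */
		if (!irqd_irq_disabled(idata) && chip->irq_unmask)
			chip->irq_unmask(idata);
	}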
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index d8ab97a73db2..a523c94fa698 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig | |||
@@ -3,6 +3,7 @@ config MN10300 | |||
3 | select HAVE_OPROFILE | 3 | select HAVE_OPROFILE |
4 | select HAVE_GENERIC_HARDIRQS | 4 | select HAVE_GENERIC_HARDIRQS |
5 | select GENERIC_HARDIRQS_NO_DEPRECATED | 5 | select GENERIC_HARDIRQS_NO_DEPRECATED |
6 | select GENERIC_IRQ_SHOW | ||
6 | select HAVE_ARCH_TRACEHOOK | 7 | select HAVE_ARCH_TRACEHOOK |
7 | select HAVE_ARCH_KGDB | 8 | select HAVE_ARCH_KGDB |
8 | 9 | ||
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 5f7fc3eb45e6..86af0d7d0771 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c | |||
@@ -263,7 +263,7 @@ void set_intr_level(int irq, u16 level) | |||
263 | */ | 263 | */ |
264 | void mn10300_set_lateack_irq_type(int irq) | 264 | void mn10300_set_lateack_irq_type(int irq) |
265 | { | 265 | { |
266 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level, | 266 | irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level, |
267 | handle_level_irq); | 267 | handle_level_irq); |
268 | } | 268 | } |
269 | 269 | ||
@@ -275,12 +275,12 @@ void __init init_IRQ(void) | |||
275 | int irq; | 275 | int irq; |
276 | 276 | ||
277 | for (irq = 0; irq < NR_IRQS; irq++) | 277 | for (irq = 0; irq < NR_IRQS; irq++) |
278 | if (get_irq_chip(irq) == &no_irq_chip) | 278 | if (irq_get_chip(irq) == &no_irq_chip) |
279 | /* due to the PIC latching interrupt requests, even | 279 | /* due to the PIC latching interrupt requests, even |
280 | * when the IRQ is disabled, IRQ_PENDING is superfluous | 280 | * when the IRQ is disabled, IRQ_PENDING is superfluous |
281 | * and we can use handle_level_irq() for edge-triggered | 281 | * and we can use handle_level_irq() for edge-triggered |
282 | * interrupts */ | 282 | * interrupts */ |
283 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge, | 283 | irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge, |
284 | handle_level_irq); | 284 | handle_level_irq); |
285 | 285 | ||
286 | unit_init_IRQ(); | 286 | unit_init_IRQ(); |
@@ -335,91 +335,42 @@ asmlinkage void do_IRQ(void) | |||
335 | /* | 335 | /* |
336 | * Display interrupt management information through /proc/interrupts | 336 | * Display interrupt management information through /proc/interrupts |
337 | */ | 337 | */ |
338 | int show_interrupts(struct seq_file *p, void *v) | 338 | int arch_show_interrupts(struct seq_file *p, int prec) |
339 | { | 339 | { |
340 | int i = *(loff_t *) v, j, cpu; | ||
341 | struct irqaction *action; | ||
342 | unsigned long flags; | ||
343 | |||
344 | switch (i) { | ||
345 | /* display column title bar naming CPUs */ | ||
346 | case 0: | ||
347 | seq_printf(p, " "); | ||
348 | for (j = 0; j < NR_CPUS; j++) | ||
349 | if (cpu_online(j)) | ||
350 | seq_printf(p, "CPU%d ", j); | ||
351 | seq_putc(p, '\n'); | ||
352 | break; | ||
353 | |||
354 | /* display information rows, one per active CPU */ | ||
355 | case 1 ... NR_IRQS - 1: | ||
356 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
357 | |||
358 | action = irq_desc[i].action; | ||
359 | if (action) { | ||
360 | seq_printf(p, "%3d: ", i); | ||
361 | for_each_present_cpu(cpu) | ||
362 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); | ||
363 | |||
364 | if (i < NR_CPU_IRQS) | ||
365 | seq_printf(p, " %14s.%u", | ||
366 | irq_desc[i].irq_data.chip->name, | ||
367 | (GxICR(i) & GxICR_LEVEL) >> | ||
368 | GxICR_LEVEL_SHIFT); | ||
369 | else | ||
370 | seq_printf(p, " %14s", | ||
371 | irq_desc[i].irq_data.chip->name); | ||
372 | |||
373 | seq_printf(p, " %s", action->name); | ||
374 | |||
375 | for (action = action->next; | ||
376 | action; | ||
377 | action = action->next) | ||
378 | seq_printf(p, ", %s", action->name); | ||
379 | |||
380 | seq_putc(p, '\n'); | ||
381 | } | ||
382 | |||
383 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
384 | break; | ||
385 | |||
386 | /* polish off with NMI and error counters */ | ||
387 | case NR_IRQS: | ||
388 | #ifdef CONFIG_MN10300_WD_TIMER | 340 | #ifdef CONFIG_MN10300_WD_TIMER |
389 | seq_printf(p, "NMI: "); | 341 | int j; |
390 | for (j = 0; j < NR_CPUS; j++) | ||
391 | if (cpu_online(j)) | ||
392 | seq_printf(p, "%10u ", nmi_count(j)); | ||
393 | seq_putc(p, '\n'); | ||
394 | #endif | ||
395 | 342 | ||
396 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 343 | seq_printf(p, "%*s: ", prec, "NMI"); |
397 | break; | 344 | for (j = 0; j < NR_CPUS; j++) |
398 | } | 345 | if (cpu_online(j)) |
346 | seq_printf(p, "%10u ", nmi_count(j)); | ||
347 | seq_putc(p, '\n'); | ||
348 | #endif | ||
399 | 349 | ||
350 | seq_printf(p, "%*s: ", prec, "ERR"); | ||
351 | seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); | ||
400 | return 0; | 352 | return 0; |
401 | } | 353 | } |
402 | 354 | ||
403 | #ifdef CONFIG_HOTPLUG_CPU | 355 | #ifdef CONFIG_HOTPLUG_CPU |
404 | void migrate_irqs(void) | 356 | void migrate_irqs(void) |
405 | { | 357 | { |
406 | irq_desc_t *desc; | ||
407 | int irq; | 358 | int irq; |
408 | unsigned int self, new; | 359 | unsigned int self, new; |
409 | unsigned long flags; | 360 | unsigned long flags; |
410 | 361 | ||
411 | self = smp_processor_id(); | 362 | self = smp_processor_id(); |
412 | for (irq = 0; irq < NR_IRQS; irq++) { | 363 | for (irq = 0; irq < NR_IRQS; irq++) { |
413 | desc = irq_desc + irq; | 364 | struct irq_data *data = irq_get_irq_data(irq); |
414 | 365 | ||
415 | if (desc->status == IRQ_PER_CPU) | 366 | if (irqd_is_per_cpu(data)) |
416 | continue; | 367 | continue; |
417 | 368 | ||
418 | if (cpu_isset(self, irq_desc[irq].affinity) && | 369 | if (cpu_isset(self, data->affinity) && |
419 | !cpus_intersects(irq_affinity[irq], cpu_online_map)) { | 370 | !cpus_intersects(irq_affinity[irq], cpu_online_map)) { |
420 | int cpu_id; | 371 | int cpu_id; |
421 | cpu_id = first_cpu(cpu_online_map); | 372 | cpu_id = first_cpu(cpu_online_map); |
422 | cpu_set(cpu_id, irq_desc[irq].affinity); | 373 | cpu_set(cpu_id, data->affinity); |
423 | } | 374 | } |
424 | /* We need to operate irq_affinity_online atomically. */ | 375 | /* We need to operate irq_affinity_online atomically. */ |
425 | arch_local_cli_save(flags); | 376 | arch_local_cli_save(flags); |
@@ -430,7 +381,7 @@ void migrate_irqs(void) | |||
430 | GxICR(irq) = x & GxICR_LEVEL; | 381 | GxICR(irq) = x & GxICR_LEVEL; |
431 | tmp = GxICR(irq); | 382 | tmp = GxICR(irq); |
432 | 383 | ||
433 | new = any_online_cpu(irq_desc[irq].affinity); | 384 | new = any_online_cpu(data->affinity); |
434 | irq_affinity_online[irq] = new; | 385 | irq_affinity_online[irq] = new; |
435 | 386 | ||
436 | CROSS_GxICR(irq, new) = | 387 | CROSS_GxICR(irq, new) = |
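Editor's note: the mn10300 (and, below, powerpc) /proc/interrupts conversion follows one recipe: select GENERIC_IRQ_SHOW in Kconfig, delete the hand-rolled show_interrupts(), and keep only the architecture-specific summary rows in arch_show_interrupts(). A minimal sketch of what an architecture is left providing (the counter here is a stand-in; the real mn10300 version above also prints NMI counts):

	#include <linux/interrupt.h>
	#include <linux/seq_file.h>
	#include <asm/atomic.h>

	static atomic_t foo_irq_err_count;	/* stand-in for the arch's counter */

	/* With GENERIC_IRQ_SHOW selected, the core prints the per-IRQ table and
	 * only calls back here for extra rows; prec is the label column width. */
	int arch_show_interrupts(struct seq_file *p, int prec)
	{
		seq_printf(p, "%*s: %10u\n", prec, "ERR",
			   atomic_read(&foo_irq_err_count));
		return 0;
	}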
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c index efca426a2ed4..94901c56baf1 100644 --- a/arch/mn10300/kernel/mn10300-serial.c +++ b/arch/mn10300/kernel/mn10300-serial.c | |||
@@ -933,7 +933,7 @@ static int mn10300_serial_startup(struct uart_port *_port) | |||
933 | NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); | 933 | NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); |
934 | set_intr_level(port->tx_irq, | 934 | set_intr_level(port->tx_irq, |
935 | NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); | 935 | NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); |
936 | set_irq_chip(port->tm_irq, &mn10300_serial_pic); | 936 | irq_set_chip(port->tm_irq, &mn10300_serial_pic); |
937 | 937 | ||
938 | if (request_irq(port->rx_irq, mn10300_serial_interrupt, | 938 | if (request_irq(port->rx_irq, mn10300_serial_interrupt, |
939 | IRQF_DISABLED, port->rx_name, port) < 0) | 939 | IRQF_DISABLED, port->rx_name, port) < 0) |
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 51c02f97dcea..226c826a2194 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c | |||
@@ -156,15 +156,15 @@ static void init_ipi(void) | |||
156 | u16 tmp16; | 156 | u16 tmp16; |
157 | 157 | ||
158 | /* set up the reschedule IPI */ | 158 | /* set up the reschedule IPI */ |
159 | set_irq_chip_and_handler(RESCHEDULE_IPI, | 159 | irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type, |
160 | &mn10300_ipi_type, handle_percpu_irq); | 160 | handle_percpu_irq); |
161 | setup_irq(RESCHEDULE_IPI, &reschedule_ipi); | 161 | setup_irq(RESCHEDULE_IPI, &reschedule_ipi); |
162 | set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV); | 162 | set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV); |
163 | mn10300_ipi_enable(RESCHEDULE_IPI); | 163 | mn10300_ipi_enable(RESCHEDULE_IPI); |
164 | 164 | ||
165 | /* set up the call function IPI */ | 165 | /* set up the call function IPI */ |
166 | set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI, | 166 | irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type, |
167 | &mn10300_ipi_type, handle_percpu_irq); | 167 | handle_percpu_irq); |
168 | setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi); | 168 | setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi); |
169 | set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV); | 169 | set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV); |
170 | mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI); | 170 | mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI); |
@@ -172,8 +172,8 @@ static void init_ipi(void) | |||
172 | /* set up the local timer IPI */ | 172 | /* set up the local timer IPI */ |
173 | #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \ | 173 | #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \ |
174 | defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) | 174 | defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) |
175 | set_irq_chip_and_handler(LOCAL_TIMER_IPI, | 175 | irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type, |
176 | &mn10300_ipi_type, handle_percpu_irq); | 176 | handle_percpu_irq); |
177 | setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi); | 177 | setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi); |
178 | set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV); | 178 | set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV); |
179 | mn10300_ipi_enable(LOCAL_TIMER_IPI); | 179 | mn10300_ipi_enable(LOCAL_TIMER_IPI); |
diff --git a/arch/mn10300/unit-asb2364/irq-fpga.c b/arch/mn10300/unit-asb2364/irq-fpga.c index ee84e62b16ed..e16c216f31dc 100644 --- a/arch/mn10300/unit-asb2364/irq-fpga.c +++ b/arch/mn10300/unit-asb2364/irq-fpga.c | |||
@@ -100,7 +100,8 @@ void __init irq_fpga_init(void) | |||
100 | SyncExBus(); | 100 | SyncExBus(); |
101 | 101 | ||
102 | for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++) | 102 | for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++) |
103 | set_irq_chip_and_handler(irq, &asb2364_fpga_pic, handle_level_irq); | 103 | irq_set_chip_and_handler(irq, &asb2364_fpga_pic, |
104 | handle_level_irq); | ||
104 | 105 | ||
105 | /* the FPGA drives the XIRQ1 input on the CPU PIC */ | 106 | /* the FPGA drives the XIRQ1 input on the CPU PIC */ |
106 | setup_irq(XIRQ1, &fpga_irq[0]); | 107 | setup_irq(XIRQ1, &fpga_irq[0]); |
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index cb450e1e79b3..c0b1affc06a8 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -113,13 +113,8 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest) | |||
113 | int cpu_dest; | 113 | int cpu_dest; |
114 | 114 | ||
115 | /* timer and ipi have to always be received on all CPUs */ | 115 | /* timer and ipi have to always be received on all CPUs */ |
116 | if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) { | 116 | if (irqd_is_per_cpu(d)) |
117 | /* Bad linux design decision. The mask has already | ||
118 | * been set; we must reset it. Will fix - tglx | ||
119 | */ | ||
120 | cpumask_setall(d->affinity); | ||
121 | return -EINVAL; | 117 | return -EINVAL; |
122 | } | ||
123 | 118 | ||
124 | /* whatever mask they set, we just allow one CPU */ | 119 | /* whatever mask they set, we just allow one CPU */ |
125 | cpu_dest = first_cpu(*dest); | 120 | cpu_dest = first_cpu(*dest); |
@@ -174,10 +169,11 @@ int show_interrupts(struct seq_file *p, void *v) | |||
174 | } | 169 | } |
175 | 170 | ||
176 | if (i < NR_IRQS) { | 171 | if (i < NR_IRQS) { |
172 | struct irq_desc *desc = irq_to_desc(i); | ||
177 | struct irqaction *action; | 173 | struct irqaction *action; |
178 | 174 | ||
179 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | 175 | raw_spin_lock_irqsave(&desc->lock, flags); |
180 | action = irq_desc[i].action; | 176 | action = desc->action; |
181 | if (!action) | 177 | if (!action) |
182 | goto skip; | 178 | goto skip; |
183 | seq_printf(p, "%3d: ", i); | 179 | seq_printf(p, "%3d: ", i); |
@@ -188,7 +184,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
188 | seq_printf(p, "%10u ", kstat_irqs(i)); | 184 | seq_printf(p, "%10u ", kstat_irqs(i)); |
189 | #endif | 185 | #endif |
190 | 186 | ||
191 | seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name); | 187 | seq_printf(p, " %14s", irq_desc_get_chip(desc)->name); |
192 | #ifndef PARISC_IRQ_CR16_COUNTS | 188 | #ifndef PARISC_IRQ_CR16_COUNTS |
193 | seq_printf(p, " %s", action->name); | 189 | seq_printf(p, " %s", action->name); |
194 | 190 | ||
@@ -220,7 +216,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
220 | 216 | ||
221 | seq_putc(p, '\n'); | 217 | seq_putc(p, '\n'); |
222 | skip: | 218 | skip: |
223 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 219 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
224 | } | 220 | } |
225 | 221 | ||
226 | return 0; | 222 | return 0; |
@@ -238,15 +234,15 @@ int show_interrupts(struct seq_file *p, void *v) | |||
238 | 234 | ||
239 | int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data) | 235 | int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data) |
240 | { | 236 | { |
241 | if (irq_desc[irq].action) | 237 | if (irq_has_action(irq)) |
242 | return -EBUSY; | 238 | return -EBUSY; |
243 | if (get_irq_chip(irq) != &cpu_interrupt_type) | 239 | if (irq_get_chip(irq) != &cpu_interrupt_type) |
244 | return -EBUSY; | 240 | return -EBUSY; |
245 | 241 | ||
246 | /* for iosapic interrupts */ | 242 | /* for iosapic interrupts */ |
247 | if (type) { | 243 | if (type) { |
248 | set_irq_chip_and_handler(irq, type, handle_percpu_irq); | 244 | irq_set_chip_and_handler(irq, type, handle_percpu_irq); |
249 | set_irq_chip_data(irq, data); | 245 | irq_set_chip_data(irq, data); |
250 | __cpu_unmask_irq(irq); | 246 | __cpu_unmask_irq(irq); |
251 | } | 247 | } |
252 | return 0; | 248 | return 0; |
@@ -357,7 +353,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
357 | #ifdef CONFIG_SMP | 353 | #ifdef CONFIG_SMP |
358 | desc = irq_to_desc(irq); | 354 | desc = irq_to_desc(irq); |
359 | cpumask_copy(&dest, desc->irq_data.affinity); | 355 | cpumask_copy(&dest, desc->irq_data.affinity); |
360 | if (CHECK_IRQ_PER_CPU(desc->status) && | 356 | if (irqd_is_per_cpu(&desc->irq_data) && |
361 | !cpu_isset(smp_processor_id(), dest)) { | 357 | !cpu_isset(smp_processor_id(), dest)) { |
362 | int cpu = first_cpu(dest); | 358 | int cpu = first_cpu(dest); |
363 | 359 | ||
@@ -398,14 +394,14 @@ static void claim_cpu_irqs(void) | |||
398 | { | 394 | { |
399 | int i; | 395 | int i; |
400 | for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) { | 396 | for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) { |
401 | set_irq_chip_and_handler(i, &cpu_interrupt_type, | 397 | irq_set_chip_and_handler(i, &cpu_interrupt_type, |
402 | handle_percpu_irq); | 398 | handle_percpu_irq); |
403 | } | 399 | } |
404 | 400 | ||
405 | set_irq_handler(TIMER_IRQ, handle_percpu_irq); | 401 | irq_set_handler(TIMER_IRQ, handle_percpu_irq); |
406 | setup_irq(TIMER_IRQ, &timer_action); | 402 | setup_irq(TIMER_IRQ, &timer_action); |
407 | #ifdef CONFIG_SMP | 403 | #ifdef CONFIG_SMP |
408 | set_irq_handler(IPI_IRQ, handle_percpu_irq); | 404 | irq_set_handler(IPI_IRQ, handle_percpu_irq); |
409 | setup_irq(IPI_IRQ, &ipi_action); | 405 | setup_irq(IPI_IRQ, &ipi_action); |
410 | #endif | 406 | #endif |
411 | } | 407 | } |
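Editor's note: the parisc hunks swap direct irq_desc[] array pokes for accessors: irq_has_action() instead of reading desc->action, irq_get_chip()/irq_set_chip_data() instead of the old set_*/get_* names, and irq_set_handler() for the per-CPU timer and IPI lines. A sketch that mirrors cpu_claim_irq() above, with a stub chip standing in for the platform's CPU interrupt type:

	#include <linux/irq.h>

	static struct irq_chip foo_cpu_chip = { .name = "CPU" };	/* stand-in */

	/* Refuse lines that are already in use or not owned by the CPU chip,
	 * then rebind them through the new accessors. */
	static int foo_claim_cpu_irq(unsigned int irq, struct irq_chip *type, void *data)
	{
		if (irq_has_action(irq))
			return -EBUSY;
		if (irq_get_chip(irq) != &foo_cpu_chip)
			return -EBUSY;

		if (type) {
			irq_set_chip_and_handler(irq, type, handle_percpu_irq);
			irq_set_chip_data(irq, data);
		}
		return 0;
	}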
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3584e4d4a4ad..d0e8a1dbf822 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -139,6 +139,8 @@ config PPC | |||
139 | select HAVE_SPARSE_IRQ | 139 | select HAVE_SPARSE_IRQ |
140 | select IRQ_PER_CPU | 140 | select IRQ_PER_CPU |
141 | select GENERIC_HARDIRQS_NO_DEPRECATED | 141 | select GENERIC_HARDIRQS_NO_DEPRECATED |
142 | select GENERIC_IRQ_SHOW | ||
143 | select GENERIC_IRQ_SHOW_LEVEL | ||
142 | 144 | ||
143 | config EARLY_PRINTK | 145 | config EARLY_PRINTK |
144 | bool | 146 | bool |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 0a5570338b96..63625e0650b5 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -195,7 +195,7 @@ notrace void arch_local_irq_restore(unsigned long en) | |||
195 | EXPORT_SYMBOL(arch_local_irq_restore); | 195 | EXPORT_SYMBOL(arch_local_irq_restore); |
196 | #endif /* CONFIG_PPC64 */ | 196 | #endif /* CONFIG_PPC64 */ |
197 | 197 | ||
198 | static int show_other_interrupts(struct seq_file *p, int prec) | 198 | int arch_show_interrupts(struct seq_file *p, int prec) |
199 | { | 199 | { |
200 | int j; | 200 | int j; |
201 | 201 | ||
@@ -231,65 +231,6 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | int show_interrupts(struct seq_file *p, void *v) | ||
235 | { | ||
236 | unsigned long flags, any_count = 0; | ||
237 | int i = *(loff_t *) v, j, prec; | ||
238 | struct irqaction *action; | ||
239 | struct irq_desc *desc; | ||
240 | struct irq_chip *chip; | ||
241 | |||
242 | if (i > nr_irqs) | ||
243 | return 0; | ||
244 | |||
245 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
246 | j *= 10; | ||
247 | |||
248 | if (i == nr_irqs) | ||
249 | return show_other_interrupts(p, prec); | ||
250 | |||
251 | /* print header */ | ||
252 | if (i == 0) { | ||
253 | seq_printf(p, "%*s", prec + 8, ""); | ||
254 | for_each_online_cpu(j) | ||
255 | seq_printf(p, "CPU%-8d", j); | ||
256 | seq_putc(p, '\n'); | ||
257 | } | ||
258 | |||
259 | desc = irq_to_desc(i); | ||
260 | if (!desc) | ||
261 | return 0; | ||
262 | |||
263 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
264 | for_each_online_cpu(j) | ||
265 | any_count |= kstat_irqs_cpu(i, j); | ||
266 | action = desc->action; | ||
267 | if (!action && !any_count) | ||
268 | goto out; | ||
269 | |||
270 | seq_printf(p, "%*d: ", prec, i); | ||
271 | for_each_online_cpu(j) | ||
272 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
273 | |||
274 | chip = get_irq_desc_chip(desc); | ||
275 | if (chip) | ||
276 | seq_printf(p, " %-16s", chip->name); | ||
277 | else | ||
278 | seq_printf(p, " %-16s", "None"); | ||
279 | seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); | ||
280 | |||
281 | if (action) { | ||
282 | seq_printf(p, " %s", action->name); | ||
283 | while ((action = action->next) != NULL) | ||
284 | seq_printf(p, ", %s", action->name); | ||
285 | } | ||
286 | |||
287 | seq_putc(p, '\n'); | ||
288 | out: | ||
289 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | /* | 234 | /* |
294 | * /proc/stat helpers | 235 | * /proc/stat helpers |
295 | */ | 236 | */ |
@@ -315,24 +256,26 @@ void fixup_irqs(const struct cpumask *map) | |||
315 | alloc_cpumask_var(&mask, GFP_KERNEL); | 256 | alloc_cpumask_var(&mask, GFP_KERNEL); |
316 | 257 | ||
317 | for_each_irq(irq) { | 258 | for_each_irq(irq) { |
259 | struct irq_data *data; | ||
318 | struct irq_chip *chip; | 260 | struct irq_chip *chip; |
319 | 261 | ||
320 | desc = irq_to_desc(irq); | 262 | desc = irq_to_desc(irq); |
321 | if (!desc) | 263 | if (!desc) |
322 | continue; | 264 | continue; |
323 | 265 | ||
324 | if (desc->status & IRQ_PER_CPU) | 266 | data = irq_desc_get_irq_data(desc); |
267 | if (irqd_is_per_cpu(data)) | ||
325 | continue; | 268 | continue; |
326 | 269 | ||
327 | chip = get_irq_desc_chip(desc); | 270 | chip = irq_data_get_irq_chip(data); |
328 | 271 | ||
329 | cpumask_and(mask, desc->irq_data.affinity, map); | 272 | cpumask_and(mask, data->affinity, map); |
330 | if (cpumask_any(mask) >= nr_cpu_ids) { | 273 | if (cpumask_any(mask) >= nr_cpu_ids) { |
331 | printk("Breaking affinity for irq %i\n", irq); | 274 | printk("Breaking affinity for irq %i\n", irq); |
332 | cpumask_copy(mask, map); | 275 | cpumask_copy(mask, map); |
333 | } | 276 | } |
334 | if (chip->irq_set_affinity) | 277 | if (chip->irq_set_affinity) |
335 | chip->irq_set_affinity(&desc->irq_data, mask, true); | 278 | chip->irq_set_affinity(data, mask, true); |
336 | else if (desc->action && !(warned++)) | 279 | else if (desc->action && !(warned++)) |
337 | printk("Cannot set affinity for irq %i\n", irq); | 280 | printk("Cannot set affinity for irq %i\n", irq); |
338 | } | 281 | } |
@@ -618,7 +561,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
618 | smp_wmb(); | 561 | smp_wmb(); |
619 | 562 | ||
620 | /* Clear norequest flags */ | 563 | /* Clear norequest flags */ |
621 | irq_to_desc(i)->status &= ~IRQ_NOREQUEST; | 564 | irq_clear_status_flags(i, IRQ_NOREQUEST); |
622 | 565 | ||
623 | /* Legacy flags are left to default at this point, | 566 | /* Legacy flags are left to default at this point, |
624 | * one can then use irq_create_mapping() to | 567 | * one can then use irq_create_mapping() to |
@@ -827,8 +770,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller, | |||
827 | 770 | ||
828 | /* Set type if specified and different than the current one */ | 771 | /* Set type if specified and different than the current one */ |
829 | if (type != IRQ_TYPE_NONE && | 772 | if (type != IRQ_TYPE_NONE && |
830 | type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) | 773 | type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) |
831 | set_irq_type(virq, type); | 774 | irq_set_irq_type(virq, type); |
832 | return virq; | 775 | return virq; |
833 | } | 776 | } |
834 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | 777 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); |
@@ -851,7 +794,7 @@ void irq_dispose_mapping(unsigned int virq) | |||
851 | return; | 794 | return; |
852 | 795 | ||
853 | /* remove chip and handler */ | 796 | /* remove chip and handler */ |
854 | set_irq_chip_and_handler(virq, NULL, NULL); | 797 | irq_set_chip_and_handler(virq, NULL, NULL); |
855 | 798 | ||
856 | /* Make sure it's completed */ | 799 | /* Make sure it's completed */ |
857 | synchronize_irq(virq); | 800 | synchronize_irq(virq); |
@@ -1156,7 +1099,7 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
1156 | seq_printf(m, "%5d ", i); | 1099 | seq_printf(m, "%5d ", i); |
1157 | seq_printf(m, "0x%05lx ", virq_to_hw(i)); | 1100 | seq_printf(m, "0x%05lx ", virq_to_hw(i)); |
1158 | 1101 | ||
1159 | chip = get_irq_desc_chip(desc); | 1102 | chip = irq_desc_get_chip(desc); |
1160 | if (chip && chip->name) | 1103 | if (chip && chip->name) |
1161 | p = chip->name; | 1104 | p = chip->name; |
1162 | else | 1105 | else |
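Editor's note: in the powerpc fixup_irqs() hunk above, descriptor state and affinity are reached through struct irq_data: irq_desc_get_irq_data(), irqd_is_per_cpu(), irq_data_get_irq_chip(), and the chip's irq_set_affinity() callback, which now takes the irq_data pointer. A condensed sketch of that loop (error reporting and allocation trimmed; the caller is assumed to supply a scratch cpumask):

	#include <linux/cpumask.h>
	#include <linux/irq.h>

	/* Walk the descriptors, skip per-CPU lines (timers, IPIs), and rehome
	 * everything else onto the surviving CPU mask. */
	static void foo_break_affinity(const struct cpumask *online, struct cpumask *tmp)
	{
		int irq;

		for (irq = 0; irq < nr_irqs; irq++) {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *data;
			struct irq_chip *chip;

			if (!desc)
				continue;

			data = irq_desc_get_irq_data(desc);
			if (irqd_is_per_cpu(data))
				continue;

			chip = irq_data_get_irq_chip(data);
			cpumask_and(tmp, data->affinity, online);
			if (cpumask_any(tmp) >= nr_cpu_ids)
				cpumask_copy(tmp, online);
			if (chip->irq_set_affinity)
				chip->irq_set_affinity(data, tmp, true);
		}
	}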
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index bd1e1ff17b2d..7ee50f0547cb 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -31,17 +31,17 @@ void machine_kexec_mask_interrupts(void) { | |||
31 | if (!desc) | 31 | if (!desc) |
32 | continue; | 32 | continue; |
33 | 33 | ||
34 | chip = get_irq_desc_chip(desc); | 34 | chip = irq_desc_get_chip(desc); |
35 | if (!chip) | 35 | if (!chip) |
36 | continue; | 36 | continue; |
37 | 37 | ||
38 | if (chip->irq_eoi && desc->status & IRQ_INPROGRESS) | 38 | if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) |
39 | chip->irq_eoi(&desc->irq_data); | 39 | chip->irq_eoi(&desc->irq_data); |
40 | 40 | ||
41 | if (chip->irq_mask) | 41 | if (chip->irq_mask) |
42 | chip->irq_mask(&desc->irq_data); | 42 | chip->irq_mask(&desc->irq_data); |
43 | 43 | ||
44 | if (chip->irq_disable && !(desc->status & IRQ_DISABLED)) | 44 | if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) |
45 | chip->irq_disable(&desc->irq_data); | 45 | chip->irq_disable(&desc->irq_data); |
46 | } | 46 | } |
47 | } | 47 | } |
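Editor's note: the kexec hunk above drives the hardware purely through the chip callbacks attached to irq_data (irq_eoi, irq_mask, irq_disable), guarded by irqd_*() state tests. For context, a chip written against this API receives the irq_data pointer directly in each callback; a minimal, entirely invented example (register layout and names are assumptions, not from this commit):

	#include <linux/io.h>
	#include <linux/irq.h>

	#define FOO_IRQ_BASE	16		/* arbitrary value for the sketch */

	static void __iomem *foo_mask_reg;	/* ioremap()ed during probe, assumed */

	static void foo_irq_mask(struct irq_data *d)
	{
		u32 bit = 1u << (d->irq - FOO_IRQ_BASE);

		writel(readl(foo_mask_reg) & ~bit, foo_mask_reg);
	}

	static void foo_irq_unmask(struct irq_data *d)
	{
		u32 bit = 1u << (d->irq - FOO_IRQ_BASE);

		writel(readl(foo_mask_reg) | bit, foo_mask_reg);
	}

	static struct irq_chip foo_pic_chip = {
		.name		= "FOO-PIC",
		.irq_mask	= foo_irq_mask,
		.irq_unmask	= foo_irq_unmask,
	};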
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 3cd85faa8ac6..893af2a9cd03 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -261,7 +261,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev) | |||
261 | 261 | ||
262 | virq = irq_create_mapping(NULL, line); | 262 | virq = irq_create_mapping(NULL, line); |
263 | if (virq != NO_IRQ) | 263 | if (virq != NO_IRQ) |
264 | set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); | 264 | irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); |
265 | } else { | 265 | } else { |
266 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", | 266 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", |
267 | oirq.size, oirq.specifier[0], oirq.specifier[1], | 267 | oirq.size, oirq.specifier[0], oirq.specifier[1], |
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index fde0ea50c97d..cfc4b2009982 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | |||
@@ -132,8 +132,8 @@ static int | |||
132 | cpld_pic_host_map(struct irq_host *h, unsigned int virq, | 132 | cpld_pic_host_map(struct irq_host *h, unsigned int virq, |
133 | irq_hw_number_t hw) | 133 | irq_hw_number_t hw) |
134 | { | 134 | { |
135 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 135 | irq_set_status_flags(virq, IRQ_LEVEL); |
136 | set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq); | 136 | irq_set_chip_and_handler(virq, &cpld_pic, handle_level_irq); |
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
@@ -198,7 +198,7 @@ mpc5121_ads_cpld_pic_init(void) | |||
198 | goto end; | 198 | goto end; |
199 | } | 199 | } |
200 | 200 | ||
201 | set_irq_chained_handler(cascade_irq, cpld_pic_cascade); | 201 | irq_set_chained_handler(cascade_irq, cpld_pic_cascade); |
202 | end: | 202 | end: |
203 | of_node_put(np); | 203 | of_node_put(np); |
204 | } | 204 | } |
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 2bd1e6cf1f58..57a6a349e932 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c | |||
@@ -82,7 +82,7 @@ static struct irq_chip media5200_irq_chip = { | |||
82 | 82 | ||
83 | void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | 83 | void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) |
84 | { | 84 | { |
85 | struct irq_chip *chip = get_irq_desc_chip(desc); | 85 | struct irq_chip *chip = irq_desc_get_chip(desc); |
86 | int sub_virq, val; | 86 | int sub_virq, val; |
87 | u32 status, enable; | 87 | u32 status, enable; |
88 | 88 | ||
@@ -107,7 +107,7 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
107 | /* Processing done; can reenable the cascade now */ | 107 | /* Processing done; can reenable the cascade now */ |
108 | raw_spin_lock(&desc->lock); | 108 | raw_spin_lock(&desc->lock); |
109 | chip->irq_ack(&desc->irq_data); | 109 | chip->irq_ack(&desc->irq_data); |
110 | if (!(desc->status & IRQ_DISABLED)) | 110 | if (!irqd_irq_disabled(&desc->irq_data)) |
111 | chip->irq_unmask(&desc->irq_data); | 111 | chip->irq_unmask(&desc->irq_data); |
112 | raw_spin_unlock(&desc->lock); | 112 | raw_spin_unlock(&desc->lock); |
113 | } | 113 | } |
@@ -115,15 +115,10 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
115 | static int media5200_irq_map(struct irq_host *h, unsigned int virq, | 115 | static int media5200_irq_map(struct irq_host *h, unsigned int virq, |
116 | irq_hw_number_t hw) | 116 | irq_hw_number_t hw) |
117 | { | 117 | { |
118 | struct irq_desc *desc = irq_to_desc(virq); | ||
119 | |||
120 | pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); | 118 | pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); |
121 | set_irq_chip_data(virq, &media5200_irq); | 119 | irq_set_chip_data(virq, &media5200_irq); |
122 | set_irq_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); | 120 | irq_set_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); |
123 | set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); | 121 | irq_set_status_flags(virq, IRQ_LEVEL); |
124 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
125 | desc->status |= IRQ_TYPE_LEVEL_LOW | IRQ_LEVEL; | ||
126 | |||
127 | return 0; | 122 | return 0; |
128 | } | 123 | } |
129 | 124 | ||
@@ -187,8 +182,8 @@ static void __init media5200_init_irq(void) | |||
187 | 182 | ||
188 | media5200_irq.irqhost->host_data = &media5200_irq; | 183 | media5200_irq.irqhost->host_data = &media5200_irq; |
189 | 184 | ||
190 | set_irq_data(cascade_virq, &media5200_irq); | 185 | irq_set_handler_data(cascade_virq, &media5200_irq); |
191 | set_irq_chained_handler(cascade_virq, media5200_irq_cascade); | 186 | irq_set_chained_handler(cascade_virq, media5200_irq_cascade); |
192 | 187 | ||
193 | return; | 188 | return; |
194 | 189 | ||
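Editor's note: the media5200 hunks show the data-passing half of the conversion: per-PIC driver data is attached with irq_set_handler_data() and read back in the cascade via irq_desc_get_handler_data(), the descriptor's chip comes from irq_desc_get_chip(), and the cascade itself is installed with irq_set_chained_handler(). A hedged sketch of a demux built on those calls (the foo_* structure and its status register are invented):

	#include <linux/bitops.h>
	#include <linux/init.h>
	#include <linux/io.h>
	#include <linux/irq.h>

	struct foo_pic {
		void __iomem *regs;		/* pending-status register, assumed */
		int irq_base;			/* first virq handled by this PIC */
	};

	static void foo_pic_cascade(unsigned int irq, struct irq_desc *desc)
	{
		struct foo_pic *pic = irq_desc_get_handler_data(desc);
		struct irq_chip *chip = irq_desc_get_chip(desc);
		u32 status = readl(pic->regs);

		if (status)
			generic_handle_irq(pic->irq_base + __ffs(status));

		/* ack and conditionally re-enable the cascade line, as above */
		raw_spin_lock(&desc->lock);
		chip->irq_ack(&desc->irq_data);
		if (!irqd_irq_disabled(&desc->irq_data))
			chip->irq_unmask(&desc->irq_data);
		raw_spin_unlock(&desc->lock);
	}

	static void __init foo_pic_attach(struct foo_pic *pic, unsigned int cascade_virq)
	{
		irq_set_handler_data(cascade_virq, pic);
		irq_set_chained_handler(cascade_virq, foo_pic_cascade);
	}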
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 6da44f0f2934..6c39b9cc2fa3 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c | |||
@@ -192,7 +192,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = { | |||
192 | 192 | ||
193 | void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) | 193 | void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) |
194 | { | 194 | { |
195 | struct mpc52xx_gpt_priv *gpt = get_irq_data(virq); | 195 | struct mpc52xx_gpt_priv *gpt = irq_get_handler_data(virq); |
196 | int sub_virq; | 196 | int sub_virq; |
197 | u32 status; | 197 | u32 status; |
198 | 198 | ||
@@ -209,8 +209,8 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, | |||
209 | struct mpc52xx_gpt_priv *gpt = h->host_data; | 209 | struct mpc52xx_gpt_priv *gpt = h->host_data; |
210 | 210 | ||
211 | dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); | 211 | dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); |
212 | set_irq_chip_data(virq, gpt); | 212 | irq_set_chip_data(virq, gpt); |
213 | set_irq_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); | 213 | irq_set_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); |
214 | 214 | ||
215 | return 0; | 215 | return 0; |
216 | } | 216 | } |
@@ -259,8 +259,8 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) | |||
259 | } | 259 | } |
260 | 260 | ||
261 | gpt->irqhost->host_data = gpt; | 261 | gpt->irqhost->host_data = gpt; |
262 | set_irq_data(cascade_virq, gpt); | 262 | irq_set_handler_data(cascade_virq, gpt); |
263 | set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); | 263 | irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); |
264 | 264 | ||
265 | /* If the GPT is currently disabled, then change it to be in Input | 265 | /* If the GPT is currently disabled, then change it to be in Input |
266 | * Capture mode. If the mode is non-zero, then the pin could be | 266 | * Capture mode. If the mode is non-zero, then the pin could be |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 9f3ed582d082..3ddea96273ca 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c | |||
@@ -214,7 +214,7 @@ static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) | |||
214 | ctrl_reg |= (type << (22 - (l2irq * 2))); | 214 | ctrl_reg |= (type << (22 - (l2irq * 2))); |
215 | out_be32(&intr->ctrl, ctrl_reg); | 215 | out_be32(&intr->ctrl, ctrl_reg); |
216 | 216 | ||
217 | __set_irq_handler_unlocked(d->irq, handler); | 217 | __irq_set_handler_locked(d->irq, handler); |
218 | 218 | ||
219 | return 0; | 219 | return 0; |
220 | } | 220 | } |
@@ -414,7 +414,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, | |||
414 | else | 414 | else |
415 | hndlr = handle_level_irq; | 415 | hndlr = handle_level_irq; |
416 | 416 | ||
417 | set_irq_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); | 417 | irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); |
418 | pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", | 418 | pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", |
419 | __func__, l2irq, virq, (int)irq, type); | 419 | __func__, l2irq, virq, (int)irq, type); |
420 | return 0; | 420 | return 0; |
@@ -431,7 +431,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, | |||
431 | return -EINVAL; | 431 | return -EINVAL; |
432 | } | 432 | } |
433 | 433 | ||
434 | set_irq_chip_and_handler(virq, irqchip, handle_level_irq); | 434 | irq_set_chip_and_handler(virq, irqchip, handle_level_irq); |
435 | pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); | 435 | pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); |
436 | 436 | ||
437 | return 0; | 437 | return 0; |
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 926dfdaaf57a..4a4eb6ffa12f 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | |||
@@ -81,7 +81,7 @@ static struct irq_chip pq2ads_pci_ic = { | |||
81 | 81 | ||
82 | static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) | 82 | static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) |
83 | { | 83 | { |
84 | struct pq2ads_pci_pic *priv = get_irq_desc_data(desc); | 84 | struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); |
85 | u32 stat, mask, pend; | 85 | u32 stat, mask, pend; |
86 | int bit; | 86 | int bit; |
87 | 87 | ||
@@ -106,17 +106,17 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
106 | static int pci_pic_host_map(struct irq_host *h, unsigned int virq, | 106 | static int pci_pic_host_map(struct irq_host *h, unsigned int virq, |
107 | irq_hw_number_t hw) | 107 | irq_hw_number_t hw) |
108 | { | 108 | { |
109 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 109 | irq_set_status_flags(virq, IRQ_LEVEL); |
110 | set_irq_chip_data(virq, h->host_data); | 110 | irq_set_chip_data(virq, h->host_data); |
111 | set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); | 111 | irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); |
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
114 | 114 | ||
115 | static void pci_host_unmap(struct irq_host *h, unsigned int virq) | 115 | static void pci_host_unmap(struct irq_host *h, unsigned int virq) |
116 | { | 116 | { |
117 | /* remove chip and handler */ | 117 | /* remove chip and handler */ |
118 | set_irq_chip_data(virq, NULL); | 118 | irq_set_chip_data(virq, NULL); |
119 | set_irq_chip(virq, NULL); | 119 | irq_set_chip(virq, NULL); |
120 | } | 120 | } |
121 | 121 | ||
122 | static struct irq_host_ops pci_pic_host_ops = { | 122 | static struct irq_host_ops pci_pic_host_ops = { |
@@ -175,8 +175,8 @@ int __init pq2ads_pci_init_irq(void) | |||
175 | 175 | ||
176 | priv->host = host; | 176 | priv->host = host; |
177 | host->host_data = priv; | 177 | host->host_data = priv; |
178 | set_irq_data(irq, priv); | 178 | irq_set_handler_data(irq, priv); |
179 | set_irq_chained_handler(irq, pq2ads_pci_irq_demux); | 179 | irq_set_chained_handler(irq, pq2ads_pci_irq_demux); |
180 | 180 | ||
181 | of_node_put(np); | 181 | of_node_put(np); |
182 | return 0; | 182 | return 0; |
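Editor's note: on the powerpc irq_host map path (the pre-irqdomain host abstraction used by the files above), the pattern is to mark the line level-sensitive with irq_set_status_flags(virq, IRQ_LEVEL) instead of poking irq_to_desc(virq)->status, then attach chip data and the flow handler; unmap undoes it with irq_set_chip_data() and irq_set_chip(). A sketch of the pair in the new idiom (chip and names are placeholders):

	#include <linux/irq.h>

	static struct irq_chip foo_pci_pic = { .name = "FOO-PCI" };	/* stub chip */

	static int foo_pic_host_map(struct irq_host *h, unsigned int virq,
				    irq_hw_number_t hw)
	{
		irq_set_status_flags(virq, IRQ_LEVEL);
		irq_set_chip_data(virq, h->host_data);
		irq_set_chip_and_handler(virq, &foo_pci_pic, handle_level_irq);
		return 0;
	}

	static void foo_pic_host_unmap(struct irq_host *h, unsigned int virq)
	{
		/* remove chip and handler data, mirroring pci_host_unmap() above */
		irq_set_chip_data(virq, NULL);
		irq_set_chip(virq, NULL);
	}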
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index 64447e48f3d5..c46f9359be15 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c | |||
@@ -56,7 +56,7 @@ static void machine_restart(char *cmd) | |||
56 | 56 | ||
57 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 57 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
58 | { | 58 | { |
59 | struct irq_chip *chip = get_irq_desc_chip(desc); | 59 | struct irq_chip *chip = irq_desc_get_chip(desc); |
60 | int cascade_irq; | 60 | int cascade_irq; |
61 | 61 | ||
62 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 62 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
@@ -106,7 +106,7 @@ static void __init ksi8560_pic_init(void) | |||
106 | 106 | ||
107 | cpm2_pic_init(np); | 107 | cpm2_pic_init(np); |
108 | of_node_put(np); | 108 | of_node_put(np); |
109 | set_irq_chained_handler(irq, cpm2_cascade); | 109 | irq_set_chained_handler(irq, cpm2_cascade); |
110 | #endif | 110 | #endif |
111 | } | 111 | } |
112 | 112 | ||
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 1352d1107bfd..3b2c9bb66199 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c | |||
@@ -50,7 +50,7 @@ static int mpc85xx_exclude_device(struct pci_controller *hose, | |||
50 | 50 | ||
51 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 51 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
52 | { | 52 | { |
53 | struct irq_chip *chip = get_irq_desc_chip(desc); | 53 | struct irq_chip *chip = irq_desc_get_chip(desc); |
54 | int cascade_irq; | 54 | int cascade_irq; |
55 | 55 | ||
56 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 56 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
@@ -101,7 +101,7 @@ static void __init mpc85xx_ads_pic_init(void) | |||
101 | 101 | ||
102 | cpm2_pic_init(np); | 102 | cpm2_pic_init(np); |
103 | of_node_put(np); | 103 | of_node_put(np); |
104 | set_irq_chained_handler(irq, cpm2_cascade); | 104 | irq_set_chained_handler(irq, cpm2_cascade); |
105 | #endif | 105 | #endif |
106 | } | 106 | } |
107 | 107 | ||
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 458d91fba91d..6299a2a51ae8 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c | |||
@@ -255,7 +255,7 @@ static int mpc85xx_cds_8259_attach(void) | |||
255 | } | 255 | } |
256 | 256 | ||
257 | /* Success. Connect our low-level cascade handler. */ | 257 | /* Success. Connect our low-level cascade handler. */ |
258 | set_irq_handler(cascade_irq, mpc85xx_8259_cascade_handler); | 258 | irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler); |
259 | 259 | ||
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 793ead7993ab..c7b97f70312e 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #ifdef CONFIG_PPC_I8259 | 47 | #ifdef CONFIG_PPC_I8259 |
48 | static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) | 48 | static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) |
49 | { | 49 | { |
50 | struct irq_chip *chip = get_irq_desc_chip(desc); | 50 | struct irq_chip *chip = irq_desc_get_chip(desc); |
51 | unsigned int cascade_irq = i8259_irq(); | 51 | unsigned int cascade_irq = i8259_irq(); |
52 | 52 | ||
53 | if (cascade_irq != NO_IRQ) { | 53 | if (cascade_irq != NO_IRQ) { |
@@ -122,7 +122,7 @@ void __init mpc85xx_ds_pic_init(void) | |||
122 | i8259_init(cascade_node, 0); | 122 | i8259_init(cascade_node, 0); |
123 | of_node_put(cascade_node); | 123 | of_node_put(cascade_node); |
124 | 124 | ||
125 | set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade); | 125 | irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade); |
126 | #endif /* CONFIG_PPC_I8259 */ | 126 | #endif /* CONFIG_PPC_I8259 */ |
127 | } | 127 | } |
128 | 128 | ||
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c index d7e28ec3e072..d2dfd465fbf6 100644 --- a/arch/powerpc/platforms/85xx/sbc8560.c +++ b/arch/powerpc/platforms/85xx/sbc8560.c | |||
@@ -41,7 +41,7 @@ | |||
41 | 41 | ||
42 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 42 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
43 | { | 43 | { |
44 | struct irq_chip *chip = get_irq_desc_chip(desc); | 44 | struct irq_chip *chip = irq_desc_get_chip(desc); |
45 | int cascade_irq; | 45 | int cascade_irq; |
46 | 46 | ||
47 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 47 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
@@ -92,7 +92,7 @@ static void __init sbc8560_pic_init(void) | |||
92 | 92 | ||
93 | cpm2_pic_init(np); | 93 | cpm2_pic_init(np); |
94 | of_node_put(np); | 94 | of_node_put(np); |
95 | set_irq_chained_handler(irq, cpm2_cascade); | 95 | irq_set_chained_handler(irq, cpm2_cascade); |
96 | #endif | 96 | #endif |
97 | } | 97 | } |
98 | 98 | ||
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 79d85aca4767..db864623b4ae 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c | |||
@@ -93,7 +93,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) | |||
93 | 93 | ||
94 | void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) | 94 | void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) |
95 | { | 95 | { |
96 | struct irq_chip *chip = get_irq_desc_chip(desc); | 96 | struct irq_chip *chip = irq_desc_get_chip(desc); |
97 | unsigned int cascade_irq; | 97 | unsigned int cascade_irq; |
98 | 98 | ||
99 | /* | 99 | /* |
@@ -245,9 +245,9 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, | |||
245 | irq_hw_number_t hwirq) | 245 | irq_hw_number_t hwirq) |
246 | { | 246 | { |
247 | /* All interrupts are LEVEL sensitive */ | 247 | /* All interrupts are LEVEL sensitive */ |
248 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 248 | irq_set_status_flags(virq, IRQ_LEVEL); |
249 | set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip, | 249 | irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip, |
250 | handle_fasteoi_irq); | 250 | handle_fasteoi_irq); |
251 | 251 | ||
252 | return 0; | 252 | return 0; |
253 | } | 253 | } |
@@ -308,8 +308,8 @@ void socrates_fpga_pic_init(struct device_node *pic) | |||
308 | pr_warning("FPGA PIC: can't get irq%d.\n", i); | 308 | pr_warning("FPGA PIC: can't get irq%d.\n", i); |
309 | continue; | 309 | continue; |
310 | } | 310 | } |
311 | set_irq_chained_handler(socrates_fpga_irqs[i], | 311 | irq_set_chained_handler(socrates_fpga_irqs[i], |
312 | socrates_fpga_pic_cascade); | 312 | socrates_fpga_pic_cascade); |
313 | } | 313 | } |
314 | 314 | ||
315 | socrates_fpga_pic_iobase = of_iomap(pic, 0); | 315 | socrates_fpga_pic_iobase = of_iomap(pic, 0); |
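In the socrates map() hook above, the open-coded irq_to_desc(virq)->status |= IRQ_LEVEL becomes irq_set_status_flags(), and the chip/handler pair goes in through irq_set_chip_and_handler(). A minimal sketch of such an irq_host map callback (struct irq_host is the powerpc-specific host type of this era; my_pic_chip is a hypothetical chip):

    #include <linux/irq.h>
    #include <asm/irq.h>                    /* struct irq_host on powerpc */

    static struct irq_chip my_pic_chip;     /* hypothetical */

    static int my_pic_host_map(struct irq_host *h, unsigned int virq,
                               irq_hw_number_t hw)
    {
            /* Every source behind this controller is level sensitive. */
            irq_set_status_flags(virq, IRQ_LEVEL);
            irq_set_chip_and_handler(virq, &my_pic_chip, handle_fasteoi_irq);
            return 0;
    }

Going through the helper keeps the flag update inside the genirq core instead of having platform code reach into irq_desc directly, which is the point of most of the conversions in this diff.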
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c index 2b62b064eac7..5387e9f06bdb 100644 --- a/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/arch/powerpc/platforms/85xx/stx_gp3.c | |||
@@ -46,7 +46,7 @@ | |||
46 | 46 | ||
47 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 47 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
48 | { | 48 | { |
49 | struct irq_chip *chip = get_irq_desc_chip(desc); | 49 | struct irq_chip *chip = irq_desc_get_chip(desc); |
50 | int cascade_irq; | 50 | int cascade_irq; |
51 | 51 | ||
52 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 52 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
@@ -102,7 +102,7 @@ static void __init stx_gp3_pic_init(void) | |||
102 | 102 | ||
103 | cpm2_pic_init(np); | 103 | cpm2_pic_init(np); |
104 | of_node_put(np); | 104 | of_node_put(np); |
105 | set_irq_chained_handler(irq, cpm2_cascade); | 105 | irq_set_chained_handler(irq, cpm2_cascade); |
106 | #endif | 106 | #endif |
107 | } | 107 | } |
108 | 108 | ||
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index 2265b68e3279..325de772725a 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c | |||
@@ -44,7 +44,7 @@ | |||
44 | 44 | ||
45 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 45 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
46 | { | 46 | { |
47 | struct irq_chip *chip = get_irq_desc_chip(desc); | 47 | struct irq_chip *chip = irq_desc_get_chip(desc); |
48 | int cascade_irq; | 48 | int cascade_irq; |
49 | 49 | ||
50 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 50 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
@@ -100,7 +100,7 @@ static void __init tqm85xx_pic_init(void) | |||
100 | 100 | ||
101 | cpm2_pic_init(np); | 101 | cpm2_pic_init(np); |
102 | of_node_put(np); | 102 | of_node_put(np); |
103 | set_irq_chained_handler(irq, cpm2_cascade); | 103 | irq_set_chained_handler(irq, cpm2_cascade); |
104 | #endif | 104 | #endif |
105 | } | 105 | } |
106 | 106 | ||
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c index 0adfe3b740cd..0beec7d5566b 100644 --- a/arch/powerpc/platforms/86xx/gef_pic.c +++ b/arch/powerpc/platforms/86xx/gef_pic.c | |||
@@ -95,7 +95,7 @@ static int gef_pic_cascade_irq; | |||
95 | 95 | ||
96 | void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) | 96 | void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) |
97 | { | 97 | { |
98 | struct irq_chip *chip = get_irq_desc_chip(desc); | 98 | struct irq_chip *chip = irq_desc_get_chip(desc); |
99 | unsigned int cascade_irq; | 99 | unsigned int cascade_irq; |
100 | 100 | ||
101 | /* | 101 | /* |
@@ -163,8 +163,8 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq, | |||
163 | irq_hw_number_t hwirq) | 163 | irq_hw_number_t hwirq) |
164 | { | 164 | { |
165 | /* All interrupts are LEVEL sensitive */ | 165 | /* All interrupts are LEVEL sensitive */ |
166 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 166 | irq_set_status_flags(virq, IRQ_LEVEL); |
167 | set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); | 167 | irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); |
168 | 168 | ||
169 | return 0; | 169 | return 0; |
170 | } | 170 | } |
@@ -225,7 +225,7 @@ void __init gef_pic_init(struct device_node *np) | |||
225 | return; | 225 | return; |
226 | 226 | ||
227 | /* Chain with parent controller */ | 227 | /* Chain with parent controller */ |
228 | set_irq_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); | 228 | irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); |
229 | } | 229 | } |
230 | 230 | ||
231 | /* | 231 | /* |
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index cbe33639b478..8ef8960abda6 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #ifdef CONFIG_PPC_I8259 | 19 | #ifdef CONFIG_PPC_I8259 |
20 | static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) | 20 | static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) |
21 | { | 21 | { |
22 | struct irq_chip *chip = get_irq_desc_chip(desc); | 22 | struct irq_chip *chip = irq_desc_get_chip(desc); |
23 | unsigned int cascade_irq = i8259_irq(); | 23 | unsigned int cascade_irq = i8259_irq(); |
24 | 24 | ||
25 | if (cascade_irq != NO_IRQ) | 25 | if (cascade_irq != NO_IRQ) |
@@ -77,6 +77,6 @@ void __init mpc86xx_init_irq(void) | |||
77 | i8259_init(cascade_node, 0); | 77 | i8259_init(cascade_node, 0); |
78 | of_node_put(cascade_node); | 78 | of_node_put(cascade_node); |
79 | 79 | ||
80 | set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade); | 80 | irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade); |
81 | #endif | 81 | #endif |
82 | } | 82 | } |
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index fabb108e8744..9ecce995dd4b 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c | |||
@@ -226,11 +226,11 @@ static void cpm_cascade(unsigned int irq, struct irq_desc *desc) | |||
226 | 226 | ||
227 | generic_handle_irq(cascade_irq); | 227 | generic_handle_irq(cascade_irq); |
228 | 228 | ||
229 | chip = get_irq_desc_chip(cdesc); | 229 | chip = irq_desc_get_chip(cdesc); |
230 | chip->irq_eoi(&cdesc->irq_data); | 230 | chip->irq_eoi(&cdesc->irq_data); |
231 | } | 231 | } |
232 | 232 | ||
233 | chip = get_irq_desc_chip(desc); | 233 | chip = irq_desc_get_chip(desc); |
234 | chip->irq_eoi(&desc->irq_data); | 234 | chip->irq_eoi(&desc->irq_data); |
235 | } | 235 | } |
236 | 236 | ||
@@ -251,5 +251,5 @@ void __init mpc8xx_pics_init(void) | |||
251 | 251 | ||
252 | irq = cpm_pic_init(); | 252 | irq = cpm_pic_init(); |
253 | if (irq != NO_IRQ) | 253 | if (irq != NO_IRQ) |
254 | set_irq_chained_handler(irq, cpm_cascade); | 254 | irq_set_chained_handler(irq, cpm_cascade); |
255 | } | 255 | } |
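The m8xx cascade above shows the other half of the accessor change: once the chip is obtained from a descriptor with irq_desc_get_chip(), its callbacks take &desc->irq_data rather than a bare IRQ number. A compressed sketch of that EOI step, which the real handler performs once for the cascaded descriptor and once for the parent:

    #include <linux/irq.h>

    /* EOI an interrupt given only its descriptor, as cpm_cascade does. */
    static void my_eoi_desc(struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);

            if (chip && chip->irq_eoi)
                    chip->irq_eoi(&desc->irq_data);
    }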
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 48cd7d2e1b75..81239ebed83f 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig | |||
@@ -9,6 +9,7 @@ config PPC_CELL_COMMON | |||
9 | select PPC_INDIRECT_IO | 9 | select PPC_INDIRECT_IO |
10 | select PPC_NATIVE | 10 | select PPC_NATIVE |
11 | select PPC_RTAS | 11 | select PPC_RTAS |
12 | select IRQ_EDGE_EOI_HANDLER | ||
12 | 13 | ||
13 | config PPC_CELL_NATIVE | 14 | config PPC_CELL_NATIVE |
14 | bool | 15 | bool |
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index c48b66a67e42..bb5ebf8fa80b 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
@@ -93,8 +93,8 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) | |||
93 | 93 | ||
94 | static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) | 94 | static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) |
95 | { | 95 | { |
96 | struct irq_chip *chip = get_irq_desc_chip(desc); | 96 | struct irq_chip *chip = irq_desc_get_chip(desc); |
97 | struct axon_msic *msic = get_irq_data(irq); | 97 | struct axon_msic *msic = irq_get_handler_data(irq); |
98 | u32 write_offset, msi; | 98 | u32 write_offset, msi; |
99 | int idx; | 99 | int idx; |
100 | int retry = 0; | 100 | int retry = 0; |
@@ -287,7 +287,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
287 | } | 287 | } |
288 | dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); | 288 | dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); |
289 | 289 | ||
290 | set_irq_msi(virq, entry); | 290 | irq_set_msi_desc(virq, entry); |
291 | msg.data = virq; | 291 | msg.data = virq; |
292 | write_msi_msg(virq, &msg); | 292 | write_msi_msg(virq, &msg); |
293 | } | 293 | } |
@@ -305,7 +305,7 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) | |||
305 | if (entry->irq == NO_IRQ) | 305 | if (entry->irq == NO_IRQ) |
306 | continue; | 306 | continue; |
307 | 307 | ||
308 | set_irq_msi(entry->irq, NULL); | 308 | irq_set_msi_desc(entry->irq, NULL); |
309 | irq_dispose_mapping(entry->irq); | 309 | irq_dispose_mapping(entry->irq); |
310 | } | 310 | } |
311 | } | 311 | } |
@@ -320,7 +320,7 @@ static struct irq_chip msic_irq_chip = { | |||
320 | static int msic_host_map(struct irq_host *h, unsigned int virq, | 320 | static int msic_host_map(struct irq_host *h, unsigned int virq, |
321 | irq_hw_number_t hw) | 321 | irq_hw_number_t hw) |
322 | { | 322 | { |
323 | set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); | 323 | irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); |
324 | 324 | ||
325 | return 0; | 325 | return 0; |
326 | } | 326 | } |
@@ -400,8 +400,8 @@ static int axon_msi_probe(struct platform_device *device) | |||
400 | 400 | ||
401 | msic->irq_host->host_data = msic; | 401 | msic->irq_host->host_data = msic; |
402 | 402 | ||
403 | set_irq_data(virq, msic); | 403 | irq_set_handler_data(virq, msic); |
404 | set_irq_chained_handler(virq, axon_msi_cascade); | 404 | irq_set_chained_handler(virq, axon_msi_cascade); |
405 | pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); | 405 | pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); |
406 | 406 | ||
407 | /* Enable the MSIC hardware */ | 407 | /* Enable the MSIC hardware */ |
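The axon_msi hunks move the MSI descriptor hook from set_irq_msi() to irq_set_msi_desc() on both the setup and the teardown path. A hedged sketch of that pairing for a single virq (allocation, message composition and write_msi_msg() are left out; entry is a struct msi_desc as in the driver):

    #include <linux/irq.h>
    #include <linux/msi.h>
    #include <asm/irq.h>            /* irq_dispose_mapping() on powerpc */

    /* Attach a composed MSI descriptor to a freshly mapped virq. */
    static void my_msi_attach(unsigned int virq, struct msi_desc *entry)
    {
            irq_set_msi_desc(virq, entry);
    }

    /* Detach it again before the mapping is disposed, as the teardown does. */
    static void my_msi_detach(unsigned int virq)
    {
            irq_set_msi_desc(virq, NULL);
            irq_dispose_mapping(virq);
    }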
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 0b8f7d7135c5..4cb9e147c307 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c | |||
@@ -136,15 +136,14 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) | |||
136 | static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, | 136 | static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, |
137 | irq_hw_number_t hw) | 137 | irq_hw_number_t hw) |
138 | { | 138 | { |
139 | struct irq_desc *desc = irq_to_desc(virq); | ||
140 | int64_t err; | 139 | int64_t err; |
141 | 140 | ||
142 | err = beat_construct_and_connect_irq_plug(virq, hw); | 141 | err = beat_construct_and_connect_irq_plug(virq, hw); |
143 | if (err < 0) | 142 | if (err < 0) |
144 | return -EIO; | 143 | return -EIO; |
145 | 144 | ||
146 | desc->status |= IRQ_LEVEL; | 145 | irq_set_status_flags(virq, IRQ_LEVEL); |
147 | set_irq_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); | 146 | irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); |
148 | return 0; | 147 | return 0; |
149 | } | 148 | } |
150 | 149 | ||
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 624d26e72f1d..a19bec078703 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -101,9 +101,9 @@ static void iic_ioexc_eoi(struct irq_data *d) | |||
101 | 101 | ||
102 | static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) | 102 | static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) |
103 | { | 103 | { |
104 | struct irq_chip *chip = get_irq_desc_chip(desc); | 104 | struct irq_chip *chip = irq_desc_get_chip(desc); |
105 | struct cbe_iic_regs __iomem *node_iic = | 105 | struct cbe_iic_regs __iomem *node_iic = |
106 | (void __iomem *)get_irq_desc_data(desc); | 106 | (void __iomem *)irq_desc_get_handler_data(desc); |
107 | unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; | 107 | unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; |
108 | unsigned long bits, ack; | 108 | unsigned long bits, ack; |
109 | int cascade; | 109 | int cascade; |
@@ -235,67 +235,19 @@ static int iic_host_match(struct irq_host *h, struct device_node *node) | |||
235 | "IBM,CBEA-Internal-Interrupt-Controller"); | 235 | "IBM,CBEA-Internal-Interrupt-Controller"); |
236 | } | 236 | } |
237 | 237 | ||
238 | extern int noirqdebug; | ||
239 | |||
240 | static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) | ||
241 | { | ||
242 | struct irq_chip *chip = get_irq_desc_chip(desc); | ||
243 | |||
244 | raw_spin_lock(&desc->lock); | ||
245 | |||
246 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | ||
247 | |||
248 | /* | ||
249 | * If we're currently running this IRQ, or its disabled, | ||
250 | * we shouldn't process the IRQ. Mark it pending, handle | ||
251 | * the necessary masking and go out | ||
252 | */ | ||
253 | if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || | ||
254 | !desc->action)) { | ||
255 | desc->status |= IRQ_PENDING; | ||
256 | goto out_eoi; | ||
257 | } | ||
258 | |||
259 | kstat_incr_irqs_this_cpu(irq, desc); | ||
260 | |||
261 | /* Mark the IRQ currently in progress.*/ | ||
262 | desc->status |= IRQ_INPROGRESS; | ||
263 | |||
264 | do { | ||
265 | struct irqaction *action = desc->action; | ||
266 | irqreturn_t action_ret; | ||
267 | |||
268 | if (unlikely(!action)) | ||
269 | goto out_eoi; | ||
270 | |||
271 | desc->status &= ~IRQ_PENDING; | ||
272 | raw_spin_unlock(&desc->lock); | ||
273 | action_ret = handle_IRQ_event(irq, action); | ||
274 | if (!noirqdebug) | ||
275 | note_interrupt(irq, desc, action_ret); | ||
276 | raw_spin_lock(&desc->lock); | ||
277 | |||
278 | } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); | ||
279 | |||
280 | desc->status &= ~IRQ_INPROGRESS; | ||
281 | out_eoi: | ||
282 | chip->irq_eoi(&desc->irq_data); | ||
283 | raw_spin_unlock(&desc->lock); | ||
284 | } | ||
285 | |||
286 | static int iic_host_map(struct irq_host *h, unsigned int virq, | 238 | static int iic_host_map(struct irq_host *h, unsigned int virq, |
287 | irq_hw_number_t hw) | 239 | irq_hw_number_t hw) |
288 | { | 240 | { |
289 | switch (hw & IIC_IRQ_TYPE_MASK) { | 241 | switch (hw & IIC_IRQ_TYPE_MASK) { |
290 | case IIC_IRQ_TYPE_IPI: | 242 | case IIC_IRQ_TYPE_IPI: |
291 | set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); | 243 | irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); |
292 | break; | 244 | break; |
293 | case IIC_IRQ_TYPE_IOEXC: | 245 | case IIC_IRQ_TYPE_IOEXC: |
294 | set_irq_chip_and_handler(virq, &iic_ioexc_chip, | 246 | irq_set_chip_and_handler(virq, &iic_ioexc_chip, |
295 | handle_iic_irq); | 247 | handle_edge_eoi_irq); |
296 | break; | 248 | break; |
297 | default: | 249 | default: |
298 | set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq); | 250 | irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); |
299 | } | 251 | } |
300 | return 0; | 252 | return 0; |
301 | } | 253 | } |
@@ -412,8 +364,8 @@ static int __init setup_iic(void) | |||
412 | * irq_data is a generic pointer that gets passed back | 364 | * irq_data is a generic pointer that gets passed back |
413 | * to us later, so the forced cast is fine. | 365 | * to us later, so the forced cast is fine. |
414 | */ | 366 | */ |
415 | set_irq_data(cascade, (void __force *)node_iic); | 367 | irq_set_handler_data(cascade, (void __force *)node_iic); |
416 | set_irq_chained_handler(cascade , iic_ioexc_cascade); | 368 | irq_set_chained_handler(cascade, iic_ioexc_cascade); |
417 | out_be64(&node_iic->iic_ir, | 369 | out_be64(&node_iic->iic_ir, |
418 | (1 << 12) /* priority */ | | 370 | (1 << 12) /* priority */ | |
419 | (node << 4) /* dest node */ | | 371 | (node << 4) /* dest node */ | |
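The large deletion in cell/interrupt.c is possible because the hand-rolled handle_iic_irq() flow was really a generic "handle the edge, then EOI" flow: this tree adds handle_edge_eoi_irq() to the genirq core behind the IRQ_EDGE_EOI_HANDLER symbol that the Cell Kconfig hunk earlier now selects, so the map() hook can simply pick the core handler. A sketch of the resulting mapping logic, with hypothetical chips and type constants standing in for the iic ones:

    #include <linux/irq.h>
    #include <asm/irq.h>                    /* struct irq_host on powerpc */

    static struct irq_chip my_iic_chip;     /* hypothetical stand-ins */
    static struct irq_chip my_ioexc_chip;

    #define MY_IRQ_TYPE_MASK        0xf     /* hypothetical hwirq encoding */
    #define MY_IRQ_TYPE_IPI         0x1
    #define MY_IRQ_TYPE_IOEXC       0x2

    static int my_iic_host_map(struct irq_host *h, unsigned int virq,
                               irq_hw_number_t hw)
    {
            switch (hw & MY_IRQ_TYPE_MASK) {
            case MY_IRQ_TYPE_IPI:
                    irq_set_chip_and_handler(virq, &my_iic_chip,
                                             handle_percpu_irq);
                    break;
            case MY_IRQ_TYPE_IOEXC:
                    irq_set_chip_and_handler(virq, &my_ioexc_chip,
                                             handle_edge_eoi_irq);
                    break;
            default:
                    /* edge-latched source that still wants an EOI */
                    irq_set_chip_and_handler(virq, &my_iic_chip,
                                             handle_edge_eoi_irq);
            }
            return 0;
    }

handle_edge_eoi_irq() is only built when IRQ_EDGE_EOI_HANDLER is selected, which is exactly what the one-line Kconfig change for PPC_CELL_COMMON provides.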
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 6a28d027d959..fd57bfe00edf 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c | |||
@@ -187,8 +187,8 @@ machine_subsys_initcall(cell, cell_publish_devices); | |||
187 | 187 | ||
188 | static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) | 188 | static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) |
189 | { | 189 | { |
190 | struct irq_chip *chip = get_irq_desc_chip(desc); | 190 | struct irq_chip *chip = irq_desc_get_chip(desc); |
191 | struct mpic *mpic = get_irq_desc_data(desc); | 191 | struct mpic *mpic = irq_desc_get_handler_data(desc); |
192 | unsigned int virq; | 192 | unsigned int virq; |
193 | 193 | ||
194 | virq = mpic_get_one_irq(mpic); | 194 | virq = mpic_get_one_irq(mpic); |
@@ -223,8 +223,8 @@ static void __init mpic_init_IRQ(void) | |||
223 | 223 | ||
224 | printk(KERN_INFO "%s : hooking up to IRQ %d\n", | 224 | printk(KERN_INFO "%s : hooking up to IRQ %d\n", |
225 | dn->full_name, virq); | 225 | dn->full_name, virq); |
226 | set_irq_data(virq, mpic); | 226 | irq_set_handler_data(virq, mpic); |
227 | set_irq_chained_handler(virq, cell_mpic_cascade); | 227 | irq_set_chained_handler(virq, cell_mpic_cascade); |
228 | } | 228 | } |
229 | } | 229 | } |
230 | 230 | ||
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index b38cdfc1deb8..c5cf50e6b45a 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c | |||
@@ -102,7 +102,7 @@ static void spider_ack_irq(struct irq_data *d) | |||
102 | 102 | ||
103 | /* Reset edge detection logic if necessary | 103 | /* Reset edge detection logic if necessary |
104 | */ | 104 | */ |
105 | if (irq_to_desc(d->irq)->status & IRQ_LEVEL) | 105 | if (irqd_is_level_type(d)) |
106 | return; | 106 | return; |
107 | 107 | ||
108 | /* Only interrupts 47 to 50 can be set to edge */ | 108 | /* Only interrupts 47 to 50 can be set to edge */ |
@@ -119,7 +119,6 @@ static int spider_set_irq_type(struct irq_data *d, unsigned int type) | |||
119 | struct spider_pic *pic = spider_virq_to_pic(d->irq); | 119 | struct spider_pic *pic = spider_virq_to_pic(d->irq); |
120 | unsigned int hw = irq_map[d->irq].hwirq; | 120 | unsigned int hw = irq_map[d->irq].hwirq; |
121 | void __iomem *cfg = spider_get_irq_config(pic, hw); | 121 | void __iomem *cfg = spider_get_irq_config(pic, hw); |
122 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
123 | u32 old_mask; | 122 | u32 old_mask; |
124 | u32 ic; | 123 | u32 ic; |
125 | 124 | ||
@@ -147,12 +146,6 @@ static int spider_set_irq_type(struct irq_data *d, unsigned int type) | |||
147 | return -EINVAL; | 146 | return -EINVAL; |
148 | } | 147 | } |
149 | 148 | ||
150 | /* Update irq_desc */ | ||
151 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
152 | desc->status |= type & IRQ_TYPE_SENSE_MASK; | ||
153 | if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
154 | desc->status |= IRQ_LEVEL; | ||
155 | |||
156 | /* Configure the source. One gross hack that was there before and | 149 | /* Configure the source. One gross hack that was there before and |
157 | * that I've kept around is the priority to the BE which I set to | 150 | * that I've kept around is the priority to the BE which I set to |
158 | * be the same as the interrupt source number. I don't know whether | 151 | * be the same as the interrupt source number. I don't know whether |
@@ -178,10 +171,10 @@ static struct irq_chip spider_pic = { | |||
178 | static int spider_host_map(struct irq_host *h, unsigned int virq, | 171 | static int spider_host_map(struct irq_host *h, unsigned int virq, |
179 | irq_hw_number_t hw) | 172 | irq_hw_number_t hw) |
180 | { | 173 | { |
181 | set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); | 174 | irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); |
182 | 175 | ||
183 | /* Set default irq type */ | 176 | /* Set default irq type */ |
184 | set_irq_type(virq, IRQ_TYPE_NONE); | 177 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
185 | 178 | ||
186 | return 0; | 179 | return 0; |
187 | } | 180 | } |
@@ -207,8 +200,8 @@ static struct irq_host_ops spider_host_ops = { | |||
207 | 200 | ||
208 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) | 201 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) |
209 | { | 202 | { |
210 | struct irq_chip *chip = get_irq_desc_chip(desc); | 203 | struct irq_chip *chip = irq_desc_get_chip(desc); |
211 | struct spider_pic *pic = get_irq_desc_data(desc); | 204 | struct spider_pic *pic = irq_desc_get_handler_data(desc); |
212 | unsigned int cs, virq; | 205 | unsigned int cs, virq; |
213 | 206 | ||
214 | cs = in_be32(pic->regs + TIR_CS) >> 24; | 207 | cs = in_be32(pic->regs + TIR_CS) >> 24; |
@@ -328,8 +321,8 @@ static void __init spider_init_one(struct device_node *of_node, int chip, | |||
328 | virq = spider_find_cascade_and_node(pic); | 321 | virq = spider_find_cascade_and_node(pic); |
329 | if (virq == NO_IRQ) | 322 | if (virq == NO_IRQ) |
330 | return; | 323 | return; |
331 | set_irq_data(virq, pic); | 324 | irq_set_handler_data(virq, pic); |
332 | set_irq_chained_handler(virq, spider_irq_cascade); | 325 | irq_set_chained_handler(virq, spider_irq_cascade); |
333 | 326 | ||
334 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", | 327 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", |
335 | pic->node_id, addr, of_node->full_name); | 328 | pic->node_id, addr, of_node->full_name); |
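Two things change in spider-pic above: the ack path asks the core about the trigger through irqd_is_level_type() instead of peeking at desc->status, and the set_type hook drops its manual IRQ_TYPE_SENSE_MASK/IRQ_LEVEL bookkeeping because the genirq core records the trigger type itself once the callback returns success. A sketch of the query side in a chip ack callback (the register write is hypothetical and omitted):

    #include <linux/irq.h>

    static void my_pic_ack(struct irq_data *d)
    {
            /* Level-triggered sources need no edge-latch reset. */
            if (irqd_is_level_type(d))
                    return;

            /* hypothetical: clear the controller's edge-detect latch here */
    }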
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 4c1288451a21..122786498419 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -365,7 +365,7 @@ void __init chrp_setup_arch(void) | |||
365 | 365 | ||
366 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) | 366 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) |
367 | { | 367 | { |
368 | struct irq_chip *chip = get_irq_desc_chip(desc); | 368 | struct irq_chip *chip = irq_desc_get_chip(desc); |
369 | unsigned int cascade_irq = i8259_irq(); | 369 | unsigned int cascade_irq = i8259_irq(); |
370 | 370 | ||
371 | if (cascade_irq != NO_IRQ) | 371 | if (cascade_irq != NO_IRQ) |
@@ -517,7 +517,7 @@ static void __init chrp_find_8259(void) | |||
517 | if (cascade_irq == NO_IRQ) | 517 | if (cascade_irq == NO_IRQ) |
518 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); | 518 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); |
519 | else | 519 | else |
520 | set_irq_chained_handler(cascade_irq, | 520 | irq_set_chained_handler(cascade_irq, |
521 | chrp_8259_cascade); | 521 | chrp_8259_cascade); |
522 | } | 522 | } |
523 | } | 523 | } |
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index 0aca0e28a8e5..12aa62b6f227 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c | |||
@@ -101,16 +101,16 @@ static struct irq_host *flipper_irq_host; | |||
101 | static int flipper_pic_map(struct irq_host *h, unsigned int virq, | 101 | static int flipper_pic_map(struct irq_host *h, unsigned int virq, |
102 | irq_hw_number_t hwirq) | 102 | irq_hw_number_t hwirq) |
103 | { | 103 | { |
104 | set_irq_chip_data(virq, h->host_data); | 104 | irq_set_chip_data(virq, h->host_data); |
105 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 105 | irq_set_status_flags(virq, IRQ_LEVEL); |
106 | set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq); | 106 | irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq); |
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | static void flipper_pic_unmap(struct irq_host *h, unsigned int irq) | 110 | static void flipper_pic_unmap(struct irq_host *h, unsigned int irq) |
111 | { | 111 | { |
112 | set_irq_chip_data(irq, NULL); | 112 | irq_set_chip_data(irq, NULL); |
113 | set_irq_chip(irq, NULL); | 113 | irq_set_chip(irq, NULL); |
114 | } | 114 | } |
115 | 115 | ||
116 | static int flipper_pic_match(struct irq_host *h, struct device_node *np) | 116 | static int flipper_pic_match(struct irq_host *h, struct device_node *np) |
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 35e448bd8479..2bdddfc9d520 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c | |||
@@ -94,16 +94,16 @@ static struct irq_host *hlwd_irq_host; | |||
94 | static int hlwd_pic_map(struct irq_host *h, unsigned int virq, | 94 | static int hlwd_pic_map(struct irq_host *h, unsigned int virq, |
95 | irq_hw_number_t hwirq) | 95 | irq_hw_number_t hwirq) |
96 | { | 96 | { |
97 | set_irq_chip_data(virq, h->host_data); | 97 | irq_set_chip_data(virq, h->host_data); |
98 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 98 | irq_set_status_flags(virq, IRQ_LEVEL); |
99 | set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq); | 99 | irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq); |
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq) | 103 | static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq) |
104 | { | 104 | { |
105 | set_irq_chip_data(irq, NULL); | 105 | irq_set_chip_data(irq, NULL); |
106 | set_irq_chip(irq, NULL); | 106 | irq_set_chip(irq, NULL); |
107 | } | 107 | } |
108 | 108 | ||
109 | static struct irq_host_ops hlwd_irq_host_ops = { | 109 | static struct irq_host_ops hlwd_irq_host_ops = { |
@@ -129,8 +129,8 @@ static unsigned int __hlwd_pic_get_irq(struct irq_host *h) | |||
129 | static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | 129 | static void hlwd_pic_irq_cascade(unsigned int cascade_virq, |
130 | struct irq_desc *desc) | 130 | struct irq_desc *desc) |
131 | { | 131 | { |
132 | struct irq_chip *chip = get_irq_desc_chip(desc); | 132 | struct irq_chip *chip = irq_desc_get_chip(desc); |
133 | struct irq_host *irq_host = get_irq_data(cascade_virq); | 133 | struct irq_host *irq_host = irq_get_handler_data(cascade_virq); |
134 | unsigned int virq; | 134 | unsigned int virq; |
135 | 135 | ||
136 | raw_spin_lock(&desc->lock); | 136 | raw_spin_lock(&desc->lock); |
@@ -145,7 +145,7 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | |||
145 | 145 | ||
146 | raw_spin_lock(&desc->lock); | 146 | raw_spin_lock(&desc->lock); |
147 | chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */ | 147 | chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */ |
148 | if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask) | 148 | if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask) |
149 | chip->irq_unmask(&desc->irq_data); | 149 | chip->irq_unmask(&desc->irq_data); |
150 | raw_spin_unlock(&desc->lock); | 150 | raw_spin_unlock(&desc->lock); |
151 | } | 151 | } |
@@ -218,8 +218,8 @@ void hlwd_pic_probe(void) | |||
218 | host = hlwd_pic_init(np); | 218 | host = hlwd_pic_init(np); |
219 | BUG_ON(!host); | 219 | BUG_ON(!host); |
220 | cascade_virq = irq_of_parse_and_map(np, 0); | 220 | cascade_virq = irq_of_parse_and_map(np, 0); |
221 | set_irq_data(cascade_virq, host); | 221 | irq_set_handler_data(cascade_virq, host); |
222 | set_irq_chained_handler(cascade_virq, | 222 | irq_set_chained_handler(cascade_virq, |
223 | hlwd_pic_irq_cascade); | 223 | hlwd_pic_irq_cascade); |
224 | hlwd_irq_host = host; | 224 | hlwd_irq_host = host; |
225 | break; | 225 | break; |
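The hlwd cascade keeps its ack/unmask bracketing around the demux, but the "is the parent currently disabled?" test now goes through irqd_irq_disabled() on the descriptor's irq_data rather than the old IRQ_DISABLED status bit. A sketch of that re-enable tail (the locking and demux loop are unchanged in the driver and omitted here):

    #include <linux/irq.h>

    /* Re-enable the parent after demuxing, unless it was disabled meanwhile. */
    static void my_cascade_unmask_parent(struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);

            if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask)
                    chip->irq_unmask(&desc->irq_data);
    }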
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index b21fde589ca7..487bda0d18d8 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c | |||
@@ -198,8 +198,8 @@ static void __init holly_init_IRQ(void) | |||
198 | cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); | 198 | cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); |
199 | pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); | 199 | pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); |
200 | tsi108_pci_int_init(cascade_node); | 200 | tsi108_pci_int_init(cascade_node); |
201 | set_irq_data(cascade_pci_irq, mpic); | 201 | irq_set_handler_data(cascade_pci_irq, mpic); |
202 | set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); | 202 | irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); |
203 | #endif | 203 | #endif |
204 | /* Configure MPIC outputs to CPU0 */ | 204 | /* Configure MPIC outputs to CPU0 */ |
205 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); | 205 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); |
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 7a2ba39d7811..1cb907c94359 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | |||
@@ -153,8 +153,8 @@ static void __init mpc7448_hpc2_init_IRQ(void) | |||
153 | DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, | 153 | DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, |
154 | (u32) cascade_pci_irq); | 154 | (u32) cascade_pci_irq); |
155 | tsi108_pci_int_init(cascade_node); | 155 | tsi108_pci_int_init(cascade_node); |
156 | set_irq_data(cascade_pci_irq, mpic); | 156 | irq_set_handler_data(cascade_pci_irq, mpic); |
157 | set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); | 157 | irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); |
158 | #endif | 158 | #endif |
159 | /* Configure MPIC outputs to CPU0 */ | 159 | /* Configure MPIC outputs to CPU0 */ |
160 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); | 160 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); |
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 4fb96f0b2df6..52a6889832c7 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -220,7 +220,7 @@ void __init iSeries_activate_IRQs() | |||
220 | if (!desc) | 220 | if (!desc) |
221 | continue; | 221 | continue; |
222 | 222 | ||
223 | chip = get_irq_desc_chip(desc); | 223 | chip = irq_desc_get_chip(desc); |
224 | if (chip && chip->irq_startup) { | 224 | if (chip && chip->irq_startup) { |
225 | raw_spin_lock_irqsave(&desc->lock, flags); | 225 | raw_spin_lock_irqsave(&desc->lock, flags); |
226 | chip->irq_startup(&desc->irq_data); | 226 | chip->irq_startup(&desc->irq_data); |
@@ -346,7 +346,7 @@ unsigned int iSeries_get_irq(void) | |||
346 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, | 346 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, |
347 | irq_hw_number_t hw) | 347 | irq_hw_number_t hw) |
348 | { | 348 | { |
349 | set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); | 349 | irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); |
350 | 350 | ||
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 04296ffff8bf..dd2e48b28508 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c | |||
@@ -498,7 +498,7 @@ void __devinit maple_pci_irq_fixup(struct pci_dev *dev) | |||
498 | printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); | 498 | printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); |
499 | dev->irq = irq_create_mapping(NULL, 1); | 499 | dev->irq = irq_create_mapping(NULL, 1); |
500 | if (dev->irq != NO_IRQ) | 500 | if (dev->irq != NO_IRQ) |
501 | set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); | 501 | irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); |
502 | } | 502 | } |
503 | 503 | ||
504 | /* Hide AMD8111 IDE interrupt when in legacy mode so | 504 | /* Hide AMD8111 IDE interrupt when in legacy mode so |
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index a6067b38d2ca..7c858e6f843c 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c | |||
@@ -239,7 +239,7 @@ static __init void pas_init_IRQ(void) | |||
239 | if (nmiprop) { | 239 | if (nmiprop) { |
240 | nmi_virq = irq_create_mapping(NULL, *nmiprop); | 240 | nmi_virq = irq_create_mapping(NULL, *nmiprop); |
241 | mpic_irq_set_priority(nmi_virq, 15); | 241 | mpic_irq_set_priority(nmi_virq, 15); |
242 | set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); | 242 | irq_set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); |
243 | mpic_unmask_irq(irq_get_irq_data(nmi_virq)); | 243 | mpic_unmask_irq(irq_get_irq_data(nmi_virq)); |
244 | } | 244 | } |
245 | 245 | ||
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 3bc075c788ef..ab6898942700 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -988,7 +988,7 @@ void __devinit pmac_pci_irq_fixup(struct pci_dev *dev) | |||
988 | dev->vendor == PCI_VENDOR_ID_DEC && | 988 | dev->vendor == PCI_VENDOR_ID_DEC && |
989 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { | 989 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { |
990 | dev->irq = irq_create_mapping(NULL, 60); | 990 | dev->irq = irq_create_mapping(NULL, 60); |
991 | set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); | 991 | irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); |
992 | } | 992 | } |
993 | #endif /* CONFIG_PPC32 */ | 993 | #endif /* CONFIG_PPC32 */ |
994 | } | 994 | } |
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index c55812bb6a51..023f24086a0a 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -157,7 +157,7 @@ static unsigned int pmac_startup_irq(struct irq_data *d) | |||
157 | int i = src >> 5; | 157 | int i = src >> 5; |
158 | 158 | ||
159 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 159 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
160 | if ((irq_to_desc(d->irq)->status & IRQ_LEVEL) == 0) | 160 | if (!irqd_is_level_type(d)) |
161 | out_le32(&pmac_irq_hw[i]->ack, bit); | 161 | out_le32(&pmac_irq_hw[i]->ack, bit); |
162 | __set_bit(src, ppc_cached_irq_mask); | 162 | __set_bit(src, ppc_cached_irq_mask); |
163 | __pmac_set_irq_mask(src, 0); | 163 | __pmac_set_irq_mask(src, 0); |
@@ -289,7 +289,6 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) | |||
289 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | 289 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, |
290 | irq_hw_number_t hw) | 290 | irq_hw_number_t hw) |
291 | { | 291 | { |
292 | struct irq_desc *desc = irq_to_desc(virq); | ||
293 | int level; | 292 | int level; |
294 | 293 | ||
295 | if (hw >= max_irqs) | 294 | if (hw >= max_irqs) |
@@ -300,9 +299,9 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | |||
300 | */ | 299 | */ |
301 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); | 300 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); |
302 | if (level) | 301 | if (level) |
303 | desc->status |= IRQ_LEVEL; | 302 | irq_set_status_flags(virq, IRQ_LEVEL); |
304 | set_irq_chip_and_handler(virq, &pmac_pic, level ? | 303 | irq_set_chip_and_handler(virq, &pmac_pic, |
305 | handle_level_irq : handle_edge_irq); | 304 | level ? handle_level_irq : handle_edge_irq); |
306 | return 0; | 305 | return 0; |
307 | } | 306 | } |
308 | 307 | ||
@@ -472,8 +471,8 @@ int of_irq_map_oldworld(struct device_node *device, int index, | |||
472 | 471 | ||
473 | static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) | 472 | static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) |
474 | { | 473 | { |
475 | struct irq_chip *chip = get_irq_desc_chip(desc); | 474 | struct irq_chip *chip = irq_desc_get_chip(desc); |
476 | struct mpic *mpic = get_irq_desc_data(desc); | 475 | struct mpic *mpic = irq_desc_get_handler_data(desc); |
477 | unsigned int cascade_irq = mpic_get_one_irq(mpic); | 476 | unsigned int cascade_irq = mpic_get_one_irq(mpic); |
478 | 477 | ||
479 | if (cascade_irq != NO_IRQ) | 478 | if (cascade_irq != NO_IRQ) |
@@ -591,8 +590,8 @@ static int __init pmac_pic_probe_mpic(void) | |||
591 | of_node_put(slave); | 590 | of_node_put(slave); |
592 | return 0; | 591 | return 0; |
593 | } | 592 | } |
594 | set_irq_data(cascade, mpic2); | 593 | irq_set_handler_data(cascade, mpic2); |
595 | set_irq_chained_handler(cascade, pmac_u3_cascade); | 594 | irq_set_chained_handler(cascade, pmac_u3_cascade); |
596 | 595 | ||
597 | of_node_put(slave); | 596 | of_node_put(slave); |
598 | return 0; | 597 | return 0; |
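The powermac map() hook keeps its per-source level/edge decision but expresses it with the new helpers: the IRQ_LEVEL flag goes in through irq_set_status_flags() and the flow handler is chosen inside the single irq_set_chip_and_handler() call. A condensed sketch, with a hypothetical my_source_is_level() predicate in place of the level_mask lookup:

    #include <linux/irq.h>
    #include <asm/irq.h>                    /* struct irq_host on powerpc */

    static struct irq_chip my_pmac_pic;     /* hypothetical chip */

    /* hypothetical: the real code consults a per-source level mask */
    static int my_source_is_level(irq_hw_number_t hw)
    {
            return 1;
    }

    static int my_host_map(struct irq_host *h, unsigned int virq,
                           irq_hw_number_t hw)
    {
            int level = my_source_is_level(hw);

            if (level)
                    irq_set_status_flags(virq, IRQ_LEVEL);
            irq_set_chip_and_handler(virq, &my_pmac_pic,
                                     level ? handle_level_irq : handle_edge_irq);
            return 0;
    }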
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 3988c86682a5..f2f6413b81d3 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c | |||
@@ -194,7 +194,7 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet, | |||
194 | pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, | 194 | pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, |
195 | outlet, cpu, *virq); | 195 | outlet, cpu, *virq); |
196 | 196 | ||
197 | result = set_irq_chip_data(*virq, pd); | 197 | result = irq_set_chip_data(*virq, pd); |
198 | 198 | ||
199 | if (result) { | 199 | if (result) { |
200 | pr_debug("%s:%d: set_irq_chip_data failed\n", | 200 | pr_debug("%s:%d: set_irq_chip_data failed\n", |
@@ -221,12 +221,12 @@ fail_create: | |||
221 | 221 | ||
222 | static int ps3_virq_destroy(unsigned int virq) | 222 | static int ps3_virq_destroy(unsigned int virq) |
223 | { | 223 | { |
224 | const struct ps3_private *pd = get_irq_chip_data(virq); | 224 | const struct ps3_private *pd = irq_get_chip_data(virq); |
225 | 225 | ||
226 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, | 226 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, |
227 | __LINE__, pd->ppe_id, pd->thread_id, virq); | 227 | __LINE__, pd->ppe_id, pd->thread_id, virq); |
228 | 228 | ||
229 | set_irq_chip_data(virq, NULL); | 229 | irq_set_chip_data(virq, NULL); |
230 | irq_dispose_mapping(virq); | 230 | irq_dispose_mapping(virq); |
231 | 231 | ||
232 | pr_debug("%s:%d <-\n", __func__, __LINE__); | 232 | pr_debug("%s:%d <-\n", __func__, __LINE__); |
@@ -256,7 +256,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet, | |||
256 | goto fail_setup; | 256 | goto fail_setup; |
257 | } | 257 | } |
258 | 258 | ||
259 | pd = get_irq_chip_data(*virq); | 259 | pd = irq_get_chip_data(*virq); |
260 | 260 | ||
261 | /* Binds outlet to cpu + virq. */ | 261 | /* Binds outlet to cpu + virq. */ |
262 | 262 | ||
@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(ps3_irq_plug_setup); | |||
291 | int ps3_irq_plug_destroy(unsigned int virq) | 291 | int ps3_irq_plug_destroy(unsigned int virq) |
292 | { | 292 | { |
293 | int result; | 293 | int result; |
294 | const struct ps3_private *pd = get_irq_chip_data(virq); | 294 | const struct ps3_private *pd = irq_get_chip_data(virq); |
295 | 295 | ||
296 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, | 296 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, |
297 | __LINE__, pd->ppe_id, pd->thread_id, virq); | 297 | __LINE__, pd->ppe_id, pd->thread_id, virq); |
@@ -661,7 +661,7 @@ static void dump_bmp(struct ps3_private* pd) {}; | |||
661 | 661 | ||
662 | static void ps3_host_unmap(struct irq_host *h, unsigned int virq) | 662 | static void ps3_host_unmap(struct irq_host *h, unsigned int virq) |
663 | { | 663 | { |
664 | set_irq_chip_data(virq, NULL); | 664 | irq_set_chip_data(virq, NULL); |
665 | } | 665 | } |
666 | 666 | ||
667 | static int ps3_host_map(struct irq_host *h, unsigned int virq, | 667 | static int ps3_host_map(struct irq_host *h, unsigned int virq, |
@@ -670,7 +670,7 @@ static int ps3_host_map(struct irq_host *h, unsigned int virq, | |||
670 | pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, | 670 | pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, |
671 | virq); | 671 | virq); |
672 | 672 | ||
673 | set_irq_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); | 673 | irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); |
674 | 674 | ||
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
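In the PS3 code the per-interrupt private structure travels as chip data, so the conversion is the symmetric irq_set_chip_data()/irq_get_chip_data() pair plus the NULL reset before the mapping is disposed. A sketch of that lifecycle around a hypothetical my_private structure:

    #include <linux/types.h>
    #include <linux/irq.h>
    #include <asm/irq.h>            /* irq_dispose_mapping() on powerpc */

    struct my_private {
            u64 thread_id;          /* hypothetical per-cpu binding info */
    };

    static int my_virq_setup(unsigned int virq, struct my_private *pd)
    {
            /* Attach the data the irq_chip callbacks will want later. */
            return irq_set_chip_data(virq, pd);
    }

    static void my_virq_destroy(unsigned int virq)
    {
            struct my_private *pd = irq_get_chip_data(virq);

            /* hypothetical: tear down pd's outlet with the hypervisor here */
            (void)pd;
            irq_set_chip_data(virq, NULL);
            irq_dispose_mapping(virq);
    }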
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 18ac801f8e90..38d24e7e7bb1 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -137,7 +137,7 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev) | |||
137 | if (entry->irq == NO_IRQ) | 137 | if (entry->irq == NO_IRQ) |
138 | continue; | 138 | continue; |
139 | 139 | ||
140 | set_irq_msi(entry->irq, NULL); | 140 | irq_set_msi_desc(entry->irq, NULL); |
141 | irq_dispose_mapping(entry->irq); | 141 | irq_dispose_mapping(entry->irq); |
142 | } | 142 | } |
143 | 143 | ||
@@ -437,7 +437,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
437 | } | 437 | } |
438 | 438 | ||
439 | dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); | 439 | dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); |
440 | set_irq_msi(virq, entry); | 440 | irq_set_msi_desc(virq, entry); |
441 | 441 | ||
442 | /* Read config space back so we can restore after reset */ | 442 | /* Read config space back so we can restore after reset */ |
443 | read_msi_msg(virq, &msg); | 443 | read_msi_msg(virq, &msg); |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 2a0089a2c829..c319d04aa799 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -114,7 +114,7 @@ static void __init fwnmi_init(void) | |||
114 | 114 | ||
115 | static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) | 115 | static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) |
116 | { | 116 | { |
117 | struct irq_chip *chip = get_irq_desc_chip(desc); | 117 | struct irq_chip *chip = irq_desc_get_chip(desc); |
118 | unsigned int cascade_irq = i8259_irq(); | 118 | unsigned int cascade_irq = i8259_irq(); |
119 | 119 | ||
120 | if (cascade_irq != NO_IRQ) | 120 | if (cascade_irq != NO_IRQ) |
@@ -169,7 +169,7 @@ static void __init pseries_setup_i8259_cascade(void) | |||
169 | printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); | 169 | printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); |
170 | i8259_init(found, intack); | 170 | i8259_init(found, intack); |
171 | of_node_put(found); | 171 | of_node_put(found); |
172 | set_irq_chained_handler(cascade, pseries_8259_cascade); | 172 | irq_set_chained_handler(cascade, pseries_8259_cascade); |
173 | } | 173 | } |
174 | 174 | ||
175 | static void __init pseries_mpic_init_IRQ(void) | 175 | static void __init pseries_mpic_init_IRQ(void) |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 01fea46c0335..6c1e638f0ce9 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -470,8 +470,8 @@ static int xics_host_map(struct irq_host *h, unsigned int virq, | |||
470 | /* Insert the interrupt mapping into the radix tree for fast lookup */ | 470 | /* Insert the interrupt mapping into the radix tree for fast lookup */ |
471 | irq_radix_revmap_insert(xics_host, virq, hw); | 471 | irq_radix_revmap_insert(xics_host, virq, hw); |
472 | 472 | ||
473 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 473 | irq_set_status_flags(virq, IRQ_LEVEL); |
474 | set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); | 474 | irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); |
475 | return 0; | 475 | return 0; |
476 | } | 476 | } |
477 | 477 | ||
@@ -600,7 +600,7 @@ static void xics_request_ipi(void) | |||
600 | * IPIs are marked IRQF_DISABLED as they must run with irqs | 600 | * IPIs are marked IRQF_DISABLED as they must run with irqs |
601 | * disabled | 601 | * disabled |
602 | */ | 602 | */ |
603 | set_irq_handler(ipi, handle_percpu_irq); | 603 | irq_set_handler(ipi, handle_percpu_irq); |
604 | if (firmware_has_feature(FW_FEATURE_LPAR)) | 604 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
605 | rc = request_irq(ipi, xics_ipi_action_lpar, | 605 | rc = request_irq(ipi, xics_ipi_action_lpar, |
606 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | 606 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); |
@@ -912,7 +912,7 @@ void xics_migrate_irqs_away(void) | |||
912 | if (desc == NULL || desc->action == NULL) | 912 | if (desc == NULL || desc->action == NULL) |
913 | continue; | 913 | continue; |
914 | 914 | ||
915 | chip = get_irq_desc_chip(desc); | 915 | chip = irq_desc_get_chip(desc); |
916 | if (chip == NULL || chip->irq_set_affinity == NULL) | 916 | if (chip == NULL || chip->irq_set_affinity == NULL) |
917 | continue; | 917 | continue; |
918 | 918 | ||
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 0476bcc7c3e1..8b5aba263323 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c | |||
@@ -103,8 +103,8 @@ static int cpm_pic_host_map(struct irq_host *h, unsigned int virq, | |||
103 | { | 103 | { |
104 | pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); | 104 | pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); |
105 | 105 | ||
106 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 106 | irq_set_status_flags(virq, IRQ_LEVEL); |
107 | set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); | 107 | irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); |
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
110 | 110 | ||
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index 473032556715..5495c1be472b 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c | |||
@@ -115,32 +115,25 @@ static void cpm2_ack(struct irq_data *d) | |||
115 | 115 | ||
116 | static void cpm2_end_irq(struct irq_data *d) | 116 | static void cpm2_end_irq(struct irq_data *d) |
117 | { | 117 | { |
118 | struct irq_desc *desc; | ||
119 | int bit, word; | 118 | int bit, word; |
120 | unsigned int irq_nr = virq_to_hw(d->irq); | 119 | unsigned int irq_nr = virq_to_hw(d->irq); |
121 | 120 | ||
122 | desc = irq_to_desc(irq_nr); | 121 | bit = irq_to_siubit[irq_nr]; |
123 | if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)) | 122 | word = irq_to_siureg[irq_nr]; |
124 | && desc->action) { | ||
125 | |||
126 | bit = irq_to_siubit[irq_nr]; | ||
127 | word = irq_to_siureg[irq_nr]; | ||
128 | 123 | ||
129 | ppc_cached_irq_mask[word] |= 1 << bit; | 124 | ppc_cached_irq_mask[word] |= 1 << bit; |
130 | out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); | 125 | out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); |
131 | 126 | ||
132 | /* | 127 | /* |
133 | * Work around large numbers of spurious IRQs on PowerPC 82xx | 128 | * Work around large numbers of spurious IRQs on PowerPC 82xx |
134 | * systems. | 129 | * systems. |
135 | */ | 130 | */ |
136 | mb(); | 131 | mb(); |
137 | } | ||
138 | } | 132 | } |
139 | 133 | ||
140 | static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) | 134 | static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) |
141 | { | 135 | { |
142 | unsigned int src = virq_to_hw(d->irq); | 136 | unsigned int src = virq_to_hw(d->irq); |
143 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
144 | unsigned int vold, vnew, edibit; | 137 | unsigned int vold, vnew, edibit; |
145 | 138 | ||
146 | /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or | 139 | /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or |
@@ -162,13 +155,11 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
162 | goto err_sense; | 155 | goto err_sense; |
163 | } | 156 | } |
164 | 157 | ||
165 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | 158 | irqd_set_trigger_type(d, flow_type); |
166 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | 159 | if (flow_type & IRQ_TYPE_LEVEL_LOW) |
167 | if (flow_type & IRQ_TYPE_LEVEL_LOW) { | 160 | __irq_set_handler_locked(d->irq, handle_level_irq); |
168 | desc->status |= IRQ_LEVEL; | 161 | else |
169 | desc->handle_irq = handle_level_irq; | 162 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
170 | } else | ||
171 | desc->handle_irq = handle_edge_irq; | ||
172 | 163 | ||
173 | /* internal IRQ senses are LEVEL_LOW | 164 | /* internal IRQ senses are LEVEL_LOW |
174 | * EXT IRQ and Port C IRQ senses are programmable | 165 | * EXT IRQ and Port C IRQ senses are programmable |
@@ -179,7 +170,8 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
179 | if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) | 170 | if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) |
180 | edibit = (31 - (CPM2_IRQ_PORTC0 - src)); | 171 | edibit = (31 - (CPM2_IRQ_PORTC0 - src)); |
181 | else | 172 | else |
182 | return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL; | 173 | return (flow_type & IRQ_TYPE_LEVEL_LOW) ? |
174 | IRQ_SET_MASK_OK_NOCOPY : -EINVAL; | ||
183 | 175 | ||
184 | vold = in_be32(&cpm2_intctl->ic_siexr); | 176 | vold = in_be32(&cpm2_intctl->ic_siexr); |
185 | 177 | ||
@@ -190,7 +182,7 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
190 | 182 | ||
191 | if (vold != vnew) | 183 | if (vold != vnew) |
192 | out_be32(&cpm2_intctl->ic_siexr, vnew); | 184 | out_be32(&cpm2_intctl->ic_siexr, vnew); |
193 | return 0; | 185 | return IRQ_SET_MASK_OK_NOCOPY; |
194 | 186 | ||
195 | err_sense: | 187 | err_sense: |
196 | pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type); | 188 | pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type); |
@@ -204,6 +196,7 @@ static struct irq_chip cpm2_pic = { | |||
204 | .irq_ack = cpm2_ack, | 196 | .irq_ack = cpm2_ack, |
205 | .irq_eoi = cpm2_end_irq, | 197 | .irq_eoi = cpm2_end_irq, |
206 | .irq_set_type = cpm2_set_irq_type, | 198 | .irq_set_type = cpm2_set_irq_type, |
199 | .flags = IRQCHIP_EOI_IF_HANDLED, | ||
207 | }; | 200 | }; |
208 | 201 | ||
209 | unsigned int cpm2_get_irq(void) | 202 | unsigned int cpm2_get_irq(void) |
@@ -226,8 +219,8 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq, | |||
226 | { | 219 | { |
227 | pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); | 220 | pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); |
228 | 221 | ||
229 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 222 | irq_set_status_flags(virq, IRQ_LEVEL); |
230 | set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq); | 223 | irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq); |
231 | return 0; | 224 | return 0; |
232 | } | 225 | } |
233 | 226 | ||
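The cpm2_pic rework is the deepest change in this stretch: cpm2_end_irq() loses its open-coded "only EOI if the interrupt was actually handled" test because the chip now sets IRQCHIP_EOI_IF_HANDLED and lets the core flow handler decide, and cpm2_set_irq_type() stops editing desc->status by hand, recording the trigger with irqd_set_trigger_type(), swapping the flow handler under the descriptor lock with __irq_set_handler_locked(), and returning IRQ_SET_MASK_OK_NOCOPY so the core does not overwrite the value it just stored. A compressed sketch of that set_type shape (the hardware programming is elided and the sense restrictions are hypothetical):

    #include <linux/errno.h>
    #include <linux/irq.h>

    static int my_set_irq_type(struct irq_data *d, unsigned int flow_type)
    {
            /* Reject sense modes the hypothetical controller cannot do. */
            if (flow_type & IRQ_TYPE_LEVEL_HIGH)
                    return -EINVAL;

            /*
             * Record the trigger ourselves; returning *_NOCOPY below tells
             * the core not to copy flow_type over it a second time.
             */
            irqd_set_trigger_type(d, flow_type);
            if (flow_type & IRQ_TYPE_LEVEL_LOW)
                    __irq_set_handler_locked(d->irq, handle_level_irq);
            else
                    __irq_set_handler_locked(d->irq, handle_edge_irq);

            /* ...program the controller's edge/level select register... */

            return IRQ_SET_MASK_OK_NOCOPY;
    }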
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 58e09b2833f2..d5679dc1e20f 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -64,10 +64,10 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, | |||
64 | struct fsl_msi *msi_data = h->host_data; | 64 | struct fsl_msi *msi_data = h->host_data; |
65 | struct irq_chip *chip = &fsl_msi_chip; | 65 | struct irq_chip *chip = &fsl_msi_chip; |
66 | 66 | ||
67 | irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING; | 67 | irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING); |
68 | 68 | ||
69 | set_irq_chip_data(virq, msi_data); | 69 | irq_set_chip_data(virq, msi_data); |
70 | set_irq_chip_and_handler(virq, chip, handle_edge_irq); | 70 | irq_set_chip_and_handler(virq, chip, handle_edge_irq); |
71 | 71 | ||
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
@@ -110,8 +110,8 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev) | |||
110 | list_for_each_entry(entry, &pdev->msi_list, list) { | 110 | list_for_each_entry(entry, &pdev->msi_list, list) { |
111 | if (entry->irq == NO_IRQ) | 111 | if (entry->irq == NO_IRQ) |
112 | continue; | 112 | continue; |
113 | msi_data = get_irq_data(entry->irq); | 113 | msi_data = irq_get_handler_data(entry->irq); |
114 | set_irq_msi(entry->irq, NULL); | 114 | irq_set_msi_desc(entry->irq, NULL); |
115 | msi_bitmap_free_hwirqs(&msi_data->bitmap, | 115 | msi_bitmap_free_hwirqs(&msi_data->bitmap, |
116 | virq_to_hw(entry->irq), 1); | 116 | virq_to_hw(entry->irq), 1); |
117 | irq_dispose_mapping(entry->irq); | 117 | irq_dispose_mapping(entry->irq); |
@@ -168,8 +168,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
168 | rc = -ENOSPC; | 168 | rc = -ENOSPC; |
169 | goto out_free; | 169 | goto out_free; |
170 | } | 170 | } |
171 | set_irq_data(virq, msi_data); | 171 | irq_set_handler_data(virq, msi_data); |
172 | set_irq_msi(virq, entry); | 172 | irq_set_msi_desc(virq, entry); |
173 | 173 | ||
174 | fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); | 174 | fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); |
175 | write_msi_msg(virq, &msg); | 175 | write_msi_msg(virq, &msg); |
@@ -183,7 +183,8 @@ out_free: | |||
183 | 183 | ||
184 | static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | 184 | static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) |
185 | { | 185 | { |
186 | struct irq_chip *chip = get_irq_desc_chip(desc); | 186 | struct irq_chip *chip = irq_desc_get_chip(desc); |
187 | struct irq_data *idata = irq_desc_get_irq_data(desc); | ||
187 | unsigned int cascade_irq; | 188 | unsigned int cascade_irq; |
188 | struct fsl_msi *msi_data; | 189 | struct fsl_msi *msi_data; |
189 | int msir_index = -1; | 190 | int msir_index = -1; |
@@ -192,20 +193,20 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
192 | u32 have_shift = 0; | 193 | u32 have_shift = 0; |
193 | struct fsl_msi_cascade_data *cascade_data; | 194 | struct fsl_msi_cascade_data *cascade_data; |
194 | 195 | ||
195 | cascade_data = (struct fsl_msi_cascade_data *)get_irq_data(irq); | 196 | cascade_data = (struct fsl_msi_cascade_data *)irq_get_handler_data(irq); |
196 | msi_data = cascade_data->msi_data; | 197 | msi_data = cascade_data->msi_data; |
197 | 198 | ||
198 | raw_spin_lock(&desc->lock); | 199 | raw_spin_lock(&desc->lock); |
199 | if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { | 200 | if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { |
200 | if (chip->irq_mask_ack) | 201 | if (chip->irq_mask_ack) |
201 | chip->irq_mask_ack(&desc->irq_data); | 202 | chip->irq_mask_ack(idata); |
202 | else { | 203 | else { |
203 | chip->irq_mask(&desc->irq_data); | 204 | chip->irq_mask(idata); |
204 | chip->irq_ack(&desc->irq_data); | 205 | chip->irq_ack(idata); |
205 | } | 206 | } |
206 | } | 207 | } |
207 | 208 | ||
208 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 209 | if (unlikely(irqd_irq_inprogress(idata))) |
209 | goto unlock; | 210 | goto unlock; |
210 | 211 | ||
211 | msir_index = cascade_data->index; | 212 | msir_index = cascade_data->index; |
@@ -213,7 +214,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
213 | if (msir_index >= NR_MSI_REG) | 214 | if (msir_index >= NR_MSI_REG) |
214 | cascade_irq = NO_IRQ; | 215 | cascade_irq = NO_IRQ; |
215 | 216 | ||
216 | desc->status |= IRQ_INPROGRESS; | 217 | irqd_set_chained_irq_inprogress(idata); |
217 | switch (msi_data->feature & FSL_PIC_IP_MASK) { | 218 | switch (msi_data->feature & FSL_PIC_IP_MASK) { |
218 | case FSL_PIC_IP_MPIC: | 219 | case FSL_PIC_IP_MPIC: |
219 | msir_value = fsl_msi_read(msi_data->msi_regs, | 220 | msir_value = fsl_msi_read(msi_data->msi_regs, |
@@ -235,15 +236,15 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
235 | have_shift += intr_index + 1; | 236 | have_shift += intr_index + 1; |
236 | msir_value = msir_value >> (intr_index + 1); | 237 | msir_value = msir_value >> (intr_index + 1); |
237 | } | 238 | } |
238 | desc->status &= ~IRQ_INPROGRESS; | 239 | irqd_clr_chained_irq_inprogress(idata); |
239 | 240 | ||
240 | switch (msi_data->feature & FSL_PIC_IP_MASK) { | 241 | switch (msi_data->feature & FSL_PIC_IP_MASK) { |
241 | case FSL_PIC_IP_MPIC: | 242 | case FSL_PIC_IP_MPIC: |
242 | chip->irq_eoi(&desc->irq_data); | 243 | chip->irq_eoi(idata); |
243 | break; | 244 | break; |
244 | case FSL_PIC_IP_IPIC: | 245 | case FSL_PIC_IP_IPIC: |
245 | if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask) | 246 | if (!irqd_irq_disabled(idata) && chip->irq_unmask) |
246 | chip->irq_unmask(&desc->irq_data); | 247 | chip->irq_unmask(idata); |
247 | break; | 248 | break; |
248 | } | 249 | } |
249 | unlock: | 250 | unlock: |
@@ -261,7 +262,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) | |||
261 | for (i = 0; i < NR_MSI_REG; i++) { | 262 | for (i = 0; i < NR_MSI_REG; i++) { |
262 | virq = msi->msi_virqs[i]; | 263 | virq = msi->msi_virqs[i]; |
263 | if (virq != NO_IRQ) { | 264 | if (virq != NO_IRQ) { |
264 | cascade_data = get_irq_data(virq); | 265 | cascade_data = irq_get_handler_data(virq); |
265 | kfree(cascade_data); | 266 | kfree(cascade_data); |
266 | irq_dispose_mapping(virq); | 267 | irq_dispose_mapping(virq); |
267 | } | 268 | } |
@@ -297,8 +298,8 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, | |||
297 | msi->msi_virqs[irq_index] = virt_msir; | 298 | msi->msi_virqs[irq_index] = virt_msir; |
298 | cascade_data->index = offset + irq_index; | 299 | cascade_data->index = offset + irq_index; |
299 | cascade_data->msi_data = msi; | 300 | cascade_data->msi_data = msi; |
300 | set_irq_data(virt_msir, cascade_data); | 301 | irq_set_handler_data(virt_msir, cascade_data); |
301 | set_irq_chained_handler(virt_msir, fsl_msi_cascade); | 302 | irq_set_chained_handler(virt_msir, fsl_msi_cascade); |
302 | 303 | ||
303 | return 0; | 304 | return 0; |
304 | } | 305 | } |
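The fsl_msi changes above follow the common conversion pattern for chained handlers: per-IRQ data is attached with irq_set_handler_data(), the cascade is installed with irq_set_chained_handler(), and the flow handler pulls the chip and irq_data back out through the irq_desc accessors instead of poking desc fields directly. A minimal sketch of that pattern, with hypothetical my_* names (only the irq_set_*()/irq_desc_get_*() calls are the real 2.6.39 genirq API):

#include <linux/kernel.h>
#include <linux/irq.h>

struct my_cascade_data {
	int index;			/* which MSI register this cascade serves */
};

static void my_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct my_cascade_data *data = irq_get_handler_data(irq);

	/* ack/mask through the parent chip, never through desc->... fields */
	if (chip->irq_mask_ack)
		chip->irq_mask_ack(idata);
	else {
		chip->irq_mask(idata);
		chip->irq_ack(idata);
	}

	pr_debug("cascade for MSI register %d\n", data->index);
	/* ... read the status register selected by data->index and call
	 * generic_handle_irq() for each pending child interrupt ... */

	if (chip->irq_unmask)
		chip->irq_unmask(idata);
}

static void my_msi_setup_cascade(unsigned int virq, struct my_cascade_data *data)
{
	irq_set_handler_data(virq, data);
	irq_set_chained_handler(virq, my_msi_cascade);
}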
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index aeda4c8d0a0a..142770cb84b6 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c | |||
@@ -175,13 +175,13 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq, | |||
175 | 175 | ||
176 | /* We block the internal cascade */ | 176 | /* We block the internal cascade */ |
177 | if (hw == 2) | 177 | if (hw == 2) |
178 | irq_to_desc(virq)->status |= IRQ_NOREQUEST; | 178 | irq_set_status_flags(virq, IRQ_NOREQUEST); |
179 | 179 | ||
180 | /* We use the level handler only for now, we might want to | 180 | /* We use the level handler only for now, we might want to |
181 | * be more cautious here but that works for now | 181 | * be more cautious here but that works for now |
182 | */ | 182 | */ |
183 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 183 | irq_set_status_flags(virq, IRQ_LEVEL); |
184 | set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq); | 184 | irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq); |
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
187 | 187 | ||
@@ -191,7 +191,7 @@ static void i8259_host_unmap(struct irq_host *h, unsigned int virq) | |||
191 | i8259_mask_irq(irq_get_irq_data(virq)); | 191 | i8259_mask_irq(irq_get_irq_data(virq)); |
192 | 192 | ||
193 | /* remove chip and handler */ | 193 | /* remove chip and handler */ |
194 | set_irq_chip_and_handler(virq, NULL, NULL); | 194 | irq_set_chip_and_handler(virq, NULL, NULL); |
195 | 195 | ||
196 | /* Make sure it's completed */ | 196 | /* Make sure it's completed */ |
197 | synchronize_irq(virq); | 197 | synchronize_irq(virq); |
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 497047dc986e..fa438be962b7 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -605,7 +605,6 @@ static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
605 | { | 605 | { |
606 | struct ipic *ipic = ipic_from_irq(d->irq); | 606 | struct ipic *ipic = ipic_from_irq(d->irq); |
607 | unsigned int src = ipic_irq_to_hw(d->irq); | 607 | unsigned int src = ipic_irq_to_hw(d->irq); |
608 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
609 | unsigned int vold, vnew, edibit; | 608 | unsigned int vold, vnew, edibit; |
610 | 609 | ||
611 | if (flow_type == IRQ_TYPE_NONE) | 610 | if (flow_type == IRQ_TYPE_NONE) |
@@ -623,17 +622,16 @@ static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
623 | printk(KERN_ERR "ipic: edge sense not supported on internal " | 622 | printk(KERN_ERR "ipic: edge sense not supported on internal " |
624 | "interrupts\n"); | 623 | "interrupts\n"); |
625 | return -EINVAL; | 624 | return -EINVAL; |
625 | |||
626 | } | 626 | } |
627 | 627 | ||
628 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | 628 | irqd_set_trigger_type(d, flow_type); |
629 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
630 | if (flow_type & IRQ_TYPE_LEVEL_LOW) { | 629 | if (flow_type & IRQ_TYPE_LEVEL_LOW) { |
631 | desc->status |= IRQ_LEVEL; | 630 | __irq_set_handler_locked(d->irq, handle_level_irq); |
632 | desc->handle_irq = handle_level_irq; | 631 | d->chip = &ipic_level_irq_chip; |
633 | desc->irq_data.chip = &ipic_level_irq_chip; | ||
634 | } else { | 632 | } else { |
635 | desc->handle_irq = handle_edge_irq; | 633 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
636 | desc->irq_data.chip = &ipic_edge_irq_chip; | 634 | d->chip = &ipic_edge_irq_chip; |
637 | } | 635 | } |
638 | 636 | ||
639 | /* only EXT IRQ senses are programmable on ipic | 637 | /* only EXT IRQ senses are programmable on ipic |
@@ -655,7 +653,7 @@ static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
655 | } | 653 | } |
656 | if (vold != vnew) | 654 | if (vold != vnew) |
657 | ipic_write(ipic->regs, IPIC_SECNR, vnew); | 655 | ipic_write(ipic->regs, IPIC_SECNR, vnew); |
658 | return 0; | 656 | return IRQ_SET_MASK_OK_NOCOPY; |
659 | } | 657 | } |
660 | 658 | ||
661 | /* level interrupts and edge interrupts have different ack operations */ | 659 | /* level interrupts and edge interrupts have different ack operations */ |
@@ -687,11 +685,11 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq, | |||
687 | { | 685 | { |
688 | struct ipic *ipic = h->host_data; | 686 | struct ipic *ipic = h->host_data; |
689 | 687 | ||
690 | set_irq_chip_data(virq, ipic); | 688 | irq_set_chip_data(virq, ipic); |
691 | set_irq_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); | 689 | irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); |
692 | 690 | ||
693 | /* Set default irq type */ | 691 | /* Set default irq type */ |
694 | set_irq_type(virq, IRQ_TYPE_NONE); | 692 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
695 | 693 | ||
696 | return 0; | 694 | return 0; |
697 | } | 695 | } |
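The ipic (and cpm2/mpic) set_type conversions all reduce to the same shape: record the trigger with irqd_set_trigger_type(), switch the flow handler under the descriptor lock with __irq_set_handler_locked(), and return IRQ_SET_MASK_OK_NOCOPY so the genirq core does not overwrite the type the callback just stored. A hedged sketch of that shape, with a hypothetical my_pic controller and the register programming left as a comment:

#include <linux/irq.h>

static int my_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	/* ... program the controller's sense/polarity registers for d->irq ... */

	return IRQ_SET_MASK_OK_NOCOPY;	/* core must not copy the type again */
}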
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index 1a75a7fb4a99..f550e23632f8 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c | |||
@@ -72,13 +72,6 @@ static void mpc8xx_end_irq(struct irq_data *d) | |||
72 | 72 | ||
73 | static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) | 73 | static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) |
74 | { | 74 | { |
75 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
76 | |||
77 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
78 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
79 | if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
80 | desc->status |= IRQ_LEVEL; | ||
81 | |||
82 | if (flow_type & IRQ_TYPE_EDGE_FALLING) { | 75 | if (flow_type & IRQ_TYPE_EDGE_FALLING) { |
83 | irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq; | 76 | irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq; |
84 | unsigned int siel = in_be32(&siu_reg->sc_siel); | 77 | unsigned int siel = in_be32(&siu_reg->sc_siel); |
@@ -87,7 +80,7 @@ static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
87 | if ((hw & 1) == 0) { | 80 | if ((hw & 1) == 0) { |
88 | siel |= (0x80000000 >> hw); | 81 | siel |= (0x80000000 >> hw); |
89 | out_be32(&siu_reg->sc_siel, siel); | 82 | out_be32(&siu_reg->sc_siel, siel); |
90 | desc->handle_irq = handle_edge_irq; | 83 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
91 | } | 84 | } |
92 | } | 85 | } |
93 | return 0; | 86 | return 0; |
@@ -124,7 +117,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, | |||
124 | pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); | 117 | pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); |
125 | 118 | ||
126 | /* Set default irq handle */ | 119 | /* Set default irq handle */ |
127 | set_irq_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq); | 120 | irq_set_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq); |
128 | return 0; | 121 | return 0; |
129 | } | 122 | } |
130 | 123 | ||
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c index 232e701245d7..0892a2841c2b 100644 --- a/arch/powerpc/sysdev/mpc8xxx_gpio.c +++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c | |||
@@ -145,7 +145,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) | |||
145 | 145 | ||
146 | static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) | 146 | static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) |
147 | { | 147 | { |
148 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc); | 148 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); |
149 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | 149 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; |
150 | unsigned int mask; | 150 | unsigned int mask; |
151 | 151 | ||
@@ -278,9 +278,9 @@ static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, | |||
278 | if (mpc8xxx_gc->of_dev_id_data) | 278 | if (mpc8xxx_gc->of_dev_id_data) |
279 | mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; | 279 | mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; |
280 | 280 | ||
281 | set_irq_chip_data(virq, h->host_data); | 281 | irq_set_chip_data(virq, h->host_data); |
282 | set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); | 282 | irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); |
283 | set_irq_type(virq, IRQ_TYPE_NONE); | 283 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
284 | 284 | ||
285 | return 0; | 285 | return 0; |
286 | } | 286 | } |
@@ -369,8 +369,8 @@ static void __init mpc8xxx_add_controller(struct device_node *np) | |||
369 | out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); | 369 | out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); |
370 | out_be32(mm_gc->regs + GPIO_IMR, 0); | 370 | out_be32(mm_gc->regs + GPIO_IMR, 0); |
371 | 371 | ||
372 | set_irq_data(hwirq, mpc8xxx_gc); | 372 | irq_set_handler_data(hwirq, mpc8xxx_gc); |
373 | set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); | 373 | irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); |
374 | 374 | ||
375 | skip_irq: | 375 | skip_irq: |
376 | return; | 376 | return; |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 0f7c6718d261..f91c065bed5a 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -361,7 +361,7 @@ static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) | |||
361 | } | 361 | } |
362 | 362 | ||
363 | static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, | 363 | static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, |
364 | unsigned int irqflags) | 364 | bool level) |
365 | { | 365 | { |
366 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; | 366 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; |
367 | unsigned long flags; | 367 | unsigned long flags; |
@@ -370,14 +370,14 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, | |||
370 | if (fixup->base == NULL) | 370 | if (fixup->base == NULL) |
371 | return; | 371 | return; |
372 | 372 | ||
373 | DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n", | 373 | DBG("startup_ht_interrupt(0x%x) index: %d\n", |
374 | source, irqflags, fixup->index); | 374 | source, fixup->index); |
375 | raw_spin_lock_irqsave(&mpic->fixup_lock, flags); | 375 | raw_spin_lock_irqsave(&mpic->fixup_lock, flags); |
376 | /* Enable and configure */ | 376 | /* Enable and configure */ |
377 | writeb(0x10 + 2 * fixup->index, fixup->base + 2); | 377 | writeb(0x10 + 2 * fixup->index, fixup->base + 2); |
378 | tmp = readl(fixup->base + 4); | 378 | tmp = readl(fixup->base + 4); |
379 | tmp &= ~(0x23U); | 379 | tmp &= ~(0x23U); |
380 | if (irqflags & IRQ_LEVEL) | 380 | if (level) |
381 | tmp |= 0x22; | 381 | tmp |= 0x22; |
382 | writel(tmp, fixup->base + 4); | 382 | writel(tmp, fixup->base + 4); |
383 | raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); | 383 | raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); |
@@ -389,8 +389,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, | |||
389 | #endif | 389 | #endif |
390 | } | 390 | } |
391 | 391 | ||
392 | static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, | 392 | static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source) |
393 | unsigned int irqflags) | ||
394 | { | 393 | { |
395 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; | 394 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; |
396 | unsigned long flags; | 395 | unsigned long flags; |
@@ -399,7 +398,7 @@ static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, | |||
399 | if (fixup->base == NULL) | 398 | if (fixup->base == NULL) |
400 | return; | 399 | return; |
401 | 400 | ||
402 | DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags); | 401 | DBG("shutdown_ht_interrupt(0x%x)\n", source); |
403 | 402 | ||
404 | /* Disable */ | 403 | /* Disable */ |
405 | raw_spin_lock_irqsave(&mpic->fixup_lock, flags); | 404 | raw_spin_lock_irqsave(&mpic->fixup_lock, flags); |
@@ -616,7 +615,7 @@ static struct mpic *mpic_find(unsigned int irq) | |||
616 | if (irq < NUM_ISA_INTERRUPTS) | 615 | if (irq < NUM_ISA_INTERRUPTS) |
617 | return NULL; | 616 | return NULL; |
618 | 617 | ||
619 | return get_irq_chip_data(irq); | 618 | return irq_get_chip_data(irq); |
620 | } | 619 | } |
621 | 620 | ||
622 | /* Determine if the linux irq is an IPI */ | 621 | /* Determine if the linux irq is an IPI */ |
@@ -650,7 +649,7 @@ static inline struct mpic * mpic_from_ipi(struct irq_data *d) | |||
650 | /* Get the mpic structure from the irq number */ | 649 | /* Get the mpic structure from the irq number */ |
651 | static inline struct mpic * mpic_from_irq(unsigned int irq) | 650 | static inline struct mpic * mpic_from_irq(unsigned int irq) |
652 | { | 651 | { |
653 | return get_irq_chip_data(irq); | 652 | return irq_get_chip_data(irq); |
654 | } | 653 | } |
655 | 654 | ||
656 | /* Get the mpic structure from the irq data */ | 655 | /* Get the mpic structure from the irq data */ |
@@ -738,7 +737,7 @@ static void mpic_unmask_ht_irq(struct irq_data *d) | |||
738 | 737 | ||
739 | mpic_unmask_irq(d); | 738 | mpic_unmask_irq(d); |
740 | 739 | ||
741 | if (irq_to_desc(d->irq)->status & IRQ_LEVEL) | 740 | if (irqd_is_level_type(d)) |
742 | mpic_ht_end_irq(mpic, src); | 741 | mpic_ht_end_irq(mpic, src); |
743 | } | 742 | } |
744 | 743 | ||
@@ -748,7 +747,7 @@ static unsigned int mpic_startup_ht_irq(struct irq_data *d) | |||
748 | unsigned int src = mpic_irq_to_hw(d->irq); | 747 | unsigned int src = mpic_irq_to_hw(d->irq); |
749 | 748 | ||
750 | mpic_unmask_irq(d); | 749 | mpic_unmask_irq(d); |
751 | mpic_startup_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status); | 750 | mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d)); |
752 | 751 | ||
753 | return 0; | 752 | return 0; |
754 | } | 753 | } |
@@ -758,7 +757,7 @@ static void mpic_shutdown_ht_irq(struct irq_data *d) | |||
758 | struct mpic *mpic = mpic_from_irq_data(d); | 757 | struct mpic *mpic = mpic_from_irq_data(d); |
759 | unsigned int src = mpic_irq_to_hw(d->irq); | 758 | unsigned int src = mpic_irq_to_hw(d->irq); |
760 | 759 | ||
761 | mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status); | 760 | mpic_shutdown_ht_interrupt(mpic, src); |
762 | mpic_mask_irq(d); | 761 | mpic_mask_irq(d); |
763 | } | 762 | } |
764 | 763 | ||
@@ -775,7 +774,7 @@ static void mpic_end_ht_irq(struct irq_data *d) | |||
775 | * latched another edge interrupt coming in anyway | 774 | * latched another edge interrupt coming in anyway |
776 | */ | 775 | */ |
777 | 776 | ||
778 | if (irq_to_desc(d->irq)->status & IRQ_LEVEL) | 777 | if (irqd_is_level_type(d)) |
779 | mpic_ht_end_irq(mpic, src); | 778 | mpic_ht_end_irq(mpic, src); |
780 | mpic_eoi(mpic); | 779 | mpic_eoi(mpic); |
781 | } | 780 | } |
@@ -864,7 +863,6 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
864 | { | 863 | { |
865 | struct mpic *mpic = mpic_from_irq_data(d); | 864 | struct mpic *mpic = mpic_from_irq_data(d); |
866 | unsigned int src = mpic_irq_to_hw(d->irq); | 865 | unsigned int src = mpic_irq_to_hw(d->irq); |
867 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
868 | unsigned int vecpri, vold, vnew; | 866 | unsigned int vecpri, vold, vnew; |
869 | 867 | ||
870 | DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", | 868 | DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", |
@@ -879,10 +877,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
879 | if (flow_type == IRQ_TYPE_NONE) | 877 | if (flow_type == IRQ_TYPE_NONE) |
880 | flow_type = IRQ_TYPE_LEVEL_LOW; | 878 | flow_type = IRQ_TYPE_LEVEL_LOW; |
881 | 879 | ||
882 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | 880 | irqd_set_trigger_type(d, flow_type); |
883 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
884 | if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
885 | desc->status |= IRQ_LEVEL; | ||
886 | 881 | ||
887 | if (mpic_is_ht_interrupt(mpic, src)) | 882 | if (mpic_is_ht_interrupt(mpic, src)) |
888 | vecpri = MPIC_VECPRI_POLARITY_POSITIVE | | 883 | vecpri = MPIC_VECPRI_POLARITY_POSITIVE | |
@@ -897,7 +892,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
897 | if (vold != vnew) | 892 | if (vold != vnew) |
898 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); | 893 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); |
899 | 894 | ||
900 | return 0; | 895 | return IRQ_SET_MASK_OK_NOCOPY; |
901 | } | 896 | } |
902 | 897 | ||
903 | void mpic_set_vector(unsigned int virq, unsigned int vector) | 898 | void mpic_set_vector(unsigned int virq, unsigned int vector) |
@@ -983,8 +978,8 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, | |||
983 | WARN_ON(!(mpic->flags & MPIC_PRIMARY)); | 978 | WARN_ON(!(mpic->flags & MPIC_PRIMARY)); |
984 | 979 | ||
985 | DBG("mpic: mapping as IPI\n"); | 980 | DBG("mpic: mapping as IPI\n"); |
986 | set_irq_chip_data(virq, mpic); | 981 | irq_set_chip_data(virq, mpic); |
987 | set_irq_chip_and_handler(virq, &mpic->hc_ipi, | 982 | irq_set_chip_and_handler(virq, &mpic->hc_ipi, |
988 | handle_percpu_irq); | 983 | handle_percpu_irq); |
989 | return 0; | 984 | return 0; |
990 | } | 985 | } |
@@ -1006,11 +1001,11 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, | |||
1006 | 1001 | ||
1007 | DBG("mpic: mapping to irq chip @%p\n", chip); | 1002 | DBG("mpic: mapping to irq chip @%p\n", chip); |
1008 | 1003 | ||
1009 | set_irq_chip_data(virq, mpic); | 1004 | irq_set_chip_data(virq, mpic); |
1010 | set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq); | 1005 | irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq); |
1011 | 1006 | ||
1012 | /* Set default irq type */ | 1007 | /* Set default irq type */ |
1013 | set_irq_type(virq, IRQ_TYPE_NONE); | 1008 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
1014 | 1009 | ||
1015 | /* If the MPIC was reset, then all vectors have already been | 1010 | /* If the MPIC was reset, then all vectors have already been |
1016 | * initialized. Otherwise, a per source lazy initialization | 1011 | * initialized. Otherwise, a per source lazy initialization |
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c index 0b7794acfce1..38e62382070c 100644 --- a/arch/powerpc/sysdev/mpic_pasemi_msi.c +++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c | |||
@@ -81,7 +81,7 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) | |||
81 | if (entry->irq == NO_IRQ) | 81 | if (entry->irq == NO_IRQ) |
82 | continue; | 82 | continue; |
83 | 83 | ||
84 | set_irq_msi(entry->irq, NULL); | 84 | irq_set_msi_desc(entry->irq, NULL); |
85 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, | 85 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, |
86 | virq_to_hw(entry->irq), ALLOC_CHUNK); | 86 | virq_to_hw(entry->irq), ALLOC_CHUNK); |
87 | irq_dispose_mapping(entry->irq); | 87 | irq_dispose_mapping(entry->irq); |
@@ -131,9 +131,9 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
131 | */ | 131 | */ |
132 | mpic_set_vector(virq, 0); | 132 | mpic_set_vector(virq, 0); |
133 | 133 | ||
134 | set_irq_msi(virq, entry); | 134 | irq_set_msi_desc(virq, entry); |
135 | set_irq_chip(virq, &mpic_pasemi_msi_chip); | 135 | irq_set_chip(virq, &mpic_pasemi_msi_chip); |
136 | set_irq_type(virq, IRQ_TYPE_EDGE_RISING); | 136 | irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); |
137 | 137 | ||
138 | pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \ | 138 | pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \ |
139 | "addr 0x%x\n", virq, hwirq, msg.address_lo); | 139 | "addr 0x%x\n", virq, hwirq, msg.address_lo); |
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index 71900ac78270..9a7aa0ed9c1c 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c | |||
@@ -129,7 +129,7 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) | |||
129 | if (entry->irq == NO_IRQ) | 129 | if (entry->irq == NO_IRQ) |
130 | continue; | 130 | continue; |
131 | 131 | ||
132 | set_irq_msi(entry->irq, NULL); | 132 | irq_set_msi_desc(entry->irq, NULL); |
133 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, | 133 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, |
134 | virq_to_hw(entry->irq), 1); | 134 | virq_to_hw(entry->irq), 1); |
135 | irq_dispose_mapping(entry->irq); | 135 | irq_dispose_mapping(entry->irq); |
@@ -166,9 +166,9 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
166 | return -ENOSPC; | 166 | return -ENOSPC; |
167 | } | 167 | } |
168 | 168 | ||
169 | set_irq_msi(virq, entry); | 169 | irq_set_msi_desc(virq, entry); |
170 | set_irq_chip(virq, &mpic_u3msi_chip); | 170 | irq_set_chip(virq, &mpic_u3msi_chip); |
171 | set_irq_type(virq, IRQ_TYPE_EDGE_RISING); | 171 | irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); |
172 | 172 | ||
173 | pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", | 173 | pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", |
174 | virq, hwirq, (unsigned long)addr); | 174 | virq, hwirq, (unsigned long)addr); |
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c index bc61ebb8987c..e9c633c7c083 100644 --- a/arch/powerpc/sysdev/mv64x60_pic.c +++ b/arch/powerpc/sysdev/mv64x60_pic.c | |||
@@ -213,11 +213,12 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq, | |||
213 | { | 213 | { |
214 | int level1; | 214 | int level1; |
215 | 215 | ||
216 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 216 | irq_set_status_flags(virq, IRQ_LEVEL); |
217 | 217 | ||
218 | level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; | 218 | level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; |
219 | BUG_ON(level1 > MV64x60_LEVEL1_GPP); | 219 | BUG_ON(level1 > MV64x60_LEVEL1_GPP); |
220 | set_irq_chip_and_handler(virq, mv64x60_chips[level1], handle_level_irq); | 220 | irq_set_chip_and_handler(virq, mv64x60_chips[level1], |
221 | handle_level_irq); | ||
221 | 222 | ||
222 | return 0; | 223 | return 0; |
223 | } | 224 | } |
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 8c9ded8ea07c..832d6924ad1c 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c | |||
@@ -189,7 +189,7 @@ static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg | |||
189 | 189 | ||
190 | static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) | 190 | static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) |
191 | { | 191 | { |
192 | return get_irq_chip_data(virq); | 192 | return irq_get_chip_data(virq); |
193 | } | 193 | } |
194 | 194 | ||
195 | static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) | 195 | static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) |
@@ -267,10 +267,10 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq, | |||
267 | /* Default chip */ | 267 | /* Default chip */ |
268 | chip = &qe_ic->hc_irq; | 268 | chip = &qe_ic->hc_irq; |
269 | 269 | ||
270 | set_irq_chip_data(virq, qe_ic); | 270 | irq_set_chip_data(virq, qe_ic); |
271 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 271 | irq_set_status_flags(virq, IRQ_LEVEL); |
272 | 272 | ||
273 | set_irq_chip_and_handler(virq, chip, handle_level_irq); | 273 | irq_set_chip_and_handler(virq, chip, handle_level_irq); |
274 | 274 | ||
275 | return 0; | 275 | return 0; |
276 | } | 276 | } |
@@ -386,13 +386,13 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags, | |||
386 | 386 | ||
387 | qe_ic_write(qe_ic->regs, QEIC_CICR, temp); | 387 | qe_ic_write(qe_ic->regs, QEIC_CICR, temp); |
388 | 388 | ||
389 | set_irq_data(qe_ic->virq_low, qe_ic); | 389 | irq_set_handler_data(qe_ic->virq_low, qe_ic); |
390 | set_irq_chained_handler(qe_ic->virq_low, low_handler); | 390 | irq_set_chained_handler(qe_ic->virq_low, low_handler); |
391 | 391 | ||
392 | if (qe_ic->virq_high != NO_IRQ && | 392 | if (qe_ic->virq_high != NO_IRQ && |
393 | qe_ic->virq_high != qe_ic->virq_low) { | 393 | qe_ic->virq_high != qe_ic->virq_low) { |
394 | set_irq_data(qe_ic->virq_high, qe_ic); | 394 | irq_set_handler_data(qe_ic->virq_high, qe_ic); |
395 | set_irq_chained_handler(qe_ic->virq_high, high_handler); | 395 | irq_set_chained_handler(qe_ic->virq_high, high_handler); |
396 | } | 396 | } |
397 | } | 397 | } |
398 | 398 | ||
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 02c91db90037..4d18658116e5 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c | |||
@@ -391,8 +391,8 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq, | |||
391 | DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); | 391 | DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); |
392 | if ((virq >= 1) && (virq <= 4)){ | 392 | if ((virq >= 1) && (virq <= 4)){ |
393 | irq = virq + IRQ_PCI_INTAD_BASE - 1; | 393 | irq = virq + IRQ_PCI_INTAD_BASE - 1; |
394 | irq_to_desc(irq)->status |= IRQ_LEVEL; | 394 | irq_set_status_flags(irq, IRQ_LEVEL); |
395 | set_irq_chip(irq, &tsi108_pci_irq); | 395 | irq_set_chip(irq, &tsi108_pci_irq); |
396 | } | 396 | } |
397 | return 0; | 397 | return 0; |
398 | } | 398 | } |
@@ -431,7 +431,7 @@ void __init tsi108_pci_int_init(struct device_node *node) | |||
431 | 431 | ||
432 | void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) | 432 | void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) |
433 | { | 433 | { |
434 | struct irq_chip *chip = get_irq_desc_chip(desc); | 434 | struct irq_chip *chip = irq_desc_get_chip(desc); |
435 | unsigned int cascade_irq = get_pci_source(); | 435 | unsigned int cascade_irq = get_pci_source(); |
436 | 436 | ||
437 | if (cascade_irq != NO_IRQ) | 437 | if (cascade_irq != NO_IRQ) |
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 835f7958b237..5d9138516628 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c | |||
@@ -57,7 +57,6 @@ struct uic { | |||
57 | 57 | ||
58 | static void uic_unmask_irq(struct irq_data *d) | 58 | static void uic_unmask_irq(struct irq_data *d) |
59 | { | 59 | { |
60 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
61 | struct uic *uic = irq_data_get_irq_chip_data(d); | 60 | struct uic *uic = irq_data_get_irq_chip_data(d); |
62 | unsigned int src = uic_irq_to_hw(d->irq); | 61 | unsigned int src = uic_irq_to_hw(d->irq); |
63 | unsigned long flags; | 62 | unsigned long flags; |
@@ -66,7 +65,7 @@ static void uic_unmask_irq(struct irq_data *d) | |||
66 | sr = 1 << (31-src); | 65 | sr = 1 << (31-src); |
67 | spin_lock_irqsave(&uic->lock, flags); | 66 | spin_lock_irqsave(&uic->lock, flags); |
68 | /* ack level-triggered interrupts here */ | 67 | /* ack level-triggered interrupts here */ |
69 | if (desc->status & IRQ_LEVEL) | 68 | if (irqd_is_level_type(d)) |
70 | mtdcr(uic->dcrbase + UIC_SR, sr); | 69 | mtdcr(uic->dcrbase + UIC_SR, sr); |
71 | er = mfdcr(uic->dcrbase + UIC_ER); | 70 | er = mfdcr(uic->dcrbase + UIC_ER); |
72 | er |= sr; | 71 | er |= sr; |
@@ -101,7 +100,6 @@ static void uic_ack_irq(struct irq_data *d) | |||
101 | 100 | ||
102 | static void uic_mask_ack_irq(struct irq_data *d) | 101 | static void uic_mask_ack_irq(struct irq_data *d) |
103 | { | 102 | { |
104 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
105 | struct uic *uic = irq_data_get_irq_chip_data(d); | 103 | struct uic *uic = irq_data_get_irq_chip_data(d); |
106 | unsigned int src = uic_irq_to_hw(d->irq); | 104 | unsigned int src = uic_irq_to_hw(d->irq); |
107 | unsigned long flags; | 105 | unsigned long flags; |
@@ -120,7 +118,7 @@ static void uic_mask_ack_irq(struct irq_data *d) | |||
120 | * level interrupts are ack'ed after the actual | 118 | * level interrupts are ack'ed after the actual |
121 | * isr call in the uic_unmask_irq() | 119 | * isr call in the uic_unmask_irq() |
122 | */ | 120 | */ |
123 | if (!(desc->status & IRQ_LEVEL)) | 121 | if (!irqd_is_level_type(d)) |
124 | mtdcr(uic->dcrbase + UIC_SR, sr); | 122 | mtdcr(uic->dcrbase + UIC_SR, sr); |
125 | spin_unlock_irqrestore(&uic->lock, flags); | 123 | spin_unlock_irqrestore(&uic->lock, flags); |
126 | } | 124 | } |
@@ -129,7 +127,6 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
129 | { | 127 | { |
130 | struct uic *uic = irq_data_get_irq_chip_data(d); | 128 | struct uic *uic = irq_data_get_irq_chip_data(d); |
131 | unsigned int src = uic_irq_to_hw(d->irq); | 129 | unsigned int src = uic_irq_to_hw(d->irq); |
132 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
133 | unsigned long flags; | 130 | unsigned long flags; |
134 | int trigger, polarity; | 131 | int trigger, polarity; |
135 | u32 tr, pr, mask; | 132 | u32 tr, pr, mask; |
@@ -166,11 +163,6 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
166 | mtdcr(uic->dcrbase + UIC_PR, pr); | 163 | mtdcr(uic->dcrbase + UIC_PR, pr); |
167 | mtdcr(uic->dcrbase + UIC_TR, tr); | 164 | mtdcr(uic->dcrbase + UIC_TR, tr); |
168 | 165 | ||
169 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
170 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
171 | if (!trigger) | ||
172 | desc->status |= IRQ_LEVEL; | ||
173 | |||
174 | spin_unlock_irqrestore(&uic->lock, flags); | 166 | spin_unlock_irqrestore(&uic->lock, flags); |
175 | 167 | ||
176 | return 0; | 168 | return 0; |
@@ -190,13 +182,13 @@ static int uic_host_map(struct irq_host *h, unsigned int virq, | |||
190 | { | 182 | { |
191 | struct uic *uic = h->host_data; | 183 | struct uic *uic = h->host_data; |
192 | 184 | ||
193 | set_irq_chip_data(virq, uic); | 185 | irq_set_chip_data(virq, uic); |
194 | /* Despite the name, handle_level_irq() works for both level | 186 | /* Despite the name, handle_level_irq() works for both level |
195 | * and edge irqs on UIC. FIXME: check this is correct */ | 187 | * and edge irqs on UIC. FIXME: check this is correct */ |
196 | set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); | 188 | irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); |
197 | 189 | ||
198 | /* Set default irq type */ | 190 | /* Set default irq type */ |
199 | set_irq_type(virq, IRQ_TYPE_NONE); | 191 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
200 | 192 | ||
201 | return 0; | 193 | return 0; |
202 | } | 194 | } |
@@ -220,17 +212,18 @@ static struct irq_host_ops uic_host_ops = { | |||
220 | 212 | ||
221 | void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) | 213 | void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) |
222 | { | 214 | { |
223 | struct irq_chip *chip = get_irq_desc_chip(desc); | 215 | struct irq_chip *chip = irq_desc_get_chip(desc); |
224 | struct uic *uic = get_irq_data(virq); | 216 | struct irq_data *idata = irq_desc_get_irq_data(desc); |
217 | struct uic *uic = irq_get_handler_data(virq); | ||
225 | u32 msr; | 218 | u32 msr; |
226 | int src; | 219 | int src; |
227 | int subvirq; | 220 | int subvirq; |
228 | 221 | ||
229 | raw_spin_lock(&desc->lock); | 222 | raw_spin_lock(&desc->lock); |
230 | if (desc->status & IRQ_LEVEL) | 223 | if (irqd_is_level_type(idata)) |
231 | chip->irq_mask(&desc->irq_data); | 224 | chip->irq_mask(idata); |
232 | else | 225 | else |
233 | chip->irq_mask_ack(&desc->irq_data); | 226 | chip->irq_mask_ack(idata); |
234 | raw_spin_unlock(&desc->lock); | 227 | raw_spin_unlock(&desc->lock); |
235 | 228 | ||
236 | msr = mfdcr(uic->dcrbase + UIC_MSR); | 229 | msr = mfdcr(uic->dcrbase + UIC_MSR); |
@@ -244,10 +237,10 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
244 | 237 | ||
245 | uic_irq_ret: | 238 | uic_irq_ret: |
246 | raw_spin_lock(&desc->lock); | 239 | raw_spin_lock(&desc->lock); |
247 | if (desc->status & IRQ_LEVEL) | 240 | if (irqd_is_level_type(idata)) |
248 | chip->irq_ack(&desc->irq_data); | 241 | chip->irq_ack(idata); |
249 | if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask) | 242 | if (!irqd_irq_disabled(idata) && chip->irq_unmask) |
250 | chip->irq_unmask(&desc->irq_data); | 243 | chip->irq_unmask(idata); |
251 | raw_spin_unlock(&desc->lock); | 244 | raw_spin_unlock(&desc->lock); |
252 | } | 245 | } |
253 | 246 | ||
@@ -336,8 +329,8 @@ void __init uic_init_tree(void) | |||
336 | 329 | ||
337 | cascade_virq = irq_of_parse_and_map(np, 0); | 330 | cascade_virq = irq_of_parse_and_map(np, 0); |
338 | 331 | ||
339 | set_irq_data(cascade_virq, uic); | 332 | irq_set_handler_data(cascade_virq, uic); |
340 | set_irq_chained_handler(cascade_virq, uic_irq_cascade); | 333 | irq_set_chained_handler(cascade_virq, uic_irq_cascade); |
341 | 334 | ||
342 | /* FIXME: setup critical cascade?? */ | 335 | /* FIXME: setup critical cascade?? */ |
343 | } | 336 | } |
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 7436f3ed4df6..0a13fc19e287 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c | |||
@@ -79,12 +79,6 @@ static void xilinx_intc_mask(struct irq_data *d) | |||
79 | 79 | ||
80 | static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type) | 80 | static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type) |
81 | { | 81 | { |
82 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
83 | |||
84 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
85 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
86 | if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
87 | desc->status |= IRQ_LEVEL; | ||
88 | return 0; | 82 | return 0; |
89 | } | 83 | } |
90 | 84 | ||
@@ -170,15 +164,15 @@ static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, | |||
170 | static int xilinx_intc_map(struct irq_host *h, unsigned int virq, | 164 | static int xilinx_intc_map(struct irq_host *h, unsigned int virq, |
171 | irq_hw_number_t irq) | 165 | irq_hw_number_t irq) |
172 | { | 166 | { |
173 | set_irq_chip_data(virq, h->host_data); | 167 | irq_set_chip_data(virq, h->host_data); |
174 | 168 | ||
175 | if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH || | 169 | if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH || |
176 | xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) { | 170 | xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) { |
177 | set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip, | 171 | irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip, |
178 | handle_level_irq); | 172 | handle_level_irq); |
179 | } else { | 173 | } else { |
180 | set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip, | 174 | irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip, |
181 | handle_edge_irq); | 175 | handle_edge_irq); |
182 | } | 176 | } |
183 | return 0; | 177 | return 0; |
184 | } | 178 | } |
@@ -229,7 +223,7 @@ int xilinx_intc_get_irq(void) | |||
229 | */ | 223 | */ |
230 | static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) | 224 | static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) |
231 | { | 225 | { |
232 | struct irq_chip *chip = get_irq_desc_chip(desc); | 226 | struct irq_chip *chip = irq_desc_get_chip(desc); |
233 | unsigned int cascade_irq = i8259_irq(); | 227 | unsigned int cascade_irq = i8259_irq(); |
234 | 228 | ||
235 | if (cascade_irq) | 229 | if (cascade_irq) |
@@ -256,7 +250,7 @@ static void __init xilinx_i8259_setup_cascade(void) | |||
256 | } | 250 | } |
257 | 251 | ||
258 | i8259_init(cascade_node, 0); | 252 | i8259_init(cascade_node, 0); |
259 | set_irq_chained_handler(cascade_irq, xilinx_i8259_cascade); | 253 | irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade); |
260 | 254 | ||
261 | /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */ | 255 | /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */ |
262 | /* This looks like a dirty hack to me --gcl */ | 256 | /* This looks like a dirty hack to me --gcl */ |
diff --git a/arch/score/Kconfig b/arch/score/Kconfig index 27b2295f41f3..4278bbc032ce 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig | |||
@@ -3,6 +3,8 @@ menu "Machine selection" | |||
3 | config SCORE | 3 | config SCORE |
4 | def_bool y | 4 | def_bool y |
5 | select HAVE_GENERIC_HARDIRQS | 5 | select HAVE_GENERIC_HARDIRQS |
6 | select GENERIC_HARDIRQS_NO_DEPRECATED | ||
7 | select GENERIC_IRQ_SHOW | ||
6 | 8 | ||
7 | choice | 9 | choice |
8 | prompt "System type" | 10 | prompt "System type" |
diff --git a/arch/score/include/asm/irqflags.h b/arch/score/include/asm/irqflags.h index 5c7563891e28..37c6ac9dd6e8 100644 --- a/arch/score/include/asm/irqflags.h +++ b/arch/score/include/asm/irqflags.h | |||
@@ -29,7 +29,7 @@ static inline unsigned long arch_local_save_flags(void) | |||
29 | 29 | ||
30 | static inline unsigned long arch_local_irq_save(void) | 30 | static inline unsigned long arch_local_irq_save(void) |
31 | { | 31 | { |
32 | unsigned long flags | 32 | unsigned long flags; |
33 | 33 | ||
34 | asm volatile( | 34 | asm volatile( |
35 | " mfcr r8, cr0 \n" | 35 | " mfcr r8, cr0 \n" |
diff --git a/arch/score/kernel/irq.c b/arch/score/kernel/irq.c index 47647dde09ca..d4196732c65e 100644 --- a/arch/score/kernel/irq.c +++ b/arch/score/kernel/irq.c | |||
@@ -52,9 +52,9 @@ asmlinkage void do_IRQ(int irq) | |||
52 | irq_exit(); | 52 | irq_exit(); |
53 | } | 53 | } |
54 | 54 | ||
55 | static void score_mask(unsigned int irq_nr) | 55 | static void score_mask(struct irq_data *d) |
56 | { | 56 | { |
57 | unsigned int irq_source = 63 - irq_nr; | 57 | unsigned int irq_source = 63 - d->irq; |
58 | 58 | ||
59 | if (irq_source < 32) | 59 | if (irq_source < 32) |
60 | __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) | \ | 60 | __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) | \ |
@@ -64,9 +64,9 @@ static void score_mask(unsigned int irq_nr) | |||
64 | (1 << (irq_source - 32))), SCORE_PIC + INT_MASKH); | 64 | (1 << (irq_source - 32))), SCORE_PIC + INT_MASKH); |
65 | } | 65 | } |
66 | 66 | ||
67 | static void score_unmask(unsigned int irq_nr) | 67 | static void score_unmask(struct irq_data *d) |
68 | { | 68 | { |
69 | unsigned int irq_source = 63 - irq_nr; | 69 | unsigned int irq_source = 63 - d->irq; |
70 | 70 | ||
71 | if (irq_source < 32) | 71 | if (irq_source < 32) |
72 | __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) & \ | 72 | __raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) & \ |
@@ -78,9 +78,9 @@ static void score_unmask(unsigned int irq_nr) | |||
78 | 78 | ||
79 | struct irq_chip score_irq_chip = { | 79 | struct irq_chip score_irq_chip = { |
80 | .name = "Score7-level", | 80 | .name = "Score7-level", |
81 | .mask = score_mask, | 81 | .irq_mask = score_mask, |
82 | .mask_ack = score_mask, | 82 | .irq_mask_ack = score_mask, |
83 | .unmask = score_unmask, | 83 | .irq_unmask = score_unmask, |
84 | }; | 84 | }; |
85 | 85 | ||
86 | /* | 86 | /* |
@@ -92,7 +92,7 @@ void __init init_IRQ(void) | |||
92 | unsigned long target_addr; | 92 | unsigned long target_addr; |
93 | 93 | ||
94 | for (index = 0; index < NR_IRQS; ++index) | 94 | for (index = 0; index < NR_IRQS; ++index) |
95 | set_irq_chip_and_handler(index, &score_irq_chip, | 95 | irq_set_chip_and_handler(index, &score_irq_chip, |
96 | handle_level_irq); | 96 | handle_level_irq); |
97 | 97 | ||
98 | for (target_addr = IRQ_VECTOR_BASE_ADDR; | 98 | for (target_addr = IRQ_VECTOR_BASE_ADDR; |
@@ -109,40 +109,3 @@ void __init init_IRQ(void) | |||
109 | : : "r" (EXCEPTION_VECTOR_BASE_ADDR | \ | 109 | : : "r" (EXCEPTION_VECTOR_BASE_ADDR | \ |
110 | VECTOR_ADDRESS_OFFSET_MODE16)); | 110 | VECTOR_ADDRESS_OFFSET_MODE16)); |
111 | } | 111 | } |
112 | |||
113 | /* | ||
114 | * Generic, controller-independent functions: | ||
115 | */ | ||
116 | int show_interrupts(struct seq_file *p, void *v) | ||
117 | { | ||
118 | int i = *(loff_t *)v, cpu; | ||
119 | struct irqaction *action; | ||
120 | unsigned long flags; | ||
121 | |||
122 | if (i == 0) { | ||
123 | seq_puts(p, " "); | ||
124 | for_each_online_cpu(cpu) | ||
125 | seq_printf(p, "CPU%d ", cpu); | ||
126 | seq_putc(p, '\n'); | ||
127 | } | ||
128 | |||
129 | if (i < NR_IRQS) { | ||
130 | spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
131 | action = irq_desc[i].action; | ||
132 | if (!action) | ||
133 | goto unlock; | ||
134 | |||
135 | seq_printf(p, "%3d: ", i); | ||
136 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
137 | seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-"); | ||
138 | seq_printf(p, " %s", action->name); | ||
139 | for (action = action->next; action; action = action->next) | ||
140 | seq_printf(p, ", %s", action->name); | ||
141 | |||
142 | seq_putc(p, '\n'); | ||
143 | unlock: | ||
144 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
145 | } | ||
146 | |||
147 | return 0; | ||
148 | } | ||
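The score conversion shows the other half of the migration: irq_chip callbacks now take a struct irq_data * instead of a bare irq number, the .mask/.unmask fields become .irq_mask/.irq_unmask, and selecting GENERIC_IRQ_SHOW lets the core provide /proc/interrupts so the private show_interrupts() can be deleted. A small sketch of a new-style chip, with hypothetical my_* names and the register write reduced to a debug print:

#include <linux/kernel.h>
#include <linux/irq.h>

static void my_chip_mask(struct irq_data *d)
{
	unsigned int src = 63 - d->irq;	/* same hwirq encoding the score PIC uses */

	pr_debug("masking hardware source %u\n", src);
	/* a real driver would set the bit for 'src' in its mask register here */
}

static void my_chip_unmask(struct irq_data *d)
{
	unsigned int src = 63 - d->irq;

	pr_debug("unmasking hardware source %u\n", src);
}

static struct irq_chip my_chip = {
	.name		= "my-pic",
	.irq_mask	= my_chip_mask,
	.irq_mask_ack	= my_chip_mask,
	.irq_unmask	= my_chip_unmask,
};

Each interrupt is then wired up with irq_set_chip_and_handler(irq, &my_chip, handle_level_irq), exactly as the loop in init_IRQ() now does.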
diff --git a/arch/sh/boards/board-magicpanelr2.c b/arch/sh/boards/board-magicpanelr2.c index efba450a0518..93f5039099b7 100644 --- a/arch/sh/boards/board-magicpanelr2.c +++ b/arch/sh/boards/board-magicpanelr2.c | |||
@@ -388,12 +388,12 @@ static void __init init_mpr2_IRQ(void) | |||
388 | { | 388 | { |
389 | plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */ | 389 | plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */ |
390 | 390 | ||
391 | set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */ | 391 | irq_set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */ |
392 | set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */ | 392 | irq_set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */ |
393 | set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */ | 393 | irq_set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */ |
394 | set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */ | 394 | irq_set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */ |
395 | set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */ | 395 | irq_set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */ |
396 | set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */ | 396 | irq_set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */ |
397 | 397 | ||
398 | intc_set_priority(32, 13); /* IRQ0 CAN1 */ | 398 | intc_set_priority(32, 13); /* IRQ0 CAN1 */ |
399 | intc_set_priority(33, 13); /* IRQ0 CAN2 */ | 399 | intc_set_priority(33, 13); /* IRQ0 CAN2 */ |
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index 3e5fc3bbf3ed..636d8318a72a 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c | |||
@@ -14,8 +14,8 @@ | |||
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
18 | #include <linux/mmc/host.h> | 17 | #include <linux/mmc/host.h> |
18 | #include <linux/mmc/sh_mobile_sdhi.h> | ||
19 | #include <linux/mtd/physmap.h> | 19 | #include <linux/mtd/physmap.h> |
20 | #include <linux/mtd/sh_flctl.h> | 20 | #include <linux/mtd/sh_flctl.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
@@ -423,7 +423,7 @@ static struct resource sdhi0_cn3_resources[] = { | |||
423 | [0] = { | 423 | [0] = { |
424 | .name = "SDHI0", | 424 | .name = "SDHI0", |
425 | .start = 0x04ce0000, | 425 | .start = 0x04ce0000, |
426 | .end = 0x04ce01ff, | 426 | .end = 0x04ce00ff, |
427 | .flags = IORESOURCE_MEM, | 427 | .flags = IORESOURCE_MEM, |
428 | }, | 428 | }, |
429 | [1] = { | 429 | [1] = { |
@@ -453,7 +453,7 @@ static struct resource sdhi1_cn7_resources[] = { | |||
453 | [0] = { | 453 | [0] = { |
454 | .name = "SDHI1", | 454 | .name = "SDHI1", |
455 | .start = 0x04cf0000, | 455 | .start = 0x04cf0000, |
456 | .end = 0x04cf01ff, | 456 | .end = 0x04cf00ff, |
457 | .flags = IORESOURCE_MEM, | 457 | .flags = IORESOURCE_MEM, |
458 | }, | 458 | }, |
459 | [1] = { | 459 | [1] = { |
diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c index d7ac5af9d102..311bcebdbd07 100644 --- a/arch/sh/boards/mach-cayman/irq.c +++ b/arch/sh/boards/mach-cayman/irq.c | |||
@@ -149,8 +149,8 @@ void init_cayman_irq(void) | |||
149 | } | 149 | } |
150 | 150 | ||
151 | for (i = 0; i < NR_EXT_IRQS; i++) { | 151 | for (i = 0; i < NR_EXT_IRQS; i++) { |
152 | set_irq_chip_and_handler(START_EXT_IRQS + i, &cayman_irq_type, | 152 | irq_set_chip_and_handler(START_EXT_IRQS + i, |
153 | handle_level_irq); | 153 | &cayman_irq_type, handle_level_irq); |
154 | } | 154 | } |
155 | 155 | ||
156 | /* Setup the SMSC interrupt */ | 156 | /* Setup the SMSC interrupt */ |
diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c index 72e7ac9549da..78cf2ab89d7a 100644 --- a/arch/sh/boards/mach-dreamcast/irq.c +++ b/arch/sh/boards/mach-dreamcast/irq.c | |||
@@ -161,7 +161,6 @@ void systemasic_irq_init(void) | |||
161 | return; | 161 | return; |
162 | } | 162 | } |
163 | 163 | ||
164 | set_irq_chip_and_handler(i, &systemasic_int, | 164 | irq_set_chip_and_handler(i, &systemasic_int, handle_level_irq); |
165 | handle_level_irq); | ||
166 | } | 165 | } |
167 | } | 166 | } |
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index e44480ce2ea8..fd4ff25f23b2 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c | |||
@@ -11,9 +11,9 @@ | |||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
15 | #include <linux/mmc/host.h> | 14 | #include <linux/mmc/host.h> |
16 | #include <linux/mmc/sh_mmcif.h> | 15 | #include <linux/mmc/sh_mmcif.h> |
16 | #include <linux/mmc/sh_mobile_sdhi.h> | ||
17 | #include <linux/mtd/physmap.h> | 17 | #include <linux/mtd/physmap.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
@@ -464,7 +464,7 @@ static struct i2c_board_info ts_i2c_clients = { | |||
464 | .irq = IRQ0, | 464 | .irq = IRQ0, |
465 | }; | 465 | }; |
466 | 466 | ||
467 | #ifdef CONFIG_MFD_SH_MOBILE_SDHI | 467 | #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) |
468 | /* SDHI0 */ | 468 | /* SDHI0 */ |
469 | static void sdhi0_set_pwr(struct platform_device *pdev, int state) | 469 | static void sdhi0_set_pwr(struct platform_device *pdev, int state) |
470 | { | 470 | { |
@@ -482,7 +482,7 @@ static struct resource sdhi0_resources[] = { | |||
482 | [0] = { | 482 | [0] = { |
483 | .name = "SDHI0", | 483 | .name = "SDHI0", |
484 | .start = 0x04ce0000, | 484 | .start = 0x04ce0000, |
485 | .end = 0x04ce01ff, | 485 | .end = 0x04ce00ff, |
486 | .flags = IORESOURCE_MEM, | 486 | .flags = IORESOURCE_MEM, |
487 | }, | 487 | }, |
488 | [1] = { | 488 | [1] = { |
@@ -522,7 +522,7 @@ static struct resource sdhi1_resources[] = { | |||
522 | [0] = { | 522 | [0] = { |
523 | .name = "SDHI1", | 523 | .name = "SDHI1", |
524 | .start = 0x04cf0000, | 524 | .start = 0x04cf0000, |
525 | .end = 0x04cf01ff, | 525 | .end = 0x04cf00ff, |
526 | .flags = IORESOURCE_MEM, | 526 | .flags = IORESOURCE_MEM, |
527 | }, | 527 | }, |
528 | [1] = { | 528 | [1] = { |
@@ -880,7 +880,7 @@ static struct platform_device *ecovec_devices[] __initdata = { | |||
880 | &ceu0_device, | 880 | &ceu0_device, |
881 | &ceu1_device, | 881 | &ceu1_device, |
882 | &keysc_device, | 882 | &keysc_device, |
883 | #ifdef CONFIG_MFD_SH_MOBILE_SDHI | 883 | #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) |
884 | &sdhi0_device, | 884 | &sdhi0_device, |
885 | #if !defined(CONFIG_MMC_SH_MMCIF) | 885 | #if !defined(CONFIG_MMC_SH_MMCIF) |
886 | &sdhi1_device, | 886 | &sdhi1_device, |
@@ -1102,7 +1102,7 @@ static int __init arch_setup(void) | |||
1102 | 1102 | ||
1103 | /* enable TouchScreen */ | 1103 | /* enable TouchScreen */ |
1104 | i2c_register_board_info(0, &ts_i2c_clients, 1); | 1104 | i2c_register_board_info(0, &ts_i2c_clients, 1); |
1105 | set_irq_type(IRQ0, IRQ_TYPE_LEVEL_LOW); | 1105 | irq_set_irq_type(IRQ0, IRQ_TYPE_LEVEL_LOW); |
1106 | } | 1106 | } |
1107 | 1107 | ||
1108 | /* enable CEU0 */ | 1108 | /* enable CEU0 */ |
@@ -1162,7 +1162,7 @@ static int __init arch_setup(void) | |||
1162 | gpio_direction_input(GPIO_PTR5); | 1162 | gpio_direction_input(GPIO_PTR5); |
1163 | gpio_direction_input(GPIO_PTR6); | 1163 | gpio_direction_input(GPIO_PTR6); |
1164 | 1164 | ||
1165 | #ifdef CONFIG_MFD_SH_MOBILE_SDHI | 1165 | #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) |
1166 | /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */ | 1166 | /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */ |
1167 | gpio_request(GPIO_FN_SDHI0CD, NULL); | 1167 | gpio_request(GPIO_FN_SDHI0CD, NULL); |
1168 | gpio_request(GPIO_FN_SDHI0WP, NULL); | 1168 | gpio_request(GPIO_FN_SDHI0WP, NULL); |
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 7504daaa85da..8b4abbbd1477 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c | |||
@@ -10,8 +10,8 @@ | |||
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
14 | #include <linux/mmc/host.h> | 13 | #include <linux/mmc/host.h> |
14 | #include <linux/mmc/sh_mobile_sdhi.h> | ||
15 | #include <linux/mfd/tmio.h> | 15 | #include <linux/mfd/tmio.h> |
16 | #include <linux/mtd/physmap.h> | 16 | #include <linux/mtd/physmap.h> |
17 | #include <linux/mtd/onenand.h> | 17 | #include <linux/mtd/onenand.h> |
@@ -354,7 +354,7 @@ static struct resource kfr2r09_sh_sdhi0_resources[] = { | |||
354 | [0] = { | 354 | [0] = { |
355 | .name = "SDHI0", | 355 | .name = "SDHI0", |
356 | .start = 0x04ce0000, | 356 | .start = 0x04ce0000, |
357 | .end = 0x04ce01ff, | 357 | .end = 0x04ce00ff, |
358 | .flags = IORESOURCE_MEM, | 358 | .flags = IORESOURCE_MEM, |
359 | }, | 359 | }, |
360 | [1] = { | 360 | [1] = { |
diff --git a/arch/sh/boards/mach-microdev/irq.c b/arch/sh/boards/mach-microdev/irq.c index c35001fd9032..4fb00369f0e2 100644 --- a/arch/sh/boards/mach-microdev/irq.c +++ b/arch/sh/boards/mach-microdev/irq.c | |||
@@ -117,7 +117,7 @@ static struct irq_chip microdev_irq_type = { | |||
117 | static void __init make_microdev_irq(unsigned int irq) | 117 | static void __init make_microdev_irq(unsigned int irq) |
118 | { | 118 | { |
119 | disable_irq_nosync(irq); | 119 | disable_irq_nosync(irq); |
120 | set_irq_chip_and_handler(irq, &microdev_irq_type, handle_level_irq); | 120 | irq_set_chip_and_handler(irq, &microdev_irq_type, handle_level_irq); |
121 | disable_microdev_irq(irq_get_irq_data(irq)); | 121 | disable_microdev_irq(irq_get_irq_data(irq)); |
122 | } | 122 | } |
123 | 123 | ||
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 03a7ffe729d5..184fde169132 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c | |||
@@ -12,8 +12,8 @@ | |||
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/input.h> | 13 | #include <linux/input.h> |
14 | #include <linux/input/sh_keysc.h> | 14 | #include <linux/input/sh_keysc.h> |
15 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
16 | #include <linux/mmc/host.h> | 15 | #include <linux/mmc/host.h> |
16 | #include <linux/mmc/sh_mobile_sdhi.h> | ||
17 | #include <linux/mtd/physmap.h> | 17 | #include <linux/mtd/physmap.h> |
18 | #include <linux/mtd/nand.h> | 18 | #include <linux/mtd/nand.h> |
19 | #include <linux/i2c.h> | 19 | #include <linux/i2c.h> |
@@ -399,7 +399,7 @@ static struct resource sdhi_cn9_resources[] = { | |||
399 | [0] = { | 399 | [0] = { |
400 | .name = "SDHI", | 400 | .name = "SDHI", |
401 | .start = 0x04ce0000, | 401 | .start = 0x04ce0000, |
402 | .end = 0x04ce01ff, | 402 | .end = 0x04ce00ff, |
403 | .flags = IORESOURCE_MEM, | 403 | .flags = IORESOURCE_MEM, |
404 | }, | 404 | }, |
405 | [1] = { | 405 | [1] = { |
diff --git a/arch/sh/boards/mach-se/7206/irq.c b/arch/sh/boards/mach-se/7206/irq.c index 9070d7e60704..0db058e709e9 100644 --- a/arch/sh/boards/mach-se/7206/irq.c +++ b/arch/sh/boards/mach-se/7206/irq.c | |||
@@ -92,9 +92,8 @@ static void eoi_se7206_irq(struct irq_data *data) | |||
92 | { | 92 | { |
93 | unsigned short sts0,sts1; | 93 | unsigned short sts0,sts1; |
94 | unsigned int irq = data->irq; | 94 | unsigned int irq = data->irq; |
95 | struct irq_desc *desc = irq_to_desc(irq); | ||
96 | 95 | ||
97 | if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) | 96 | if (!irqd_irq_disabled(data) && !irqd_irq_inprogress(data)) |
98 | enable_se7206_irq(data); | 97 | enable_se7206_irq(data); |
99 | /* FPGA isr clear */ | 98 | /* FPGA isr clear */ |
100 | sts0 = __raw_readw(INTSTS0); | 99 | sts0 = __raw_readw(INTSTS0); |
@@ -126,7 +125,7 @@ static struct irq_chip se7206_irq_chip __read_mostly = { | |||
126 | static void make_se7206_irq(unsigned int irq) | 125 | static void make_se7206_irq(unsigned int irq) |
127 | { | 126 | { |
128 | disable_irq_nosync(irq); | 127 | disable_irq_nosync(irq); |
129 | set_irq_chip_and_handler_name(irq, &se7206_irq_chip, | 128 | irq_set_chip_and_handler_name(irq, &se7206_irq_chip, |
130 | handle_level_irq, "level"); | 129 | handle_level_irq, "level"); |
131 | disable_se7206_irq(irq_get_irq_data(irq)); | 130 | disable_se7206_irq(irq_get_irq_data(irq)); |
132 | } | 131 | } |
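The eoi change above drops the irq_to_desc()/desc->status peeking in favour of the irq_data state accessors. A hedged sketch of the same pattern on its own; example_hw_eoi() stands in for the hardware-specific acknowledge.

        #include <linux/irq.h>

        static void example_hw_eoi(struct irq_data *data)
        {
                /* device-specific EOI register write would go here */
        }

        /* Acknowledge only when the line is neither disabled nor still in
         * progress, using the irqd_* accessors instead of desc->status flags.
         */
        static void example_irq_eoi(struct irq_data *data)
        {
                if (!irqd_irq_disabled(data) && !irqd_irq_inprogress(data))
                        example_hw_eoi(data);
        }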
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c index 76255a19417f..fd45ffc48340 100644 --- a/arch/sh/boards/mach-se/7343/irq.c +++ b/arch/sh/boards/mach-se/7343/irq.c | |||
@@ -67,19 +67,20 @@ void __init init_7343se_IRQ(void) | |||
67 | return; | 67 | return; |
68 | se7343_fpga_irq[i] = irq; | 68 | se7343_fpga_irq[i] = irq; |
69 | 69 | ||
70 | set_irq_chip_and_handler_name(se7343_fpga_irq[i], | 70 | irq_set_chip_and_handler_name(se7343_fpga_irq[i], |
71 | &se7343_irq_chip, | 71 | &se7343_irq_chip, |
72 | handle_level_irq, "level"); | 72 | handle_level_irq, |
73 | "level"); | ||
73 | 74 | ||
74 | set_irq_chip_data(se7343_fpga_irq[i], (void *)i); | 75 | irq_set_chip_data(se7343_fpga_irq[i], (void *)i); |
75 | } | 76 | } |
76 | 77 | ||
77 | set_irq_chained_handler(IRQ0_IRQ, se7343_irq_demux); | 78 | irq_set_chained_handler(IRQ0_IRQ, se7343_irq_demux); |
78 | set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); | 79 | irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); |
79 | set_irq_chained_handler(IRQ1_IRQ, se7343_irq_demux); | 80 | irq_set_chained_handler(IRQ1_IRQ, se7343_irq_demux); |
80 | set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); | 81 | irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); |
81 | set_irq_chained_handler(IRQ4_IRQ, se7343_irq_demux); | 82 | irq_set_chained_handler(IRQ4_IRQ, se7343_irq_demux); |
82 | set_irq_type(IRQ4_IRQ, IRQ_TYPE_LEVEL_LOW); | 83 | irq_set_irq_type(IRQ4_IRQ, IRQ_TYPE_LEVEL_LOW); |
83 | set_irq_chained_handler(IRQ5_IRQ, se7343_irq_demux); | 84 | irq_set_chained_handler(IRQ5_IRQ, se7343_irq_demux); |
84 | set_irq_type(IRQ5_IRQ, IRQ_TYPE_LEVEL_LOW); | 85 | irq_set_irq_type(IRQ5_IRQ, IRQ_TYPE_LEVEL_LOW); |
85 | } | 86 | } |
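The se7343/se7722/se7724 hunks convert the cascade setup for the board FPGA interrupt controllers. A hedged sketch of that pattern; the demux routine is a stub and the parent IRQ number is a placeholder.

        #include <linux/init.h>
        #include <linux/irq.h>

        /* Chained handlers keep the (irq, desc) signature in this tree. */
        static void example_demux(unsigned int irq, struct irq_desc *desc)
        {
                /* read the FPGA status register and generic_handle_irq()
                 * each pending child line (omitted) */
        }

        static void __init example_init_cascade(unsigned int parent_irq)
        {
                irq_set_chained_handler(parent_irq, example_demux);
                irq_set_irq_type(parent_irq, IRQ_TYPE_LEVEL_LOW);
        }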
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c index c013f95628ed..aac92f21ebd2 100644 --- a/arch/sh/boards/mach-se/7722/irq.c +++ b/arch/sh/boards/mach-se/7722/irq.c | |||
@@ -67,16 +67,17 @@ void __init init_se7722_IRQ(void) | |||
67 | return; | 67 | return; |
68 | se7722_fpga_irq[i] = irq; | 68 | se7722_fpga_irq[i] = irq; |
69 | 69 | ||
70 | set_irq_chip_and_handler_name(se7722_fpga_irq[i], | 70 | irq_set_chip_and_handler_name(se7722_fpga_irq[i], |
71 | &se7722_irq_chip, | 71 | &se7722_irq_chip, |
72 | handle_level_irq, "level"); | 72 | handle_level_irq, |
73 | "level"); | ||
73 | 74 | ||
74 | set_irq_chip_data(se7722_fpga_irq[i], (void *)i); | 75 | irq_set_chip_data(se7722_fpga_irq[i], (void *)i); |
75 | } | 76 | } |
76 | 77 | ||
77 | set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux); | 78 | irq_set_chained_handler(IRQ0_IRQ, se7722_irq_demux); |
78 | set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); | 79 | irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); |
79 | 80 | ||
80 | set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux); | 81 | irq_set_chained_handler(IRQ1_IRQ, se7722_irq_demux); |
81 | set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); | 82 | irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); |
82 | } | 83 | } |
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c index 5bd87c22b65b..c6342ce7768d 100644 --- a/arch/sh/boards/mach-se/7724/irq.c +++ b/arch/sh/boards/mach-se/7724/irq.c | |||
@@ -140,17 +140,16 @@ void __init init_se7724_IRQ(void) | |||
140 | return; | 140 | return; |
141 | } | 141 | } |
142 | 142 | ||
143 | set_irq_chip_and_handler_name(irq, | 143 | irq_set_chip_and_handler_name(irq, &se7724_irq_chip, |
144 | &se7724_irq_chip, | ||
145 | handle_level_irq, "level"); | 144 | handle_level_irq, "level"); |
146 | } | 145 | } |
147 | 146 | ||
148 | set_irq_chained_handler(IRQ0_IRQ, se7724_irq_demux); | 147 | irq_set_chained_handler(IRQ0_IRQ, se7724_irq_demux); |
149 | set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); | 148 | irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); |
150 | 149 | ||
151 | set_irq_chained_handler(IRQ1_IRQ, se7724_irq_demux); | 150 | irq_set_chained_handler(IRQ1_IRQ, se7724_irq_demux); |
152 | set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); | 151 | irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW); |
153 | 152 | ||
154 | set_irq_chained_handler(IRQ2_IRQ, se7724_irq_demux); | 153 | irq_set_chained_handler(IRQ2_IRQ, se7724_irq_demux); |
155 | set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW); | 154 | irq_set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW); |
156 | } | 155 | } |
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index c8bcf6a19b55..12357671023e 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c | |||
@@ -14,8 +14,8 @@ | |||
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
18 | #include <linux/mmc/host.h> | 17 | #include <linux/mmc/host.h> |
18 | #include <linux/mmc/sh_mobile_sdhi.h> | ||
19 | #include <linux/mtd/physmap.h> | 19 | #include <linux/mtd/physmap.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/smc91x.h> | 21 | #include <linux/smc91x.h> |
@@ -456,7 +456,7 @@ static struct resource sdhi0_cn7_resources[] = { | |||
456 | [0] = { | 456 | [0] = { |
457 | .name = "SDHI0", | 457 | .name = "SDHI0", |
458 | .start = 0x04ce0000, | 458 | .start = 0x04ce0000, |
459 | .end = 0x04ce01ff, | 459 | .end = 0x04ce00ff, |
460 | .flags = IORESOURCE_MEM, | 460 | .flags = IORESOURCE_MEM, |
461 | }, | 461 | }, |
462 | [1] = { | 462 | [1] = { |
@@ -488,7 +488,7 @@ static struct resource sdhi1_cn8_resources[] = { | |||
488 | [0] = { | 488 | [0] = { |
489 | .name = "SDHI1", | 489 | .name = "SDHI1", |
490 | .start = 0x04cf0000, | 490 | .start = 0x04cf0000, |
491 | .end = 0x04cf01ff, | 491 | .end = 0x04cf00ff, |
492 | .flags = IORESOURCE_MEM, | 492 | .flags = IORESOURCE_MEM, |
493 | }, | 493 | }, |
494 | [1] = { | 494 | [1] = { |
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c index 239e74066253..f33b2b57019c 100644 --- a/arch/sh/boards/mach-x3proto/gpio.c +++ b/arch/sh/boards/mach-x3proto/gpio.c | |||
@@ -102,8 +102,8 @@ int __init x3proto_gpio_setup(void) | |||
102 | 102 | ||
103 | spin_lock_irqsave(&x3proto_gpio_lock, flags); | 103 | spin_lock_irqsave(&x3proto_gpio_lock, flags); |
104 | x3proto_gpio_irq_map[i] = irq; | 104 | x3proto_gpio_irq_map[i] = irq; |
105 | set_irq_chip_and_handler_name(irq, &dummy_irq_chip, | 105 | irq_set_chip_and_handler_name(irq, &dummy_irq_chip, |
106 | handle_simple_irq, "gpio"); | 106 | handle_simple_irq, "gpio"); |
107 | spin_unlock_irqrestore(&x3proto_gpio_lock, flags); | 107 | spin_unlock_irqrestore(&x3proto_gpio_lock, flags); |
108 | } | 108 | } |
109 | 109 | ||
@@ -113,8 +113,8 @@ int __init x3proto_gpio_setup(void) | |||
113 | x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio, | 113 | x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio, |
114 | ilsel); | 114 | ilsel); |
115 | 115 | ||
116 | set_irq_chained_handler(ilsel, x3proto_gpio_irq_handler); | 116 | irq_set_chained_handler(ilsel, x3proto_gpio_irq_handler); |
117 | set_irq_wake(ilsel, 1); | 117 | irq_set_irq_wake(ilsel, 1); |
118 | 118 | ||
119 | return 0; | 119 | return 0; |
120 | 120 | ||
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c index 177a10b25cad..eb4ea4d44d59 100644 --- a/arch/sh/cchips/hd6446x/hd64461.c +++ b/arch/sh/cchips/hd6446x/hd64461.c | |||
@@ -107,12 +107,12 @@ int __init setup_hd64461(void) | |||
107 | return -EINVAL; | 107 | return -EINVAL; |
108 | } | 108 | } |
109 | 109 | ||
110 | set_irq_chip_and_handler(i, &hd64461_irq_chip, | 110 | irq_set_chip_and_handler(i, &hd64461_irq_chip, |
111 | handle_level_irq); | 111 | handle_level_irq); |
112 | } | 112 | } |
113 | 113 | ||
114 | set_irq_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux); | 114 | irq_set_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux); |
115 | set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW); | 115 | irq_set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW); |
116 | 116 | ||
117 | #ifdef CONFIG_HD64461_ENABLER | 117 | #ifdef CONFIG_HD64461_ENABLER |
118 | printk(KERN_INFO "HD64461: enabling PCMCIA devices\n"); | 118 | printk(KERN_INFO "HD64461: enabling PCMCIA devices\n"); |
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c index 32c825c9488e..39b6a24c159d 100644 --- a/arch/sh/kernel/cpu/irq/imask.c +++ b/arch/sh/kernel/cpu/irq/imask.c | |||
@@ -80,6 +80,6 @@ static struct irq_chip imask_irq_chip = { | |||
80 | 80 | ||
81 | void make_imask_irq(unsigned int irq) | 81 | void make_imask_irq(unsigned int irq) |
82 | { | 82 | { |
83 | set_irq_chip_and_handler_name(irq, &imask_irq_chip, | 83 | irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq, |
84 | handle_level_irq, "level"); | 84 | "level"); |
85 | } | 85 | } |
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c index 5af48f8357e5..9e056a3a0c73 100644 --- a/arch/sh/kernel/cpu/irq/intc-sh5.c +++ b/arch/sh/kernel/cpu/irq/intc-sh5.c | |||
@@ -135,7 +135,7 @@ void __init plat_irq_setup(void) | |||
135 | 135 | ||
136 | /* Set default: per-line enable/disable, priority driven ack/eoi */ | 136 | /* Set default: per-line enable/disable, priority driven ack/eoi */ |
137 | for (i = 0; i < NR_INTC_IRQS; i++) | 137 | for (i = 0; i < NR_INTC_IRQS; i++) |
138 | set_irq_chip_and_handler(i, &intc_irq_type, handle_level_irq); | 138 | irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq); |
139 | 139 | ||
140 | 140 | ||
141 | /* Disable all interrupts and set all priorities to 0 to avoid trouble */ | 141 | /* Disable all interrupts and set all priorities to 0 to avoid trouble */ |
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c index 7516c35ee514..5de6dff5c21b 100644 --- a/arch/sh/kernel/cpu/irq/ipr.c +++ b/arch/sh/kernel/cpu/irq/ipr.c | |||
@@ -74,9 +74,9 @@ void register_ipr_controller(struct ipr_desc *desc) | |||
74 | } | 74 | } |
75 | 75 | ||
76 | disable_irq_nosync(p->irq); | 76 | disable_irq_nosync(p->irq); |
77 | set_irq_chip_and_handler_name(p->irq, &desc->chip, | 77 | irq_set_chip_and_handler_name(p->irq, &desc->chip, |
78 | handle_level_irq, "level"); | 78 | handle_level_irq, "level"); |
79 | set_irq_chip_data(p->irq, p); | 79 | irq_set_chip_data(p->irq, p); |
80 | disable_ipr_irq(irq_get_irq_data(p->irq)); | 80 | disable_ipr_irq(irq_get_irq_data(p->irq)); |
81 | } | 81 | } |
82 | } | 82 | } |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index f766e6bf370e..14b234631f5f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -52,6 +52,8 @@ config SPARC64 | |||
52 | select PERF_USE_VMALLOC | 52 | select PERF_USE_VMALLOC |
53 | select HAVE_GENERIC_HARDIRQS | 53 | select HAVE_GENERIC_HARDIRQS |
54 | select GENERIC_HARDIRQS_NO_DEPRECATED | 54 | select GENERIC_HARDIRQS_NO_DEPRECATED |
55 | select GENERIC_IRQ_SHOW | ||
56 | select IRQ_PREFLOW_FASTEOI | ||
55 | 57 | ||
56 | config ARCH_DEFCONFIG | 58 | config ARCH_DEFCONFIG |
57 | string | 59 | string |
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index eb16e3b8a2dd..b1d275ce3435 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -162,47 +162,14 @@ void irq_free(unsigned int irq) | |||
162 | /* | 162 | /* |
163 | * /proc/interrupts printing: | 163 | * /proc/interrupts printing: |
164 | */ | 164 | */ |
165 | 165 | int arch_show_interrupts(struct seq_file *p, int prec) | |
166 | int show_interrupts(struct seq_file *p, void *v) | ||
167 | { | 166 | { |
168 | int i = *(loff_t *) v, j; | 167 | int j; |
169 | struct irqaction * action; | ||
170 | unsigned long flags; | ||
171 | 168 | ||
172 | if (i == 0) { | 169 | seq_printf(p, "NMI: "); |
173 | seq_printf(p, " "); | 170 | for_each_online_cpu(j) |
174 | for_each_online_cpu(j) | 171 | seq_printf(p, "%10u ", cpu_data(j).__nmi_count); |
175 | seq_printf(p, "CPU%d ",j); | 172 | seq_printf(p, " Non-maskable interrupts\n"); |
176 | seq_putc(p, '\n'); | ||
177 | } | ||
178 | |||
179 | if (i < NR_IRQS) { | ||
180 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
181 | action = irq_desc[i].action; | ||
182 | if (!action) | ||
183 | goto skip; | ||
184 | seq_printf(p, "%3d: ",i); | ||
185 | #ifndef CONFIG_SMP | ||
186 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
187 | #else | ||
188 | for_each_online_cpu(j) | ||
189 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
190 | #endif | ||
191 | seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name); | ||
192 | seq_printf(p, " %s", action->name); | ||
193 | |||
194 | for (action=action->next; action; action = action->next) | ||
195 | seq_printf(p, ", %s", action->name); | ||
196 | |||
197 | seq_putc(p, '\n'); | ||
198 | skip: | ||
199 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
200 | } else if (i == NR_IRQS) { | ||
201 | seq_printf(p, "NMI: "); | ||
202 | for_each_online_cpu(j) | ||
203 | seq_printf(p, "%10u ", cpu_data(j).__nmi_count); | ||
204 | seq_printf(p, " Non-maskable interrupts\n"); | ||
205 | } | ||
206 | return 0; | 173 | return 0; |
207 | } | 174 | } |
208 | 175 | ||
@@ -344,10 +311,6 @@ static void sun4u_irq_disable(struct irq_data *data) | |||
344 | static void sun4u_irq_eoi(struct irq_data *data) | 311 | static void sun4u_irq_eoi(struct irq_data *data) |
345 | { | 312 | { |
346 | struct irq_handler_data *handler_data = data->handler_data; | 313 | struct irq_handler_data *handler_data = data->handler_data; |
347 | struct irq_desc *desc = irq_desc + data->irq; | ||
348 | |||
349 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
350 | return; | ||
351 | 314 | ||
352 | if (likely(handler_data)) | 315 | if (likely(handler_data)) |
353 | upa_writeq(ICLR_IDLE, handler_data->iclr); | 316 | upa_writeq(ICLR_IDLE, handler_data->iclr); |
@@ -402,12 +365,8 @@ static void sun4v_irq_disable(struct irq_data *data) | |||
402 | static void sun4v_irq_eoi(struct irq_data *data) | 365 | static void sun4v_irq_eoi(struct irq_data *data) |
403 | { | 366 | { |
404 | unsigned int ino = irq_table[data->irq].dev_ino; | 367 | unsigned int ino = irq_table[data->irq].dev_ino; |
405 | struct irq_desc *desc = irq_desc + data->irq; | ||
406 | int err; | 368 | int err; |
407 | 369 | ||
408 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
409 | return; | ||
410 | |||
411 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); | 370 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); |
412 | if (err != HV_EOK) | 371 | if (err != HV_EOK) |
413 | printk(KERN_ERR "sun4v_intr_setstate(%x): " | 372 | printk(KERN_ERR "sun4v_intr_setstate(%x): " |
@@ -481,13 +440,9 @@ static void sun4v_virq_disable(struct irq_data *data) | |||
481 | 440 | ||
482 | static void sun4v_virq_eoi(struct irq_data *data) | 441 | static void sun4v_virq_eoi(struct irq_data *data) |
483 | { | 442 | { |
484 | struct irq_desc *desc = irq_desc + data->irq; | ||
485 | unsigned long dev_handle, dev_ino; | 443 | unsigned long dev_handle, dev_ino; |
486 | int err; | 444 | int err; |
487 | 445 | ||
488 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
489 | return; | ||
490 | |||
491 | dev_handle = irq_table[data->irq].dev_handle; | 446 | dev_handle = irq_table[data->irq].dev_handle; |
492 | dev_ino = irq_table[data->irq].dev_ino; | 447 | dev_ino = irq_table[data->irq].dev_ino; |
493 | 448 | ||
@@ -505,6 +460,7 @@ static struct irq_chip sun4u_irq = { | |||
505 | .irq_disable = sun4u_irq_disable, | 460 | .irq_disable = sun4u_irq_disable, |
506 | .irq_eoi = sun4u_irq_eoi, | 461 | .irq_eoi = sun4u_irq_eoi, |
507 | .irq_set_affinity = sun4u_set_affinity, | 462 | .irq_set_affinity = sun4u_set_affinity, |
463 | .flags = IRQCHIP_EOI_IF_HANDLED, | ||
508 | }; | 464 | }; |
509 | 465 | ||
510 | static struct irq_chip sun4v_irq = { | 466 | static struct irq_chip sun4v_irq = { |
@@ -513,6 +469,7 @@ static struct irq_chip sun4v_irq = { | |||
513 | .irq_disable = sun4v_irq_disable, | 469 | .irq_disable = sun4v_irq_disable, |
514 | .irq_eoi = sun4v_irq_eoi, | 470 | .irq_eoi = sun4v_irq_eoi, |
515 | .irq_set_affinity = sun4v_set_affinity, | 471 | .irq_set_affinity = sun4v_set_affinity, |
472 | .flags = IRQCHIP_EOI_IF_HANDLED, | ||
516 | }; | 473 | }; |
517 | 474 | ||
518 | static struct irq_chip sun4v_virq = { | 475 | static struct irq_chip sun4v_virq = { |
@@ -521,30 +478,28 @@ static struct irq_chip sun4v_virq = { | |||
521 | .irq_disable = sun4v_virq_disable, | 478 | .irq_disable = sun4v_virq_disable, |
522 | .irq_eoi = sun4v_virq_eoi, | 479 | .irq_eoi = sun4v_virq_eoi, |
523 | .irq_set_affinity = sun4v_virt_set_affinity, | 480 | .irq_set_affinity = sun4v_virt_set_affinity, |
481 | .flags = IRQCHIP_EOI_IF_HANDLED, | ||
524 | }; | 482 | }; |
525 | 483 | ||
526 | static void pre_flow_handler(unsigned int irq, struct irq_desc *desc) | 484 | static void pre_flow_handler(struct irq_data *d) |
527 | { | 485 | { |
528 | struct irq_handler_data *handler_data = get_irq_data(irq); | 486 | struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d); |
529 | unsigned int ino = irq_table[irq].dev_ino; | 487 | unsigned int ino = irq_table[d->irq].dev_ino; |
530 | 488 | ||
531 | handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2); | 489 | handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2); |
532 | |||
533 | handle_fasteoi_irq(irq, desc); | ||
534 | } | 490 | } |
535 | 491 | ||
536 | void irq_install_pre_handler(int irq, | 492 | void irq_install_pre_handler(int irq, |
537 | void (*func)(unsigned int, void *, void *), | 493 | void (*func)(unsigned int, void *, void *), |
538 | void *arg1, void *arg2) | 494 | void *arg1, void *arg2) |
539 | { | 495 | { |
540 | struct irq_handler_data *handler_data = get_irq_data(irq); | 496 | struct irq_handler_data *handler_data = irq_get_handler_data(irq); |
541 | struct irq_desc *desc = irq_desc + irq; | ||
542 | 497 | ||
543 | handler_data->pre_handler = func; | 498 | handler_data->pre_handler = func; |
544 | handler_data->arg1 = arg1; | 499 | handler_data->arg1 = arg1; |
545 | handler_data->arg2 = arg2; | 500 | handler_data->arg2 = arg2; |
546 | 501 | ||
547 | desc->handle_irq = pre_flow_handler; | 502 | __irq_set_preflow_handler(irq, pre_flow_handler); |
548 | } | 503 | } |
549 | 504 | ||
550 | unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) | 505 | unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) |
@@ -562,13 +517,11 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) | |||
562 | if (!irq) { | 517 | if (!irq) { |
563 | irq = irq_alloc(0, ino); | 518 | irq = irq_alloc(0, ino); |
564 | bucket_set_irq(__pa(bucket), irq); | 519 | bucket_set_irq(__pa(bucket), irq); |
565 | set_irq_chip_and_handler_name(irq, | 520 | irq_set_chip_and_handler_name(irq, &sun4u_irq, |
566 | &sun4u_irq, | 521 | handle_fasteoi_irq, "IVEC"); |
567 | handle_fasteoi_irq, | ||
568 | "IVEC"); | ||
569 | } | 522 | } |
570 | 523 | ||
571 | handler_data = get_irq_data(irq); | 524 | handler_data = irq_get_handler_data(irq); |
572 | if (unlikely(handler_data)) | 525 | if (unlikely(handler_data)) |
573 | goto out; | 526 | goto out; |
574 | 527 | ||
@@ -577,7 +530,7 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) | |||
577 | prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); | 530 | prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); |
578 | prom_halt(); | 531 | prom_halt(); |
579 | } | 532 | } |
580 | set_irq_data(irq, handler_data); | 533 | irq_set_handler_data(irq, handler_data); |
581 | 534 | ||
582 | handler_data->imap = imap; | 535 | handler_data->imap = imap; |
583 | handler_data->iclr = iclr; | 536 | handler_data->iclr = iclr; |
@@ -600,12 +553,11 @@ static unsigned int sun4v_build_common(unsigned long sysino, | |||
600 | if (!irq) { | 553 | if (!irq) { |
601 | irq = irq_alloc(0, sysino); | 554 | irq = irq_alloc(0, sysino); |
602 | bucket_set_irq(__pa(bucket), irq); | 555 | bucket_set_irq(__pa(bucket), irq); |
603 | set_irq_chip_and_handler_name(irq, chip, | 556 | irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, |
604 | handle_fasteoi_irq, | ||
605 | "IVEC"); | 557 | "IVEC"); |
606 | } | 558 | } |
607 | 559 | ||
608 | handler_data = get_irq_data(irq); | 560 | handler_data = irq_get_handler_data(irq); |
609 | if (unlikely(handler_data)) | 561 | if (unlikely(handler_data)) |
610 | goto out; | 562 | goto out; |
611 | 563 | ||
@@ -614,7 +566,7 @@ static unsigned int sun4v_build_common(unsigned long sysino, | |||
614 | prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); | 566 | prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); |
615 | prom_halt(); | 567 | prom_halt(); |
616 | } | 568 | } |
617 | set_irq_data(irq, handler_data); | 569 | irq_set_handler_data(irq, handler_data); |
618 | 570 | ||
619 | /* Catch accidental accesses to these things. IMAP/ICLR handling | 571 | /* Catch accidental accesses to these things. IMAP/ICLR handling |
620 | * is done by hypervisor calls on sun4v platforms, not by direct | 572 | * is done by hypervisor calls on sun4v platforms, not by direct |
@@ -639,7 +591,6 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) | |||
639 | struct irq_handler_data *handler_data; | 591 | struct irq_handler_data *handler_data; |
640 | unsigned long hv_err, cookie; | 592 | unsigned long hv_err, cookie; |
641 | struct ino_bucket *bucket; | 593 | struct ino_bucket *bucket; |
642 | struct irq_desc *desc; | ||
643 | unsigned int irq; | 594 | unsigned int irq; |
644 | 595 | ||
645 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); | 596 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); |
@@ -660,8 +611,7 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) | |||
660 | irq = irq_alloc(devhandle, devino); | 611 | irq = irq_alloc(devhandle, devino); |
661 | bucket_set_irq(__pa(bucket), irq); | 612 | bucket_set_irq(__pa(bucket), irq); |
662 | 613 | ||
663 | set_irq_chip_and_handler_name(irq, &sun4v_virq, | 614 | irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq, |
664 | handle_fasteoi_irq, | ||
665 | "IVEC"); | 615 | "IVEC"); |
666 | 616 | ||
667 | handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); | 617 | handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); |
@@ -672,10 +622,8 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) | |||
672 | * especially wrt. locking, we do not let request_irq() enable | 622 | * especially wrt. locking, we do not let request_irq() enable |
673 | * the interrupt. | 623 | * the interrupt. |
674 | */ | 624 | */ |
675 | desc = irq_desc + irq; | 625 | irq_set_status_flags(irq, IRQ_NOAUTOEN); |
676 | desc->status |= IRQ_NOAUTOEN; | 626 | irq_set_handler_data(irq, handler_data); |
677 | |||
678 | set_irq_data(irq, handler_data); | ||
679 | 627 | ||
680 | /* Catch accidental accesses to these things. IMAP/ICLR handling | 628 | /* Catch accidental accesses to these things. IMAP/ICLR handling |
681 | * is done by hypervisor calls on sun4v platforms, not by direct | 629 | * is done by hypervisor calls on sun4v platforms, not by direct |
@@ -734,7 +682,6 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) | |||
734 | orig_sp = set_hardirq_stack(); | 682 | orig_sp = set_hardirq_stack(); |
735 | 683 | ||
736 | while (bucket_pa) { | 684 | while (bucket_pa) { |
737 | struct irq_desc *desc; | ||
738 | unsigned long next_pa; | 685 | unsigned long next_pa; |
739 | unsigned int irq; | 686 | unsigned int irq; |
740 | 687 | ||
@@ -742,10 +689,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) | |||
742 | irq = bucket_get_irq(bucket_pa); | 689 | irq = bucket_get_irq(bucket_pa); |
743 | bucket_clear_chain_pa(bucket_pa); | 690 | bucket_clear_chain_pa(bucket_pa); |
744 | 691 | ||
745 | desc = irq_desc + irq; | 692 | generic_handle_irq(irq); |
746 | |||
747 | if (!(desc->status & IRQ_DISABLED)) | ||
748 | desc->handle_irq(irq, desc); | ||
749 | 693 | ||
750 | bucket_pa = next_pa; | 694 | bucket_pa = next_pa; |
751 | } | 695 | } |
@@ -788,19 +732,18 @@ void fixup_irqs(void) | |||
788 | unsigned int irq; | 732 | unsigned int irq; |
789 | 733 | ||
790 | for (irq = 0; irq < NR_IRQS; irq++) { | 734 | for (irq = 0; irq < NR_IRQS; irq++) { |
735 | struct irq_desc *desc = irq_to_desc(irq); | ||
736 | struct irq_data *data = irq_desc_get_irq_data(desc); | ||
791 | unsigned long flags; | 737 | unsigned long flags; |
792 | 738 | ||
793 | raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); | 739 | raw_spin_lock_irqsave(&desc->lock, flags); |
794 | if (irq_desc[irq].action && | 740 | if (desc->action && !irqd_is_per_cpu(data)) { |
795 | !(irq_desc[irq].status & IRQ_PER_CPU)) { | ||
796 | struct irq_data *data = irq_get_irq_data(irq); | ||
797 | |||
798 | if (data->chip->irq_set_affinity) | 741 | if (data->chip->irq_set_affinity) |
799 | data->chip->irq_set_affinity(data, | 742 | data->chip->irq_set_affinity(data, |
800 | data->affinity, | 743 | data->affinity, |
801 | false); | 744 | false); |
802 | } | 745 | } |
803 | raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); | 746 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
804 | } | 747 | } |
805 | 748 | ||
806 | tick_ops->disable_irq(); | 749 | tick_ops->disable_irq(); |
@@ -1038,5 +981,5 @@ void __init init_IRQ(void) | |||
1038 | : "i" (PSTATE_IE) | 981 | : "i" (PSTATE_IE) |
1039 | : "g1"); | 982 | : "g1"); |
1040 | 983 | ||
1041 | irq_desc[0].action = &timer_irq_action; | 984 | irq_to_desc(0)->action = &timer_irq_action; |
1042 | } | 985 | } |
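The sparc64 changes above move the per-vector pre_handler from an open-coded flow handler to the IRQ_PREFLOW_FASTEOI mechanism (selected in the Kconfig hunk) and let the core skip the EOI for unhandled interrupts via IRQCHIP_EOI_IF_HANDLED. A hedged sketch of that wiring; the chip is a stub and all names are illustrative.

        #include <linux/irq.h>

        static void example_eoi(struct irq_data *d)
        {
                /* hardware EOI write would go here */
        }

        static struct irq_chip example_fasteoi_chip = {
                .name           = "example",
                .irq_eoi        = example_eoi,
                /* core calls ->irq_eoi only if the interrupt was actually handled */
                .flags          = IRQCHIP_EOI_IF_HANDLED,
        };

        /* Preflow handlers run from handle_fasteoi_irq() before the action
         * handlers and take the irq_data; they are only available when the
         * architecture selects IRQ_PREFLOW_FASTEOI.
         */
        static void example_preflow(struct irq_data *d)
        {
                /* per-device pre-handling would go here */
        }

        static void example_setup_vector(unsigned int irq)
        {
                irq_set_chip_and_handler_name(irq, &example_fasteoi_chip,
                                              handle_fasteoi_irq, "IVEC");
                __irq_set_preflow_handler(irq, example_preflow);
        }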
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 44f41e312f73..713dc91020a6 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -1012,7 +1012,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
1012 | 1012 | ||
1013 | void arch_teardown_msi_irq(unsigned int irq) | 1013 | void arch_teardown_msi_irq(unsigned int irq) |
1014 | { | 1014 | { |
1015 | struct msi_desc *entry = get_irq_msi(irq); | 1015 | struct msi_desc *entry = irq_get_msi_desc(irq); |
1016 | struct pci_dev *pdev = entry->dev; | 1016 | struct pci_dev *pdev = entry->dev; |
1017 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; | 1017 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
1018 | 1018 | ||
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index 550e937720e7..30982e9ab626 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c | |||
@@ -30,13 +30,10 @@ static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie) | |||
30 | 30 | ||
31 | err = ops->dequeue_msi(pbm, msiqid, &head, &msi); | 31 | err = ops->dequeue_msi(pbm, msiqid, &head, &msi); |
32 | if (likely(err > 0)) { | 32 | if (likely(err > 0)) { |
33 | struct irq_desc *desc; | ||
34 | unsigned int irq; | 33 | unsigned int irq; |
35 | 34 | ||
36 | irq = pbm->msi_irq_table[msi - pbm->msi_first]; | 35 | irq = pbm->msi_irq_table[msi - pbm->msi_first]; |
37 | desc = irq_desc + irq; | 36 | generic_handle_irq(irq); |
38 | |||
39 | desc->handle_irq(irq, desc); | ||
40 | } | 37 | } |
41 | 38 | ||
42 | if (unlikely(err < 0)) | 39 | if (unlikely(err < 0)) |
@@ -136,8 +133,8 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p, | |||
136 | if (!*irq_p) | 133 | if (!*irq_p) |
137 | goto out_err; | 134 | goto out_err; |
138 | 135 | ||
139 | set_irq_chip_and_handler_name(*irq_p, &msi_irq, | 136 | irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq, |
140 | handle_simple_irq, "MSI"); | 137 | "MSI"); |
141 | 138 | ||
142 | err = alloc_msi(pbm); | 139 | err = alloc_msi(pbm); |
143 | if (unlikely(err < 0)) | 140 | if (unlikely(err < 0)) |
@@ -163,7 +160,7 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p, | |||
163 | } | 160 | } |
164 | msg.data = msi; | 161 | msg.data = msi; |
165 | 162 | ||
166 | set_irq_msi(*irq_p, entry); | 163 | irq_set_msi_desc(*irq_p, entry); |
167 | write_msi_msg(*irq_p, &msg); | 164 | write_msi_msg(*irq_p, &msg); |
168 | 165 | ||
169 | return 0; | 166 | return 0; |
@@ -172,7 +169,7 @@ out_msi_free: | |||
172 | free_msi(pbm, msi); | 169 | free_msi(pbm, msi); |
173 | 170 | ||
174 | out_irq_free: | 171 | out_irq_free: |
175 | set_irq_chip(*irq_p, NULL); | 172 | irq_set_chip(*irq_p, NULL); |
176 | irq_free(*irq_p); | 173 | irq_free(*irq_p); |
177 | *irq_p = 0; | 174 | *irq_p = 0; |
178 | 175 | ||
@@ -211,7 +208,7 @@ static void sparc64_teardown_msi_irq(unsigned int irq, | |||
211 | 208 | ||
212 | free_msi(pbm, msi_num); | 209 | free_msi(pbm, msi_num); |
213 | 210 | ||
214 | set_irq_chip(irq, NULL); | 211 | irq_set_chip(irq, NULL); |
215 | irq_free(irq); | 212 | irq_free(irq); |
216 | } | 213 | } |
217 | 214 | ||
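Both the sparc64 vector loop and the MSI queue handler above now hand decoded interrupts to generic_handle_irq() instead of dereferencing irq_desc and calling desc->handle_irq themselves. A hedged sketch of the pattern; the queue read is a placeholder.

        #include <linux/interrupt.h>
        #include <linux/irq.h>

        /* generic_handle_irq() looks up the descriptor and honours the
         * disabled/in-progress state itself, so the caller no longer has to.
         */
        static irqreturn_t example_queue_interrupt(int irq, void *cookie)
        {
                unsigned int child_irq = 0;     /* would come from the hardware queue */

                if (child_irq)
                        generic_handle_irq(child_irq);

                return IRQ_HANDLED;
        }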
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index f3b78701c219..5e34a9fee9b3 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -12,6 +12,7 @@ config TILE | |||
12 | select GENERIC_IRQ_PROBE | 12 | select GENERIC_IRQ_PROBE |
13 | select GENERIC_PENDING_IRQ if SMP | 13 | select GENERIC_PENDING_IRQ if SMP |
14 | select GENERIC_HARDIRQS_NO_DEPRECATED | 14 | select GENERIC_HARDIRQS_NO_DEPRECATED |
15 | select GENERIC_IRQ_SHOW | ||
15 | 16 | ||
16 | # FIXME: investigate whether we need/want these options. | 17 | # FIXME: investigate whether we need/want these options. |
17 | # select HAVE_IOREMAP_PROT | 18 | # select HAVE_IOREMAP_PROT |
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 0baa7580121f..aa0134db2dd6 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c | |||
@@ -241,14 +241,14 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type) | |||
241 | irq_flow_handler_t handle = handle_level_irq; | 241 | irq_flow_handler_t handle = handle_level_irq; |
242 | if (tile_irq_type == TILE_IRQ_PERCPU) | 242 | if (tile_irq_type == TILE_IRQ_PERCPU) |
243 | handle = handle_percpu_irq; | 243 | handle = handle_percpu_irq; |
244 | set_irq_chip_and_handler(irq, &tile_irq_chip, handle); | 244 | irq_set_chip_and_handler(irq, &tile_irq_chip, handle); |
245 | 245 | ||
246 | /* | 246 | /* |
247 | * Flag interrupts that are hardware-cleared so that ack() | 247 | * Flag interrupts that are hardware-cleared so that ack() |
248 | * won't clear them. | 248 | * won't clear them. |
249 | */ | 249 | */ |
250 | if (tile_irq_type == TILE_IRQ_HW_CLEAR) | 250 | if (tile_irq_type == TILE_IRQ_HW_CLEAR) |
251 | set_irq_chip_data(irq, (void *)IS_HW_CLEARED); | 251 | irq_set_chip_data(irq, (void *)IS_HW_CLEARED); |
252 | } | 252 | } |
253 | EXPORT_SYMBOL(tile_irq_activate); | 253 | EXPORT_SYMBOL(tile_irq_activate); |
254 | 254 | ||
@@ -262,47 +262,6 @@ void ack_bad_irq(unsigned int irq) | |||
262 | * Generic, controller-independent functions: | 262 | * Generic, controller-independent functions: |
263 | */ | 263 | */ |
264 | 264 | ||
265 | int show_interrupts(struct seq_file *p, void *v) | ||
266 | { | ||
267 | int i = *(loff_t *) v, j; | ||
268 | struct irqaction *action; | ||
269 | unsigned long flags; | ||
270 | |||
271 | if (i == 0) { | ||
272 | seq_printf(p, " "); | ||
273 | for (j = 0; j < NR_CPUS; j++) | ||
274 | if (cpu_online(j)) | ||
275 | seq_printf(p, "CPU%-8d", j); | ||
276 | seq_putc(p, '\n'); | ||
277 | } | ||
278 | |||
279 | if (i < NR_IRQS) { | ||
280 | struct irq_desc *desc = irq_to_desc(i); | ||
281 | |||
282 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
283 | action = desc->action; | ||
284 | if (!action) | ||
285 | goto skip; | ||
286 | seq_printf(p, "%3d: ", i); | ||
287 | #ifndef CONFIG_SMP | ||
288 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
289 | #else | ||
290 | for_each_online_cpu(j) | ||
291 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
292 | #endif | ||
293 | seq_printf(p, " %14s", get_irq_desc_chip(desc)->name); | ||
294 | seq_printf(p, " %s", action->name); | ||
295 | |||
296 | for (action = action->next; action; action = action->next) | ||
297 | seq_printf(p, ", %s", action->name); | ||
298 | |||
299 | seq_putc(p, '\n'); | ||
300 | skip: | ||
301 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
302 | } | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | #if CHIP_HAS_IPI() | 265 | #if CHIP_HAS_IPI() |
307 | int create_irq(void) | 266 | int create_irq(void) |
308 | { | 267 | { |
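With GENERIC_IRQ_SHOW selected (sparc, tile and unicore32 all gain it in this series), the core prints the per-IRQ rows of /proc/interrupts and the architecture only contributes its extra rows through arch_show_interrupts(), which is why the arch-local show_interrupts() copies are deleted. A hedged sketch of such a hook; the counter is a placeholder.

        #include <linux/cpumask.h>
        #include <linux/interrupt.h>
        #include <linux/kernel_stat.h>
        #include <linux/seq_file.h>

        /* Print one architecture-specific row, padded to the same width
         * ("prec") the generic code used for the IRQ numbers.
         */
        int arch_show_interrupts(struct seq_file *p, int prec)
        {
                int cpu;

                seq_printf(p, "%*s: ", prec, "NMI");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ", 0u /* per-cpu NMI count goes here */);
                seq_printf(p, "  Non-maskable interrupts\n");
                return 0;
        }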
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 4a36db45fb3d..04e024919b2b 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig | |||
@@ -11,6 +11,7 @@ config UNICORE32 | |||
11 | select GENERIC_FIND_FIRST_BIT | 11 | select GENERIC_FIND_FIRST_BIT |
12 | select GENERIC_IRQ_PROBE | 12 | select GENERIC_IRQ_PROBE |
13 | select GENERIC_HARDIRQS_NO_DEPRECATED | 13 | select GENERIC_HARDIRQS_NO_DEPRECATED |
14 | select GENERIC_IRQ_SHOW | ||
14 | select ARCH_WANT_FRAME_POINTERS | 15 | select ARCH_WANT_FRAME_POINTERS |
15 | help | 16 | help |
16 | UniCore-32 is 32-bit Instruction Set Architecture, | 17 | UniCore-32 is 32-bit Instruction Set Architecture, |
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c index b23624cf3062..2aa30a364bbe 100644 --- a/arch/unicore32/kernel/irq.c +++ b/arch/unicore32/kernel/irq.c | |||
@@ -321,24 +321,24 @@ void __init init_IRQ(void) | |||
321 | writel(1, INTC_ICCR); | 321 | writel(1, INTC_ICCR); |
322 | 322 | ||
323 | for (irq = 0; irq < IRQ_GPIOHIGH; irq++) { | 323 | for (irq = 0; irq < IRQ_GPIOHIGH; irq++) { |
324 | set_irq_chip(irq, &puv3_low_gpio_chip); | 324 | irq_set_chip(irq, &puv3_low_gpio_chip); |
325 | set_irq_handler(irq, handle_edge_irq); | 325 | irq_set_handler(irq, handle_edge_irq); |
326 | irq_modify_status(irq, | 326 | irq_modify_status(irq, |
327 | IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, | 327 | IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, |
328 | 0); | 328 | 0); |
329 | } | 329 | } |
330 | 330 | ||
331 | for (irq = IRQ_GPIOHIGH + 1; irq < IRQ_GPIO0; irq++) { | 331 | for (irq = IRQ_GPIOHIGH + 1; irq < IRQ_GPIO0; irq++) { |
332 | set_irq_chip(irq, &puv3_normal_chip); | 332 | irq_set_chip(irq, &puv3_normal_chip); |
333 | set_irq_handler(irq, handle_level_irq); | 333 | irq_set_handler(irq, handle_level_irq); |
334 | irq_modify_status(irq, | 334 | irq_modify_status(irq, |
335 | IRQ_NOREQUEST | IRQ_NOAUTOEN, | 335 | IRQ_NOREQUEST | IRQ_NOAUTOEN, |
336 | IRQ_NOPROBE); | 336 | IRQ_NOPROBE); |
337 | } | 337 | } |
338 | 338 | ||
339 | for (irq = IRQ_GPIO0; irq <= IRQ_GPIO27; irq++) { | 339 | for (irq = IRQ_GPIO0; irq <= IRQ_GPIO27; irq++) { |
340 | set_irq_chip(irq, &puv3_high_gpio_chip); | 340 | irq_set_chip(irq, &puv3_high_gpio_chip); |
341 | set_irq_handler(irq, handle_edge_irq); | 341 | irq_set_handler(irq, handle_edge_irq); |
342 | irq_modify_status(irq, | 342 | irq_modify_status(irq, |
343 | IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, | 343 | IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, |
344 | 0); | 344 | 0); |
@@ -347,56 +347,14 @@ void __init init_IRQ(void) | |||
347 | /* | 347 | /* |
348 | * Install handler for GPIO 0-27 edge detect interrupts | 348 | * Install handler for GPIO 0-27 edge detect interrupts |
349 | */ | 349 | */ |
350 | set_irq_chip(IRQ_GPIOHIGH, &puv3_normal_chip); | 350 | irq_set_chip(IRQ_GPIOHIGH, &puv3_normal_chip); |
351 | set_irq_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler); | 351 | irq_set_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler); |
352 | 352 | ||
353 | #ifdef CONFIG_PUV3_GPIO | 353 | #ifdef CONFIG_PUV3_GPIO |
354 | puv3_init_gpio(); | 354 | puv3_init_gpio(); |
355 | #endif | 355 | #endif |
356 | } | 356 | } |
357 | 357 | ||
358 | int show_interrupts(struct seq_file *p, void *v) | ||
359 | { | ||
360 | int i = *(loff_t *) v, cpu; | ||
361 | struct irq_desc *desc; | ||
362 | struct irqaction *action; | ||
363 | unsigned long flags; | ||
364 | |||
365 | if (i == 0) { | ||
366 | char cpuname[12]; | ||
367 | |||
368 | seq_printf(p, " "); | ||
369 | for_each_present_cpu(cpu) { | ||
370 | sprintf(cpuname, "CPU%d", cpu); | ||
371 | seq_printf(p, " %10s", cpuname); | ||
372 | } | ||
373 | seq_putc(p, '\n'); | ||
374 | } | ||
375 | |||
376 | if (i < nr_irqs) { | ||
377 | desc = irq_to_desc(i); | ||
378 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
379 | action = desc->action; | ||
380 | if (!action) | ||
381 | goto unlock; | ||
382 | |||
383 | seq_printf(p, "%3d: ", i); | ||
384 | for_each_present_cpu(cpu) | ||
385 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); | ||
386 | seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-"); | ||
387 | seq_printf(p, " %s", action->name); | ||
388 | for (action = action->next; action; action = action->next) | ||
389 | seq_printf(p, ", %s", action->name); | ||
390 | |||
391 | seq_putc(p, '\n'); | ||
392 | unlock: | ||
393 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
394 | } else if (i == nr_irqs) { | ||
395 | seq_printf(p, "Error in interrupt!\n"); | ||
396 | } | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | /* | 358 | /* |
401 | * do_IRQ handles all hardware IRQ's. Decoded IRQs should not | 359 | * do_IRQ handles all hardware IRQ's. Decoded IRQs should not |
402 | * come via this function. Instead, they should provide their | 360 | * come via this function. Instead, they should provide their |
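The unicore32 init_IRQ() conversion above pairs irq_set_chip()/irq_set_handler() with irq_modify_status() to make the GPIO lines requestable. A hedged sketch of the idiom; the chip pointer is a placeholder.

        #include <linux/irq.h>

        /* irq_modify_status(irq, clear, set): clear the flags in the first
         * mask, then set the ones in the second.  Clearing IRQ_NOREQUEST
         * makes the line available to request_irq().
         */
        static void example_init_edge_irq(unsigned int irq, struct irq_chip *chip)
        {
                irq_set_chip(irq, chip);
                irq_set_handler(irq, handle_edge_irq);
                irq_modify_status(irq,
                                  IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0);
        }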
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index adcf794b22e2..be6d9e365a80 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt: | |||
1612 | movdqa SHUF_MASK(%rip), %xmm10 | 1612 | movdqa SHUF_MASK(%rip), %xmm10 |
1613 | PSHUFB_XMM %xmm10, %xmm0 | 1613 | PSHUFB_XMM %xmm10, %xmm0 |
1614 | 1614 | ||
1615 | |||
1615 | ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) | 1616 | ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) |
1616 | sub $16, %r11 | 1617 | sub $16, %r11 |
1617 | add %r13, %r11 | 1618 | add %r13, %r11 |
@@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt: | |||
1634 | # GHASH computation for the last <16 byte block | 1635 | # GHASH computation for the last <16 byte block |
1635 | sub %r13, %r11 | 1636 | sub %r13, %r11 |
1636 | add $16, %r11 | 1637 | add $16, %r11 |
1637 | PSHUFB_XMM %xmm10, %xmm1 | 1638 | |
1639 | movdqa SHUF_MASK(%rip), %xmm10 | ||
1640 | PSHUFB_XMM %xmm10, %xmm0 | ||
1638 | 1641 | ||
1639 | # shuffle xmm0 back to output as ciphertext | 1642 | # shuffle xmm0 back to output as ciphertext |
1640 | 1643 | ||
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index e0e6340c8dad..2577613fb32b 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tfm *tfm) | |||
828 | struct cryptd_aead *cryptd_tfm; | 828 | struct cryptd_aead *cryptd_tfm; |
829 | struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) | 829 | struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) |
830 | PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); | 830 | PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); |
831 | struct crypto_aead *cryptd_child; | ||
832 | struct aesni_rfc4106_gcm_ctx *child_ctx; | ||
831 | cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); | 833 | cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); |
832 | if (IS_ERR(cryptd_tfm)) | 834 | if (IS_ERR(cryptd_tfm)) |
833 | return PTR_ERR(cryptd_tfm); | 835 | return PTR_ERR(cryptd_tfm); |
836 | |||
837 | cryptd_child = cryptd_aead_child(cryptd_tfm); | ||
838 | child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); | ||
839 | memcpy(child_ctx, ctx, sizeof(*ctx)); | ||
834 | ctx->cryptd_tfm = cryptd_tfm; | 840 | ctx->cryptd_tfm = cryptd_tfm; |
835 | tfm->crt_aead.reqsize = sizeof(struct aead_request) | 841 | tfm->crt_aead.reqsize = sizeof(struct aead_request) |
836 | + crypto_aead_reqsize(&cryptd_tfm->base); | 842 | + crypto_aead_reqsize(&cryptd_tfm->base); |
@@ -923,6 +929,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, | |||
923 | int ret = 0; | 929 | int ret = 0; |
924 | struct crypto_tfm *tfm = crypto_aead_tfm(parent); | 930 | struct crypto_tfm *tfm = crypto_aead_tfm(parent); |
925 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); | 931 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); |
932 | struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); | ||
933 | struct aesni_rfc4106_gcm_ctx *child_ctx = | ||
934 | aesni_rfc4106_gcm_ctx_get(cryptd_child); | ||
926 | u8 *new_key_mem = NULL; | 935 | u8 *new_key_mem = NULL; |
927 | 936 | ||
928 | if (key_len < 4) { | 937 | if (key_len < 4) { |
@@ -966,6 +975,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, | |||
966 | goto exit; | 975 | goto exit; |
967 | } | 976 | } |
968 | ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); | 977 | ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); |
978 | memcpy(child_ctx, ctx, sizeof(*ctx)); | ||
969 | exit: | 979 | exit: |
970 | kfree(new_key_mem); | 980 | kfree(new_key_mem); |
971 | return ret; | 981 | return ret; |
@@ -997,7 +1007,6 @@ static int rfc4106_encrypt(struct aead_request *req) | |||
997 | int ret; | 1007 | int ret; |
998 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1008 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
999 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 1009 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); |
1000 | struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); | ||
1001 | 1010 | ||
1002 | if (!irq_fpu_usable()) { | 1011 | if (!irq_fpu_usable()) { |
1003 | struct aead_request *cryptd_req = | 1012 | struct aead_request *cryptd_req = |
@@ -1006,6 +1015,7 @@ static int rfc4106_encrypt(struct aead_request *req) | |||
1006 | aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | 1015 | aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); |
1007 | return crypto_aead_encrypt(cryptd_req); | 1016 | return crypto_aead_encrypt(cryptd_req); |
1008 | } else { | 1017 | } else { |
1018 | struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); | ||
1009 | kernel_fpu_begin(); | 1019 | kernel_fpu_begin(); |
1010 | ret = cryptd_child->base.crt_aead.encrypt(req); | 1020 | ret = cryptd_child->base.crt_aead.encrypt(req); |
1011 | kernel_fpu_end(); | 1021 | kernel_fpu_end(); |
@@ -1018,7 +1028,6 @@ static int rfc4106_decrypt(struct aead_request *req) | |||
1018 | int ret; | 1028 | int ret; |
1019 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1029 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1020 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 1030 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); |
1021 | struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); | ||
1022 | 1031 | ||
1023 | if (!irq_fpu_usable()) { | 1032 | if (!irq_fpu_usable()) { |
1024 | struct aead_request *cryptd_req = | 1033 | struct aead_request *cryptd_req = |
@@ -1027,6 +1036,7 @@ static int rfc4106_decrypt(struct aead_request *req) | |||
1027 | aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | 1036 | aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); |
1028 | return crypto_aead_decrypt(cryptd_req); | 1037 | return crypto_aead_decrypt(cryptd_req); |
1029 | } else { | 1038 | } else { |
1039 | struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); | ||
1030 | kernel_fpu_begin(); | 1040 | kernel_fpu_begin(); |
1031 | ret = cryptd_child->base.crt_aead.decrypt(req); | 1041 | ret = cryptd_child->base.crt_aead.decrypt(req); |
1032 | kernel_fpu_end(); | 1042 | kernel_fpu_end(); |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 215a3ce61068..141eb0de8b06 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -497,7 +497,7 @@ static bool alloc_p2m(unsigned long pfn) | |||
497 | return true; | 497 | return true; |
498 | } | 498 | } |
499 | 499 | ||
500 | bool __early_alloc_p2m(unsigned long pfn) | 500 | static bool __init __early_alloc_p2m(unsigned long pfn) |
501 | { | 501 | { |
502 | unsigned topidx, mididx, idx; | 502 | unsigned topidx, mididx, idx; |
503 | 503 | ||
@@ -530,7 +530,7 @@ bool __early_alloc_p2m(unsigned long pfn) | |||
530 | } | 530 | } |
531 | return idx != 0; | 531 | return idx != 0; |
532 | } | 532 | } |
533 | unsigned long set_phys_range_identity(unsigned long pfn_s, | 533 | unsigned long __init set_phys_range_identity(unsigned long pfn_s, |
534 | unsigned long pfn_e) | 534 | unsigned long pfn_e) |
535 | { | 535 | { |
536 | unsigned long pfn; | 536 | unsigned long pfn; |
@@ -671,7 +671,9 @@ int m2p_add_override(unsigned long mfn, struct page *page) | |||
671 | page->private = mfn; | 671 | page->private = mfn; |
672 | page->index = pfn_to_mfn(pfn); | 672 | page->index = pfn_to_mfn(pfn); |
673 | 673 | ||
674 | __set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); | 674 | if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) |
675 | return -ENOMEM; | ||
676 | |||
675 | if (!PageHighMem(page)) | 677 | if (!PageHighMem(page)) |
676 | /* Just zap old mapping for now */ | 678 | /* Just zap old mapping for now */ |
677 | pte_clear(&init_mm, address, ptep); | 679 | pte_clear(&init_mm, address, ptep); |
@@ -709,7 +711,7 @@ int m2p_remove_override(struct page *page) | |||
709 | spin_lock_irqsave(&m2p_override_lock, flags); | 711 | spin_lock_irqsave(&m2p_override_lock, flags); |
710 | list_del(&page->lru); | 712 | list_del(&page->lru); |
711 | spin_unlock_irqrestore(&m2p_override_lock, flags); | 713 | spin_unlock_irqrestore(&m2p_override_lock, flags); |
712 | __set_phys_to_machine(pfn, page->index); | 714 | set_phys_to_machine(pfn, page->index); |
713 | 715 | ||
714 | if (!PageHighMem(page)) | 716 | if (!PageHighMem(page)) |
715 | set_pte_at(&init_mm, address, ptep, | 717 | set_pte_at(&init_mm, address, ptep, |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index a18e497f1c3c..31e9e10f657e 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -824,11 +824,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
824 | device->backlight->props.brightness = | 824 | device->backlight->props.brightness = |
825 | acpi_video_get_brightness(device->backlight); | 825 | acpi_video_get_brightness(device->backlight); |
826 | 826 | ||
827 | result = sysfs_create_link(&device->backlight->dev.kobj, | ||
828 | &device->dev->dev.kobj, "device"); | ||
829 | if (result) | ||
830 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | ||
831 | |||
832 | device->cooling_dev = thermal_cooling_device_register("LCD", | 827 | device->cooling_dev = thermal_cooling_device_register("LCD", |
833 | device->dev, &video_cooling_ops); | 828 | device->dev, &video_cooling_ops); |
834 | if (IS_ERR(device->cooling_dev)) { | 829 | if (IS_ERR(device->cooling_dev)) { |
@@ -1381,7 +1376,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device) | |||
1381 | "Cant remove video notify handler\n"); | 1376 | "Cant remove video notify handler\n"); |
1382 | } | 1377 | } |
1383 | if (device->backlight) { | 1378 | if (device->backlight) { |
1384 | sysfs_remove_link(&device->backlight->dev.kobj, "device"); | ||
1385 | backlight_device_unregister(device->backlight); | 1379 | backlight_device_unregister(device->backlight); |
1386 | device->backlight = NULL; | 1380 | device->backlight = NULL; |
1387 | } | 1381 | } |
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index 5253b271b3fe..f6b3f995f58a 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c | |||
@@ -167,7 +167,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) | |||
167 | 167 | ||
168 | irq = platform_get_irq(pdev, 0); | 168 | irq = platform_get_irq(pdev, 0); |
169 | if (irq) | 169 | if (irq) |
170 | set_irq_type(irq, IRQ_TYPE_EDGE_RISING); | 170 | irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); |
171 | 171 | ||
172 | /* Setup expansion bus chip selects */ | 172 | /* Setup expansion bus chip selects */ |
173 | *data->cs0_cfg = data->cs0_bits; | 173 | *data->cs0_cfg = data->cs0_bits; |
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index baeaf938d55b..1b9d10d9c5d9 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c | |||
@@ -60,10 +60,10 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance) | |||
60 | struct rb532_cf_info *info = ah->private_data; | 60 | struct rb532_cf_info *info = ah->private_data; |
61 | 61 | ||
62 | if (gpio_get_value(info->gpio_line)) { | 62 | if (gpio_get_value(info->gpio_line)) { |
63 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); | 63 | irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); |
64 | ata_sff_interrupt(info->irq, dev_instance); | 64 | ata_sff_interrupt(info->irq, dev_instance); |
65 | } else { | 65 | } else { |
66 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); | 66 | irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); |
67 | } | 67 | } |
68 | 68 | ||
69 | return IRQ_HANDLED; | 69 | return IRQ_HANDLED; |
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index f0ae63d2df65..76210ba401ac 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c | |||
@@ -29,8 +29,6 @@ | |||
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <asm/kmap_types.h> | 30 | #include <asm/kmap_types.h> |
31 | 31 | ||
32 | #include <asm-generic/bitops/le.h> | ||
33 | |||
34 | #include "drbd_int.h" | 32 | #include "drbd_int.h" |
35 | 33 | ||
36 | 34 | ||
@@ -1184,10 +1182,10 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, | |||
1184 | p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); | 1182 | p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); |
1185 | 1183 | ||
1186 | if (find_zero_bit) | 1184 | if (find_zero_bit) |
1187 | i = generic_find_next_zero_le_bit(p_addr, | 1185 | i = find_next_zero_bit_le(p_addr, |
1188 | PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); | 1186 | PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); |
1189 | else | 1187 | else |
1190 | i = generic_find_next_le_bit(p_addr, | 1188 | i = find_next_bit_le(p_addr, |
1191 | PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); | 1189 | PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); |
1192 | 1190 | ||
1193 | __bm_unmap(p_addr, km); | 1191 | __bm_unmap(p_addr, km); |
@@ -1287,9 +1285,9 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, | |||
1287 | last_page_nr = page_nr; | 1285 | last_page_nr = page_nr; |
1288 | } | 1286 | } |
1289 | if (val) | 1287 | if (val) |
1290 | c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr)); | 1288 | c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); |
1291 | else | 1289 | else |
1292 | c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr)); | 1290 | c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); |
1293 | } | 1291 | } |
1294 | if (p_addr) | 1292 | if (p_addr) |
1295 | __bm_unmap(p_addr, km); | 1293 | __bm_unmap(p_addr, km); |
@@ -1438,7 +1436,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) | |||
1438 | bm_print_lock_info(mdev); | 1436 | bm_print_lock_info(mdev); |
1439 | if (bitnr < b->bm_bits) { | 1437 | if (bitnr < b->bm_bits) { |
1440 | p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); | 1438 | p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); |
1441 | i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0; | 1439 | i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0; |
1442 | bm_unmap(p_addr); | 1440 | bm_unmap(p_addr); |
1443 | } else if (bitnr == b->bm_bits) { | 1441 | } else if (bitnr == b->bm_bits) { |
1444 | i = -1; | 1442 | i = -1; |
@@ -1482,7 +1480,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi | |||
1482 | ERR_IF (bitnr >= b->bm_bits) { | 1480 | ERR_IF (bitnr >= b->bm_bits) { |
1483 | dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); | 1481 | dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); |
1484 | } else { | 1482 | } else { |
1485 | c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); | 1483 | c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); |
1486 | } | 1484 | } |
1487 | } | 1485 | } |
1488 | if (p_addr) | 1486 | if (p_addr) |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 1f46f1cd9225..7beb0e25f1e1 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -980,7 +980,7 @@ int tpm_open(struct inode *inode, struct file *file) | |||
980 | return -EBUSY; | 980 | return -EBUSY; |
981 | } | 981 | } |
982 | 982 | ||
983 | chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); | 983 | chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); |
984 | if (chip->data_buffer == NULL) { | 984 | if (chip->data_buffer == NULL) { |
985 | clear_bit(0, &chip->is_open); | 985 | clear_bit(0, &chip->is_open); |
986 | put_device(chip->dev); | 986 | put_device(chip->dev); |
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index dd8ebc75b667..ab8a4eff072a 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c | |||
@@ -94,9 +94,9 @@ static struct ipu_irq_map *src2map(unsigned int src) | |||
94 | return NULL; | 94 | return NULL; |
95 | } | 95 | } |
96 | 96 | ||
97 | static void ipu_irq_unmask(unsigned int irq) | 97 | static void ipu_irq_unmask(struct irq_data *d) |
98 | { | 98 | { |
99 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 99 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
100 | struct ipu_irq_bank *bank; | 100 | struct ipu_irq_bank *bank; |
101 | uint32_t reg; | 101 | uint32_t reg; |
102 | unsigned long lock_flags; | 102 | unsigned long lock_flags; |
@@ -106,7 +106,7 @@ static void ipu_irq_unmask(unsigned int irq) | |||
106 | bank = map->bank; | 106 | bank = map->bank; |
107 | if (!bank) { | 107 | if (!bank) { |
108 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 108 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
110 | return; | 110 | return; |
111 | } | 111 | } |
112 | 112 | ||
@@ -117,9 +117,9 @@ static void ipu_irq_unmask(unsigned int irq) | |||
117 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 117 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void ipu_irq_mask(unsigned int irq) | 120 | static void ipu_irq_mask(struct irq_data *d) |
121 | { | 121 | { |
122 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 122 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
123 | struct ipu_irq_bank *bank; | 123 | struct ipu_irq_bank *bank; |
124 | uint32_t reg; | 124 | uint32_t reg; |
125 | unsigned long lock_flags; | 125 | unsigned long lock_flags; |
@@ -129,7 +129,7 @@ static void ipu_irq_mask(unsigned int irq) | |||
129 | bank = map->bank; | 129 | bank = map->bank; |
130 | if (!bank) { | 130 | if (!bank) { |
131 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 131 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
133 | return; | 133 | return; |
134 | } | 134 | } |
135 | 135 | ||
@@ -140,9 +140,9 @@ static void ipu_irq_mask(unsigned int irq) | |||
140 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 140 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void ipu_irq_ack(unsigned int irq) | 143 | static void ipu_irq_ack(struct irq_data *d) |
144 | { | 144 | { |
145 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 145 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
146 | struct ipu_irq_bank *bank; | 146 | struct ipu_irq_bank *bank; |
147 | unsigned long lock_flags; | 147 | unsigned long lock_flags; |
148 | 148 | ||
@@ -151,7 +151,7 @@ static void ipu_irq_ack(unsigned int irq) | |||
151 | bank = map->bank; | 151 | bank = map->bank; |
152 | if (!bank) { | 152 | if (!bank) { |
153 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 153 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
155 | return; | 155 | return; |
156 | } | 156 | } |
157 | 157 | ||
@@ -167,7 +167,7 @@ static void ipu_irq_ack(unsigned int irq) | |||
167 | */ | 167 | */ |
168 | bool ipu_irq_status(unsigned int irq) | 168 | bool ipu_irq_status(unsigned int irq) |
169 | { | 169 | { |
170 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 170 | struct ipu_irq_map *map = irq_get_chip_data(irq); |
171 | struct ipu_irq_bank *bank; | 171 | struct ipu_irq_bank *bank; |
172 | unsigned long lock_flags; | 172 | unsigned long lock_flags; |
173 | bool ret; | 173 | bool ret; |
@@ -269,7 +269,7 @@ int ipu_irq_unmap(unsigned int source) | |||
269 | /* Chained IRQ handler for IPU error interrupt */ | 269 | /* Chained IRQ handler for IPU error interrupt */ |
270 | static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | 270 | static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) |
271 | { | 271 | { |
272 | struct ipu *ipu = get_irq_data(irq); | 272 | struct ipu *ipu = irq_get_handler_data(irq); |
273 | u32 status; | 273 | u32 status; |
274 | int i, line; | 274 | int i, line; |
275 | 275 | ||
@@ -310,7 +310,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | |||
310 | /* Chained IRQ handler for IPU function interrupt */ | 310 | /* Chained IRQ handler for IPU function interrupt */ |
311 | static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | 311 | static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) |
312 | { | 312 | { |
313 | struct ipu *ipu = get_irq_data(irq); | 313 | struct ipu *ipu = irq_desc_get_handler_data(desc); |
314 | u32 status; | 314 | u32 status; |
315 | int i, line; | 315 | int i, line; |
316 | 316 | ||
@@ -345,10 +345,10 @@ static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | |||
345 | } | 345 | } |
346 | 346 | ||
347 | static struct irq_chip ipu_irq_chip = { | 347 | static struct irq_chip ipu_irq_chip = { |
348 | .name = "ipu_irq", | 348 | .name = "ipu_irq", |
349 | .ack = ipu_irq_ack, | 349 | .irq_ack = ipu_irq_ack, |
350 | .mask = ipu_irq_mask, | 350 | .irq_mask = ipu_irq_mask, |
351 | .unmask = ipu_irq_unmask, | 351 | .irq_unmask = ipu_irq_unmask, |
352 | }; | 352 | }; |
353 | 353 | ||
354 | /* Install the IRQ handler */ | 354 | /* Install the IRQ handler */ |
@@ -366,26 +366,26 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) | |||
366 | int ret; | 366 | int ret; |
367 | 367 | ||
368 | irq = irq_base + i; | 368 | irq = irq_base + i; |
369 | ret = set_irq_chip(irq, &ipu_irq_chip); | 369 | ret = irq_set_chip(irq, &ipu_irq_chip); |
370 | if (ret < 0) | 370 | if (ret < 0) |
371 | return ret; | 371 | return ret; |
372 | ret = set_irq_chip_data(irq, irq_map + i); | 372 | ret = irq_set_chip_data(irq, irq_map + i); |
373 | if (ret < 0) | 373 | if (ret < 0) |
374 | return ret; | 374 | return ret; |
375 | irq_map[i].ipu = ipu; | 375 | irq_map[i].ipu = ipu; |
376 | irq_map[i].irq = irq; | 376 | irq_map[i].irq = irq; |
377 | irq_map[i].source = -EINVAL; | 377 | irq_map[i].source = -EINVAL; |
378 | set_irq_handler(irq, handle_level_irq); | 378 | irq_set_handler(irq, handle_level_irq); |
379 | #ifdef CONFIG_ARM | 379 | #ifdef CONFIG_ARM |
380 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 380 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
381 | #endif | 381 | #endif |
382 | } | 382 | } |
383 | 383 | ||
384 | set_irq_data(ipu->irq_fn, ipu); | 384 | irq_set_handler_data(ipu->irq_fn, ipu); |
385 | set_irq_chained_handler(ipu->irq_fn, ipu_irq_fn); | 385 | irq_set_chained_handler(ipu->irq_fn, ipu_irq_fn); |
386 | 386 | ||
387 | set_irq_data(ipu->irq_err, ipu); | 387 | irq_set_handler_data(ipu->irq_err, ipu); |
388 | set_irq_chained_handler(ipu->irq_err, ipu_irq_err); | 388 | irq_set_chained_handler(ipu->irq_err, ipu_irq_err); |
389 | 389 | ||
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
@@ -397,17 +397,17 @@ void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) | |||
397 | 397 | ||
398 | irq_base = pdata->irq_base; | 398 | irq_base = pdata->irq_base; |
399 | 399 | ||
400 | set_irq_chained_handler(ipu->irq_fn, NULL); | 400 | irq_set_chained_handler(ipu->irq_fn, NULL); |
401 | set_irq_data(ipu->irq_fn, NULL); | 401 | irq_set_handler_data(ipu->irq_fn, NULL); |
402 | 402 | ||
403 | set_irq_chained_handler(ipu->irq_err, NULL); | 403 | irq_set_chained_handler(ipu->irq_err, NULL); |
404 | set_irq_data(ipu->irq_err, NULL); | 404 | irq_set_handler_data(ipu->irq_err, NULL); |
405 | 405 | ||
406 | for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) { | 406 | for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) { |
407 | #ifdef CONFIG_ARM | 407 | #ifdef CONFIG_ARM |
408 | set_irq_flags(irq, 0); | 408 | set_irq_flags(irq, 0); |
409 | #endif | 409 | #endif |
410 | set_irq_chip(irq, NULL); | 410 | irq_set_chip(irq, NULL); |
411 | set_irq_chip_data(irq, NULL); | 411 | irq_set_chip_data(irq, NULL); |
412 | } | 412 | } |
413 | } | 413 | } |
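The ipu_irq.c hunks above are part of the generic-IRQ API migration: chip callbacks now receive a struct irq_data * instead of a bare IRQ number, and the per-IRQ chip data is read from that descriptor (irq_data_get_irq_chip_data()) rather than looked up by number. The toy program below is only a userspace model of that idea, with simplified stand-in types that are not the kernel definitions; it sketches why handing the callback its descriptor removes the need for a by-number lookup.

#include <stdio.h>

/* Simplified stand-ins for the kernel types -- not the real definitions. */
struct irq_data {
	unsigned int irq;	/* Linux IRQ number */
	void *chip_data;	/* per-IRQ driver context, set at setup time */
};

struct ipu_irq_map {
	const char *name;	/* stands in for the real map fields */
};

/* New-style callback: everything it needs arrives in the descriptor,
 * the way ipu_irq_mask(struct irq_data *d) does in the hunk above. */
static void toy_irq_mask(struct irq_data *d)
{
	struct ipu_irq_map *map = d->chip_data;	/* irq_data_get_irq_chip_data() */

	printf("mask irq %u (map %s)\n", d->irq, map->name);
}

int main(void)
{
	struct ipu_irq_map map = { .name = "ipu-ch0" };
	struct irq_data d = { .irq = 42, .chip_data = &map };

	toy_irq_mask(&d);	/* old style: toy_irq_mask(42) plus a global lookup */
	return 0;
}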
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 0be30e978c85..31e71c4fc831 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -2679,7 +2679,7 @@ static int __init amd64_edac_init(void) | |||
2679 | mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); | 2679 | mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); |
2680 | ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); | 2680 | ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); |
2681 | if (!(mcis && ecc_stngs)) | 2681 | if (!(mcis && ecc_stngs)) |
2682 | goto err_ret; | 2682 | goto err_free; |
2683 | 2683 | ||
2684 | msrs = msrs_alloc(); | 2684 | msrs = msrs_alloc(); |
2685 | if (!msrs) | 2685 | if (!msrs) |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d8d0cda2641d..d3b295305542 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -414,4 +414,9 @@ config GPIO_JANZ_TTL | |||
414 | This driver provides support for driving the pins in output | 414 | This driver provides support for driving the pins in output |
415 | mode only. Input mode is not supported. | 415 | mode only. Input mode is not supported. |
416 | 416 | ||
417 | config AB8500_GPIO | ||
418 | bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions" | ||
419 | depends on AB8500_CORE && BROKEN | ||
420 | help | ||
421 | Select this to enable the AB8500 IC GPIO driver | ||
417 | endif | 422 | endif |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 3351cf87b0ed..becef5954356 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -42,3 +42,4 @@ obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o | |||
42 | obj-$(CONFIG_GPIO_SX150X) += sx150x.o | 42 | obj-$(CONFIG_GPIO_SX150X) += sx150x.o |
43 | obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o | 43 | obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o |
44 | obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o | 44 | obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o |
45 | obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o | ||
diff --git a/drivers/gpio/ab8500-gpio.c b/drivers/gpio/ab8500-gpio.c new file mode 100644 index 000000000000..e7b834d054b7 --- /dev/null +++ b/drivers/gpio/ab8500-gpio.c | |||
@@ -0,0 +1,522 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson SA 2011 | ||
3 | * | ||
4 | * Author: BIBEK BASU <bibek.basu@stericsson.com> | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/gpio.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/mfd/ab8500.h> | ||
23 | #include <linux/mfd/abx500.h> | ||
24 | #include <linux/mfd/ab8500/gpio.h> | ||
25 | |||
26 | /* | ||
27 | * GPIO registers offset | ||
28 | * Bank: 0x10 | ||
29 | */ | ||
30 | #define AB8500_GPIO_SEL1_REG 0x00 | ||
31 | #define AB8500_GPIO_SEL2_REG 0x01 | ||
32 | #define AB8500_GPIO_SEL3_REG 0x02 | ||
33 | #define AB8500_GPIO_SEL4_REG 0x03 | ||
34 | #define AB8500_GPIO_SEL5_REG 0x04 | ||
35 | #define AB8500_GPIO_SEL6_REG 0x05 | ||
36 | |||
37 | #define AB8500_GPIO_DIR1_REG 0x10 | ||
38 | #define AB8500_GPIO_DIR2_REG 0x11 | ||
39 | #define AB8500_GPIO_DIR3_REG 0x12 | ||
40 | #define AB8500_GPIO_DIR4_REG 0x13 | ||
41 | #define AB8500_GPIO_DIR5_REG 0x14 | ||
42 | #define AB8500_GPIO_DIR6_REG 0x15 | ||
43 | |||
44 | #define AB8500_GPIO_OUT1_REG 0x20 | ||
45 | #define AB8500_GPIO_OUT2_REG 0x21 | ||
46 | #define AB8500_GPIO_OUT3_REG 0x22 | ||
47 | #define AB8500_GPIO_OUT4_REG 0x23 | ||
48 | #define AB8500_GPIO_OUT5_REG 0x24 | ||
49 | #define AB8500_GPIO_OUT6_REG 0x25 | ||
50 | |||
51 | #define AB8500_GPIO_PUD1_REG 0x30 | ||
52 | #define AB8500_GPIO_PUD2_REG 0x31 | ||
53 | #define AB8500_GPIO_PUD3_REG 0x32 | ||
54 | #define AB8500_GPIO_PUD4_REG 0x33 | ||
55 | #define AB8500_GPIO_PUD5_REG 0x34 | ||
56 | #define AB8500_GPIO_PUD6_REG 0x35 | ||
57 | |||
58 | #define AB8500_GPIO_IN1_REG 0x40 | ||
59 | #define AB8500_GPIO_IN2_REG 0x41 | ||
60 | #define AB8500_GPIO_IN3_REG 0x42 | ||
61 | #define AB8500_GPIO_IN4_REG 0x43 | ||
62 | #define AB8500_GPIO_IN5_REG 0x44 | ||
63 | #define AB8500_GPIO_IN6_REG 0x45 | ||
64 | #define AB8500_GPIO_ALTFUN_REG 0x45 | ||
65 | #define ALTFUN_REG_INDEX 6 | ||
66 | #define AB8500_NUM_GPIO 42 | ||
67 | #define AB8500_NUM_VIR_GPIO_IRQ 16 | ||
68 | |||
69 | enum ab8500_gpio_action { | ||
70 | NONE, | ||
71 | STARTUP, | ||
72 | SHUTDOWN, | ||
73 | MASK, | ||
74 | UNMASK | ||
75 | }; | ||
76 | |||
77 | struct ab8500_gpio { | ||
78 | struct gpio_chip chip; | ||
79 | struct ab8500 *parent; | ||
80 | struct device *dev; | ||
81 | struct mutex lock; | ||
82 | u32 irq_base; | ||
83 | enum ab8500_gpio_action irq_action; | ||
84 | u16 rising; | ||
85 | u16 falling; | ||
86 | }; | ||
87 | /** | ||
88 | * to_ab8500_gpio() - get the pointer to ab8500_gpio | ||
89 | * @chip: Member of the structure ab8500_gpio | ||
90 | */ | ||
91 | static inline struct ab8500_gpio *to_ab8500_gpio(struct gpio_chip *chip) | ||
92 | { | ||
93 | return container_of(chip, struct ab8500_gpio, chip); | ||
94 | } | ||
95 | |||
96 | static int ab8500_gpio_set_bits(struct gpio_chip *chip, u8 reg, | ||
97 | unsigned offset, int val) | ||
98 | { | ||
99 | struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); | ||
100 | u8 pos = offset % 8; | ||
101 | int ret; | ||
102 | |||
103 | reg = reg + (offset / 8); | ||
104 | ret = abx500_mask_and_set_register_interruptible(ab8500_gpio->dev, | ||
105 | AB8500_MISC, reg, 1 << pos, val << pos); | ||
106 | if (ret < 0) | ||
107 | dev_err(ab8500_gpio->dev, "%s write failed\n", __func__); | ||
108 | return ret; | ||
109 | } | ||
110 | /** | ||
111 | * ab8500_gpio_get() - Get the particular GPIO value | ||
112 | * @chip: Gpio device | ||
113 | * @offset: GPIO number to read | ||
114 | */ | ||
115 | static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset) | ||
116 | { | ||
117 | struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); | ||
118 | u8 mask = 1 << (offset % 8); | ||
119 | u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8); | ||
120 | int ret; | ||
121 | u8 data; | ||
122 | ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC, | ||
123 | reg, &data); | ||
124 | if (ret < 0) { | ||
125 | dev_err(ab8500_gpio->dev, "%s read failed\n", __func__); | ||
126 | return ret; | ||
127 | } | ||
128 | return (data & mask) >> (offset % 8); | ||
129 | } | ||
130 | |||
131 | static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val) | ||
132 | { | ||
133 | struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); | ||
134 | int ret; | ||
135 | /* Write the data */ | ||
136 | ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1); | ||
137 | if (ret < 0) | ||
138 | dev_err(ab8500_gpio->dev, "%s write failed\n", __func__); | ||
139 | } | ||
140 | |||
141 | static int ab8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | ||
142 | int val) | ||
143 | { | ||
144 | int ret; | ||
145 | /* set direction as output */ | ||
146 | ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1); | ||
147 | if (ret < 0) | ||
148 | return ret; | ||
149 | /* disable pull down */ | ||
150 | ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1); | ||
151 | if (ret < 0) | ||
152 | return ret; | ||
153 | /* set the output as 1 or 0 */ | ||
154 | return ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val); | ||
155 | |||
156 | } | ||
157 | |||
158 | static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | ||
159 | { | ||
160 | /* set the register as input */ | ||
161 | return ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0); | ||
162 | } | ||
163 | |||
164 | static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | ||
165 | { | ||
166 | /* | ||
167 | * Only some GPIOs are interrupt capable, and they are | ||
168 | * organized in discontiguous clusters: | ||
169 | * | ||
170 | * GPIO6 to GPIO13 | ||
171 | * GPIO24 and GPIO25 | ||
172 | * GPIO36 to GPIO41 | ||
173 | */ | ||
174 | static struct ab8500_gpio_irq_cluster { | ||
175 | int start; | ||
176 | int end; | ||
177 | } clusters[] = { | ||
178 | {.start = 6, .end = 13}, | ||
179 | {.start = 24, .end = 25}, | ||
180 | {.start = 36, .end = 41}, | ||
181 | }; | ||
182 | struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); | ||
183 | int base = ab8500_gpio->irq_base; | ||
184 | int i; | ||
185 | |||
186 | for (i = 0; i < ARRAY_SIZE(clusters); i++) { | ||
187 | struct ab8500_gpio_irq_cluster *cluster = &clusters[i]; | ||
188 | |||
189 | if (offset >= cluster->start && offset <= cluster->end) | ||
190 | return base + offset - cluster->start; | ||
191 | |||
192 | /* Advance by the number of gpios in this cluster */ | ||
193 | base += cluster->end - cluster->start + 1; | ||
194 | } | ||
195 | |||
196 | return -EINVAL; | ||
197 | } | ||
198 | |||
199 | static struct gpio_chip ab8500gpio_chip = { | ||
200 | .label = "ab8500_gpio", | ||
201 | .owner = THIS_MODULE, | ||
202 | .direction_input = ab8500_gpio_direction_input, | ||
203 | .get = ab8500_gpio_get, | ||
204 | .direction_output = ab8500_gpio_direction_output, | ||
205 | .set = ab8500_gpio_set, | ||
206 | .to_irq = ab8500_gpio_to_irq, | ||
207 | }; | ||
208 | |||
209 | static unsigned int irq_to_rising(unsigned int irq) | ||
210 | { | ||
211 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
212 | int offset = irq - ab8500_gpio->irq_base; | ||
213 | int new_irq = offset + AB8500_INT_GPIO6R | ||
214 | + ab8500_gpio->parent->irq_base; | ||
215 | return new_irq; | ||
216 | } | ||
217 | |||
218 | static unsigned int irq_to_falling(unsigned int irq) | ||
219 | { | ||
220 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
221 | int offset = irq - ab8500_gpio->irq_base; | ||
222 | int new_irq = offset + AB8500_INT_GPIO6F | ||
223 | + ab8500_gpio->parent->irq_base; | ||
224 | return new_irq; | ||
225 | |||
226 | } | ||
227 | |||
228 | static unsigned int rising_to_irq(unsigned int irq, void *dev) | ||
229 | { | ||
230 | struct ab8500_gpio *ab8500_gpio = dev; | ||
231 | int offset = irq - AB8500_INT_GPIO6R | ||
232 | - ab8500_gpio->parent->irq_base ; | ||
233 | int new_irq = offset + ab8500_gpio->irq_base; | ||
234 | return new_irq; | ||
235 | } | ||
236 | |||
237 | static unsigned int falling_to_irq(unsigned int irq, void *dev) | ||
238 | { | ||
239 | struct ab8500_gpio *ab8500_gpio = dev; | ||
240 | int offset = irq - AB8500_INT_GPIO6F | ||
241 | - ab8500_gpio->parent->irq_base ; | ||
242 | int new_irq = offset + ab8500_gpio->irq_base; | ||
243 | return new_irq; | ||
244 | |||
245 | } | ||
246 | |||
247 | /* | ||
248 | * IRQ handler | ||
249 | */ | ||
250 | |||
251 | static irqreturn_t handle_rising(int irq, void *dev) | ||
252 | { | ||
253 | |||
254 | handle_nested_irq(rising_to_irq(irq , dev)); | ||
255 | return IRQ_HANDLED; | ||
256 | } | ||
257 | |||
258 | static irqreturn_t handle_falling(int irq, void *dev) | ||
259 | { | ||
260 | |||
261 | handle_nested_irq(falling_to_irq(irq, dev)); | ||
262 | return IRQ_HANDLED; | ||
263 | } | ||
264 | |||
265 | static void ab8500_gpio_irq_lock(unsigned int irq) | ||
266 | { | ||
267 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
268 | mutex_lock(&ab8500_gpio->lock); | ||
269 | } | ||
270 | |||
271 | static void ab8500_gpio_irq_sync_unlock(unsigned int irq) | ||
272 | { | ||
273 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
274 | int offset = irq - ab8500_gpio->irq_base; | ||
275 | bool rising = ab8500_gpio->rising & BIT(offset); | ||
276 | bool falling = ab8500_gpio->falling & BIT(offset); | ||
277 | int ret; | ||
278 | |||
279 | switch (ab8500_gpio->irq_action) { | ||
280 | case STARTUP: | ||
281 | if (rising) | ||
282 | ret = request_threaded_irq(irq_to_rising(irq), | ||
283 | NULL, handle_rising, | ||
284 | IRQF_TRIGGER_RISING, | ||
285 | "ab8500-gpio-r", ab8500_gpio); | ||
286 | if (falling) | ||
287 | ret = request_threaded_irq(irq_to_falling(irq), | ||
288 | NULL, handle_falling, | ||
289 | IRQF_TRIGGER_FALLING, | ||
290 | "ab8500-gpio-f", ab8500_gpio); | ||
291 | break; | ||
292 | case SHUTDOWN: | ||
293 | if (rising) | ||
294 | free_irq(irq_to_rising(irq), ab8500_gpio); | ||
295 | if (falling) | ||
296 | free_irq(irq_to_falling(irq), ab8500_gpio); | ||
297 | break; | ||
298 | case MASK: | ||
299 | if (rising) | ||
300 | disable_irq(irq_to_rising(irq)); | ||
301 | if (falling) | ||
302 | disable_irq(irq_to_falling(irq)); | ||
303 | break; | ||
304 | case UNMASK: | ||
305 | if (rising) | ||
306 | enable_irq(irq_to_rising(irq)); | ||
307 | if (falling) | ||
308 | enable_irq(irq_to_falling(irq)); | ||
309 | break; | ||
310 | case NONE: | ||
311 | break; | ||
312 | } | ||
313 | ab8500_gpio->irq_action = NONE; | ||
314 | ab8500_gpio->rising &= ~(BIT(offset)); | ||
315 | ab8500_gpio->falling &= ~(BIT(offset)); | ||
316 | mutex_unlock(&ab8500_gpio->lock); | ||
317 | } | ||
318 | |||
319 | |||
320 | static void ab8500_gpio_irq_mask(unsigned int irq) | ||
321 | { | ||
322 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
323 | ab8500_gpio->irq_action = MASK; | ||
324 | } | ||
325 | |||
326 | static void ab8500_gpio_irq_unmask(unsigned int irq) | ||
327 | { | ||
328 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
329 | ab8500_gpio->irq_action = UNMASK; | ||
330 | } | ||
331 | |||
332 | static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type) | ||
333 | { | ||
334 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
335 | int offset = irq - ab8500_gpio->irq_base; | ||
336 | |||
337 | if (type == IRQ_TYPE_EDGE_BOTH) { | ||
338 | ab8500_gpio->rising = BIT(offset); | ||
339 | ab8500_gpio->falling = BIT(offset); | ||
340 | } else if (type == IRQ_TYPE_EDGE_RISING) { | ||
341 | ab8500_gpio->rising = BIT(offset); | ||
342 | } else { | ||
343 | ab8500_gpio->falling = BIT(offset); | ||
344 | } | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | unsigned int ab8500_gpio_irq_startup(unsigned int irq) | ||
349 | { | ||
350 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
351 | ab8500_gpio->irq_action = STARTUP; | ||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | void ab8500_gpio_irq_shutdown(unsigned int irq) | ||
356 | { | ||
357 | struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); | ||
358 | ab8500_gpio->irq_action = SHUTDOWN; | ||
359 | } | ||
360 | |||
361 | static struct irq_chip ab8500_gpio_irq_chip = { | ||
362 | .name = "ab8500-gpio", | ||
363 | .startup = ab8500_gpio_irq_startup, | ||
364 | .shutdown = ab8500_gpio_irq_shutdown, | ||
365 | .bus_lock = ab8500_gpio_irq_lock, | ||
366 | .bus_sync_unlock = ab8500_gpio_irq_sync_unlock, | ||
367 | .mask = ab8500_gpio_irq_mask, | ||
368 | .unmask = ab8500_gpio_irq_unmask, | ||
369 | .set_type = ab8500_gpio_irq_set_type, | ||
370 | }; | ||
371 | |||
372 | static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio) | ||
373 | { | ||
374 | u32 base = ab8500_gpio->irq_base; | ||
375 | int irq; | ||
376 | |||
377 | for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) { | ||
378 | set_irq_chip_data(irq, ab8500_gpio); | ||
379 | set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip, | ||
380 | handle_simple_irq); | ||
381 | set_irq_nested_thread(irq, 1); | ||
382 | #ifdef CONFIG_ARM | ||
383 | set_irq_flags(irq, IRQF_VALID); | ||
384 | #else | ||
385 | set_irq_noprobe(irq); | ||
386 | #endif | ||
387 | } | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio) | ||
393 | { | ||
394 | int base = ab8500_gpio->irq_base; | ||
395 | int irq; | ||
396 | |||
397 | for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ; irq++) { | ||
398 | #ifdef CONFIG_ARM | ||
399 | set_irq_flags(irq, 0); | ||
400 | #endif | ||
401 | set_irq_chip_and_handler(irq, NULL, NULL); | ||
402 | set_irq_chip_data(irq, NULL); | ||
403 | } | ||
404 | } | ||
405 | |||
406 | static int __devinit ab8500_gpio_probe(struct platform_device *pdev) | ||
407 | { | ||
408 | struct ab8500_platform_data *ab8500_pdata = | ||
409 | dev_get_platdata(pdev->dev.parent); | ||
410 | struct ab8500_gpio_platform_data *pdata; | ||
411 | struct ab8500_gpio *ab8500_gpio; | ||
412 | int ret; | ||
413 | int i; | ||
414 | |||
415 | pdata = ab8500_pdata->gpio; | ||
416 | if (!pdata) { | ||
417 | dev_err(&pdev->dev, "gpio platform data missing\n"); | ||
418 | return -ENODEV; | ||
419 | } | ||
420 | |||
421 | ab8500_gpio = kzalloc(sizeof(struct ab8500_gpio), GFP_KERNEL); | ||
422 | if (ab8500_gpio == NULL) { | ||
423 | dev_err(&pdev->dev, "failed to allocate memory\n"); | ||
424 | return -ENOMEM; | ||
425 | } | ||
426 | ab8500_gpio->dev = &pdev->dev; | ||
427 | ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent); | ||
428 | ab8500_gpio->chip = ab8500gpio_chip; | ||
429 | ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO; | ||
430 | ab8500_gpio->chip.dev = &pdev->dev; | ||
431 | ab8500_gpio->chip.base = pdata->gpio_base; | ||
432 | ab8500_gpio->irq_base = pdata->irq_base; | ||
433 | /* initialize the lock */ | ||
434 | mutex_init(&ab8500_gpio->lock); | ||
435 | /* | ||
436 | * AB8500 core will handle and clear the IRQ. | ||
437 | * Configure the GPIOs based on the config-reg values; | ||
438 | * these values select whether each pin is used as a | ||
439 | * GPIO or as an alternate function. | ||
440 | */ | ||
441 | for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) { | ||
442 | ret = abx500_set_register_interruptible(ab8500_gpio->dev, | ||
443 | AB8500_MISC, i, | ||
444 | pdata->config_reg[i]); | ||
445 | if (ret < 0) | ||
446 | goto out_free; | ||
447 | } | ||
448 | ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC, | ||
449 | AB8500_GPIO_ALTFUN_REG, | ||
450 | pdata->config_reg[ALTFUN_REG_INDEX]); | ||
451 | if (ret < 0) | ||
452 | goto out_free; | ||
453 | |||
454 | ret = ab8500_gpio_irq_init(ab8500_gpio); | ||
455 | if (ret) | ||
456 | goto out_free; | ||
457 | ret = gpiochip_add(&ab8500_gpio->chip); | ||
458 | if (ret) { | ||
459 | dev_err(&pdev->dev, "unable to add gpiochip: %d\n", | ||
460 | ret); | ||
461 | goto out_rem_irq; | ||
462 | } | ||
463 | platform_set_drvdata(pdev, ab8500_gpio); | ||
464 | return 0; | ||
465 | |||
466 | out_rem_irq: | ||
467 | ab8500_gpio_irq_remove(ab8500_gpio); | ||
468 | out_free: | ||
469 | mutex_destroy(&ab8500_gpio->lock); | ||
470 | kfree(ab8500_gpio); | ||
471 | return ret; | ||
472 | } | ||
473 | |||
474 | /* | ||
475 | * ab8500_gpio_remove() - remove Ab8500-gpio driver | ||
476 | * @pdev : Platform device registered | ||
477 | */ | ||
478 | static int __devexit ab8500_gpio_remove(struct platform_device *pdev) | ||
479 | { | ||
480 | struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev); | ||
481 | int ret; | ||
482 | |||
483 | ret = gpiochip_remove(&ab8500_gpio->chip); | ||
484 | if (ret < 0) { | ||
485 | dev_err(ab8500_gpio->dev, "unable to remove gpiochip: %d\n", | ||
486 | ret); | ||
487 | return ret; | ||
488 | } | ||
489 | |||
490 | platform_set_drvdata(pdev, NULL); | ||
491 | mutex_destroy(&ab8500_gpio->lock); | ||
492 | kfree(ab8500_gpio); | ||
493 | |||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | static struct platform_driver ab8500_gpio_driver = { | ||
498 | .driver = { | ||
499 | .name = "ab8500-gpio", | ||
500 | .owner = THIS_MODULE, | ||
501 | }, | ||
502 | .probe = ab8500_gpio_probe, | ||
503 | .remove = __devexit_p(ab8500_gpio_remove), | ||
504 | }; | ||
505 | |||
506 | static int __init ab8500_gpio_init(void) | ||
507 | { | ||
508 | return platform_driver_register(&ab8500_gpio_driver); | ||
509 | } | ||
510 | arch_initcall(ab8500_gpio_init); | ||
511 | |||
512 | static void __exit ab8500_gpio_exit(void) | ||
513 | { | ||
514 | platform_driver_unregister(&ab8500_gpio_driver); | ||
515 | } | ||
516 | module_exit(ab8500_gpio_exit); | ||
517 | |||
518 | MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>"); | ||
519 | MODULE_DESCRIPTION("Driver allows unused AB8500 pins " | ||
520 | "to be used as GPIO"); | ||
521 | MODULE_ALIAS("AB8500 GPIO driver"); | ||
522 | MODULE_LICENSE("GPL v2"); | ||
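ab8500_gpio_to_irq() above folds the three interrupt-capable clusters (offsets 6-13, 24-25 and 36-41) into one contiguous block of 16 virtual IRQs starting at irq_base, which is exactly AB8500_NUM_VIR_GPIO_IRQ. The standalone program below re-implements only that arithmetic so the mapping can be checked by hand; the irq_base value of 100 is an arbitrary number chosen for illustration.

#include <stdio.h>

/* Same cluster table as ab8500_gpio_to_irq() in the file above. */
static const struct { int start, end; } clusters[] = {
	{  6, 13 },
	{ 24, 25 },
	{ 36, 41 },
};

static int offset_to_virq(int irq_base, unsigned int offset)
{
	int base = irq_base;
	unsigned int i;

	for (i = 0; i < sizeof(clusters) / sizeof(clusters[0]); i++) {
		if (offset >= (unsigned int)clusters[i].start &&
		    offset <= (unsigned int)clusters[i].end)
			return base + offset - clusters[i].start;
		/* advance past the IRQs consumed by this cluster */
		base += clusters[i].end - clusters[i].start + 1;
	}
	return -1;	/* not an interrupt-capable offset (-EINVAL above) */
}

int main(void)
{
	int irq_base = 100;		/* arbitrary base, for illustration only */
	unsigned int offsets[] = { 6, 13, 24, 25, 36, 41, 5, 20 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("offset %2u -> virq %d\n", offsets[i],
		       offset_to_virq(irq_base, offsets[i]));
	return 0;
}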
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index f141a1de519c..89aa9fb743af 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c | |||
@@ -116,7 +116,7 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data, | |||
116 | return 0; | 116 | return 0; |
117 | 117 | ||
118 | INIT_WORK(&fan_data->alarm_work, fan_alarm_notify); | 118 | INIT_WORK(&fan_data->alarm_work, fan_alarm_notify); |
119 | set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH); | 119 | irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH); |
120 | err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED, | 120 | err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED, |
121 | "GPIO fan alarm", fan_data); | 121 | "GPIO fan alarm", fan_data); |
122 | if (err) | 122 | if (err) |
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index b732870ecc89..71f744a8e686 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c | |||
@@ -809,7 +809,7 @@ static int lm8323_suspend(struct device *dev) | |||
809 | struct lm8323_chip *lm = i2c_get_clientdata(client); | 809 | struct lm8323_chip *lm = i2c_get_clientdata(client); |
810 | int i; | 810 | int i; |
811 | 811 | ||
812 | set_irq_wake(client->irq, 0); | 812 | irq_set_irq_wake(client->irq, 0); |
813 | disable_irq(client->irq); | 813 | disable_irq(client->irq); |
814 | 814 | ||
815 | mutex_lock(&lm->lock); | 815 | mutex_lock(&lm->lock); |
@@ -838,7 +838,7 @@ static int lm8323_resume(struct device *dev) | |||
838 | led_classdev_resume(&lm->pwm[i].cdev); | 838 | led_classdev_resume(&lm->pwm[i].cdev); |
839 | 839 | ||
840 | enable_irq(client->irq); | 840 | enable_irq(client->irq); |
841 | set_irq_wake(client->irq, 1); | 841 | irq_set_irq_wake(client->irq, 1); |
842 | 842 | ||
843 | return 0; | 843 | return 0; |
844 | } | 844 | } |
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c index ebe955325677..4b2a42f9f0bb 100644 --- a/drivers/input/serio/ams_delta_serio.c +++ b/drivers/input/serio/ams_delta_serio.c | |||
@@ -149,7 +149,7 @@ static int __init ams_delta_serio_init(void) | |||
149 | * at FIQ level, switch back from edge to simple interrupt handler | 149 | * at FIQ level, switch back from edge to simple interrupt handler |
150 | * to avoid bad interaction. | 150 | * to avoid bad interaction. |
151 | */ | 151 | */ |
152 | set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), | 152 | irq_set_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), |
153 | handle_simple_irq); | 153 | handle_simple_irq); |
154 | 154 | ||
155 | serio_register_port(ams_delta_serio); | 155 | serio_register_port(ams_delta_serio); |
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c index b6b8b1c7ecea..3242e7076258 100644 --- a/drivers/input/touchscreen/mainstone-wm97xx.c +++ b/drivers/input/touchscreen/mainstone-wm97xx.c | |||
@@ -219,7 +219,7 @@ static int wm97xx_acc_startup(struct wm97xx *wm) | |||
219 | } | 219 | } |
220 | 220 | ||
221 | wm->pen_irq = gpio_to_irq(irq); | 221 | wm->pen_irq = gpio_to_irq(irq); |
222 | set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH); | 222 | irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH); |
223 | } else /* pen irq not supported */ | 223 | } else /* pen irq not supported */ |
224 | pen_int = 0; | 224 | pen_int = 0; |
225 | 225 | ||
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c index 048849867643..5b0f15ec874a 100644 --- a/drivers/input/touchscreen/zylonite-wm97xx.c +++ b/drivers/input/touchscreen/zylonite-wm97xx.c | |||
@@ -193,7 +193,7 @@ static int zylonite_wm97xx_probe(struct platform_device *pdev) | |||
193 | gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26); | 193 | gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26); |
194 | 194 | ||
195 | wm->pen_irq = IRQ_GPIO(gpio_touch_irq); | 195 | wm->pen_irq = IRQ_GPIO(gpio_touch_irq); |
196 | set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH); | 196 | irq_set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH); |
197 | 197 | ||
198 | wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN, | 198 | wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN, |
199 | WM97XX_GPIO_POL_HIGH, | 199 | WM97XX_GPIO_POL_HIGH, |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 06ecea751a39..8b66e04c2ea6 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1777,12 +1777,6 @@ int md_integrity_register(mddev_t *mddev) | |||
1777 | continue; | 1777 | continue; |
1778 | if (rdev->raid_disk < 0) | 1778 | if (rdev->raid_disk < 0) |
1779 | continue; | 1779 | continue; |
1780 | /* | ||
1781 | * If at least one rdev is not integrity capable, we can not | ||
1782 | * enable data integrity for the md device. | ||
1783 | */ | ||
1784 | if (!bdev_get_integrity(rdev->bdev)) | ||
1785 | return -EINVAL; | ||
1786 | if (!reference) { | 1780 | if (!reference) { |
1787 | /* Use the first rdev as the reference */ | 1781 | /* Use the first rdev as the reference */ |
1788 | reference = rdev; | 1782 | reference = rdev; |
@@ -1793,6 +1787,8 @@ int md_integrity_register(mddev_t *mddev) | |||
1793 | rdev->bdev->bd_disk) < 0) | 1787 | rdev->bdev->bd_disk) < 0) |
1794 | return -EINVAL; | 1788 | return -EINVAL; |
1795 | } | 1789 | } |
1790 | if (!reference || !bdev_get_integrity(reference->bdev)) | ||
1791 | return 0; | ||
1796 | /* | 1792 | /* |
1797 | * All component devices are integrity capable and have matching | 1793 | * All component devices are integrity capable and have matching |
1798 | * profiles, register the common profile for the md device. | 1794 | * profiles, register the common profile for the md device. |
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index 767406c95291..700d420a59ac 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/swab.h> | 23 | #include <linux/swab.h> |
24 | #include "r592.h" | 24 | #include "r592.h" |
25 | 25 | ||
26 | static int enable_dma = 1; | 26 | static int r592_enable_dma = 1; |
27 | static int debug; | 27 | static int debug; |
28 | 28 | ||
29 | static const char *tpc_names[] = { | 29 | static const char *tpc_names[] = { |
@@ -267,7 +267,7 @@ static void r592_stop_dma(struct r592_device *dev, int error) | |||
267 | /* Test if hardware supports DMA */ | 267 | /* Test if hardware supports DMA */ |
268 | static void r592_check_dma(struct r592_device *dev) | 268 | static void r592_check_dma(struct r592_device *dev) |
269 | { | 269 | { |
270 | dev->dma_capable = enable_dma && | 270 | dev->dma_capable = r592_enable_dma && |
271 | (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) & | 271 | (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) & |
272 | R592_FIFO_DMA_SETTINGS_CAP); | 272 | R592_FIFO_DMA_SETTINGS_CAP); |
273 | } | 273 | } |
@@ -898,7 +898,7 @@ static void __exit r592_module_exit(void) | |||
898 | module_init(r592_module_init); | 898 | module_init(r592_module_init); |
899 | module_exit(r592_module_exit); | 899 | module_exit(r592_module_exit); |
900 | 900 | ||
901 | module_param(enable_dma, bool, S_IRUGO); | 901 | module_param_named(enable_dma, r592_enable_dma, bool, S_IRUGO); |
902 | MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); | 902 | MODULE_PARM_DESC(enable_dma, "Enable usage of the DMA (default)"); |
903 | module_param(debug, int, S_IRUGO | S_IWUSR); | 903 | module_param(debug, int, S_IRUGO | S_IWUSR); |
904 | MODULE_PARM_DESC(debug, "Debug level (0-3)"); | 904 | MODULE_PARM_DESC(debug, "Debug level (0-3)"); |
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index e986f91fff9c..e2fea580585a 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -60,15 +60,6 @@ config MFD_ASIC3 | |||
60 | This driver supports the ASIC3 multifunction chip found on many | 60 | This driver supports the ASIC3 multifunction chip found on many |
61 | PDAs (mainly iPAQ and HTC based ones) | 61 | PDAs (mainly iPAQ and HTC based ones) |
62 | 62 | ||
63 | config MFD_SH_MOBILE_SDHI | ||
64 | bool "Support for SuperH Mobile SDHI" | ||
65 | depends on SUPERH || ARCH_SHMOBILE | ||
66 | select MFD_CORE | ||
67 | select TMIO_MMC_DMA | ||
68 | ---help--- | ||
69 | This driver supports the SDHI hardware block found in many | ||
70 | SuperH Mobile SoCs. | ||
71 | |||
72 | config MFD_DAVINCI_VOICECODEC | 63 | config MFD_DAVINCI_VOICECODEC |
73 | tristate | 64 | tristate |
74 | select MFD_CORE | 65 | select MFD_CORE |
@@ -266,11 +257,6 @@ config MFD_TMIO | |||
266 | bool | 257 | bool |
267 | default n | 258 | default n |
268 | 259 | ||
269 | config TMIO_MMC_DMA | ||
270 | bool | ||
271 | select DMA_ENGINE | ||
272 | select DMADEVICES | ||
273 | |||
274 | config MFD_T7L66XB | 260 | config MFD_T7L66XB |
275 | bool "Support Toshiba T7L66XB" | 261 | bool "Support Toshiba T7L66XB" |
276 | depends on ARM && HAVE_CLK | 262 | depends on ARM && HAVE_CLK |
@@ -592,7 +578,7 @@ config AB3550_CORE | |||
592 | config MFD_CS5535 | 578 | config MFD_CS5535 |
593 | tristate "Support for CS5535 and CS5536 southbridge core functions" | 579 | tristate "Support for CS5535 and CS5536 southbridge core functions" |
594 | select MFD_CORE | 580 | select MFD_CORE |
595 | depends on PCI | 581 | depends on PCI && X86 |
596 | ---help--- | 582 | ---help--- |
597 | This is the core driver for CS5535/CS5536 MFD functions. This is | 583 | This is the core driver for CS5535/CS5536 MFD functions. This is |
598 | necessary for using the board's GPIO and MFGPT functionality. | 584 | necessary for using the board's GPIO and MFGPT functionality. |
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index ef489f253402..419caa9d7dcf 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile | |||
@@ -6,7 +6,6 @@ | |||
6 | obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o | 6 | obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o |
7 | obj-$(CONFIG_MFD_SM501) += sm501.o | 7 | obj-$(CONFIG_MFD_SM501) += sm501.o |
8 | obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o | 8 | obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o |
9 | obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o | ||
10 | 9 | ||
11 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o | 10 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o |
12 | obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o | 11 | obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o |
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 62e33e2258d4..67d01c938284 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c | |||
@@ -362,6 +362,15 @@ static void ab8500_irq_remove(struct ab8500 *ab8500) | |||
362 | } | 362 | } |
363 | } | 363 | } |
364 | 364 | ||
365 | static struct resource ab8500_gpio_resources[] = { | ||
366 | { | ||
367 | .name = "GPIO_INT6", | ||
368 | .start = AB8500_INT_GPIO6R, | ||
369 | .end = AB8500_INT_GPIO41F, | ||
370 | .flags = IORESOURCE_IRQ, | ||
371 | } | ||
372 | }; | ||
373 | |||
365 | static struct resource ab8500_gpadc_resources[] = { | 374 | static struct resource ab8500_gpadc_resources[] = { |
366 | { | 375 | { |
367 | .name = "HW_CONV_END", | 376 | .name = "HW_CONV_END", |
@@ -596,6 +605,11 @@ static struct mfd_cell ab8500_devs[] = { | |||
596 | .name = "ab8500-regulator", | 605 | .name = "ab8500-regulator", |
597 | }, | 606 | }, |
598 | { | 607 | { |
608 | .name = "ab8500-gpio", | ||
609 | .num_resources = ARRAY_SIZE(ab8500_gpio_resources), | ||
610 | .resources = ab8500_gpio_resources, | ||
611 | }, | ||
612 | { | ||
599 | .name = "ab8500-gpadc", | 613 | .name = "ab8500-gpadc", |
600 | .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), | 614 | .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), |
601 | .resources = ab8500_gpadc_resources, | 615 | .resources = ab8500_gpadc_resources, |
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c index 6820327adf4a..821e6b86afd2 100644 --- a/drivers/mfd/ab8500-i2c.c +++ b/drivers/mfd/ab8500-i2c.c | |||
@@ -97,7 +97,7 @@ static void __exit ab8500_i2c_exit(void) | |||
97 | { | 97 | { |
98 | platform_driver_unregister(&ab8500_i2c_driver); | 98 | platform_driver_unregister(&ab8500_i2c_driver); |
99 | } | 99 | } |
100 | subsys_initcall(ab8500_i2c_init); | 100 | arch_initcall(ab8500_i2c_init); |
101 | module_exit(ab8500_i2c_exit); | 101 | module_exit(ab8500_i2c_exit); |
102 | 102 | ||
103 | MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com"); | 103 | MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com"); |
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 28852dfa310d..20e4e9395b61 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c | |||
@@ -373,7 +373,7 @@ static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, | |||
373 | 373 | ||
374 | if (gru_irq_count[chiplet] == 0) { | 374 | if (gru_irq_count[chiplet] == 0) { |
375 | gru_chip[chiplet].name = irq_name; | 375 | gru_chip[chiplet].name = irq_name; |
376 | ret = set_irq_chip(irq, &gru_chip[chiplet]); | 376 | ret = irq_set_chip(irq, &gru_chip[chiplet]); |
377 | if (ret) { | 377 | if (ret) { |
378 | printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", | 378 | printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", |
379 | GRU_DRIVER_ID_STR, -ret); | 379 | GRU_DRIVER_ID_STR, -ret); |
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 5ec8eddfcf6e..f5cedeccad42 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c | |||
@@ -1875,7 +1875,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, | |||
1875 | unsigned int tot_sz, int max_scatter) | 1875 | unsigned int tot_sz, int max_scatter) |
1876 | { | 1876 | { |
1877 | unsigned int dev_addr, i, cnt, sz, ssz; | 1877 | unsigned int dev_addr, i, cnt, sz, ssz; |
1878 | struct timespec ts1, ts2, ts; | 1878 | struct timespec ts1, ts2; |
1879 | int ret; | 1879 | int ret; |
1880 | 1880 | ||
1881 | sz = test->area.max_tfr; | 1881 | sz = test->area.max_tfr; |
@@ -1912,7 +1912,6 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, | |||
1912 | } | 1912 | } |
1913 | getnstimeofday(&ts2); | 1913 | getnstimeofday(&ts2); |
1914 | 1914 | ||
1915 | ts = timespec_sub(ts2, ts1); | ||
1916 | mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); | 1915 | mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); |
1917 | 1916 | ||
1918 | return 0; | 1917 | return 0; |
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 797cdb5887fd..76af349c14b4 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * your option) any later version. | 9 | * your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | ||
12 | #include <linux/types.h> | 13 | #include <linux/types.h> |
13 | #include <linux/scatterlist.h> | 14 | #include <linux/scatterlist.h> |
14 | 15 | ||
@@ -252,6 +253,7 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | |||
252 | struct mmc_command cmd; | 253 | struct mmc_command cmd; |
253 | struct mmc_data data; | 254 | struct mmc_data data; |
254 | struct scatterlist sg; | 255 | struct scatterlist sg; |
256 | void *data_buf; | ||
255 | 257 | ||
256 | BUG_ON(!card); | 258 | BUG_ON(!card); |
257 | BUG_ON(!card->host); | 259 | BUG_ON(!card->host); |
@@ -263,6 +265,13 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | |||
263 | if (err) | 265 | if (err) |
264 | return err; | 266 | return err; |
265 | 267 | ||
268 | /* dma onto stack is unsafe/nonportable, but callers to this | ||
269 | * routine normally provide temporary on-stack buffers ... | ||
270 | */ | ||
271 | data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL); | ||
272 | if (data_buf == NULL) | ||
273 | return -ENOMEM; | ||
274 | |||
266 | memset(&mrq, 0, sizeof(struct mmc_request)); | 275 | memset(&mrq, 0, sizeof(struct mmc_request)); |
267 | memset(&cmd, 0, sizeof(struct mmc_command)); | 276 | memset(&cmd, 0, sizeof(struct mmc_command)); |
268 | memset(&data, 0, sizeof(struct mmc_data)); | 277 | memset(&data, 0, sizeof(struct mmc_data)); |
@@ -280,12 +289,15 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | |||
280 | data.sg = &sg; | 289 | data.sg = &sg; |
281 | data.sg_len = 1; | 290 | data.sg_len = 1; |
282 | 291 | ||
283 | sg_init_one(&sg, scr, 8); | 292 | sg_init_one(&sg, data_buf, 8); |
284 | 293 | ||
285 | mmc_set_data_timeout(&data, card); | 294 | mmc_set_data_timeout(&data, card); |
286 | 295 | ||
287 | mmc_wait_for_req(card->host, &mrq); | 296 | mmc_wait_for_req(card->host, &mrq); |
288 | 297 | ||
298 | memcpy(scr, data_buf, sizeof(card->raw_scr)); | ||
299 | kfree(data_buf); | ||
300 | |||
289 | if (cmd.error) | 301 | if (cmd.error) |
290 | return cmd.error; | 302 | return cmd.error; |
291 | if (data.error) | 303 | if (data.error) |
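The mmc_app_send_scr() change above stops mapping the caller's scr buffer (often on the stack) for the data transfer and instead DMAs into a kmalloc'd bounce buffer, copying the result out afterwards. Below is a plain userspace sketch of that bounce-buffer pattern; fill_from_device() is a made-up stand-in for the hardware transfer, not an MMC API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the device/DMA engine filling a buffer. */
static void fill_from_device(void *buf, size_t len)
{
	memset(buf, 0xab, len);
}

/* Read len bytes for the caller without handing its (possibly
 * stack-allocated) buffer to the device directly. */
static int read_via_bounce_buffer(void *dst, size_t len)
{
	void *bounce = malloc(len);	/* kmalloc() in the kernel code */

	if (!bounce)
		return -1;		/* -ENOMEM in the kernel code */

	fill_from_device(bounce, len);	/* device only ever sees the heap buffer */
	memcpy(dst, bounce, len);	/* copy back, like memcpy(scr, data_buf, ...) above */
	free(bounce);
	return 0;
}

int main(void)
{
	unsigned char scr[8];		/* caller's buffer lives on the stack */

	if (read_via_bounce_buffer(scr, sizeof(scr)) == 0)
		printf("first byte: 0x%02x\n", scr[0]);
	return 0;
}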
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 1a21c6427a19..94df40531c38 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -439,13 +439,25 @@ config MMC_SDRICOH_CS | |||
439 | To compile this driver as a module, choose M here: the | 439 | To compile this driver as a module, choose M here: the |
440 | module will be called sdricoh_cs. | 440 | module will be called sdricoh_cs. |
441 | 441 | ||
442 | config MMC_TMIO_CORE | ||
443 | tristate | ||
444 | |||
442 | config MMC_TMIO | 445 | config MMC_TMIO |
443 | tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" | 446 | tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" |
444 | depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI | 447 | depends on MFD_TMIO || MFD_ASIC3 |
448 | select MMC_TMIO_CORE | ||
445 | help | 449 | help |
446 | This provides support for the SD/MMC cell found in TC6393XB, | 450 | This provides support for the SD/MMC cell found in TC6393XB, |
447 | T7L66XB and also HTC ASIC3 | 451 | T7L66XB and also HTC ASIC3 |
448 | 452 | ||
453 | config MMC_SDHI | ||
454 | tristate "SH-Mobile SDHI SD/SDIO controller support" | ||
455 | depends on SUPERH || ARCH_SHMOBILE | ||
456 | select MMC_TMIO_CORE | ||
457 | help | ||
458 | This provides support for the SDHI SD/SDIO controller found in | ||
459 | SuperH and ARM SH-Mobile SoCs | ||
460 | |||
449 | config MMC_CB710 | 461 | config MMC_CB710 |
450 | tristate "ENE CB710 MMC/SD Interface support" | 462 | tristate "ENE CB710 MMC/SD Interface support" |
451 | depends on PCI | 463 | depends on PCI |
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 30aa6867745f..4f1df0aae574 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile | |||
@@ -29,7 +29,13 @@ endif | |||
29 | obj-$(CONFIG_MMC_S3C) += s3cmci.o | 29 | obj-$(CONFIG_MMC_S3C) += s3cmci.o |
30 | obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o | 30 | obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o |
31 | obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o | 31 | obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o |
32 | obj-$(CONFIG_MMC_CB710) += cb710-mmc.o | 32 | obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o |
33 | tmio_mmc_core-y := tmio_mmc_pio.o | ||
34 | ifneq ($(CONFIG_MMC_SDHI),n) | ||
35 | tmio_mmc_core-y += tmio_mmc_dma.o | ||
36 | endif | ||
37 | obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o | ||
38 | obj-$(CONFIG_MMC_CB710) += cb710-mmc.o | ||
33 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o | 39 | obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o |
34 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o | 40 | obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o |
35 | obj-$(CONFIG_MMC_DW) += dw_mmc.o | 41 | obj-$(CONFIG_MMC_DW) += dw_mmc.o |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 5a614069cb00..87e1f57ec9ba 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -316,7 +316,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host) | |||
316 | 316 | ||
317 | /* Stop the IDMAC running */ | 317 | /* Stop the IDMAC running */ |
318 | temp = mci_readl(host, BMOD); | 318 | temp = mci_readl(host, BMOD); |
319 | temp &= ~SDMMC_IDMAC_ENABLE; | 319 | temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); |
320 | mci_writel(host, BMOD, temp); | 320 | mci_writel(host, BMOD, temp); |
321 | } | 321 | } |
322 | 322 | ||
@@ -385,7 +385,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) | |||
385 | 385 | ||
386 | /* Enable the IDMAC */ | 386 | /* Enable the IDMAC */ |
387 | temp = mci_readl(host, BMOD); | 387 | temp = mci_readl(host, BMOD); |
388 | temp |= SDMMC_IDMAC_ENABLE; | 388 | temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; |
389 | mci_writel(host, BMOD, temp); | 389 | mci_writel(host, BMOD, temp); |
390 | 390 | ||
391 | /* Start it running */ | 391 | /* Start it running */ |
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 5bbb87d10251..b4a7e4fba90f 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -68,6 +68,12 @@ static struct variant_data variant_arm = { | |||
68 | .datalength_bits = 16, | 68 | .datalength_bits = 16, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static struct variant_data variant_arm_extended_fifo = { | ||
72 | .fifosize = 128 * 4, | ||
73 | .fifohalfsize = 64 * 4, | ||
74 | .datalength_bits = 16, | ||
75 | }; | ||
76 | |||
71 | static struct variant_data variant_u300 = { | 77 | static struct variant_data variant_u300 = { |
72 | .fifosize = 16 * 4, | 78 | .fifosize = 16 * 4, |
73 | .fifohalfsize = 8 * 4, | 79 | .fifohalfsize = 8 * 4, |
@@ -1277,10 +1283,15 @@ static int mmci_resume(struct amba_device *dev) | |||
1277 | static struct amba_id mmci_ids[] = { | 1283 | static struct amba_id mmci_ids[] = { |
1278 | { | 1284 | { |
1279 | .id = 0x00041180, | 1285 | .id = 0x00041180, |
1280 | .mask = 0x000fffff, | 1286 | .mask = 0xff0fffff, |
1281 | .data = &variant_arm, | 1287 | .data = &variant_arm, |
1282 | }, | 1288 | }, |
1283 | { | 1289 | { |
1290 | .id = 0x01041180, | ||
1291 | .mask = 0xff0fffff, | ||
1292 | .data = &variant_arm_extended_fifo, | ||
1293 | }, | ||
1294 | { | ||
1284 | .id = 0x00041181, | 1295 | .id = 0x00041181, |
1285 | .mask = 0x000fffff, | 1296 | .mask = 0x000fffff, |
1286 | .data = &variant_arm, | 1297 | .data = &variant_arm, |
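The mmci_ids[] change above widens the first entry's mask to 0xff0fffff so that peripheral ID 0x01041180 (the extended-FIFO part) no longer matches the plain variant_arm entry before reaching its own. AMBA matching works roughly as (periphid & mask) == id with the first match winning; the small program below re-implements that check locally (it is not the kernel's amba_lookup()) and compares the old single-entry table against the new one.

#include <stdio.h>

struct toy_amba_id {
	unsigned int id;
	unsigned int mask;
	const char *variant;
};

/* Old table: only the plain entry, with the narrow 0x000fffff mask. */
static const struct toy_amba_id old_table[] = {
	{ 0x00041180, 0x000fffff, "variant_arm" },
	{ 0, 0, NULL },
};

/* New table: wider mask plus a dedicated extended-FIFO entry, as in the hunk. */
static const struct toy_amba_id new_table[] = {
	{ 0x00041180, 0xff0fffff, "variant_arm" },
	{ 0x01041180, 0xff0fffff, "variant_arm_extended_fifo" },
	{ 0, 0, NULL },
};

/* First-match-wins lookup, mirroring (periphid & mask) == id. */
static const char *match(const struct toy_amba_id *t, unsigned int periphid)
{
	for (; t->mask; t++)
		if ((periphid & t->mask) == t->id)
			return t->variant;
	return "no match";
}

int main(void)
{
	unsigned int ids[] = { 0x00041180, 0x01041180 };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("periphid %#010x: old table -> %s, new table -> %s\n",
		       ids[i], match(old_table, ids[i]), match(new_table, ids[i]));
	return 0;
}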
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 5530def54e5b..e2aecb7f1d5c 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c | |||
@@ -15,9 +15,11 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/irq.h> | ||
18 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
19 | #include <linux/of.h> | 20 | #include <linux/of.h> |
20 | #include <linux/of_gpio.h> | 21 | #include <linux/of_gpio.h> |
22 | #include <linux/of_irq.h> | ||
21 | #include <linux/spi/spi.h> | 23 | #include <linux/spi/spi.h> |
22 | #include <linux/spi/mmc_spi.h> | 24 | #include <linux/spi/mmc_spi.h> |
23 | #include <linux/mmc/core.h> | 25 | #include <linux/mmc/core.h> |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 3b5248567973..a19967d0bfc4 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -16,14 +16,40 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
19 | #include <linux/slab.h> | ||
19 | #include <linux/mmc/host.h> | 20 | #include <linux/mmc/host.h> |
20 | #include <linux/mmc/sdhci-pltfm.h> | 21 | #include <linux/mmc/sdhci-pltfm.h> |
22 | #include <linux/mmc/mmc.h> | ||
23 | #include <linux/mmc/sdio.h> | ||
21 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
22 | #include <mach/esdhc.h> | 25 | #include <mach/esdhc.h> |
23 | #include "sdhci.h" | 26 | #include "sdhci.h" |
24 | #include "sdhci-pltfm.h" | 27 | #include "sdhci-pltfm.h" |
25 | #include "sdhci-esdhc.h" | 28 | #include "sdhci-esdhc.h" |
26 | 29 | ||
30 | /* VENDOR SPEC register */ | ||
31 | #define SDHCI_VENDOR_SPEC 0xC0 | ||
32 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | ||
33 | |||
34 | #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) | ||
35 | /* | ||
36 | * The CMDTYPE of the CMD register (offset 0xE) should be set to | ||
37 | * "11" when the STOP CMD12 is issued on imx53 to abort one | ||
38 | * open ended multi-blk IO. Otherwise the TC INT wouldn't | ||
39 | * be generated. | ||
40 | * In exact block transfer, the controller doesn't complete the | ||
41 | * operations automatically as required at the end of the | ||
42 | * transfer and remains on hold if the abort command is not sent. | ||
43 | * As a result, the TC flag is not asserted and SW receives a timeout | ||
44 | * exception. Bit 1 of the Vendor Spec register is used to fix it. | ||
45 | */ | ||
46 | #define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) | ||
47 | |||
48 | struct pltfm_imx_data { | ||
49 | int flags; | ||
50 | u32 scratchpad; | ||
51 | }; | ||
52 | |||
27 | static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) | 53 | static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) |
28 | { | 54 | { |
29 | void __iomem *base = host->ioaddr + (reg & ~0x3); | 55 | void __iomem *base = host->ioaddr + (reg & ~0x3); |
@@ -34,10 +60,14 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i | |||
34 | 60 | ||
35 | static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | 61 | static u32 esdhc_readl_le(struct sdhci_host *host, int reg) |
36 | { | 62 | { |
63 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
64 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
65 | |||
37 | /* fake CARD_PRESENT flag on mx25/35 */ | 66 | /* fake CARD_PRESENT flag on mx25/35 */ |
38 | u32 val = readl(host->ioaddr + reg); | 67 | u32 val = readl(host->ioaddr + reg); |
39 | 68 | ||
40 | if (unlikely(reg == SDHCI_PRESENT_STATE)) { | 69 | if (unlikely((reg == SDHCI_PRESENT_STATE) |
70 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { | ||
41 | struct esdhc_platform_data *boarddata = | 71 | struct esdhc_platform_data *boarddata = |
42 | host->mmc->parent->platform_data; | 72 | host->mmc->parent->platform_data; |
43 | 73 | ||
@@ -55,13 +85,26 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) | |||
55 | 85 | ||
56 | static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) | 86 | static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) |
57 | { | 87 | { |
58 | if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) | 88 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
89 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
90 | |||
91 | if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) | ||
92 | && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) | ||
59 | /* | 93 | /* |
60 | * these interrupts won't work with a custom card_detect gpio | 94 | * these interrupts won't work with a custom card_detect gpio |
61 | * (only applied to mx25/35) | 95 | * (only applied to mx25/35) |
62 | */ | 96 | */ |
63 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); | 97 | val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); |
64 | 98 | ||
99 | if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) | ||
100 | && (reg == SDHCI_INT_STATUS) | ||
101 | && (val & SDHCI_INT_DATA_END))) { | ||
102 | u32 v; | ||
103 | v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); | ||
104 | v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; | ||
105 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | ||
106 | } | ||
107 | |||
65 | writel(val, host->ioaddr + reg); | 108 | writel(val, host->ioaddr + reg); |
66 | } | 109 | } |
67 | 110 | ||
@@ -76,6 +119,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) | |||
76 | static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | 119 | static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) |
77 | { | 120 | { |
78 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 121 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
122 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
79 | 123 | ||
80 | switch (reg) { | 124 | switch (reg) { |
81 | case SDHCI_TRANSFER_MODE: | 125 | case SDHCI_TRANSFER_MODE: |
@@ -83,10 +127,22 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) | |||
83 | * Postpone this write, we must do it together with a | 127 | * Postpone this write, we must do it together with a |
84 | * command write that is down below. | 128 | * command write that is down below. |
85 | */ | 129 | */ |
86 | pltfm_host->scratchpad = val; | 130 | if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) |
131 | && (host->cmd->opcode == SD_IO_RW_EXTENDED) | ||
132 | && (host->cmd->data->blocks > 1) | ||
133 | && (host->cmd->data->flags & MMC_DATA_READ)) { | ||
134 | u32 v; | ||
135 | v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); | ||
136 | v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; | ||
137 | writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); | ||
138 | } | ||
139 | imx_data->scratchpad = val; | ||
87 | return; | 140 | return; |
88 | case SDHCI_COMMAND: | 141 | case SDHCI_COMMAND: |
89 | writel(val << 16 | pltfm_host->scratchpad, | 142 | if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) |
143 | && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) | ||
144 | val |= SDHCI_CMD_ABORTCMD; | ||
145 | writel(val << 16 | imx_data->scratchpad, | ||
90 | host->ioaddr + SDHCI_TRANSFER_MODE); | 146 | host->ioaddr + SDHCI_TRANSFER_MODE); |
91 | return; | 147 | return; |
92 | case SDHCI_BLOCK_SIZE: | 148 | case SDHCI_BLOCK_SIZE: |
@@ -146,7 +202,9 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) | |||
146 | } | 202 | } |
147 | 203 | ||
148 | static struct sdhci_ops sdhci_esdhc_ops = { | 204 | static struct sdhci_ops sdhci_esdhc_ops = { |
205 | .read_l = esdhc_readl_le, | ||
149 | .read_w = esdhc_readw_le, | 206 | .read_w = esdhc_readw_le, |
207 | .write_l = esdhc_writel_le, | ||
150 | .write_w = esdhc_writew_le, | 208 | .write_w = esdhc_writew_le, |
151 | .write_b = esdhc_writeb_le, | 209 | .write_b = esdhc_writeb_le, |
152 | .set_clock = esdhc_set_clock, | 210 | .set_clock = esdhc_set_clock, |
@@ -168,6 +226,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
168 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | 226 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; |
169 | struct clk *clk; | 227 | struct clk *clk; |
170 | int err; | 228 | int err; |
229 | struct pltfm_imx_data *imx_data; | ||
171 | 230 | ||
172 | clk = clk_get(mmc_dev(host->mmc), NULL); | 231 | clk = clk_get(mmc_dev(host->mmc), NULL); |
173 | if (IS_ERR(clk)) { | 232 | if (IS_ERR(clk)) { |
@@ -177,7 +236,15 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
177 | clk_enable(clk); | 236 | clk_enable(clk); |
178 | pltfm_host->clk = clk; | 237 | pltfm_host->clk = clk; |
179 | 238 | ||
180 | if (cpu_is_mx35() || cpu_is_mx51()) | 239 | imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); |
240 | if (!imx_data) { | ||
241 | clk_disable(pltfm_host->clk); | ||
242 | clk_put(pltfm_host->clk); | ||
243 | return -ENOMEM; | ||
244 | } | ||
245 | pltfm_host->priv = imx_data; | ||
246 | |||
247 | if (!cpu_is_mx25()) | ||
181 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; | 248 | host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; |
182 | 249 | ||
183 | if (cpu_is_mx25() || cpu_is_mx35()) { | 250 | if (cpu_is_mx25() || cpu_is_mx35()) { |
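The probe hunk above allocates the new per-controller state with kzalloc() right after the clock is enabled, and on allocation failure it disables and releases the clock before returning -ENOMEM; the pointer is then parked in pltfm_host->priv. A user-space sketch of that acquire/allocate/unwind shape, with calloc()/free() standing in for kzalloc()/kfree() and the clock calls:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct imx_data   { unsigned flags; unsigned scratchpad; };
struct pltfm_host { void *clk; void *priv; };

static int fake_probe(struct pltfm_host *host)
{
        struct imx_data *imx_data;

        host->clk = malloc(1);                    /* clk_get() + clk_enable() */
        if (!host->clk)
                return -ENOMEM;

        imx_data = calloc(1, sizeof(*imx_data));  /* kzalloc(..., GFP_KERNEL) */
        if (!imx_data) {
                free(host->clk);                  /* clk_disable() + clk_put() */
                host->clk = NULL;
                return -ENOMEM;
        }
        host->priv = imx_data;                    /* pltfm_host->priv = imx_data */
        return 0;
}

int main(void)
{
        struct pltfm_host host = { 0 };
        int ret = fake_probe(&host);

        printf("probe: %d, priv=%p\n", ret, host.priv);
        free(host.priv);
        free(host.clk);
        return 0;
}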
@@ -187,6 +254,9 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
187 | sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; | 254 | sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; |
188 | } | 255 | } |
189 | 256 | ||
257 | if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) | ||
258 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; | ||
259 | |||
190 | if (boarddata) { | 260 | if (boarddata) { |
191 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); | 261 | err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); |
192 | if (err) { | 262 | if (err) { |
@@ -214,8 +284,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
214 | goto no_card_detect_irq; | 284 | goto no_card_detect_irq; |
215 | } | 285 | } |
216 | 286 | ||
217 | sdhci_esdhc_ops.write_l = esdhc_writel_le; | 287 | imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; |
218 | sdhci_esdhc_ops.read_l = esdhc_readl_le; | ||
219 | /* Now we have a working card_detect again */ | 288 | /* Now we have a working card_detect again */ |
220 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 289 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
221 | } | 290 | } |
@@ -227,6 +296,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd | |||
227 | no_card_detect_pin: | 296 | no_card_detect_pin: |
228 | boarddata->cd_gpio = err; | 297 | boarddata->cd_gpio = err; |
229 | not_supported: | 298 | not_supported: |
299 | kfree(imx_data); | ||
230 | return 0; | 300 | return 0; |
231 | } | 301 | } |
232 | 302 | ||
@@ -234,6 +304,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host) | |||
234 | { | 304 | { |
235 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 305 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
236 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; | 306 | struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; |
307 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
237 | 308 | ||
238 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) | 309 | if (boarddata && gpio_is_valid(boarddata->wp_gpio)) |
239 | gpio_free(boarddata->wp_gpio); | 310 | gpio_free(boarddata->wp_gpio); |
@@ -247,6 +318,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host) | |||
247 | 318 | ||
248 | clk_disable(pltfm_host->clk); | 319 | clk_disable(pltfm_host->clk); |
249 | clk_put(pltfm_host->clk); | 320 | clk_put(pltfm_host->clk); |
321 | kfree(imx_data); | ||
250 | } | 322 | } |
251 | 323 | ||
252 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | 324 | struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { |
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index c55aae828aac..c3b08f111942 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h | |||
@@ -23,8 +23,7 @@ | |||
23 | SDHCI_QUIRK_NONSTANDARD_CLOCK | \ | 23 | SDHCI_QUIRK_NONSTANDARD_CLOCK | \ |
24 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ | 24 | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ |
25 | SDHCI_QUIRK_PIO_NEEDS_DELAY | \ | 25 | SDHCI_QUIRK_PIO_NEEDS_DELAY | \ |
26 | SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ | 26 | SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) |
27 | SDHCI_QUIRK_NO_CARD_NO_RESET) | ||
28 | 27 | ||
29 | #define ESDHC_SYSTEM_CONTROL 0x2c | 28 | #define ESDHC_SYSTEM_CONTROL 0x2c |
30 | #define ESDHC_CLOCK_MASK 0x0000fff0 | 29 | #define ESDHC_CLOCK_MASK 0x0000fff0 |
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 08161f690ae8..ba40d6d035c7 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c | |||
@@ -74,7 +74,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) | |||
74 | 74 | ||
75 | struct sdhci_of_data sdhci_esdhc = { | 75 | struct sdhci_of_data sdhci_esdhc = { |
76 | /* card detection could be handled via GPIO */ | 76 | /* card detection could be handled via GPIO */ |
77 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION, | 77 | .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION |
78 | | SDHCI_QUIRK_NO_CARD_NO_RESET, | ||
78 | .ops = { | 79 | .ops = { |
79 | .read_l = sdhci_be32bs_readl, | 80 | .read_l = sdhci_be32bs_readl, |
80 | .read_w = esdhc_readw, | 81 | .read_w = esdhc_readw, |
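Taken together, the two hunks above move SDHCI_QUIRK_NO_CARD_NO_RESET out of the shared ESDHC_DEFAULT_QUIRKS mask and OR it back in only for the OF (sdhci-of-esdhc) variant, so the i.MX driver stops inheriting it. A tiny sketch of that bitmask split, with placeholder bit positions rather than the real sdhci.h values:

#include <stdio.h>

#define QUIRK_PIO_NEEDS_DELAY        (1u << 0)   /* placeholder bits */
#define QUIRK_BROKEN_CARD_DETECTION  (1u << 1)
#define QUIRK_NO_CARD_NO_RESET       (1u << 2)

/* The shared default no longer carries NO_CARD_NO_RESET... */
#define ESDHC_DEFAULT_QUIRKS  (QUIRK_PIO_NEEDS_DELAY)

/* ...so only the variant that still needs it ORs it back in. */
static const unsigned of_esdhc_quirks = ESDHC_DEFAULT_QUIRKS
        | QUIRK_BROKEN_CARD_DETECTION
        | QUIRK_NO_CARD_NO_RESET;

int main(void)
{
        printf("of quirks = %#x\n", of_esdhc_quirks);
        return 0;
}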
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 2f8d46854acd..a136be706347 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
@@ -1016,16 +1016,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, | |||
1016 | struct sdhci_pci_chip *chip; | 1016 | struct sdhci_pci_chip *chip; |
1017 | struct sdhci_pci_slot *slot; | 1017 | struct sdhci_pci_slot *slot; |
1018 | 1018 | ||
1019 | u8 slots, rev, first_bar; | 1019 | u8 slots, first_bar; |
1020 | int ret, i; | 1020 | int ret, i; |
1021 | 1021 | ||
1022 | BUG_ON(pdev == NULL); | 1022 | BUG_ON(pdev == NULL); |
1023 | BUG_ON(ent == NULL); | 1023 | BUG_ON(ent == NULL); |
1024 | 1024 | ||
1025 | pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); | ||
1026 | |||
1027 | dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", | 1025 | dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", |
1028 | (int)pdev->vendor, (int)pdev->device, (int)rev); | 1026 | (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); |
1029 | 1027 | ||
1030 | ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); | 1028 | ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); |
1031 | if (ret) | 1029 | if (ret) |
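The sdhci-pci hunk drops the explicit pci_read_config_byte(pdev, PCI_CLASS_REVISION, ...) and prints pdev->revision instead, relying on the revision the PCI core already cached at enumeration time. A small sketch of the same cache-once, read-everywhere idea, with a fake device structure and obviously made-up ID and revision values:

#include <stdint.h>
#include <stdio.h>

struct fake_pci_dev { uint16_t vendor, device; uint8_t revision; };

static void enumerate(struct fake_pci_dev *dev)
{
        dev->revision = 0x21;   /* one config-space read at enumeration (value invented) */
}

int main(void)
{
        struct fake_pci_dev pdev = { .vendor = 0x1234, .device = 0x5678 };

        enumerate(&pdev);
        printf("SDHCI controller found [%04x:%04x] (rev %x)\n",
               (unsigned)pdev.vendor, (unsigned)pdev.device, (unsigned)pdev.revision);
        return 0;
}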
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index ea2e44d9be5e..2b37016ad0ac 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | struct sdhci_pltfm_host { | 18 | struct sdhci_pltfm_host { |
19 | struct clk *clk; | 19 | struct clk *clk; |
20 | u32 scratchpad; /* to handle quirks across io-accessor calls */ | 20 | void *priv; /* to handle quirks across io-accessor calls */ |
21 | }; | 21 | }; |
22 | 22 | ||
23 | extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; | 23 | extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; |
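Replacing the u32 scratchpad with void *priv lets each platform glue driver hang an arbitrary state structure off sdhci_pltfm_host instead of a single 32-bit value; the i.MX driver above uses it for struct pltfm_imx_data. A sketch of the typed-accessor pattern over such an opaque pointer (names here mirror the layout but are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct sdhci_pltfm_host { void *clk; void *priv; };
struct imx_priv         { unsigned flags; unsigned scratchpad; };

static struct imx_priv *to_imx(struct sdhci_pltfm_host *host)
{
        return host->priv;   /* typed view of the opaque per-driver pointer */
}

int main(void)
{
        struct sdhci_pltfm_host host = { .priv = calloc(1, sizeof(struct imx_priv)) };

        if (!host.priv)
                return 1;
        to_imx(&host)->scratchpad = 0x23;
        printf("scratchpad via priv: %#x\n", to_imx(&host)->scratchpad);
        free(host.priv);
        return 0;
}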
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index d70c54c7b70a..60a4c97d3d18 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c | |||
@@ -50,7 +50,7 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) | |||
50 | /* val == 1 -> card removed, val == 0 -> card inserted */ | 50 | /* val == 1 -> card removed, val == 0 -> card inserted */ |
51 | /* if card removed - set irq for low level, else vice versa */ | 51 | /* if card removed - set irq for low level, else vice versa */ |
52 | gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; | 52 | gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; |
53 | set_irq_type(irq, gpio_irq_type); | 53 | irq_set_irq_type(irq, gpio_irq_type); |
54 | 54 | ||
55 | if (sdhci->data->card_power_gpio >= 0) { | 55 | if (sdhci->data->card_power_gpio >= 0) { |
56 | if (!sdhci->data->power_always_enb) { | 56 | if (!sdhci->data->power_always_enb) { |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 6e0969e40650..25e8bde600d1 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -45,6 +45,7 @@ | |||
45 | #define SDHCI_CMD_CRC 0x08 | 45 | #define SDHCI_CMD_CRC 0x08 |
46 | #define SDHCI_CMD_INDEX 0x10 | 46 | #define SDHCI_CMD_INDEX 0x10 |
47 | #define SDHCI_CMD_DATA 0x20 | 47 | #define SDHCI_CMD_DATA 0x20 |
48 | #define SDHCI_CMD_ABORTCMD 0xC0 | ||
48 | 49 | ||
49 | #define SDHCI_CMD_RESP_NONE 0x00 | 50 | #define SDHCI_CMD_RESP_NONE 0x00 |
50 | #define SDHCI_CMD_RESP_LONG 0x01 | 51 | #define SDHCI_CMD_RESP_LONG 0x01 |
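SDHCI_CMD_ABORTCMD (0xC0) sets the command-type field of the 16-bit command register to "abort", which the i.MX hunk earlier ORs in for MMC_STOP_TRANSMISSION when the multi-block workaround is active. A sketch of composing that register value; the flag values are copied from the header above, while the opcode-in-bits-15:8 packing is the usual SDHCI encoding and is assumed here:

#include <stdint.h>
#include <stdio.h>

#define SDHCI_CMD_CRC       0x08
#define SDHCI_CMD_INDEX     0x10
#define SDHCI_CMD_DATA      0x20
#define SDHCI_CMD_ABORTCMD  0xC0   /* command type = abort */

#define MMC_STOP_TRANSMISSION  12

static uint16_t make_cmd(uint8_t opcode, uint8_t flags, int multiblk_no_int)
{
        uint16_t val = (uint16_t)(opcode << 8) | flags;

        /* The workaround tags CMD12 as an abort-type command. */
        if (multiblk_no_int && opcode == MMC_STOP_TRANSMISSION)
                val |= SDHCI_CMD_ABORTCMD;
        return val;
}

int main(void)
{
        printf("%#06x\n", (unsigned)make_cmd(MMC_STOP_TRANSMISSION,
                                             SDHCI_CMD_CRC | SDHCI_CMD_INDEX, 1));
        return 0;
}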
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 53a63024bf11..cc701236d16f 100644 --- a/drivers/mfd/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
@@ -23,51 +23,30 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/mmc/host.h> | 25 | #include <linux/mmc/host.h> |
26 | #include <linux/mfd/core.h> | 26 | #include <linux/mmc/sh_mobile_sdhi.h> |
27 | #include <linux/mfd/tmio.h> | 27 | #include <linux/mfd/tmio.h> |
28 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
29 | #include <linux/sh_dma.h> | 28 | #include <linux/sh_dma.h> |
30 | 29 | ||
30 | #include "tmio_mmc.h" | ||
31 | |||
31 | struct sh_mobile_sdhi { | 32 | struct sh_mobile_sdhi { |
32 | struct clk *clk; | 33 | struct clk *clk; |
33 | struct tmio_mmc_data mmc_data; | 34 | struct tmio_mmc_data mmc_data; |
34 | struct mfd_cell cell_mmc; | ||
35 | struct sh_dmae_slave param_tx; | 35 | struct sh_dmae_slave param_tx; |
36 | struct sh_dmae_slave param_rx; | 36 | struct sh_dmae_slave param_rx; |
37 | struct tmio_mmc_dma dma_priv; | 37 | struct tmio_mmc_dma dma_priv; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct resource sh_mobile_sdhi_resources[] = { | 40 | static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) |
41 | { | ||
42 | .start = 0x000, | ||
43 | .end = 0x1ff, | ||
44 | .flags = IORESOURCE_MEM, | ||
45 | }, | ||
46 | { | ||
47 | .start = 0, | ||
48 | .end = 0, | ||
49 | .flags = IORESOURCE_IRQ, | ||
50 | }, | ||
51 | }; | ||
52 | |||
53 | static struct mfd_cell sh_mobile_sdhi_cell = { | ||
54 | .name = "tmio-mmc", | ||
55 | .num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources), | ||
56 | .resources = sh_mobile_sdhi_resources, | ||
57 | }; | ||
58 | |||
59 | static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state) | ||
60 | { | 41 | { |
61 | struct platform_device *pdev = to_platform_device(tmio->dev.parent); | ||
62 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 42 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
63 | 43 | ||
64 | if (p && p->set_pwr) | 44 | if (p && p->set_pwr) |
65 | p->set_pwr(pdev, state); | 45 | p->set_pwr(pdev, state); |
66 | } | 46 | } |
67 | 47 | ||
68 | static int sh_mobile_sdhi_get_cd(struct platform_device *tmio) | 48 | static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) |
69 | { | 49 | { |
70 | struct platform_device *pdev = to_platform_device(tmio->dev.parent); | ||
71 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 50 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
72 | 51 | ||
73 | if (p && p->get_cd) | 52 | if (p && p->get_cd) |
@@ -81,20 +60,9 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
81 | struct sh_mobile_sdhi *priv; | 60 | struct sh_mobile_sdhi *priv; |
82 | struct tmio_mmc_data *mmc_data; | 61 | struct tmio_mmc_data *mmc_data; |
83 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 62 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; |
84 | struct resource *mem; | 63 | struct tmio_mmc_host *host; |
85 | char clk_name[8]; | 64 | char clk_name[8]; |
86 | int ret, irq; | 65 | int ret; |
87 | |||
88 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
89 | if (!mem) | ||
90 | dev_err(&pdev->dev, "missing MEM resource\n"); | ||
91 | |||
92 | irq = platform_get_irq(pdev, 0); | ||
93 | if (irq < 0) | ||
94 | dev_err(&pdev->dev, "missing IRQ resource\n"); | ||
95 | |||
96 | if (!mem || (irq < 0)) | ||
97 | return -EINVAL; | ||
98 | 66 | ||
99 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); | 67 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); |
100 | if (priv == NULL) { | 68 | if (priv == NULL) { |
@@ -109,8 +77,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
109 | if (IS_ERR(priv->clk)) { | 77 | if (IS_ERR(priv->clk)) { |
110 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | 78 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); |
111 | ret = PTR_ERR(priv->clk); | 79 | ret = PTR_ERR(priv->clk); |
112 | kfree(priv); | 80 | goto eclkget; |
113 | return ret; | ||
114 | } | 81 | } |
115 | 82 | ||
116 | clk_enable(priv->clk); | 83 | clk_enable(priv->clk); |
@@ -123,6 +90,15 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
123 | mmc_data->flags = p->tmio_flags; | 90 | mmc_data->flags = p->tmio_flags; |
124 | mmc_data->ocr_mask = p->tmio_ocr_mask; | 91 | mmc_data->ocr_mask = p->tmio_ocr_mask; |
125 | mmc_data->capabilities |= p->tmio_caps; | 92 | mmc_data->capabilities |= p->tmio_caps; |
93 | |||
94 | if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { | ||
95 | priv->param_tx.slave_id = p->dma_slave_tx; | ||
96 | priv->param_rx.slave_id = p->dma_slave_rx; | ||
97 | priv->dma_priv.chan_priv_tx = &priv->param_tx; | ||
98 | priv->dma_priv.chan_priv_rx = &priv->param_rx; | ||
99 | priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ | ||
100 | mmc_data->dma = &priv->dma_priv; | ||
101 | } | ||
126 | } | 102 | } |
127 | 103 | ||
128 | /* | 104 | /* |
@@ -136,36 +112,30 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
136 | */ | 112 | */ |
137 | mmc_data->flags |= TMIO_MMC_SDIO_IRQ; | 113 | mmc_data->flags |= TMIO_MMC_SDIO_IRQ; |
138 | 114 | ||
139 | if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { | 115 | ret = tmio_mmc_host_probe(&host, pdev, mmc_data); |
140 | priv->param_tx.slave_id = p->dma_slave_tx; | 116 | if (ret < 0) |
141 | priv->param_rx.slave_id = p->dma_slave_rx; | 117 | goto eprobe; |
142 | priv->dma_priv.chan_priv_tx = &priv->param_tx; | ||
143 | priv->dma_priv.chan_priv_rx = &priv->param_rx; | ||
144 | priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ | ||
145 | mmc_data->dma = &priv->dma_priv; | ||
146 | } | ||
147 | 118 | ||
148 | memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc)); | 119 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), |
149 | priv->cell_mmc.mfd_data = mmc_data; | 120 | (unsigned long)host->ctl, host->irq); |
150 | 121 | ||
151 | platform_set_drvdata(pdev, priv); | 122 | return ret; |
152 | |||
153 | ret = mfd_add_devices(&pdev->dev, pdev->id, | ||
154 | &priv->cell_mmc, 1, mem, irq); | ||
155 | if (ret) { | ||
156 | clk_disable(priv->clk); | ||
157 | clk_put(priv->clk); | ||
158 | kfree(priv); | ||
159 | } | ||
160 | 123 | ||
124 | eprobe: | ||
125 | clk_disable(priv->clk); | ||
126 | clk_put(priv->clk); | ||
127 | eclkget: | ||
128 | kfree(priv); | ||
161 | return ret; | 129 | return ret; |
162 | } | 130 | } |
163 | 131 | ||
164 | static int sh_mobile_sdhi_remove(struct platform_device *pdev) | 132 | static int sh_mobile_sdhi_remove(struct platform_device *pdev) |
165 | { | 133 | { |
166 | struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev); | 134 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
135 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
136 | struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); | ||
167 | 137 | ||
168 | mfd_remove_devices(&pdev->dev); | 138 | tmio_mmc_host_remove(host); |
169 | clk_disable(priv->clk); | 139 | clk_disable(priv->clk); |
170 | clk_put(priv->clk); | 140 | clk_put(priv->clk); |
171 | kfree(priv); | 141 | kfree(priv); |
@@ -198,3 +168,4 @@ module_exit(sh_mobile_sdhi_exit); | |||
198 | MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); | 168 | MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); |
199 | MODULE_AUTHOR("Magnus Damm"); | 169 | MODULE_AUTHOR("Magnus Damm"); |
200 | MODULE_LICENSE("GPL v2"); | 170 | MODULE_LICENSE("GPL v2"); |
171 | MODULE_ALIAS("platform:sh_mobile_sdhi"); | ||
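The reworked sh_mobile_sdhi probe no longer registers an MFD "tmio-mmc" cell; it calls tmio_mmc_host_probe() directly and unwinds its two earlier acquisitions through the eprobe/eclkget labels in reverse order. A user-space sketch of that goto-based unwinding shape, with stub helpers standing in for kzalloc(), clk_get()/clk_enable() and the host probe:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *get_priv(void)   { return calloc(1, 16); }
static void *get_clk(void)    { return malloc(1); }
static int   host_probe(void) { return 0; }   /* pretend success */

static int fake_probe(void)
{
        void *priv, *clk;
        int ret;

        priv = get_priv();
        if (!priv)
                return -ENOMEM;

        clk = get_clk();
        if (!clk) {
                ret = -ENOENT;
                goto eclkget;
        }

        ret = host_probe();
        if (ret < 0)
                goto eprobe;

        /* The real driver keeps priv and clk for the device's lifetime;
         * freed here only so the sketch does not leak. */
        free(clk);
        free(priv);
        return 0;

eprobe:
        free(clk);    /* clk_disable() + clk_put() */
eclkget:
        free(priv);   /* kfree(priv) */
        return ret;
}

int main(void)
{
        printf("probe: %d\n", fake_probe());
        return 0;
}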
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index ab1adeabdd22..79c568461d59 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/mmc/tmio_mmc.c | 2 | * linux/drivers/mmc/host/tmio_mmc.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Ian Molton | 4 | * Copyright (C) 2007 Ian Molton |
5 | * Copyright (C) 2007 Ian Molton | 5 | * Copyright (C) 2004 Ian Molton |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -11,1182 +11,17 @@ | |||
11 | * Driver for the MMC / SD / SDIO cell found in: | 11 | * Driver for the MMC / SD / SDIO cell found in: |
12 | * | 12 | * |
13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 | 13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 |
14 | * | ||
15 | * This driver draws mainly on scattered spec sheets, reverse engineering | ||
16 | * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit | ||
17 | * support). (Further 4 bit support from a later datasheet). | ||
18 | * | ||
19 | * TODO: | ||
20 | * Investigate using a workqueue for PIO transfers | ||
21 | * Eliminate FIXMEs | ||
22 | * SDIO support | ||
23 | * Better Power management | ||
24 | * Handle MMC errors better | ||
25 | * double buffer support | ||
26 | * | ||
27 | */ | 14 | */ |
28 | 15 | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/device.h> | 16 | #include <linux/device.h> |
31 | #include <linux/dmaengine.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/irq.h> | ||
36 | #include <linux/mfd/core.h> | 17 | #include <linux/mfd/core.h> |
37 | #include <linux/mfd/tmio.h> | 18 | #include <linux/mfd/tmio.h> |
38 | #include <linux/mmc/host.h> | 19 | #include <linux/mmc/host.h> |
39 | #include <linux/module.h> | 20 | #include <linux/module.h> |
40 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
41 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
42 | #include <linux/workqueue.h> | ||
43 | #include <linux/spinlock.h> | ||
44 | |||
45 | #define CTL_SD_CMD 0x00 | ||
46 | #define CTL_ARG_REG 0x04 | ||
47 | #define CTL_STOP_INTERNAL_ACTION 0x08 | ||
48 | #define CTL_XFER_BLK_COUNT 0xa | ||
49 | #define CTL_RESPONSE 0x0c | ||
50 | #define CTL_STATUS 0x1c | ||
51 | #define CTL_IRQ_MASK 0x20 | ||
52 | #define CTL_SD_CARD_CLK_CTL 0x24 | ||
53 | #define CTL_SD_XFER_LEN 0x26 | ||
54 | #define CTL_SD_MEM_CARD_OPT 0x28 | ||
55 | #define CTL_SD_ERROR_DETAIL_STATUS 0x2c | ||
56 | #define CTL_SD_DATA_PORT 0x30 | ||
57 | #define CTL_TRANSACTION_CTL 0x34 | ||
58 | #define CTL_SDIO_STATUS 0x36 | ||
59 | #define CTL_SDIO_IRQ_MASK 0x38 | ||
60 | #define CTL_RESET_SD 0xe0 | ||
61 | #define CTL_SDIO_REGS 0x100 | ||
62 | #define CTL_CLK_AND_WAIT_CTL 0x138 | ||
63 | #define CTL_RESET_SDIO 0x1e0 | ||
64 | |||
65 | /* Definitions for values the CTRL_STATUS register can take. */ | ||
66 | #define TMIO_STAT_CMDRESPEND 0x00000001 | ||
67 | #define TMIO_STAT_DATAEND 0x00000004 | ||
68 | #define TMIO_STAT_CARD_REMOVE 0x00000008 | ||
69 | #define TMIO_STAT_CARD_INSERT 0x00000010 | ||
70 | #define TMIO_STAT_SIGSTATE 0x00000020 | ||
71 | #define TMIO_STAT_WRPROTECT 0x00000080 | ||
72 | #define TMIO_STAT_CARD_REMOVE_A 0x00000100 | ||
73 | #define TMIO_STAT_CARD_INSERT_A 0x00000200 | ||
74 | #define TMIO_STAT_SIGSTATE_A 0x00000400 | ||
75 | #define TMIO_STAT_CMD_IDX_ERR 0x00010000 | ||
76 | #define TMIO_STAT_CRCFAIL 0x00020000 | ||
77 | #define TMIO_STAT_STOPBIT_ERR 0x00040000 | ||
78 | #define TMIO_STAT_DATATIMEOUT 0x00080000 | ||
79 | #define TMIO_STAT_RXOVERFLOW 0x00100000 | ||
80 | #define TMIO_STAT_TXUNDERRUN 0x00200000 | ||
81 | #define TMIO_STAT_CMDTIMEOUT 0x00400000 | ||
82 | #define TMIO_STAT_RXRDY 0x01000000 | ||
83 | #define TMIO_STAT_TXRQ 0x02000000 | ||
84 | #define TMIO_STAT_ILL_FUNC 0x20000000 | ||
85 | #define TMIO_STAT_CMD_BUSY 0x40000000 | ||
86 | #define TMIO_STAT_ILL_ACCESS 0x80000000 | ||
87 | |||
88 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | ||
89 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | ||
90 | #define TMIO_SDIO_STAT_EXPUB52 0x4000 | ||
91 | #define TMIO_SDIO_STAT_EXWT 0x8000 | ||
92 | #define TMIO_SDIO_MASK_ALL 0xc007 | ||
93 | |||
94 | /* Define some IRQ masks */ | ||
95 | /* This is the mask used at reset by the chip */ | ||
96 | #define TMIO_MASK_ALL 0x837f031d | ||
97 | #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) | ||
98 | #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) | ||
99 | #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ | ||
100 | TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) | ||
101 | #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) | ||
102 | |||
103 | #define enable_mmc_irqs(host, i) \ | ||
104 | do { \ | ||
105 | u32 mask;\ | ||
106 | mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ | ||
107 | mask &= ~((i) & TMIO_MASK_IRQ); \ | ||
108 | sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ | ||
109 | } while (0) | ||
110 | |||
111 | #define disable_mmc_irqs(host, i) \ | ||
112 | do { \ | ||
113 | u32 mask;\ | ||
114 | mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ | ||
115 | mask |= ((i) & TMIO_MASK_IRQ); \ | ||
116 | sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ | ||
117 | } while (0) | ||
118 | |||
119 | #define ack_mmc_irqs(host, i) \ | ||
120 | do { \ | ||
121 | sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ | ||
122 | } while (0) | ||
123 | |||
124 | /* This is arbitrary, just no one has needed any higher alignment yet */ | ||
125 | #define MAX_ALIGN 4 | ||
126 | |||
127 | struct tmio_mmc_host { | ||
128 | void __iomem *ctl; | ||
129 | unsigned long bus_shift; | ||
130 | struct mmc_command *cmd; | ||
131 | struct mmc_request *mrq; | ||
132 | struct mmc_data *data; | ||
133 | struct mmc_host *mmc; | ||
134 | int irq; | ||
135 | unsigned int sdio_irq_enabled; | ||
136 | |||
137 | /* Callbacks for clock / power control */ | ||
138 | void (*set_pwr)(struct platform_device *host, int state); | ||
139 | void (*set_clk_div)(struct platform_device *host, int state); | ||
140 | |||
141 | /* pio related stuff */ | ||
142 | struct scatterlist *sg_ptr; | ||
143 | struct scatterlist *sg_orig; | ||
144 | unsigned int sg_len; | ||
145 | unsigned int sg_off; | ||
146 | |||
147 | struct platform_device *pdev; | ||
148 | |||
149 | /* DMA support */ | ||
150 | struct dma_chan *chan_rx; | ||
151 | struct dma_chan *chan_tx; | ||
152 | struct tasklet_struct dma_complete; | ||
153 | struct tasklet_struct dma_issue; | ||
154 | #ifdef CONFIG_TMIO_MMC_DMA | ||
155 | u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN))); | ||
156 | struct scatterlist bounce_sg; | ||
157 | #endif | ||
158 | |||
159 | /* Track lost interrupts */ | ||
160 | struct delayed_work delayed_reset_work; | ||
161 | spinlock_t lock; | ||
162 | unsigned long last_req_ts; | ||
163 | }; | ||
164 | |||
165 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host); | ||
166 | |||
167 | static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
168 | { | ||
169 | return readw(host->ctl + (addr << host->bus_shift)); | ||
170 | } | ||
171 | |||
172 | static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
173 | u16 *buf, int count) | ||
174 | { | ||
175 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
176 | } | ||
177 | |||
178 | static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
179 | { | ||
180 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
181 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
182 | } | ||
183 | |||
184 | static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
185 | { | ||
186 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
187 | } | ||
188 | |||
189 | static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
190 | u16 *buf, int count) | ||
191 | { | ||
192 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
193 | } | ||
194 | |||
195 | static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
196 | { | ||
197 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
198 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
199 | } | ||
200 | |||
201 | static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) | ||
202 | { | ||
203 | host->sg_len = data->sg_len; | ||
204 | host->sg_ptr = data->sg; | ||
205 | host->sg_orig = data->sg; | ||
206 | host->sg_off = 0; | ||
207 | } | ||
208 | |||
209 | static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | ||
210 | { | ||
211 | host->sg_ptr = sg_next(host->sg_ptr); | ||
212 | host->sg_off = 0; | ||
213 | return --host->sg_len; | ||
214 | } | ||
215 | |||
216 | static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) | ||
217 | { | ||
218 | local_irq_save(*flags); | ||
219 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | ||
220 | } | ||
221 | |||
222 | static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt) | ||
223 | { | ||
224 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); | ||
225 | local_irq_restore(*flags); | ||
226 | } | ||
227 | |||
228 | #ifdef CONFIG_MMC_DEBUG | ||
229 | |||
230 | #define STATUS_TO_TEXT(a, status, i) \ | ||
231 | do { \ | ||
232 | if (status & TMIO_STAT_##a) { \ | ||
233 | if (i++) \ | ||
234 | printk(" | "); \ | ||
235 | printk(#a); \ | ||
236 | } \ | ||
237 | } while (0) | ||
238 | |||
239 | void pr_debug_status(u32 status) | ||
240 | { | ||
241 | int i = 0; | ||
242 | printk(KERN_DEBUG "status: %08x = ", status); | ||
243 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | ||
244 | STATUS_TO_TEXT(CARD_INSERT, status, i); | ||
245 | STATUS_TO_TEXT(SIGSTATE, status, i); | ||
246 | STATUS_TO_TEXT(WRPROTECT, status, i); | ||
247 | STATUS_TO_TEXT(CARD_REMOVE_A, status, i); | ||
248 | STATUS_TO_TEXT(CARD_INSERT_A, status, i); | ||
249 | STATUS_TO_TEXT(SIGSTATE_A, status, i); | ||
250 | STATUS_TO_TEXT(CMD_IDX_ERR, status, i); | ||
251 | STATUS_TO_TEXT(STOPBIT_ERR, status, i); | ||
252 | STATUS_TO_TEXT(ILL_FUNC, status, i); | ||
253 | STATUS_TO_TEXT(CMD_BUSY, status, i); | ||
254 | STATUS_TO_TEXT(CMDRESPEND, status, i); | ||
255 | STATUS_TO_TEXT(DATAEND, status, i); | ||
256 | STATUS_TO_TEXT(CRCFAIL, status, i); | ||
257 | STATUS_TO_TEXT(DATATIMEOUT, status, i); | ||
258 | STATUS_TO_TEXT(CMDTIMEOUT, status, i); | ||
259 | STATUS_TO_TEXT(RXOVERFLOW, status, i); | ||
260 | STATUS_TO_TEXT(TXUNDERRUN, status, i); | ||
261 | STATUS_TO_TEXT(RXRDY, status, i); | ||
262 | STATUS_TO_TEXT(TXRQ, status, i); | ||
263 | STATUS_TO_TEXT(ILL_ACCESS, status, i); | ||
264 | printk("\n"); | ||
265 | } | ||
266 | |||
267 | #else | ||
268 | #define pr_debug_status(s) do { } while (0) | ||
269 | #endif | ||
270 | |||
271 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | ||
272 | { | ||
273 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
274 | |||
275 | if (enable) { | ||
276 | host->sdio_irq_enabled = 1; | ||
277 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); | ||
278 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, | ||
279 | (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); | ||
280 | } else { | ||
281 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); | ||
282 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); | ||
283 | host->sdio_irq_enabled = 0; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) | ||
288 | { | ||
289 | u32 clk = 0, clock; | ||
290 | |||
291 | if (new_clock) { | ||
292 | for (clock = host->mmc->f_min, clk = 0x80000080; | ||
293 | new_clock >= (clock<<1); clk >>= 1) | ||
294 | clock <<= 1; | ||
295 | clk |= 0x100; | ||
296 | } | ||
297 | |||
298 | if (host->set_clk_div) | ||
299 | host->set_clk_div(host->pdev, (clk>>22) & 1); | ||
300 | |||
301 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); | ||
302 | } | ||
303 | |||
304 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) | ||
305 | { | ||
306 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
307 | |||
308 | /* | ||
309 | * Testing on sh-mobile showed that SDIO IRQs are unmasked when | ||
310 | * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the | ||
311 | * device IRQ here and restore the SDIO IRQ mask before | ||
312 | * re-enabling the device IRQ. | ||
313 | */ | ||
314 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
315 | disable_irq(host->irq); | ||
316 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
317 | msleep(10); | ||
318 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
319 | tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); | ||
320 | enable_irq(host->irq); | ||
321 | } | ||
322 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & | ||
323 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
324 | msleep(10); | ||
325 | } | ||
326 | |||
327 | static void tmio_mmc_clk_start(struct tmio_mmc_host *host) | ||
328 | { | ||
329 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
330 | |||
331 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | | ||
332 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
333 | msleep(10); | ||
334 | /* see comment in tmio_mmc_clk_stop above */ | ||
335 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
336 | disable_irq(host->irq); | ||
337 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); | ||
338 | msleep(10); | ||
339 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
340 | tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); | ||
341 | enable_irq(host->irq); | ||
342 | } | ||
343 | } | ||
344 | |||
345 | static void reset(struct tmio_mmc_host *host) | ||
346 | { | ||
347 | /* FIXME - should we set stop clock reg here */ | ||
348 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); | ||
349 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); | ||
350 | msleep(10); | ||
351 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); | ||
352 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); | ||
353 | msleep(10); | ||
354 | } | ||
355 | |||
356 | static void tmio_mmc_reset_work(struct work_struct *work) | ||
357 | { | ||
358 | struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, | ||
359 | delayed_reset_work.work); | ||
360 | struct mmc_request *mrq; | ||
361 | unsigned long flags; | ||
362 | |||
363 | spin_lock_irqsave(&host->lock, flags); | ||
364 | mrq = host->mrq; | ||
365 | |||
366 | /* request already finished */ | ||
367 | if (!mrq | ||
368 | || time_is_after_jiffies(host->last_req_ts + | ||
369 | msecs_to_jiffies(2000))) { | ||
370 | spin_unlock_irqrestore(&host->lock, flags); | ||
371 | return; | ||
372 | } | ||
373 | |||
374 | dev_warn(&host->pdev->dev, | ||
375 | "timeout waiting for hardware interrupt (CMD%u)\n", | ||
376 | mrq->cmd->opcode); | ||
377 | |||
378 | if (host->data) | ||
379 | host->data->error = -ETIMEDOUT; | ||
380 | else if (host->cmd) | ||
381 | host->cmd->error = -ETIMEDOUT; | ||
382 | else | ||
383 | mrq->cmd->error = -ETIMEDOUT; | ||
384 | |||
385 | host->cmd = NULL; | ||
386 | host->data = NULL; | ||
387 | host->mrq = NULL; | ||
388 | |||
389 | spin_unlock_irqrestore(&host->lock, flags); | ||
390 | |||
391 | reset(host); | ||
392 | |||
393 | mmc_request_done(host->mmc, mrq); | ||
394 | } | ||
395 | |||
396 | static void | ||
397 | tmio_mmc_finish_request(struct tmio_mmc_host *host) | ||
398 | { | ||
399 | struct mmc_request *mrq = host->mrq; | ||
400 | |||
401 | if (!mrq) | ||
402 | return; | ||
403 | |||
404 | host->mrq = NULL; | ||
405 | host->cmd = NULL; | ||
406 | host->data = NULL; | ||
407 | |||
408 | cancel_delayed_work(&host->delayed_reset_work); | ||
409 | |||
410 | mmc_request_done(host->mmc, mrq); | ||
411 | } | ||
412 | |||
413 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
414 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
415 | #define APP_CMD 0x0040 | ||
416 | #define RESP_NONE 0x0300 | ||
417 | #define RESP_R1 0x0400 | ||
418 | #define RESP_R1B 0x0500 | ||
419 | #define RESP_R2 0x0600 | ||
420 | #define RESP_R3 0x0700 | ||
421 | #define DATA_PRESENT 0x0800 | ||
422 | #define TRANSFER_READ 0x1000 | ||
423 | #define TRANSFER_MULTI 0x2000 | ||
424 | #define SECURITY_CMD 0x4000 | ||
425 | |||
426 | static int | ||
427 | tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | ||
428 | { | ||
429 | struct mmc_data *data = host->data; | ||
430 | int c = cmd->opcode; | ||
431 | |||
432 | /* Command 12 is handled by hardware */ | ||
433 | if (cmd->opcode == 12 && !cmd->arg) { | ||
434 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | switch (mmc_resp_type(cmd)) { | ||
439 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
440 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
441 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
442 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
443 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
444 | default: | ||
445 | pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); | ||
446 | return -EINVAL; | ||
447 | } | ||
448 | |||
449 | host->cmd = cmd; | ||
450 | |||
451 | /* FIXME - this seems to be OK commented out, but the spec suggests this bit | ||
452 | * should be set when issuing app commands. | ||
453 | * if(cmd->flags & MMC_FLAG_ACMD) | ||
454 | * c |= APP_CMD; | ||
455 | */ | ||
456 | if (data) { | ||
457 | c |= DATA_PRESENT; | ||
458 | if (data->blocks > 1) { | ||
459 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); | ||
460 | c |= TRANSFER_MULTI; | ||
461 | } | ||
462 | if (data->flags & MMC_DATA_READ) | ||
463 | c |= TRANSFER_READ; | ||
464 | } | ||
465 | |||
466 | enable_mmc_irqs(host, TMIO_MASK_CMD); | ||
467 | |||
468 | /* Fire off the command */ | ||
469 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); | ||
470 | sd_ctrl_write16(host, CTL_SD_CMD, c); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * This chip always returns (at least?) as much data as you ask for. | ||
477 | * I'm unsure what happens if you ask for less than a block. This should be | ||
478 | * looked into to ensure that a funny length read doesn't hose the controller. | ||
479 | */ | ||
480 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | ||
481 | { | ||
482 | struct mmc_data *data = host->data; | ||
483 | void *sg_virt; | ||
484 | unsigned short *buf; | ||
485 | unsigned int count; | ||
486 | unsigned long flags; | ||
487 | |||
488 | if (!data) { | ||
489 | pr_debug("Spurious PIO IRQ\n"); | ||
490 | return; | ||
491 | } | ||
492 | |||
493 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); | ||
494 | buf = (unsigned short *)(sg_virt + host->sg_off); | ||
495 | |||
496 | count = host->sg_ptr->length - host->sg_off; | ||
497 | if (count > data->blksz) | ||
498 | count = data->blksz; | ||
499 | |||
500 | pr_debug("count: %08x offset: %08x flags %08x\n", | ||
501 | count, host->sg_off, data->flags); | ||
502 | |||
503 | /* Transfer the data */ | ||
504 | if (data->flags & MMC_DATA_READ) | ||
505 | sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
506 | else | ||
507 | sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
508 | |||
509 | host->sg_off += count; | ||
510 | |||
511 | tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); | ||
512 | |||
513 | if (host->sg_off == host->sg_ptr->length) | ||
514 | tmio_mmc_next_sg(host); | ||
515 | |||
516 | return; | ||
517 | } | ||
518 | |||
519 | /* needs to be called with host->lock held */ | ||
520 | static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | ||
521 | { | ||
522 | struct mmc_data *data = host->data; | ||
523 | struct mmc_command *stop; | ||
524 | |||
525 | host->data = NULL; | ||
526 | |||
527 | if (!data) { | ||
528 | dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); | ||
529 | return; | ||
530 | } | ||
531 | stop = data->stop; | ||
532 | |||
533 | /* FIXME - return correct transfer count on errors */ | ||
534 | if (!data->error) | ||
535 | data->bytes_xfered = data->blocks * data->blksz; | ||
536 | else | ||
537 | data->bytes_xfered = 0; | ||
538 | |||
539 | pr_debug("Completed data request\n"); | ||
540 | |||
541 | /* | ||
542 | * FIXME: other drivers allow an optional stop command of any given type | ||
543 | * which we don't do, as the chip can auto-generate them. | ||
544 | * Perhaps we can be smarter about when to use auto CMD12 and | ||
545 | * only issue the auto request when we know this is the desired | ||
546 | * stop command, allowing fallback to the stop command the | ||
547 | * upper layers expect. For now, we do what works. | ||
548 | */ | ||
549 | |||
550 | if (data->flags & MMC_DATA_READ) { | ||
551 | if (!host->chan_rx) | ||
552 | disable_mmc_irqs(host, TMIO_MASK_READOP); | ||
553 | else | ||
554 | tmio_check_bounce_buffer(host); | ||
555 | dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", | ||
556 | host->mrq); | ||
557 | } else { | ||
558 | if (!host->chan_tx) | ||
559 | disable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
560 | dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", | ||
561 | host->mrq); | ||
562 | } | ||
563 | |||
564 | if (stop) { | ||
565 | if (stop->opcode == 12 && !stop->arg) | ||
566 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); | ||
567 | else | ||
568 | BUG(); | ||
569 | } | ||
570 | |||
571 | tmio_mmc_finish_request(host); | ||
572 | } | ||
573 | |||
574 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) | ||
575 | { | ||
576 | struct mmc_data *data; | ||
577 | spin_lock(&host->lock); | ||
578 | data = host->data; | ||
579 | |||
580 | if (!data) | ||
581 | goto out; | ||
582 | |||
583 | if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { | ||
584 | /* | ||
585 | * Has all data been written out yet? Testing on SuperH showed, | ||
586 | * that in most cases the first interrupt comes already with the | ||
587 | * BUSY status bit clear, but on some operations, like mount or | ||
588 | * in the beginning of a write / sync / umount, there is one | ||
589 | * DATAEND interrupt with the BUSY bit set, in this cases | ||
590 | * waiting for one more interrupt fixes the problem. | ||
591 | */ | ||
592 | if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { | ||
593 | disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
594 | tasklet_schedule(&host->dma_complete); | ||
595 | } | ||
596 | } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { | ||
597 | disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
598 | tasklet_schedule(&host->dma_complete); | ||
599 | } else { | ||
600 | tmio_mmc_do_data_irq(host); | ||
601 | } | ||
602 | out: | ||
603 | spin_unlock(&host->lock); | ||
604 | } | ||
605 | |||
606 | static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, | ||
607 | unsigned int stat) | ||
608 | { | ||
609 | struct mmc_command *cmd = host->cmd; | ||
610 | int i, addr; | ||
611 | |||
612 | spin_lock(&host->lock); | ||
613 | |||
614 | if (!host->cmd) { | ||
615 | pr_debug("Spurious CMD irq\n"); | ||
616 | goto out; | ||
617 | } | ||
618 | |||
619 | host->cmd = NULL; | ||
620 | |||
621 | /* This controller is sicker than the PXA one. Not only do we need to | ||
622 | * drop the top 8 bits of the first response word, we also need to | ||
623 | * modify the order of the response for short response command types. | ||
624 | */ | ||
625 | |||
626 | for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) | ||
627 | cmd->resp[i] = sd_ctrl_read32(host, addr); | ||
628 | |||
629 | if (cmd->flags & MMC_RSP_136) { | ||
630 | cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); | ||
631 | cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); | ||
632 | cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); | ||
633 | cmd->resp[3] <<= 8; | ||
634 | } else if (cmd->flags & MMC_RSP_R3) { | ||
635 | cmd->resp[0] = cmd->resp[3]; | ||
636 | } | ||
637 | |||
638 | if (stat & TMIO_STAT_CMDTIMEOUT) | ||
639 | cmd->error = -ETIMEDOUT; | ||
640 | else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) | ||
641 | cmd->error = -EILSEQ; | ||
642 | |||
643 | /* If there is data to handle we enable data IRQs here, and | ||
644 | * we will ultimately finish the request in the data_end handler. | ||
645 | * If there's no data or we encountered an error, finish now. | ||
646 | */ | ||
647 | if (host->data && !cmd->error) { | ||
648 | if (host->data->flags & MMC_DATA_READ) { | ||
649 | if (!host->chan_rx) | ||
650 | enable_mmc_irqs(host, TMIO_MASK_READOP); | ||
651 | } else { | ||
652 | if (!host->chan_tx) | ||
653 | enable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
654 | else | ||
655 | tasklet_schedule(&host->dma_issue); | ||
656 | } | ||
657 | } else { | ||
658 | tmio_mmc_finish_request(host); | ||
659 | } | ||
660 | |||
661 | out: | ||
662 | spin_unlock(&host->lock); | ||
663 | |||
664 | return; | ||
665 | } | ||
666 | |||
667 | static irqreturn_t tmio_mmc_irq(int irq, void *devid) | ||
668 | { | ||
669 | struct tmio_mmc_host *host = devid; | ||
670 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
671 | unsigned int ireg, irq_mask, status; | ||
672 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | ||
673 | |||
674 | pr_debug("MMC IRQ begin\n"); | ||
675 | |||
676 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
677 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
678 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
679 | |||
680 | sdio_ireg = 0; | ||
681 | if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
682 | sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
683 | sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); | ||
684 | sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; | ||
685 | |||
686 | sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); | ||
687 | |||
688 | if (sdio_ireg && !host->sdio_irq_enabled) { | ||
689 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | ||
690 | sdio_status, sdio_irq_mask, sdio_ireg); | ||
691 | tmio_mmc_enable_sdio_irq(host->mmc, 0); | ||
692 | goto out; | ||
693 | } | ||
694 | 23 | ||
695 | if (host->mmc->caps & MMC_CAP_SDIO_IRQ && | 24 | #include "tmio_mmc.h" |
696 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | ||
697 | mmc_signal_sdio_irq(host->mmc); | ||
698 | |||
699 | if (sdio_ireg) | ||
700 | goto out; | ||
701 | } | ||
702 | |||
703 | pr_debug_status(status); | ||
704 | pr_debug_status(ireg); | ||
705 | |||
706 | if (!ireg) { | ||
707 | disable_mmc_irqs(host, status & ~irq_mask); | ||
708 | |||
709 | pr_warning("tmio_mmc: Spurious irq, disabling! " | ||
710 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | ||
711 | pr_debug_status(status); | ||
712 | |||
713 | goto out; | ||
714 | } | ||
715 | |||
716 | while (ireg) { | ||
717 | /* Card insert / remove attempts */ | ||
718 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | ||
719 | ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | ||
720 | TMIO_STAT_CARD_REMOVE); | ||
721 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
722 | } | ||
723 | |||
724 | /* CRC and other errors */ | ||
725 | /* if (ireg & TMIO_STAT_ERR_IRQ) | ||
726 | * handled |= tmio_error_irq(host, irq, stat); | ||
727 | */ | ||
728 | |||
729 | /* Command completion */ | ||
730 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | ||
731 | ack_mmc_irqs(host, | ||
732 | TMIO_STAT_CMDRESPEND | | ||
733 | TMIO_STAT_CMDTIMEOUT); | ||
734 | tmio_mmc_cmd_irq(host, status); | ||
735 | } | ||
736 | |||
737 | /* Data transfer */ | ||
738 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | ||
739 | ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | ||
740 | tmio_mmc_pio_irq(host); | ||
741 | } | ||
742 | |||
743 | /* Data transfer completion */ | ||
744 | if (ireg & TMIO_STAT_DATAEND) { | ||
745 | ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
746 | tmio_mmc_data_irq(host); | ||
747 | } | ||
748 | |||
749 | /* Check status - keep going until we've handled it all */ | ||
750 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
751 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
752 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
753 | |||
754 | pr_debug("Status at end of loop: %08x\n", status); | ||
755 | pr_debug_status(status); | ||
756 | } | ||
757 | pr_debug("MMC IRQ end\n"); | ||
758 | |||
759 | out: | ||
760 | return IRQ_HANDLED; | ||
761 | } | ||
762 | |||
763 | #ifdef CONFIG_TMIO_MMC_DMA | ||
764 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) | ||
765 | { | ||
766 | if (host->sg_ptr == &host->bounce_sg) { | ||
767 | unsigned long flags; | ||
768 | void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); | ||
769 | memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); | ||
770 | tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); | ||
771 | } | ||
772 | } | ||
773 | |||
774 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
775 | { | ||
776 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | ||
777 | /* Switch DMA mode on or off - SuperH specific? */ | ||
778 | sd_ctrl_write16(host, 0xd8, enable ? 2 : 0); | ||
779 | #endif | ||
780 | } | ||
781 | |||
782 | static void tmio_dma_complete(void *arg) | ||
783 | { | ||
784 | struct tmio_mmc_host *host = arg; | ||
785 | |||
786 | dev_dbg(&host->pdev->dev, "Command completed\n"); | ||
787 | |||
788 | if (!host->data) | ||
789 | dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); | ||
790 | else | ||
791 | enable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
792 | } | ||
793 | |||
794 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | ||
795 | { | ||
796 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
797 | struct dma_async_tx_descriptor *desc = NULL; | ||
798 | struct dma_chan *chan = host->chan_rx; | ||
799 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
800 | dma_cookie_t cookie; | ||
801 | int ret, i; | ||
802 | bool aligned = true, multiple = true; | ||
803 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
804 | |||
805 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
806 | if (sg_tmp->offset & align) | ||
807 | aligned = false; | ||
808 | if (sg_tmp->length & align) { | ||
809 | multiple = false; | ||
810 | break; | ||
811 | } | ||
812 | } | ||
813 | |||
814 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
815 | align >= MAX_ALIGN)) || !multiple) { | ||
816 | ret = -EINVAL; | ||
817 | goto pio; | ||
818 | } | ||
819 | |||
820 | /* The only sg element can be unaligned, use our bounce buffer then */ | ||
821 | if (!aligned) { | ||
822 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
823 | host->sg_ptr = &host->bounce_sg; | ||
824 | sg = host->sg_ptr; | ||
825 | } | ||
826 | |||
827 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | ||
828 | if (ret > 0) | ||
829 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
830 | DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
831 | |||
832 | if (desc) { | ||
833 | desc->callback = tmio_dma_complete; | ||
834 | desc->callback_param = host; | ||
835 | cookie = dmaengine_submit(desc); | ||
836 | dma_async_issue_pending(chan); | ||
837 | } | ||
838 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
839 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
840 | |||
841 | pio: | ||
842 | if (!desc) { | ||
843 | /* DMA failed, fall back to PIO */ | ||
844 | if (ret >= 0) | ||
845 | ret = -EIO; | ||
846 | host->chan_rx = NULL; | ||
847 | dma_release_channel(chan); | ||
848 | /* Free the Tx channel too */ | ||
849 | chan = host->chan_tx; | ||
850 | if (chan) { | ||
851 | host->chan_tx = NULL; | ||
852 | dma_release_channel(chan); | ||
853 | } | ||
854 | dev_warn(&host->pdev->dev, | ||
855 | "DMA failed: %d, falling back to PIO\n", ret); | ||
856 | tmio_mmc_enable_dma(host, false); | ||
857 | } | ||
858 | |||
859 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | ||
860 | desc, cookie, host->sg_len); | ||
861 | } | ||
862 | |||
863 | static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | ||
864 | { | ||
865 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
866 | struct dma_async_tx_descriptor *desc = NULL; | ||
867 | struct dma_chan *chan = host->chan_tx; | ||
868 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
869 | dma_cookie_t cookie; | ||
870 | int ret, i; | ||
871 | bool aligned = true, multiple = true; | ||
872 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
873 | |||
874 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
875 | if (sg_tmp->offset & align) | ||
876 | aligned = false; | ||
877 | if (sg_tmp->length & align) { | ||
878 | multiple = false; | ||
879 | break; | ||
880 | } | ||
881 | } | ||
882 | |||
883 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
884 | align >= MAX_ALIGN)) || !multiple) { | ||
885 | ret = -EINVAL; | ||
886 | goto pio; | ||
887 | } | ||
888 | |||
889 | /* The only sg element can be unaligned, use our bounce buffer then */ | ||
890 | if (!aligned) { | ||
891 | unsigned long flags; | ||
892 | void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); | ||
893 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
894 | memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); | ||
895 | tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); | ||
896 | host->sg_ptr = &host->bounce_sg; | ||
897 | sg = host->sg_ptr; | ||
898 | } | ||
899 | |||
900 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | ||
901 | if (ret > 0) | ||
902 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
903 | DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
904 | |||
905 | if (desc) { | ||
906 | desc->callback = tmio_dma_complete; | ||
907 | desc->callback_param = host; | ||
908 | cookie = dmaengine_submit(desc); | ||
909 | } | ||
910 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
911 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
912 | |||
913 | pio: | ||
914 | if (!desc) { | ||
915 | /* DMA failed, fall back to PIO */ | ||
916 | if (ret >= 0) | ||
917 | ret = -EIO; | ||
918 | host->chan_tx = NULL; | ||
919 | dma_release_channel(chan); | ||
920 | /* Free the Rx channel too */ | ||
921 | chan = host->chan_rx; | ||
922 | if (chan) { | ||
923 | host->chan_rx = NULL; | ||
924 | dma_release_channel(chan); | ||
925 | } | ||
926 | dev_warn(&host->pdev->dev, | ||
927 | "DMA failed: %d, falling back to PIO\n", ret); | ||
928 | tmio_mmc_enable_dma(host, false); | ||
929 | } | ||
930 | |||
931 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, | ||
932 | desc, cookie); | ||
933 | } | ||
934 | |||
935 | static void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
936 | struct mmc_data *data) | ||
937 | { | ||
938 | if (data->flags & MMC_DATA_READ) { | ||
939 | if (host->chan_rx) | ||
940 | tmio_mmc_start_dma_rx(host); | ||
941 | } else { | ||
942 | if (host->chan_tx) | ||
943 | tmio_mmc_start_dma_tx(host); | ||
944 | } | ||
945 | } | ||
946 | |||
947 | static void tmio_issue_tasklet_fn(unsigned long priv) | ||
948 | { | ||
949 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; | ||
950 | struct dma_chan *chan = host->chan_tx; | ||
951 | |||
952 | dma_async_issue_pending(chan); | ||
953 | } | ||
954 | |||
955 | static void tmio_tasklet_fn(unsigned long arg) | ||
956 | { | ||
957 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; | ||
958 | unsigned long flags; | ||
959 | |||
960 | spin_lock_irqsave(&host->lock, flags); | ||
961 | |||
962 | if (!host->data) | ||
963 | goto out; | ||
964 | |||
965 | if (host->data->flags & MMC_DATA_READ) | ||
966 | dma_unmap_sg(host->chan_rx->device->dev, | ||
967 | host->sg_ptr, host->sg_len, | ||
968 | DMA_FROM_DEVICE); | ||
969 | else | ||
970 | dma_unmap_sg(host->chan_tx->device->dev, | ||
971 | host->sg_ptr, host->sg_len, | ||
972 | DMA_TO_DEVICE); | ||
973 | |||
974 | tmio_mmc_do_data_irq(host); | ||
975 | out: | ||
976 | spin_unlock_irqrestore(&host->lock, flags); | ||
977 | } | ||
978 | |||
979 | /* It might be necessary to make filter MFD specific */ | ||
980 | static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) | ||
981 | { | ||
982 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
983 | chan->private = arg; | ||
984 | return true; | ||
985 | } | ||
986 | |||
987 | static void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
988 | struct tmio_mmc_data *pdata) | ||
989 | { | ||
990 | /* We can only either use DMA for both Tx and Rx or not use it at all */ | ||
991 | if (pdata->dma) { | ||
992 | dma_cap_mask_t mask; | ||
993 | |||
994 | dma_cap_zero(mask); | ||
995 | dma_cap_set(DMA_SLAVE, mask); | ||
996 | |||
997 | host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, | ||
998 | pdata->dma->chan_priv_tx); | ||
999 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | ||
1000 | host->chan_tx); | ||
1001 | |||
1002 | if (!host->chan_tx) | ||
1003 | return; | ||
1004 | |||
1005 | host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, | ||
1006 | pdata->dma->chan_priv_rx); | ||
1007 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | ||
1008 | host->chan_rx); | ||
1009 | |||
1010 | if (!host->chan_rx) { | ||
1011 | dma_release_channel(host->chan_tx); | ||
1012 | host->chan_tx = NULL; | ||
1013 | return; | ||
1014 | } | ||
1015 | |||
1016 | tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); | ||
1017 | tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); | ||
1018 | |||
1019 | tmio_mmc_enable_dma(host, true); | ||
1020 | } | ||
1021 | } | ||
1022 | |||
1023 | static void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
1024 | { | ||
1025 | if (host->chan_tx) { | ||
1026 | struct dma_chan *chan = host->chan_tx; | ||
1027 | host->chan_tx = NULL; | ||
1028 | dma_release_channel(chan); | ||
1029 | } | ||
1030 | if (host->chan_rx) { | ||
1031 | struct dma_chan *chan = host->chan_rx; | ||
1032 | host->chan_rx = NULL; | ||
1033 | dma_release_channel(chan); | ||
1034 | } | ||
1035 | } | ||
1036 | #else | ||
1037 | static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) | ||
1038 | { | ||
1039 | } | ||
1040 | |||
1041 | static void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
1042 | struct mmc_data *data) | ||
1043 | { | ||
1044 | } | ||
1045 | |||
1046 | static void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
1047 | struct tmio_mmc_data *pdata) | ||
1048 | { | ||
1049 | host->chan_tx = NULL; | ||
1050 | host->chan_rx = NULL; | ||
1051 | } | ||
1052 | |||
1053 | static void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
1054 | { | ||
1055 | } | ||
1056 | #endif | ||
1057 | |||
1058 | static int tmio_mmc_start_data(struct tmio_mmc_host *host, | ||
1059 | struct mmc_data *data) | ||
1060 | { | ||
1061 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1062 | |||
1063 | pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", | ||
1064 | data->blksz, data->blocks); | ||
1065 | |||
1066 | /* Some hardware cannot perform 2 byte requests in 4 bit mode */ | ||
1067 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { | ||
1068 | int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; | ||
1069 | |||
1070 | if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { | ||
1071 | pr_err("%s: %d byte block unsupported in 4 bit mode\n", | ||
1072 | mmc_hostname(host->mmc), data->blksz); | ||
1073 | return -EINVAL; | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | tmio_mmc_init_sg(host, data); | ||
1078 | host->data = data; | ||
1079 | |||
1080 | /* Set transfer length / blocksize */ | ||
1081 | sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); | ||
1082 | sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); | ||
1083 | |||
1084 | tmio_mmc_start_dma(host, data); | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
1088 | |||
1089 | /* Process requests from the MMC layer */ | ||
1090 | static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
1091 | { | ||
1092 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1093 | int ret; | ||
1094 | |||
1095 | if (host->mrq) | ||
1096 | pr_debug("request not null\n"); | ||
1097 | |||
1098 | host->last_req_ts = jiffies; | ||
1099 | wmb(); | ||
1100 | host->mrq = mrq; | ||
1101 | |||
1102 | if (mrq->data) { | ||
1103 | ret = tmio_mmc_start_data(host, mrq->data); | ||
1104 | if (ret) | ||
1105 | goto fail; | ||
1106 | } | ||
1107 | |||
1108 | ret = tmio_mmc_start_command(host, mrq->cmd); | ||
1109 | if (!ret) { | ||
1110 | schedule_delayed_work(&host->delayed_reset_work, | ||
1111 | msecs_to_jiffies(2000)); | ||
1112 | return; | ||
1113 | } | ||
1114 | |||
1115 | fail: | ||
1116 | host->mrq = NULL; | ||
1117 | mrq->cmd->error = ret; | ||
1118 | mmc_request_done(mmc, mrq); | ||
1119 | } | ||
1120 | |||
1121 | /* Set MMC clock / power. | ||
1122 | * Note: This controller uses a simple divider scheme, therefore it cannot | ||
1123 | * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as | ||
1124 | * MMC won't run that fast, it has to be clocked at 12MHz, which is the next | ||
1125 | * slowest setting. | ||
1126 | */ | ||
1127 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
1128 | { | ||
1129 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1130 | |||
1131 | if (ios->clock) | ||
1132 | tmio_mmc_set_clock(host, ios->clock); | ||
1133 | |||
1134 | /* Power sequence - OFF -> UP -> ON */ | ||
1135 | switch (ios->power_mode) { | ||
1136 | case MMC_POWER_OFF: /* power down SD bus */ | ||
1137 | if (host->set_pwr) | ||
1138 | host->set_pwr(host->pdev, 0); | ||
1139 | tmio_mmc_clk_stop(host); | ||
1140 | break; | ||
1141 | case MMC_POWER_ON: /* power up SD bus */ | ||
1142 | if (host->set_pwr) | ||
1143 | host->set_pwr(host->pdev, 1); | ||
1144 | break; | ||
1145 | case MMC_POWER_UP: /* start bus clock */ | ||
1146 | tmio_mmc_clk_start(host); | ||
1147 | break; | ||
1148 | } | ||
1149 | |||
1150 | switch (ios->bus_width) { | ||
1151 | case MMC_BUS_WIDTH_1: | ||
1152 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
1153 | break; | ||
1154 | case MMC_BUS_WIDTH_4: | ||
1155 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
1156 | break; | ||
1157 | } | ||
1158 | |||
1159 | /* Let things settle. Delay taken from the WinCE driver. */ | ||
1160 | udelay(140); | ||
1161 | } | ||
1162 | |||
1163 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | ||
1164 | { | ||
1165 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1166 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1167 | |||
1168 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | ||
1169 | (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1; | ||
1170 | } | ||
1171 | |||
1172 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | ||
1173 | { | ||
1174 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
1175 | struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); | ||
1176 | |||
1177 | if (!pdata->get_cd) | ||
1178 | return -ENOSYS; | ||
1179 | else | ||
1180 | return pdata->get_cd(host->pdev); | ||
1181 | } | ||
1182 | |||
1183 | static const struct mmc_host_ops tmio_mmc_ops = { | ||
1184 | .request = tmio_mmc_request, | ||
1185 | .set_ios = tmio_mmc_set_ios, | ||
1186 | .get_ro = tmio_mmc_get_ro, | ||
1187 | .get_cd = tmio_mmc_get_cd, | ||
1188 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | ||
1189 | }; | ||
1190 | 25 | ||
1191 | #ifdef CONFIG_PM | 26 | #ifdef CONFIG_PM |
1192 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) | 27 | static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) |
@@ -1227,138 +62,54 @@ out: | |||
1227 | #define tmio_mmc_resume NULL | 62 | #define tmio_mmc_resume NULL |
1228 | #endif | 63 | #endif |
1229 | 64 | ||
1230 | static int __devinit tmio_mmc_probe(struct platform_device *dev) | 65 | static int __devinit tmio_mmc_probe(struct platform_device *pdev) |
1231 | { | 66 | { |
1232 | const struct mfd_cell *cell = mfd_get_cell(dev); | 67 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1233 | struct tmio_mmc_data *pdata; | 68 | struct tmio_mmc_data *pdata; |
1234 | struct resource *res_ctl; | ||
1235 | struct tmio_mmc_host *host; | 69 | struct tmio_mmc_host *host; |
1236 | struct mmc_host *mmc; | ||
1237 | int ret = -EINVAL; | 70 | int ret = -EINVAL; |
1238 | u32 irq_mask = TMIO_MASK_CMD; | ||
1239 | 71 | ||
1240 | if (dev->num_resources != 2) | 72 | if (pdev->num_resources != 2) |
1241 | goto out; | 73 | goto out; |
1242 | 74 | ||
1243 | res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); | 75 | pdata = mfd_get_data(pdev); |
1244 | if (!res_ctl) | ||
1245 | goto out; | ||
1246 | |||
1247 | pdata = mfd_get_data(dev); | ||
1248 | if (!pdata || !pdata->hclk) | 76 | if (!pdata || !pdata->hclk) |
1249 | goto out; | 77 | goto out; |
1250 | 78 | ||
1251 | ret = -ENOMEM; | ||
1252 | |||
1253 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); | ||
1254 | if (!mmc) | ||
1255 | goto out; | ||
1256 | |||
1257 | host = mmc_priv(mmc); | ||
1258 | host->mmc = mmc; | ||
1259 | host->pdev = dev; | ||
1260 | platform_set_drvdata(dev, mmc); | ||
1261 | |||
1262 | host->set_pwr = pdata->set_pwr; | ||
1263 | host->set_clk_div = pdata->set_clk_div; | ||
1264 | |||
1265 | /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ | ||
1266 | host->bus_shift = resource_size(res_ctl) >> 10; | ||
1267 | |||
1268 | host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); | ||
1269 | if (!host->ctl) | ||
1270 | goto host_free; | ||
1271 | |||
1272 | mmc->ops = &tmio_mmc_ops; | ||
1273 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | ||
1274 | mmc->f_max = pdata->hclk; | ||
1275 | mmc->f_min = mmc->f_max / 512; | ||
1276 | mmc->max_segs = 32; | ||
1277 | mmc->max_blk_size = 512; | ||
1278 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | ||
1279 | mmc->max_segs; | ||
1280 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
1281 | mmc->max_seg_size = mmc->max_req_size; | ||
1282 | if (pdata->ocr_mask) | ||
1283 | mmc->ocr_avail = pdata->ocr_mask; | ||
1284 | else | ||
1285 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
1286 | |||
1287 | /* Tell the MFD core we are ready to be enabled */ | 79 | /* Tell the MFD core we are ready to be enabled */ |
1288 | if (cell->enable) { | 80 | if (cell->enable) { |
1289 | ret = cell->enable(dev); | 81 | ret = cell->enable(pdev); |
1290 | if (ret) | 82 | if (ret) |
1291 | goto unmap_ctl; | 83 | goto out; |
1292 | } | 84 | } |
1293 | 85 | ||
1294 | tmio_mmc_clk_stop(host); | 86 | ret = tmio_mmc_host_probe(&host, pdev, pdata); |
1295 | reset(host); | ||
1296 | |||
1297 | ret = platform_get_irq(dev, 0); | ||
1298 | if (ret >= 0) | ||
1299 | host->irq = ret; | ||
1300 | else | ||
1301 | goto cell_disable; | ||
1302 | |||
1303 | disable_mmc_irqs(host, TMIO_MASK_ALL); | ||
1304 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
1305 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
1306 | |||
1307 | ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | | ||
1308 | IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); | ||
1309 | if (ret) | 87 | if (ret) |
1310 | goto cell_disable; | 88 | goto cell_disable; |
1311 | 89 | ||
1312 | spin_lock_init(&host->lock); | ||
1313 | |||
1314 | /* Init delayed work for request timeouts */ | ||
1315 | INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work); | ||
1316 | |||
1317 | /* See if we also get DMA */ | ||
1318 | tmio_mmc_request_dma(host, pdata); | ||
1319 | |||
1320 | mmc_add_host(mmc); | ||
1321 | |||
1322 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), | 90 | pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), |
1323 | (unsigned long)host->ctl, host->irq); | 91 | (unsigned long)host->ctl, host->irq); |
1324 | 92 | ||
1325 | /* Unmask the IRQs we want to know about */ | ||
1326 | if (!host->chan_rx) | ||
1327 | irq_mask |= TMIO_MASK_READOP; | ||
1328 | if (!host->chan_tx) | ||
1329 | irq_mask |= TMIO_MASK_WRITEOP; | ||
1330 | enable_mmc_irqs(host, irq_mask); | ||
1331 | |||
1332 | return 0; | 93 | return 0; |
1333 | 94 | ||
1334 | cell_disable: | 95 | cell_disable: |
1335 | if (cell->disable) | 96 | if (cell->disable) |
1336 | cell->disable(dev); | 97 | cell->disable(pdev); |
1337 | unmap_ctl: | ||
1338 | iounmap(host->ctl); | ||
1339 | host_free: | ||
1340 | mmc_free_host(mmc); | ||
1341 | out: | 98 | out: |
1342 | return ret; | 99 | return ret; |
1343 | } | 100 | } |
1344 | 101 | ||
1345 | static int __devexit tmio_mmc_remove(struct platform_device *dev) | 102 | static int __devexit tmio_mmc_remove(struct platform_device *pdev) |
1346 | { | 103 | { |
1347 | const struct mfd_cell *cell = mfd_get_cell(dev); | 104 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
1348 | struct mmc_host *mmc = platform_get_drvdata(dev); | 105 | struct mmc_host *mmc = platform_get_drvdata(pdev); |
1349 | 106 | ||
1350 | platform_set_drvdata(dev, NULL); | 107 | platform_set_drvdata(pdev, NULL); |
1351 | 108 | ||
1352 | if (mmc) { | 109 | if (mmc) { |
1353 | struct tmio_mmc_host *host = mmc_priv(mmc); | 110 | tmio_mmc_host_remove(mmc_priv(mmc)); |
1354 | mmc_remove_host(mmc); | ||
1355 | cancel_delayed_work_sync(&host->delayed_reset_work); | ||
1356 | tmio_mmc_release_dma(host); | ||
1357 | free_irq(host->irq, host); | ||
1358 | if (cell->disable) | 111 | if (cell->disable) |
1359 | cell->disable(dev); | 112 | cell->disable(pdev); |
1360 | iounmap(host->ctl); | ||
1361 | mmc_free_host(mmc); | ||
1362 | } | 113 | } |
1363 | 114 | ||
1364 | return 0; | 115 | return 0; |
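With the split above, the original tmio_mmc.c is reduced to a thin MFD glue layer: it validates the platform data, enables the MFD cell and delegates everything else to tmio_mmc_host_probe()/tmio_mmc_host_remove(). A hedged sketch of a minimal glue driver built on the same interface follows; the "example-mmc" name and the platform_data lookup are hypothetical (the real driver obtains its data through the MFD core, as the hunk shows).

/* Sketch only: a hypothetical glue driver for the new tmio_mmc core library. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include "tmio_mmc.h"

static int __devinit example_mmc_probe(struct platform_device *pdev)
{
	struct tmio_mmc_data *pdata = pdev->dev.platform_data;	/* assumed source */
	struct tmio_mmc_host *host;
	int ret;

	if (!pdata || !pdata->hclk)
		return -EINVAL;

	/* The library maps the first MEM resource, requests the IRQ,
	 * sets up optional DMA and registers the mmc_host. */
	ret = tmio_mmc_host_probe(&host, pdev, pdata);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "%s at %p irq %d\n",
		 mmc_hostname(host->mmc), host->ctl, host->irq);
	return 0;
}

static int __devexit example_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);	/* set by the library */

	if (mmc)
		tmio_mmc_host_remove(mmc_priv(mmc));
	return 0;
}

static struct platform_driver example_mmc_driver = {
	.driver	= { .name = "example-mmc", .owner = THIS_MODULE },
	.probe	= example_mmc_probe,
	.remove	= __devexit_p(example_mmc_remove),
};

static int __init example_mmc_init(void)
{
	return platform_driver_register(&example_mmc_driver);
}
module_init(example_mmc_init);

static void __exit example_mmc_exit(void)
{
	platform_driver_unregister(&example_mmc_driver);
}
module_exit(example_mmc_exit);

MODULE_LICENSE("GPL v2");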
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h new file mode 100644 index 000000000000..099ed49a259b --- /dev/null +++ b/drivers/mmc/host/tmio_mmc.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/host/tmio_mmc.h | ||
3 | * | ||
4 | * Copyright (C) 2007 Ian Molton | ||
5 | * Copyright (C) 2004 Ian Molton | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Driver for the MMC / SD / SDIO cell found in: | ||
12 | * | ||
13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 | ||
14 | */ | ||
15 | |||
16 | #ifndef TMIO_MMC_H | ||
17 | #define TMIO_MMC_H | ||
18 | |||
19 | #include <linux/highmem.h> | ||
20 | #include <linux/mmc/tmio.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | |||
23 | /* Definitions for values the CTRL_SDIO_STATUS register can take. */ | ||
24 | #define TMIO_SDIO_STAT_IOIRQ 0x0001 | ||
25 | #define TMIO_SDIO_STAT_EXPUB52 0x4000 | ||
26 | #define TMIO_SDIO_STAT_EXWT 0x8000 | ||
27 | #define TMIO_SDIO_MASK_ALL 0xc007 | ||
28 | |||
29 | /* Define some IRQ masks */ | ||
30 | /* This is the mask used at reset by the chip */ | ||
31 | #define TMIO_MASK_ALL 0x837f031d | ||
32 | #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) | ||
33 | #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) | ||
34 | #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ | ||
35 | TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) | ||
36 | #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) | ||
37 | |||
38 | struct tmio_mmc_data; | ||
39 | |||
40 | struct tmio_mmc_host { | ||
41 | void __iomem *ctl; | ||
42 | unsigned long bus_shift; | ||
43 | struct mmc_command *cmd; | ||
44 | struct mmc_request *mrq; | ||
45 | struct mmc_data *data; | ||
46 | struct mmc_host *mmc; | ||
47 | int irq; | ||
48 | unsigned int sdio_irq_enabled; | ||
49 | |||
50 | /* Callbacks for clock / power control */ | ||
51 | void (*set_pwr)(struct platform_device *host, int state); | ||
52 | void (*set_clk_div)(struct platform_device *host, int state); | ||
53 | |||
54 | /* pio related stuff */ | ||
55 | struct scatterlist *sg_ptr; | ||
56 | struct scatterlist *sg_orig; | ||
57 | unsigned int sg_len; | ||
58 | unsigned int sg_off; | ||
59 | |||
60 | struct platform_device *pdev; | ||
61 | struct tmio_mmc_data *pdata; | ||
62 | |||
63 | /* DMA support */ | ||
64 | bool force_pio; | ||
65 | struct dma_chan *chan_rx; | ||
66 | struct dma_chan *chan_tx; | ||
67 | struct tasklet_struct dma_complete; | ||
68 | struct tasklet_struct dma_issue; | ||
69 | struct scatterlist bounce_sg; | ||
70 | u8 *bounce_buf; | ||
71 | |||
72 | /* Track lost interrupts */ | ||
73 | struct delayed_work delayed_reset_work; | ||
74 | spinlock_t lock; | ||
75 | unsigned long last_req_ts; | ||
76 | }; | ||
77 | |||
78 | int tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
79 | struct platform_device *pdev, | ||
80 | struct tmio_mmc_data *pdata); | ||
81 | void tmio_mmc_host_remove(struct tmio_mmc_host *host); | ||
82 | void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); | ||
83 | |||
84 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | ||
85 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); | ||
86 | |||
87 | static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, | ||
88 | unsigned long *flags) | ||
89 | { | ||
90 | local_irq_save(*flags); | ||
91 | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | ||
92 | } | ||
93 | |||
94 | static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, | ||
95 | unsigned long *flags, void *virt) | ||
96 | { | ||
97 | kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); | ||
98 | local_irq_restore(*flags); | ||
99 | } | ||
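These two inlines bracket kmap_atomic() with local_irq_save()/local_irq_restore() so a scatterlist page can be copied from tasklet or IRQ context. A minimal usage sketch (the helper name is made up; tmio_mmc_dma.c below does essentially this when it fills the bounce buffer):

/* Sketch: copy one sg element into a driver buffer via the helpers above;
 * buf is assumed to be at least sg->length bytes long. */
#include <linux/scatterlist.h>
#include <linux/string.h>
#include "tmio_mmc.h"

static void example_copy_sg(struct scatterlist *sg, u8 *buf)
{
	unsigned long flags;
	void *vaddr = tmio_mmc_kmap_atomic(sg, &flags);

	memcpy(buf, vaddr, sg->length);
	tmio_mmc_kunmap_atomic(sg, &flags, vaddr);
}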
100 | |||
101 | #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) | ||
102 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); | ||
103 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); | ||
104 | void tmio_mmc_release_dma(struct tmio_mmc_host *host); | ||
105 | #else | ||
106 | static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
107 | struct mmc_data *data) | ||
108 | { | ||
109 | } | ||
110 | |||
111 | static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, | ||
112 | struct tmio_mmc_data *pdata) | ||
113 | { | ||
114 | host->chan_tx = NULL; | ||
115 | host->chan_rx = NULL; | ||
116 | } | ||
117 | |||
118 | static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
119 | { | ||
120 | } | ||
121 | #endif | ||
122 | |||
123 | #endif | ||
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c new file mode 100644 index 000000000000..d3de74ab633e --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/tmio_mmc_dma.c | ||
3 | * | ||
4 | * Copyright (C) 2010-2011 Guennadi Liakhovetski | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * DMA function for TMIO MMC implementations | ||
11 | */ | ||
12 | |||
13 | #include <linux/device.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/mfd/tmio.h> | ||
16 | #include <linux/mmc/host.h> | ||
17 | #include <linux/mmc/tmio.h> | ||
18 | #include <linux/pagemap.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | |||
21 | #include "tmio_mmc.h" | ||
22 | |||
23 | #define TMIO_MMC_MIN_DMA_LEN 8 | ||
24 | |||
25 | static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) | ||
26 | { | ||
27 | #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) | ||
28 | /* Switch DMA mode on or off - SuperH specific? */ | ||
29 | writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); | ||
30 | #endif | ||
31 | } | ||
32 | |||
33 | static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | ||
34 | { | ||
35 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
36 | struct dma_async_tx_descriptor *desc = NULL; | ||
37 | struct dma_chan *chan = host->chan_rx; | ||
38 | struct tmio_mmc_data *pdata = host->pdata; | ||
39 | dma_cookie_t cookie; | ||
40 | int ret, i; | ||
41 | bool aligned = true, multiple = true; | ||
42 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
43 | |||
44 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
45 | if (sg_tmp->offset & align) | ||
46 | aligned = false; | ||
47 | if (sg_tmp->length & align) { | ||
48 | multiple = false; | ||
49 | break; | ||
50 | } | ||
51 | } | ||
52 | |||
53 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
54 | (align & PAGE_MASK))) || !multiple) { | ||
55 | ret = -EINVAL; | ||
56 | goto pio; | ||
57 | } | ||
58 | |||
59 | if (sg->length < TMIO_MMC_MIN_DMA_LEN) { | ||
60 | host->force_pio = true; | ||
61 | return; | ||
62 | } | ||
63 | |||
64 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); | ||
65 | |||
66 | /* The single sg element may be unaligned; if so, use our bounce buffer */ | ||
67 | if (!aligned) { | ||
68 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
69 | host->sg_ptr = &host->bounce_sg; | ||
70 | sg = host->sg_ptr; | ||
71 | } | ||
72 | |||
73 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | ||
74 | if (ret > 0) | ||
75 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
76 | DMA_FROM_DEVICE, DMA_CTRL_ACK); | ||
77 | |||
78 | if (desc) { | ||
79 | cookie = dmaengine_submit(desc); | ||
80 | if (cookie < 0) { | ||
81 | desc = NULL; | ||
82 | ret = cookie; | ||
83 | } | ||
84 | } | ||
85 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
86 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
87 | |||
88 | pio: | ||
89 | if (!desc) { | ||
90 | /* DMA failed, fall back to PIO */ | ||
91 | if (ret >= 0) | ||
92 | ret = -EIO; | ||
93 | host->chan_rx = NULL; | ||
94 | dma_release_channel(chan); | ||
95 | /* Free the Tx channel too */ | ||
96 | chan = host->chan_tx; | ||
97 | if (chan) { | ||
98 | host->chan_tx = NULL; | ||
99 | dma_release_channel(chan); | ||
100 | } | ||
101 | dev_warn(&host->pdev->dev, | ||
102 | "DMA failed: %d, falling back to PIO\n", ret); | ||
103 | tmio_mmc_enable_dma(host, false); | ||
104 | } | ||
105 | |||
106 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, | ||
107 | desc, cookie, host->sg_len); | ||
108 | } | ||
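The eligibility test at the top of the function is plain mask arithmetic: the controller needs (1 << alignment_shift)-byte alignment, so any low bit set in an element's offset or length disqualifies it. The standalone illustration below is simplified and uses made-up values; the real code can still push a single misaligned element through the bounce buffer.

#include <stdbool.h>
#include <stdio.h>

/* Simplified: true if one sg element meets the DMA alignment requirement. */
static bool dma_capable(unsigned int offset, unsigned int length,
			unsigned int alignment_shift)
{
	unsigned int align = (1u << alignment_shift) - 1;

	return !(offset & align) && !(length & align);
}

int main(void)
{
	/* alignment_shift = 1 means 2-byte alignment */
	printf("%d\n", dma_capable(0, 512, 1));	/* 1: aligned */
	printf("%d\n", dma_capable(3, 512, 1));	/* 0: odd offset */
	printf("%d\n", dma_capable(0, 511, 1));	/* 0: odd length */
	return 0;
}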
109 | |||
110 | static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | ||
111 | { | ||
112 | struct scatterlist *sg = host->sg_ptr, *sg_tmp; | ||
113 | struct dma_async_tx_descriptor *desc = NULL; | ||
114 | struct dma_chan *chan = host->chan_tx; | ||
115 | struct tmio_mmc_data *pdata = host->pdata; | ||
116 | dma_cookie_t cookie; | ||
117 | int ret, i; | ||
118 | bool aligned = true, multiple = true; | ||
119 | unsigned int align = (1 << pdata->dma->alignment_shift) - 1; | ||
120 | |||
121 | for_each_sg(sg, sg_tmp, host->sg_len, i) { | ||
122 | if (sg_tmp->offset & align) | ||
123 | aligned = false; | ||
124 | if (sg_tmp->length & align) { | ||
125 | multiple = false; | ||
126 | break; | ||
127 | } | ||
128 | } | ||
129 | |||
130 | if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || | ||
131 | (align & PAGE_MASK))) || !multiple) { | ||
132 | ret = -EINVAL; | ||
133 | goto pio; | ||
134 | } | ||
135 | |||
136 | if (sg->length < TMIO_MMC_MIN_DMA_LEN) { | ||
137 | host->force_pio = true; | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); | ||
142 | |||
143 | /* The single sg element may be unaligned; if so, use our bounce buffer */ | ||
144 | if (!aligned) { | ||
145 | unsigned long flags; | ||
146 | void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); | ||
147 | sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); | ||
148 | memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); | ||
149 | tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); | ||
150 | host->sg_ptr = &host->bounce_sg; | ||
151 | sg = host->sg_ptr; | ||
152 | } | ||
153 | |||
154 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | ||
155 | if (ret > 0) | ||
156 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | ||
157 | DMA_TO_DEVICE, DMA_CTRL_ACK); | ||
158 | |||
159 | if (desc) { | ||
160 | cookie = dmaengine_submit(desc); | ||
161 | if (cookie < 0) { | ||
162 | desc = NULL; | ||
163 | ret = cookie; | ||
164 | } | ||
165 | } | ||
166 | dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", | ||
167 | __func__, host->sg_len, ret, cookie, host->mrq); | ||
168 | |||
169 | pio: | ||
170 | if (!desc) { | ||
171 | /* DMA failed, fall back to PIO */ | ||
172 | if (ret >= 0) | ||
173 | ret = -EIO; | ||
174 | host->chan_tx = NULL; | ||
175 | dma_release_channel(chan); | ||
176 | /* Free the Rx channel too */ | ||
177 | chan = host->chan_rx; | ||
178 | if (chan) { | ||
179 | host->chan_rx = NULL; | ||
180 | dma_release_channel(chan); | ||
181 | } | ||
182 | dev_warn(&host->pdev->dev, | ||
183 | "DMA failed: %d, falling back to PIO\n", ret); | ||
184 | tmio_mmc_enable_dma(host, false); | ||
185 | } | ||
186 | |||
187 | dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, | ||
188 | desc, cookie); | ||
189 | } | ||
190 | |||
191 | void tmio_mmc_start_dma(struct tmio_mmc_host *host, | ||
192 | struct mmc_data *data) | ||
193 | { | ||
194 | if (data->flags & MMC_DATA_READ) { | ||
195 | if (host->chan_rx) | ||
196 | tmio_mmc_start_dma_rx(host); | ||
197 | } else { | ||
198 | if (host->chan_tx) | ||
199 | tmio_mmc_start_dma_tx(host); | ||
200 | } | ||
201 | } | ||
202 | |||
203 | static void tmio_mmc_issue_tasklet_fn(unsigned long priv) | ||
204 | { | ||
205 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; | ||
206 | struct dma_chan *chan = NULL; | ||
207 | |||
208 | spin_lock_irq(&host->lock); | ||
209 | |||
210 | if (host && host->data) { | ||
211 | if (host->data->flags & MMC_DATA_READ) | ||
212 | chan = host->chan_rx; | ||
213 | else | ||
214 | chan = host->chan_tx; | ||
215 | } | ||
216 | |||
217 | spin_unlock_irq(&host->lock); | ||
218 | |||
219 | tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
220 | |||
221 | if (chan) | ||
222 | dma_async_issue_pending(chan); | ||
223 | } | ||
224 | |||
225 | static void tmio_mmc_tasklet_fn(unsigned long arg) | ||
226 | { | ||
227 | struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; | ||
228 | |||
229 | spin_lock_irq(&host->lock); | ||
230 | |||
231 | if (!host->data) | ||
232 | goto out; | ||
233 | |||
234 | if (host->data->flags & MMC_DATA_READ) | ||
235 | dma_unmap_sg(host->chan_rx->device->dev, | ||
236 | host->sg_ptr, host->sg_len, | ||
237 | DMA_FROM_DEVICE); | ||
238 | else | ||
239 | dma_unmap_sg(host->chan_tx->device->dev, | ||
240 | host->sg_ptr, host->sg_len, | ||
241 | DMA_TO_DEVICE); | ||
242 | |||
243 | tmio_mmc_do_data_irq(host); | ||
244 | out: | ||
245 | spin_unlock_irq(&host->lock); | ||
246 | } | ||
247 | |||
248 | /* It might be necessary to make the filter MFD-specific */ | ||
249 | static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) | ||
250 | { | ||
251 | dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); | ||
252 | chan->private = arg; | ||
253 | return true; | ||
254 | } | ||
255 | |||
256 | void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) | ||
257 | { | ||
258 | /* We either use DMA for both Tx and Rx or not at all */ | ||
259 | if (pdata->dma) { | ||
260 | dma_cap_mask_t mask; | ||
261 | |||
262 | dma_cap_zero(mask); | ||
263 | dma_cap_set(DMA_SLAVE, mask); | ||
264 | |||
265 | host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, | ||
266 | pdata->dma->chan_priv_tx); | ||
267 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | ||
268 | host->chan_tx); | ||
269 | |||
270 | if (!host->chan_tx) | ||
271 | return; | ||
272 | |||
273 | host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, | ||
274 | pdata->dma->chan_priv_rx); | ||
275 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | ||
276 | host->chan_rx); | ||
277 | |||
278 | if (!host->chan_rx) | ||
279 | goto ereqrx; | ||
280 | |||
281 | host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); | ||
282 | if (!host->bounce_buf) | ||
283 | goto ebouncebuf; | ||
284 | |||
285 | tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); | ||
286 | tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); | ||
287 | |||
288 | tmio_mmc_enable_dma(host, true); | ||
289 | |||
290 | return; | ||
291 | ebouncebuf: | ||
292 | dma_release_channel(host->chan_rx); | ||
293 | host->chan_rx = NULL; | ||
294 | ereqrx: | ||
295 | dma_release_channel(host->chan_tx); | ||
296 | host->chan_tx = NULL; | ||
297 | return; | ||
298 | } | ||
299 | } | ||
300 | |||
301 | void tmio_mmc_release_dma(struct tmio_mmc_host *host) | ||
302 | { | ||
303 | if (host->chan_tx) { | ||
304 | struct dma_chan *chan = host->chan_tx; | ||
305 | host->chan_tx = NULL; | ||
306 | dma_release_channel(chan); | ||
307 | } | ||
308 | if (host->chan_rx) { | ||
309 | struct dma_chan *chan = host->chan_rx; | ||
310 | host->chan_rx = NULL; | ||
311 | dma_release_channel(chan); | ||
312 | } | ||
313 | if (host->bounce_buf) { | ||
314 | free_pages((unsigned long)host->bounce_buf, 0); | ||
315 | host->bounce_buf = NULL; | ||
316 | } | ||
317 | } | ||
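tmio_mmc_request_dma() above follows the stock dmaengine slave-channel pattern: build a capability mask, hand dma_request_channel() a filter callback that parks the platform's slave data in chan->private, and unwind both channels plus the bounce page on any failure. A stripped-down sketch of just the request half, with illustrative names:

/* Sketch of the dmaengine slave-channel request pattern used above. */
#include <linux/dmaengine.h>

static bool example_filter(struct dma_chan *chan, void *slave_data)
{
	chan->private = slave_data;	/* lets the DMA driver match the slave */
	return true;
}

static struct dma_chan *example_request(void *slave_data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);	/* only slave (peripheral) DMA is needed */

	/* Returns NULL if no capable channel accepted the filter. */
	return dma_request_channel(mask, example_filter, slave_data);
}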
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c new file mode 100644 index 000000000000..6ae8d2f00ec7 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_pio.c | |||
@@ -0,0 +1,897 @@ | |||
1 | /* | ||
2 | * linux/drivers/mmc/host/tmio_mmc_pio.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Guennadi Liakhovetski | ||
5 | * Copyright (C) 2007 Ian Molton | ||
6 | * Copyright (C) 2004 Ian Molton | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Driver for the MMC / SD / SDIO IP found in: | ||
13 | * | ||
14 | * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs | ||
15 | * | ||
16 | * This driver draws mainly on scattered spec sheets, reverse engineering | ||
17 | * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit | ||
18 | * support). (Further 4 bit support from a later datasheet). | ||
19 | * | ||
20 | * TODO: | ||
21 | * Investigate using a workqueue for PIO transfers | ||
22 | * Eliminate FIXMEs | ||
23 | * SDIO support | ||
24 | * Better Power management | ||
25 | * Handle MMC errors better | ||
26 | * double buffer support | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | #include <linux/delay.h> | ||
31 | #include <linux/device.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/irq.h> | ||
36 | #include <linux/mfd/tmio.h> | ||
37 | #include <linux/mmc/host.h> | ||
38 | #include <linux/mmc/tmio.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/pagemap.h> | ||
41 | #include <linux/platform_device.h> | ||
42 | #include <linux/scatterlist.h> | ||
43 | #include <linux/workqueue.h> | ||
44 | #include <linux/spinlock.h> | ||
45 | |||
46 | #include "tmio_mmc.h" | ||
47 | |||
48 | static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) | ||
49 | { | ||
50 | return readw(host->ctl + (addr << host->bus_shift)); | ||
51 | } | ||
52 | |||
53 | static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, | ||
54 | u16 *buf, int count) | ||
55 | { | ||
56 | readsw(host->ctl + (addr << host->bus_shift), buf, count); | ||
57 | } | ||
58 | |||
59 | static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) | ||
60 | { | ||
61 | return readw(host->ctl + (addr << host->bus_shift)) | | ||
62 | readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; | ||
63 | } | ||
64 | |||
65 | static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) | ||
66 | { | ||
67 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
68 | } | ||
69 | |||
70 | static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, | ||
71 | u16 *buf, int count) | ||
72 | { | ||
73 | writesw(host->ctl + (addr << host->bus_shift), buf, count); | ||
74 | } | ||
75 | |||
76 | static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) | ||
77 | { | ||
78 | writew(val, host->ctl + (addr << host->bus_shift)); | ||
79 | writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); | ||
80 | } | ||
81 | |||
82 | void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
83 | { | ||
84 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); | ||
85 | sd_ctrl_write32(host, CTL_IRQ_MASK, mask); | ||
86 | } | ||
87 | |||
88 | void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
89 | { | ||
90 | u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); | ||
91 | sd_ctrl_write32(host, CTL_IRQ_MASK, mask); | ||
92 | } | ||
93 | |||
94 | static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) | ||
95 | { | ||
96 | sd_ctrl_write32(host, CTL_STATUS, ~i); | ||
97 | } | ||
98 | |||
99 | static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) | ||
100 | { | ||
101 | host->sg_len = data->sg_len; | ||
102 | host->sg_ptr = data->sg; | ||
103 | host->sg_orig = data->sg; | ||
104 | host->sg_off = 0; | ||
105 | } | ||
106 | |||
107 | static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | ||
108 | { | ||
109 | host->sg_ptr = sg_next(host->sg_ptr); | ||
110 | host->sg_off = 0; | ||
111 | return --host->sg_len; | ||
112 | } | ||
113 | |||
114 | #ifdef CONFIG_MMC_DEBUG | ||
115 | |||
116 | #define STATUS_TO_TEXT(a, status, i) \ | ||
117 | do { \ | ||
118 | if (status & TMIO_STAT_##a) { \ | ||
119 | if (i++) \ | ||
120 | printk(" | "); \ | ||
121 | printk(#a); \ | ||
122 | } \ | ||
123 | } while (0) | ||
124 | |||
125 | static void pr_debug_status(u32 status) | ||
126 | { | ||
127 | int i = 0; | ||
128 | printk(KERN_DEBUG "status: %08x = ", status); | ||
129 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | ||
130 | STATUS_TO_TEXT(CARD_INSERT, status, i); | ||
131 | STATUS_TO_TEXT(SIGSTATE, status, i); | ||
132 | STATUS_TO_TEXT(WRPROTECT, status, i); | ||
133 | STATUS_TO_TEXT(CARD_REMOVE_A, status, i); | ||
134 | STATUS_TO_TEXT(CARD_INSERT_A, status, i); | ||
135 | STATUS_TO_TEXT(SIGSTATE_A, status, i); | ||
136 | STATUS_TO_TEXT(CMD_IDX_ERR, status, i); | ||
137 | STATUS_TO_TEXT(STOPBIT_ERR, status, i); | ||
138 | STATUS_TO_TEXT(ILL_FUNC, status, i); | ||
139 | STATUS_TO_TEXT(CMD_BUSY, status, i); | ||
140 | STATUS_TO_TEXT(CMDRESPEND, status, i); | ||
141 | STATUS_TO_TEXT(DATAEND, status, i); | ||
142 | STATUS_TO_TEXT(CRCFAIL, status, i); | ||
143 | STATUS_TO_TEXT(DATATIMEOUT, status, i); | ||
144 | STATUS_TO_TEXT(CMDTIMEOUT, status, i); | ||
145 | STATUS_TO_TEXT(RXOVERFLOW, status, i); | ||
146 | STATUS_TO_TEXT(TXUNDERRUN, status, i); | ||
147 | STATUS_TO_TEXT(RXRDY, status, i); | ||
148 | STATUS_TO_TEXT(TXRQ, status, i); | ||
149 | STATUS_TO_TEXT(ILL_ACCESS, status, i); | ||
150 | printk("\n"); | ||
151 | } | ||
152 | |||
153 | #else | ||
154 | #define pr_debug_status(s) do { } while (0) | ||
155 | #endif | ||
156 | |||
157 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | ||
158 | { | ||
159 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
160 | |||
161 | if (enable) { | ||
162 | host->sdio_irq_enabled = 1; | ||
163 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); | ||
164 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, | ||
165 | (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); | ||
166 | } else { | ||
167 | sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); | ||
168 | sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); | ||
169 | host->sdio_irq_enabled = 0; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) | ||
174 | { | ||
175 | u32 clk = 0, clock; | ||
176 | |||
177 | if (new_clock) { | ||
178 | for (clock = host->mmc->f_min, clk = 0x80000080; | ||
179 | new_clock >= (clock<<1); clk >>= 1) | ||
180 | clock <<= 1; | ||
181 | clk |= 0x100; | ||
182 | } | ||
183 | |||
184 | if (host->set_clk_div) | ||
185 | host->set_clk_div(host->pdev, (clk>>22) & 1); | ||
186 | |||
187 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); | ||
188 | } | ||
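The loop starts from the slowest clock (f_min, i.e. f_max / 512) and shifts clk right once for every doubling that still fits under the requested rate; 0x100 is then OR'd in, the low nine bits go to CTL_SD_CARD_CLK_CTL, and bit 22 is handed to the optional set_clk_div() callback. A self-contained replica of that arithmetic:

#include <stdio.h>

/* Mirrors the divider selection in tmio_mmc_set_clock(). */
static unsigned int pick_clk(unsigned int f_min, unsigned int new_clock)
{
	unsigned int clock, clk = 0;

	if (new_clock) {
		for (clock = f_min, clk = 0x80000080;
		     new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}
	return clk;
}

int main(void)
{
	unsigned int f_max = 24000000, f_min = f_max / 512;

	printf("%#x\n", pick_clk(f_min, f_max) & 0x1ff);	/* 0x100: full speed */
	printf("%#x\n", pick_clk(f_min, f_min) & 0x1ff);	/* 0x180: slowest divider */
	return 0;
}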
189 | |||
190 | static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) | ||
191 | { | ||
192 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
193 | |||
194 | /* implicit BUG_ON(!res) */ | ||
195 | if (resource_size(res) > 0x100) { | ||
196 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
197 | msleep(10); | ||
198 | } | ||
199 | |||
200 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & | ||
201 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
202 | msleep(10); | ||
203 | } | ||
204 | |||
205 | static void tmio_mmc_clk_start(struct tmio_mmc_host *host) | ||
206 | { | ||
207 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
208 | |||
209 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | | ||
210 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); | ||
211 | msleep(10); | ||
212 | |||
213 | /* implicit BUG_ON(!res) */ | ||
214 | if (resource_size(res) > 0x100) { | ||
215 | sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); | ||
216 | msleep(10); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | static void tmio_mmc_reset(struct tmio_mmc_host *host) | ||
221 | { | ||
222 | struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); | ||
223 | |||
224 | /* FIXME - should we set stop clock reg here */ | ||
225 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); | ||
226 | /* implicit BUG_ON(!res) */ | ||
227 | if (resource_size(res) > 0x100) | ||
228 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); | ||
229 | msleep(10); | ||
230 | sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); | ||
231 | if (resource_size(res) > 0x100) | ||
232 | sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); | ||
233 | msleep(10); | ||
234 | } | ||
235 | |||
236 | static void tmio_mmc_reset_work(struct work_struct *work) | ||
237 | { | ||
238 | struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, | ||
239 | delayed_reset_work.work); | ||
240 | struct mmc_request *mrq; | ||
241 | unsigned long flags; | ||
242 | |||
243 | spin_lock_irqsave(&host->lock, flags); | ||
244 | mrq = host->mrq; | ||
245 | |||
246 | /* request already finished */ | ||
247 | if (!mrq | ||
248 | || time_is_after_jiffies(host->last_req_ts + | ||
249 | msecs_to_jiffies(2000))) { | ||
250 | spin_unlock_irqrestore(&host->lock, flags); | ||
251 | return; | ||
252 | } | ||
253 | |||
254 | dev_warn(&host->pdev->dev, | ||
255 | "timeout waiting for hardware interrupt (CMD%u)\n", | ||
256 | mrq->cmd->opcode); | ||
257 | |||
258 | if (host->data) | ||
259 | host->data->error = -ETIMEDOUT; | ||
260 | else if (host->cmd) | ||
261 | host->cmd->error = -ETIMEDOUT; | ||
262 | else | ||
263 | mrq->cmd->error = -ETIMEDOUT; | ||
264 | |||
265 | host->cmd = NULL; | ||
266 | host->data = NULL; | ||
267 | host->mrq = NULL; | ||
268 | host->force_pio = false; | ||
269 | |||
270 | spin_unlock_irqrestore(&host->lock, flags); | ||
271 | |||
272 | tmio_mmc_reset(host); | ||
273 | |||
274 | mmc_request_done(host->mmc, mrq); | ||
275 | } | ||
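This work item is one half of the lost-interrupt watchdog: tmio_mmc_request() records last_req_ts and arms a 2-second delayed work, tmio_mmc_finish_request() cancels it, and if it ever fires with a request still pending the command is failed with -ETIMEDOUT and the controller reset. The bare scheduling pattern as a sketch (struct and function names here are illustrative):

/* Sketch of the delayed-work watchdog pattern used above. */
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_host {
	struct delayed_work	watchdog;
	unsigned long		last_req_ts;
};

static void example_watchdog_fn(struct work_struct *work)
{
	struct example_host *h = container_of(work, struct example_host,
					      watchdog.work);

	/* If the request armed at h->last_req_ts is still pending,
	 * flag a timeout and reset the hardware here. */
	(void)h;
}

static void example_init(struct example_host *h)
{
	INIT_DELAYED_WORK(&h->watchdog, example_watchdog_fn);
}

static void example_arm(struct example_host *h)		/* on request submit */
{
	h->last_req_ts = jiffies;
	schedule_delayed_work(&h->watchdog, msecs_to_jiffies(2000));
}

static void example_disarm(struct example_host *h)	/* on completion */
{
	cancel_delayed_work(&h->watchdog);
}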
276 | |||
277 | static void tmio_mmc_finish_request(struct tmio_mmc_host *host) | ||
278 | { | ||
279 | struct mmc_request *mrq = host->mrq; | ||
280 | |||
281 | if (!mrq) | ||
282 | return; | ||
283 | |||
284 | host->mrq = NULL; | ||
285 | host->cmd = NULL; | ||
286 | host->data = NULL; | ||
287 | host->force_pio = false; | ||
288 | |||
289 | cancel_delayed_work(&host->delayed_reset_work); | ||
290 | |||
291 | mmc_request_done(host->mmc, mrq); | ||
292 | } | ||
293 | |||
294 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
295 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
296 | #define APP_CMD 0x0040 | ||
297 | #define RESP_NONE 0x0300 | ||
298 | #define RESP_R1 0x0400 | ||
299 | #define RESP_R1B 0x0500 | ||
300 | #define RESP_R2 0x0600 | ||
301 | #define RESP_R3 0x0700 | ||
302 | #define DATA_PRESENT 0x0800 | ||
303 | #define TRANSFER_READ 0x1000 | ||
304 | #define TRANSFER_MULTI 0x2000 | ||
305 | #define SECURITY_CMD 0x4000 | ||
306 | |||
307 | static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) | ||
308 | { | ||
309 | struct mmc_data *data = host->data; | ||
310 | int c = cmd->opcode; | ||
311 | |||
312 | /* Command 12 is handled by hardware */ | ||
313 | if (cmd->opcode == 12 && !cmd->arg) { | ||
314 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | switch (mmc_resp_type(cmd)) { | ||
319 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
320 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
321 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
322 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
323 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
324 | default: | ||
325 | pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); | ||
326 | return -EINVAL; | ||
327 | } | ||
328 | |||
329 | host->cmd = cmd; | ||
330 | |||
331 | /* FIXME - this seems to be ok commented out but the spec suggest this bit | ||
332 | * should be set when issuing app commands. | ||
333 | * if(cmd->flags & MMC_FLAG_ACMD) | ||
334 | * c |= APP_CMD; | ||
335 | */ | ||
336 | if (data) { | ||
337 | c |= DATA_PRESENT; | ||
338 | if (data->blocks > 1) { | ||
339 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); | ||
340 | c |= TRANSFER_MULTI; | ||
341 | } | ||
342 | if (data->flags & MMC_DATA_READ) | ||
343 | c |= TRANSFER_READ; | ||
344 | } | ||
345 | |||
346 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); | ||
347 | |||
348 | /* Fire off the command */ | ||
349 | sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); | ||
350 | sd_ctrl_write16(host, CTL_SD_CMD, c); | ||
351 | |||
352 | return 0; | ||
353 | } | ||
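The bitmasks above are OR'd into the low byte holding the opcode, together with the data-direction bits, to form the 16-bit value written to CTL_SD_CMD. For instance, a multi-block read (CMD18, R1 response) would be encoded as follows; the helper name is made up, but the constants and the result follow directly from the code above.

/* Sketch: command word for a multi-block read, R1 response. */
#define RESP_R1		0x0400
#define DATA_PRESENT	0x0800
#define TRANSFER_READ	0x1000
#define TRANSFER_MULTI	0x2000

static unsigned short encode_read_multiple(unsigned char opcode)
{
	/* opcode 18 (READ_MULTIPLE_BLOCK) -> 0x3c12 */
	return opcode | RESP_R1 | DATA_PRESENT | TRANSFER_READ | TRANSFER_MULTI;
}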
354 | |||
355 | /* | ||
356 | * This chip always returns (at least?) as much data as you ask for. | ||
357 | * I'm unsure what happens if you ask for less than a block. This should be | ||
358 | * looked into to ensure that a funny-length read doesn't hose the controller. | ||
359 | */ | ||
360 | static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) | ||
361 | { | ||
362 | struct mmc_data *data = host->data; | ||
363 | void *sg_virt; | ||
364 | unsigned short *buf; | ||
365 | unsigned int count; | ||
366 | unsigned long flags; | ||
367 | |||
368 | if ((host->chan_tx || host->chan_rx) && !host->force_pio) { | ||
369 | pr_err("PIO IRQ in DMA mode!\n"); | ||
370 | return; | ||
371 | } else if (!data) { | ||
372 | pr_debug("Spurious PIO IRQ\n"); | ||
373 | return; | ||
374 | } | ||
375 | |||
376 | sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); | ||
377 | buf = (unsigned short *)(sg_virt + host->sg_off); | ||
378 | |||
379 | count = host->sg_ptr->length - host->sg_off; | ||
380 | if (count > data->blksz) | ||
381 | count = data->blksz; | ||
382 | |||
383 | pr_debug("count: %08x offset: %08x flags %08x\n", | ||
384 | count, host->sg_off, data->flags); | ||
385 | |||
386 | /* Transfer the data */ | ||
387 | if (data->flags & MMC_DATA_READ) | ||
388 | sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
389 | else | ||
390 | sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); | ||
391 | |||
392 | host->sg_off += count; | ||
393 | |||
394 | tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); | ||
395 | |||
396 | if (host->sg_off == host->sg_ptr->length) | ||
397 | tmio_mmc_next_sg(host); | ||
398 | |||
399 | return; | ||
400 | } | ||
401 | |||
402 | static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) | ||
403 | { | ||
404 | if (host->sg_ptr == &host->bounce_sg) { | ||
405 | unsigned long flags; | ||
406 | void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); | ||
407 | memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); | ||
408 | tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); | ||
409 | } | ||
410 | } | ||
411 | |||
412 | /* needs to be called with host->lock held */ | ||
413 | void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) | ||
414 | { | ||
415 | struct mmc_data *data = host->data; | ||
416 | struct mmc_command *stop; | ||
417 | |||
418 | host->data = NULL; | ||
419 | |||
420 | if (!data) { | ||
421 | dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); | ||
422 | return; | ||
423 | } | ||
424 | stop = data->stop; | ||
425 | |||
426 | /* FIXME - return correct transfer count on errors */ | ||
427 | if (!data->error) | ||
428 | data->bytes_xfered = data->blocks * data->blksz; | ||
429 | else | ||
430 | data->bytes_xfered = 0; | ||
431 | |||
432 | pr_debug("Completed data request\n"); | ||
433 | |||
434 | /* | ||
435 | * FIXME: other drivers allow an optional stop command of any given type | ||
436 | * which we don't do, as the chip can auto-generate them. | ||
437 | * Perhaps we can be smarter about when to use auto CMD12 and | ||
438 | * only issue the auto request when we know this is the desired | ||
439 | * stop command, allowing fallback to the stop command the | ||
440 | * upper layers expect. For now, we do what works. | ||
441 | */ | ||
442 | |||
443 | if (data->flags & MMC_DATA_READ) { | ||
444 | if (host->chan_rx && !host->force_pio) | ||
445 | tmio_mmc_check_bounce_buffer(host); | ||
446 | dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", | ||
447 | host->mrq); | ||
448 | } else { | ||
449 | dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", | ||
450 | host->mrq); | ||
451 | } | ||
452 | |||
453 | if (stop) { | ||
454 | if (stop->opcode == 12 && !stop->arg) | ||
455 | sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); | ||
456 | else | ||
457 | BUG(); | ||
458 | } | ||
459 | |||
460 | tmio_mmc_finish_request(host); | ||
461 | } | ||
462 | |||
463 | static void tmio_mmc_data_irq(struct tmio_mmc_host *host) | ||
464 | { | ||
465 | struct mmc_data *data; | ||
466 | spin_lock(&host->lock); | ||
467 | data = host->data; | ||
468 | |||
469 | if (!data) | ||
470 | goto out; | ||
471 | |||
472 | if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { | ||
473 | /* | ||
474 | * Has all data been written out yet? Testing on SuperH showed | ||
475 | * that in most cases the first interrupt already comes with the | ||
476 | * BUSY status bit clear, but on some operations, like mount or | ||
477 | * at the beginning of a write / sync / umount, there is one | ||
478 | * DATAEND interrupt with the BUSY bit set. In these cases, | ||
479 | * waiting for one more interrupt fixes the problem. | ||
480 | */ | ||
481 | if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { | ||
482 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
483 | tasklet_schedule(&host->dma_complete); | ||
484 | } | ||
485 | } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { | ||
486 | tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
487 | tasklet_schedule(&host->dma_complete); | ||
488 | } else { | ||
489 | tmio_mmc_do_data_irq(host); | ||
490 | tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); | ||
491 | } | ||
492 | out: | ||
493 | spin_unlock(&host->lock); | ||
494 | } | ||
495 | |||
496 | static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, | ||
497 | unsigned int stat) | ||
498 | { | ||
499 | struct mmc_command *cmd = host->cmd; | ||
500 | int i, addr; | ||
501 | |||
502 | spin_lock(&host->lock); | ||
503 | |||
504 | if (!host->cmd) { | ||
505 | pr_debug("Spurious CMD irq\n"); | ||
506 | goto out; | ||
507 | } | ||
508 | |||
509 | host->cmd = NULL; | ||
510 | |||
511 | /* This controller is sicker than the PXA one. Not only do we need to | ||
512 | * drop the top 8 bits of the first response word, we also need to | ||
513 | * modify the order of the response for short response command types. | ||
514 | */ | ||
515 | |||
516 | for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) | ||
517 | cmd->resp[i] = sd_ctrl_read32(host, addr); | ||
518 | |||
519 | if (cmd->flags & MMC_RSP_136) { | ||
520 | cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); | ||
521 | cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); | ||
522 | cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); | ||
523 | cmd->resp[3] <<= 8; | ||
524 | } else if (cmd->flags & MMC_RSP_R3) { | ||
525 | cmd->resp[0] = cmd->resp[3]; | ||
526 | } | ||
527 | |||
528 | if (stat & TMIO_STAT_CMDTIMEOUT) | ||
529 | cmd->error = -ETIMEDOUT; | ||
530 | else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) | ||
531 | cmd->error = -EILSEQ; | ||
532 | |||
533 | /* If there is data to handle, we enable data IRQs here, and | ||
534 | * we will ultimately finish the request in the data_end handler. | ||
535 | * If there's no data or we encountered an error, finish now. | ||
536 | */ | ||
537 | if (host->data && !cmd->error) { | ||
538 | if (host->data->flags & MMC_DATA_READ) { | ||
539 | if (host->force_pio || !host->chan_rx) | ||
540 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); | ||
541 | else | ||
542 | tasklet_schedule(&host->dma_issue); | ||
543 | } else { | ||
544 | if (host->force_pio || !host->chan_tx) | ||
545 | tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); | ||
546 | else | ||
547 | tasklet_schedule(&host->dma_issue); | ||
548 | } | ||
549 | } else { | ||
550 | tmio_mmc_finish_request(host); | ||
551 | } | ||
552 | |||
553 | out: | ||
554 | spin_unlock(&host->lock); | ||
555 | } | ||
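For 136-bit responses each word is shifted up by one byte, pulling in the top byte of the following word, to undo the controller's layout. That fix-up can be exercised in isolation with arbitrary data:

#include <stdint.h>
#include <stdio.h>

/* Same shift chain as the MMC_RSP_136 branch of tmio_mmc_cmd_irq(). */
static void fixup_r2(uint32_t resp[4])
{
	resp[0] = (resp[0] << 8) | (resp[1] >> 24);
	resp[1] = (resp[1] << 8) | (resp[2] >> 24);
	resp[2] = (resp[2] << 8) | (resp[3] >> 24);
	resp[3] <<= 8;
}

int main(void)
{
	uint32_t resp[4] = { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff };

	fixup_r2(resp);
	printf("%08x %08x %08x %08x\n", resp[0], resp[1], resp[2], resp[3]);
	/* prints: 11223344 55667788 99aabbcc ddeeff00 */
	return 0;
}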
556 | |||
557 | static irqreturn_t tmio_mmc_irq(int irq, void *devid) | ||
558 | { | ||
559 | struct tmio_mmc_host *host = devid; | ||
560 | struct tmio_mmc_data *pdata = host->pdata; | ||
561 | unsigned int ireg, irq_mask, status; | ||
562 | unsigned int sdio_ireg, sdio_irq_mask, sdio_status; | ||
563 | |||
564 | pr_debug("MMC IRQ begin\n"); | ||
565 | |||
566 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
567 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
568 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
569 | |||
570 | sdio_ireg = 0; | ||
571 | if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { | ||
572 | sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); | ||
573 | sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); | ||
574 | sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; | ||
575 | |||
576 | sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); | ||
577 | |||
578 | if (sdio_ireg && !host->sdio_irq_enabled) { | ||
579 | pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", | ||
580 | sdio_status, sdio_irq_mask, sdio_ireg); | ||
581 | tmio_mmc_enable_sdio_irq(host->mmc, 0); | ||
582 | goto out; | ||
583 | } | ||
584 | |||
585 | if (host->mmc->caps & MMC_CAP_SDIO_IRQ && | ||
586 | sdio_ireg & TMIO_SDIO_STAT_IOIRQ) | ||
587 | mmc_signal_sdio_irq(host->mmc); | ||
588 | |||
589 | if (sdio_ireg) | ||
590 | goto out; | ||
591 | } | ||
592 | |||
593 | pr_debug_status(status); | ||
594 | pr_debug_status(ireg); | ||
595 | |||
596 | if (!ireg) { | ||
597 | tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); | ||
598 | |||
599 | pr_warning("tmio_mmc: Spurious irq, disabling! " | ||
600 | "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); | ||
601 | pr_debug_status(status); | ||
602 | |||
603 | goto out; | ||
604 | } | ||
605 | |||
606 | while (ireg) { | ||
607 | /* Card insert / remove attempts */ | ||
608 | if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { | ||
609 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | | ||
610 | TMIO_STAT_CARD_REMOVE); | ||
611 | mmc_detect_change(host->mmc, msecs_to_jiffies(100)); | ||
612 | } | ||
613 | |||
614 | /* CRC and other errors */ | ||
615 | /* if (ireg & TMIO_STAT_ERR_IRQ) | ||
616 | * handled |= tmio_error_irq(host, irq, stat); | ||
617 | */ | ||
618 | |||
619 | /* Command completion */ | ||
620 | if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { | ||
621 | tmio_mmc_ack_mmc_irqs(host, | ||
622 | TMIO_STAT_CMDRESPEND | | ||
623 | TMIO_STAT_CMDTIMEOUT); | ||
624 | tmio_mmc_cmd_irq(host, status); | ||
625 | } | ||
626 | |||
627 | /* Data transfer */ | ||
628 | if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { | ||
629 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); | ||
630 | tmio_mmc_pio_irq(host); | ||
631 | } | ||
632 | |||
633 | /* Data transfer completion */ | ||
634 | if (ireg & TMIO_STAT_DATAEND) { | ||
635 | tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); | ||
636 | tmio_mmc_data_irq(host); | ||
637 | } | ||
638 | |||
639 | /* Check status - keep going until we've handled it all */ | ||
640 | status = sd_ctrl_read32(host, CTL_STATUS); | ||
641 | irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); | ||
642 | ireg = status & TMIO_MASK_IRQ & ~irq_mask; | ||
643 | |||
644 | pr_debug("Status at end of loop: %08x\n", status); | ||
645 | pr_debug_status(status); | ||
646 | } | ||
647 | pr_debug("MMC IRQ end\n"); | ||
648 | |||
649 | out: | ||
650 | return IRQ_HANDLED; | ||
651 | } | ||
652 | |||
653 | static int tmio_mmc_start_data(struct tmio_mmc_host *host, | ||
654 | struct mmc_data *data) | ||
655 | { | ||
656 | struct tmio_mmc_data *pdata = host->pdata; | ||
657 | |||
658 | pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", | ||
659 | data->blksz, data->blocks); | ||
660 | |||
661 | /* Some hardware cannot perform 2 byte requests in 4 bit mode */ | ||
662 | if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { | ||
663 | int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; | ||
664 | |||
665 | if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { | ||
666 | pr_err("%s: %d byte block unsupported in 4 bit mode\n", | ||
667 | mmc_hostname(host->mmc), data->blksz); | ||
668 | return -EINVAL; | ||
669 | } | ||
670 | } | ||
671 | |||
672 | tmio_mmc_init_sg(host, data); | ||
673 | host->data = data; | ||
674 | |||
675 | /* Set transfer length / blocksize */ | ||
676 | sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); | ||
677 | sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); | ||
678 | |||
679 | tmio_mmc_start_dma(host, data); | ||
680 | |||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | /* Process requests from the MMC layer */ | ||
685 | static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | ||
686 | { | ||
687 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
688 | int ret; | ||
689 | |||
690 | if (host->mrq) | ||
691 | pr_debug("request not null\n"); | ||
692 | |||
693 | host->last_req_ts = jiffies; | ||
694 | wmb(); | ||
695 | host->mrq = mrq; | ||
696 | |||
697 | if (mrq->data) { | ||
698 | ret = tmio_mmc_start_data(host, mrq->data); | ||
699 | if (ret) | ||
700 | goto fail; | ||
701 | } | ||
702 | |||
703 | ret = tmio_mmc_start_command(host, mrq->cmd); | ||
704 | if (!ret) { | ||
705 | schedule_delayed_work(&host->delayed_reset_work, | ||
706 | msecs_to_jiffies(2000)); | ||
707 | return; | ||
708 | } | ||
709 | |||
710 | fail: | ||
711 | host->mrq = NULL; | ||
712 | host->force_pio = false; | ||
713 | mrq->cmd->error = ret; | ||
714 | mmc_request_done(mmc, mrq); | ||
715 | } | ||
716 | |||
717 | /* Set MMC clock / power. | ||
718 | * Note: This controller uses a simple divider scheme, therefore it cannot | ||
719 | * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as | ||
720 | * MMC won't run that fast, it has to be clocked at 12MHz, which is the next | ||
721 | * slowest setting. | ||
722 | */ | ||
723 | static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | ||
724 | { | ||
725 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
726 | |||
727 | if (ios->clock) | ||
728 | tmio_mmc_set_clock(host, ios->clock); | ||
729 | |||
730 | /* Power sequence - OFF -> UP -> ON */ | ||
731 | if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { | ||
732 | /* power down SD bus */ | ||
733 | if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) | ||
734 | host->set_pwr(host->pdev, 0); | ||
735 | tmio_mmc_clk_stop(host); | ||
736 | } else if (ios->power_mode == MMC_POWER_UP) { | ||
737 | /* power up SD bus */ | ||
738 | if (host->set_pwr) | ||
739 | host->set_pwr(host->pdev, 1); | ||
740 | } else { | ||
741 | /* start bus clock */ | ||
742 | tmio_mmc_clk_start(host); | ||
743 | } | ||
744 | |||
745 | switch (ios->bus_width) { | ||
746 | case MMC_BUS_WIDTH_1: | ||
747 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
748 | break; | ||
749 | case MMC_BUS_WIDTH_4: | ||
750 | sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
751 | break; | ||
752 | } | ||
753 | |||
754 | /* Let things settle. Delay taken from the WinCE driver. */ | ||
755 | udelay(140); | ||
756 | } | ||
757 | |||
758 | static int tmio_mmc_get_ro(struct mmc_host *mmc) | ||
759 | { | ||
760 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
761 | struct tmio_mmc_data *pdata = host->pdata; | ||
762 | |||
763 | return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || | ||
764 | !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); | ||
765 | } | ||
766 | |||
767 | static int tmio_mmc_get_cd(struct mmc_host *mmc) | ||
768 | { | ||
769 | struct tmio_mmc_host *host = mmc_priv(mmc); | ||
770 | struct tmio_mmc_data *pdata = host->pdata; | ||
771 | |||
772 | if (!pdata->get_cd) | ||
773 | return -ENOSYS; | ||
774 | else | ||
775 | return pdata->get_cd(host->pdev); | ||
776 | } | ||
777 | |||
778 | static const struct mmc_host_ops tmio_mmc_ops = { | ||
779 | .request = tmio_mmc_request, | ||
780 | .set_ios = tmio_mmc_set_ios, | ||
781 | .get_ro = tmio_mmc_get_ro, | ||
782 | .get_cd = tmio_mmc_get_cd, | ||
783 | .enable_sdio_irq = tmio_mmc_enable_sdio_irq, | ||
784 | }; | ||
785 | |||
786 | int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, | ||
787 | struct platform_device *pdev, | ||
788 | struct tmio_mmc_data *pdata) | ||
789 | { | ||
790 | struct tmio_mmc_host *_host; | ||
791 | struct mmc_host *mmc; | ||
792 | struct resource *res_ctl; | ||
793 | int ret; | ||
794 | u32 irq_mask = TMIO_MASK_CMD; | ||
795 | |||
796 | res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
797 | if (!res_ctl) | ||
798 | return -EINVAL; | ||
799 | |||
800 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); | ||
801 | if (!mmc) | ||
802 | return -ENOMEM; | ||
803 | |||
804 | _host = mmc_priv(mmc); | ||
805 | _host->pdata = pdata; | ||
806 | _host->mmc = mmc; | ||
807 | _host->pdev = pdev; | ||
808 | platform_set_drvdata(pdev, mmc); | ||
809 | |||
810 | _host->set_pwr = pdata->set_pwr; | ||
811 | _host->set_clk_div = pdata->set_clk_div; | ||
812 | |||
813 | /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ | ||
814 | _host->bus_shift = resource_size(res_ctl) >> 10; | ||
815 | |||
816 | _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); | ||
817 | if (!_host->ctl) { | ||
818 | ret = -ENOMEM; | ||
819 | goto host_free; | ||
820 | } | ||
821 | |||
822 | mmc->ops = &tmio_mmc_ops; | ||
823 | mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; | ||
824 | mmc->f_max = pdata->hclk; | ||
825 | mmc->f_min = mmc->f_max / 512; | ||
826 | mmc->max_segs = 32; | ||
827 | mmc->max_blk_size = 512; | ||
828 | mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * | ||
829 | mmc->max_segs; | ||
830 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; | ||
831 | mmc->max_seg_size = mmc->max_req_size; | ||
832 | if (pdata->ocr_mask) | ||
833 | mmc->ocr_avail = pdata->ocr_mask; | ||
834 | else | ||
835 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
836 | |||
837 | tmio_mmc_clk_stop(_host); | ||
838 | tmio_mmc_reset(_host); | ||
839 | |||
840 | ret = platform_get_irq(pdev, 0); | ||
841 | if (ret < 0) | ||
842 | goto unmap_ctl; | ||
843 | |||
844 | _host->irq = ret; | ||
845 | |||
846 | tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); | ||
847 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) | ||
848 | tmio_mmc_enable_sdio_irq(mmc, 0); | ||
849 | |||
850 | ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | | ||
851 | IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); | ||
852 | if (ret) | ||
853 | goto unmap_ctl; | ||
854 | |||
855 | spin_lock_init(&_host->lock); | ||
856 | |||
857 | /* Init delayed work for request timeouts */ | ||
858 | INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); | ||
859 | |||
860 | /* See if we also get DMA */ | ||
861 | tmio_mmc_request_dma(_host, pdata); | ||
862 | |||
863 | mmc_add_host(mmc); | ||
864 | |||
865 | /* Unmask the IRQs we want to know about */ | ||
866 | if (!_host->chan_rx) | ||
867 | irq_mask |= TMIO_MASK_READOP; | ||
868 | if (!_host->chan_tx) | ||
869 | irq_mask |= TMIO_MASK_WRITEOP; | ||
870 | |||
871 | tmio_mmc_enable_mmc_irqs(_host, irq_mask); | ||
872 | |||
873 | *host = _host; | ||
874 | |||
875 | return 0; | ||
876 | |||
877 | unmap_ctl: | ||
878 | iounmap(_host->ctl); | ||
879 | host_free: | ||
880 | mmc_free_host(mmc); | ||
881 | |||
882 | return ret; | ||
883 | } | ||
884 | EXPORT_SYMBOL(tmio_mmc_host_probe); | ||
885 | |||
886 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) | ||
887 | { | ||
888 | mmc_remove_host(host->mmc); | ||
889 | cancel_delayed_work_sync(&host->delayed_reset_work); | ||
890 | tmio_mmc_release_dma(host); | ||
891 | free_irq(host->irq, host); | ||
892 | iounmap(host->ctl); | ||
893 | mmc_free_host(host->mmc); | ||
894 | } | ||
895 | EXPORT_SYMBOL(tmio_mmc_host_remove); | ||
896 | |||
897 | MODULE_LICENSE("GPL v2"); | ||
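The tmio_mmc_get_cd() and probe paths above lean entirely on struct tmio_mmc_data: card detect is delegated to an optional pdata->get_cd() callback (with -ENOSYS returned when none is provided), and hclk, capabilities and ocr_mask feed straight into the mmc_host limits. Below is a minimal sketch of how a board file might populate that platform data; the GPIO number and the MMC_CAP_SD_HIGHSPEED capability are illustrative assumptions, not taken from this patch.

    #include <linux/gpio.h>
    #include <linux/mfd/tmio.h>
    #include <linux/mmc/host.h>
    #include <linux/platform_device.h>

    #define BOARD_SD_CD_GPIO 42    /* hypothetical active-low card-detect line */

    /* Consumed by tmio_mmc_get_cd(); non-zero means a card is present. */
    static int board_sd_get_cd(struct platform_device *pdev)
    {
            return !gpio_get_value(BOARD_SD_CD_GPIO);
    }

    static struct tmio_mmc_data board_sd_data = {
            .hclk           = 24000000,     /* board-specific bus clock (Hz) */
            .capabilities   = MMC_CAP_SD_HIGHSPEED,
            .get_cd         = board_sd_get_cd,
    };

Keeping the board-specific detect logic behind a callback keeps the host driver itself free of any GPIO knowledge.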
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 8c5b4881ccd6..4dfe2c02ea91 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c | |||
@@ -1087,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev, | |||
1087 | struct mmc_host *mmc; | 1087 | struct mmc_host *mmc; |
1088 | struct via_crdr_mmc_host *sdhost; | 1088 | struct via_crdr_mmc_host *sdhost; |
1089 | u32 base, len; | 1089 | u32 base, len; |
1090 | u8 rev, gatt; | 1090 | u8 gatt; |
1091 | int ret; | 1091 | int ret; |
1092 | 1092 | ||
1093 | pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev); | ||
1094 | pr_info(DRV_NAME | 1093 | pr_info(DRV_NAME |
1095 | ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", | 1094 | ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", |
1096 | pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, | 1095 | pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, |
1097 | (int)rev); | 1096 | (int)pcidev->revision); |
1098 | 1097 | ||
1099 | ret = pci_enable_device(pcidev); | 1098 | ret = pci_enable_device(pcidev); |
1100 | if (ret) | 1099 | if (ret) |
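The via-sdmmc hunk drops a manual config-space read in favour of pci_dev->revision, which the PCI core caches for every device at enumeration time. A minimal before/after sketch (PCI_CLASS_REVISION is the dword register whose low byte is the revision ID, so the old byte read and the new field hold the same value):

    u8 rev;

    /* Old: fetch the revision byte from config space by hand. */
    pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);

    /* New: use the value the PCI core already read during enumeration. */
    rev = pcidev->revision;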
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 22abfb39d813..68d45ba2d9b9 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -1237,8 +1237,17 @@ static int bfin_mac_enable(struct phy_device *phydev) | |||
1237 | 1237 | ||
1238 | if (phydev->interface == PHY_INTERFACE_MODE_RMII) { | 1238 | if (phydev->interface == PHY_INTERFACE_MODE_RMII) { |
1239 | opmode |= RMII; /* For Now only 100MBit are supported */ | 1239 | opmode |= RMII; /* For Now only 100MBit are supported */ |
1240 | #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2 | 1240 | #if defined(CONFIG_BF537) || defined(CONFIG_BF536) |
1241 | opmode |= TE; | 1241 | if (__SILICON_REVISION__ < 3) { |
1242 | /* | ||
1243 | * This isn't publicly documented (fun times!), but in | ||
1244 | * silicon <=0.2, the RX and TX pins are clocked together. | ||
1245 | * So in order to receive, we must enable the transmit side | ||
1246 | * as well. This will cause a spurious TX interrupt too, | ||
1247 | * but we can easily consume that. | ||
1248 | */ | ||
1249 | opmode |= TE; | ||
1250 | } | ||
1242 | #endif | 1251 | #endif |
1243 | } | 1252 | } |
1244 | 1253 | ||
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d1865cc97313..8e6d618b5305 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -8317,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = { | |||
8317 | #endif | 8317 | #endif |
8318 | }; | 8318 | }; |
8319 | 8319 | ||
8320 | static void inline vlan_features_add(struct net_device *dev, u32 flags) | 8320 | static inline void vlan_features_add(struct net_device *dev, u32 flags) |
8321 | { | 8321 | { |
8322 | dev->vlan_features |= flags; | 8322 | dev->vlan_features |= flags; |
8323 | } | 8323 | } |
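The bnx2 hunk (and the matching tg3 hunk further down) is purely a specifier reordering: both forms are legal C, but kernel style puts the storage class first, then inline, then the return type, and checkpatch-style tooling tends to flag the old ordering. A trivial illustration:

    /* discouraged ordering */
    static void inline vlan_features_add(struct net_device *dev, u32 flags);

    /* preferred: storage class, 'inline', return type */
    static inline void vlan_features_add(struct net_device *dev, u32 flags);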
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 110eda01843c..31552959aed7 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -588,14 +588,9 @@ static void c_can_chip_config(struct net_device *dev) | |||
588 | { | 588 | { |
589 | struct c_can_priv *priv = netdev_priv(dev); | 589 | struct c_can_priv *priv = netdev_priv(dev); |
590 | 590 | ||
591 | if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) | 591 | /* enable automatic retransmission */ |
592 | /* disable automatic retransmission */ | 592 | priv->write_reg(priv, &priv->regs->control, |
593 | priv->write_reg(priv, &priv->regs->control, | 593 | CONTROL_ENABLE_AR); |
594 | CONTROL_DISABLE_AR); | ||
595 | else | ||
596 | /* enable automatic retransmission */ | ||
597 | priv->write_reg(priv, &priv->regs->control, | ||
598 | CONTROL_ENABLE_AR); | ||
599 | 594 | ||
600 | if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & | 595 | if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & |
601 | CAN_CTRLMODE_LOOPBACK)) { | 596 | CAN_CTRLMODE_LOOPBACK)) { |
@@ -704,7 +699,6 @@ static void c_can_do_tx(struct net_device *dev) | |||
704 | 699 | ||
705 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { | 700 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { |
706 | msg_obj_no = get_tx_echo_msg_obj(priv); | 701 | msg_obj_no = get_tx_echo_msg_obj(priv); |
707 | c_can_inval_msg_object(dev, 0, msg_obj_no); | ||
708 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); | 702 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); |
709 | if (!(val & (1 << msg_obj_no))) { | 703 | if (!(val & (1 << msg_obj_no))) { |
710 | can_get_echo_skb(dev, | 704 | can_get_echo_skb(dev, |
@@ -713,6 +707,7 @@ static void c_can_do_tx(struct net_device *dev) | |||
713 | &priv->regs->ifregs[0].msg_cntrl) | 707 | &priv->regs->ifregs[0].msg_cntrl) |
714 | & IF_MCONT_DLC_MASK; | 708 | & IF_MCONT_DLC_MASK; |
715 | stats->tx_packets++; | 709 | stats->tx_packets++; |
710 | c_can_inval_msg_object(dev, 0, msg_obj_no); | ||
716 | } | 711 | } |
717 | } | 712 | } |
718 | 713 | ||
@@ -1112,8 +1107,7 @@ struct net_device *alloc_c_can_dev(void) | |||
1112 | priv->can.bittiming_const = &c_can_bittiming_const; | 1107 | priv->can.bittiming_const = &c_can_bittiming_const; |
1113 | priv->can.do_set_mode = c_can_set_mode; | 1108 | priv->can.do_set_mode = c_can_set_mode; |
1114 | priv->can.do_get_berr_counter = c_can_get_berr_counter; | 1109 | priv->can.do_get_berr_counter = c_can_get_berr_counter; |
1115 | priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT | | 1110 | priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | |
1116 | CAN_CTRLMODE_LOOPBACK | | ||
1117 | CAN_CTRLMODE_LISTENONLY | | 1111 | CAN_CTRLMODE_LISTENONLY | |
1118 | CAN_CTRLMODE_BERR_REPORTING; | 1112 | CAN_CTRLMODE_BERR_REPORTING; |
1119 | 1113 | ||
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index e629b961ae2d..cc90824f2c9c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c | |||
@@ -73,7 +73,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
73 | void __iomem *addr; | 73 | void __iomem *addr; |
74 | struct net_device *dev; | 74 | struct net_device *dev; |
75 | struct c_can_priv *priv; | 75 | struct c_can_priv *priv; |
76 | struct resource *mem, *irq; | 76 | struct resource *mem; |
77 | int irq; | ||
77 | #ifdef CONFIG_HAVE_CLK | 78 | #ifdef CONFIG_HAVE_CLK |
78 | struct clk *clk; | 79 | struct clk *clk; |
79 | 80 | ||
@@ -88,8 +89,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
88 | 89 | ||
89 | /* get the platform data */ | 90 | /* get the platform data */ |
90 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 91 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
91 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 92 | irq = platform_get_irq(pdev, 0); |
92 | if (!mem || (irq <= 0)) { | 93 | if (!mem || irq <= 0) { |
93 | ret = -ENODEV; | 94 | ret = -ENODEV; |
94 | goto exit_free_clk; | 95 | goto exit_free_clk; |
95 | } | 96 | } |
@@ -117,7 +118,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) | |||
117 | 118 | ||
118 | priv = netdev_priv(dev); | 119 | priv = netdev_priv(dev); |
119 | 120 | ||
120 | dev->irq = irq->start; | 121 | dev->irq = irq; |
121 | priv->regs = addr; | 122 | priv->regs = addr; |
122 | #ifdef CONFIG_HAVE_CLK | 123 | #ifdef CONFIG_HAVE_CLK |
123 | priv->can.clock.freq = clk_get_rate(clk); | 124 | priv->can.clock.freq = clk_get_rate(clk); |
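The c_can_platform hunk replaces platform_get_resource(pdev, IORESOURCE_IRQ, 0), which hands back a struct resource pointer, with platform_get_irq(), which returns the interrupt number (or an error) directly and removes the irq->start dereference. A stripped-down sketch of the resulting probe pattern, with everything unrelated elided:

    static int __devinit example_probe(struct platform_device *pdev)
    {
            struct resource *mem;
            int irq;

            mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            irq = platform_get_irq(pdev, 0);        /* IRQ number, or <= 0 on failure */
            if (!mem || irq <= 0)
                    return -ENODEV;

            /* ... ioremap(mem->start, resource_size(mem)), request_irq(irq, ...) ... */
            return 0;
    }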
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 4d538a4e9d55..910893143295 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -1983,14 +1983,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | |||
1983 | { | 1983 | { |
1984 | struct port_info *pi = netdev_priv(dev); | 1984 | struct port_info *pi = netdev_priv(dev); |
1985 | struct adapter *adapter = pi->adapter; | 1985 | struct adapter *adapter = pi->adapter; |
1986 | struct qset_params *qsp = &adapter->params.sge.qset[0]; | 1986 | struct qset_params *qsp; |
1987 | struct sge_qset *qs = &adapter->sge.qs[0]; | 1987 | struct sge_qset *qs; |
1988 | int i; | ||
1988 | 1989 | ||
1989 | if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) | 1990 | if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) |
1990 | return -EINVAL; | 1991 | return -EINVAL; |
1991 | 1992 | ||
1992 | qsp->coalesce_usecs = c->rx_coalesce_usecs; | 1993 | for (i = 0; i < pi->nqsets; i++) { |
1993 | t3_update_qset_coalesce(qs, qsp); | 1994 | qsp = &adapter->params.sge.qset[i]; |
1995 | qs = &adapter->sge.qs[i]; | ||
1996 | qsp->coalesce_usecs = c->rx_coalesce_usecs; | ||
1997 | t3_update_qset_coalesce(qs, qsp); | ||
1998 | } | ||
1999 | |||
1994 | return 0; | 2000 | return 0; |
1995 | } | 2001 | } |
1996 | 2002 | ||
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 317708113601..b7af5bab9937 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -621,9 +621,9 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) | |||
621 | /* change in wol state, update IRQ state */ | 621 | /* change in wol state, update IRQ state */ |
622 | 622 | ||
623 | if (!dm->wake_state) | 623 | if (!dm->wake_state) |
624 | set_irq_wake(dm->irq_wake, 1); | 624 | irq_set_irq_wake(dm->irq_wake, 1); |
625 | else if (dm->wake_state & !opts) | 625 | else if (dm->wake_state & !opts) |
626 | set_irq_wake(dm->irq_wake, 0); | 626 | irq_set_irq_wake(dm->irq_wake, 0); |
627 | } | 627 | } |
628 | 628 | ||
629 | dm->wake_state = opts; | 629 | dm->wake_state = opts; |
@@ -1424,13 +1424,13 @@ dm9000_probe(struct platform_device *pdev) | |||
1424 | } else { | 1424 | } else { |
1425 | 1425 | ||
1426 | /* test to see if irq is really wakeup capable */ | 1426 | /* test to see if irq is really wakeup capable */ |
1427 | ret = set_irq_wake(db->irq_wake, 1); | 1427 | ret = irq_set_irq_wake(db->irq_wake, 1); |
1428 | if (ret) { | 1428 | if (ret) { |
1429 | dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", | 1429 | dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", |
1430 | db->irq_wake, ret); | 1430 | db->irq_wake, ret); |
1431 | ret = 0; | 1431 | ret = 0; |
1432 | } else { | 1432 | } else { |
1433 | set_irq_wake(db->irq_wake, 0); | 1433 | irq_set_irq_wake(db->irq_wake, 0); |
1434 | db->wake_supported = 1; | 1434 | db->wake_supported = 1; |
1435 | } | 1435 | } |
1436 | } | 1436 | } |
diff --git a/drivers/net/jme.c b/drivers/net/jme.c index f690474f4409..994c80939c7a 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c | |||
@@ -273,7 +273,7 @@ jme_clear_pm(struct jme_adapter *jme) | |||
273 | { | 273 | { |
274 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); | 274 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); |
275 | pci_set_power_state(jme->pdev, PCI_D0); | 275 | pci_set_power_state(jme->pdev, PCI_D0); |
276 | pci_enable_wake(jme->pdev, PCI_D0, false); | 276 | device_set_wakeup_enable(&jme->pdev->dev, false); |
277 | } | 277 | } |
278 | 278 | ||
279 | static int | 279 | static int |
@@ -2538,6 +2538,8 @@ jme_set_wol(struct net_device *netdev, | |||
2538 | 2538 | ||
2539 | jwrite32(jme, JME_PMCS, jme->reg_pmcs); | 2539 | jwrite32(jme, JME_PMCS, jme->reg_pmcs); |
2540 | 2540 | ||
2541 | device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs); | ||
2542 | |||
2541 | return 0; | 2543 | return 0; |
2542 | } | 2544 | } |
2543 | 2545 | ||
@@ -3172,9 +3174,9 @@ jme_shutdown(struct pci_dev *pdev) | |||
3172 | } | 3174 | } |
3173 | 3175 | ||
3174 | #ifdef CONFIG_PM | 3176 | #ifdef CONFIG_PM |
3175 | static int | 3177 | static int jme_suspend(struct device *dev) |
3176 | jme_suspend(struct pci_dev *pdev, pm_message_t state) | ||
3177 | { | 3178 | { |
3179 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3178 | struct net_device *netdev = pci_get_drvdata(pdev); | 3180 | struct net_device *netdev = pci_get_drvdata(pdev); |
3179 | struct jme_adapter *jme = netdev_priv(netdev); | 3181 | struct jme_adapter *jme = netdev_priv(netdev); |
3180 | 3182 | ||
@@ -3206,22 +3208,18 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3206 | tasklet_hi_enable(&jme->rxclean_task); | 3208 | tasklet_hi_enable(&jme->rxclean_task); |
3207 | tasklet_hi_enable(&jme->rxempty_task); | 3209 | tasklet_hi_enable(&jme->rxempty_task); |
3208 | 3210 | ||
3209 | pci_save_state(pdev); | ||
3210 | jme_powersave_phy(jme); | 3211 | jme_powersave_phy(jme); |
3211 | pci_enable_wake(jme->pdev, PCI_D3hot, true); | ||
3212 | pci_set_power_state(pdev, PCI_D3hot); | ||
3213 | 3212 | ||
3214 | return 0; | 3213 | return 0; |
3215 | } | 3214 | } |
3216 | 3215 | ||
3217 | static int | 3216 | static int jme_resume(struct device *dev) |
3218 | jme_resume(struct pci_dev *pdev) | ||
3219 | { | 3217 | { |
3218 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3220 | struct net_device *netdev = pci_get_drvdata(pdev); | 3219 | struct net_device *netdev = pci_get_drvdata(pdev); |
3221 | struct jme_adapter *jme = netdev_priv(netdev); | 3220 | struct jme_adapter *jme = netdev_priv(netdev); |
3222 | 3221 | ||
3223 | jme_clear_pm(jme); | 3222 | jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); |
3224 | pci_restore_state(pdev); | ||
3225 | 3223 | ||
3226 | jme_phy_on(jme); | 3224 | jme_phy_on(jme); |
3227 | if (test_bit(JME_FLAG_SSET, &jme->flags)) | 3225 | if (test_bit(JME_FLAG_SSET, &jme->flags)) |
@@ -3238,6 +3236,13 @@ jme_resume(struct pci_dev *pdev) | |||
3238 | 3236 | ||
3239 | return 0; | 3237 | return 0; |
3240 | } | 3238 | } |
3239 | |||
3240 | static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); | ||
3241 | #define JME_PM_OPS (&jme_pm_ops) | ||
3242 | |||
3243 | #else | ||
3244 | |||
3245 | #define JME_PM_OPS NULL | ||
3241 | #endif | 3246 | #endif |
3242 | 3247 | ||
3243 | static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { | 3248 | static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { |
@@ -3251,11 +3256,8 @@ static struct pci_driver jme_driver = { | |||
3251 | .id_table = jme_pci_tbl, | 3256 | .id_table = jme_pci_tbl, |
3252 | .probe = jme_init_one, | 3257 | .probe = jme_init_one, |
3253 | .remove = __devexit_p(jme_remove_one), | 3258 | .remove = __devexit_p(jme_remove_one), |
3254 | #ifdef CONFIG_PM | ||
3255 | .suspend = jme_suspend, | ||
3256 | .resume = jme_resume, | ||
3257 | #endif /* CONFIG_PM */ | ||
3258 | .shutdown = jme_shutdown, | 3259 | .shutdown = jme_shutdown, |
3260 | .driver.pm = JME_PM_OPS, | ||
3259 | }; | 3261 | }; |
3260 | 3262 | ||
3261 | static int __init | 3263 | static int __init |
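The jme changes convert the driver from the legacy pci_driver .suspend/.resume callbacks to dev_pm_ops: pci_save_state() and the D-state transitions move into the PCI core, while the wake-up preference is now recorded with device_set_wakeup_enable() in jme_set_wol(). The skeleton below shows the shape of that conversion for a hypothetical driver; only the SIMPLE_DEV_PM_OPS() wiring matters, the function bodies are placeholders.

    #include <linux/pci.h>
    #include <linux/pm.h>

    #ifdef CONFIG_PM
    static int example_suspend(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            /* stop DMA, mask interrupts, power down the PHY, etc. */
            dev_dbg(&pdev->dev, "suspending\n");
            return 0;       /* the core saves PCI state and enters D3 */
    }

    static int example_resume(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            /* re-program the hardware; PCI state was restored by the core */
            dev_dbg(&pdev->dev, "resuming\n");
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
    #define EXAMPLE_PM_OPS (&example_pm_ops)
    #else
    #define EXAMPLE_PM_OPS NULL
    #endif

    static struct pci_driver example_driver = {
            .name           = "example",
            /* .id_table, .probe, .remove as usual */
            .driver.pm      = EXAMPLE_PM_OPS,
    };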
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 540a8dcbcc46..7f7d5708a658 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c | |||
@@ -4898,7 +4898,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev) | |||
4898 | goto unlock; | 4898 | goto unlock; |
4899 | } | 4899 | } |
4900 | skb_copy_and_csum_dev(org_skb, skb->data); | 4900 | skb_copy_and_csum_dev(org_skb, skb->data); |
4901 | org_skb->ip_summed = 0; | 4901 | org_skb->ip_summed = CHECKSUM_NONE; |
4902 | skb->len = org_skb->len; | 4902 | skb->len = org_skb->len; |
4903 | copy_old_skb(org_skb, skb); | 4903 | copy_old_skb(org_skb, skb); |
4904 | } | 4904 | } |
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 5762ebde4455..4f158baa0246 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -742,6 +742,9 @@ int mlx4_en_start_port(struct net_device *dev) | |||
742 | 0, MLX4_PROT_ETH)) | 742 | 0, MLX4_PROT_ETH)) |
743 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); | 743 | mlx4_warn(mdev, "Failed Attaching Broadcast\n"); |
744 | 744 | ||
745 | /* Must redo promiscuous mode setup. */ | ||
746 | priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); | ||
747 | |||
745 | /* Schedule multicast task to populate multicast list */ | 748 | /* Schedule multicast task to populate multicast list */ |
746 | queue_work(mdev->workqueue, &priv->mcast_task); | 749 | queue_work(mdev->workqueue, &priv->mcast_task); |
747 | 750 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 1f4e8680a96a..673dc600c891 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -1312,17 +1312,26 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, | |||
1312 | * page into an skb */ | 1312 | * page into an skb */ |
1313 | 1313 | ||
1314 | static inline int | 1314 | static inline int |
1315 | myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, | 1315 | myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum, |
1316 | int bytes, int len, __wsum csum) | 1316 | int lro_enabled) |
1317 | { | 1317 | { |
1318 | struct myri10ge_priv *mgp = ss->mgp; | 1318 | struct myri10ge_priv *mgp = ss->mgp; |
1319 | struct sk_buff *skb; | 1319 | struct sk_buff *skb; |
1320 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; | 1320 | struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; |
1321 | int i, idx, hlen, remainder; | 1321 | struct myri10ge_rx_buf *rx; |
1322 | int i, idx, hlen, remainder, bytes; | ||
1322 | struct pci_dev *pdev = mgp->pdev; | 1323 | struct pci_dev *pdev = mgp->pdev; |
1323 | struct net_device *dev = mgp->dev; | 1324 | struct net_device *dev = mgp->dev; |
1324 | u8 *va; | 1325 | u8 *va; |
1325 | 1326 | ||
1327 | if (len <= mgp->small_bytes) { | ||
1328 | rx = &ss->rx_small; | ||
1329 | bytes = mgp->small_bytes; | ||
1330 | } else { | ||
1331 | rx = &ss->rx_big; | ||
1332 | bytes = mgp->big_bytes; | ||
1333 | } | ||
1334 | |||
1326 | len += MXGEFW_PAD; | 1335 | len += MXGEFW_PAD; |
1327 | idx = rx->cnt & rx->mask; | 1336 | idx = rx->cnt & rx->mask; |
1328 | va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; | 1337 | va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; |
@@ -1341,7 +1350,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, | |||
1341 | remainder -= MYRI10GE_ALLOC_SIZE; | 1350 | remainder -= MYRI10GE_ALLOC_SIZE; |
1342 | } | 1351 | } |
1343 | 1352 | ||
1344 | if (dev->features & NETIF_F_LRO) { | 1353 | if (lro_enabled) { |
1345 | rx_frags[0].page_offset += MXGEFW_PAD; | 1354 | rx_frags[0].page_offset += MXGEFW_PAD; |
1346 | rx_frags[0].size -= MXGEFW_PAD; | 1355 | rx_frags[0].size -= MXGEFW_PAD; |
1347 | len -= MXGEFW_PAD; | 1356 | len -= MXGEFW_PAD; |
@@ -1463,7 +1472,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1463 | { | 1472 | { |
1464 | struct myri10ge_rx_done *rx_done = &ss->rx_done; | 1473 | struct myri10ge_rx_done *rx_done = &ss->rx_done; |
1465 | struct myri10ge_priv *mgp = ss->mgp; | 1474 | struct myri10ge_priv *mgp = ss->mgp; |
1466 | struct net_device *netdev = mgp->dev; | 1475 | |
1467 | unsigned long rx_bytes = 0; | 1476 | unsigned long rx_bytes = 0; |
1468 | unsigned long rx_packets = 0; | 1477 | unsigned long rx_packets = 0; |
1469 | unsigned long rx_ok; | 1478 | unsigned long rx_ok; |
@@ -1474,18 +1483,18 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1474 | u16 length; | 1483 | u16 length; |
1475 | __wsum checksum; | 1484 | __wsum checksum; |
1476 | 1485 | ||
1486 | /* | ||
1487 | * Prevent compiler from generating more than one ->features memory | ||
1488 | * access to avoid theoretical race condition with functions that | ||
1489 | * change NETIF_F_LRO flag at runtime. | ||
1490 | */ | ||
1491 | bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO; | ||
1492 | |||
1477 | while (rx_done->entry[idx].length != 0 && work_done < budget) { | 1493 | while (rx_done->entry[idx].length != 0 && work_done < budget) { |
1478 | length = ntohs(rx_done->entry[idx].length); | 1494 | length = ntohs(rx_done->entry[idx].length); |
1479 | rx_done->entry[idx].length = 0; | 1495 | rx_done->entry[idx].length = 0; |
1480 | checksum = csum_unfold(rx_done->entry[idx].checksum); | 1496 | checksum = csum_unfold(rx_done->entry[idx].checksum); |
1481 | if (length <= mgp->small_bytes) | 1497 | rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled); |
1482 | rx_ok = myri10ge_rx_done(ss, &ss->rx_small, | ||
1483 | mgp->small_bytes, | ||
1484 | length, checksum); | ||
1485 | else | ||
1486 | rx_ok = myri10ge_rx_done(ss, &ss->rx_big, | ||
1487 | mgp->big_bytes, | ||
1488 | length, checksum); | ||
1489 | rx_packets += rx_ok; | 1498 | rx_packets += rx_ok; |
1490 | rx_bytes += rx_ok * (unsigned long)length; | 1499 | rx_bytes += rx_ok * (unsigned long)length; |
1491 | cnt++; | 1500 | cnt++; |
@@ -1497,7 +1506,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) | |||
1497 | ss->stats.rx_packets += rx_packets; | 1506 | ss->stats.rx_packets += rx_packets; |
1498 | ss->stats.rx_bytes += rx_bytes; | 1507 | ss->stats.rx_bytes += rx_bytes; |
1499 | 1508 | ||
1500 | if (netdev->features & NETIF_F_LRO) | 1509 | if (lro_enabled) |
1501 | lro_flush_all(&rx_done->lro_mgr); | 1510 | lro_flush_all(&rx_done->lro_mgr); |
1502 | 1511 | ||
1503 | /* restock receive rings if needed */ | 1512 | /* restock receive rings if needed */ |
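The myri10ge rework reads dev->features exactly once per NAPI poll, so the per-packet path and the final lro_flush_all() cannot disagree if NETIF_F_LRO is toggled through ethtool while the poll is running; it also lets myri10ge_rx_done() pick the small or big receive ring itself from the packet length. The core of the idiom, reduced to a sketch:

    /*
     * Snapshot the features word once; ACCESS_ONCE() stops the compiler
     * from re-reading dev->features later in the same polling pass.
     */
    bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;

    /* ... every packet handled in this pass tests 'lro_enabled' ... */

    if (lro_enabled)
            lro_flush_all(&rx_done->lro_mgr);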
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 653d308e0f5d..3bdcc803ec68 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
@@ -871,7 +871,7 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data) | |||
871 | struct netxen_adapter *adapter = netdev_priv(netdev); | 871 | struct netxen_adapter *adapter = netdev_priv(netdev); |
872 | int hw_lro; | 872 | int hw_lro; |
873 | 873 | ||
874 | if (data & ~ETH_FLAG_LRO) | 874 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
875 | return -EINVAL; | 875 | return -EINVAL; |
876 | 876 | ||
877 | if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) | 877 | if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) |
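This netxen hunk, together with the qlcnic, s2io, vmxnet3 and vxge hunks that follow, switches the ethtool set_flags handlers to the ethtool_invalid_flags() helper, which centralises the check for unsupported flag bits and, as the vmxnet3/vxge hunks show, standardises the error on -EINVAL where those drivers previously returned -EOPNOTSUPP. A sketch of the pattern in a hypothetical handler:

    static int example_set_flags(struct net_device *netdev, u32 data)
    {
            /* the helper rejects any flag bits outside the supported mask */
            if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
                    return -EINVAL;

            /* ... apply or clear NETIF_F_LRO according to 'data' ... */
            return 0;
    }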
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 4c14510e2a87..45b2755d6cba 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c | |||
@@ -1003,7 +1003,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data) | |||
1003 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1003 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
1004 | int hw_lro; | 1004 | int hw_lro; |
1005 | 1005 | ||
1006 | if (data & ~ETH_FLAG_LRO) | 1006 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
1007 | return -EINVAL; | 1007 | return -EINVAL; |
1008 | 1008 | ||
1009 | if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) | 1009 | if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 2ad6364103ea..356e74d20b80 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -6726,7 +6726,7 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data) | |||
6726 | int rc = 0; | 6726 | int rc = 0; |
6727 | int changed = 0; | 6727 | int changed = 0; |
6728 | 6728 | ||
6729 | if (data & ~ETH_FLAG_LRO) | 6729 | if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO)) |
6730 | return -EINVAL; | 6730 | return -EINVAL; |
6731 | 6731 | ||
6732 | if (data & ETH_FLAG_LRO) { | 6732 | if (data & ETH_FLAG_LRO) { |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index ebec88882c3b..73c942d85f07 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -48,9 +48,9 @@ | |||
48 | #include <net/ip.h> | 48 | #include <net/ip.h> |
49 | 49 | ||
50 | #include <asm/system.h> | 50 | #include <asm/system.h> |
51 | #include <asm/io.h> | 51 | #include <linux/io.h> |
52 | #include <asm/byteorder.h> | 52 | #include <asm/byteorder.h> |
53 | #include <asm/uaccess.h> | 53 | #include <linux/uaccess.h> |
54 | 54 | ||
55 | #ifdef CONFIG_SPARC | 55 | #ifdef CONFIG_SPARC |
56 | #include <asm/idprom.h> | 56 | #include <asm/idprom.h> |
@@ -13118,7 +13118,7 @@ done: | |||
13118 | 13118 | ||
13119 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); | 13119 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); |
13120 | 13120 | ||
13121 | static void inline vlan_features_add(struct net_device *dev, unsigned long flags) | 13121 | static inline void vlan_features_add(struct net_device *dev, unsigned long flags) |
13122 | { | 13122 | { |
13123 | dev->vlan_features |= flags; | 13123 | dev->vlan_features |= flags; |
13124 | } | 13124 | } |
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 81254be85b92..51f2ef142a5b 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -304,8 +304,8 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
304 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; | 304 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; |
305 | unsigned long flags; | 305 | unsigned long flags; |
306 | 306 | ||
307 | if (data & ~ETH_FLAG_LRO) | 307 | if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) |
308 | return -EOPNOTSUPP; | 308 | return -EINVAL; |
309 | 309 | ||
310 | if (lro_requested ^ lro_present) { | 310 | if (lro_requested ^ lro_present) { |
311 | /* toggle the LRO feature*/ | 311 | /* toggle the LRO feature*/ |
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c index 1dd3a21b3a43..c5eb034107fd 100644 --- a/drivers/net/vxge/vxge-ethtool.c +++ b/drivers/net/vxge/vxge-ethtool.c | |||
@@ -1117,8 +1117,8 @@ static int vxge_set_flags(struct net_device *dev, u32 data) | |||
1117 | struct vxgedev *vdev = netdev_priv(dev); | 1117 | struct vxgedev *vdev = netdev_priv(dev); |
1118 | enum vxge_hw_status status; | 1118 | enum vxge_hw_status status; |
1119 | 1119 | ||
1120 | if (data & ~ETH_FLAG_RXHASH) | 1120 | if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH)) |
1121 | return -EOPNOTSUPP; | 1121 | return -EINVAL; |
1122 | 1122 | ||
1123 | if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) | 1123 | if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) |
1124 | return 0; | 1124 | return 0; |
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 18d24b7b1e34..7ecc0bda57b3 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c | |||
@@ -649,8 +649,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) | |||
649 | goto err_free_common; | 649 | goto err_free_common; |
650 | } | 650 | } |
651 | 651 | ||
652 | set_irq_type(gpio_to_irq(p54spi_gpio_irq), | 652 | irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); |
653 | IRQ_TYPE_EDGE_RISING); | ||
654 | 653 | ||
655 | disable_irq(gpio_to_irq(p54spi_gpio_irq)); | 654 | disable_irq(gpio_to_irq(p54spi_gpio_irq)); |
656 | 655 | ||
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c index d550b5e68d3c..f51a0241a440 100644 --- a/drivers/net/wireless/wl1251/sdio.c +++ b/drivers/net/wireless/wl1251/sdio.c | |||
@@ -265,7 +265,7 @@ static int wl1251_sdio_probe(struct sdio_func *func, | |||
265 | goto disable; | 265 | goto disable; |
266 | } | 266 | } |
267 | 267 | ||
268 | set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); | 268 | irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); |
269 | disable_irq(wl->irq); | 269 | disable_irq(wl->irq); |
270 | 270 | ||
271 | wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; | 271 | wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; |
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c index ac872b38960f..af6448c4d3e2 100644 --- a/drivers/net/wireless/wl1251/spi.c +++ b/drivers/net/wireless/wl1251/spi.c | |||
@@ -286,7 +286,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi) | |||
286 | goto out_free; | 286 | goto out_free; |
287 | } | 287 | } |
288 | 288 | ||
289 | set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); | 289 | irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); |
290 | 290 | ||
291 | disable_irq(wl->irq); | 291 | disable_irq(wl->irq); |
292 | 292 | ||
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index deeec32a5803..103095bbe8c0 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c | |||
@@ -340,7 +340,7 @@ static int __init eisa_probe(struct parisc_device *dev) | |||
340 | /* Reserve IRQ2 */ | 340 | /* Reserve IRQ2 */ |
341 | setup_irq(2, &irq2_action); | 341 | setup_irq(2, &irq2_action); |
342 | for (i = 0; i < 16; i++) { | 342 | for (i = 0; i < 16; i++) { |
343 | set_irq_chip_and_handler(i, &eisa_interrupt_type, | 343 | irq_set_chip_and_handler(i, &eisa_interrupt_type, |
344 | handle_simple_irq); | 344 | handle_simple_irq); |
345 | } | 345 | } |
346 | 346 | ||
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index ef31080cf591..1bab5a2cd359 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c | |||
@@ -152,8 +152,8 @@ int gsc_assign_irq(struct irq_chip *type, void *data) | |||
152 | if (irq > GSC_IRQ_MAX) | 152 | if (irq > GSC_IRQ_MAX) |
153 | return NO_IRQ; | 153 | return NO_IRQ; |
154 | 154 | ||
155 | set_irq_chip_and_handler(irq, type, handle_simple_irq); | 155 | irq_set_chip_and_handler(irq, type, handle_simple_irq); |
156 | set_irq_chip_data(irq, data); | 156 | irq_set_chip_data(irq, data); |
157 | 157 | ||
158 | return irq++; | 158 | return irq++; |
159 | } | 159 | } |
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index a4d8ff66a639..e3b76d409dee 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c | |||
@@ -355,7 +355,8 @@ int superio_fixup_irq(struct pci_dev *pcidev) | |||
355 | #endif | 355 | #endif |
356 | 356 | ||
357 | for (i = 0; i < 16; i++) { | 357 | for (i = 0; i < 16; i++) { |
358 | set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq); | 358 | irq_set_chip_and_handler(i, &superio_interrupt_type, |
359 | handle_simple_irq); | ||
359 | } | 360 | } |
360 | 361 | ||
361 | /* | 362 | /* |
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 09933eb9126b..12e02bf92c4a 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c | |||
@@ -1226,7 +1226,7 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) | |||
1226 | 1226 | ||
1227 | void dmar_msi_unmask(struct irq_data *data) | 1227 | void dmar_msi_unmask(struct irq_data *data) |
1228 | { | 1228 | { |
1229 | struct intel_iommu *iommu = irq_data_get_irq_data(data); | 1229 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
1230 | unsigned long flag; | 1230 | unsigned long flag; |
1231 | 1231 | ||
1232 | /* unmask it */ | 1232 | /* unmask it */ |
@@ -1240,7 +1240,7 @@ void dmar_msi_unmask(struct irq_data *data) | |||
1240 | void dmar_msi_mask(struct irq_data *data) | 1240 | void dmar_msi_mask(struct irq_data *data) |
1241 | { | 1241 | { |
1242 | unsigned long flag; | 1242 | unsigned long flag; |
1243 | struct intel_iommu *iommu = irq_data_get_irq_data(data); | 1243 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); |
1244 | 1244 | ||
1245 | /* mask it */ | 1245 | /* mask it */ |
1246 | spin_lock_irqsave(&iommu->register_lock, flag); | 1246 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1252,7 +1252,7 @@ void dmar_msi_mask(struct irq_data *data) | |||
1252 | 1252 | ||
1253 | void dmar_msi_write(int irq, struct msi_msg *msg) | 1253 | void dmar_msi_write(int irq, struct msi_msg *msg) |
1254 | { | 1254 | { |
1255 | struct intel_iommu *iommu = get_irq_data(irq); | 1255 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
1256 | unsigned long flag; | 1256 | unsigned long flag; |
1257 | 1257 | ||
1258 | spin_lock_irqsave(&iommu->register_lock, flag); | 1258 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1264,7 +1264,7 @@ void dmar_msi_write(int irq, struct msi_msg *msg) | |||
1264 | 1264 | ||
1265 | void dmar_msi_read(int irq, struct msi_msg *msg) | 1265 | void dmar_msi_read(int irq, struct msi_msg *msg) |
1266 | { | 1266 | { |
1267 | struct intel_iommu *iommu = get_irq_data(irq); | 1267 | struct intel_iommu *iommu = irq_get_handler_data(irq); |
1268 | unsigned long flag; | 1268 | unsigned long flag; |
1269 | 1269 | ||
1270 | spin_lock_irqsave(&iommu->register_lock, flag); | 1270 | spin_lock_irqsave(&iommu->register_lock, flag); |
@@ -1382,12 +1382,12 @@ int dmar_set_interrupt(struct intel_iommu *iommu) | |||
1382 | return -EINVAL; | 1382 | return -EINVAL; |
1383 | } | 1383 | } |
1384 | 1384 | ||
1385 | set_irq_data(irq, iommu); | 1385 | irq_set_handler_data(irq, iommu); |
1386 | iommu->irq = irq; | 1386 | iommu->irq = irq; |
1387 | 1387 | ||
1388 | ret = arch_setup_dmar_msi(irq); | 1388 | ret = arch_setup_dmar_msi(irq); |
1389 | if (ret) { | 1389 | if (ret) { |
1390 | set_irq_data(irq, NULL); | 1390 | irq_set_handler_data(irq, NULL); |
1391 | iommu->irq = 0; | 1391 | iommu->irq = 0; |
1392 | destroy_irq(irq); | 1392 | destroy_irq(irq); |
1393 | return ret; | 1393 | return ret; |
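The dmar.c conversion is part of the same accessor rename: the per-interrupt intel_iommu pointer is still stored as IRQ handler data, only the function names change (set_irq_data()/get_irq_data() become irq_set_handler_data()/irq_get_handler_data()). The stash-then-retrieve pattern, reduced to a sketch with a stand-in structure:

    #include <linux/irq.h>

    struct example_dev;     /* stand-in for struct intel_iommu */

    /* at setup time: attach the per-IRQ cookie */
    static void example_setup(unsigned int irq, struct example_dev *dev)
    {
            irq_set_handler_data(irq, dev);
    }

    /* in an irq_chip callback: recover it from the irq_data */
    static void example_mask(struct irq_data *data)
    {
            struct example_dev *dev = irq_data_get_irq_handler_data(data);

            /* ... program the device's mask register using 'dev' ... */
    }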
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 834842aa5bbf..db057b6fe0c8 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c | |||
@@ -34,7 +34,7 @@ struct ht_irq_cfg { | |||
34 | 34 | ||
35 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | 35 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) |
36 | { | 36 | { |
37 | struct ht_irq_cfg *cfg = get_irq_data(irq); | 37 | struct ht_irq_cfg *cfg = irq_get_handler_data(irq); |
38 | unsigned long flags; | 38 | unsigned long flags; |
39 | spin_lock_irqsave(&ht_irq_lock, flags); | 39 | spin_lock_irqsave(&ht_irq_lock, flags); |
40 | if (cfg->msg.address_lo != msg->address_lo) { | 40 | if (cfg->msg.address_lo != msg->address_lo) { |
@@ -53,13 +53,13 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | |||
53 | 53 | ||
54 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) | 54 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) |
55 | { | 55 | { |
56 | struct ht_irq_cfg *cfg = get_irq_data(irq); | 56 | struct ht_irq_cfg *cfg = irq_get_handler_data(irq); |
57 | *msg = cfg->msg; | 57 | *msg = cfg->msg; |
58 | } | 58 | } |
59 | 59 | ||
60 | void mask_ht_irq(struct irq_data *data) | 60 | void mask_ht_irq(struct irq_data *data) |
61 | { | 61 | { |
62 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); | 62 | struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); |
63 | struct ht_irq_msg msg = cfg->msg; | 63 | struct ht_irq_msg msg = cfg->msg; |
64 | 64 | ||
65 | msg.address_lo |= 1; | 65 | msg.address_lo |= 1; |
@@ -68,7 +68,7 @@ void mask_ht_irq(struct irq_data *data) | |||
68 | 68 | ||
69 | void unmask_ht_irq(struct irq_data *data) | 69 | void unmask_ht_irq(struct irq_data *data) |
70 | { | 70 | { |
71 | struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); | 71 | struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data); |
72 | struct ht_irq_msg msg = cfg->msg; | 72 | struct ht_irq_msg msg = cfg->msg; |
73 | 73 | ||
74 | msg.address_lo &= ~1; | 74 | msg.address_lo &= ~1; |
@@ -126,7 +126,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) | |||
126 | kfree(cfg); | 126 | kfree(cfg); |
127 | return -EBUSY; | 127 | return -EBUSY; |
128 | } | 128 | } |
129 | set_irq_data(irq, cfg); | 129 | irq_set_handler_data(irq, cfg); |
130 | 130 | ||
131 | if (arch_setup_ht_irq(irq, dev) < 0) { | 131 | if (arch_setup_ht_irq(irq, dev) < 0) { |
132 | ht_destroy_irq(irq); | 132 | ht_destroy_irq(irq); |
@@ -162,9 +162,9 @@ void ht_destroy_irq(unsigned int irq) | |||
162 | { | 162 | { |
163 | struct ht_irq_cfg *cfg; | 163 | struct ht_irq_cfg *cfg; |
164 | 164 | ||
165 | cfg = get_irq_data(irq); | 165 | cfg = irq_get_handler_data(irq); |
166 | set_irq_chip(irq, NULL); | 166 | irq_set_chip(irq, NULL); |
167 | set_irq_data(irq, NULL); | 167 | irq_set_handler_data(irq, NULL); |
168 | destroy_irq(irq); | 168 | destroy_irq(irq); |
169 | 169 | ||
170 | kfree(cfg); | 170 | kfree(cfg); |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index a4115f1afe1f..7da3bef60d87 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -1206,7 +1206,7 @@ void free_dmar_iommu(struct intel_iommu *iommu) | |||
1206 | iommu_disable_translation(iommu); | 1206 | iommu_disable_translation(iommu); |
1207 | 1207 | ||
1208 | if (iommu->irq) { | 1208 | if (iommu->irq) { |
1209 | set_irq_data(iommu->irq, NULL); | 1209 | irq_set_handler_data(iommu->irq, NULL); |
1210 | /* This will mask the irq */ | 1210 | /* This will mask the irq */ |
1211 | free_irq(iommu->irq, iommu); | 1211 | free_irq(iommu->irq, iommu); |
1212 | destroy_irq(iommu->irq); | 1212 | destroy_irq(iommu->irq); |
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index ec87cd66f3eb..a22557b20283 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c | |||
@@ -50,7 +50,7 @@ static DEFINE_SPINLOCK(irq_2_ir_lock); | |||
50 | 50 | ||
51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | 51 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) |
52 | { | 52 | { |
53 | struct irq_cfg *cfg = get_irq_chip_data(irq); | 53 | struct irq_cfg *cfg = irq_get_chip_data(irq); |
54 | return cfg ? &cfg->irq_2_iommu : NULL; | 54 | return cfg ? &cfg->irq_2_iommu : NULL; |
55 | } | 55 | } |
56 | 56 | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 44b0aeee83e5..2f10328bf661 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -236,7 +236,7 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
236 | 236 | ||
237 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 237 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
238 | { | 238 | { |
239 | struct msi_desc *entry = get_irq_msi(irq); | 239 | struct msi_desc *entry = irq_get_msi_desc(irq); |
240 | 240 | ||
241 | __read_msi_msg(entry, msg); | 241 | __read_msi_msg(entry, msg); |
242 | } | 242 | } |
@@ -253,7 +253,7 @@ void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
253 | 253 | ||
254 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) | 254 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) |
255 | { | 255 | { |
256 | struct msi_desc *entry = get_irq_msi(irq); | 256 | struct msi_desc *entry = irq_get_msi_desc(irq); |
257 | 257 | ||
258 | __get_cached_msi_msg(entry, msg); | 258 | __get_cached_msi_msg(entry, msg); |
259 | } | 259 | } |
@@ -297,7 +297,7 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
297 | 297 | ||
298 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | 298 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) |
299 | { | 299 | { |
300 | struct msi_desc *entry = get_irq_msi(irq); | 300 | struct msi_desc *entry = irq_get_msi_desc(irq); |
301 | 301 | ||
302 | __write_msi_msg(entry, msg); | 302 | __write_msi_msg(entry, msg); |
303 | } | 303 | } |
@@ -354,7 +354,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) | |||
354 | if (!dev->msi_enabled) | 354 | if (!dev->msi_enabled) |
355 | return; | 355 | return; |
356 | 356 | ||
357 | entry = get_irq_msi(dev->irq); | 357 | entry = irq_get_msi_desc(dev->irq); |
358 | pos = entry->msi_attrib.pos; | 358 | pos = entry->msi_attrib.pos; |
359 | 359 | ||
360 | pci_intx_for_msi(dev, 0); | 360 | pci_intx_for_msi(dev, 0); |
@@ -519,7 +519,7 @@ static void msix_program_entries(struct pci_dev *dev, | |||
519 | PCI_MSIX_ENTRY_VECTOR_CTRL; | 519 | PCI_MSIX_ENTRY_VECTOR_CTRL; |
520 | 520 | ||
521 | entries[i].vector = entry->irq; | 521 | entries[i].vector = entry->irq; |
522 | set_irq_msi(entry->irq, entry); | 522 | irq_set_msi_desc(entry->irq, entry); |
523 | entry->masked = readl(entry->mask_base + offset); | 523 | entry->masked = readl(entry->mask_base + offset); |
524 | msix_mask_irq(entry, 1); | 524 | msix_mask_irq(entry, 1); |
525 | i++; | 525 | i++; |
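The IRQ-related hunks in this merge (dm9000, p54spi, wl1251, the parisc EISA/GSC/superio files, the PCI dmar/htirq/intr_remapping/msi code above and the pcmcia drivers below) are all sides of one mechanical genirq rename: the old set_irq_*/get_irq_* accessors were replaced by namespaced irq_* equivalents with identical arguments and behaviour. A summary of the mappings that appear in this section:

    /*
     * old accessor                      new accessor (same arguments)
     * -------------------------------   ---------------------------------------------
     * set_irq_type()                    irq_set_irq_type()
     * set_irq_wake()                    irq_set_irq_wake()
     * set_irq_chip()                    irq_set_chip()
     * set_irq_chip_and_handler()        irq_set_chip_and_handler()
     * set_irq_chip_data()               irq_set_chip_data()
     * get_irq_chip_data()               irq_get_chip_data()
     * set_irq_data() / get_irq_data()   irq_set_handler_data() / irq_get_handler_data()
     * set_irq_msi() / get_irq_msi()     irq_set_msi_desc() / irq_get_msi_desc()
     * irq_data_get_irq_data()           irq_data_get_irq_handler_data()
     */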
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c index eae9cbe37a3e..49221395101e 100644 --- a/drivers/pcmcia/bfin_cf_pcmcia.c +++ b/drivers/pcmcia/bfin_cf_pcmcia.c | |||
@@ -235,7 +235,7 @@ static int __devinit bfin_cf_probe(struct platform_device *pdev) | |||
235 | cf->irq = irq; | 235 | cf->irq = irq; |
236 | cf->socket.pci_irq = irq; | 236 | cf->socket.pci_irq = irq; |
237 | 237 | ||
238 | set_irq_type(irq, IRQF_TRIGGER_LOW); | 238 | irq_set_irq_type(irq, IRQF_TRIGGER_LOW); |
239 | 239 | ||
240 | io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 240 | io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
241 | attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 241 | attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 27575e6378a1..01757f18a208 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c | |||
@@ -181,7 +181,7 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) | |||
181 | /* all other (older) Db1x00 boards use a GPIO to show | 181 | /* all other (older) Db1x00 boards use a GPIO to show |
182 | * card detection status: use both-edge triggers. | 182 | * card detection status: use both-edge triggers. |
183 | */ | 183 | */ |
184 | set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH); | 184 | irq_set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH); |
185 | ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq, | 185 | ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq, |
186 | 0, "pcmcia_carddetect", sock); | 186 | 0, "pcmcia_carddetect", sock); |
187 | 187 | ||
diff --git a/drivers/pcmcia/sa1100_nanoengine.c b/drivers/pcmcia/sa1100_nanoengine.c index 3d2652e2f5ae..93b9c9ba57c3 100644 --- a/drivers/pcmcia/sa1100_nanoengine.c +++ b/drivers/pcmcia/sa1100_nanoengine.c | |||
@@ -86,7 +86,7 @@ static int nanoengine_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
86 | GPDR &= ~nano_skts[i].input_pins; | 86 | GPDR &= ~nano_skts[i].input_pins; |
87 | GPDR |= nano_skts[i].output_pins; | 87 | GPDR |= nano_skts[i].output_pins; |
88 | GPCR = nano_skts[i].clear_outputs; | 88 | GPCR = nano_skts[i].clear_outputs; |
89 | set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH); | 89 | irq_set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH); |
90 | skt->socket.pci_irq = nano_skts[i].pci_irq; | 90 | skt->socket.pci_irq = nano_skts[i].pci_irq; |
91 | 91 | ||
92 | return soc_pcmcia_request_irqs(skt, | 92 | return soc_pcmcia_request_irqs(skt, |
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 5a9a392eacdf..768f9572a8c8 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c | |||
@@ -155,11 +155,11 @@ static int soc_common_pcmcia_config_skt( | |||
155 | */ | 155 | */ |
156 | if (skt->irq_state != 1 && state->io_irq) { | 156 | if (skt->irq_state != 1 && state->io_irq) { |
157 | skt->irq_state = 1; | 157 | skt->irq_state = 1; |
158 | set_irq_type(skt->socket.pci_irq, | 158 | irq_set_irq_type(skt->socket.pci_irq, |
159 | IRQ_TYPE_EDGE_FALLING); | 159 | IRQ_TYPE_EDGE_FALLING); |
160 | } else if (skt->irq_state == 1 && state->io_irq == 0) { | 160 | } else if (skt->irq_state == 1 && state->io_irq == 0) { |
161 | skt->irq_state = 0; | 161 | skt->irq_state = 0; |
162 | set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); | 162 | irq_set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE); |
163 | } | 163 | } |
164 | 164 | ||
165 | skt->cs_state = *state; | 165 | skt->cs_state = *state; |
@@ -537,7 +537,7 @@ int soc_pcmcia_request_irqs(struct soc_pcmcia_socket *skt, | |||
537 | IRQF_DISABLED, irqs[i].str, skt); | 537 | IRQF_DISABLED, irqs[i].str, skt); |
538 | if (res) | 538 | if (res) |
539 | break; | 539 | break; |
540 | set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); | 540 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); |
541 | } | 541 | } |
542 | 542 | ||
543 | if (res) { | 543 | if (res) { |
@@ -570,7 +570,7 @@ void soc_pcmcia_disable_irqs(struct soc_pcmcia_socket *skt, | |||
570 | 570 | ||
571 | for (i = 0; i < nr; i++) | 571 | for (i = 0; i < nr; i++) |
572 | if (irqs[i].sock == skt->nr) | 572 | if (irqs[i].sock == skt->nr) |
573 | set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); | 573 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE); |
574 | } | 574 | } |
575 | EXPORT_SYMBOL(soc_pcmcia_disable_irqs); | 575 | EXPORT_SYMBOL(soc_pcmcia_disable_irqs); |
576 | 576 | ||
@@ -581,8 +581,8 @@ void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt, | |||
581 | 581 | ||
582 | for (i = 0; i < nr; i++) | 582 | for (i = 0; i < nr; i++) |
583 | if (irqs[i].sock == skt->nr) { | 583 | if (irqs[i].sock == skt->nr) { |
584 | set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING); | 584 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING); |
585 | set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH); | 585 | irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH); |
586 | } | 586 | } |
587 | } | 587 | } |
588 | EXPORT_SYMBOL(soc_pcmcia_enable_irqs); | 588 | EXPORT_SYMBOL(soc_pcmcia_enable_irqs); |
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c index 3b67a1b6a197..379f4218857d 100644 --- a/drivers/pcmcia/xxs1500_ss.c +++ b/drivers/pcmcia/xxs1500_ss.c | |||
@@ -274,7 +274,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev) | |||
274 | * edge detector. | 274 | * edge detector. |
275 | */ | 275 | */ |
276 | irq = gpio_to_irq(GPIO_CDA); | 276 | irq = gpio_to_irq(GPIO_CDA); |
277 | set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); | 277 | irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH); |
278 | ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock); | 278 | ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock); |
279 | if (ret) { | 279 | if (ret) { |
280 | dev_err(&pdev->dev, "cannot setup cd irq\n"); | 280 | dev_err(&pdev->dev, "cannot setup cd irq\n"); |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 222dfb737b11..2ee442c2a5db 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -101,6 +101,19 @@ config DELL_WMI | |||
101 | To compile this driver as a module, choose M here: the module will | 101 | To compile this driver as a module, choose M here: the module will |
102 | be called dell-wmi. | 102 | be called dell-wmi. |
103 | 103 | ||
104 | config DELL_WMI_AIO | ||
105 | tristate "WMI Hotkeys for Dell All-In-One series" | ||
106 | depends on ACPI_WMI | ||
107 | depends on INPUT | ||
108 | select INPUT_SPARSEKMAP | ||
109 | ---help--- | ||
110 | Say Y here if you want to support WMI-based hotkeys on Dell | ||
111 | All-In-One machines. | ||
112 | |||
113 | To compile this driver as a module, choose M here: the module will | ||
114 | be called dell-wmi. | ||
115 | |||
116 | |||
104 | config FUJITSU_LAPTOP | 117 | config FUJITSU_LAPTOP |
105 | tristate "Fujitsu Laptop Extras" | 118 | tristate "Fujitsu Laptop Extras" |
106 | depends on ACPI | 119 | depends on ACPI |
@@ -438,23 +451,53 @@ config EEEPC_LAPTOP | |||
438 | Bluetooth, backlight and allows powering on/off some other | 451 | Bluetooth, backlight and allows powering on/off some other |
439 | devices. | 452 | devices. |
440 | 453 | ||
441 | If you have an Eee PC laptop, say Y or M here. | 454 | If you have an Eee PC laptop, say Y or M here. If this driver |
455 | doesn't work on your Eee PC, try eeepc-wmi instead. | ||
442 | 456 | ||
443 | config EEEPC_WMI | 457 | config ASUS_WMI |
444 | tristate "Eee PC WMI Hotkey Driver (EXPERIMENTAL)" | 458 | tristate "ASUS WMI Driver (EXPERIMENTAL)" |
445 | depends on ACPI_WMI | 459 | depends on ACPI_WMI |
446 | depends on INPUT | 460 | depends on INPUT |
461 | depends on HWMON | ||
447 | depends on EXPERIMENTAL | 462 | depends on EXPERIMENTAL |
448 | depends on BACKLIGHT_CLASS_DEVICE | 463 | depends on BACKLIGHT_CLASS_DEVICE |
449 | depends on RFKILL || RFKILL = n | 464 | depends on RFKILL || RFKILL = n |
465 | depends on HOTPLUG_PCI | ||
450 | select INPUT_SPARSEKMAP | 466 | select INPUT_SPARSEKMAP |
451 | select LEDS_CLASS | 467 | select LEDS_CLASS |
452 | select NEW_LEDS | 468 | select NEW_LEDS |
453 | ---help--- | 469 | ---help--- |
454 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. | 470 | Say Y here if you have a WMI-aware Asus laptop (like Eee PCs or new |
471 | Asus Notebooks). | ||
455 | 472 | ||
456 | To compile this driver as a module, choose M here: the module will | 473 | To compile this driver as a module, choose M here: the module will |
457 | be called eeepc-wmi. | 474 | be called asus-wmi. |
475 | |||
476 | config ASUS_NB_WMI | ||
477 | tristate "Asus Notebook WMI Driver (EXPERIMENTAL)" | ||
478 | depends on ASUS_WMI | ||
479 | ---help--- | ||
480 | This is a driver for newer Asus notebooks. It adds extra features | ||
481 | like wireless radio and Bluetooth control, LEDs, hotkeys and backlight. | ||
482 | |||
483 | For more information, see | ||
484 | <file:Documentation/ABI/testing/sysfs-platform-asus-wmi> | ||
485 | |||
486 | If you have an ACPI-WMI compatible Asus Notebook, say Y or M | ||
487 | here. | ||
488 | |||
489 | config EEEPC_WMI | ||
490 | tristate "Eee PC WMI Driver (EXPERIMENTAL)" | ||
491 | depends on ASUS_WMI | ||
492 | ---help--- | ||
493 | This is a driver for newer Eee PC laptops. It adds extra features | ||
494 | like wireless radio and Bluetooth control, LEDs, hotkeys and backlight. | ||
495 | |||
496 | For more information, see | ||
497 | <file:Documentation/ABI/testing/sysfs-platform-asus-wmi> | ||
498 | |||
499 | If you have an ACPI-WMI compatible Eee PC laptop (>= 1000), say Y or M | ||
500 | here. | ||
458 | 501 | ||
459 | config ACPI_WMI | 502 | config ACPI_WMI |
460 | tristate "WMI" | 503 | tristate "WMI" |
@@ -616,6 +659,21 @@ config GPIO_INTEL_PMIC | |||
616 | Say Y here to support GPIO via the SCU IPC interface | 659 | Say Y here to support GPIO via the SCU IPC interface |
617 | on Intel MID platforms. | 660 | on Intel MID platforms. |
618 | 661 | ||
662 | config INTEL_MID_POWER_BUTTON | ||
663 | tristate "power button driver for Intel MID platforms" | ||
664 | depends on INTEL_SCU_IPC && INPUT | ||
665 | help | ||
666 | This driver handles the power button on the Intel MID platforms. | ||
667 | |||
668 | If unsure, say N. | ||
669 | |||
670 | config INTEL_MFLD_THERMAL | ||
671 | tristate "Thermal driver for Intel Medfield platform" | ||
672 | depends on INTEL_SCU_IPC && THERMAL | ||
673 | help | ||
674 | Say Y here to enable thermal driver support for the Intel Medfield | ||
675 | platform. | ||
676 | |||
619 | config RAR_REGISTER | 677 | config RAR_REGISTER |
620 | bool "Restricted Access Region Register Driver" | 678 | bool "Restricted Access Region Register Driver" |
621 | depends on PCI && X86_MRST | 679 | depends on PCI && X86_MRST |
@@ -672,4 +730,26 @@ config XO1_RFKILL | |||
672 | Support for enabling/disabling the WLAN interface on the OLPC XO-1 | 730 | Support for enabling/disabling the WLAN interface on the OLPC XO-1 |
673 | laptop. | 731 | laptop. |
674 | 732 | ||
733 | config XO15_EBOOK | ||
734 | tristate "OLPC XO-1.5 ebook switch" | ||
735 | depends on ACPI && INPUT | ||
736 | ---help--- | ||
737 | Support for the ebook switch on the OLPC XO-1.5 laptop. | ||
738 | |||
739 | This switch is triggered as the screen is rotated and folded down to | ||
740 | convert the device into ebook form. | ||
741 | |||
742 | config SAMSUNG_LAPTOP | ||
743 | tristate "Samsung Laptop driver" | ||
744 | depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86 | ||
745 | ---help--- | ||
746 | This module implements a driver for a wide range of different | ||
747 | Samsung laptops. It offers control over the different | ||
748 | function keys, wireless LED, LCD backlight level, and | ||
749 | sometimes provides a "performance_control" sysfs file to allow | ||
750 | the performance level of the laptop to be changed. | ||
751 | |||
752 | To compile this driver as a module, choose M here: the module | ||
753 | will be called samsung-laptop. | ||
754 | |||
675 | endif # X86_PLATFORM_DEVICES | 755 | endif # X86_PLATFORM_DEVICES |
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 299aefb3e74c..029e8861d086 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
@@ -3,6 +3,8 @@ | |||
3 | # x86 Platform-Specific Drivers | 3 | # x86 Platform-Specific Drivers |
4 | # | 4 | # |
5 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o | 5 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o |
6 | obj-$(CONFIG_ASUS_WMI) += asus-wmi.o | ||
7 | obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o | ||
6 | obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o | 8 | obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o |
7 | obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o | 9 | obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o |
8 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o | 10 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o |
@@ -10,6 +12,7 @@ obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o | |||
10 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | 12 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o |
11 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o | 13 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o |
12 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o | 14 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o |
15 | obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o | ||
13 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o | 16 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o |
14 | obj-$(CONFIG_ACERHDF) += acerhdf.o | 17 | obj-$(CONFIG_ACERHDF) += acerhdf.o |
15 | obj-$(CONFIG_HP_ACCEL) += hp_accel.o | 18 | obj-$(CONFIG_HP_ACCEL) += hp_accel.o |
@@ -29,9 +32,13 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o | |||
29 | obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o | 32 | obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o |
30 | obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o | 33 | obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o |
31 | obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o | 34 | obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o |
32 | obj-$(CONFIG_INTEL_SCU_IPC_UTIL)+= intel_scu_ipcutil.o | 35 | obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o |
36 | obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o | ||
33 | obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o | 37 | obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o |
34 | obj-$(CONFIG_INTEL_IPS) += intel_ips.o | 38 | obj-$(CONFIG_INTEL_IPS) += intel_ips.o |
35 | obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o | 39 | obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o |
36 | obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o | 40 | obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o |
41 | obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o | ||
37 | obj-$(CONFIG_IBM_RTL) += ibm_rtl.o | 42 | obj-$(CONFIG_IBM_RTL) += ibm_rtl.o |
43 | obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o | ||
44 | obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o | ||
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index c9784705f6ac..5ea6c3477d17 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -22,6 +22,8 @@ | |||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
26 | |||
25 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
26 | #include <linux/module.h> | 28 | #include <linux/module.h> |
27 | #include <linux/init.h> | 29 | #include <linux/init.h> |
@@ -46,12 +48,6 @@ MODULE_AUTHOR("Carlos Corbacho"); | |||
46 | MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); | 48 | MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); |
47 | MODULE_LICENSE("GPL"); | 49 | MODULE_LICENSE("GPL"); |
48 | 50 | ||
49 | #define ACER_LOGPREFIX "acer-wmi: " | ||
50 | #define ACER_ERR KERN_ERR ACER_LOGPREFIX | ||
51 | #define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX | ||
52 | #define ACER_INFO KERN_INFO ACER_LOGPREFIX | ||
53 | #define ACER_WARNING KERN_WARNING ACER_LOGPREFIX | ||
54 | |||
55 | /* | 51 | /* |
56 | * Magic Number | 52 | * Magic Number |
57 | * Meaning is unknown - this number is required for writing to ACPI for AMW0 | 53 | * Meaning is unknown - this number is required for writing to ACPI for AMW0 |
@@ -84,7 +80,7 @@ MODULE_LICENSE("GPL"); | |||
84 | #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" | 80 | #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" |
85 | #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" | 81 | #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" |
86 | #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" | 82 | #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" |
87 | #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" | 83 | #define WMID_GUID2 "95764E09-FB56-4E83-B31A-37761F60994A" |
88 | #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" | 84 | #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" |
89 | 85 | ||
90 | /* | 86 | /* |
@@ -93,7 +89,7 @@ MODULE_LICENSE("GPL"); | |||
93 | #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" | 89 | #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" |
94 | 90 | ||
95 | MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); | 91 | MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); |
96 | MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"); | 92 | MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3"); |
97 | MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); | 93 | MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); |
98 | 94 | ||
99 | enum acer_wmi_event_ids { | 95 | enum acer_wmi_event_ids { |
@@ -108,7 +104,7 @@ static const struct key_entry acer_wmi_keymap[] = { | |||
108 | {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ | 104 | {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ |
109 | {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ | 105 | {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ |
110 | {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ | 106 | {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ |
111 | {KE_KEY, 0x82, {KEY_F22} }, /* Touch Pad On/Off */ | 107 | {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */ |
112 | {KE_END, 0} | 108 | {KE_END, 0} |
113 | }; | 109 | }; |
114 | 110 | ||
@@ -221,6 +217,7 @@ struct acer_debug { | |||
221 | static struct rfkill *wireless_rfkill; | 217 | static struct rfkill *wireless_rfkill; |
222 | static struct rfkill *bluetooth_rfkill; | 218 | static struct rfkill *bluetooth_rfkill; |
223 | static struct rfkill *threeg_rfkill; | 219 | static struct rfkill *threeg_rfkill; |
220 | static bool rfkill_inited; | ||
224 | 221 | ||
225 | /* Each low-level interface must define at least some of the following */ | 222 | /* Each low-level interface must define at least some of the following */ |
226 | struct wmi_interface { | 223 | struct wmi_interface { |
@@ -845,7 +842,7 @@ static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy) | |||
845 | has_type_aa = true; | 842 | has_type_aa = true; |
846 | type_aa = (struct hotkey_function_type_aa *) header; | 843 | type_aa = (struct hotkey_function_type_aa *) header; |
847 | 844 | ||
848 | printk(ACER_INFO "Function bitmap for Communication Button: 0x%x\n", | 845 | pr_info("Function bitmap for Communication Button: 0x%x\n", |
849 | type_aa->commun_func_bitmap); | 846 | type_aa->commun_func_bitmap); |
850 | 847 | ||
851 | if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS) | 848 | if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS) |
@@ -991,6 +988,7 @@ static int __devinit acer_led_init(struct device *dev) | |||
991 | 988 | ||
992 | static void acer_led_exit(void) | 989 | static void acer_led_exit(void) |
993 | { | 990 | { |
991 | set_u32(LED_OFF, ACER_CAP_MAILLED); | ||
994 | led_classdev_unregister(&mail_led); | 992 | led_classdev_unregister(&mail_led); |
995 | } | 993 | } |
996 | 994 | ||
@@ -1036,7 +1034,7 @@ static int __devinit acer_backlight_init(struct device *dev) | |||
1036 | bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, | 1034 | bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, |
1037 | &props); | 1035 | &props); |
1038 | if (IS_ERR(bd)) { | 1036 | if (IS_ERR(bd)) { |
1039 | printk(ACER_ERR "Could not register Acer backlight device\n"); | 1037 | pr_err("Could not register Acer backlight device\n"); |
1040 | acer_backlight_device = NULL; | 1038 | acer_backlight_device = NULL; |
1041 | return PTR_ERR(bd); | 1039 | return PTR_ERR(bd); |
1042 | } | 1040 | } |
@@ -1083,8 +1081,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device) | |||
1083 | return AE_ERROR; | 1081 | return AE_ERROR; |
1084 | } | 1082 | } |
1085 | if (obj->buffer.length != 8) { | 1083 | if (obj->buffer.length != 8) { |
1086 | printk(ACER_WARNING "Unknown buffer length %d\n", | 1084 | pr_warning("Unknown buffer length %d\n", obj->buffer.length); |
1087 | obj->buffer.length); | ||
1088 | kfree(obj); | 1085 | kfree(obj); |
1089 | return AE_ERROR; | 1086 | return AE_ERROR; |
1090 | } | 1087 | } |
@@ -1093,7 +1090,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device) | |||
1093 | kfree(obj); | 1090 | kfree(obj); |
1094 | 1091 | ||
1095 | if (return_value.error_code || return_value.ec_return_value) | 1092 | if (return_value.error_code || return_value.ec_return_value) |
1096 | printk(ACER_WARNING "Get Device Status failed: " | 1093 | pr_warning("Get Device Status failed: " |
1097 | "0x%x - 0x%x\n", return_value.error_code, | 1094 | "0x%x - 0x%x\n", return_value.error_code, |
1098 | return_value.ec_return_value); | 1095 | return_value.ec_return_value); |
1099 | else | 1096 | else |
@@ -1161,9 +1158,13 @@ static int acer_rfkill_set(void *data, bool blocked) | |||
1161 | { | 1158 | { |
1162 | acpi_status status; | 1159 | acpi_status status; |
1163 | u32 cap = (unsigned long)data; | 1160 | u32 cap = (unsigned long)data; |
1164 | status = set_u32(!blocked, cap); | 1161 | |
1165 | if (ACPI_FAILURE(status)) | 1162 | if (rfkill_inited) { |
1166 | return -ENODEV; | 1163 | status = set_u32(!blocked, cap); |
1164 | if (ACPI_FAILURE(status)) | ||
1165 | return -ENODEV; | ||
1166 | } | ||
1167 | |||
1167 | return 0; | 1168 | return 0; |
1168 | } | 1169 | } |
1169 | 1170 | ||
@@ -1187,14 +1188,16 @@ static struct rfkill *acer_rfkill_register(struct device *dev, | |||
1187 | return ERR_PTR(-ENOMEM); | 1188 | return ERR_PTR(-ENOMEM); |
1188 | 1189 | ||
1189 | status = get_device_status(&state, cap); | 1190 | status = get_device_status(&state, cap); |
1190 | if (ACPI_SUCCESS(status)) | ||
1191 | rfkill_init_sw_state(rfkill_dev, !state); | ||
1192 | 1191 | ||
1193 | err = rfkill_register(rfkill_dev); | 1192 | err = rfkill_register(rfkill_dev); |
1194 | if (err) { | 1193 | if (err) { |
1195 | rfkill_destroy(rfkill_dev); | 1194 | rfkill_destroy(rfkill_dev); |
1196 | return ERR_PTR(err); | 1195 | return ERR_PTR(err); |
1197 | } | 1196 | } |
1197 | |||
1198 | if (ACPI_SUCCESS(status)) | ||
1199 | rfkill_set_sw_state(rfkill_dev, !state); | ||
1200 | |||
1198 | return rfkill_dev; | 1201 | return rfkill_dev; |
1199 | } | 1202 | } |
1200 | 1203 | ||
@@ -1229,14 +1232,19 @@ static int acer_rfkill_init(struct device *dev) | |||
1229 | } | 1232 | } |
1230 | } | 1233 | } |
1231 | 1234 | ||
1232 | schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); | 1235 | rfkill_inited = true; |
1236 | |||
1237 | if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) | ||
1238 | schedule_delayed_work(&acer_rfkill_work, | ||
1239 | round_jiffies_relative(HZ)); | ||
1233 | 1240 | ||
1234 | return 0; | 1241 | return 0; |
1235 | } | 1242 | } |
1236 | 1243 | ||
1237 | static void acer_rfkill_exit(void) | 1244 | static void acer_rfkill_exit(void) |
1238 | { | 1245 | { |
1239 | cancel_delayed_work_sync(&acer_rfkill_work); | 1246 | if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) |
1247 | cancel_delayed_work_sync(&acer_rfkill_work); | ||
1240 | 1248 | ||
1241 | rfkill_unregister(wireless_rfkill); | 1249 | rfkill_unregister(wireless_rfkill); |
1242 | rfkill_destroy(wireless_rfkill); | 1250 | rfkill_destroy(wireless_rfkill); |
@@ -1309,7 +1317,7 @@ static void acer_wmi_notify(u32 value, void *context) | |||
1309 | 1317 | ||
1310 | status = wmi_get_event_data(value, &response); | 1318 | status = wmi_get_event_data(value, &response); |
1311 | if (status != AE_OK) { | 1319 | if (status != AE_OK) { |
1312 | printk(ACER_WARNING "bad event status 0x%x\n", status); | 1320 | pr_warning("bad event status 0x%x\n", status); |
1313 | return; | 1321 | return; |
1314 | } | 1322 | } |
1315 | 1323 | ||
@@ -1318,14 +1326,12 @@ static void acer_wmi_notify(u32 value, void *context) | |||
1318 | if (!obj) | 1326 | if (!obj) |
1319 | return; | 1327 | return; |
1320 | if (obj->type != ACPI_TYPE_BUFFER) { | 1328 | if (obj->type != ACPI_TYPE_BUFFER) { |
1321 | printk(ACER_WARNING "Unknown response received %d\n", | 1329 | pr_warning("Unknown response received %d\n", obj->type); |
1322 | obj->type); | ||
1323 | kfree(obj); | 1330 | kfree(obj); |
1324 | return; | 1331 | return; |
1325 | } | 1332 | } |
1326 | if (obj->buffer.length != 8) { | 1333 | if (obj->buffer.length != 8) { |
1327 | printk(ACER_WARNING "Unknown buffer length %d\n", | 1334 | pr_warning("Unknown buffer length %d\n", obj->buffer.length); |
1328 | obj->buffer.length); | ||
1329 | kfree(obj); | 1335 | kfree(obj); |
1330 | return; | 1336 | return; |
1331 | } | 1337 | } |
@@ -1335,13 +1341,26 @@ static void acer_wmi_notify(u32 value, void *context) | |||
1335 | 1341 | ||
1336 | switch (return_value.function) { | 1342 | switch (return_value.function) { |
1337 | case WMID_HOTKEY_EVENT: | 1343 | case WMID_HOTKEY_EVENT: |
1344 | if (return_value.device_state) { | ||
1345 | u16 device_state = return_value.device_state; | ||
1346 | pr_debug("device states: 0x%x\n", device_state); | ||
1347 | if (has_cap(ACER_CAP_WIRELESS)) | ||
1348 | rfkill_set_sw_state(wireless_rfkill, | ||
1349 | !(device_state & ACER_WMID3_GDS_WIRELESS)); | ||
1350 | if (has_cap(ACER_CAP_BLUETOOTH)) | ||
1351 | rfkill_set_sw_state(bluetooth_rfkill, | ||
1352 | !(device_state & ACER_WMID3_GDS_BLUETOOTH)); | ||
1353 | if (has_cap(ACER_CAP_THREEG)) | ||
1354 | rfkill_set_sw_state(threeg_rfkill, | ||
1355 | !(device_state & ACER_WMID3_GDS_THREEG)); | ||
1356 | } | ||
1338 | if (!sparse_keymap_report_event(acer_wmi_input_dev, | 1357 | if (!sparse_keymap_report_event(acer_wmi_input_dev, |
1339 | return_value.key_num, 1, true)) | 1358 | return_value.key_num, 1, true)) |
1340 | printk(ACER_WARNING "Unknown key number - 0x%x\n", | 1359 | pr_warning("Unknown key number - 0x%x\n", |
1341 | return_value.key_num); | 1360 | return_value.key_num); |
1342 | break; | 1361 | break; |
1343 | default: | 1362 | default: |
1344 | printk(ACER_WARNING "Unknown function number - %d - %d\n", | 1363 | pr_warning("Unknown function number - %d - %d\n", |
1345 | return_value.function, return_value.key_num); | 1364 | return_value.function, return_value.key_num); |
1346 | break; | 1365 | break; |
1347 | } | 1366 | } |
@@ -1370,8 +1389,7 @@ wmid3_set_lm_mode(struct lm_input_params *params, | |||
1370 | return AE_ERROR; | 1389 | return AE_ERROR; |
1371 | } | 1390 | } |
1372 | if (obj->buffer.length != 4) { | 1391 | if (obj->buffer.length != 4) { |
1373 | printk(ACER_WARNING "Unknown buffer length %d\n", | 1392 | pr_warning("Unknown buffer length %d\n", obj->buffer.length); |
1374 | obj->buffer.length); | ||
1375 | kfree(obj); | 1393 | kfree(obj); |
1376 | return AE_ERROR; | 1394 | return AE_ERROR; |
1377 | } | 1395 | } |
@@ -1396,11 +1414,11 @@ static int acer_wmi_enable_ec_raw(void) | |||
1396 | status = wmid3_set_lm_mode(¶ms, &return_value); | 1414 | status = wmid3_set_lm_mode(¶ms, &return_value); |
1397 | 1415 | ||
1398 | if (return_value.error_code || return_value.ec_return_value) | 1416 | if (return_value.error_code || return_value.ec_return_value) |
1399 | printk(ACER_WARNING "Enabling EC raw mode failed: " | 1417 | pr_warning("Enabling EC raw mode failed: " |
1400 | "0x%x - 0x%x\n", return_value.error_code, | 1418 | "0x%x - 0x%x\n", return_value.error_code, |
1401 | return_value.ec_return_value); | 1419 | return_value.ec_return_value); |
1402 | else | 1420 | else |
1403 | printk(ACER_INFO "Enabled EC raw mode"); | 1421 | pr_info("Enabled EC raw mode"); |
1404 | 1422 | ||
1405 | return status; | 1423 | return status; |
1406 | } | 1424 | } |
@@ -1419,7 +1437,7 @@ static int acer_wmi_enable_lm(void) | |||
1419 | status = wmid3_set_lm_mode(¶ms, &return_value); | 1437 | status = wmid3_set_lm_mode(¶ms, &return_value); |
1420 | 1438 | ||
1421 | if (return_value.error_code || return_value.ec_return_value) | 1439 | if (return_value.error_code || return_value.ec_return_value) |
1422 | printk(ACER_WARNING "Enabling Launch Manager failed: " | 1440 | pr_warning("Enabling Launch Manager failed: " |
1423 | "0x%x - 0x%x\n", return_value.error_code, | 1441 | "0x%x - 0x%x\n", return_value.error_code, |
1424 | return_value.ec_return_value); | 1442 | return_value.ec_return_value); |
1425 | 1443 | ||
@@ -1553,6 +1571,7 @@ pm_message_t state) | |||
1553 | 1571 | ||
1554 | if (has_cap(ACER_CAP_MAILLED)) { | 1572 | if (has_cap(ACER_CAP_MAILLED)) { |
1555 | get_u32(&value, ACER_CAP_MAILLED); | 1573 | get_u32(&value, ACER_CAP_MAILLED); |
1574 | set_u32(LED_OFF, ACER_CAP_MAILLED); | ||
1556 | data->mailled = value; | 1575 | data->mailled = value; |
1557 | } | 1576 | } |
1558 | 1577 | ||
@@ -1580,6 +1599,17 @@ static int acer_platform_resume(struct platform_device *device) | |||
1580 | return 0; | 1599 | return 0; |
1581 | } | 1600 | } |
1582 | 1601 | ||
1602 | static void acer_platform_shutdown(struct platform_device *device) | ||
1603 | { | ||
1604 | struct acer_data *data = &interface->data; | ||
1605 | |||
1606 | if (!data) | ||
1607 | return; | ||
1608 | |||
1609 | if (has_cap(ACER_CAP_MAILLED)) | ||
1610 | set_u32(LED_OFF, ACER_CAP_MAILLED); | ||
1611 | } | ||
1612 | |||
1583 | static struct platform_driver acer_platform_driver = { | 1613 | static struct platform_driver acer_platform_driver = { |
1584 | .driver = { | 1614 | .driver = { |
1585 | .name = "acer-wmi", | 1615 | .name = "acer-wmi", |
@@ -1589,6 +1619,7 @@ static struct platform_driver acer_platform_driver = { | |||
1589 | .remove = acer_platform_remove, | 1619 | .remove = acer_platform_remove, |
1590 | .suspend = acer_platform_suspend, | 1620 | .suspend = acer_platform_suspend, |
1591 | .resume = acer_platform_resume, | 1621 | .resume = acer_platform_resume, |
1622 | .shutdown = acer_platform_shutdown, | ||
1592 | }; | 1623 | }; |
1593 | 1624 | ||
1594 | static struct platform_device *acer_platform_device; | 1625 | static struct platform_device *acer_platform_device; |
@@ -1636,7 +1667,7 @@ static int create_debugfs(void) | |||
1636 | { | 1667 | { |
1637 | interface->debug.root = debugfs_create_dir("acer-wmi", NULL); | 1668 | interface->debug.root = debugfs_create_dir("acer-wmi", NULL); |
1638 | if (!interface->debug.root) { | 1669 | if (!interface->debug.root) { |
1639 | printk(ACER_ERR "Failed to create debugfs directory"); | 1670 | pr_err("Failed to create debugfs directory"); |
1640 | return -ENOMEM; | 1671 | return -ENOMEM; |
1641 | } | 1672 | } |
1642 | 1673 | ||
@@ -1657,11 +1688,10 @@ static int __init acer_wmi_init(void) | |||
1657 | { | 1688 | { |
1658 | int err; | 1689 | int err; |
1659 | 1690 | ||
1660 | printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n"); | 1691 | pr_info("Acer Laptop ACPI-WMI Extras\n"); |
1661 | 1692 | ||
1662 | if (dmi_check_system(acer_blacklist)) { | 1693 | if (dmi_check_system(acer_blacklist)) { |
1663 | printk(ACER_INFO "Blacklisted hardware detected - " | 1694 | pr_info("Blacklisted hardware detected - not loading\n"); |
1664 | "not loading\n"); | ||
1665 | return -ENODEV; | 1695 | return -ENODEV; |
1666 | } | 1696 | } |
1667 | 1697 | ||
@@ -1678,12 +1708,11 @@ static int __init acer_wmi_init(void) | |||
1678 | 1708 | ||
1679 | if (wmi_has_guid(WMID_GUID2) && interface) { | 1709 | if (wmi_has_guid(WMID_GUID2) && interface) { |
1680 | if (ACPI_FAILURE(WMID_set_capabilities())) { | 1710 | if (ACPI_FAILURE(WMID_set_capabilities())) { |
1681 | printk(ACER_ERR "Unable to detect available WMID " | 1711 | pr_err("Unable to detect available WMID devices\n"); |
1682 | "devices\n"); | ||
1683 | return -ENODEV; | 1712 | return -ENODEV; |
1684 | } | 1713 | } |
1685 | } else if (!wmi_has_guid(WMID_GUID2) && interface) { | 1714 | } else if (!wmi_has_guid(WMID_GUID2) && interface) { |
1686 | printk(ACER_ERR "No WMID device detection method found\n"); | 1715 | pr_err("No WMID device detection method found\n"); |
1687 | return -ENODEV; | 1716 | return -ENODEV; |
1688 | } | 1717 | } |
1689 | 1718 | ||
@@ -1691,8 +1720,7 @@ static int __init acer_wmi_init(void) | |||
1691 | interface = &AMW0_interface; | 1720 | interface = &AMW0_interface; |
1692 | 1721 | ||
1693 | if (ACPI_FAILURE(AMW0_set_capabilities())) { | 1722 | if (ACPI_FAILURE(AMW0_set_capabilities())) { |
1694 | printk(ACER_ERR "Unable to detect available AMW0 " | 1723 | pr_err("Unable to detect available AMW0 devices\n"); |
1695 | "devices\n"); | ||
1696 | return -ENODEV; | 1724 | return -ENODEV; |
1697 | } | 1725 | } |
1698 | } | 1726 | } |
@@ -1701,8 +1729,7 @@ static int __init acer_wmi_init(void) | |||
1701 | AMW0_find_mailled(); | 1729 | AMW0_find_mailled(); |
1702 | 1730 | ||
1703 | if (!interface) { | 1731 | if (!interface) { |
1704 | printk(ACER_INFO "No or unsupported WMI interface, unable to " | 1732 | pr_err("No or unsupported WMI interface, unable to load\n"); |
1705 | "load\n"); | ||
1706 | return -ENODEV; | 1733 | return -ENODEV; |
1707 | } | 1734 | } |
1708 | 1735 | ||
@@ -1710,22 +1737,22 @@ static int __init acer_wmi_init(void) | |||
1710 | 1737 | ||
1711 | if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) { | 1738 | if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) { |
1712 | interface->capability &= ~ACER_CAP_BRIGHTNESS; | 1739 | interface->capability &= ~ACER_CAP_BRIGHTNESS; |
1713 | printk(ACER_INFO "Brightness must be controlled by " | 1740 | pr_info("Brightness must be controlled by " |
1714 | "generic video driver\n"); | 1741 | "generic video driver\n"); |
1715 | } | 1742 | } |
1716 | 1743 | ||
1717 | if (wmi_has_guid(WMID_GUID3)) { | 1744 | if (wmi_has_guid(WMID_GUID3)) { |
1718 | if (ec_raw_mode) { | 1745 | if (ec_raw_mode) { |
1719 | if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { | 1746 | if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { |
1720 | printk(ACER_ERR "Cannot enable EC raw mode\n"); | 1747 | pr_err("Cannot enable EC raw mode\n"); |
1721 | return -ENODEV; | 1748 | return -ENODEV; |
1722 | } | 1749 | } |
1723 | } else if (ACPI_FAILURE(acer_wmi_enable_lm())) { | 1750 | } else if (ACPI_FAILURE(acer_wmi_enable_lm())) { |
1724 | printk(ACER_ERR "Cannot enable Launch Manager mode\n"); | 1751 | pr_err("Cannot enable Launch Manager mode\n"); |
1725 | return -ENODEV; | 1752 | return -ENODEV; |
1726 | } | 1753 | } |
1727 | } else if (ec_raw_mode) { | 1754 | } else if (ec_raw_mode) { |
1728 | printk(ACER_INFO "No WMID EC raw mode enable method\n"); | 1755 | pr_info("No WMID EC raw mode enable method\n"); |
1729 | } | 1756 | } |
1730 | 1757 | ||
1731 | if (wmi_has_guid(ACERWMID_EVENT_GUID)) { | 1758 | if (wmi_has_guid(ACERWMID_EVENT_GUID)) { |
@@ -1736,7 +1763,7 @@ static int __init acer_wmi_init(void) | |||
1736 | 1763 | ||
1737 | err = platform_driver_register(&acer_platform_driver); | 1764 | err = platform_driver_register(&acer_platform_driver); |
1738 | if (err) { | 1765 | if (err) { |
1739 | printk(ACER_ERR "Unable to register platform driver.\n"); | 1766 | pr_err("Unable to register platform driver.\n"); |
1740 | goto error_platform_register; | 1767 | goto error_platform_register; |
1741 | } | 1768 | } |
1742 | 1769 | ||
@@ -1791,7 +1818,7 @@ static void __exit acer_wmi_exit(void) | |||
1791 | platform_device_unregister(acer_platform_device); | 1818 | platform_device_unregister(acer_platform_device); |
1792 | platform_driver_unregister(&acer_platform_driver); | 1819 | platform_driver_unregister(&acer_platform_driver); |
1793 | 1820 | ||
1794 | printk(ACER_INFO "Acer Laptop WMI Extras unloaded\n"); | 1821 | pr_info("Acer Laptop WMI Extras unloaded\n"); |
1795 | return; | 1822 | return; |
1796 | } | 1823 | } |
1797 | 1824 | ||
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 5a6f7d7575d6..c53b3ff7978a 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
@@ -29,7 +29,7 @@ | |||
29 | * John Belmonte - ACPI code for Toshiba laptop was a good starting point. | 29 | * John Belmonte - ACPI code for Toshiba laptop was a good starting point. |
30 | * Eric Burghard - LED display support for W1N | 30 | * Eric Burghard - LED display support for W1N |
31 | * Josh Green - Light Sens support | 31 | * Josh Green - Light Sens support |
32 | * Thomas Tuttle - His first patch for led support was very helpfull | 32 | * Thomas Tuttle - His first patch for led support was very helpful |
33 | * Sam Lin - GPS support | 33 | * Sam Lin - GPS support |
34 | */ | 34 | */ |
35 | 35 | ||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/input/sparse-keymap.h> | 50 | #include <linux/input/sparse-keymap.h> |
51 | #include <linux/rfkill.h> | 51 | #include <linux/rfkill.h> |
52 | #include <linux/slab.h> | 52 | #include <linux/slab.h> |
53 | #include <linux/dmi.h> | ||
53 | #include <acpi/acpi_drivers.h> | 54 | #include <acpi/acpi_drivers.h> |
54 | #include <acpi/acpi_bus.h> | 55 | #include <acpi/acpi_bus.h> |
55 | 56 | ||
@@ -157,46 +158,9 @@ MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot " | |||
157 | #define METHOD_BRIGHTNESS_SET "SPLV" | 158 | #define METHOD_BRIGHTNESS_SET "SPLV" |
158 | #define METHOD_BRIGHTNESS_GET "GPLV" | 159 | #define METHOD_BRIGHTNESS_GET "GPLV" |
159 | 160 | ||
160 | /* Backlight */ | ||
161 | static acpi_handle lcd_switch_handle; | ||
162 | static char *lcd_switch_paths[] = { | ||
163 | "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ | ||
164 | "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ | ||
165 | "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ | ||
166 | "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */ | ||
167 | "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */ | ||
168 | "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */ | ||
169 | "\\_SB.PCI0.PX40.Q10", /* S1x */ | ||
170 | "\\Q10"}; /* A2x, L2D, L3D, M2E */ | ||
171 | |||
172 | /* Display */ | 161 | /* Display */ |
173 | #define METHOD_SWITCH_DISPLAY "SDSP" | 162 | #define METHOD_SWITCH_DISPLAY "SDSP" |
174 | 163 | ||
175 | static acpi_handle display_get_handle; | ||
176 | static char *display_get_paths[] = { | ||
177 | /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ | ||
178 | "\\_SB.PCI0.P0P1.VGA.GETD", | ||
179 | /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ | ||
180 | "\\_SB.PCI0.P0P2.VGA.GETD", | ||
181 | /* A6V A6Q */ | ||
182 | "\\_SB.PCI0.P0P3.VGA.GETD", | ||
183 | /* A6T, A6M */ | ||
184 | "\\_SB.PCI0.P0PA.VGA.GETD", | ||
185 | /* L3C */ | ||
186 | "\\_SB.PCI0.PCI1.VGAC.NMAP", | ||
187 | /* Z96F */ | ||
188 | "\\_SB.PCI0.VGA.GETD", | ||
189 | /* A2D */ | ||
190 | "\\ACTD", | ||
191 | /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */ | ||
192 | "\\ADVG", | ||
193 | /* P30 */ | ||
194 | "\\DNXT", | ||
195 | /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */ | ||
196 | "\\INFB", | ||
197 | /* A3F A6F A3N A3L M6N W3N W6A */ | ||
198 | "\\SSTE"}; | ||
199 | |||
200 | #define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */ | 164 | #define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */ |
201 | #define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */ | 165 | #define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */ |
202 | 166 | ||
@@ -246,7 +210,6 @@ struct asus_laptop { | |||
246 | 210 | ||
247 | int wireless_status; | 211 | int wireless_status; |
248 | bool have_rsts; | 212 | bool have_rsts; |
249 | int lcd_state; | ||
250 | 213 | ||
251 | struct rfkill *gps_rfkill; | 214 | struct rfkill *gps_rfkill; |
252 | 215 | ||
@@ -559,48 +522,6 @@ error: | |||
559 | /* | 522 | /* |
560 | * Backlight device | 523 | * Backlight device |
561 | */ | 524 | */ |
562 | static int asus_lcd_status(struct asus_laptop *asus) | ||
563 | { | ||
564 | return asus->lcd_state; | ||
565 | } | ||
566 | |||
567 | static int asus_lcd_set(struct asus_laptop *asus, int value) | ||
568 | { | ||
569 | int lcd = 0; | ||
570 | acpi_status status = 0; | ||
571 | |||
572 | lcd = !!value; | ||
573 | |||
574 | if (lcd == asus_lcd_status(asus)) | ||
575 | return 0; | ||
576 | |||
577 | if (!lcd_switch_handle) | ||
578 | return -ENODEV; | ||
579 | |||
580 | status = acpi_evaluate_object(lcd_switch_handle, | ||
581 | NULL, NULL, NULL); | ||
582 | |||
583 | if (ACPI_FAILURE(status)) { | ||
584 | pr_warning("Error switching LCD\n"); | ||
585 | return -ENODEV; | ||
586 | } | ||
587 | |||
588 | asus->lcd_state = lcd; | ||
589 | return 0; | ||
590 | } | ||
591 | |||
592 | static void lcd_blank(struct asus_laptop *asus, int blank) | ||
593 | { | ||
594 | struct backlight_device *bd = asus->backlight_device; | ||
595 | |||
596 | asus->lcd_state = (blank == FB_BLANK_UNBLANK); | ||
597 | |||
598 | if (bd) { | ||
599 | bd->props.power = blank; | ||
600 | backlight_update_status(bd); | ||
601 | } | ||
602 | } | ||
603 | |||
604 | static int asus_read_brightness(struct backlight_device *bd) | 525 | static int asus_read_brightness(struct backlight_device *bd) |
605 | { | 526 | { |
606 | struct asus_laptop *asus = bl_get_data(bd); | 527 | struct asus_laptop *asus = bl_get_data(bd); |
@@ -628,16 +549,9 @@ static int asus_set_brightness(struct backlight_device *bd, int value) | |||
628 | 549 | ||
629 | static int update_bl_status(struct backlight_device *bd) | 550 | static int update_bl_status(struct backlight_device *bd) |
630 | { | 551 | { |
631 | struct asus_laptop *asus = bl_get_data(bd); | ||
632 | int rv; | ||
633 | int value = bd->props.brightness; | 552 | int value = bd->props.brightness; |
634 | 553 | ||
635 | rv = asus_set_brightness(bd, value); | 554 | return asus_set_brightness(bd, value); |
636 | if (rv) | ||
637 | return rv; | ||
638 | |||
639 | value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0; | ||
640 | return asus_lcd_set(asus, value); | ||
641 | } | 555 | } |
642 | 556 | ||
643 | static const struct backlight_ops asusbl_ops = { | 557 | static const struct backlight_ops asusbl_ops = { |
@@ -661,8 +575,7 @@ static int asus_backlight_init(struct asus_laptop *asus) | |||
661 | struct backlight_properties props; | 575 | struct backlight_properties props; |
662 | 576 | ||
663 | if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) || | 577 | if (acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) || |
664 | acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) || | 578 | acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL)) |
665 | !lcd_switch_handle) | ||
666 | return 0; | 579 | return 0; |
667 | 580 | ||
668 | memset(&props, 0, sizeof(struct backlight_properties)); | 581 | memset(&props, 0, sizeof(struct backlight_properties)); |
@@ -971,41 +884,6 @@ static void asus_set_display(struct asus_laptop *asus, int value) | |||
971 | return; | 884 | return; |
972 | } | 885 | } |
973 | 886 | ||
974 | static int read_display(struct asus_laptop *asus) | ||
975 | { | ||
976 | unsigned long long value = 0; | ||
977 | acpi_status rv = AE_OK; | ||
978 | |||
979 | /* | ||
980 | * In most of the case, we know how to set the display, but sometime | ||
981 | * we can't read it | ||
982 | */ | ||
983 | if (display_get_handle) { | ||
984 | rv = acpi_evaluate_integer(display_get_handle, NULL, | ||
985 | NULL, &value); | ||
986 | if (ACPI_FAILURE(rv)) | ||
987 | pr_warning("Error reading display status\n"); | ||
988 | } | ||
989 | |||
990 | value &= 0x0F; /* needed for some models, shouldn't hurt others */ | ||
991 | |||
992 | return value; | ||
993 | } | ||
994 | |||
995 | /* | ||
996 | * Now, *this* one could be more user-friendly, but so far, no-one has | ||
997 | * complained. The significance of bits is the same as in store_disp() | ||
998 | */ | ||
999 | static ssize_t show_disp(struct device *dev, | ||
1000 | struct device_attribute *attr, char *buf) | ||
1001 | { | ||
1002 | struct asus_laptop *asus = dev_get_drvdata(dev); | ||
1003 | |||
1004 | if (!display_get_handle) | ||
1005 | return -ENODEV; | ||
1006 | return sprintf(buf, "%d\n", read_display(asus)); | ||
1007 | } | ||
1008 | |||
1009 | /* | 887 | /* |
1010 | * Experimental support for display switching. As of now: 1 should activate | 888 | * Experimental support for display switching. As of now: 1 should activate |
1011 | * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI. | 889 | * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI. |
@@ -1247,15 +1125,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event) | |||
1247 | struct asus_laptop *asus = acpi_driver_data(device); | 1125 | struct asus_laptop *asus = acpi_driver_data(device); |
1248 | u16 count; | 1126 | u16 count; |
1249 | 1127 | ||
1250 | /* | ||
1251 | * We need to tell the backlight device when the backlight power is | ||
1252 | * switched | ||
1253 | */ | ||
1254 | if (event == ATKD_LCD_ON) | ||
1255 | lcd_blank(asus, FB_BLANK_UNBLANK); | ||
1256 | else if (event == ATKD_LCD_OFF) | ||
1257 | lcd_blank(asus, FB_BLANK_POWERDOWN); | ||
1258 | |||
1259 | /* TODO Find a better way to handle events count. */ | 1128 | /* TODO Find a better way to handle events count. */ |
1260 | count = asus->event_count[event % 128]++; | 1129 | count = asus->event_count[event % 128]++; |
1261 | acpi_bus_generate_proc_event(asus->device, event, count); | 1130 | acpi_bus_generate_proc_event(asus->device, event, count); |
@@ -1282,7 +1151,7 @@ static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, | |||
1282 | show_bluetooth, store_bluetooth); | 1151 | show_bluetooth, store_bluetooth); |
1283 | static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); | 1152 | static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); |
1284 | static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); | 1153 | static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); |
1285 | static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp); | 1154 | static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp); |
1286 | static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); | 1155 | static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); |
1287 | static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); | 1156 | static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); |
1288 | static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw); | 1157 | static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw); |
@@ -1393,26 +1262,6 @@ static struct platform_driver platform_driver = { | |||
1393 | } | 1262 | } |
1394 | }; | 1263 | }; |
1395 | 1264 | ||
1396 | static int asus_handle_init(char *name, acpi_handle * handle, | ||
1397 | char **paths, int num_paths) | ||
1398 | { | ||
1399 | int i; | ||
1400 | acpi_status status; | ||
1401 | |||
1402 | for (i = 0; i < num_paths; i++) { | ||
1403 | status = acpi_get_handle(NULL, paths[i], handle); | ||
1404 | if (ACPI_SUCCESS(status)) | ||
1405 | return 0; | ||
1406 | } | ||
1407 | |||
1408 | *handle = NULL; | ||
1409 | return -ENODEV; | ||
1410 | } | ||
1411 | |||
1412 | #define ASUS_HANDLE_INIT(object) \ | ||
1413 | asus_handle_init(#object, &object##_handle, object##_paths, \ | ||
1414 | ARRAY_SIZE(object##_paths)) | ||
1415 | |||
1416 | /* | 1265 | /* |
1417 | * This function is used to initialize the context with right values. In this | 1266 | * This function is used to initialize the context with right values. In this |
1418 | * method, we can make all the detection we want, and modify the asus_laptop | 1267 | * method, we can make all the detection we want, and modify the asus_laptop |
@@ -1498,10 +1347,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus) | |||
1498 | if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) | 1347 | if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) |
1499 | asus->have_rsts = true; | 1348 | asus->have_rsts = true; |
1500 | 1349 | ||
1501 | /* Scheduled for removal */ | ||
1502 | ASUS_HANDLE_INIT(lcd_switch); | ||
1503 | ASUS_HANDLE_INIT(display_get); | ||
1504 | |||
1505 | kfree(model); | 1350 | kfree(model); |
1506 | 1351 | ||
1507 | return AE_OK; | 1352 | return AE_OK; |
@@ -1553,10 +1398,23 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus) | |||
1553 | asus_als_level(asus, asus->light_level); | 1398 | asus_als_level(asus, asus->light_level); |
1554 | } | 1399 | } |
1555 | 1400 | ||
1556 | asus->lcd_state = 1; /* LCD should be on when the module load */ | ||
1557 | return result; | 1401 | return result; |
1558 | } | 1402 | } |
1559 | 1403 | ||
1404 | static void __devinit asus_dmi_check(void) | ||
1405 | { | ||
1406 | const char *model; | ||
1407 | |||
1408 | model = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
1409 | if (!model) | ||
1410 | return; | ||
1411 | |||
1412 | /* On L1400B, WLED controls the sound card, don't mess with it ... */ | ||
1413 | if (strncmp(model, "L1400B", 6) == 0) { | ||
1414 | wlan_status = -1; | ||
1415 | } | ||
1416 | } | ||
1417 | |||
1560 | static bool asus_device_present; | 1418 | static bool asus_device_present; |
1561 | 1419 | ||
1562 | static int __devinit asus_acpi_add(struct acpi_device *device) | 1420 | static int __devinit asus_acpi_add(struct acpi_device *device) |
@@ -1575,6 +1433,8 @@ static int __devinit asus_acpi_add(struct acpi_device *device) | |||
1575 | device->driver_data = asus; | 1433 | device->driver_data = asus; |
1576 | asus->device = device; | 1434 | asus->device = device; |
1577 | 1435 | ||
1436 | asus_dmi_check(); | ||
1437 | |||
1578 | result = asus_acpi_init(asus); | 1438 | result = asus_acpi_init(asus); |
1579 | if (result) | 1439 | if (result) |
1580 | goto fail_platform; | 1440 | goto fail_platform; |
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c new file mode 100644 index 000000000000..0580d99b0798 --- /dev/null +++ b/drivers/platform/x86/asus-nb-wmi.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Asus Notebooks WMI hotkey driver | ||
3 | * | ||
4 | * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/input.h> | ||
27 | #include <linux/input/sparse-keymap.h> | ||
28 | |||
29 | #include "asus-wmi.h" | ||
30 | |||
31 | #define ASUS_NB_WMI_FILE "asus-nb-wmi" | ||
32 | |||
33 | MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>"); | ||
34 | MODULE_DESCRIPTION("Asus Notebooks WMI Hotkey Driver"); | ||
35 | MODULE_LICENSE("GPL"); | ||
36 | |||
37 | #define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C" | ||
38 | |||
39 | MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID); | ||
40 | |||
41 | static const struct key_entry asus_nb_wmi_keymap[] = { | ||
42 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, | ||
43 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, | ||
44 | { KE_KEY, 0x32, { KEY_MUTE } }, | ||
45 | { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */ | ||
46 | { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */ | ||
47 | { KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, | ||
48 | { KE_KEY, 0x41, { KEY_NEXTSONG } }, | ||
49 | { KE_KEY, 0x43, { KEY_STOPCD } }, | ||
50 | { KE_KEY, 0x45, { KEY_PLAYPAUSE } }, | ||
51 | { KE_KEY, 0x4c, { KEY_MEDIA } }, | ||
52 | { KE_KEY, 0x50, { KEY_EMAIL } }, | ||
53 | { KE_KEY, 0x51, { KEY_WWW } }, | ||
54 | { KE_KEY, 0x55, { KEY_CALC } }, | ||
55 | { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */ | ||
56 | { KE_KEY, 0x5D, { KEY_WLAN } }, | ||
57 | { KE_KEY, 0x5E, { KEY_WLAN } }, | ||
58 | { KE_KEY, 0x5F, { KEY_WLAN } }, | ||
59 | { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } }, | ||
60 | { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, | ||
61 | { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, | ||
62 | { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, | ||
63 | { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, | ||
64 | { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, | ||
65 | { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, | ||
66 | { KE_KEY, 0x82, { KEY_CAMERA } }, | ||
67 | { KE_KEY, 0x88, { KEY_RFKILL } }, | ||
68 | { KE_KEY, 0x8A, { KEY_PROG1 } }, | ||
69 | { KE_KEY, 0x95, { KEY_MEDIA } }, | ||
70 | { KE_KEY, 0x99, { KEY_PHONE } }, | ||
71 | { KE_KEY, 0xb5, { KEY_CALC } }, | ||
72 | { KE_KEY, 0xc4, { KEY_KBDILLUMUP } }, | ||
73 | { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } }, | ||
74 | { KE_END, 0}, | ||
75 | }; | ||
76 | |||
77 | static struct asus_wmi_driver asus_nb_wmi_driver = { | ||
78 | .name = ASUS_NB_WMI_FILE, | ||
79 | .owner = THIS_MODULE, | ||
80 | .event_guid = ASUS_NB_WMI_EVENT_GUID, | ||
81 | .keymap = asus_nb_wmi_keymap, | ||
82 | .input_name = "Asus WMI hotkeys", | ||
83 | .input_phys = ASUS_NB_WMI_FILE "/input0", | ||
84 | }; | ||
85 | |||
86 | |||
87 | static int __init asus_nb_wmi_init(void) | ||
88 | { | ||
89 | return asus_wmi_register_driver(&asus_nb_wmi_driver); | ||
90 | } | ||
91 | |||
92 | static void __exit asus_nb_wmi_exit(void) | ||
93 | { | ||
94 | asus_wmi_unregister_driver(&asus_nb_wmi_driver); | ||
95 | } | ||
96 | |||
97 | module_init(asus_nb_wmi_init); | ||
98 | module_exit(asus_nb_wmi_exit); | ||
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c new file mode 100644 index 000000000000..efc776cb0c66 --- /dev/null +++ b/drivers/platform/x86/asus-wmi.c | |||
@@ -0,0 +1,1656 @@ | |||
1 | /* | ||
2 | * Asus PC WMI hotkey driver | ||
3 | * | ||
4 | * Copyright(C) 2010 Intel Corporation. | ||
5 | * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com> | ||
6 | * | ||
7 | * Portions based on wistron_btns.c: | ||
8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | ||
9 | * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> | ||
10 | * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
25 | */ | ||
26 | |||
27 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
28 | |||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/input.h> | ||
35 | #include <linux/input/sparse-keymap.h> | ||
36 | #include <linux/fb.h> | ||
37 | #include <linux/backlight.h> | ||
38 | #include <linux/leds.h> | ||
39 | #include <linux/rfkill.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/pci_hotplug.h> | ||
42 | #include <linux/hwmon.h> | ||
43 | #include <linux/hwmon-sysfs.h> | ||
44 | #include <linux/debugfs.h> | ||
45 | #include <linux/seq_file.h> | ||
46 | #include <linux/platform_device.h> | ||
47 | #include <acpi/acpi_bus.h> | ||
48 | #include <acpi/acpi_drivers.h> | ||
49 | |||
50 | #include "asus-wmi.h" | ||
51 | |||
52 | MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>, " | ||
53 | "Yong Wang <yong.y.wang@intel.com>"); | ||
54 | MODULE_DESCRIPTION("Asus Generic WMI Driver"); | ||
55 | MODULE_LICENSE("GPL"); | ||
56 | |||
57 | #define to_platform_driver(drv) \ | ||
58 | (container_of((drv), struct platform_driver, driver)) | ||
59 | |||
60 | #define to_asus_wmi_driver(pdrv) \ | ||
61 | (container_of((pdrv), struct asus_wmi_driver, platform_driver)) | ||
62 | |||
63 | #define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" | ||
64 | |||
65 | #define NOTIFY_BRNUP_MIN 0x11 | ||
66 | #define NOTIFY_BRNUP_MAX 0x1f | ||
67 | #define NOTIFY_BRNDOWN_MIN 0x20 | ||
68 | #define NOTIFY_BRNDOWN_MAX 0x2e | ||
69 | |||
70 | /* WMI Methods */ | ||
71 | #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ | ||
72 | #define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */ | ||
73 | #define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */ | ||
74 | #define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */ | ||
75 | #define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ | ||
76 | #define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ | ||
77 | #define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ | ||
78 | #define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ | ||
79 | #define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ | ||
80 | #define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */ | ||
81 | #define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */ | ||
82 | #define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */ | ||
83 | #define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/ | ||
84 | #define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */ | ||
85 | #define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */ | ||
86 | #define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */ | ||
87 | #define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */ | ||
88 | #define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */ | ||
89 | #define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */ | ||
90 | |||
91 | #define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE | ||
92 | |||
93 | /* Wireless */ | ||
94 | #define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 | ||
95 | #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 | ||
96 | #define ASUS_WMI_DEVID_WLAN 0x00010011 | ||
97 | #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 | ||
98 | #define ASUS_WMI_DEVID_GPS 0x00010015 | ||
99 | #define ASUS_WMI_DEVID_WIMAX 0x00010017 | ||
100 | #define ASUS_WMI_DEVID_WWAN3G 0x00010019 | ||
101 | #define ASUS_WMI_DEVID_UWB 0x00010021 | ||
102 | |||
103 | /* Leds */ | ||
104 | /* 0x000200XX and 0x000400XX */ | ||
105 | |||
106 | /* Backlight and Brightness */ | ||
107 | #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 | ||
108 | #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 | ||
109 | #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 | ||
110 | #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ | ||
111 | |||
112 | /* Misc */ | ||
113 | #define ASUS_WMI_DEVID_CAMERA 0x00060013 | ||
114 | |||
115 | /* Storage */ | ||
116 | #define ASUS_WMI_DEVID_CARDREADER 0x00080013 | ||
117 | |||
118 | /* Input */ | ||
119 | #define ASUS_WMI_DEVID_TOUCHPAD 0x00100011 | ||
120 | #define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012 | ||
121 | |||
122 | /* Fan, Thermal */ | ||
123 | #define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011 | ||
124 | #define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 | ||
125 | |||
126 | /* Power */ | ||
127 | #define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012 | ||
128 | |||
129 | /* DSTS masks */ | ||
130 | #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 | ||
131 | #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 | ||
132 | #define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000 | ||
133 | #define ASUS_WMI_DSTS_USER_BIT 0x00020000 | ||
134 | #define ASUS_WMI_DSTS_BIOS_BIT 0x00040000 | ||
135 | #define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF | ||
136 | #define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00 | ||
137 | |||
138 | struct bios_args { | ||
139 | u32 arg0; | ||
140 | u32 arg1; | ||
141 | } __packed; | ||
142 | |||
143 | /* | ||
144 | * <platform>/ - debugfs root directory | ||
145 | * dev_id - current dev_id | ||
146 | * ctrl_param - current ctrl_param | ||
147 | * method_id - current method_id | ||
148 | * devs - call DEVS(dev_id, ctrl_param) and print result | ||
149 | * dsts - call DSTS(dev_id) and print result | ||
150 | * call - call method_id(dev_id, ctrl_param) and print result | ||
151 | */ | ||
152 | struct asus_wmi_debug { | ||
153 | struct dentry *root; | ||
154 | u32 method_id; | ||
155 | u32 dev_id; | ||
156 | u32 ctrl_param; | ||
157 | }; | ||
158 | |||
159 | struct asus_rfkill { | ||
160 | struct asus_wmi *asus; | ||
161 | struct rfkill *rfkill; | ||
162 | u32 dev_id; | ||
163 | }; | ||
164 | |||
165 | struct asus_wmi { | ||
166 | int dsts_id; | ||
167 | int spec; | ||
168 | int sfun; | ||
169 | |||
170 | struct input_dev *inputdev; | ||
171 | struct backlight_device *backlight_device; | ||
172 | struct device *hwmon_device; | ||
173 | struct platform_device *platform_device; | ||
174 | |||
175 | struct led_classdev tpd_led; | ||
176 | int tpd_led_wk; | ||
177 | struct workqueue_struct *led_workqueue; | ||
178 | struct work_struct tpd_led_work; | ||
179 | |||
180 | struct asus_rfkill wlan; | ||
181 | struct asus_rfkill bluetooth; | ||
182 | struct asus_rfkill wimax; | ||
183 | struct asus_rfkill wwan3g; | ||
184 | |||
185 | struct hotplug_slot *hotplug_slot; | ||
186 | struct mutex hotplug_lock; | ||
187 | struct mutex wmi_lock; | ||
188 | struct workqueue_struct *hotplug_workqueue; | ||
189 | struct work_struct hotplug_work; | ||
190 | |||
191 | struct asus_wmi_debug debug; | ||
192 | |||
193 | struct asus_wmi_driver *driver; | ||
194 | }; | ||
195 | |||
196 | static int asus_wmi_input_init(struct asus_wmi *asus) | ||
197 | { | ||
198 | int err; | ||
199 | |||
200 | asus->inputdev = input_allocate_device(); | ||
201 | if (!asus->inputdev) | ||
202 | return -ENOMEM; | ||
203 | |||
204 | asus->inputdev->name = asus->driver->input_name; | ||
205 | asus->inputdev->phys = asus->driver->input_phys; | ||
206 | asus->inputdev->id.bustype = BUS_HOST; | ||
207 | asus->inputdev->dev.parent = &asus->platform_device->dev; | ||
208 | |||
209 | err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL); | ||
210 | if (err) | ||
211 | goto err_free_dev; | ||
212 | |||
213 | err = input_register_device(asus->inputdev); | ||
214 | if (err) | ||
215 | goto err_free_keymap; | ||
216 | |||
217 | return 0; | ||
218 | |||
219 | err_free_keymap: | ||
220 | sparse_keymap_free(asus->inputdev); | ||
221 | err_free_dev: | ||
222 | input_free_device(asus->inputdev); | ||
223 | return err; | ||
224 | } | ||
225 | |||
226 | static void asus_wmi_input_exit(struct asus_wmi *asus) | ||
227 | { | ||
228 | if (asus->inputdev) { | ||
229 | sparse_keymap_free(asus->inputdev); | ||
230 | input_unregister_device(asus->inputdev); | ||
231 | } | ||
232 | |||
233 | asus->inputdev = NULL; | ||
234 | } | ||
235 | |||
236 | static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, | ||
237 | u32 *retval) | ||
238 | { | ||
239 | struct bios_args args = { | ||
240 | .arg0 = arg0, | ||
241 | .arg1 = arg1, | ||
242 | }; | ||
243 | struct acpi_buffer input = { (acpi_size) sizeof(args), &args }; | ||
244 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
245 | acpi_status status; | ||
246 | union acpi_object *obj; | ||
247 | u32 tmp; | ||
248 | |||
249 | status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, method_id, | ||
250 | &input, &output); | ||
251 | |||
252 | if (ACPI_FAILURE(status)) | ||
253 | goto exit; | ||
254 | |||
255 | obj = (union acpi_object *)output.pointer; | ||
256 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
257 | tmp = (u32) obj->integer.value; | ||
258 | else | ||
259 | tmp = 0; | ||
260 | |||
261 | if (retval) | ||
262 | *retval = tmp; | ||
263 | |||
264 | kfree(obj); | ||
265 | |||
266 | exit: | ||
267 | if (ACPI_FAILURE(status)) | ||
268 | return -EIO; | ||
269 | |||
270 | if (tmp == ASUS_WMI_UNSUPPORTED_METHOD) | ||
271 | return -ENODEV; | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval) | ||
277 | { | ||
278 | return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval); | ||
279 | } | ||
280 | |||
281 | static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, | ||
282 | u32 *retval) | ||
283 | { | ||
284 | return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id, | ||
285 | ctrl_param, retval); | ||
286 | } | ||
287 | |||
288 | /* Helper for special devices with magic return codes */ | ||
289 | static int asus_wmi_get_devstate_bits(struct asus_wmi *asus, | ||
290 | u32 dev_id, u32 mask) | ||
291 | { | ||
292 | u32 retval = 0; | ||
293 | int err; | ||
294 | |||
295 | err = asus_wmi_get_devstate(asus, dev_id, &retval); | ||
296 | |||
297 | if (err < 0) | ||
298 | return err; | ||
299 | |||
300 | if (!(retval & ASUS_WMI_DSTS_PRESENCE_BIT)) | ||
301 | return -ENODEV; | ||
302 | |||
303 | if (mask == ASUS_WMI_DSTS_STATUS_BIT) { | ||
304 | if (retval & ASUS_WMI_DSTS_UNKNOWN_BIT) | ||
305 | return -ENODEV; | ||
306 | } | ||
307 | |||
308 | return retval & mask; | ||
309 | } | ||
310 | |||
311 | static int asus_wmi_get_devstate_simple(struct asus_wmi *asus, u32 dev_id) | ||
312 | { | ||
313 | return asus_wmi_get_devstate_bits(asus, dev_id, | ||
314 | ASUS_WMI_DSTS_STATUS_BIT); | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * LEDs | ||
319 | */ | ||
320 | /* | ||
321 | * These functions actually update the LEDs, and are called from a | ||
322 | * workqueue. By doing this as separate work rather than when the LED | ||
323 | * subsystem asks, we avoid messing with the Asus ACPI stuff during a | ||
324 | * potentially bad time, such as a timer interrupt. | ||
325 | */ | ||
326 | static void tpd_led_update(struct work_struct *work) | ||
327 | { | ||
328 | int ctrl_param; | ||
329 | struct asus_wmi *asus; | ||
330 | |||
331 | asus = container_of(work, struct asus_wmi, tpd_led_work); | ||
332 | |||
333 | ctrl_param = asus->tpd_led_wk; | ||
334 | asus_wmi_set_devstate(ASUS_WMI_DEVID_TOUCHPAD_LED, ctrl_param, NULL); | ||
335 | } | ||
336 | |||
337 | static void tpd_led_set(struct led_classdev *led_cdev, | ||
338 | enum led_brightness value) | ||
339 | { | ||
340 | struct asus_wmi *asus; | ||
341 | |||
342 | asus = container_of(led_cdev, struct asus_wmi, tpd_led); | ||
343 | |||
344 | asus->tpd_led_wk = !!value; | ||
345 | queue_work(asus->led_workqueue, &asus->tpd_led_work); | ||
346 | } | ||
347 | |||
348 | static int read_tpd_led_state(struct asus_wmi *asus) | ||
349 | { | ||
350 | return asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_TOUCHPAD_LED); | ||
351 | } | ||
352 | |||
353 | static enum led_brightness tpd_led_get(struct led_classdev *led_cdev) | ||
354 | { | ||
355 | struct asus_wmi *asus; | ||
356 | |||
357 | asus = container_of(led_cdev, struct asus_wmi, tpd_led); | ||
358 | |||
359 | return read_tpd_led_state(asus); | ||
360 | } | ||
361 | |||
362 | static int asus_wmi_led_init(struct asus_wmi *asus) | ||
363 | { | ||
364 | int rv; | ||
365 | |||
366 | if (read_tpd_led_state(asus) < 0) | ||
367 | return 0; | ||
368 | |||
369 | asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); | ||
370 | if (!asus->led_workqueue) | ||
371 | return -ENOMEM; | ||
372 | INIT_WORK(&asus->tpd_led_work, tpd_led_update); | ||
373 | |||
374 | asus->tpd_led.name = "asus::touchpad"; | ||
375 | asus->tpd_led.brightness_set = tpd_led_set; | ||
376 | asus->tpd_led.brightness_get = tpd_led_get; | ||
377 | asus->tpd_led.max_brightness = 1; | ||
378 | |||
379 | rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led); | ||
380 | if (rv) { | ||
381 | destroy_workqueue(asus->led_workqueue); | ||
382 | return rv; | ||
383 | } | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | static void asus_wmi_led_exit(struct asus_wmi *asus) | ||
389 | { | ||
390 | if (asus->tpd_led.dev) | ||
391 | led_classdev_unregister(&asus->tpd_led); | ||
392 | if (asus->led_workqueue) | ||
393 | destroy_workqueue(asus->led_workqueue); | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * PCI hotplug (for wlan rfkill) | ||
398 | */ | ||
399 | static bool asus_wlan_rfkill_blocked(struct asus_wmi *asus) | ||
400 | { | ||
401 | int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN); | ||
402 | |||
403 | if (result < 0) | ||
404 | return false; | ||
405 | return !result; | ||
406 | } | ||
407 | |||
408 | static void asus_rfkill_hotplug(struct asus_wmi *asus) | ||
409 | { | ||
410 | struct pci_dev *dev; | ||
411 | struct pci_bus *bus; | ||
412 | bool blocked; | ||
413 | bool absent; | ||
414 | u32 l; | ||
415 | |||
416 | mutex_lock(&asus->wmi_lock); | ||
417 | blocked = asus_wlan_rfkill_blocked(asus); | ||
418 | mutex_unlock(&asus->wmi_lock); | ||
419 | |||
420 | mutex_lock(&asus->hotplug_lock); | ||
421 | |||
422 | if (asus->wlan.rfkill) | ||
423 | rfkill_set_sw_state(asus->wlan.rfkill, blocked); | ||
424 | |||
425 | if (asus->hotplug_slot) { | ||
426 | bus = pci_find_bus(0, 1); | ||
427 | if (!bus) { | ||
428 | pr_warning("Unable to find PCI bus 1?\n"); | ||
429 | goto out_unlock; | ||
430 | } | ||
431 | |||
432 | if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) { | ||
433 | pr_err("Unable to read PCI config space?\n"); | ||
434 | goto out_unlock; | ||
435 | } | ||
436 | absent = (l == 0xffffffff); | ||
437 | |||
438 | if (blocked != absent) { | ||
439 | pr_warning("BIOS says wireless lan is %s, " | ||
440 | "but the pci device is %s\n", | ||
441 | blocked ? "blocked" : "unblocked", | ||
442 | absent ? "absent" : "present"); | ||
443 | pr_warning("skipped wireless hotplug as probably " | ||
444 | "inappropriate for this model\n"); | ||
445 | goto out_unlock; | ||
446 | } | ||
447 | |||
448 | if (!blocked) { | ||
449 | dev = pci_get_slot(bus, 0); | ||
450 | if (dev) { | ||
451 | /* Device already present */ | ||
452 | pci_dev_put(dev); | ||
453 | goto out_unlock; | ||
454 | } | ||
455 | dev = pci_scan_single_device(bus, 0); | ||
456 | if (dev) { | ||
457 | pci_bus_assign_resources(bus); | ||
458 | if (pci_bus_add_device(dev)) | ||
459 | pr_err("Unable to hotplug wifi\n"); | ||
460 | } | ||
461 | } else { | ||
462 | dev = pci_get_slot(bus, 0); | ||
463 | if (dev) { | ||
464 | pci_remove_bus_device(dev); | ||
465 | pci_dev_put(dev); | ||
466 | } | ||
467 | } | ||
468 | } | ||
469 | |||
470 | out_unlock: | ||
471 | mutex_unlock(&asus->hotplug_lock); | ||
472 | } | ||
473 | |||
474 | static void asus_rfkill_notify(acpi_handle handle, u32 event, void *data) | ||
475 | { | ||
476 | struct asus_wmi *asus = data; | ||
477 | |||
478 | if (event != ACPI_NOTIFY_BUS_CHECK) | ||
479 | return; | ||
480 | |||
481 | /* | ||
482 | * We can't call asus_rfkill_hotplug directly because most | ||
483 | * of the time WMBC is still being executed and is not reentrant. | ||
484 | * There is currently no way to tell ACPICA that we want this | ||
485 | * method to be serialized, so we schedule an asus_rfkill_hotplug | ||
486 | * call later, in a safer context. | ||
487 | */ | ||
488 | queue_work(asus->hotplug_workqueue, &asus->hotplug_work); | ||
489 | } | ||
490 | |||
491 | static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node) | ||
492 | { | ||
493 | acpi_status status; | ||
494 | acpi_handle handle; | ||
495 | |||
496 | status = acpi_get_handle(NULL, node, &handle); | ||
497 | |||
498 | if (ACPI_SUCCESS(status)) { | ||
499 | status = acpi_install_notify_handler(handle, | ||
500 | ACPI_SYSTEM_NOTIFY, | ||
501 | asus_rfkill_notify, asus); | ||
502 | if (ACPI_FAILURE(status)) | ||
503 | pr_warning("Failed to register notify on %s\n", node); | ||
504 | } else | ||
505 | return -ENODEV; | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node) | ||
511 | { | ||
512 | acpi_status status = AE_OK; | ||
513 | acpi_handle handle; | ||
514 | |||
515 | status = acpi_get_handle(NULL, node, &handle); | ||
516 | |||
517 | if (ACPI_SUCCESS(status)) { | ||
518 | status = acpi_remove_notify_handler(handle, | ||
519 | ACPI_SYSTEM_NOTIFY, | ||
520 | asus_rfkill_notify); | ||
521 | if (ACPI_FAILURE(status)) | ||
522 | pr_err("Error removing rfkill notify handler %s\n", | ||
523 | node); | ||
524 | } | ||
525 | } | ||
526 | |||
527 | static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot, | ||
528 | u8 *value) | ||
529 | { | ||
530 | struct asus_wmi *asus = hotplug_slot->private; | ||
531 | int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN); | ||
532 | |||
533 | if (result < 0) | ||
534 | return result; | ||
535 | |||
536 | *value = !!result; | ||
537 | return 0; | ||
538 | } | ||
539 | |||
540 | static void asus_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot) | ||
541 | { | ||
542 | kfree(hotplug_slot->info); | ||
543 | kfree(hotplug_slot); | ||
544 | } | ||
545 | |||
546 | static struct hotplug_slot_ops asus_hotplug_slot_ops = { | ||
547 | .owner = THIS_MODULE, | ||
548 | .get_adapter_status = asus_get_adapter_status, | ||
549 | .get_power_status = asus_get_adapter_status, | ||
550 | }; | ||
551 | |||
552 | static void asus_hotplug_work(struct work_struct *work) | ||
553 | { | ||
554 | struct asus_wmi *asus; | ||
555 | |||
556 | asus = container_of(work, struct asus_wmi, hotplug_work); | ||
557 | asus_rfkill_hotplug(asus); | ||
558 | } | ||
559 | |||
560 | static int asus_setup_pci_hotplug(struct asus_wmi *asus) | ||
561 | { | ||
562 | int ret = -ENOMEM; | ||
563 | struct pci_bus *bus = pci_find_bus(0, 1); | ||
564 | |||
565 | if (!bus) { | ||
566 | pr_err("Unable to find wifi PCI bus\n"); | ||
567 | return -ENODEV; | ||
568 | } | ||
569 | |||
570 | asus->hotplug_workqueue = | ||
571 | create_singlethread_workqueue("hotplug_workqueue"); | ||
572 | if (!asus->hotplug_workqueue) | ||
573 | goto error_workqueue; | ||
574 | |||
575 | INIT_WORK(&asus->hotplug_work, asus_hotplug_work); | ||
576 | |||
577 | asus->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); | ||
578 | if (!asus->hotplug_slot) | ||
579 | goto error_slot; | ||
580 | |||
581 | asus->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), | ||
582 | GFP_KERNEL); | ||
583 | if (!asus->hotplug_slot->info) | ||
584 | goto error_info; | ||
585 | |||
586 | asus->hotplug_slot->private = asus; | ||
587 | asus->hotplug_slot->release = &asus_cleanup_pci_hotplug; | ||
588 | asus->hotplug_slot->ops = &asus_hotplug_slot_ops; | ||
589 | asus_get_adapter_status(asus->hotplug_slot, | ||
590 | &asus->hotplug_slot->info->adapter_status); | ||
591 | |||
592 | ret = pci_hp_register(asus->hotplug_slot, bus, 0, "asus-wifi"); | ||
593 | if (ret) { | ||
594 | pr_err("Unable to register hotplug slot - %d\n", ret); | ||
595 | goto error_register; | ||
596 | } | ||
597 | |||
598 | return 0; | ||
599 | |||
600 | error_register: | ||
601 | kfree(asus->hotplug_slot->info); | ||
602 | error_info: | ||
603 | kfree(asus->hotplug_slot); | ||
604 | asus->hotplug_slot = NULL; | ||
605 | error_slot: | ||
606 | destroy_workqueue(asus->hotplug_workqueue); | ||
607 | error_workqueue: | ||
608 | return ret; | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * Rfkill devices | ||
613 | */ | ||
614 | static int asus_rfkill_set(void *data, bool blocked) | ||
615 | { | ||
616 | struct asus_rfkill *priv = data; | ||
617 | u32 ctrl_param = !blocked; | ||
618 | |||
619 | return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL); | ||
620 | } | ||
621 | |||
622 | static void asus_rfkill_query(struct rfkill *rfkill, void *data) | ||
623 | { | ||
624 | struct asus_rfkill *priv = data; | ||
625 | int result; | ||
626 | |||
627 | result = asus_wmi_get_devstate_simple(priv->asus, priv->dev_id); | ||
628 | |||
629 | if (result < 0) | ||
630 | return; | ||
631 | |||
632 | rfkill_set_sw_state(priv->rfkill, !result); | ||
633 | } | ||
634 | |||
635 | static int asus_rfkill_wlan_set(void *data, bool blocked) | ||
636 | { | ||
637 | struct asus_rfkill *priv = data; | ||
638 | struct asus_wmi *asus = priv->asus; | ||
639 | int ret; | ||
640 | |||
641 | /* | ||
642 | * This handler is enabled only if hotplug is enabled. | ||
643 | * In this case, asus_wmi_set_devstate() will trigger a | ||
644 | * WMI notification, and we need to wait for that call | ||
645 | * to finish before we can invoke any other | ||
646 | * WMI method. | ||
647 | */ | ||
648 | mutex_lock(&asus->wmi_lock); | ||
649 | ret = asus_rfkill_set(data, blocked); | ||
650 | mutex_unlock(&asus->wmi_lock); | ||
651 | return ret; | ||
652 | } | ||
653 | |||
654 | static const struct rfkill_ops asus_rfkill_wlan_ops = { | ||
655 | .set_block = asus_rfkill_wlan_set, | ||
656 | .query = asus_rfkill_query, | ||
657 | }; | ||
658 | |||
659 | static const struct rfkill_ops asus_rfkill_ops = { | ||
660 | .set_block = asus_rfkill_set, | ||
661 | .query = asus_rfkill_query, | ||
662 | }; | ||
663 | |||
664 | static int asus_new_rfkill(struct asus_wmi *asus, | ||
665 | struct asus_rfkill *arfkill, | ||
666 | const char *name, enum rfkill_type type, int dev_id) | ||
667 | { | ||
668 | int result = asus_wmi_get_devstate_simple(asus, dev_id); | ||
669 | struct rfkill **rfkill = &arfkill->rfkill; | ||
670 | |||
671 | if (result < 0) | ||
672 | return result; | ||
673 | |||
674 | arfkill->dev_id = dev_id; | ||
675 | arfkill->asus = asus; | ||
676 | |||
677 | if (dev_id == ASUS_WMI_DEVID_WLAN && asus->driver->hotplug_wireless) | ||
678 | *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type, | ||
679 | &asus_rfkill_wlan_ops, arfkill); | ||
680 | else | ||
681 | *rfkill = rfkill_alloc(name, &asus->platform_device->dev, type, | ||
682 | &asus_rfkill_ops, arfkill); | ||
683 | |||
684 | if (!*rfkill) | ||
685 | return -EINVAL; | ||
686 | |||
687 | rfkill_init_sw_state(*rfkill, !result); | ||
688 | result = rfkill_register(*rfkill); | ||
689 | if (result) { | ||
690 | rfkill_destroy(*rfkill); | ||
691 | *rfkill = NULL; | ||
692 | return result; | ||
693 | } | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | static void asus_wmi_rfkill_exit(struct asus_wmi *asus) | ||
698 | { | ||
699 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); | ||
700 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); | ||
701 | asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); | ||
702 | if (asus->wlan.rfkill) { | ||
703 | rfkill_unregister(asus->wlan.rfkill); | ||
704 | rfkill_destroy(asus->wlan.rfkill); | ||
705 | asus->wlan.rfkill = NULL; | ||
706 | } | ||
707 | /* | ||
708 | * Refresh pci hotplug in case the rfkill state was changed after | ||
709 | * asus_unregister_rfkill_notifier() | ||
710 | */ | ||
711 | asus_rfkill_hotplug(asus); | ||
712 | if (asus->hotplug_slot) | ||
713 | pci_hp_deregister(asus->hotplug_slot); | ||
714 | if (asus->hotplug_workqueue) | ||
715 | destroy_workqueue(asus->hotplug_workqueue); | ||
716 | |||
717 | if (asus->bluetooth.rfkill) { | ||
718 | rfkill_unregister(asus->bluetooth.rfkill); | ||
719 | rfkill_destroy(asus->bluetooth.rfkill); | ||
720 | asus->bluetooth.rfkill = NULL; | ||
721 | } | ||
722 | if (asus->wimax.rfkill) { | ||
723 | rfkill_unregister(asus->wimax.rfkill); | ||
724 | rfkill_destroy(asus->wimax.rfkill); | ||
725 | asus->wimax.rfkill = NULL; | ||
726 | } | ||
727 | if (asus->wwan3g.rfkill) { | ||
728 | rfkill_unregister(asus->wwan3g.rfkill); | ||
729 | rfkill_destroy(asus->wwan3g.rfkill); | ||
730 | asus->wwan3g.rfkill = NULL; | ||
731 | } | ||
732 | } | ||
733 | |||
734 | static int asus_wmi_rfkill_init(struct asus_wmi *asus) | ||
735 | { | ||
736 | int result = 0; | ||
737 | |||
738 | mutex_init(&asus->hotplug_lock); | ||
739 | mutex_init(&asus->wmi_lock); | ||
740 | |||
741 | result = asus_new_rfkill(asus, &asus->wlan, "asus-wlan", | ||
742 | RFKILL_TYPE_WLAN, ASUS_WMI_DEVID_WLAN); | ||
743 | |||
744 | if (result && result != -ENODEV) | ||
745 | goto exit; | ||
746 | |||
747 | result = asus_new_rfkill(asus, &asus->bluetooth, | ||
748 | "asus-bluetooth", RFKILL_TYPE_BLUETOOTH, | ||
749 | ASUS_WMI_DEVID_BLUETOOTH); | ||
750 | |||
751 | if (result && result != -ENODEV) | ||
752 | goto exit; | ||
753 | |||
754 | result = asus_new_rfkill(asus, &asus->wimax, "asus-wimax", | ||
755 | RFKILL_TYPE_WIMAX, ASUS_WMI_DEVID_WIMAX); | ||
756 | |||
757 | if (result && result != -ENODEV) | ||
758 | goto exit; | ||
759 | |||
760 | result = asus_new_rfkill(asus, &asus->wwan3g, "asus-wwan3g", | ||
761 | RFKILL_TYPE_WWAN, ASUS_WMI_DEVID_WWAN3G); | ||
762 | |||
763 | if (result && result != -ENODEV) | ||
764 | goto exit; | ||
765 | |||
766 | if (!asus->driver->hotplug_wireless) | ||
767 | goto exit; | ||
768 | |||
769 | result = asus_setup_pci_hotplug(asus); | ||
770 | /* | ||
771 | * If we get -EBUSY then something else is handling the PCI hotplug - | ||
772 | * don't fail in this case | ||
773 | */ | ||
774 | if (result == -EBUSY) | ||
775 | result = 0; | ||
776 | |||
777 | asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); | ||
778 | asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); | ||
779 | asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); | ||
780 | /* | ||
781 | * Refresh pci hotplug in case the rfkill state was changed during | ||
782 | * setup. | ||
783 | */ | ||
784 | asus_rfkill_hotplug(asus); | ||
785 | |||
786 | exit: | ||
787 | if (result && result != -ENODEV) | ||
788 | asus_wmi_rfkill_exit(asus); | ||
789 | |||
790 | if (result == -ENODEV) | ||
791 | result = 0; | ||
792 | |||
793 | return result; | ||
794 | } | ||
795 | |||
796 | /* | ||
797 | * Hwmon device | ||
798 | */ | ||
799 | static ssize_t asus_hwmon_pwm1(struct device *dev, | ||
800 | struct device_attribute *attr, | ||
801 | char *buf) | ||
802 | { | ||
803 | struct asus_wmi *asus = dev_get_drvdata(dev); | ||
804 | u32 value; | ||
805 | int err; | ||
806 | |||
807 | err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value); | ||
808 | |||
809 | if (err < 0) | ||
810 | return err; | ||
811 | |||
812 | value &= 0xFF; /* the fan speed is in the low byte */ | ||
813 | |||
814 | if (value == 1) /* Low Speed */ | ||
815 | value = 85; | ||
816 | else if (value == 2) | ||
817 | value = 170; | ||
818 | else if (value == 3) | ||
819 | value = 255; | ||
820 | else if (value != 0) { | ||
821 | pr_err("Unknown fan speed %#x", value); | ||
822 | value = -1; | ||
823 | } | ||
824 | |||
825 | return sprintf(buf, "%d\n", value); | ||
826 | } | ||
827 | |||
828 | static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0); | ||
829 | |||
830 | static ssize_t | ||
831 | show_name(struct device *dev, struct device_attribute *attr, char *buf) | ||
832 | { | ||
833 | return sprintf(buf, "asus\n"); | ||
834 | } | ||
835 | static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); | ||
836 | |||
837 | static struct attribute *hwmon_attributes[] = { | ||
838 | &sensor_dev_attr_pwm1.dev_attr.attr, | ||
839 | &sensor_dev_attr_name.dev_attr.attr, | ||
840 | NULL | ||
841 | }; | ||
842 | |||
843 | static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, | ||
844 | struct attribute *attr, int idx) | ||
845 | { | ||
846 | struct device *dev = container_of(kobj, struct device, kobj); | ||
847 | struct platform_device *pdev = to_platform_device(dev->parent); | ||
848 | struct asus_wmi *asus = platform_get_drvdata(pdev); | ||
849 | bool ok = true; | ||
850 | int dev_id = -1; | ||
851 | u32 value = ASUS_WMI_UNSUPPORTED_METHOD; | ||
852 | |||
853 | if (attr == &sensor_dev_attr_pwm1.dev_attr.attr) | ||
854 | dev_id = ASUS_WMI_DEVID_FAN_CTRL; | ||
855 | |||
856 | if (dev_id != -1) { | ||
857 | int err = asus_wmi_get_devstate(asus, dev_id, &value); | ||
858 | |||
859 | if (err < 0) | ||
860 | return err; | ||
861 | } | ||
862 | |||
863 | if (dev_id == ASUS_WMI_DEVID_FAN_CTRL) { | ||
864 | /* | ||
865 | * We need to find a better way, probably using sfun, | ||
866 | * bits or spec ... | ||
867 | * Currently we disable it if: | ||
868 | * - ASUS_WMI_UNSUPPORTED_METHOD is returned | ||
869 | * - reserved bits are non-zero | ||
870 | * - sfun and presence bit are not set | ||
871 | */ | ||
872 | if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 | ||
873 | || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT))) | ||
874 | ok = false; | ||
875 | } | ||
876 | |||
877 | return ok ? attr->mode : 0; | ||
878 | } | ||
879 | |||
880 | static struct attribute_group hwmon_attribute_group = { | ||
881 | .is_visible = asus_hwmon_sysfs_is_visible, | ||
882 | .attrs = hwmon_attributes | ||
883 | }; | ||
884 | |||
885 | static void asus_wmi_hwmon_exit(struct asus_wmi *asus) | ||
886 | { | ||
887 | struct device *hwmon; | ||
888 | |||
889 | hwmon = asus->hwmon_device; | ||
890 | if (!hwmon) | ||
891 | return; | ||
892 | sysfs_remove_group(&hwmon->kobj, &hwmon_attribute_group); | ||
893 | hwmon_device_unregister(hwmon); | ||
894 | asus->hwmon_device = NULL; | ||
895 | } | ||
896 | |||
897 | static int asus_wmi_hwmon_init(struct asus_wmi *asus) | ||
898 | { | ||
899 | struct device *hwmon; | ||
900 | int result; | ||
901 | |||
902 | hwmon = hwmon_device_register(&asus->platform_device->dev); | ||
903 | if (IS_ERR(hwmon)) { | ||
904 | pr_err("Could not register asus hwmon device\n"); | ||
905 | return PTR_ERR(hwmon); | ||
906 | } | ||
907 | asus->hwmon_device = hwmon; | ||
908 | result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group); | ||
909 | if (result) | ||
910 | asus_wmi_hwmon_exit(asus); | ||
911 | return result; | ||
912 | } | ||
913 | |||
914 | /* | ||
915 | * Backlight | ||
916 | */ | ||
917 | static int read_backlight_power(struct asus_wmi *asus) | ||
918 | { | ||
919 | int ret = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_BACKLIGHT); | ||
920 | |||
921 | if (ret < 0) | ||
922 | return ret; | ||
923 | |||
924 | return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; | ||
925 | } | ||
926 | |||
927 | static int read_brightness_max(struct asus_wmi *asus) | ||
928 | { | ||
929 | u32 retval; | ||
930 | int err; | ||
931 | |||
932 | err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval); | ||
933 | |||
934 | if (err < 0) | ||
935 | return err; | ||
936 | |||
937 | retval = retval & ASUS_WMI_DSTS_MAX_BRIGTH_MASK; | ||
938 | retval >>= 8; | ||
939 | |||
940 | if (!retval) | ||
941 | return -ENODEV; | ||
942 | |||
943 | return retval; | ||
944 | } | ||
945 | |||
946 | static int read_brightness(struct backlight_device *bd) | ||
947 | { | ||
948 | struct asus_wmi *asus = bl_get_data(bd); | ||
949 | u32 retval; | ||
950 | int err; | ||
951 | |||
952 | err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval); | ||
953 | |||
954 | if (err < 0) | ||
955 | return err; | ||
956 | |||
957 | return retval & ASUS_WMI_DSTS_BRIGHTNESS_MASK; | ||
958 | } | ||
959 | |||
960 | static int update_bl_status(struct backlight_device *bd) | ||
961 | { | ||
962 | struct asus_wmi *asus = bl_get_data(bd); | ||
963 | u32 ctrl_param; | ||
964 | int power, err; | ||
965 | |||
966 | ctrl_param = bd->props.brightness; | ||
967 | |||
968 | err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BRIGHTNESS, | ||
969 | ctrl_param, NULL); | ||
970 | |||
971 | if (err < 0) | ||
972 | return err; | ||
973 | |||
974 | power = read_backlight_power(asus); | ||
975 | if (power != -ENODEV && bd->props.power != power) { | ||
976 | ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK); | ||
977 | err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, | ||
978 | ctrl_param, NULL); | ||
979 | } | ||
980 | return err; | ||
981 | } | ||
982 | |||
983 | static const struct backlight_ops asus_wmi_bl_ops = { | ||
984 | .get_brightness = read_brightness, | ||
985 | .update_status = update_bl_status, | ||
986 | }; | ||
987 | |||
988 | static int asus_wmi_backlight_notify(struct asus_wmi *asus, int code) | ||
989 | { | ||
990 | struct backlight_device *bd = asus->backlight_device; | ||
991 | int old = bd->props.brightness; | ||
992 | int new = old; | ||
993 | |||
994 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
995 | new = code - NOTIFY_BRNUP_MIN + 1; | ||
996 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | ||
997 | new = code - NOTIFY_BRNDOWN_MIN; | ||
998 | |||
999 | bd->props.brightness = new; | ||
1000 | backlight_update_status(bd); | ||
1001 | backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); | ||
1002 | |||
1003 | return old; | ||
1004 | } | ||
1005 | |||
1006 | static int asus_wmi_backlight_init(struct asus_wmi *asus) | ||
1007 | { | ||
1008 | struct backlight_device *bd; | ||
1009 | struct backlight_properties props; | ||
1010 | int max; | ||
1011 | int power; | ||
1012 | |||
1013 | max = read_brightness_max(asus); | ||
1014 | |||
1015 | if (max == -ENODEV) | ||
1016 | max = 0; | ||
1017 | else if (max < 0) | ||
1018 | return max; | ||
1019 | |||
1020 | power = read_backlight_power(asus); | ||
1021 | |||
1022 | if (power == -ENODEV) | ||
1023 | power = FB_BLANK_UNBLANK; | ||
1024 | else if (power < 0) | ||
1025 | return power; | ||
1026 | |||
1027 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
1028 | props.max_brightness = max; | ||
1029 | bd = backlight_device_register(asus->driver->name, | ||
1030 | &asus->platform_device->dev, asus, | ||
1031 | &asus_wmi_bl_ops, &props); | ||
1032 | if (IS_ERR(bd)) { | ||
1033 | pr_err("Could not register backlight device\n"); | ||
1034 | return PTR_ERR(bd); | ||
1035 | } | ||
1036 | |||
1037 | asus->backlight_device = bd; | ||
1038 | |||
1039 | bd->props.brightness = read_brightness(bd); | ||
1040 | bd->props.power = power; | ||
1041 | backlight_update_status(bd); | ||
1042 | |||
1043 | return 0; | ||
1044 | } | ||
1045 | |||
1046 | static void asus_wmi_backlight_exit(struct asus_wmi *asus) | ||
1047 | { | ||
1048 | if (asus->backlight_device) | ||
1049 | backlight_device_unregister(asus->backlight_device); | ||
1050 | |||
1051 | asus->backlight_device = NULL; | ||
1052 | } | ||
1053 | |||
1054 | static void asus_wmi_notify(u32 value, void *context) | ||
1055 | { | ||
1056 | struct asus_wmi *asus = context; | ||
1057 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
1058 | union acpi_object *obj; | ||
1059 | acpi_status status; | ||
1060 | int code; | ||
1061 | int orig_code; | ||
1062 | |||
1063 | status = wmi_get_event_data(value, &response); | ||
1064 | if (status != AE_OK) { | ||
1065 | pr_err("bad event status 0x%x\n", status); | ||
1066 | return; | ||
1067 | } | ||
1068 | |||
1069 | obj = (union acpi_object *)response.pointer; | ||
1070 | |||
1071 | if (!obj || obj->type != ACPI_TYPE_INTEGER) | ||
1072 | goto exit; | ||
1073 | |||
1074 | code = obj->integer.value; | ||
1075 | orig_code = code; | ||
1076 | |||
1077 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
1078 | code = NOTIFY_BRNUP_MIN; | ||
1079 | else if (code >= NOTIFY_BRNDOWN_MIN && | ||
1080 | code <= NOTIFY_BRNDOWN_MAX) | ||
1081 | code = NOTIFY_BRNDOWN_MIN; | ||
1082 | |||
1083 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { | ||
1084 | if (!acpi_video_backlight_support()) | ||
1085 | asus_wmi_backlight_notify(asus, orig_code); | ||
1086 | } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true)) | ||
1087 | pr_info("Unknown key %x pressed\n", code); | ||
1088 | |||
1089 | exit: | ||
1090 | kfree(obj); | ||
1091 | } | ||
1092 | |||
1093 | /* | ||
1094 | * Sys helpers | ||
1095 | */ | ||
1096 | static int parse_arg(const char *buf, unsigned long count, int *val) | ||
1097 | { | ||
1098 | if (!count) | ||
1099 | return 0; | ||
1100 | if (sscanf(buf, "%i", val) != 1) | ||
1101 | return -EINVAL; | ||
1102 | return count; | ||
1103 | } | ||
1104 | |||
1105 | static ssize_t store_sys_wmi(struct asus_wmi *asus, int devid, | ||
1106 | const char *buf, size_t count) | ||
1107 | { | ||
1108 | u32 retval; | ||
1109 | int rv, err, value; | ||
1110 | |||
1111 | value = asus_wmi_get_devstate_simple(asus, devid); | ||
1112 | if (value == -ENODEV) /* Check device presence */ | ||
1113 | return value; | ||
1114 | |||
1115 | rv = parse_arg(buf, count, &value); | ||
1116 | err = asus_wmi_set_devstate(devid, value, &retval); | ||
1117 | |||
1118 | if (err < 0) | ||
1119 | return err; | ||
1120 | |||
1121 | return rv; | ||
1122 | } | ||
1123 | |||
1124 | static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf) | ||
1125 | { | ||
1126 | int value = asus_wmi_get_devstate_simple(asus, devid); | ||
1127 | |||
1128 | if (value < 0) | ||
1129 | return value; | ||
1130 | |||
1131 | return sprintf(buf, "%d\n", value); | ||
1132 | } | ||
1133 | |||
1134 | #define ASUS_WMI_CREATE_DEVICE_ATTR(_name, _mode, _cm) \ | ||
1135 | static ssize_t show_##_name(struct device *dev, \ | ||
1136 | struct device_attribute *attr, \ | ||
1137 | char *buf) \ | ||
1138 | { \ | ||
1139 | struct asus_wmi *asus = dev_get_drvdata(dev); \ | ||
1140 | \ | ||
1141 | return show_sys_wmi(asus, _cm, buf); \ | ||
1142 | } \ | ||
1143 | static ssize_t store_##_name(struct device *dev, \ | ||
1144 | struct device_attribute *attr, \ | ||
1145 | const char *buf, size_t count) \ | ||
1146 | { \ | ||
1147 | struct asus_wmi *asus = dev_get_drvdata(dev); \ | ||
1148 | \ | ||
1149 | return store_sys_wmi(asus, _cm, buf, count); \ | ||
1150 | } \ | ||
1151 | static struct device_attribute dev_attr_##_name = { \ | ||
1152 | .attr = { \ | ||
1153 | .name = __stringify(_name), \ | ||
1154 | .mode = _mode }, \ | ||
1155 | .show = show_##_name, \ | ||
1156 | .store = store_##_name, \ | ||
1157 | } | ||
1158 | |||
1159 | ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD); | ||
1160 | ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA); | ||
1161 | ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER); | ||
1162 | |||
1163 | static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, | ||
1164 | const char *buf, size_t count) | ||
1165 | { | ||
1166 | int value; | ||
1167 | |||
1168 | if (!count || sscanf(buf, "%i", &value) != 1) | ||
1169 | return -EINVAL; | ||
1170 | if (value < 0 || value > 2) | ||
1171 | return -EINVAL; | ||
1172 | |||
1173 | return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL); | ||
1174 | } | ||
1175 | |||
1176 | static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); | ||
1177 | |||
1178 | static struct attribute *platform_attributes[] = { | ||
1179 | &dev_attr_cpufv.attr, | ||
1180 | &dev_attr_camera.attr, | ||
1181 | &dev_attr_cardr.attr, | ||
1182 | &dev_attr_touchpad.attr, | ||
1183 | NULL | ||
1184 | }; | ||
1185 | |||
1186 | static mode_t asus_sysfs_is_visible(struct kobject *kobj, | ||
1187 | struct attribute *attr, int idx) | ||
1188 | { | ||
1189 | struct device *dev = container_of(kobj, struct device, kobj); | ||
1190 | struct platform_device *pdev = to_platform_device(dev); | ||
1191 | struct asus_wmi *asus = platform_get_drvdata(pdev); | ||
1192 | bool ok = true; | ||
1193 | int devid = -1; | ||
1194 | |||
1195 | if (attr == &dev_attr_camera.attr) | ||
1196 | devid = ASUS_WMI_DEVID_CAMERA; | ||
1197 | else if (attr == &dev_attr_cardr.attr) | ||
1198 | devid = ASUS_WMI_DEVID_CARDREADER; | ||
1199 | else if (attr == &dev_attr_touchpad.attr) | ||
1200 | devid = ASUS_WMI_DEVID_TOUCHPAD; | ||
1201 | |||
1202 | if (devid != -1) | ||
1203 | ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); | ||
1204 | |||
1205 | return ok ? attr->mode : 0; | ||
1206 | } | ||
1207 | |||
1208 | static struct attribute_group platform_attribute_group = { | ||
1209 | .is_visible = asus_sysfs_is_visible, | ||
1210 | .attrs = platform_attributes | ||
1211 | }; | ||
1212 | |||
1213 | static void asus_wmi_sysfs_exit(struct platform_device *device) | ||
1214 | { | ||
1215 | sysfs_remove_group(&device->dev.kobj, &platform_attribute_group); | ||
1216 | } | ||
1217 | |||
1218 | static int asus_wmi_sysfs_init(struct platform_device *device) | ||
1219 | { | ||
1220 | return sysfs_create_group(&device->dev.kobj, &platform_attribute_group); | ||
1221 | } | ||
1222 | |||
1223 | /* | ||
1224 | * Platform device | ||
1225 | */ | ||
1226 | static int __init asus_wmi_platform_init(struct asus_wmi *asus) | ||
1227 | { | ||
1228 | int rv; | ||
1229 | |||
1230 | /* The INIT method enables hotkeys on some models */ | ||
1231 | if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_INIT, 0, 0, &rv)) | ||
1232 | pr_info("Initialization: %#x", rv); | ||
1233 | |||
1234 | /* We don't know yet what to do with this version... */ | ||
1235 | if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) { | ||
1236 | pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF); | ||
1237 | asus->spec = rv; | ||
1238 | } | ||
1239 | |||
1240 | /* | ||
1241 | * The SFUN method probably allows the original driver to get the list | ||
1242 | * of features supported by a given model. For now, the 0x0100 or 0x0800 | ||
1243 | * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card. | ||
1244 | * The significance of the other bits is yet to be determined. | ||
1245 | */ | ||
1246 | if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SFUN, 0, 0, &rv)) { | ||
1247 | pr_info("SFUN value: %#x", rv); | ||
1248 | asus->sfun = rv; | ||
1249 | } | ||
1250 | |||
1251 | /* | ||
1252 | * Eee PCs and Notebooks seem to use different method IDs for DSTS, | ||
1253 | * but this may also be related to the BIOS's SPEC. | ||
1254 | * Note that on most Eee PCs there is no way to check whether a method | ||
1255 | * exists or not, while notebooks return 0xFFFFFFFE on failure; here | ||
1256 | * again, SPEC could probably be used for that kind of check. | ||
1257 | */ | ||
1258 | if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL)) | ||
1259 | asus->dsts_id = ASUS_WMI_METHODID_DSTS; | ||
1260 | else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL)) | ||
1261 | asus->dsts_id = ASUS_WMI_METHODID_DSTS2; | ||
1262 | |||
1263 | if (!asus->dsts_id) { | ||
1264 | pr_err("Can't find DSTS"); | ||
1265 | return -ENODEV; | ||
1266 | } | ||
1267 | |||
1268 | return asus_wmi_sysfs_init(asus->platform_device); | ||
1269 | } | ||
1270 | |||
1271 | static void asus_wmi_platform_exit(struct asus_wmi *asus) | ||
1272 | { | ||
1273 | asus_wmi_sysfs_exit(asus->platform_device); | ||
1274 | } | ||
1275 | |||
1276 | /* | ||
1277 | * debugfs | ||
1278 | */ | ||
1279 | struct asus_wmi_debugfs_node { | ||
1280 | struct asus_wmi *asus; | ||
1281 | char *name; | ||
1282 | int (*show) (struct seq_file *m, void *data); | ||
1283 | }; | ||
1284 | |||
1285 | static int show_dsts(struct seq_file *m, void *data) | ||
1286 | { | ||
1287 | struct asus_wmi *asus = m->private; | ||
1288 | int err; | ||
1289 | u32 retval = -1; | ||
1290 | |||
1291 | err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval); | ||
1292 | |||
1293 | if (err < 0) | ||
1294 | return err; | ||
1295 | |||
1296 | seq_printf(m, "DSTS(%#x) = %#x\n", asus->debug.dev_id, retval); | ||
1297 | |||
1298 | return 0; | ||
1299 | } | ||
1300 | |||
1301 | static int show_devs(struct seq_file *m, void *data) | ||
1302 | { | ||
1303 | struct asus_wmi *asus = m->private; | ||
1304 | int err; | ||
1305 | u32 retval = -1; | ||
1306 | |||
1307 | err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param, | ||
1308 | &retval); | ||
1309 | |||
1310 | if (err < 0) | ||
1311 | return err; | ||
1312 | |||
1313 | seq_printf(m, "DEVS(%#x, %#x) = %#x\n", asus->debug.dev_id, | ||
1314 | asus->debug.ctrl_param, retval); | ||
1315 | |||
1316 | return 0; | ||
1317 | } | ||
1318 | |||
1319 | static int show_call(struct seq_file *m, void *data) | ||
1320 | { | ||
1321 | struct asus_wmi *asus = m->private; | ||
1322 | struct bios_args args = { | ||
1323 | .arg0 = asus->debug.dev_id, | ||
1324 | .arg1 = asus->debug.ctrl_param, | ||
1325 | }; | ||
1326 | struct acpi_buffer input = { (acpi_size) sizeof(args), &args }; | ||
1327 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
1328 | union acpi_object *obj; | ||
1329 | acpi_status status; | ||
1330 | |||
1331 | status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, | ||
1332 | 1, asus->debug.method_id, | ||
1333 | &input, &output); | ||
1334 | |||
1335 | if (ACPI_FAILURE(status)) | ||
1336 | return -EIO; | ||
1337 | |||
1338 | obj = (union acpi_object *)output.pointer; | ||
1339 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
1340 | seq_printf(m, "%#x(%#x, %#x) = %#x\n", asus->debug.method_id, | ||
1341 | asus->debug.dev_id, asus->debug.ctrl_param, | ||
1342 | (u32) obj->integer.value); | ||
1343 | else | ||
1344 | seq_printf(m, "%#x(%#x, %#x) = t:%d\n", asus->debug.method_id, | ||
1345 | asus->debug.dev_id, asus->debug.ctrl_param, | ||
1346 | obj ? obj->type : -1); | ||
1347 | |||
1348 | kfree(obj); | ||
1349 | |||
1350 | return 0; | ||
1351 | } | ||
1352 | |||
1353 | static struct asus_wmi_debugfs_node asus_wmi_debug_files[] = { | ||
1354 | {NULL, "devs", show_devs}, | ||
1355 | {NULL, "dsts", show_dsts}, | ||
1356 | {NULL, "call", show_call}, | ||
1357 | }; | ||
1358 | |||
1359 | static int asus_wmi_debugfs_open(struct inode *inode, struct file *file) | ||
1360 | { | ||
1361 | struct asus_wmi_debugfs_node *node = inode->i_private; | ||
1362 | |||
1363 | return single_open(file, node->show, node->asus); | ||
1364 | } | ||
1365 | |||
1366 | static const struct file_operations asus_wmi_debugfs_io_ops = { | ||
1367 | .owner = THIS_MODULE, | ||
1368 | .open = asus_wmi_debugfs_open, | ||
1369 | .read = seq_read, | ||
1370 | .llseek = seq_lseek, | ||
1371 | .release = single_release, | ||
1372 | }; | ||
1373 | |||
1374 | static void asus_wmi_debugfs_exit(struct asus_wmi *asus) | ||
1375 | { | ||
1376 | debugfs_remove_recursive(asus->debug.root); | ||
1377 | } | ||
1378 | |||
1379 | static int asus_wmi_debugfs_init(struct asus_wmi *asus) | ||
1380 | { | ||
1381 | struct dentry *dent; | ||
1382 | int i; | ||
1383 | |||
1384 | asus->debug.root = debugfs_create_dir(asus->driver->name, NULL); | ||
1385 | if (!asus->debug.root) { | ||
1386 | pr_err("failed to create debugfs directory"); | ||
1387 | goto error_debugfs; | ||
1388 | } | ||
1389 | |||
1390 | dent = debugfs_create_x32("method_id", S_IRUGO | S_IWUSR, | ||
1391 | asus->debug.root, &asus->debug.method_id); | ||
1392 | if (!dent) | ||
1393 | goto error_debugfs; | ||
1394 | |||
1395 | dent = debugfs_create_x32("dev_id", S_IRUGO | S_IWUSR, | ||
1396 | asus->debug.root, &asus->debug.dev_id); | ||
1397 | if (!dent) | ||
1398 | goto error_debugfs; | ||
1399 | |||
1400 | dent = debugfs_create_x32("ctrl_param", S_IRUGO | S_IWUSR, | ||
1401 | asus->debug.root, &asus->debug.ctrl_param); | ||
1402 | if (!dent) | ||
1403 | goto error_debugfs; | ||
1404 | |||
1405 | for (i = 0; i < ARRAY_SIZE(asus_wmi_debug_files); i++) { | ||
1406 | struct asus_wmi_debugfs_node *node = &asus_wmi_debug_files[i]; | ||
1407 | |||
1408 | node->asus = asus; | ||
1409 | dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO, | ||
1410 | asus->debug.root, node, | ||
1411 | &asus_wmi_debugfs_io_ops); | ||
1412 | if (!dent) { | ||
1413 | pr_err("failed to create debug file: %s\n", node->name); | ||
1414 | goto error_debugfs; | ||
1415 | } | ||
1416 | } | ||
1417 | |||
1418 | return 0; | ||
1419 | |||
1420 | error_debugfs: | ||
1421 | asus_wmi_debugfs_exit(asus); | ||
1422 | return -ENOMEM; | ||
1423 | } | ||
1424 | |||
1425 | /* | ||
1426 | * WMI Driver | ||
1427 | */ | ||
1428 | static int asus_wmi_add(struct platform_device *pdev) | ||
1429 | { | ||
1430 | struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver); | ||
1431 | struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv); | ||
1432 | struct asus_wmi *asus; | ||
1433 | acpi_status status; | ||
1434 | int err; | ||
1435 | |||
1436 | asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL); | ||
1437 | if (!asus) | ||
1438 | return -ENOMEM; | ||
1439 | |||
1440 | asus->driver = wdrv; | ||
1441 | asus->platform_device = pdev; | ||
1442 | wdrv->platform_device = pdev; | ||
1443 | platform_set_drvdata(asus->platform_device, asus); | ||
1444 | |||
1445 | if (wdrv->quirks) | ||
1446 | wdrv->quirks(asus->driver); | ||
1447 | |||
1448 | err = asus_wmi_platform_init(asus); | ||
1449 | if (err) | ||
1450 | goto fail_platform; | ||
1451 | |||
1452 | err = asus_wmi_input_init(asus); | ||
1453 | if (err) | ||
1454 | goto fail_input; | ||
1455 | |||
1456 | err = asus_wmi_hwmon_init(asus); | ||
1457 | if (err) | ||
1458 | goto fail_hwmon; | ||
1459 | |||
1460 | err = asus_wmi_led_init(asus); | ||
1461 | if (err) | ||
1462 | goto fail_leds; | ||
1463 | |||
1464 | err = asus_wmi_rfkill_init(asus); | ||
1465 | if (err) | ||
1466 | goto fail_rfkill; | ||
1467 | |||
1468 | if (!acpi_video_backlight_support()) { | ||
1469 | err = asus_wmi_backlight_init(asus); | ||
1470 | if (err && err != -ENODEV) | ||
1471 | goto fail_backlight; | ||
1472 | } else | ||
1473 | pr_info("Backlight controlled by ACPI video driver\n"); | ||
1474 | |||
1475 | status = wmi_install_notify_handler(asus->driver->event_guid, | ||
1476 | asus_wmi_notify, asus); | ||
1477 | if (ACPI_FAILURE(status)) { | ||
1478 | pr_err("Unable to register notify handler - %d\n", status); | ||
1479 | err = -ENODEV; | ||
1480 | goto fail_wmi_handler; | ||
1481 | } | ||
1482 | |||
1483 | err = asus_wmi_debugfs_init(asus); | ||
1484 | if (err) | ||
1485 | goto fail_debugfs; | ||
1486 | |||
1487 | return 0; | ||
1488 | |||
1489 | fail_debugfs: | ||
1490 | wmi_remove_notify_handler(asus->driver->event_guid); | ||
1491 | fail_wmi_handler: | ||
1492 | asus_wmi_backlight_exit(asus); | ||
1493 | fail_backlight: | ||
1494 | asus_wmi_rfkill_exit(asus); | ||
1495 | fail_rfkill: | ||
1496 | asus_wmi_led_exit(asus); | ||
1497 | fail_leds: | ||
1498 | asus_wmi_hwmon_exit(asus); | ||
1499 | fail_hwmon: | ||
1500 | asus_wmi_input_exit(asus); | ||
1501 | fail_input: | ||
1502 | asus_wmi_platform_exit(asus); | ||
1503 | fail_platform: | ||
1504 | kfree(asus); | ||
1505 | return err; | ||
1506 | } | ||
1507 | |||
1508 | static int asus_wmi_remove(struct platform_device *device) | ||
1509 | { | ||
1510 | struct asus_wmi *asus; | ||
1511 | |||
1512 | asus = platform_get_drvdata(device); | ||
1513 | wmi_remove_notify_handler(asus->driver->event_guid); | ||
1514 | asus_wmi_backlight_exit(asus); | ||
1515 | asus_wmi_input_exit(asus); | ||
1516 | asus_wmi_hwmon_exit(asus); | ||
1517 | asus_wmi_led_exit(asus); | ||
1518 | asus_wmi_rfkill_exit(asus); | ||
1519 | asus_wmi_debugfs_exit(asus); | ||
1520 | asus_wmi_platform_exit(asus); | ||
1521 | |||
1522 | kfree(asus); | ||
1523 | return 0; | ||
1524 | } | ||
1525 | |||
1526 | /* | ||
1527 | * Platform driver - hibernate/resume callbacks | ||
1528 | */ | ||
1529 | static int asus_hotk_thaw(struct device *device) | ||
1530 | { | ||
1531 | struct asus_wmi *asus = dev_get_drvdata(device); | ||
1532 | |||
1533 | if (asus->wlan.rfkill) { | ||
1534 | bool wlan; | ||
1535 | |||
1536 | /* | ||
1537 | * Work around a BIOS bug - the ACPI _PTS method turns off the | ||
1538 | * wireless LED during suspend. Normally it is restored on resume, | ||
1539 | * but we should kick it ourselves in case hibernation is aborted. | ||
1540 | */ | ||
1541 | wlan = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN); | ||
1542 | asus_wmi_set_devstate(ASUS_WMI_DEVID_WLAN, wlan, NULL); | ||
1543 | } | ||
1544 | |||
1545 | return 0; | ||
1546 | } | ||
1547 | |||
1548 | static int asus_hotk_restore(struct device *device) | ||
1549 | { | ||
1550 | struct asus_wmi *asus = dev_get_drvdata(device); | ||
1551 | int bl; | ||
1552 | |||
1553 | /* Refresh both wlan rfkill state and pci hotplug */ | ||
1554 | if (asus->wlan.rfkill) | ||
1555 | asus_rfkill_hotplug(asus); | ||
1556 | |||
1557 | if (asus->bluetooth.rfkill) { | ||
1558 | bl = !asus_wmi_get_devstate_simple(asus, | ||
1559 | ASUS_WMI_DEVID_BLUETOOTH); | ||
1560 | rfkill_set_sw_state(asus->bluetooth.rfkill, bl); | ||
1561 | } | ||
1562 | if (asus->wimax.rfkill) { | ||
1563 | bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WIMAX); | ||
1564 | rfkill_set_sw_state(asus->wimax.rfkill, bl); | ||
1565 | } | ||
1566 | if (asus->wwan3g.rfkill) { | ||
1567 | bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G); | ||
1568 | rfkill_set_sw_state(asus->wwan3g.rfkill, bl); | ||
1569 | } | ||
1570 | |||
1571 | return 0; | ||
1572 | } | ||
1573 | |||
1574 | static const struct dev_pm_ops asus_pm_ops = { | ||
1575 | .thaw = asus_hotk_thaw, | ||
1576 | .restore = asus_hotk_restore, | ||
1577 | }; | ||
1578 | |||
1579 | static int asus_wmi_probe(struct platform_device *pdev) | ||
1580 | { | ||
1581 | struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver); | ||
1582 | struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv); | ||
1583 | int ret; | ||
1584 | |||
1585 | if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { | ||
1586 | pr_warning("Management GUID not found\n"); | ||
1587 | return -ENODEV; | ||
1588 | } | ||
1589 | |||
1590 | if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) { | ||
1591 | pr_warning("Event GUID not found\n"); | ||
1592 | return -ENODEV; | ||
1593 | } | ||
1594 | |||
1595 | if (wdrv->probe) { | ||
1596 | ret = wdrv->probe(pdev); | ||
1597 | if (ret) | ||
1598 | return ret; | ||
1599 | } | ||
1600 | |||
1601 | return asus_wmi_add(pdev); | ||
1602 | } | ||
1603 | |||
1604 | static bool used; | ||
1605 | |||
1606 | int asus_wmi_register_driver(struct asus_wmi_driver *driver) | ||
1607 | { | ||
1608 | struct platform_driver *platform_driver; | ||
1609 | struct platform_device *platform_device; | ||
1610 | |||
1611 | if (used) | ||
1612 | return -EBUSY; | ||
1613 | |||
1614 | platform_driver = &driver->platform_driver; | ||
1615 | platform_driver->remove = asus_wmi_remove; | ||
1616 | platform_driver->driver.owner = driver->owner; | ||
1617 | platform_driver->driver.name = driver->name; | ||
1618 | platform_driver->driver.pm = &asus_pm_ops; | ||
1619 | |||
1620 | platform_device = platform_create_bundle(platform_driver, | ||
1621 | asus_wmi_probe, | ||
1622 | NULL, 0, NULL, 0); | ||
1623 | if (IS_ERR(platform_device)) | ||
1624 | return PTR_ERR(platform_device); | ||
1625 | |||
1626 | used = true; | ||
1627 | return 0; | ||
1628 | } | ||
1629 | EXPORT_SYMBOL_GPL(asus_wmi_register_driver); | ||
1630 | |||
1631 | void asus_wmi_unregister_driver(struct asus_wmi_driver *driver) | ||
1632 | { | ||
1633 | platform_device_unregister(driver->platform_device); | ||
1634 | platform_driver_unregister(&driver->platform_driver); | ||
1635 | used = false; | ||
1636 | } | ||
1637 | EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver); | ||
1638 | |||
1639 | static int __init asus_wmi_init(void) | ||
1640 | { | ||
1641 | if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { | ||
1642 | pr_info("Asus Management GUID not found"); | ||
1643 | return -ENODEV; | ||
1644 | } | ||
1645 | |||
1646 | pr_info("ASUS WMI generic driver loaded"); | ||
1647 | return 0; | ||
1648 | } | ||
1649 | |||
1650 | static void __exit asus_wmi_exit(void) | ||
1651 | { | ||
1652 | pr_info("ASUS WMI generic driver unloaded"); | ||
1653 | } | ||
1654 | |||
1655 | module_init(asus_wmi_init); | ||
1656 | module_exit(asus_wmi_exit); | ||
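The asus_wmi_get_devstate()/asus_wmi_set_devstate() wrappers used throughout this file are defined earlier in asus-wmi.c, outside this hunk. Going by the show_call() debugfs helper above, a raw DSTS/DEVS call boils down to packing a device ID and a control parameter into a two-u32 buffer and evaluating it against the management GUID. The sketch below only illustrates that shape: the function name is invented, it relies on the struct bios_args and ASUS_WMI_MGMT_GUID definitions from asus-wmi.c, and the real wrappers may differ in detail.

/*
 * Sketch only: the rough shape of a raw DSTS/DEVS call, modelled on the
 * show_call() debugfs helper above.  Assumes the struct bios_args and
 * ASUS_WMI_MGMT_GUID definitions from asus-wmi.c.
 */
static int example_asus_wmi_call(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
{
	struct bios_args args = {
		.arg0 = arg0,	/* device ID, e.g. ASUS_WMI_DEVID_WLAN */
		.arg1 = arg1,	/* control parameter, e.g. 0 or 1 */
	};
	struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, method_id,
				     &input, &output);
	if (ACPI_FAILURE(status))
		return -EIO;

	obj = (union acpi_object *)output.pointer;
	if (retval && obj && obj->type == ACPI_TYPE_INTEGER)
		*retval = (u32) obj->integer.value;

	kfree(obj);
	return 0;
}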
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h new file mode 100644 index 000000000000..c044522c8766 --- /dev/null +++ b/drivers/platform/x86/asus-wmi.h | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Asus PC WMI hotkey driver | ||
3 | * | ||
4 | * Copyright(C) 2010 Intel Corporation. | ||
5 | * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com> | ||
6 | * | ||
7 | * Portions based on wistron_btns.c: | ||
8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | ||
9 | * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> | ||
10 | * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
25 | */ | ||
26 | |||
27 | #ifndef _ASUS_WMI_H_ | ||
28 | #define _ASUS_WMI_H_ | ||
29 | |||
30 | #include <linux/platform_device.h> | ||
31 | |||
32 | struct module; | ||
33 | struct key_entry; | ||
34 | struct asus_wmi; | ||
35 | |||
36 | struct asus_wmi_driver { | ||
37 | bool hotplug_wireless; | ||
38 | |||
39 | const char *name; | ||
40 | struct module *owner; | ||
41 | |||
42 | const char *event_guid; | ||
43 | |||
44 | const struct key_entry *keymap; | ||
45 | const char *input_name; | ||
46 | const char *input_phys; | ||
47 | |||
48 | int (*probe) (struct platform_device *device); | ||
49 | void (*quirks) (struct asus_wmi_driver *driver); | ||
50 | |||
51 | struct platform_driver platform_driver; | ||
52 | struct platform_device *platform_device; | ||
53 | }; | ||
54 | |||
55 | int asus_wmi_register_driver(struct asus_wmi_driver *driver); | ||
56 | void asus_wmi_unregister_driver(struct asus_wmi_driver *driver); | ||
57 | |||
58 | #endif /* !_ASUS_WMI_H_ */ | ||
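This header is the entire contract between the generic asus-wmi core and a machine-specific front end such as eeepc-wmi. As a rough sketch only, a front end fills in a struct asus_wmi_driver and hands it to asus_wmi_register_driver(); in the example below the module name, GUID string, keymap and input strings are placeholders rather than a real driver, and the real Eee PC front end appears in the eeepc-wmi.c hunk further down.

/* Sketch only: a hypothetical front end built on asus-wmi.h. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

#include "asus-wmi.h"

/* Placeholder GUID - a real front end uses the GUID exposed by its BIOS. */
#define EXAMPLE_WMI_EVENT_GUID	"01234567-89AB-CDEF-0123-456789ABCDEF"

static const struct key_entry example_wmi_keymap[] = {
	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
	{ KE_END, 0 },
};

static struct asus_wmi_driver example_wmi_driver = {
	.name		= "example-wmi",
	.owner		= THIS_MODULE,
	.event_guid	= EXAMPLE_WMI_EVENT_GUID,
	.keymap		= example_wmi_keymap,
	.input_name	= "Example WMI hotkeys",
	.input_phys	= "example-wmi/input0",
	.hotplug_wireless = false,
};

static int __init example_wmi_init(void)
{
	/* Creates the platform device/driver bundle and probes the core. */
	return asus_wmi_register_driver(&example_wmi_driver);
}

static void __exit example_wmi_exit(void)
{
	asus_wmi_unregister_driver(&example_wmi_driver);
}

module_init(example_wmi_init);
module_exit(example_wmi_exit);
MODULE_LICENSE("GPL");

Note that at this point in the series the core accepts only one such front end at a time: asus_wmi_register_driver() returns -EBUSY once its "used" flag is set, so each machine family still ships as its own module.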
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index eb95878fa583..c16a27641ced 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c | |||
@@ -201,7 +201,7 @@ static bool extra_features; | |||
201 | * into 0x4F and read a few bytes from the output, like so: | 201 | * into 0x4F and read a few bytes from the output, like so: |
202 | * u8 writeData = 0x33; | 202 | * u8 writeData = 0x33; |
203 | * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0); | 203 | * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0); |
204 | * That address is labled "fan1 table information" in the service manual. | 204 | * That address is labelled "fan1 table information" in the service manual. |
205 | * It should be clear which value in 'buffer' changes). This seems to be | 205 | * It should be clear which value in 'buffer' changes). This seems to be |
206 | * related to fan speed. It isn't a proper 'realtime' fan speed value | 206 | * related to fan speed. It isn't a proper 'realtime' fan speed value |
207 | * though, because physically stopping or speeding up the fan doesn't | 207 | * though, because physically stopping or speeding up the fan doesn't |
@@ -275,7 +275,7 @@ static int set_backlight_level(int level) | |||
275 | 275 | ||
276 | ec_write(BACKLIGHT_LEVEL_ADDR, level); | 276 | ec_write(BACKLIGHT_LEVEL_ADDR, level); |
277 | 277 | ||
278 | return 1; | 278 | return 0; |
279 | } | 279 | } |
280 | 280 | ||
281 | static int get_backlight_level(void) | 281 | static int get_backlight_level(void) |
@@ -763,7 +763,7 @@ static int dmi_check_cb(const struct dmi_system_id *id) | |||
763 | printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n", | 763 | printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n", |
764 | id->ident); | 764 | id->ident); |
765 | extra_features = false; | 765 | extra_features = false; |
766 | return 0; | 766 | return 1; |
767 | } | 767 | } |
768 | 768 | ||
769 | static int dmi_check_cb_extra(const struct dmi_system_id *id) | 769 | static int dmi_check_cb_extra(const struct dmi_system_id *id) |
@@ -772,7 +772,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id) | |||
772 | "enabling extra features\n", | 772 | "enabling extra features\n", |
773 | id->ident); | 773 | id->ident); |
774 | extra_features = true; | 774 | extra_features = true; |
775 | return 0; | 775 | return 1; |
776 | } | 776 | } |
777 | 777 | ||
778 | static struct dmi_system_id __initdata compal_dmi_table[] = { | 778 | static struct dmi_system_id __initdata compal_dmi_table[] = { |
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c new file mode 100644 index 000000000000..0ed84573ae1f --- /dev/null +++ b/drivers/platform/x86/dell-wmi-aio.c | |||
@@ -0,0 +1,171 @@ | |||
1 | /* | ||
2 | * WMI hotkeys support for Dell All-In-One series | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/input.h> | ||
25 | #include <linux/input/sparse-keymap.h> | ||
26 | #include <acpi/acpi_drivers.h> | ||
27 | #include <linux/acpi.h> | ||
28 | #include <linux/string.h> | ||
29 | |||
30 | MODULE_DESCRIPTION("WMI hotkeys driver for Dell All-In-One series"); | ||
31 | MODULE_LICENSE("GPL"); | ||
32 | |||
33 | #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" | ||
34 | #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" | ||
35 | |||
36 | static const char *dell_wmi_aio_guids[] = { | ||
37 | EVENT_GUID1, | ||
38 | EVENT_GUID2, | ||
39 | NULL | ||
40 | }; | ||
41 | |||
42 | MODULE_ALIAS("wmi:"EVENT_GUID1); | ||
43 | MODULE_ALIAS("wmi:"EVENT_GUID2); | ||
44 | |||
45 | static const struct key_entry dell_wmi_aio_keymap[] = { | ||
46 | { KE_KEY, 0xc0, { KEY_VOLUMEUP } }, | ||
47 | { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } }, | ||
48 | { KE_END, 0 } | ||
49 | }; | ||
50 | |||
51 | static struct input_dev *dell_wmi_aio_input_dev; | ||
52 | |||
53 | static void dell_wmi_aio_notify(u32 value, void *context) | ||
54 | { | ||
55 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
56 | union acpi_object *obj; | ||
57 | acpi_status status; | ||
58 | |||
59 | status = wmi_get_event_data(value, &response); | ||
60 | if (status != AE_OK) { | ||
61 | pr_info("bad event status 0x%x\n", status); | ||
62 | return; | ||
63 | } | ||
64 | |||
65 | obj = (union acpi_object *)response.pointer; | ||
66 | if (obj) { | ||
67 | unsigned int scancode; | ||
68 | |||
69 | switch (obj->type) { | ||
70 | case ACPI_TYPE_INTEGER: | ||
71 | /* Most All-In-Ones correctly return an integer scancode */ | ||
72 | scancode = obj->integer.value; | ||
73 | sparse_keymap_report_event(dell_wmi_aio_input_dev, | ||
74 | scancode, 1, true); | ||
75 | break; | ||
76 | case ACPI_TYPE_BUFFER: | ||
77 | /* Broken machines return the scancode in a buffer */ | ||
78 | if (obj->buffer.pointer && obj->buffer.length > 0) { | ||
79 | scancode = obj->buffer.pointer[0]; | ||
80 | sparse_keymap_report_event( | ||
81 | dell_wmi_aio_input_dev, | ||
82 | scancode, 1, true); | ||
83 | } | ||
84 | break; | ||
85 | } | ||
86 | } | ||
87 | kfree(obj); | ||
88 | } | ||
89 | |||
90 | static int __init dell_wmi_aio_input_setup(void) | ||
91 | { | ||
92 | int err; | ||
93 | |||
94 | dell_wmi_aio_input_dev = input_allocate_device(); | ||
95 | |||
96 | if (!dell_wmi_aio_input_dev) | ||
97 | return -ENOMEM; | ||
98 | |||
99 | dell_wmi_aio_input_dev->name = "Dell AIO WMI hotkeys"; | ||
100 | dell_wmi_aio_input_dev->phys = "wmi/input0"; | ||
101 | dell_wmi_aio_input_dev->id.bustype = BUS_HOST; | ||
102 | |||
103 | err = sparse_keymap_setup(dell_wmi_aio_input_dev, | ||
104 | dell_wmi_aio_keymap, NULL); | ||
105 | if (err) { | ||
106 | pr_err("Unable to setup input device keymap\n"); | ||
107 | goto err_free_dev; | ||
108 | } | ||
109 | err = input_register_device(dell_wmi_aio_input_dev); | ||
110 | if (err) { | ||
111 | pr_info("Unable to register input device\n"); | ||
112 | goto err_free_keymap; | ||
113 | } | ||
114 | return 0; | ||
115 | |||
116 | err_free_keymap: | ||
117 | sparse_keymap_free(dell_wmi_aio_input_dev); | ||
118 | err_free_dev: | ||
119 | input_free_device(dell_wmi_aio_input_dev); | ||
120 | return err; | ||
121 | } | ||
122 | |||
123 | static const char *dell_wmi_aio_find(void) | ||
124 | { | ||
125 | int i; | ||
126 | |||
127 | for (i = 0; dell_wmi_aio_guids[i] != NULL; i++) | ||
128 | if (wmi_has_guid(dell_wmi_aio_guids[i])) | ||
129 | return dell_wmi_aio_guids[i]; | ||
130 | |||
131 | return NULL; | ||
132 | } | ||
133 | |||
134 | static int __init dell_wmi_aio_init(void) | ||
135 | { | ||
136 | int err; | ||
137 | const char *guid; | ||
138 | |||
139 | guid = dell_wmi_aio_find(); | ||
140 | if (!guid) { | ||
141 | pr_warning("No known WMI GUID found\n"); | ||
142 | return -ENXIO; | ||
143 | } | ||
144 | |||
145 | err = dell_wmi_aio_input_setup(); | ||
146 | if (err) | ||
147 | return err; | ||
148 | |||
149 | err = wmi_install_notify_handler(guid, dell_wmi_aio_notify, NULL); | ||
150 | if (err) { | ||
151 | pr_err("Unable to register notify handler - %d\n", err); | ||
152 | sparse_keymap_free(dell_wmi_aio_input_dev); | ||
153 | input_unregister_device(dell_wmi_aio_input_dev); | ||
154 | return err; | ||
155 | } | ||
156 | |||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static void __exit dell_wmi_aio_exit(void) | ||
161 | { | ||
162 | const char *guid; | ||
163 | |||
164 | guid = dell_wmi_aio_find(); | ||
165 | wmi_remove_notify_handler(guid); | ||
166 | sparse_keymap_free(dell_wmi_aio_input_dev); | ||
167 | input_unregister_device(dell_wmi_aio_input_dev); | ||
168 | } | ||
169 | |||
170 | module_init(dell_wmi_aio_init); | ||
171 | module_exit(dell_wmi_aio_exit); | ||
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 6605beac0d0e..5f2dd386152b 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -1322,7 +1322,7 @@ static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name) | |||
1322 | { | 1322 | { |
1323 | int dummy; | 1323 | int dummy; |
1324 | 1324 | ||
1325 | /* Some BIOSes do not report cm although it is avaliable. | 1325 | /* Some BIOSes do not report cm although it is available. |
1326 | Check if cm_getv[cm] works and, if yes, assume cm should be set. */ | 1326 | Check if cm_getv[cm] works and, if yes, assume cm should be set. */ |
1327 | if (!(eeepc->cm_supported & (1 << cm)) | 1327 | if (!(eeepc->cm_supported & (1 << cm)) |
1328 | && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) { | 1328 | && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) { |
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 4d38f98aa976..0ddc434fb93b 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Eee PC WMI hotkey driver | 2 | * Eee PC WMI hotkey driver |
3 | * | 3 | * |
4 | * Copyright(C) 2010 Intel Corporation. | 4 | * Copyright(C) 2010 Intel Corporation. |
5 | * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com> | 5 | * Copyright(C) 2010-2011 Corentin Chary <corentin.chary@gmail.com> |
6 | * | 6 | * |
7 | * Portions based on wistron_btns.c: | 7 | * Portions based on wistron_btns.c: |
8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | 8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> |
@@ -29,841 +29,57 @@ | |||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/types.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/input.h> | 32 | #include <linux/input.h> |
35 | #include <linux/input/sparse-keymap.h> | 33 | #include <linux/input/sparse-keymap.h> |
36 | #include <linux/fb.h> | 34 | #include <linux/dmi.h> |
37 | #include <linux/backlight.h> | ||
38 | #include <linux/leds.h> | ||
39 | #include <linux/rfkill.h> | ||
40 | #include <linux/debugfs.h> | ||
41 | #include <linux/seq_file.h> | ||
42 | #include <linux/platform_device.h> | ||
43 | #include <acpi/acpi_bus.h> | 35 | #include <acpi/acpi_bus.h> |
44 | #include <acpi/acpi_drivers.h> | 36 | |
37 | #include "asus-wmi.h" | ||
45 | 38 | ||
46 | #define EEEPC_WMI_FILE "eeepc-wmi" | 39 | #define EEEPC_WMI_FILE "eeepc-wmi" |
47 | 40 | ||
48 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 41 | MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>"); |
49 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); | 42 | MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver"); |
50 | MODULE_LICENSE("GPL"); | 43 | MODULE_LICENSE("GPL"); |
51 | 44 | ||
52 | #define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */ | 45 | #define EEEPC_ACPI_HID "ASUS010" /* old _HID used in eeepc-laptop */ |
53 | 46 | ||
54 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" | 47 | #define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" |
55 | #define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" | ||
56 | 48 | ||
57 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); | 49 | MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID); |
58 | MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID); | ||
59 | |||
60 | #define NOTIFY_BRNUP_MIN 0x11 | ||
61 | #define NOTIFY_BRNUP_MAX 0x1f | ||
62 | #define NOTIFY_BRNDOWN_MIN 0x20 | ||
63 | #define NOTIFY_BRNDOWN_MAX 0x2e | ||
64 | 50 | ||
65 | #define EEEPC_WMI_METHODID_DEVS 0x53564544 | 51 | static bool hotplug_wireless; |
66 | #define EEEPC_WMI_METHODID_DSTS 0x53544344 | ||
67 | #define EEEPC_WMI_METHODID_CFVS 0x53564643 | ||
68 | 52 | ||
69 | #define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012 | 53 | module_param(hotplug_wireless, bool, 0444); |
70 | #define EEEPC_WMI_DEVID_TPDLED 0x00100011 | 54 | MODULE_PARM_DESC(hotplug_wireless, |
71 | #define EEEPC_WMI_DEVID_WLAN 0x00010011 | 55 | "Enable hotplug for wireless device. " |
72 | #define EEEPC_WMI_DEVID_BLUETOOTH 0x00010013 | 56 | "If your laptop needs that, please report to " |
73 | #define EEEPC_WMI_DEVID_WWAN3G 0x00010019 | 57 | "acpi4asus-user@lists.sourceforge.net."); |
74 | 58 | ||
75 | static const struct key_entry eeepc_wmi_keymap[] = { | 59 | static const struct key_entry eeepc_wmi_keymap[] = { |
76 | /* Sleep already handled via generic ACPI code */ | 60 | /* Sleep already handled via generic ACPI code */ |
77 | { KE_KEY, 0x5d, { KEY_WLAN } }, | ||
78 | { KE_KEY, 0x32, { KEY_MUTE } }, | ||
79 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, | ||
80 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, | 61 | { KE_KEY, 0x30, { KEY_VOLUMEUP } }, |
81 | { KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } }, | 62 | { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, |
82 | { KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } }, | 63 | { KE_KEY, 0x32, { KEY_MUTE } }, |
64 | { KE_KEY, 0x5c, { KEY_F15 } }, /* Power Gear key */ | ||
65 | { KE_KEY, 0x5d, { KEY_WLAN } }, | ||
66 | { KE_KEY, 0x6b, { KEY_TOUCHPAD_TOGGLE } }, /* Toggle Touchpad */ | ||
67 | { KE_KEY, 0x82, { KEY_CAMERA } }, | ||
68 | { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, | ||
69 | { KE_KEY, 0x88, { KEY_WLAN } }, | ||
83 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, | 70 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, |
84 | { KE_KEY, 0x6b, { KEY_F13 } }, /* Disable Touchpad */ | 71 | { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ |
85 | { KE_KEY, 0xe1, { KEY_F14 } }, | 72 | { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ |
86 | { KE_KEY, 0xe9, { KEY_DISPLAY_OFF } }, | 73 | { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, |
87 | { KE_KEY, 0xe0, { KEY_PROG1 } }, | 74 | { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, |
88 | { KE_KEY, 0x5c, { KEY_F15 } }, | 75 | { KE_KEY, 0xec, { KEY_CAMERA_UP } }, |
76 | { KE_KEY, 0xed, { KEY_CAMERA_DOWN } }, | ||
77 | { KE_KEY, 0xee, { KEY_CAMERA_LEFT } }, | ||
78 | { KE_KEY, 0xef, { KEY_CAMERA_RIGHT } }, | ||
89 | { KE_END, 0}, | 79 | { KE_END, 0}, |
90 | }; | 80 | }; |
91 | 81 | ||
92 | struct bios_args { | 82 | static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level, |
93 | u32 dev_id; | ||
94 | u32 ctrl_param; | ||
95 | }; | ||
96 | |||
97 | /* | ||
98 | * eeepc-wmi/ - debugfs root directory | ||
99 | * dev_id - current dev_id | ||
100 | * ctrl_param - current ctrl_param | ||
101 | * devs - call DEVS(dev_id, ctrl_param) and print result | ||
102 | * dsts - call DSTS(dev_id) and print result | ||
103 | */ | ||
104 | struct eeepc_wmi_debug { | ||
105 | struct dentry *root; | ||
106 | u32 dev_id; | ||
107 | u32 ctrl_param; | ||
108 | }; | ||
109 | |||
110 | struct eeepc_wmi { | ||
111 | struct input_dev *inputdev; | ||
112 | struct backlight_device *backlight_device; | ||
113 | struct platform_device *platform_device; | ||
114 | |||
115 | struct led_classdev tpd_led; | ||
116 | int tpd_led_wk; | ||
117 | struct workqueue_struct *led_workqueue; | ||
118 | struct work_struct tpd_led_work; | ||
119 | |||
120 | struct rfkill *wlan_rfkill; | ||
121 | struct rfkill *bluetooth_rfkill; | ||
122 | struct rfkill *wwan3g_rfkill; | ||
123 | |||
124 | struct eeepc_wmi_debug debug; | ||
125 | }; | ||
126 | |||
127 | /* Only used in eeepc_wmi_init() and eeepc_wmi_exit() */ | ||
128 | static struct platform_device *platform_device; | ||
129 | |||
130 | static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc) | ||
131 | { | ||
132 | int err; | ||
133 | |||
134 | eeepc->inputdev = input_allocate_device(); | ||
135 | if (!eeepc->inputdev) | ||
136 | return -ENOMEM; | ||
137 | |||
138 | eeepc->inputdev->name = "Eee PC WMI hotkeys"; | ||
139 | eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0"; | ||
140 | eeepc->inputdev->id.bustype = BUS_HOST; | ||
141 | eeepc->inputdev->dev.parent = &eeepc->platform_device->dev; | ||
142 | |||
143 | err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL); | ||
144 | if (err) | ||
145 | goto err_free_dev; | ||
146 | |||
147 | err = input_register_device(eeepc->inputdev); | ||
148 | if (err) | ||
149 | goto err_free_keymap; | ||
150 | |||
151 | return 0; | ||
152 | |||
153 | err_free_keymap: | ||
154 | sparse_keymap_free(eeepc->inputdev); | ||
155 | err_free_dev: | ||
156 | input_free_device(eeepc->inputdev); | ||
157 | return err; | ||
158 | } | ||
159 | |||
160 | static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc) | ||
161 | { | ||
162 | if (eeepc->inputdev) { | ||
163 | sparse_keymap_free(eeepc->inputdev); | ||
164 | input_unregister_device(eeepc->inputdev); | ||
165 | } | ||
166 | |||
167 | eeepc->inputdev = NULL; | ||
168 | } | ||
169 | |||
170 | static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *retval) | ||
171 | { | ||
172 | struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id }; | ||
173 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
174 | union acpi_object *obj; | ||
175 | acpi_status status; | ||
176 | u32 tmp; | ||
177 | |||
178 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
179 | 1, EEEPC_WMI_METHODID_DSTS, &input, &output); | ||
180 | |||
181 | if (ACPI_FAILURE(status)) | ||
182 | return status; | ||
183 | |||
184 | obj = (union acpi_object *)output.pointer; | ||
185 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
186 | tmp = (u32)obj->integer.value; | ||
187 | else | ||
188 | tmp = 0; | ||
189 | |||
190 | if (retval) | ||
191 | *retval = tmp; | ||
192 | |||
193 | kfree(obj); | ||
194 | |||
195 | return status; | ||
196 | |||
197 | } | ||
198 | |||
199 | static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param, | ||
200 | u32 *retval) | ||
201 | { | ||
202 | struct bios_args args = { | ||
203 | .dev_id = dev_id, | ||
204 | .ctrl_param = ctrl_param, | ||
205 | }; | ||
206 | struct acpi_buffer input = { (acpi_size)sizeof(args), &args }; | ||
207 | acpi_status status; | ||
208 | |||
209 | if (!retval) { | ||
210 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1, | ||
211 | EEEPC_WMI_METHODID_DEVS, | ||
212 | &input, NULL); | ||
213 | } else { | ||
214 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
215 | union acpi_object *obj; | ||
216 | u32 tmp; | ||
217 | |||
218 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1, | ||
219 | EEEPC_WMI_METHODID_DEVS, | ||
220 | &input, &output); | ||
221 | |||
222 | if (ACPI_FAILURE(status)) | ||
223 | return status; | ||
224 | |||
225 | obj = (union acpi_object *)output.pointer; | ||
226 | if (obj && obj->type == ACPI_TYPE_INTEGER) | ||
227 | tmp = (u32)obj->integer.value; | ||
228 | else | ||
229 | tmp = 0; | ||
230 | |||
231 | *retval = tmp; | ||
232 | |||
233 | kfree(obj); | ||
234 | } | ||
235 | |||
236 | return status; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * LEDs | ||
241 | */ | ||
242 | /* | ||
243 | * These functions actually update the LED, and are called from a | ||
244 | * workqueue. By doing this as separate work rather than when the LED | ||
245 | * subsystem asks, we avoid touching the Eee PC ACPI/WMI code at a bad | ||
246 | * time, such as in a timer interrupt (see the sketch after tpd_led_set). | ||
247 | */ | ||
248 | static void tpd_led_update(struct work_struct *work) | ||
249 | { | ||
250 | int ctrl_param; | ||
251 | struct eeepc_wmi *eeepc; | ||
252 | |||
253 | eeepc = container_of(work, struct eeepc_wmi, tpd_led_work); | ||
254 | |||
255 | ctrl_param = eeepc->tpd_led_wk; | ||
256 | eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_TPDLED, ctrl_param, NULL); | ||
257 | } | ||
258 | |||
259 | static void tpd_led_set(struct led_classdev *led_cdev, | ||
260 | enum led_brightness value) | ||
261 | { | ||
262 | struct eeepc_wmi *eeepc; | ||
263 | |||
264 | eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led); | ||
265 | |||
266 | eeepc->tpd_led_wk = !!value; | ||
267 | queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work); | ||
268 | } | ||
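
The comment above tpd_led_update() explains why the LED write is deferred: brightness_set() may be called from atomic context (for example from a timer-driven LED trigger), while the WMI/EC access can sleep. A condensed sketch of that deferral idiom, reduced to the moving parts, follows; the demo_* names are illustrative, and schedule_work() on the system workqueue stands in for the driver's private single-threaded workqueue only to keep the sketch short.

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_led {
	struct led_classdev cdev;
	struct work_struct work;	/* INIT_WORK(&led->work, demo_led_work) at init time */
	int next;			/* last state requested by the LED core */
};

/* Stands in for the sleeping firmware access (a DEVS call in this driver). */
static void demo_set_hw_state(int on)
{
	pr_info("demo led -> %d\n", on);
}

static void demo_led_work(struct work_struct *work)
{
	struct demo_led *led = container_of(work, struct demo_led, work);

	demo_set_hw_state(led->next);	/* safe: runs in process context */
}

static void demo_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	struct demo_led *led = container_of(cdev, struct demo_led, cdev);

	led->next = !!value;		/* only record the request here... */
	schedule_work(&led->work);	/* ...and defer the slow part */
}
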
269 | |||
270 | static int read_tpd_state(struct eeepc_wmi *eeepc) | ||
271 | { | ||
272 | u32 retval; | ||
273 | acpi_status status; | ||
274 | |||
275 | status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_TPDLED, &retval); | ||
276 | |||
277 | if (ACPI_FAILURE(status)) | ||
278 | return -1; | ||
279 | else if (!retval || retval == 0x00060000) | ||
280 | /* | ||
281 | * If the touchpad LED is present, DSTS will set some bits, | ||
282 | * usually 0x00020000. | ||
283 | * 0x00060000 means that the device is not supported. | ||
284 | */ | ||
285 | return -ENODEV; | ||
286 | else | ||
287 | /* Status is stored in the first bit */ | ||
288 | return retval & 0x1; | ||
289 | } | ||
290 | |||
291 | static enum led_brightness tpd_led_get(struct led_classdev *led_cdev) | ||
292 | { | ||
293 | struct eeepc_wmi *eeepc; | ||
294 | |||
295 | eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led); | ||
296 | |||
297 | return read_tpd_state(eeepc); | ||
298 | } | ||
299 | |||
300 | static int eeepc_wmi_led_init(struct eeepc_wmi *eeepc) | ||
301 | { | ||
302 | int rv; | ||
303 | |||
304 | if (read_tpd_state(eeepc) < 0) | ||
305 | return 0; | ||
306 | |||
307 | eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue"); | ||
308 | if (!eeepc->led_workqueue) | ||
309 | return -ENOMEM; | ||
310 | INIT_WORK(&eeepc->tpd_led_work, tpd_led_update); | ||
311 | |||
312 | eeepc->tpd_led.name = "eeepc::touchpad"; | ||
313 | eeepc->tpd_led.brightness_set = tpd_led_set; | ||
314 | eeepc->tpd_led.brightness_get = tpd_led_get; | ||
315 | eeepc->tpd_led.max_brightness = 1; | ||
316 | |||
317 | rv = led_classdev_register(&eeepc->platform_device->dev, | ||
318 | &eeepc->tpd_led); | ||
319 | if (rv) { | ||
320 | destroy_workqueue(eeepc->led_workqueue); | ||
321 | return rv; | ||
322 | } | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static void eeepc_wmi_led_exit(struct eeepc_wmi *eeepc) | ||
328 | { | ||
329 | if (eeepc->tpd_led.dev) | ||
330 | led_classdev_unregister(&eeepc->tpd_led); | ||
331 | if (eeepc->led_workqueue) | ||
332 | destroy_workqueue(eeepc->led_workqueue); | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Rfkill devices | ||
337 | */ | ||
338 | static int eeepc_rfkill_set(void *data, bool blocked) | ||
339 | { | ||
340 | int dev_id = (unsigned long)data; | ||
341 | u32 ctrl_param = !blocked; | ||
342 | |||
343 | return eeepc_wmi_set_devstate(dev_id, ctrl_param, NULL); | ||
344 | } | ||
345 | |||
346 | static void eeepc_rfkill_query(struct rfkill *rfkill, void *data) | ||
347 | { | ||
348 | int dev_id = (unsigned long)data; | ||
349 | u32 retval; | ||
350 | acpi_status status; | ||
351 | |||
352 | status = eeepc_wmi_get_devstate(dev_id, &retval); | ||
353 | |||
354 | if (ACPI_FAILURE(status)) | ||
355 | return; | ||
356 | |||
357 | rfkill_set_sw_state(rfkill, !(retval & 0x1)); | ||
358 | } | ||
359 | |||
360 | static const struct rfkill_ops eeepc_rfkill_ops = { | ||
361 | .set_block = eeepc_rfkill_set, | ||
362 | .query = eeepc_rfkill_query, | ||
363 | }; | ||
364 | |||
365 | static int eeepc_new_rfkill(struct eeepc_wmi *eeepc, | ||
366 | struct rfkill **rfkill, | ||
367 | const char *name, | ||
368 | enum rfkill_type type, int dev_id) | ||
369 | { | ||
370 | int result; | ||
371 | u32 retval; | ||
372 | acpi_status status; | ||
373 | |||
374 | status = eeepc_wmi_get_devstate(dev_id, &retval); | ||
375 | |||
376 | if (ACPI_FAILURE(status)) | ||
377 | return -1; | ||
378 | |||
379 | /* If the device is present, DSTS will always set some bits | ||
380 | * 0x00070000 - 1110000000000000000 - device supported | ||
381 | * 0x00060000 - 1100000000000000000 - not supported | ||
382 | * 0x00020000 - 0100000000000000000 - device supported | ||
383 | * 0x00010000 - 0010000000000000000 - not supported / special mode ? | ||
384 | */ | ||
385 | if (!retval || retval == 0x00060000) | ||
386 | return -ENODEV; | ||
387 | |||
388 | *rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type, | ||
389 | &eeepc_rfkill_ops, (void *)(long)dev_id); | ||
390 | |||
391 | if (!*rfkill) | ||
392 | return -EINVAL; | ||
393 | |||
394 | rfkill_init_sw_state(*rfkill, !(retval & 0x1)); | ||
395 | result = rfkill_register(*rfkill); | ||
396 | if (result) { | ||
397 | rfkill_destroy(*rfkill); | ||
398 | *rfkill = NULL; | ||
399 | return result; | ||
400 | } | ||
401 | return 0; | ||
402 | } | ||
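
The presence check above (and the matching one in read_tpd_state()) relies on a convention of the DSTS status word: zero or 0x00060000 means the device is absent or unsupported, anything else means present, and bit 0 carries the current on/off state. A small helper making that convention explicit could look like the sketch below; the helper does not exist in the driver, which open-codes the test at each call site.

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical helper: decode a DSTS status word as used above. */
static int demo_dsts_state(u32 dsts)
{
	if (!dsts || dsts == 0x00060000)
		return -ENODEV;		/* absent or explicitly unsupported */
	return dsts & 0x1;		/* present: bit 0 is the on/off state */
}
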
403 | |||
404 | static void eeepc_wmi_rfkill_exit(struct eeepc_wmi *eeepc) | ||
405 | { | ||
406 | if (eeepc->wlan_rfkill) { | ||
407 | rfkill_unregister(eeepc->wlan_rfkill); | ||
408 | rfkill_destroy(eeepc->wlan_rfkill); | ||
409 | eeepc->wlan_rfkill = NULL; | ||
410 | } | ||
411 | if (eeepc->bluetooth_rfkill) { | ||
412 | rfkill_unregister(eeepc->bluetooth_rfkill); | ||
413 | rfkill_destroy(eeepc->bluetooth_rfkill); | ||
414 | eeepc->bluetooth_rfkill = NULL; | ||
415 | } | ||
416 | if (eeepc->wwan3g_rfkill) { | ||
417 | rfkill_unregister(eeepc->wwan3g_rfkill); | ||
418 | rfkill_destroy(eeepc->wwan3g_rfkill); | ||
419 | eeepc->wwan3g_rfkill = NULL; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | static int eeepc_wmi_rfkill_init(struct eeepc_wmi *eeepc) | ||
424 | { | ||
425 | int result = 0; | ||
426 | |||
427 | result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill, | ||
428 | "eeepc-wlan", RFKILL_TYPE_WLAN, | ||
429 | EEEPC_WMI_DEVID_WLAN); | ||
430 | |||
431 | if (result && result != -ENODEV) | ||
432 | goto exit; | ||
433 | |||
434 | result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill, | ||
435 | "eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH, | ||
436 | EEEPC_WMI_DEVID_BLUETOOTH); | ||
437 | |||
438 | if (result && result != -ENODEV) | ||
439 | goto exit; | ||
440 | |||
441 | result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill, | ||
442 | "eeepc-wwan3g", RFKILL_TYPE_WWAN, | ||
443 | EEEPC_WMI_DEVID_WWAN3G); | ||
444 | |||
445 | if (result && result != -ENODEV) | ||
446 | goto exit; | ||
447 | |||
448 | exit: | ||
449 | if (result && result != -ENODEV) | ||
450 | eeepc_wmi_rfkill_exit(eeepc); | ||
451 | |||
452 | if (result == -ENODEV) | ||
453 | result = 0; | ||
454 | |||
455 | return result; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * Backlight | ||
460 | */ | ||
461 | static int read_brightness(struct backlight_device *bd) | ||
462 | { | ||
463 | u32 retval; | ||
464 | acpi_status status; | ||
465 | |||
466 | status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &retval); | ||
467 | |||
468 | if (ACPI_FAILURE(status)) | ||
469 | return -1; | ||
470 | else | ||
471 | return retval & 0xFF; | ||
472 | } | ||
473 | |||
474 | static int update_bl_status(struct backlight_device *bd) | ||
475 | { | ||
476 | |||
477 | u32 ctrl_param; | ||
478 | acpi_status status; | ||
479 | |||
480 | ctrl_param = bd->props.brightness; | ||
481 | |||
482 | status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, | ||
483 | ctrl_param, NULL); | ||
484 | |||
485 | if (ACPI_FAILURE(status)) | ||
486 | return -1; | ||
487 | else | ||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | static const struct backlight_ops eeepc_wmi_bl_ops = { | ||
492 | .get_brightness = read_brightness, | ||
493 | .update_status = update_bl_status, | ||
494 | }; | ||
495 | |||
496 | static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code) | ||
497 | { | ||
498 | struct backlight_device *bd = eeepc->backlight_device; | ||
499 | int old = bd->props.brightness; | ||
500 | int new = old; | ||
501 | |||
502 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
503 | new = code - NOTIFY_BRNUP_MIN + 1; | ||
504 | else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX) | ||
505 | new = code - NOTIFY_BRNDOWN_MIN; | ||
506 | |||
507 | bd->props.brightness = new; | ||
508 | backlight_update_status(bd); | ||
509 | backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); | ||
510 | |||
511 | return old; | ||
512 | } | ||
513 | |||
514 | static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc) | ||
515 | { | ||
516 | struct backlight_device *bd; | ||
517 | struct backlight_properties props; | ||
518 | |||
519 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
520 | props.max_brightness = 15; | ||
521 | bd = backlight_device_register(EEEPC_WMI_FILE, | ||
522 | &eeepc->platform_device->dev, eeepc, | ||
523 | &eeepc_wmi_bl_ops, &props); | ||
524 | if (IS_ERR(bd)) { | ||
525 | pr_err("Could not register backlight device\n"); | ||
526 | return PTR_ERR(bd); | ||
527 | } | ||
528 | |||
529 | eeepc->backlight_device = bd; | ||
530 | |||
531 | bd->props.brightness = read_brightness(bd); | ||
532 | bd->props.power = FB_BLANK_UNBLANK; | ||
533 | backlight_update_status(bd); | ||
534 | |||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc) | ||
539 | { | ||
540 | if (eeepc->backlight_device) | ||
541 | backlight_device_unregister(eeepc->backlight_device); | ||
542 | |||
543 | eeepc->backlight_device = NULL; | ||
544 | } | ||
545 | |||
546 | static void eeepc_wmi_notify(u32 value, void *context) | ||
547 | { | ||
548 | struct eeepc_wmi *eeepc = context; | ||
549 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
550 | union acpi_object *obj; | ||
551 | acpi_status status; | ||
552 | int code; | ||
553 | int orig_code; | ||
554 | |||
555 | status = wmi_get_event_data(value, &response); | ||
556 | if (status != AE_OK) { | ||
557 | pr_err("bad event status 0x%x\n", status); | ||
558 | return; | ||
559 | } | ||
560 | |||
561 | obj = (union acpi_object *)response.pointer; | ||
562 | |||
563 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | ||
564 | code = obj->integer.value; | ||
565 | orig_code = code; | ||
566 | |||
567 | if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) | ||
568 | code = NOTIFY_BRNUP_MIN; | ||
569 | else if (code >= NOTIFY_BRNDOWN_MIN && | ||
570 | code <= NOTIFY_BRNDOWN_MAX) | ||
571 | code = NOTIFY_BRNDOWN_MIN; | ||
572 | |||
573 | if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { | ||
574 | if (!acpi_video_backlight_support()) | ||
575 | eeepc_wmi_backlight_notify(eeepc, orig_code); | ||
576 | } | ||
577 | |||
578 | if (!sparse_keymap_report_event(eeepc->inputdev, | ||
579 | code, 1, true)) | ||
580 | pr_info("Unknown key %x pressed\n", code); | ||
581 | } | ||
582 | |||
583 | kfree(obj); | ||
584 | } | ||
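
To make the two-step handling above concrete: a brightness-up notification of 0x14 first drives the backlight (when the ACPI video driver is not handling it), since 0x14 - NOTIFY_BRNUP_MIN + 1 = 0x14 - 0x11 + 1 = 4, so the brightness becomes level 4; the code is then collapsed to NOTIFY_BRNUP_MIN (0x11) so that every brightness-up event matches the single KE_IGNORE entry in the keymap above rather than being reported as an unknown key. A brightness-down code such as 0x25 maps to level 0x25 - 0x20 = 5 and is collapsed to 0x20 in the same way.
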
585 | |||
586 | static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, | ||
587 | const char *buf, size_t count) | ||
588 | { | ||
589 | int value; | ||
590 | struct acpi_buffer input = { (acpi_size)sizeof(value), &value }; | ||
591 | acpi_status status; | ||
592 | |||
593 | if (!count || sscanf(buf, "%i", &value) != 1) | ||
594 | return -EINVAL; | ||
595 | if (value < 0 || value > 2) | ||
596 | return -EINVAL; | ||
597 | |||
598 | status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, | ||
599 | 1, EEEPC_WMI_METHODID_CFVS, &input, NULL); | ||
600 | |||
601 | if (ACPI_FAILURE(status)) | ||
602 | return -EIO; | ||
603 | else | ||
604 | return count; | ||
605 | } | ||
606 | |||
607 | static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); | ||
608 | |||
609 | static struct attribute *platform_attributes[] = { | ||
610 | &dev_attr_cpufv.attr, | ||
611 | NULL | ||
612 | }; | ||
613 | |||
614 | static struct attribute_group platform_attribute_group = { | ||
615 | .attrs = platform_attributes | ||
616 | }; | ||
617 | |||
618 | static void eeepc_wmi_sysfs_exit(struct platform_device *device) | ||
619 | { | ||
620 | sysfs_remove_group(&device->dev.kobj, &platform_attribute_group); | ||
621 | } | ||
622 | |||
623 | static int eeepc_wmi_sysfs_init(struct platform_device *device) | ||
624 | { | ||
625 | return sysfs_create_group(&device->dev.kobj, &platform_attribute_group); | ||
626 | } | ||
627 | |||
628 | /* | ||
629 | * Platform device | ||
630 | */ | ||
631 | static int __init eeepc_wmi_platform_init(struct eeepc_wmi *eeepc) | ||
632 | { | ||
633 | int err; | ||
634 | |||
635 | eeepc->platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1); | ||
636 | if (!eeepc->platform_device) | ||
637 | return -ENOMEM; | ||
638 | platform_set_drvdata(eeepc->platform_device, eeepc); | ||
639 | |||
640 | err = platform_device_add(eeepc->platform_device); | ||
641 | if (err) | ||
642 | goto fail_platform_device; | ||
643 | |||
644 | err = eeepc_wmi_sysfs_init(eeepc->platform_device); | ||
645 | if (err) | ||
646 | goto fail_sysfs; | ||
647 | return 0; | ||
648 | |||
649 | fail_sysfs: | ||
650 | platform_device_del(eeepc->platform_device); | ||
651 | fail_platform_device: | ||
652 | platform_device_put(eeepc->platform_device); | ||
653 | return err; | ||
654 | } | ||
655 | |||
656 | static void eeepc_wmi_platform_exit(struct eeepc_wmi *eeepc) | ||
657 | { | ||
658 | eeepc_wmi_sysfs_exit(eeepc->platform_device); | ||
659 | platform_device_unregister(eeepc->platform_device); | ||
660 | } | ||
661 | |||
662 | /* | ||
663 | * debugfs | ||
664 | */ | ||
665 | struct eeepc_wmi_debugfs_node { | ||
666 | struct eeepc_wmi *eeepc; | ||
667 | char *name; | ||
668 | int (*show)(struct seq_file *m, void *data); | ||
669 | }; | ||
670 | |||
671 | static int show_dsts(struct seq_file *m, void *data) | ||
672 | { | ||
673 | struct eeepc_wmi *eeepc = m->private; | ||
674 | acpi_status status; | ||
675 | u32 retval = -1; | ||
676 | |||
677 | status = eeepc_wmi_get_devstate(eeepc->debug.dev_id, &retval); | ||
678 | |||
679 | if (ACPI_FAILURE(status)) | ||
680 | return -EIO; | ||
681 | |||
682 | seq_printf(m, "DSTS(%x) = %x\n", eeepc->debug.dev_id, retval); | ||
683 | |||
684 | return 0; | ||
685 | } | ||
686 | |||
687 | static int show_devs(struct seq_file *m, void *data) | ||
688 | { | ||
689 | struct eeepc_wmi *eeepc = m->private; | ||
690 | acpi_status status; | ||
691 | u32 retval = -1; | ||
692 | |||
693 | status = eeepc_wmi_set_devstate(eeepc->debug.dev_id, | ||
694 | eeepc->debug.ctrl_param, &retval); | ||
695 | if (ACPI_FAILURE(status)) | ||
696 | return -EIO; | ||
697 | |||
698 | seq_printf(m, "DEVS(%x, %x) = %x\n", eeepc->debug.dev_id, | ||
699 | eeepc->debug.ctrl_param, retval); | ||
700 | |||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | static struct eeepc_wmi_debugfs_node eeepc_wmi_debug_files[] = { | ||
705 | { NULL, "devs", show_devs }, | ||
706 | { NULL, "dsts", show_dsts }, | ||
707 | }; | ||
708 | |||
709 | static int eeepc_wmi_debugfs_open(struct inode *inode, struct file *file) | ||
710 | { | ||
711 | struct eeepc_wmi_debugfs_node *node = inode->i_private; | ||
712 | |||
713 | return single_open(file, node->show, node->eeepc); | ||
714 | } | ||
715 | |||
716 | static const struct file_operations eeepc_wmi_debugfs_io_ops = { | ||
717 | .owner = THIS_MODULE, | ||
718 | .open = eeepc_wmi_debugfs_open, | ||
719 | .read = seq_read, | ||
720 | .llseek = seq_lseek, | ||
721 | .release = single_release, | ||
722 | }; | ||
723 | |||
724 | static void eeepc_wmi_debugfs_exit(struct eeepc_wmi *eeepc) | ||
725 | { | ||
726 | debugfs_remove_recursive(eeepc->debug.root); | ||
727 | } | ||
728 | |||
729 | static int eeepc_wmi_debugfs_init(struct eeepc_wmi *eeepc) | ||
730 | { | ||
731 | struct dentry *dent; | ||
732 | int i; | ||
733 | |||
734 | eeepc->debug.root = debugfs_create_dir(EEEPC_WMI_FILE, NULL); | ||
735 | if (!eeepc->debug.root) { | ||
736 | pr_err("failed to create debugfs directory"); | ||
737 | goto error_debugfs; | ||
738 | } | ||
739 | |||
740 | dent = debugfs_create_x32("dev_id", S_IRUGO|S_IWUSR, | ||
741 | eeepc->debug.root, &eeepc->debug.dev_id); | ||
742 | if (!dent) | ||
743 | goto error_debugfs; | ||
744 | |||
745 | dent = debugfs_create_x32("ctrl_param", S_IRUGO|S_IWUSR, | ||
746 | eeepc->debug.root, &eeepc->debug.ctrl_param); | ||
747 | if (!dent) | ||
748 | goto error_debugfs; | ||
749 | |||
750 | for (i = 0; i < ARRAY_SIZE(eeepc_wmi_debug_files); i++) { | ||
751 | struct eeepc_wmi_debugfs_node *node = &eeepc_wmi_debug_files[i]; | ||
752 | |||
753 | node->eeepc = eeepc; | ||
754 | dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO, | ||
755 | eeepc->debug.root, node, | ||
756 | &eeepc_wmi_debugfs_io_ops); | ||
757 | if (!dent) { | ||
758 | pr_err("failed to create debug file: %s\n", node->name); | ||
759 | goto error_debugfs; | ||
760 | } | ||
761 | } | ||
762 | |||
763 | return 0; | ||
764 | |||
765 | error_debugfs: | ||
766 | eeepc_wmi_debugfs_exit(eeepc); | ||
767 | return -ENOMEM; | ||
768 | } | ||
769 | |||
770 | /* | ||
771 | * WMI Driver | ||
772 | */ | ||
773 | static struct platform_device * __init eeepc_wmi_add(void) | ||
774 | { | ||
775 | struct eeepc_wmi *eeepc; | ||
776 | acpi_status status; | ||
777 | int err; | ||
778 | |||
779 | eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL); | ||
780 | if (!eeepc) | ||
781 | return ERR_PTR(-ENOMEM); | ||
782 | |||
783 | /* | ||
784 | * Register the platform device first. It is used as a parent for the | ||
785 | * sub-devices below. | ||
786 | */ | ||
787 | err = eeepc_wmi_platform_init(eeepc); | ||
788 | if (err) | ||
789 | goto fail_platform; | ||
790 | |||
791 | err = eeepc_wmi_input_init(eeepc); | ||
792 | if (err) | ||
793 | goto fail_input; | ||
794 | |||
795 | err = eeepc_wmi_led_init(eeepc); | ||
796 | if (err) | ||
797 | goto fail_leds; | ||
798 | |||
799 | err = eeepc_wmi_rfkill_init(eeepc); | ||
800 | if (err) | ||
801 | goto fail_rfkill; | ||
802 | |||
803 | if (!acpi_video_backlight_support()) { | ||
804 | err = eeepc_wmi_backlight_init(eeepc); | ||
805 | if (err) | ||
806 | goto fail_backlight; | ||
807 | } else | ||
808 | pr_info("Backlight controlled by ACPI video driver\n"); | ||
809 | |||
810 | status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID, | ||
811 | eeepc_wmi_notify, eeepc); | ||
812 | if (ACPI_FAILURE(status)) { | ||
813 | pr_err("Unable to register notify handler - %d\n", | ||
814 | status); | ||
815 | err = -ENODEV; | ||
816 | goto fail_wmi_handler; | ||
817 | } | ||
818 | |||
819 | err = eeepc_wmi_debugfs_init(eeepc); | ||
820 | if (err) | ||
821 | goto fail_debugfs; | ||
822 | |||
823 | return eeepc->platform_device; | ||
824 | |||
825 | fail_debugfs: | ||
826 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | ||
827 | fail_wmi_handler: | ||
828 | eeepc_wmi_backlight_exit(eeepc); | ||
829 | fail_backlight: | ||
830 | eeepc_wmi_rfkill_exit(eeepc); | ||
831 | fail_rfkill: | ||
832 | eeepc_wmi_led_exit(eeepc); | ||
833 | fail_leds: | ||
834 | eeepc_wmi_input_exit(eeepc); | ||
835 | fail_input: | ||
836 | eeepc_wmi_platform_exit(eeepc); | ||
837 | fail_platform: | ||
838 | kfree(eeepc); | ||
839 | return ERR_PTR(err); | ||
840 | } | ||
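
eeepc_wmi_add() above follows the usual kernel acquire/unwind idiom: sub-devices are set up in a fixed order (platform device first, since it is the parent of everything else), and the error labels release them in exactly the reverse order, so a failure at any step only unwinds what was already acquired. Stripped of the driver specifics, the shape is the following; step_a, step_b and undo_a are placeholders, not driver functions.

static int step_a(void);	/* e.g. register the parent platform device */
static int step_b(void);	/* e.g. register a child: input, LED, rfkill, ... */
static void undo_a(void);

static int demo_add(void)
{
	int err;

	err = step_a();
	if (err)
		goto fail_a;

	err = step_b();
	if (err)
		goto fail_b;

	return 0;

fail_b:
	undo_a();		/* unwind in reverse acquisition order */
fail_a:
	return err;
}
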
841 | |||
842 | static int eeepc_wmi_remove(struct platform_device *device) | ||
843 | { | ||
844 | struct eeepc_wmi *eeepc; | ||
845 | |||
846 | eeepc = platform_get_drvdata(device); | ||
847 | wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID); | ||
848 | eeepc_wmi_backlight_exit(eeepc); | ||
849 | eeepc_wmi_input_exit(eeepc); | ||
850 | eeepc_wmi_led_exit(eeepc); | ||
851 | eeepc_wmi_rfkill_exit(eeepc); | ||
852 | eeepc_wmi_debugfs_exit(eeepc); | ||
853 | eeepc_wmi_platform_exit(eeepc); | ||
854 | |||
855 | kfree(eeepc); | ||
856 | return 0; | ||
857 | } | ||
858 | |||
859 | static struct platform_driver platform_driver = { | ||
860 | .driver = { | ||
861 | .name = EEEPC_WMI_FILE, | ||
862 | .owner = THIS_MODULE, | ||
863 | }, | ||
864 | }; | ||
865 | |||
866 | static acpi_status __init eeepc_wmi_parse_device(acpi_handle handle, u32 level, | ||
867 | void *context, void **retval) | 83 | void *context, void **retval) |
868 | { | 84 | { |
869 | pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID); | 85 | pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID); |
@@ -871,7 +87,7 @@ static acpi_status __init eeepc_wmi_parse_device(acpi_handle handle, u32 level, | |||
871 | return AE_CTRL_TERMINATE; | 87 | return AE_CTRL_TERMINATE; |
872 | } | 88 | } |
873 | 89 | ||
874 | static int __init eeepc_wmi_check_atkd(void) | 90 | static int eeepc_wmi_check_atkd(void) |
875 | { | 91 | { |
876 | acpi_status status; | 92 | acpi_status status; |
877 | bool found = false; | 93 | bool found = false; |
@@ -884,16 +100,8 @@ static int __init eeepc_wmi_check_atkd(void) | |||
884 | return -1; | 100 | return -1; |
885 | } | 101 | } |
886 | 102 | ||
887 | static int __init eeepc_wmi_init(void) | 103 | static int eeepc_wmi_probe(struct platform_device *pdev) |
888 | { | 104 | { |
889 | int err; | ||
890 | |||
891 | if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) || | ||
892 | !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) { | ||
893 | pr_warning("No known WMI GUID found\n"); | ||
894 | return -ENODEV; | ||
895 | } | ||
896 | |||
897 | if (eeepc_wmi_check_atkd()) { | 105 | if (eeepc_wmi_check_atkd()) { |
898 | pr_warning("WMI device present, but legacy ATKD device is also " | 106 | pr_warning("WMI device present, but legacy ATKD device is also " |
899 | "present and enabled."); | 107 | "present and enabled."); |
@@ -901,33 +109,59 @@ static int __init eeepc_wmi_init(void) | |||
901 | "acpi_osi=\"!Windows 2009\""); | 109 | "acpi_osi=\"!Windows 2009\""); |
902 | pr_warning("Can't load eeepc-wmi, use default acpi_osi " | 110 | pr_warning("Can't load eeepc-wmi, use default acpi_osi " |
903 | "(preferred) or eeepc-laptop"); | 111 | "(preferred) or eeepc-laptop"); |
904 | return -ENODEV; | 112 | return -EBUSY; |
905 | } | 113 | } |
114 | return 0; | ||
115 | } | ||
906 | 116 | ||
907 | platform_device = eeepc_wmi_add(); | 117 | static void eeepc_dmi_check(struct asus_wmi_driver *driver) |
908 | if (IS_ERR(platform_device)) { | 118 | { |
909 | err = PTR_ERR(platform_device); | 119 | const char *model; |
910 | goto fail_eeepc_wmi; | 120 | |
911 | } | 121 | model = dmi_get_system_info(DMI_PRODUCT_NAME); |
122 | if (!model) | ||
123 | return; | ||
912 | 124 | ||
913 | err = platform_driver_register(&platform_driver); | 125 | /* |
914 | if (err) { | 126 | * Whitelist for wlan hotplug |
915 | pr_warning("Unable to register platform driver\n"); | 127 | * |
916 | goto fail_platform_driver; | 128 | * Asus 1000H needs the current hotplug code to handle |
129 | * Fn+F2 correctly. We may add other Asus models here later, but | ||
130 | * it seems that most of the laptops supported by asus-wmi | ||
131 | * don't need to be on this list | ||
132 | */ | ||
133 | if (strcmp(model, "1000H") == 0) { | ||
134 | driver->hotplug_wireless = true; | ||
135 | pr_info("wlan hotplug enabled\n"); | ||
917 | } | 136 | } |
137 | } | ||
138 | |||
139 | static void eeepc_wmi_quirks(struct asus_wmi_driver *driver) | ||
140 | { | ||
141 | driver->hotplug_wireless = hotplug_wireless; | ||
142 | eeepc_dmi_check(driver); | ||
143 | } | ||
144 | |||
145 | static struct asus_wmi_driver asus_wmi_driver = { | ||
146 | .name = EEEPC_WMI_FILE, | ||
147 | .owner = THIS_MODULE, | ||
148 | .event_guid = EEEPC_WMI_EVENT_GUID, | ||
149 | .keymap = eeepc_wmi_keymap, | ||
150 | .input_name = "Eee PC WMI hotkeys", | ||
151 | .input_phys = EEEPC_WMI_FILE "/input0", | ||
152 | .probe = eeepc_wmi_probe, | ||
153 | .quirks = eeepc_wmi_quirks, | ||
154 | }; | ||
918 | 155 | ||
919 | return 0; | ||
920 | 156 | ||
921 | fail_platform_driver: | 157 | static int __init eeepc_wmi_init(void) |
922 | eeepc_wmi_remove(platform_device); | 158 | { |
923 | fail_eeepc_wmi: | 159 | return asus_wmi_register_driver(&asus_wmi_driver); |
924 | return err; | ||
925 | } | 160 | } |
926 | 161 | ||
927 | static void __exit eeepc_wmi_exit(void) | 162 | static void __exit eeepc_wmi_exit(void) |
928 | { | 163 | { |
929 | eeepc_wmi_remove(platform_device); | 164 | asus_wmi_unregister_driver(&asus_wmi_driver); |
930 | platform_driver_unregister(&platform_driver); | ||
931 | } | 165 | } |
932 | 166 | ||
933 | module_init(eeepc_wmi_init); | 167 | module_init(eeepc_wmi_init); |
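
Taken as a whole, this hunk strips eeepc-wmi of its own platform driver, input, LED, rfkill, backlight and debugfs plumbing; all of that now lives in the shared asus-wmi core, and eeepc-wmi is reduced to a description of its keymap and quirks. A driver built on that core only has to fill in a struct asus_wmi_driver and register it, roughly as sketched below. Only the fields visible in this hunk are shown, the demo_* names and DEMO_WMI_EVENT_GUID are placeholders, and the struct definition itself comes from the asus-wmi header presumably added elsewhere in this merge.

/* Callback prototypes as used above; bodies omitted from the sketch. */
static int demo_wmi_probe(struct platform_device *pdev);
static void demo_wmi_quirks(struct asus_wmi_driver *driver);

static const struct key_entry demo_wmi_keymap[] = {
	{ KE_KEY, 0x5d, { KEY_WLAN } },
	{ KE_END, 0 },
};

static struct asus_wmi_driver demo_wmi_driver = {
	.name		= "demo-wmi",			/* platform device name */
	.owner		= THIS_MODULE,
	.event_guid	= DEMO_WMI_EVENT_GUID,		/* WMI notification GUID */
	.keymap		= demo_wmi_keymap,		/* sparse keymap table */
	.input_name	= "Demo WMI hotkeys",
	.input_phys	= "demo-wmi/input0",
	.probe		= demo_wmi_probe,		/* reject unsupported systems */
	.quirks		= demo_wmi_quirks,		/* per-model tweaks, e.g. via DMI */
};

static int __init demo_wmi_init(void)
{
	return asus_wmi_register_driver(&demo_wmi_driver);
}

static void __exit demo_wmi_exit(void)
{
	asus_wmi_unregister_driver(&demo_wmi_driver);
}

module_init(demo_wmi_init);
module_exit(demo_wmi_exit);
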
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 9e05af9c41cb..1bc4a7539ba9 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * HP WMI hotkeys | 2 | * HP WMI hotkeys |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Red Hat <mjg@redhat.com> | 4 | * Copyright (C) 2008 Red Hat <mjg@redhat.com> |
5 | * Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi> | ||
5 | * | 6 | * |
6 | * Portions based on wistron_btns.c: | 7 | * Portions based on wistron_btns.c: |
7 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> | 8 | * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> |
@@ -51,6 +52,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); | |||
51 | #define HPWMI_HARDWARE_QUERY 0x4 | 52 | #define HPWMI_HARDWARE_QUERY 0x4 |
52 | #define HPWMI_WIRELESS_QUERY 0x5 | 53 | #define HPWMI_WIRELESS_QUERY 0x5 |
53 | #define HPWMI_HOTKEY_QUERY 0xc | 54 | #define HPWMI_HOTKEY_QUERY 0xc |
55 | #define HPWMI_WIRELESS2_QUERY 0x1b | ||
54 | 56 | ||
55 | #define PREFIX "HP WMI: " | 57 | #define PREFIX "HP WMI: " |
56 | #define UNIMP "Unimplemented " | 58 | #define UNIMP "Unimplemented " |
@@ -86,7 +88,46 @@ struct bios_args { | |||
86 | struct bios_return { | 88 | struct bios_return { |
87 | u32 sigpass; | 89 | u32 sigpass; |
88 | u32 return_code; | 90 | u32 return_code; |
89 | u32 value; | 91 | }; |
92 | |||
93 | enum hp_return_value { | ||
94 | HPWMI_RET_WRONG_SIGNATURE = 0x02, | ||
95 | HPWMI_RET_UNKNOWN_COMMAND = 0x03, | ||
96 | HPWMI_RET_UNKNOWN_CMDTYPE = 0x04, | ||
97 | HPWMI_RET_INVALID_PARAMETERS = 0x05, | ||
98 | }; | ||
99 | |||
100 | enum hp_wireless2_bits { | ||
101 | HPWMI_POWER_STATE = 0x01, | ||
102 | HPWMI_POWER_SOFT = 0x02, | ||
103 | HPWMI_POWER_BIOS = 0x04, | ||
104 | HPWMI_POWER_HARD = 0x08, | ||
105 | }; | ||
106 | |||
107 | #define IS_HWBLOCKED(x) ((x & (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) \ | ||
108 | != (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) | ||
109 | #define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT) | ||
110 | |||
111 | struct bios_rfkill2_device_state { | ||
112 | u8 radio_type; | ||
113 | u8 bus_type; | ||
114 | u16 vendor_id; | ||
115 | u16 product_id; | ||
116 | u16 subsys_vendor_id; | ||
117 | u16 subsys_product_id; | ||
118 | u8 rfkill_id; | ||
119 | u8 power; | ||
120 | u8 unknown[4]; | ||
121 | }; | ||
122 | |||
123 | /* 7 devices fit into the 128 byte buffer */ | ||
124 | #define HPWMI_MAX_RFKILL2_DEVICES 7 | ||
125 | |||
126 | struct bios_rfkill2_state { | ||
127 | u8 unknown[7]; | ||
128 | u8 count; | ||
129 | u8 pad[8]; | ||
130 | struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES]; | ||
90 | }; | 131 | }; |
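
The new 0x1b ("wireless2") query returns a 128-byte blob described by the two structures above: a 16-byte header (seven unknown bytes, a one-byte device count, eight bytes of padding) followed by up to seven 16-byte per-device records, which is where HPWMI_MAX_RFKILL2_DEVICES = 7 comes from (16 + 7 * 16 = 128). The power byte of each record is a bit field: HPWMI_POWER_SOFT tracks the soft-block state, and a device only counts as hardware-unblocked when both HPWMI_POWER_BIOS and HPWMI_POWER_HARD are set, hence the inverted-looking IS_HWBLOCKED() test. For example, power = 0x0d (STATE, BIOS and HARD set, SOFT clear) decodes as hardware-unblocked but software-blocked.
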
91 | 132 | ||
92 | static const struct key_entry hp_wmi_keymap[] = { | 133 | static const struct key_entry hp_wmi_keymap[] = { |
@@ -108,6 +149,15 @@ static struct rfkill *wifi_rfkill; | |||
108 | static struct rfkill *bluetooth_rfkill; | 149 | static struct rfkill *bluetooth_rfkill; |
109 | static struct rfkill *wwan_rfkill; | 150 | static struct rfkill *wwan_rfkill; |
110 | 151 | ||
152 | struct rfkill2_device { | ||
153 | u8 id; | ||
154 | int num; | ||
155 | struct rfkill *rfkill; | ||
156 | }; | ||
157 | |||
158 | static int rfkill2_count; | ||
159 | static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES]; | ||
160 | |||
111 | static const struct dev_pm_ops hp_wmi_pm_ops = { | 161 | static const struct dev_pm_ops hp_wmi_pm_ops = { |
112 | .resume = hp_wmi_resume_handler, | 162 | .resume = hp_wmi_resume_handler, |
113 | .restore = hp_wmi_resume_handler, | 163 | .restore = hp_wmi_resume_handler, |
@@ -129,7 +179,8 @@ static struct platform_driver hp_wmi_driver = { | |||
129 | * query: The commandtype -> What should be queried | 179 | * query: The commandtype -> What should be queried |
130 | * write: The command -> 0 read, 1 write, 3 ODM specific | 180 | * write: The command -> 0 read, 1 write, 3 ODM specific |
131 | * buffer: Buffer used as input and/or output | 181 | * buffer: Buffer used as input and/or output |
132 | * buffersize: Size of buffer | 182 | * insize: Size of input buffer |
183 | * outsize: Size of output buffer | ||
133 | * | 184 | * |
134 | * returns zero on success | 185 | * returns zero on success |
135 | * an HP WMI query specific error code (which is positive) | 186 | * an HP WMI query specific error code (which is positive) |
@@ -140,25 +191,29 @@ static struct platform_driver hp_wmi_driver = { | |||
140 | * size. E.g. Battery info query (0x7) is defined to have 1 byte input | 191 | * size. E.g. Battery info query (0x7) is defined to have 1 byte input |
141 | * and 128 byte output. The caller would do: | 192 | * and 128 byte output. The caller would do: |
142 | * buffer = kzalloc(128, GFP_KERNEL); | 193 | * buffer = kzalloc(128, GFP_KERNEL); |
143 | * ret = hp_wmi_perform_query(0x7, 0, buffer, 128) | 194 | * ret = hp_wmi_perform_query(0x7, 0, buffer, 1, 128) |
144 | */ | 195 | */ |
145 | static int hp_wmi_perform_query(int query, int write, u32 *buffer, | 196 | static int hp_wmi_perform_query(int query, int write, void *buffer, |
146 | int buffersize) | 197 | int insize, int outsize) |
147 | { | 198 | { |
148 | struct bios_return bios_return; | 199 | struct bios_return *bios_return; |
149 | acpi_status status; | 200 | int actual_outsize; |
150 | union acpi_object *obj; | 201 | union acpi_object *obj; |
151 | struct bios_args args = { | 202 | struct bios_args args = { |
152 | .signature = 0x55434553, | 203 | .signature = 0x55434553, |
153 | .command = write ? 0x2 : 0x1, | 204 | .command = write ? 0x2 : 0x1, |
154 | .commandtype = query, | 205 | .commandtype = query, |
155 | .datasize = buffersize, | 206 | .datasize = insize, |
156 | .data = *buffer, | 207 | .data = 0, |
157 | }; | 208 | }; |
158 | struct acpi_buffer input = { sizeof(struct bios_args), &args }; | 209 | struct acpi_buffer input = { sizeof(struct bios_args), &args }; |
159 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 210 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
160 | 211 | ||
161 | status = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output); | 212 | if (WARN_ON(insize > sizeof(args.data))) |
213 | return -EINVAL; | ||
214 | memcpy(&args.data, buffer, insize); | ||
215 | |||
216 | wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output); | ||
162 | 217 | ||
163 | obj = output.pointer; | 218 | obj = output.pointer; |
164 | 219 | ||
@@ -169,10 +224,26 @@ static int hp_wmi_perform_query(int query, int write, u32 *buffer, | |||
169 | return -EINVAL; | 224 | return -EINVAL; |
170 | } | 225 | } |
171 | 226 | ||
172 | bios_return = *((struct bios_return *)obj->buffer.pointer); | 227 | bios_return = (struct bios_return *)obj->buffer.pointer; |
173 | 228 | ||
174 | memcpy(buffer, &bios_return.value, sizeof(bios_return.value)); | 229 | if (bios_return->return_code) { |
230 | if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE) | ||
231 | printk(KERN_WARNING PREFIX "query 0x%x returned " | ||
232 | "error 0x%x\n", | ||
233 | query, bios_return->return_code); | ||
234 | kfree(obj); | ||
235 | return bios_return->return_code; | ||
236 | } | ||
237 | |||
238 | if (!outsize) { | ||
239 | /* ignore output data */ | ||
240 | kfree(obj); | ||
241 | return 0; | ||
242 | } | ||
175 | 243 | ||
244 | actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return))); | ||
245 | memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize); | ||
246 | memset(buffer + actual_outsize, 0, outsize - actual_outsize); | ||
176 | kfree(obj); | 247 | kfree(obj); |
177 | return 0; | 248 | return 0; |
178 | } | 249 | } |
@@ -181,7 +252,7 @@ static int hp_wmi_display_state(void) | |||
181 | { | 252 | { |
182 | int state = 0; | 253 | int state = 0; |
183 | int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, | 254 | int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, |
184 | sizeof(state)); | 255 | sizeof(state), sizeof(state)); |
185 | if (ret) | 256 | if (ret) |
186 | return -EINVAL; | 257 | return -EINVAL; |
187 | return state; | 258 | return state; |
@@ -191,7 +262,7 @@ static int hp_wmi_hddtemp_state(void) | |||
191 | { | 262 | { |
192 | int state = 0; | 263 | int state = 0; |
193 | int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, | 264 | int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, |
194 | sizeof(state)); | 265 | sizeof(state), sizeof(state)); |
195 | if (ret) | 266 | if (ret) |
196 | return -EINVAL; | 267 | return -EINVAL; |
197 | return state; | 268 | return state; |
@@ -201,7 +272,7 @@ static int hp_wmi_als_state(void) | |||
201 | { | 272 | { |
202 | int state = 0; | 273 | int state = 0; |
203 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, | 274 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, |
204 | sizeof(state)); | 275 | sizeof(state), sizeof(state)); |
205 | if (ret) | 276 | if (ret) |
206 | return -EINVAL; | 277 | return -EINVAL; |
207 | return state; | 278 | return state; |
@@ -211,7 +282,7 @@ static int hp_wmi_dock_state(void) | |||
211 | { | 282 | { |
212 | int state = 0; | 283 | int state = 0; |
213 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, | 284 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, |
214 | sizeof(state)); | 285 | sizeof(state), sizeof(state)); |
215 | 286 | ||
216 | if (ret) | 287 | if (ret) |
217 | return -EINVAL; | 288 | return -EINVAL; |
@@ -223,7 +294,7 @@ static int hp_wmi_tablet_state(void) | |||
223 | { | 294 | { |
224 | int state = 0; | 295 | int state = 0; |
225 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, | 296 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, |
226 | sizeof(state)); | 297 | sizeof(state), sizeof(state)); |
227 | if (ret) | 298 | if (ret) |
228 | return ret; | 299 | return ret; |
229 | 300 | ||
@@ -237,7 +308,7 @@ static int hp_wmi_set_block(void *data, bool blocked) | |||
237 | int ret; | 308 | int ret; |
238 | 309 | ||
239 | ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, | 310 | ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, |
240 | &query, sizeof(query)); | 311 | &query, sizeof(query), 0); |
241 | if (ret) | 312 | if (ret) |
242 | return -EINVAL; | 313 | return -EINVAL; |
243 | return 0; | 314 | return 0; |
@@ -252,7 +323,8 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) | |||
252 | int wireless = 0; | 323 | int wireless = 0; |
253 | int mask; | 324 | int mask; |
254 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, | 325 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, |
255 | &wireless, sizeof(wireless)); | 326 | &wireless, sizeof(wireless), |
327 | sizeof(wireless)); | ||
256 | /* TBD: Pass error */ | 328 | /* TBD: Pass error */ |
257 | 329 | ||
258 | mask = 0x200 << (r * 8); | 330 | mask = 0x200 << (r * 8); |
@@ -268,7 +340,8 @@ static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) | |||
268 | int wireless = 0; | 340 | int wireless = 0; |
269 | int mask; | 341 | int mask; |
270 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, | 342 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, |
271 | &wireless, sizeof(wireless)); | 343 | &wireless, sizeof(wireless), |
344 | sizeof(wireless)); | ||
272 | /* TBD: Pass error */ | 345 | /* TBD: Pass error */ |
273 | 346 | ||
274 | mask = 0x800 << (r * 8); | 347 | mask = 0x800 << (r * 8); |
@@ -279,6 +352,51 @@ static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) | |||
279 | return true; | 352 | return true; |
280 | } | 353 | } |
281 | 354 | ||
355 | static int hp_wmi_rfkill2_set_block(void *data, bool blocked) | ||
356 | { | ||
357 | int rfkill_id = (int)(long)data; | ||
358 | char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked }; | ||
359 | |||
360 | if (hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 1, | ||
361 | buffer, sizeof(buffer), 0)) | ||
362 | return -EINVAL; | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static const struct rfkill_ops hp_wmi_rfkill2_ops = { | ||
367 | .set_block = hp_wmi_rfkill2_set_block, | ||
368 | }; | ||
369 | |||
370 | static int hp_wmi_rfkill2_refresh(void) | ||
371 | { | ||
372 | int err, i; | ||
373 | struct bios_rfkill2_state state; | ||
374 | |||
375 | err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state, | ||
376 | 0, sizeof(state)); | ||
377 | if (err) | ||
378 | return err; | ||
379 | |||
380 | for (i = 0; i < rfkill2_count; i++) { | ||
381 | int num = rfkill2[i].num; | ||
382 | struct bios_rfkill2_device_state *devstate; | ||
383 | devstate = &state.device[num]; | ||
384 | |||
385 | if (num >= state.count || | ||
386 | devstate->rfkill_id != rfkill2[i].id) { | ||
387 | printk(KERN_WARNING PREFIX "power configuration of " | ||
388 | "the wireless devices unexpectedly changed\n"); | ||
389 | continue; | ||
390 | } | ||
391 | |||
392 | rfkill_set_states(rfkill2[i].rfkill, | ||
393 | IS_SWBLOCKED(devstate->power), | ||
394 | IS_HWBLOCKED(devstate->power)); | ||
395 | } | ||
396 | |||
397 | return 0; | ||
398 | } | ||
399 | |||
282 | static ssize_t show_display(struct device *dev, struct device_attribute *attr, | 400 | static ssize_t show_display(struct device *dev, struct device_attribute *attr, |
283 | char *buf) | 401 | char *buf) |
284 | { | 402 | { |
@@ -329,7 +447,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr, | |||
329 | { | 447 | { |
330 | u32 tmp = simple_strtoul(buf, NULL, 10); | 448 | u32 tmp = simple_strtoul(buf, NULL, 10); |
331 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, | 449 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, |
332 | sizeof(tmp)); | 450 | sizeof(tmp), sizeof(tmp)); |
333 | if (ret) | 451 | if (ret) |
334 | return -EINVAL; | 452 | return -EINVAL; |
335 | 453 | ||
@@ -402,6 +520,7 @@ static void hp_wmi_notify(u32 value, void *context) | |||
402 | case HPWMI_BEZEL_BUTTON: | 520 | case HPWMI_BEZEL_BUTTON: |
403 | ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, | 521 | ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, |
404 | &key_code, | 522 | &key_code, |
523 | sizeof(key_code), | ||
405 | sizeof(key_code)); | 524 | sizeof(key_code)); |
406 | if (ret) | 525 | if (ret) |
407 | break; | 526 | break; |
@@ -412,6 +531,11 @@ static void hp_wmi_notify(u32 value, void *context) | |||
412 | key_code); | 531 | key_code); |
413 | break; | 532 | break; |
414 | case HPWMI_WIRELESS: | 533 | case HPWMI_WIRELESS: |
534 | if (rfkill2_count) { | ||
535 | hp_wmi_rfkill2_refresh(); | ||
536 | break; | ||
537 | } | ||
538 | |||
415 | if (wifi_rfkill) | 539 | if (wifi_rfkill) |
416 | rfkill_set_states(wifi_rfkill, | 540 | rfkill_set_states(wifi_rfkill, |
417 | hp_wmi_get_sw_state(HPWMI_WIFI), | 541 | hp_wmi_get_sw_state(HPWMI_WIFI), |
@@ -502,32 +626,16 @@ static void cleanup_sysfs(struct platform_device *device) | |||
502 | device_remove_file(&device->dev, &dev_attr_tablet); | 626 | device_remove_file(&device->dev, &dev_attr_tablet); |
503 | } | 627 | } |
504 | 628 | ||
505 | static int __devinit hp_wmi_bios_setup(struct platform_device *device) | 629 | static int __devinit hp_wmi_rfkill_setup(struct platform_device *device) |
506 | { | 630 | { |
507 | int err; | 631 | int err; |
508 | int wireless = 0; | 632 | int wireless = 0; |
509 | 633 | ||
510 | err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless, | 634 | err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless, |
511 | sizeof(wireless)); | 635 | sizeof(wireless), sizeof(wireless)); |
512 | if (err) | 636 | if (err) |
513 | return err; | 637 | return err; |
514 | 638 | ||
515 | err = device_create_file(&device->dev, &dev_attr_display); | ||
516 | if (err) | ||
517 | goto add_sysfs_error; | ||
518 | err = device_create_file(&device->dev, &dev_attr_hddtemp); | ||
519 | if (err) | ||
520 | goto add_sysfs_error; | ||
521 | err = device_create_file(&device->dev, &dev_attr_als); | ||
522 | if (err) | ||
523 | goto add_sysfs_error; | ||
524 | err = device_create_file(&device->dev, &dev_attr_dock); | ||
525 | if (err) | ||
526 | goto add_sysfs_error; | ||
527 | err = device_create_file(&device->dev, &dev_attr_tablet); | ||
528 | if (err) | ||
529 | goto add_sysfs_error; | ||
530 | |||
531 | if (wireless & 0x1) { | 639 | if (wireless & 0x1) { |
532 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, | 640 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, |
533 | RFKILL_TYPE_WLAN, | 641 | RFKILL_TYPE_WLAN, |
@@ -573,14 +681,131 @@ static int __devinit hp_wmi_bios_setup(struct platform_device *device) | |||
573 | return 0; | 681 | return 0; |
574 | register_wwan_err: | 682 | register_wwan_err: |
575 | rfkill_destroy(wwan_rfkill); | 683 | rfkill_destroy(wwan_rfkill); |
684 | wwan_rfkill = NULL; | ||
576 | if (bluetooth_rfkill) | 685 | if (bluetooth_rfkill) |
577 | rfkill_unregister(bluetooth_rfkill); | 686 | rfkill_unregister(bluetooth_rfkill); |
578 | register_bluetooth_error: | 687 | register_bluetooth_error: |
579 | rfkill_destroy(bluetooth_rfkill); | 688 | rfkill_destroy(bluetooth_rfkill); |
689 | bluetooth_rfkill = NULL; | ||
580 | if (wifi_rfkill) | 690 | if (wifi_rfkill) |
581 | rfkill_unregister(wifi_rfkill); | 691 | rfkill_unregister(wifi_rfkill); |
582 | register_wifi_error: | 692 | register_wifi_error: |
583 | rfkill_destroy(wifi_rfkill); | 693 | rfkill_destroy(wifi_rfkill); |
694 | wifi_rfkill = NULL; | ||
695 | return err; | ||
696 | } | ||
697 | |||
698 | static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device) | ||
699 | { | ||
700 | int err, i; | ||
701 | struct bios_rfkill2_state state; | ||
702 | err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state, | ||
703 | 0, sizeof(state)); | ||
704 | if (err) | ||
705 | return err; | ||
706 | |||
707 | if (state.count > HPWMI_MAX_RFKILL2_DEVICES) { | ||
708 | printk(KERN_WARNING PREFIX "unable to parse 0x1b query output\n"); | ||
709 | return -EINVAL; | ||
710 | } | ||
711 | |||
712 | for (i = 0; i < state.count; i++) { | ||
713 | struct rfkill *rfkill; | ||
714 | enum rfkill_type type; | ||
715 | char *name; | ||
716 | switch (state.device[i].radio_type) { | ||
717 | case HPWMI_WIFI: | ||
718 | type = RFKILL_TYPE_WLAN; | ||
719 | name = "hp-wifi"; | ||
720 | break; | ||
721 | case HPWMI_BLUETOOTH: | ||
722 | type = RFKILL_TYPE_BLUETOOTH; | ||
723 | name = "hp-bluetooth"; | ||
724 | break; | ||
725 | case HPWMI_WWAN: | ||
726 | type = RFKILL_TYPE_WWAN; | ||
727 | name = "hp-wwan"; | ||
728 | break; | ||
729 | default: | ||
730 | printk(KERN_WARNING PREFIX "unknown device type 0x%x\n", | ||
731 | state.device[i].radio_type); | ||
732 | continue; | ||
733 | } | ||
734 | |||
735 | if (!state.device[i].vendor_id) { | ||
736 | printk(KERN_WARNING PREFIX "zero device %d while %d " | ||
737 | "reported\n", i, state.count); | ||
738 | continue; | ||
739 | } | ||
740 | |||
741 | rfkill = rfkill_alloc(name, &device->dev, type, | ||
742 | &hp_wmi_rfkill2_ops, (void *)(long)i); | ||
743 | if (!rfkill) { | ||
744 | err = -ENOMEM; | ||
745 | goto fail; | ||
746 | } | ||
747 | |||
748 | rfkill2[rfkill2_count].id = state.device[i].rfkill_id; | ||
749 | rfkill2[rfkill2_count].num = i; | ||
750 | rfkill2[rfkill2_count].rfkill = rfkill; | ||
751 | |||
752 | rfkill_init_sw_state(rfkill, | ||
753 | IS_SWBLOCKED(state.device[i].power)); | ||
754 | rfkill_set_hw_state(rfkill, | ||
755 | IS_HWBLOCKED(state.device[i].power)); | ||
756 | |||
757 | if (!(state.device[i].power & HPWMI_POWER_BIOS)) | ||
758 | printk(KERN_INFO PREFIX "device %s blocked by BIOS\n", | ||
759 | name); | ||
760 | |||
761 | err = rfkill_register(rfkill); | ||
762 | if (err) { | ||
763 | rfkill_destroy(rfkill); | ||
764 | goto fail; | ||
765 | } | ||
766 | |||
767 | rfkill2_count++; | ||
768 | } | ||
769 | |||
770 | return 0; | ||
771 | fail: | ||
772 | for (; rfkill2_count > 0; rfkill2_count--) { | ||
773 | rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill); | ||
774 | rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill); | ||
775 | } | ||
776 | return err; | ||
777 | } | ||
778 | |||
779 | static int __devinit hp_wmi_bios_setup(struct platform_device *device) | ||
780 | { | ||
781 | int err; | ||
782 | |||
783 | /* clear detected rfkill devices */ | ||
784 | wifi_rfkill = NULL; | ||
785 | bluetooth_rfkill = NULL; | ||
786 | wwan_rfkill = NULL; | ||
787 | rfkill2_count = 0; | ||
788 | |||
789 | if (hp_wmi_rfkill_setup(device)) | ||
790 | hp_wmi_rfkill2_setup(device); | ||
791 | |||
792 | err = device_create_file(&device->dev, &dev_attr_display); | ||
793 | if (err) | ||
794 | goto add_sysfs_error; | ||
795 | err = device_create_file(&device->dev, &dev_attr_hddtemp); | ||
796 | if (err) | ||
797 | goto add_sysfs_error; | ||
798 | err = device_create_file(&device->dev, &dev_attr_als); | ||
799 | if (err) | ||
800 | goto add_sysfs_error; | ||
801 | err = device_create_file(&device->dev, &dev_attr_dock); | ||
802 | if (err) | ||
803 | goto add_sysfs_error; | ||
804 | err = device_create_file(&device->dev, &dev_attr_tablet); | ||
805 | if (err) | ||
806 | goto add_sysfs_error; | ||
807 | return 0; | ||
808 | |||
584 | add_sysfs_error: | 809 | add_sysfs_error: |
585 | cleanup_sysfs(device); | 810 | cleanup_sysfs(device); |
586 | return err; | 811 | return err; |
@@ -588,8 +813,14 @@ add_sysfs_error: | |||
588 | 813 | ||
589 | static int __exit hp_wmi_bios_remove(struct platform_device *device) | 814 | static int __exit hp_wmi_bios_remove(struct platform_device *device) |
590 | { | 815 | { |
816 | int i; | ||
591 | cleanup_sysfs(device); | 817 | cleanup_sysfs(device); |
592 | 818 | ||
819 | for (i = 0; i < rfkill2_count; i++) { | ||
820 | rfkill_unregister(rfkill2[i].rfkill); | ||
821 | rfkill_destroy(rfkill2[i].rfkill); | ||
822 | } | ||
823 | |||
593 | if (wifi_rfkill) { | 824 | if (wifi_rfkill) { |
594 | rfkill_unregister(wifi_rfkill); | 825 | rfkill_unregister(wifi_rfkill); |
595 | rfkill_destroy(wifi_rfkill); | 826 | rfkill_destroy(wifi_rfkill); |
@@ -622,6 +853,9 @@ static int hp_wmi_resume_handler(struct device *device) | |||
622 | input_sync(hp_wmi_input_dev); | 853 | input_sync(hp_wmi_input_dev); |
623 | } | 854 | } |
624 | 855 | ||
856 | if (rfkill2_count) | ||
857 | hp_wmi_rfkill2_refresh(); | ||
858 | |||
625 | if (wifi_rfkill) | 859 | if (wifi_rfkill) |
626 | rfkill_set_states(wifi_rfkill, | 860 | rfkill_set_states(wifi_rfkill, |
627 | hp_wmi_get_sw_state(HPWMI_WIFI), | 861 | hp_wmi_get_sw_state(HPWMI_WIFI), |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 114d95247cdf..21b101899bae 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -459,6 +459,8 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) | |||
459 | if (test_bit(vpc_bit, &vpc1)) { | 459 | if (test_bit(vpc_bit, &vpc1)) { |
460 | if (vpc_bit == 9) | 460 | if (vpc_bit == 9) |
461 | ideapad_sync_rfk_state(adevice); | 461 | ideapad_sync_rfk_state(adevice); |
462 | else if (vpc_bit == 4) | ||
463 | read_ec_data(handle, 0x12, &vpc2); | ||
462 | else | 464 | else |
463 | ideapad_input_report(priv, vpc_bit); | 465 | ideapad_input_report(priv, vpc_bit); |
464 | } | 466 | } |
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 1294a39373ba..85c8ad43c0c5 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c | |||
@@ -1111,7 +1111,7 @@ static int ips_monitor(void *data) | |||
1111 | last_msecs = jiffies_to_msecs(jiffies); | 1111 | last_msecs = jiffies_to_msecs(jiffies); |
1112 | expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); | 1112 | expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); |
1113 | 1113 | ||
1114 | __set_current_state(TASK_UNINTERRUPTIBLE); | 1114 | __set_current_state(TASK_INTERRUPTIBLE); |
1115 | mod_timer(&timer, expire); | 1115 | mod_timer(&timer, expire); |
1116 | schedule(); | 1116 | schedule(); |
1117 | 1117 | ||
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c new file mode 100644 index 000000000000..213e79ba68d5 --- /dev/null +++ b/drivers/platform/x86/intel_mid_powerbtn.c | |||
@@ -0,0 +1,148 @@ | |||
1 | /* | ||
2 | * Power button driver for Medfield. | ||
3 | * | ||
4 | * Copyright (C) 2010 Intel Corp | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; version 2 of the License. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/input.h> | ||
26 | #include <asm/intel_scu_ipc.h> | ||
27 | |||
28 | #define DRIVER_NAME "msic_power_btn" | ||
29 | |||
30 | #define MSIC_IRQ_STAT 0x02 | ||
31 | #define MSIC_IRQ_PB (1 << 0) | ||
32 | #define MSIC_PB_CONFIG 0x3e | ||
33 | #define MSIC_PB_STATUS 0x3f | ||
34 | #define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */ | ||
35 | |||
36 | struct mfld_pb_priv { | ||
37 | struct input_dev *input; | ||
38 | unsigned int irq; | ||
39 | }; | ||
40 | |||
41 | static irqreturn_t mfld_pb_isr(int irq, void *dev_id) | ||
42 | { | ||
43 | struct mfld_pb_priv *priv = dev_id; | ||
44 | int ret; | ||
45 | u8 pbstat; | ||
46 | |||
47 | ret = intel_scu_ipc_ioread8(MSIC_PB_STATUS, &pbstat); | ||
48 | if (ret < 0) | ||
49 | return IRQ_HANDLED; | ||
50 | |||
51 | input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL)); | ||
52 | input_sync(priv->input); | ||
53 | |||
54 | return IRQ_HANDLED; | ||
55 | } | ||
56 | |||
57 | static int __devinit mfld_pb_probe(struct platform_device *pdev) | ||
58 | { | ||
59 | struct mfld_pb_priv *priv; | ||
60 | struct input_dev *input; | ||
61 | int irq; | ||
62 | int error; | ||
63 | |||
64 | irq = platform_get_irq(pdev, 0); | ||
65 | if (irq < 0) | ||
66 | return -EINVAL; | ||
67 | |||
68 | priv = kzalloc(sizeof(struct mfld_pb_priv), GFP_KERNEL); | ||
69 | input = input_allocate_device(); | ||
70 | if (!priv || !input) { | ||
71 | error = -ENOMEM; | ||
72 | goto err_free_mem; | ||
73 | } | ||
74 | |||
75 | priv->input = input; | ||
76 | priv->irq = irq; | ||
77 | |||
78 | input->name = pdev->name; | ||
79 | input->phys = "power-button/input0"; | ||
80 | input->id.bustype = BUS_HOST; | ||
81 | input->dev.parent = &pdev->dev; | ||
82 | |||
83 | input_set_capability(input, EV_KEY, KEY_POWER); | ||
84 | |||
85 | error = request_threaded_irq(priv->irq, NULL, mfld_pb_isr, | ||
86 | 0, DRIVER_NAME, priv); | ||
87 | if (error) { | ||
88 | dev_err(&pdev->dev, | ||
89 | "unable to request irq %d for mfld power button\n", | ||
90 | irq); | ||
91 | goto err_free_mem; | ||
92 | } | ||
93 | |||
94 | error = input_register_device(input); | ||
95 | if (error) { | ||
96 | dev_err(&pdev->dev, | ||
97 | "unable to register input dev, error %d\n", error); | ||
98 | goto err_free_irq; | ||
99 | } | ||
100 | |||
101 | platform_set_drvdata(pdev, priv); | ||
102 | return 0; | ||
103 | |||
104 | err_free_irq: | ||
105 | free_irq(priv->irq, priv); | ||
106 | err_free_mem: | ||
107 | input_free_device(input); | ||
108 | kfree(priv); | ||
109 | return error; | ||
110 | } | ||
111 | |||
112 | static int __devexit mfld_pb_remove(struct platform_device *pdev) | ||
113 | { | ||
114 | struct mfld_pb_priv *priv = platform_get_drvdata(pdev); | ||
115 | |||
116 | free_irq(priv->irq, priv); | ||
117 | input_unregister_device(priv->input); | ||
118 | kfree(priv); | ||
119 | |||
120 | platform_set_drvdata(pdev, NULL); | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static struct platform_driver mfld_pb_driver = { | ||
125 | .driver = { | ||
126 | .name = DRIVER_NAME, | ||
127 | .owner = THIS_MODULE, | ||
128 | }, | ||
129 | .probe = mfld_pb_probe, | ||
130 | .remove = __devexit_p(mfld_pb_remove), | ||
131 | }; | ||
132 | |||
133 | static int __init mfld_pb_init(void) | ||
134 | { | ||
135 | return platform_driver_register(&mfld_pb_driver); | ||
136 | } | ||
137 | module_init(mfld_pb_init); | ||
138 | |||
139 | static void __exit mfld_pb_exit(void) | ||
140 | { | ||
141 | platform_driver_unregister(&mfld_pb_driver); | ||
142 | } | ||
143 | module_exit(mfld_pb_exit); | ||
144 | |||
145 | MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>"); | ||
146 | MODULE_DESCRIPTION("Intel Medfield Power Button Driver"); | ||
147 | MODULE_LICENSE("GPL v2"); | ||
148 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
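The ISR above is the whole of the driver's runtime path: a threaded IRQ handler reads the button level over SCU IPC and forwards it to the input layer. A minimal sketch of that same pattern, with the IPC read replaced by a hypothetical read_button_level() stub so the shape stands on its own (this is an illustration, not part of the driver):

#include <linux/input.h>
#include <linux/interrupt.h>

/* Hypothetical stand-in for intel_scu_ipc_ioread8(MSIC_PB_STATUS, ...). */
static int read_button_level(void)
{
	return 1;	/* stub: pretend the button is released */
}

static irqreturn_t example_pb_isr(int irq, void *dev_id)
{
	struct input_dev *input = dev_id;
	int level = read_button_level();	/* 1 = released, 0 = pressed */

	if (level < 0)				/* read failed: report nothing */
		return IRQ_HANDLED;

	/* As in mfld_pb_isr(): the EV_KEY value is the inverted level bit. */
	input_event(input, EV_KEY, KEY_POWER, !level);
	input_sync(input);
	return IRQ_HANDLED;
}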
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c new file mode 100644 index 000000000000..6c12db503161 --- /dev/null +++ b/drivers/platform/x86/intel_mid_thermal.c | |||
@@ -0,0 +1,576 @@ | |||
1 | /* | ||
2 | * intel_mid_thermal.c - Intel MID platform thermal driver | ||
3 | * | ||
4 | * Copyright (C) 2011 Intel Corporation | ||
5 | * | ||
6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
22 | * Author: Durgadoss R <durgadoss.r@intel.com> | ||
23 | */ | ||
24 | |||
25 | #define pr_fmt(fmt) "intel_mid_thermal: " fmt | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/err.h> | ||
30 | #include <linux/param.h> | ||
31 | #include <linux/device.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/pm.h> | ||
35 | #include <linux/thermal.h> | ||
36 | |||
37 | #include <asm/intel_scu_ipc.h> | ||
38 | |||
39 | /* Number of thermal sensors */ | ||
40 | #define MSIC_THERMAL_SENSORS 4 | ||
41 | |||
42 | /* ADC1 - thermal registers */ | ||
43 | #define MSIC_THERM_ADC1CNTL1 0x1C0 | ||
44 | #define MSIC_ADC_ENBL 0x10 | ||
45 | #define MSIC_ADC_START 0x08 | ||
46 | |||
47 | #define MSIC_THERM_ADC1CNTL3 0x1C2 | ||
48 | #define MSIC_ADCTHERM_ENBL 0x04 | ||
49 | #define MSIC_ADCRRDATA_ENBL 0x05 | ||
50 | #define MSIC_CHANL_MASK_VAL 0x0F | ||
51 | |||
52 | #define MSIC_STOPBIT_MASK 16 | ||
53 | #define MSIC_ADCTHERM_MASK 4 | ||
54 | #define ADC_CHANLS_MAX 15 /* Number of ADC channels */ | ||
55 | #define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS) | ||
56 | |||
57 | /* ADC channel code values */ | ||
58 | #define SKIN_SENSOR0_CODE 0x08 | ||
59 | #define SKIN_SENSOR1_CODE 0x09 | ||
60 | #define SYS_SENSOR_CODE 0x0A | ||
61 | #define MSIC_DIE_SENSOR_CODE 0x03 | ||
62 | |||
63 | #define SKIN_THERM_SENSOR0 0 | ||
64 | #define SKIN_THERM_SENSOR1 1 | ||
65 | #define SYS_THERM_SENSOR2 2 | ||
66 | #define MSIC_DIE_THERM_SENSOR3 3 | ||
67 | |||
68 | /* ADC code range */ | ||
69 | #define ADC_MAX 977 | ||
70 | #define ADC_MIN 162 | ||
71 | #define ADC_VAL0C 887 | ||
72 | #define ADC_VAL20C 720 | ||
73 | #define ADC_VAL40C 508 | ||
74 | #define ADC_VAL60C 315 | ||
75 | |||
76 | /* ADC base addresses */ | ||
77 | #define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */ | ||
78 | #define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */ | ||
79 | |||
80 | /* MSIC die attributes */ | ||
81 | #define MSIC_DIE_ADC_MIN 488 | ||
82 | #define MSIC_DIE_ADC_MAX 1004 | ||
83 | |||
84 | /* This holds the index of the first free ADC channel, | ||
85 | * among the 15 channels | ||
86 | */ | ||
87 | static int channel_index; | ||
88 | |||
89 | struct platform_info { | ||
90 | struct platform_device *pdev; | ||
91 | struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS]; | ||
92 | }; | ||
93 | |||
94 | struct thermal_device_info { | ||
95 | unsigned int chnl_addr; | ||
96 | int direct; | ||
97 | /* This holds the current temperature in millidegree celsius */ | ||
98 | long curr_temp; | ||
99 | }; | ||
100 | |||
101 | /** | ||
102 | * to_msic_die_temp - converts adc_val to msic_die temperature | ||
103 | * @adc_val: ADC value to be converted | ||
104 | * | ||
105 | * Can sleep | ||
106 | */ | ||
107 | static int to_msic_die_temp(uint16_t adc_val) | ||
108 | { | ||
109 | return (368 * (adc_val) / 1000) - 220; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * is_valid_adc - checks whether the adc code is within the defined range | ||
114 | * @min: minimum value for the sensor | ||
115 | * @max: maximum value for the sensor | ||
116 | * | ||
117 | * Can sleep | ||
118 | */ | ||
119 | static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max) | ||
120 | { | ||
121 | return (adc_val >= min) && (adc_val <= max); | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * adc_to_temp - converts the ADC code to temperature in C | ||
126 | * @direct: true if this channel is a direct index | ||
127 | * @adc_val: the adc_val that needs to be converted | ||
128 | * @tp: temperature return value | ||
129 | * | ||
130 | * Linear approximation is used to convert the skin adc value into temperature. | ||
131 | * This technique is used to avoid a very long look-up table to get | ||
132 | * the appropriate temp value from the ADC value. | ||
133 | * The adc code vs sensor temp curve is split into five parts | ||
134 | * to achieve a very close approximation of the temperature with less | ||
135 | * than 0.5C error. | ||
136 | */ | ||
137 | static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp) | ||
138 | { | ||
139 | int temp; | ||
140 | |||
141 | /* Direct conversion for die temperature */ | ||
142 | if (direct) { | ||
143 | if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) { | ||
144 | *tp = to_msic_die_temp(adc_val) * 1000; | ||
145 | return 0; | ||
146 | } | ||
147 | return -ERANGE; | ||
148 | } | ||
149 | |||
150 | if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX)) | ||
151 | return -ERANGE; | ||
152 | |||
153 | /* Linear approximation for skin temperature */ | ||
154 | if (adc_val > ADC_VAL0C) | ||
155 | temp = 177 - (adc_val/5); | ||
156 | else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C)) | ||
157 | temp = 111 - (adc_val/8); | ||
158 | else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C)) | ||
159 | temp = 92 - (adc_val/10); | ||
160 | else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C)) | ||
161 | temp = 91 - (adc_val/10); | ||
162 | else | ||
163 | temp = 112 - (adc_val/6); | ||
164 | |||
165 | /* Convert temperature in celsius to milli degree celsius */ | ||
166 | *tp = temp * 1000; | ||
167 | return 0; | ||
168 | } | ||
169 | |||
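A worked pass through the mapping above, with input codes chosen purely for illustration:

unsigned long tp;

/* Skin path: 508 < 600 <= 720, third segment, temp = 92 - 600/10 = 32. */
adc_to_temp(0, 600, &tp);	/* returns 0, tp == 32000 (millidegrees C) */

/* Die path: to_msic_die_temp(700) = 368 * 700 / 1000 - 220 = 37. */
adc_to_temp(1, 700, &tp);	/* returns 0, tp == 37000 */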
170 | /** | ||
171 | * mid_read_temp - read sensors for temperature | ||
172 | * @temp: holds the current temperature for the sensor after reading | ||
173 | * | ||
174 | * reads the adc_code from the channel and converts it to real | ||
175 | * temperature. The converted value is stored in temp. | ||
176 | * | ||
177 | * Can sleep | ||
178 | */ | ||
179 | static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp) | ||
180 | { | ||
181 | struct thermal_device_info *td_info = tzd->devdata; | ||
182 | uint16_t adc_val, addr; | ||
183 | uint8_t data = 0; | ||
184 | int ret; | ||
185 | unsigned long curr_temp; | ||
186 | |||
187 | |||
188 | addr = td_info->chnl_addr; | ||
189 | |||
190 | /* Enable the msic for conversion before reading */ | ||
191 | ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL); | ||
192 | if (ret) | ||
193 | return ret; | ||
194 | |||
195 | /* Re-toggle the RRDATARD bit (temporary workaround) */ | ||
196 | ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL); | ||
197 | if (ret) | ||
198 | return ret; | ||
199 | |||
200 | /* Read the higher bits of data */ | ||
201 | ret = intel_scu_ipc_ioread8(addr, &data); | ||
202 | if (ret) | ||
203 | return ret; | ||
204 | |||
205 | /* Shift bits to accommodate the lower two data bits */ | ||
206 | adc_val = (data << 2); | ||
207 | addr++; | ||
208 | |||
209 | ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */ | ||
210 | if (ret) | ||
211 | return ret; | ||
212 | |||
213 | /* Add the lower two bits to the higher bits */ | ||
214 | data &= 03; | ||
215 | adc_val += data; | ||
216 | |||
217 | /* Convert ADC value to temperature */ | ||
218 | ret = adc_to_temp(td_info->direct, adc_val, &curr_temp); | ||
219 | if (ret == 0) | ||
220 | *temp = td_info->curr_temp = curr_temp; | ||
221 | return ret; | ||
222 | } | ||
223 | |||
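mid_read_temp() rebuilds a 10-bit sample from two consecutive registers: the first read supplies the top eight bits, the second the bottom two. A hedged sketch of just that step (the helper name and sample values are illustrative, not from the driver):

/* hi = contents of the data register, lo = contents of the next register */
static u16 msic_adc_sample(u8 hi, u8 lo)
{
	/* e.g. hi = 0x96, lo = 0x02 -> (0x96 << 2) | 0x2 = 602 */
	return ((u16)hi << 2) | (lo & 0x3);
}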
224 | /** | ||
225 | * configure_adc - enables/disables the ADC for conversion | ||
226 | * @val: zero disables the ADC, non-zero enables it | ||
227 | * | ||
228 | * Enable/Disable the ADC depending on the argument | ||
229 | * | ||
230 | * Can sleep | ||
231 | */ | ||
232 | static int configure_adc(int val) | ||
233 | { | ||
234 | int ret; | ||
235 | uint8_t data; | ||
236 | |||
237 | ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data); | ||
238 | if (ret) | ||
239 | return ret; | ||
240 | |||
241 | if (val) { | ||
242 | /* Enable and start the ADC */ | ||
243 | data |= (MSIC_ADC_ENBL | MSIC_ADC_START); | ||
244 | } else { | ||
245 | /* Just stop the ADC */ | ||
246 | data &= (~MSIC_ADC_START); | ||
247 | } | ||
248 | |||
249 | return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data); | ||
250 | } | ||
251 | |||
252 | /** | ||
253 | * set_up_therm_channel - enable thermal channel for conversion | ||
254 | * @base_addr: address of the first free msic ADC channel | ||
255 | * | ||
256 | * Enable all the sensor channels for conversion | ||
257 | * | ||
258 | * Can sleep | ||
259 | */ | ||
260 | static int set_up_therm_channel(u16 base_addr) | ||
261 | { | ||
262 | int ret; | ||
263 | |||
264 | /* Enable all the sensor channels */ | ||
265 | ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE); | ||
266 | if (ret) | ||
267 | return ret; | ||
268 | |||
269 | ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE); | ||
270 | if (ret) | ||
271 | return ret; | ||
272 | |||
273 | ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE); | ||
274 | if (ret) | ||
275 | return ret; | ||
276 | |||
277 | /* Since this is the last channel, set the stop bit | ||
278 | to 1 by ORing the DIE_SENSOR_CODE with 0x10 */ | ||
279 | ret = intel_scu_ipc_iowrite8(base_addr + 3, | ||
280 | (MSIC_DIE_SENSOR_CODE | 0x10)); | ||
281 | if (ret) | ||
282 | return ret; | ||
283 | |||
284 | /* Enable ADC and start it */ | ||
285 | return configure_adc(1); | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * reset_stopbit - sets the stop bit to 0 on the given channel | ||
290 | * @addr: address of the channel | ||
291 | * | ||
292 | * Can sleep | ||
293 | */ | ||
294 | static int reset_stopbit(uint16_t addr) | ||
295 | { | ||
296 | int ret; | ||
297 | uint8_t data; | ||
298 | ret = intel_scu_ipc_ioread8(addr, &data); | ||
299 | if (ret) | ||
300 | return ret; | ||
301 | /* Set the stop bit to zero */ | ||
302 | return intel_scu_ipc_iowrite8(addr, (data & 0xEF)); | ||
303 | } | ||
304 | |||
305 | /** | ||
306 | * find_free_channel - finds an empty channel for conversion | ||
307 | * | ||
308 | * If the ADC is not enabled then start using 0th channel | ||
309 | * itself. Otherwise find an empty channel by looking for a | ||
310 | * channel in which the stopbit is set to 1. Returns the index | ||
311 | * of the first free channel on success, or an error code. | ||
312 | * | ||
313 | * Context: can sleep | ||
314 | * | ||
315 | * FIXME: Ultimately the channel allocator will move into the intel_scu_ipc | ||
316 | * code. | ||
317 | */ | ||
318 | static int find_free_channel(void) | ||
319 | { | ||
320 | int ret; | ||
321 | int i; | ||
322 | uint8_t data; | ||
323 | |||
324 | /* check whether ADC is enabled */ | ||
325 | ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data); | ||
326 | if (ret) | ||
327 | return ret; | ||
328 | |||
329 | if ((data & MSIC_ADC_ENBL) == 0) | ||
330 | return 0; | ||
331 | |||
332 | /* ADC is already enabled; look for an empty channel */ | ||
333 | for (i = 0; i < ADC_CHANLS_MAX; i++) { | ||
334 | ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data); | ||
335 | if (ret) | ||
336 | return ret; | ||
337 | |||
338 | if (data & MSIC_STOPBIT_MASK) { | ||
339 | ret = i; | ||
340 | break; | ||
341 | } | ||
342 | } | ||
343 | return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * mid_initialize_adc - initializing the ADC | ||
348 | * @dev: our device structure | ||
349 | * | ||
350 | * Initialize the ADC for reading thermistor values. Can sleep. | ||
351 | */ | ||
352 | static int mid_initialize_adc(struct device *dev) | ||
353 | { | ||
354 | u8 data; | ||
355 | u16 base_addr; | ||
356 | int ret; | ||
357 | |||
358 | /* | ||
359 | * Ensure that adctherm is disabled before we | ||
360 | * initialize the ADC | ||
361 | */ | ||
362 | ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data); | ||
363 | if (ret) | ||
364 | return ret; | ||
365 | |||
366 | if (data & MSIC_ADCTHERM_MASK) | ||
367 | dev_warn(dev, "ADCTHERM already set"); | ||
368 | |||
369 | /* Index of the first channel in which the stop bit is set */ | ||
370 | channel_index = find_free_channel(); | ||
371 | if (channel_index < 0) { | ||
372 | dev_err(dev, "No free ADC channels"); | ||
373 | return channel_index; | ||
374 | } | ||
375 | |||
376 | base_addr = ADC_CHNL_START_ADDR + channel_index; | ||
377 | |||
378 | if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) { | ||
379 | /* Reset stop bit for channels other than 0 and 12 */ | ||
380 | ret = reset_stopbit(base_addr); | ||
381 | if (ret) | ||
382 | return ret; | ||
383 | |||
384 | /* Index of the first free channel */ | ||
385 | base_addr++; | ||
386 | channel_index++; | ||
387 | } | ||
388 | |||
389 | ret = set_up_therm_channel(base_addr); | ||
390 | if (ret) { | ||
391 | dev_err(dev, "unable to enable ADC"); | ||
392 | return ret; | ||
393 | } | ||
394 | dev_dbg(dev, "ADC initialization successful"); | ||
395 | return ret; | ||
396 | } | ||
397 | |||
398 | /** | ||
399 | * initialize_sensor - allocates and initializes the per-sensor data | ||
400 | * @index: index of the sensor | ||
401 | * | ||
402 | * Context: can sleep | ||
403 | */ | ||
404 | static struct thermal_device_info *initialize_sensor(int index) | ||
405 | { | ||
406 | struct thermal_device_info *td_info = | ||
407 | kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL); | ||
408 | |||
409 | if (!td_info) | ||
410 | return NULL; | ||
411 | |||
412 | /* Set the base addr of the channel for this sensor */ | ||
413 | td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index); | ||
414 | /* Sensor 3 is direct conversion */ | ||
415 | if (index == 3) | ||
416 | td_info->direct = 1; | ||
417 | return td_info; | ||
418 | } | ||
419 | |||
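The chnl_addr computation above follows from the register map at the top of the file: data registers increment by 2 per channel, so sensor i of a block starting at channel_index samples at ADC_DATA_START_ADDR + 2 * (channel_index + i). An illustrative case (the channel_index value is assumed, not measured):

/*
 * Assume find_free_channel() returned channel_index == 5. The "sys"
 * sensor (index 2) then samples at
 *   0x1D4 + 2 * (5 + 2) = 0x1D4 + 0x0E = 0x1E2
 * with its low-two-bits register at 0x1E3.
 */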
420 | /** | ||
421 | * mid_thermal_resume - resume routine | ||
422 | * @pdev: platform device structure | ||
423 | * | ||
424 | * mid thermal resume: re-initializes the adc. Can sleep. | ||
425 | */ | ||
426 | static int mid_thermal_resume(struct platform_device *pdev) | ||
427 | { | ||
428 | return mid_initialize_adc(&pdev->dev); | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * mid_thermal_suspend - suspend routine | ||
433 | * @pdev: platform device structure | ||
434 | * | ||
435 | * mid thermal suspend implements the suspend functionality | ||
436 | * by stopping the ADC. Can sleep. | ||
437 | */ | ||
438 | static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg) | ||
439 | { | ||
440 | /* | ||
441 | * This just stops the ADC and does not disable it. | ||
442 | * temporary workaround until we have a generic ADC driver. | ||
443 | * If 0 is passed, it disables the ADC. | ||
444 | */ | ||
445 | return configure_adc(0); | ||
446 | } | ||
447 | |||
448 | /** | ||
449 | * read_curr_temp - reads the current temperature and stores in temp | ||
450 | * @temp: holds the current temperature value after reading | ||
451 | * | ||
452 | * Can sleep | ||
453 | */ | ||
454 | static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp) | ||
455 | { | ||
456 | WARN_ON(tzd == NULL); | ||
457 | return mid_read_temp(tzd, temp); | ||
458 | } | ||
459 | |||
460 | /* Can't be const */ | ||
461 | static struct thermal_zone_device_ops tzd_ops = { | ||
462 | .get_temp = read_curr_temp, | ||
463 | }; | ||
464 | |||
465 | |||
466 | /** | ||
467 | * mid_thermal_probe - mfld thermal initialize | ||
468 | * @pdev: platform device structure | ||
469 | * | ||
470 | * mid thermal probe initializes the hardware and registers | ||
471 | * all the sensors with the generic thermal framework. Can sleep. | ||
472 | */ | ||
473 | static int mid_thermal_probe(struct platform_device *pdev) | ||
474 | { | ||
475 | static char *name[MSIC_THERMAL_SENSORS] = { | ||
476 | "skin0", "skin1", "sys", "msicdie" | ||
477 | }; | ||
478 | |||
479 | int ret; | ||
480 | int i; | ||
481 | struct platform_info *pinfo; | ||
482 | |||
483 | pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL); | ||
484 | if (!pinfo) | ||
485 | return -ENOMEM; | ||
486 | |||
487 | /* Initializing the hardware */ | ||
488 | ret = mid_initialize_adc(&pdev->dev); | ||
489 | if (ret) { | ||
490 | dev_err(&pdev->dev, "ADC init failed"); | ||
491 | kfree(pinfo); | ||
492 | return ret; | ||
493 | } | ||
494 | |||
495 | /* Register each sensor with the generic thermal framework*/ | ||
496 | for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { | ||
497 | pinfo->tzd[i] = thermal_zone_device_register(name[i], | ||
498 | 0, initialize_sensor(i), | ||
499 | &tzd_ops, 0, 0, 0, 0); | ||
500 | if (IS_ERR(pinfo->tzd[i])) | ||
501 | goto reg_fail; | ||
502 | } | ||
503 | |||
504 | pinfo->pdev = pdev; | ||
505 | platform_set_drvdata(pdev, pinfo); | ||
506 | return 0; | ||
507 | |||
508 | reg_fail: | ||
509 | ret = PTR_ERR(pinfo->tzd[i]); | ||
510 | while (--i >= 0) | ||
511 | thermal_zone_device_unregister(pinfo->tzd[i]); | ||
512 | configure_adc(0); | ||
513 | kfree(pinfo); | ||
514 | return ret; | ||
515 | } | ||
516 | |||
517 | /** | ||
518 | * mid_thermal_remove - mfld thermal finalize | ||
519 | * @dev: platform device structure | ||
520 | * | ||
521 | * MFLD thermal remove unregisters all the sensors from the generic | ||
522 | * thermal framework. Can sleep. | ||
523 | */ | ||
524 | static int mid_thermal_remove(struct platform_device *pdev) | ||
525 | { | ||
526 | int i; | ||
527 | struct platform_info *pinfo = platform_get_drvdata(pdev); | ||
528 | |||
529 | for (i = 0; i < MSIC_THERMAL_SENSORS; i++) | ||
530 | thermal_zone_device_unregister(pinfo->tzd[i]); | ||
531 | |||
532 | platform_set_drvdata(pdev, NULL); | ||
533 | |||
534 | /* Stop the ADC */ | ||
535 | return configure_adc(0); | ||
536 | } | ||
537 | |||
538 | /********************************************************************* | ||
539 | * Driver initialization and finalization | ||
540 | *********************************************************************/ | ||
541 | |||
542 | #define DRIVER_NAME "msic_sensor" | ||
543 | |||
544 | static const struct platform_device_id therm_id_table[] = { | ||
545 | { DRIVER_NAME, 1 }, | ||
546 | { } | ||
547 | }; | ||
548 | |||
549 | static struct platform_driver mid_thermal_driver = { | ||
550 | .driver = { | ||
551 | .name = DRIVER_NAME, | ||
552 | .owner = THIS_MODULE, | ||
553 | }, | ||
554 | .probe = mid_thermal_probe, | ||
555 | .suspend = mid_thermal_suspend, | ||
556 | .resume = mid_thermal_resume, | ||
557 | .remove = __devexit_p(mid_thermal_remove), | ||
558 | .id_table = therm_id_table, | ||
559 | }; | ||
560 | |||
561 | static int __init mid_thermal_module_init(void) | ||
562 | { | ||
563 | return platform_driver_register(&mid_thermal_driver); | ||
564 | } | ||
565 | |||
566 | static void __exit mid_thermal_module_exit(void) | ||
567 | { | ||
568 | platform_driver_unregister(&mid_thermal_driver); | ||
569 | } | ||
570 | |||
571 | module_init(mid_thermal_module_init); | ||
572 | module_exit(mid_thermal_module_exit); | ||
573 | |||
574 | MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>"); | ||
575 | MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver"); | ||
576 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index 61433d492862..d653104b59cb 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c | |||
@@ -257,9 +257,11 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | |||
257 | } | 257 | } |
258 | 258 | ||
259 | for (i = 0; i < 8; i++) { | 259 | for (i = 0; i < 8; i++) { |
260 | set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, | 260 | irq_set_chip_and_handler_name(i + pg->irq_base, |
261 | handle_simple_irq, "demux"); | 261 | &pmic_irqchip, |
262 | set_irq_chip_data(i + pg->irq_base, pg); | 262 | handle_simple_irq, |
263 | "demux"); | ||
264 | irq_set_chip_data(i + pg->irq_base, pg); | ||
263 | } | 265 | } |
264 | return 0; | 266 | return 0; |
265 | err: | 267 | err: |
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c index 2b11a33325e6..bde47e9080cd 100644 --- a/drivers/platform/x86/intel_rar_register.c +++ b/drivers/platform/x86/intel_rar_register.c | |||
@@ -485,7 +485,7 @@ EXPORT_SYMBOL(rar_lock); | |||
485 | * | 485 | * |
486 | * The register_rar function is to used by other device drivers | 486 | * The register_rar function is to used by other device drivers |
487 | * to ensure that this driver is ready. As we cannot be sure of | 487 | * to ensure that this driver is ready. As we cannot be sure of |
488 | * the compile/execute order of drivers in ther kernel, it is | 488 | * the compile/execute order of drivers in the kernel, it is |
489 | * best to give this driver a callback function to call when | 489 | * best to give this driver a callback function to call when |
490 | * it is ready to give out addresses. The callback function | 490 | * it is ready to give out addresses. The callback function |
491 | * would have those steps that continue the initialization of | 491 | * would have those steps that continue the initialization of |
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index a91d510a798b..940accbe28d3 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * as published by the Free Software Foundation; version 2 | 9 | * as published by the Free Software Foundation; version 2 |
10 | * of the License. | 10 | * of the License. |
11 | * | 11 | * |
12 | * SCU runing in ARC processor communicates with other entity running in IA | 12 | * SCU running in ARC processor communicates with other entity running in IA |
13 | * core through IPC mechanism which in turn messaging between IA core ad SCU. | 13 | * core through IPC mechanism which in turn messaging between IA core ad SCU. |
14 | * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and | 14 | * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and |
15 | * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with | 15 | * SCU where IPC-2 is used between P-Unit and SCU. This driver delas with |
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 142d38579314..23fb2afda00b 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c | |||
@@ -51,6 +51,8 @@ | |||
51 | * laptop as MSI S270. YMMV. | 51 | * laptop as MSI S270. YMMV. |
52 | */ | 52 | */ |
53 | 53 | ||
54 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
55 | |||
54 | #include <linux/module.h> | 56 | #include <linux/module.h> |
55 | #include <linux/kernel.h> | 57 | #include <linux/kernel.h> |
56 | #include <linux/init.h> | 58 | #include <linux/init.h> |
@@ -60,6 +62,8 @@ | |||
60 | #include <linux/platform_device.h> | 62 | #include <linux/platform_device.h> |
61 | #include <linux/rfkill.h> | 63 | #include <linux/rfkill.h> |
62 | #include <linux/i8042.h> | 64 | #include <linux/i8042.h> |
65 | #include <linux/input.h> | ||
66 | #include <linux/input/sparse-keymap.h> | ||
63 | 67 | ||
64 | #define MSI_DRIVER_VERSION "0.5" | 68 | #define MSI_DRIVER_VERSION "0.5" |
65 | 69 | ||
@@ -78,6 +82,9 @@ | |||
78 | #define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d | 82 | #define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d |
79 | #define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0) | 83 | #define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0) |
80 | 84 | ||
85 | #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4 | ||
86 | #define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4) | ||
87 | |||
81 | static int msi_laptop_resume(struct platform_device *device); | 88 | static int msi_laptop_resume(struct platform_device *device); |
82 | 89 | ||
83 | #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f | 90 | #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f |
@@ -90,6 +97,14 @@ static int auto_brightness; | |||
90 | module_param(auto_brightness, int, 0); | 97 | module_param(auto_brightness, int, 0); |
91 | MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)"); | 98 | MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)"); |
92 | 99 | ||
100 | static const struct key_entry msi_laptop_keymap[] = { | ||
101 | {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} }, /* Touch Pad On */ | ||
102 | {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },/* Touch Pad Off */ | ||
103 | {KE_END, 0} | ||
104 | }; | ||
105 | |||
106 | static struct input_dev *msi_laptop_input_dev; | ||
107 | |||
93 | static bool old_ec_model; | 108 | static bool old_ec_model; |
94 | static int wlan_s, bluetooth_s, threeg_s; | 109 | static int wlan_s, bluetooth_s, threeg_s; |
95 | static int threeg_exists; | 110 | static int threeg_exists; |
@@ -432,8 +447,7 @@ static struct platform_device *msipf_device; | |||
432 | 447 | ||
433 | static int dmi_check_cb(const struct dmi_system_id *id) | 448 | static int dmi_check_cb(const struct dmi_system_id *id) |
434 | { | 449 | { |
435 | printk(KERN_INFO "msi-laptop: Identified laptop model '%s'.\n", | 450 | pr_info("Identified laptop model '%s'.\n", id->ident); |
436 | id->ident); | ||
437 | return 1; | 451 | return 1; |
438 | } | 452 | } |
439 | 453 | ||
@@ -605,6 +619,21 @@ static void msi_update_rfkill(struct work_struct *ignored) | |||
605 | } | 619 | } |
606 | static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill); | 620 | static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill); |
607 | 621 | ||
622 | static void msi_send_touchpad_key(struct work_struct *ignored) | ||
623 | { | ||
624 | u8 rdata; | ||
625 | int result; | ||
626 | |||
627 | result = ec_read(MSI_STANDARD_EC_TOUCHPAD_ADDRESS, &rdata); | ||
628 | if (result < 0) | ||
629 | return; | ||
630 | |||
631 | sparse_keymap_report_event(msi_laptop_input_dev, | ||
632 | (rdata & MSI_STANDARD_EC_TOUCHPAD_MASK) ? | ||
633 | KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true); | ||
634 | } | ||
635 | static DECLARE_DELAYED_WORK(msi_touchpad_work, msi_send_touchpad_key); | ||
636 | |||
608 | static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, | 637 | static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, |
609 | struct serio *port) | 638 | struct serio *port) |
610 | { | 639 | { |
@@ -613,12 +642,17 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, | |||
613 | if (str & 0x20) | 642 | if (str & 0x20) |
614 | return false; | 643 | return false; |
615 | 644 | ||
616 | /* 0x54 wwan, 0x62 bluetooth, 0x76 wlan*/ | 645 | /* 0x54 wwan, 0x62 bluetooth, 0x76 wlan, 0xE4 touchpad toggle*/ |
617 | if (unlikely(data == 0xe0)) { | 646 | if (unlikely(data == 0xe0)) { |
618 | extended = true; | 647 | extended = true; |
619 | return false; | 648 | return false; |
620 | } else if (unlikely(extended)) { | 649 | } else if (unlikely(extended)) { |
650 | extended = false; | ||
621 | switch (data) { | 651 | switch (data) { |
652 | case 0xE4: | ||
653 | schedule_delayed_work(&msi_touchpad_work, | ||
654 | round_jiffies_relative(0.5 * HZ)); | ||
655 | break; | ||
622 | case 0x54: | 656 | case 0x54: |
623 | case 0x62: | 657 | case 0x62: |
624 | case 0x76: | 658 | case 0x76: |
@@ -626,7 +660,6 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, | |||
626 | round_jiffies_relative(0.5 * HZ)); | 660 | round_jiffies_relative(0.5 * HZ)); |
627 | break; | 661 | break; |
628 | } | 662 | } |
629 | extended = false; | ||
630 | } | 663 | } |
631 | 664 | ||
632 | return false; | 665 | return false; |
@@ -731,6 +764,42 @@ static int msi_laptop_resume(struct platform_device *device) | |||
731 | return 0; | 764 | return 0; |
732 | } | 765 | } |
733 | 766 | ||
767 | static int __init msi_laptop_input_setup(void) | ||
768 | { | ||
769 | int err; | ||
770 | |||
771 | msi_laptop_input_dev = input_allocate_device(); | ||
772 | if (!msi_laptop_input_dev) | ||
773 | return -ENOMEM; | ||
774 | |||
775 | msi_laptop_input_dev->name = "MSI Laptop hotkeys"; | ||
776 | msi_laptop_input_dev->phys = "msi-laptop/input0"; | ||
777 | msi_laptop_input_dev->id.bustype = BUS_HOST; | ||
778 | |||
779 | err = sparse_keymap_setup(msi_laptop_input_dev, | ||
780 | msi_laptop_keymap, NULL); | ||
781 | if (err) | ||
782 | goto err_free_dev; | ||
783 | |||
784 | err = input_register_device(msi_laptop_input_dev); | ||
785 | if (err) | ||
786 | goto err_free_keymap; | ||
787 | |||
788 | return 0; | ||
789 | |||
790 | err_free_keymap: | ||
791 | sparse_keymap_free(msi_laptop_input_dev); | ||
792 | err_free_dev: | ||
793 | input_free_device(msi_laptop_input_dev); | ||
794 | return err; | ||
795 | } | ||
796 | |||
797 | static void msi_laptop_input_destroy(void) | ||
798 | { | ||
799 | sparse_keymap_free(msi_laptop_input_dev); | ||
800 | input_unregister_device(msi_laptop_input_dev); | ||
801 | } | ||
802 | |||
734 | static int load_scm_model_init(struct platform_device *sdev) | 803 | static int load_scm_model_init(struct platform_device *sdev) |
735 | { | 804 | { |
736 | u8 data; | 805 | u8 data; |
@@ -759,16 +828,23 @@ static int load_scm_model_init(struct platform_device *sdev) | |||
759 | if (result < 0) | 828 | if (result < 0) |
760 | goto fail_rfkill; | 829 | goto fail_rfkill; |
761 | 830 | ||
831 | /* setup input device */ | ||
832 | result = msi_laptop_input_setup(); | ||
833 | if (result) | ||
834 | goto fail_input; | ||
835 | |||
762 | result = i8042_install_filter(msi_laptop_i8042_filter); | 836 | result = i8042_install_filter(msi_laptop_i8042_filter); |
763 | if (result) { | 837 | if (result) { |
764 | printk(KERN_ERR | 838 | pr_err("Unable to install key filter\n"); |
765 | "msi-laptop: Unable to install key filter\n"); | ||
766 | goto fail_filter; | 839 | goto fail_filter; |
767 | } | 840 | } |
768 | 841 | ||
769 | return 0; | 842 | return 0; |
770 | 843 | ||
771 | fail_filter: | 844 | fail_filter: |
845 | msi_laptop_input_destroy(); | ||
846 | |||
847 | fail_input: | ||
772 | rfkill_cleanup(); | 848 | rfkill_cleanup(); |
773 | 849 | ||
774 | fail_rfkill: | 850 | fail_rfkill: |
@@ -799,7 +875,7 @@ static int __init msi_init(void) | |||
799 | /* Register backlight stuff */ | 875 | /* Register backlight stuff */ |
800 | 876 | ||
801 | if (acpi_video_backlight_support()) { | 877 | if (acpi_video_backlight_support()) { |
802 | printk(KERN_INFO "MSI: Brightness ignored, must be controlled " | 878 | pr_info("Brightness ignored, must be controlled " |
803 | "by ACPI video driver\n"); | 879 | "by ACPI video driver\n"); |
804 | } else { | 880 | } else { |
805 | struct backlight_properties props; | 881 | struct backlight_properties props; |
@@ -854,7 +930,7 @@ static int __init msi_init(void) | |||
854 | if (auto_brightness != 2) | 930 | if (auto_brightness != 2) |
855 | set_auto_brightness(auto_brightness); | 931 | set_auto_brightness(auto_brightness); |
856 | 932 | ||
857 | printk(KERN_INFO "msi-laptop: driver "MSI_DRIVER_VERSION" successfully loaded.\n"); | 933 | pr_info("driver "MSI_DRIVER_VERSION" successfully loaded.\n"); |
858 | 934 | ||
859 | return 0; | 935 | return 0; |
860 | 936 | ||
@@ -886,6 +962,7 @@ static void __exit msi_cleanup(void) | |||
886 | { | 962 | { |
887 | if (load_scm_model) { | 963 | if (load_scm_model) { |
888 | i8042_remove_filter(msi_laptop_i8042_filter); | 964 | i8042_remove_filter(msi_laptop_i8042_filter); |
965 | msi_laptop_input_destroy(); | ||
889 | cancel_delayed_work_sync(&msi_rfkill_work); | 966 | cancel_delayed_work_sync(&msi_rfkill_work); |
890 | rfkill_cleanup(); | 967 | rfkill_cleanup(); |
891 | } | 968 | } |
@@ -901,7 +978,7 @@ static void __exit msi_cleanup(void) | |||
901 | if (auto_brightness != 2) | 978 | if (auto_brightness != 2) |
902 | set_auto_brightness(1); | 979 | set_auto_brightness(1); |
903 | 980 | ||
904 | printk(KERN_INFO "msi-laptop: driver unloaded.\n"); | 981 | pr_info("driver unloaded.\n"); |
905 | } | 982 | } |
906 | 983 | ||
907 | module_init(msi_init); | 984 | module_init(msi_init); |
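Taken together, the msi-laptop hunks wire up a three-stage path: the i8042 filter spots the extended scancode 0xE0 0xE4, a delayed work item reads the EC touchpad bit outside interrupt context, and the sparse keymap turns that bit into KEY_TOUCHPAD_ON/OFF. A condensed sketch of the last two stages, with ec_read_touchpad_bit() as a hypothetical stand-in for the EC read at MSI_STANDARD_EC_TOUCHPAD_ADDRESS:

#include <linux/input/sparse-keymap.h>
#include <linux/workqueue.h>

static struct input_dev *example_input;	/* prepared via sparse_keymap_setup() */

/* Hypothetical helper: returns 1 if the EC reports the touchpad as on. */
static int ec_read_touchpad_bit(void)
{
	return 1;	/* stub for the sketch */
}

static void example_touchpad_work(struct work_struct *work)
{
	int on = ec_read_touchpad_bit();

	if (on < 0)
		return;
	/* autorelease == true emits both the press and the release event */
	sparse_keymap_report_event(example_input,
				   on ? KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF,
				   1, true);
}
static DECLARE_DELAYED_WORK(example_work, example_touchpad_work);

/* From the i8042 filter, after seeing 0xE0 0xE4 (same half-second delay): */
/*	schedule_delayed_work(&example_work, round_jiffies_relative(HZ / 2)); */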
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c new file mode 100644 index 000000000000..de434c6dc2d6 --- /dev/null +++ b/drivers/platform/x86/samsung-laptop.c | |||
@@ -0,0 +1,832 @@ | |||
1 | /* | ||
2 | * Samsung Laptop driver | ||
3 | * | ||
4 | * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de) | ||
5 | * Copyright (C) 2009,2011 Novell Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/backlight.h> | ||
20 | #include <linux/fb.h> | ||
21 | #include <linux/dmi.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/rfkill.h> | ||
24 | |||
25 | /* | ||
26 | * This driver is needed because a number of Samsung laptops do not hook | ||
27 | * their control settings through ACPI. So we have to poke around in the | ||
28 | * BIOS to do things like set brightness values and handle "special" key controls. | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * We have 0 - 8 as valid brightness levels. The specs say that level 0 should | ||
33 | * be reserved by the BIOS (which really doesn't make much sense), so we tell | ||
34 | * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8 | ||
35 | */ | ||
36 | #define MAX_BRIGHT 0x07 | ||
37 | |||
38 | |||
39 | #define SABI_IFACE_MAIN 0x00 | ||
40 | #define SABI_IFACE_SUB 0x02 | ||
41 | #define SABI_IFACE_COMPLETE 0x04 | ||
42 | #define SABI_IFACE_DATA 0x05 | ||
43 | |||
44 | /* Structure to get data back to the calling function */ | ||
45 | struct sabi_retval { | ||
46 | u8 retval[20]; | ||
47 | }; | ||
48 | |||
49 | struct sabi_header_offsets { | ||
50 | u8 port; | ||
51 | u8 re_mem; | ||
52 | u8 iface_func; | ||
53 | u8 en_mem; | ||
54 | u8 data_offset; | ||
55 | u8 data_segment; | ||
56 | }; | ||
57 | |||
58 | struct sabi_commands { | ||
59 | /* | ||
60 | * Brightness is 0 - 8, as described above. | ||
61 | * Value 0 is for the BIOS to use | ||
62 | */ | ||
63 | u8 get_brightness; | ||
64 | u8 set_brightness; | ||
65 | |||
66 | /* | ||
67 | * first byte: | ||
68 | * 0x00 - wireless is off | ||
69 | * 0x01 - wireless is on | ||
70 | * second byte: | ||
71 | * 0x02 - 3G is off | ||
72 | * 0x03 - 3G is on | ||
73 | * TODO, verify 3G is correct, that doesn't seem right... | ||
74 | */ | ||
75 | u8 get_wireless_button; | ||
76 | u8 set_wireless_button; | ||
77 | |||
78 | /* 0 is off, 1 is on */ | ||
79 | u8 get_backlight; | ||
80 | u8 set_backlight; | ||
81 | |||
82 | /* | ||
83 | * 0x80 or 0x00 - no action | ||
84 | * 0x81 - recovery key pressed | ||
85 | */ | ||
86 | u8 get_recovery_mode; | ||
87 | u8 set_recovery_mode; | ||
88 | |||
89 | /* | ||
90 | * on seclinux: 0 is low, 1 is high, | ||
91 | * on swsmi: 0 is normal, 1 is silent, 2 is turbo | ||
92 | */ | ||
93 | u8 get_performance_level; | ||
94 | u8 set_performance_level; | ||
95 | |||
96 | /* | ||
97 | * Tell the BIOS that Linux is running on this machine. | ||
98 | * 81 is on, 80 is off | ||
99 | */ | ||
100 | u8 set_linux; | ||
101 | }; | ||
102 | |||
103 | struct sabi_performance_level { | ||
104 | const char *name; | ||
105 | u8 value; | ||
106 | }; | ||
107 | |||
108 | struct sabi_config { | ||
109 | const char *test_string; | ||
110 | u16 main_function; | ||
111 | const struct sabi_header_offsets header_offsets; | ||
112 | const struct sabi_commands commands; | ||
113 | const struct sabi_performance_level performance_levels[4]; | ||
114 | u8 min_brightness; | ||
115 | u8 max_brightness; | ||
116 | }; | ||
117 | |||
118 | static const struct sabi_config sabi_configs[] = { | ||
119 | { | ||
120 | .test_string = "SECLINUX", | ||
121 | |||
122 | .main_function = 0x4c49, | ||
123 | |||
124 | .header_offsets = { | ||
125 | .port = 0x00, | ||
126 | .re_mem = 0x02, | ||
127 | .iface_func = 0x03, | ||
128 | .en_mem = 0x04, | ||
129 | .data_offset = 0x05, | ||
130 | .data_segment = 0x07, | ||
131 | }, | ||
132 | |||
133 | .commands = { | ||
134 | .get_brightness = 0x00, | ||
135 | .set_brightness = 0x01, | ||
136 | |||
137 | .get_wireless_button = 0x02, | ||
138 | .set_wireless_button = 0x03, | ||
139 | |||
140 | .get_backlight = 0x04, | ||
141 | .set_backlight = 0x05, | ||
142 | |||
143 | .get_recovery_mode = 0x06, | ||
144 | .set_recovery_mode = 0x07, | ||
145 | |||
146 | .get_performance_level = 0x08, | ||
147 | .set_performance_level = 0x09, | ||
148 | |||
149 | .set_linux = 0x0a, | ||
150 | }, | ||
151 | |||
152 | .performance_levels = { | ||
153 | { | ||
154 | .name = "silent", | ||
155 | .value = 0, | ||
156 | }, | ||
157 | { | ||
158 | .name = "normal", | ||
159 | .value = 1, | ||
160 | }, | ||
161 | { }, | ||
162 | }, | ||
163 | .min_brightness = 1, | ||
164 | .max_brightness = 8, | ||
165 | }, | ||
166 | { | ||
167 | .test_string = "SwSmi@", | ||
168 | |||
169 | .main_function = 0x5843, | ||
170 | |||
171 | .header_offsets = { | ||
172 | .port = 0x00, | ||
173 | .re_mem = 0x04, | ||
174 | .iface_func = 0x02, | ||
175 | .en_mem = 0x03, | ||
176 | .data_offset = 0x05, | ||
177 | .data_segment = 0x07, | ||
178 | }, | ||
179 | |||
180 | .commands = { | ||
181 | .get_brightness = 0x10, | ||
182 | .set_brightness = 0x11, | ||
183 | |||
184 | .get_wireless_button = 0x12, | ||
185 | .set_wireless_button = 0x13, | ||
186 | |||
187 | .get_backlight = 0x2d, | ||
188 | .set_backlight = 0x2e, | ||
189 | |||
190 | .get_recovery_mode = 0xff, | ||
191 | .set_recovery_mode = 0xff, | ||
192 | |||
193 | .get_performance_level = 0x31, | ||
194 | .set_performance_level = 0x32, | ||
195 | |||
196 | .set_linux = 0xff, | ||
197 | }, | ||
198 | |||
199 | .performance_levels = { | ||
200 | { | ||
201 | .name = "normal", | ||
202 | .value = 0, | ||
203 | }, | ||
204 | { | ||
205 | .name = "silent", | ||
206 | .value = 1, | ||
207 | }, | ||
208 | { | ||
209 | .name = "overclock", | ||
210 | .value = 2, | ||
211 | }, | ||
212 | { }, | ||
213 | }, | ||
214 | .min_brightness = 0, | ||
215 | .max_brightness = 8, | ||
216 | }, | ||
217 | { }, | ||
218 | }; | ||
219 | |||
220 | static const struct sabi_config *sabi_config; | ||
221 | |||
222 | static void __iomem *sabi; | ||
223 | static void __iomem *sabi_iface; | ||
224 | static void __iomem *f0000_segment; | ||
225 | static struct backlight_device *backlight_device; | ||
226 | static struct mutex sabi_mutex; | ||
227 | static struct platform_device *sdev; | ||
228 | static struct rfkill *rfk; | ||
229 | |||
230 | static int force; | ||
231 | module_param(force, bool, 0); | ||
232 | MODULE_PARM_DESC(force, | ||
233 | "Disable the DMI check and forces the driver to be loaded"); | ||
234 | |||
235 | static int debug; | ||
236 | module_param(debug, bool, S_IRUGO | S_IWUSR); | ||
237 | MODULE_PARM_DESC(debug, "Debug enabled or not"); | ||
238 | |||
239 | static int sabi_get_command(u8 command, struct sabi_retval *sretval) | ||
240 | { | ||
241 | int retval = 0; | ||
242 | u16 port = readw(sabi + sabi_config->header_offsets.port); | ||
243 | u8 complete, iface_data; | ||
244 | |||
245 | mutex_lock(&sabi_mutex); | ||
246 | |||
247 | /* enable memory to be able to write to it */ | ||
248 | outb(readb(sabi + sabi_config->header_offsets.en_mem), port); | ||
249 | |||
250 | /* write out the command */ | ||
251 | writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); | ||
252 | writew(command, sabi_iface + SABI_IFACE_SUB); | ||
253 | writeb(0, sabi_iface + SABI_IFACE_COMPLETE); | ||
254 | outb(readb(sabi + sabi_config->header_offsets.iface_func), port); | ||
255 | |||
256 | /* write protect memory to make it safe */ | ||
257 | outb(readb(sabi + sabi_config->header_offsets.re_mem), port); | ||
258 | |||
259 | /* see if the command actually succeeded */ | ||
260 | complete = readb(sabi_iface + SABI_IFACE_COMPLETE); | ||
261 | iface_data = readb(sabi_iface + SABI_IFACE_DATA); | ||
262 | if (complete != 0xaa || iface_data == 0xff) { | ||
263 | pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", | ||
264 | command, complete, iface_data); | ||
265 | retval = -EINVAL; | ||
266 | goto exit; | ||
267 | } | ||
268 | /* | ||
269 | * Save off the data into a structure so the caller can use it. | ||
270 | * Right now we only want the first 4 bytes. | ||
271 | * There are commands that need more, but not for the ones we | ||
272 | * currently care about. | ||
273 | */ | ||
274 | sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA); | ||
275 | sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1); | ||
276 | sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2); | ||
277 | sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3); | ||
278 | |||
279 | exit: | ||
280 | mutex_unlock(&sabi_mutex); | ||
281 | return retval; | ||
282 | |||
283 | } | ||
284 | |||
285 | static int sabi_set_command(u8 command, u8 data) | ||
286 | { | ||
287 | int retval = 0; | ||
288 | u16 port = readw(sabi + sabi_config->header_offsets.port); | ||
289 | u8 complete, iface_data; | ||
290 | |||
291 | mutex_lock(&sabi_mutex); | ||
292 | |||
293 | /* enable memory to be able to write to it */ | ||
294 | outb(readb(sabi + sabi_config->header_offsets.en_mem), port); | ||
295 | |||
296 | /* write out the command */ | ||
297 | writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); | ||
298 | writew(command, sabi_iface + SABI_IFACE_SUB); | ||
299 | writeb(0, sabi_iface + SABI_IFACE_COMPLETE); | ||
300 | writeb(data, sabi_iface + SABI_IFACE_DATA); | ||
301 | outb(readb(sabi + sabi_config->header_offsets.iface_func), port); | ||
302 | |||
303 | /* write protect memory to make it safe */ | ||
304 | outb(readb(sabi + sabi_config->header_offsets.re_mem), port); | ||
305 | |||
306 | /* see if the command actually succeeded */ | ||
307 | complete = readb(sabi_iface + SABI_IFACE_COMPLETE); | ||
308 | iface_data = readb(sabi_iface + SABI_IFACE_DATA); | ||
309 | if (complete != 0xaa || iface_data == 0xff) { | ||
310 | pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", | ||
311 | command, complete, iface_data); | ||
312 | retval = -EINVAL; | ||
313 | } | ||
314 | |||
315 | mutex_unlock(&sabi_mutex); | ||
316 | return retval; | ||
317 | } | ||
318 | |||
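Both helpers follow the same handshake: unlock the mapped header region, write the main function and sub-command into the SABI interface, kick the SMI port with outb(), re-lock, then treat a completion byte other than 0xaa (or a data byte of 0xff) as failure. Callers only see the thin wrapper; a hedged sketch of reading the raw brightness byte on whichever config matched:

struct sabi_retval r;

if (!sabi_get_command(sabi_config->commands.get_brightness, &r))
	pr_debug("raw brightness byte = 0x%02x\n", r.retval[0]);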
319 | static void test_backlight(void) | ||
320 | { | ||
321 | struct sabi_retval sretval; | ||
322 | |||
323 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
324 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
325 | |||
326 | sabi_set_command(sabi_config->commands.set_backlight, 0); | ||
327 | printk(KERN_DEBUG "backlight should be off\n"); | ||
328 | |||
329 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
330 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
331 | |||
332 | msleep(1000); | ||
333 | |||
334 | sabi_set_command(sabi_config->commands.set_backlight, 1); | ||
335 | printk(KERN_DEBUG "backlight should be on\n"); | ||
336 | |||
337 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
338 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
339 | } | ||
340 | |||
341 | static void test_wireless(void) | ||
342 | { | ||
343 | struct sabi_retval sretval; | ||
344 | |||
345 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
346 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
347 | |||
348 | sabi_set_command(sabi_config->commands.set_wireless_button, 0); | ||
349 | printk(KERN_DEBUG "wireless led should be off\n"); | ||
350 | |||
351 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
352 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
353 | |||
354 | msleep(1000); | ||
355 | |||
356 | sabi_set_command(sabi_config->commands.set_wireless_button, 1); | ||
357 | printk(KERN_DEBUG "wireless led should be on\n"); | ||
358 | |||
359 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
360 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
361 | } | ||
362 | |||
363 | static u8 read_brightness(void) | ||
364 | { | ||
365 | struct sabi_retval sretval; | ||
366 | int user_brightness = 0; | ||
367 | int retval; | ||
368 | |||
369 | retval = sabi_get_command(sabi_config->commands.get_brightness, | ||
370 | &sretval); | ||
371 | if (!retval) { | ||
372 | user_brightness = sretval.retval[0]; | ||
373 | if (user_brightness != 0) | ||
374 | user_brightness -= sabi_config->min_brightness; | ||
375 | } | ||
376 | return user_brightness; | ||
377 | } | ||
378 | |||
379 | static void set_brightness(u8 user_brightness) | ||
380 | { | ||
381 | u8 user_level = user_brightness - sabi_config->min_brightness; | ||
382 | |||
383 | sabi_set_command(sabi_config->commands.set_brightness, user_level); | ||
384 | } | ||
385 | |||
386 | static int get_brightness(struct backlight_device *bd) | ||
387 | { | ||
388 | return (int)read_brightness(); | ||
389 | } | ||
390 | |||
391 | static int update_status(struct backlight_device *bd) | ||
392 | { | ||
393 | set_brightness(bd->props.brightness); | ||
394 | |||
395 | if (bd->props.power == FB_BLANK_UNBLANK) | ||
396 | sabi_set_command(sabi_config->commands.set_backlight, 1); | ||
397 | else | ||
398 | sabi_set_command(sabi_config->commands.set_backlight, 0); | ||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | static const struct backlight_ops backlight_ops = { | ||
403 | .get_brightness = get_brightness, | ||
404 | .update_status = update_status, | ||
405 | }; | ||
406 | |||
407 | static int rfkill_set(void *data, bool blocked) | ||
408 | { | ||
409 | /* Do something with blocked...*/ | ||
410 | /* | ||
411 | * blocked == false is on | ||
412 | * blocked == true is off | ||
413 | */ | ||
414 | if (blocked) | ||
415 | sabi_set_command(sabi_config->commands.set_wireless_button, 0); | ||
416 | else | ||
417 | sabi_set_command(sabi_config->commands.set_wireless_button, 1); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | static struct rfkill_ops rfkill_ops = { | ||
423 | .set_block = rfkill_set, | ||
424 | }; | ||
425 | |||
426 | static int init_wireless(struct platform_device *sdev) | ||
427 | { | ||
428 | int retval; | ||
429 | |||
430 | rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN, | ||
431 | &rfkill_ops, NULL); | ||
432 | if (!rfk) | ||
433 | return -ENOMEM; | ||
434 | |||
435 | retval = rfkill_register(rfk); | ||
436 | if (retval) { | ||
437 | rfkill_destroy(rfk); | ||
438 | return -ENODEV; | ||
439 | } | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static void destroy_wireless(void) | ||
445 | { | ||
446 | rfkill_unregister(rfk); | ||
447 | rfkill_destroy(rfk); | ||
448 | } | ||
449 | |||
450 | static ssize_t get_performance_level(struct device *dev, | ||
451 | struct device_attribute *attr, char *buf) | ||
452 | { | ||
453 | struct sabi_retval sretval; | ||
454 | int retval; | ||
455 | int i; | ||
456 | |||
457 | /* Read the state */ | ||
458 | retval = sabi_get_command(sabi_config->commands.get_performance_level, | ||
459 | &sretval); | ||
460 | if (retval) | ||
461 | return retval; | ||
462 | |||
463 | /* The logic is backwards, yeah, lots of fun... */ | ||
464 | for (i = 0; sabi_config->performance_levels[i].name; ++i) { | ||
465 | if (sretval.retval[0] == sabi_config->performance_levels[i].value) | ||
466 | return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name); | ||
467 | } | ||
468 | return sprintf(buf, "%s\n", "unknown"); | ||
469 | } | ||
470 | |||
471 | static ssize_t set_performance_level(struct device *dev, | ||
472 | struct device_attribute *attr, const char *buf, | ||
473 | size_t count) | ||
474 | { | ||
475 | if (count >= 1) { | ||
476 | int i; | ||
477 | for (i = 0; sabi_config->performance_levels[i].name; ++i) { | ||
478 | const struct sabi_performance_level *level = | ||
479 | &sabi_config->performance_levels[i]; | ||
480 | if (!strncasecmp(level->name, buf, strlen(level->name))) { | ||
481 | sabi_set_command(sabi_config->commands.set_performance_level, | ||
482 | level->value); | ||
483 | break; | ||
484 | } | ||
485 | } | ||
486 | if (!sabi_config->performance_levels[i].name) | ||
487 | return -EINVAL; | ||
488 | } | ||
489 | return count; | ||
490 | } | ||
491 | static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO, | ||
492 | get_performance_level, set_performance_level); | ||
493 | |||
494 | |||
495 | static int __init dmi_check_cb(const struct dmi_system_id *id) | ||
496 | { | ||
497 | pr_info("found laptop model '%s'\n", | ||
498 | id->ident); | ||
499 | return 1; | ||
500 | } | ||
501 | |||
502 | static struct dmi_system_id __initdata samsung_dmi_table[] = { | ||
503 | { | ||
504 | .ident = "N128", | ||
505 | .matches = { | ||
506 | DMI_MATCH(DMI_SYS_VENDOR, | ||
507 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
508 | DMI_MATCH(DMI_PRODUCT_NAME, "N128"), | ||
509 | DMI_MATCH(DMI_BOARD_NAME, "N128"), | ||
510 | }, | ||
511 | .callback = dmi_check_cb, | ||
512 | }, | ||
513 | { | ||
514 | .ident = "N130", | ||
515 | .matches = { | ||
516 | DMI_MATCH(DMI_SYS_VENDOR, | ||
517 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
518 | DMI_MATCH(DMI_PRODUCT_NAME, "N130"), | ||
519 | DMI_MATCH(DMI_BOARD_NAME, "N130"), | ||
520 | }, | ||
521 | .callback = dmi_check_cb, | ||
522 | }, | ||
523 | { | ||
524 | .ident = "X125", | ||
525 | .matches = { | ||
526 | DMI_MATCH(DMI_SYS_VENDOR, | ||
527 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
528 | DMI_MATCH(DMI_PRODUCT_NAME, "X125"), | ||
529 | DMI_MATCH(DMI_BOARD_NAME, "X125"), | ||
530 | }, | ||
531 | .callback = dmi_check_cb, | ||
532 | }, | ||
533 | { | ||
534 | .ident = "X120/X170", | ||
535 | .matches = { | ||
536 | DMI_MATCH(DMI_SYS_VENDOR, | ||
537 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
538 | DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"), | ||
539 | DMI_MATCH(DMI_BOARD_NAME, "X120/X170"), | ||
540 | }, | ||
541 | .callback = dmi_check_cb, | ||
542 | }, | ||
543 | { | ||
544 | .ident = "NC10", | ||
545 | .matches = { | ||
546 | DMI_MATCH(DMI_SYS_VENDOR, | ||
547 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
548 | DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), | ||
549 | DMI_MATCH(DMI_BOARD_NAME, "NC10"), | ||
550 | }, | ||
551 | .callback = dmi_check_cb, | ||
552 | }, | ||
553 | { | ||
554 | .ident = "NP-Q45", | ||
555 | .matches = { | ||
556 | DMI_MATCH(DMI_SYS_VENDOR, | ||
557 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
558 | DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), | ||
559 | DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"), | ||
560 | }, | ||
561 | .callback = dmi_check_cb, | ||
562 | }, | ||
563 | { | ||
564 | .ident = "X360", | ||
565 | .matches = { | ||
566 | DMI_MATCH(DMI_SYS_VENDOR, | ||
567 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
568 | DMI_MATCH(DMI_PRODUCT_NAME, "X360"), | ||
569 | DMI_MATCH(DMI_BOARD_NAME, "X360"), | ||
570 | }, | ||
571 | .callback = dmi_check_cb, | ||
572 | }, | ||
573 | { | ||
574 | .ident = "R518", | ||
575 | .matches = { | ||
576 | DMI_MATCH(DMI_SYS_VENDOR, | ||
577 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
578 | DMI_MATCH(DMI_PRODUCT_NAME, "R518"), | ||
579 | DMI_MATCH(DMI_BOARD_NAME, "R518"), | ||
580 | }, | ||
581 | .callback = dmi_check_cb, | ||
582 | }, | ||
583 | { | ||
584 | .ident = "R519/R719", | ||
585 | .matches = { | ||
586 | DMI_MATCH(DMI_SYS_VENDOR, | ||
587 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
588 | DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"), | ||
589 | DMI_MATCH(DMI_BOARD_NAME, "R519/R719"), | ||
590 | }, | ||
591 | .callback = dmi_check_cb, | ||
592 | }, | ||
593 | { | ||
594 | .ident = "N150/N210/N220", | ||
595 | .matches = { | ||
596 | DMI_MATCH(DMI_SYS_VENDOR, | ||
597 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
598 | DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), | ||
599 | DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), | ||
600 | }, | ||
601 | .callback = dmi_check_cb, | ||
602 | }, | ||
603 | { | ||
604 | .ident = "N150P/N210P/N220P", | ||
605 | .matches = { | ||
606 | DMI_MATCH(DMI_SYS_VENDOR, | ||
607 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
608 | DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"), | ||
609 | DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"), | ||
610 | }, | ||
611 | .callback = dmi_check_cb, | ||
612 | }, | ||
613 | { | ||
614 | .ident = "R530/R730", | ||
615 | .matches = { | ||
616 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
617 | DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"), | ||
618 | DMI_MATCH(DMI_BOARD_NAME, "R530/R730"), | ||
619 | }, | ||
620 | .callback = dmi_check_cb, | ||
621 | }, | ||
622 | { | ||
623 | .ident = "NF110/NF210/NF310", | ||
624 | .matches = { | ||
625 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
626 | DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), | ||
627 | DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), | ||
628 | }, | ||
629 | .callback = dmi_check_cb, | ||
630 | }, | ||
631 | { | ||
632 | .ident = "N145P/N250P/N260P", | ||
633 | .matches = { | ||
634 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
635 | DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), | ||
636 | DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), | ||
637 | }, | ||
638 | .callback = dmi_check_cb, | ||
639 | }, | ||
640 | { | ||
641 | .ident = "R70/R71", | ||
642 | .matches = { | ||
643 | DMI_MATCH(DMI_SYS_VENDOR, | ||
644 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
645 | DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"), | ||
646 | DMI_MATCH(DMI_BOARD_NAME, "R70/R71"), | ||
647 | }, | ||
648 | .callback = dmi_check_cb, | ||
649 | }, | ||
650 | { | ||
651 | .ident = "P460", | ||
652 | .matches = { | ||
653 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
654 | DMI_MATCH(DMI_PRODUCT_NAME, "P460"), | ||
655 | DMI_MATCH(DMI_BOARD_NAME, "P460"), | ||
656 | }, | ||
657 | .callback = dmi_check_cb, | ||
658 | }, | ||
659 | { }, | ||
660 | }; | ||
661 | MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); | ||
662 | |||
663 | static int find_signature(void __iomem *memcheck, const char *testStr) | ||
664 | { | ||
665 | int i = 0; | ||
666 | int loca; | ||
667 | |||
668 | for (loca = 0; loca < 0xffff; loca++) { | ||
669 | char temp = readb(memcheck + loca); | ||
670 | |||
671 | if (temp == testStr[i]) { | ||
672 | if (i == strlen(testStr)-1) | ||
673 | break; | ||
674 | ++i; | ||
675 | } else { | ||
676 | i = 0; | ||
677 | } | ||
678 | } | ||
679 | return loca; | ||
680 | } | ||
681 | |||
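Note that find_signature() returns the offset of the last byte of the matched string, or 0xffff when the 64 KiB window is exhausted without a match; samsung_init() below relies on that by advancing one byte to land on the SMI port number that follows the signature. An illustrative layout (the offset is assumed, not taken from real firmware):

/*
 * If "SwSmi@" ends at offset 0x1234 inside the mapped 0xF0000 segment,
 * find_signature(f0000_segment, "SwSmi@") returns 0x1234 and the SABI
 * header is then read starting at f0000_segment + 0x1235.
 */
int loca = find_signature(f0000_segment, "SwSmi@");	/* 0xffff if absent */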
682 | static int __init samsung_init(void) | ||
683 | { | ||
684 | struct backlight_properties props; | ||
685 | struct sabi_retval sretval; | ||
686 | unsigned int ifaceP; | ||
687 | int i; | ||
688 | int loca; | ||
689 | int retval; | ||
690 | |||
691 | mutex_init(&sabi_mutex); | ||
692 | |||
693 | if (!force && !dmi_check_system(samsung_dmi_table)) | ||
694 | return -ENODEV; | ||
695 | |||
696 | f0000_segment = ioremap_nocache(0xf0000, 0xffff); | ||
697 | if (!f0000_segment) { | ||
698 | pr_err("Can't map the segment at 0xf0000\n"); | ||
699 | return -EINVAL; | ||
700 | } | ||
701 | |||
702 | /* Try to find one of the signatures in memory to find the header */ | ||
703 | for (i = 0; sabi_configs[i].test_string != 0; ++i) { | ||
704 | sabi_config = &sabi_configs[i]; | ||
705 | loca = find_signature(f0000_segment, sabi_config->test_string); | ||
706 | if (loca != 0xffff) | ||
707 | break; | ||
708 | } | ||
709 | |||
710 | if (loca == 0xffff) { | ||
711 | pr_err("This computer does not support SABI\n"); | ||
712 | goto error_no_signature; | ||
713 | } | ||
714 | |||
715 | /* point to the SMI port number */ | ||
716 | loca += 1; | ||
717 | sabi = (f0000_segment + loca); | ||
718 | |||
719 | if (debug) { | ||
720 | printk(KERN_DEBUG "This computer supports SABI==%x\n", | ||
721 | loca + 0xf0000 - 6); | ||
722 | printk(KERN_DEBUG "SABI header:\n"); | ||
723 | printk(KERN_DEBUG " SMI Port Number = 0x%04x\n", | ||
724 | readw(sabi + sabi_config->header_offsets.port)); | ||
725 | printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n", | ||
726 | readb(sabi + sabi_config->header_offsets.iface_func)); | ||
727 | printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n", | ||
728 | readb(sabi + sabi_config->header_offsets.en_mem)); | ||
729 | printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n", | ||
730 | readb(sabi + sabi_config->header_offsets.re_mem)); | ||
731 | printk(KERN_DEBUG " SABI data offset = 0x%04x\n", | ||
732 | readw(sabi + sabi_config->header_offsets.data_offset)); | ||
733 | printk(KERN_DEBUG " SABI data segment = 0x%04x\n", | ||
734 | readw(sabi + sabi_config->header_offsets.data_segment)); | ||
735 | } | ||
736 | |||
737 | /* Get a pointer to the SABI Interface */ | ||
738 | ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4; | ||
739 | ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff; | ||
740 | sabi_iface = ioremap_nocache(ifaceP, 16); | ||
741 | if (!sabi_iface) { | ||
742 | pr_err("Can't remap %x\n", ifaceP); | ||
743 | goto exit; | ||
744 | } | ||
745 | if (debug) { | ||
746 | printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP); | ||
747 | printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface); | ||
748 | |||
749 | test_backlight(); | ||
750 | test_wireless(); | ||
751 | |||
752 | retval = sabi_get_command(sabi_config->commands.get_brightness, | ||
753 | &sretval); | ||
754 | printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]); | ||
755 | } | ||
756 | |||
757 | /* Turn on "Linux" mode in the BIOS */ | ||
758 | if (sabi_config->commands.set_linux != 0xff) { | ||
759 | retval = sabi_set_command(sabi_config->commands.set_linux, | ||
760 | 0x81); | ||
761 | if (retval) { | ||
762 | pr_warn("Linux mode was not set!\n"); | ||
763 | goto error_no_platform; | ||
764 | } | ||
765 | } | ||
766 | |||
767 | /* register a platform device to attach the sysfs and backlight devices to */ | ||
768 | sdev = platform_device_register_simple("samsung", -1, NULL, 0); | ||
769 | if (IS_ERR(sdev)) | ||
770 | goto error_no_platform; | ||
771 | |||
772 | /* create a backlight device to talk to this one */ | ||
773 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
774 | props.max_brightness = sabi_config->max_brightness; | ||
775 | backlight_device = backlight_device_register("samsung", &sdev->dev, | ||
776 | NULL, &backlight_ops, | ||
777 | &props); | ||
778 | if (IS_ERR(backlight_device)) | ||
779 | goto error_no_backlight; | ||
780 | |||
781 | backlight_device->props.brightness = read_brightness(); | ||
782 | backlight_device->props.power = FB_BLANK_UNBLANK; | ||
783 | backlight_update_status(backlight_device); | ||
784 | |||
785 | retval = init_wireless(sdev); | ||
786 | if (retval) | ||
787 | goto error_no_rfk; | ||
788 | |||
789 | retval = device_create_file(&sdev->dev, &dev_attr_performance_level); | ||
790 | if (retval) | ||
791 | goto error_file_create; | ||
792 | |||
793 | exit: | ||
794 | return 0; | ||
795 | |||
796 | error_file_create: | ||
797 | destroy_wireless(); | ||
798 | |||
799 | error_no_rfk: | ||
800 | backlight_device_unregister(backlight_device); | ||
801 | |||
802 | error_no_backlight: | ||
803 | platform_device_unregister(sdev); | ||
804 | |||
805 | error_no_platform: | ||
806 | iounmap(sabi_iface); | ||
807 | |||
808 | error_no_signature: | ||
809 | iounmap(f0000_segment); | ||
810 | return -EINVAL; | ||
811 | } | ||
812 | |||
813 | static void __exit samsung_exit(void) | ||
814 | { | ||
815 | /* Turn off "Linux" mode in the BIOS */ | ||
816 | if (sabi_config->commands.set_linux != 0xff) | ||
817 | sabi_set_command(sabi_config->commands.set_linux, 0x80); | ||
818 | |||
819 | device_remove_file(&sdev->dev, &dev_attr_performance_level); | ||
820 | backlight_device_unregister(backlight_device); | ||
821 | destroy_wireless(); | ||
822 | iounmap(sabi_iface); | ||
823 | iounmap(f0000_segment); | ||
824 | platform_device_unregister(sdev); | ||
825 | } | ||
826 | |||
827 | module_init(samsung_init); | ||
828 | module_exit(samsung_exit); | ||
829 | |||
830 | MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>"); | ||
831 | MODULE_DESCRIPTION("Samsung Backlight driver"); | ||
832 | MODULE_LICENSE("GPL"); | ||
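The SABI header above stores the interface location as a real-mode segment:offset pair, and samsung_init() turns it into a linear physical address before remapping it (segment shifted left by four, plus the offset). A minimal user-space sketch of that conversion, with invented example values and not part of the driver, could look like this:

#include <stdint.h>
#include <stdio.h>

/* Convert a real-mode segment:offset pair to a linear physical address,
 * mirroring the ifaceP computation in samsung_init(). */
static uint32_t segofs_to_linear(uint16_t segment, uint16_t offset)
{
	return ((uint32_t)segment << 4) + offset;
}

int main(void)
{
	/* example values only; the real ones are read from the SABI header */
	uint16_t seg = 0xf000, ofs = 0x1a40;

	printf("linear address = 0x%08x\n", segofs_to_linear(seg, ofs));
	return 0;
}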
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 13d8d63bcca9..e642f5f29504 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
@@ -71,8 +71,9 @@ | |||
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | #define DRV_PFX "sony-laptop: " | 73 | #define DRV_PFX "sony-laptop: " |
74 | #define dprintk(msg...) do { \ | 74 | #define dprintk(msg...) do { \ |
75 | if (debug) printk(KERN_WARNING DRV_PFX msg); \ | 75 | if (debug) \ |
76 | pr_warn(DRV_PFX msg); \ | ||
76 | } while (0) | 77 | } while (0) |
77 | 78 | ||
78 | #define SONY_LAPTOP_DRIVER_VERSION "0.6" | 79 | #define SONY_LAPTOP_DRIVER_VERSION "0.6" |
@@ -124,6 +125,19 @@ MODULE_PARM_DESC(minor, | |||
124 | "default is -1 (automatic)"); | 125 | "default is -1 (automatic)"); |
125 | #endif | 126 | #endif |
126 | 127 | ||
128 | static int kbd_backlight; /* = 0 */ | ||
129 | module_param(kbd_backlight, int, 0444); | ||
130 | MODULE_PARM_DESC(kbd_backlight, | ||
131 | "set this to 0 to disable keyboard backlight, " | ||
132 | "1 to enable it (default: 0)"); | ||
133 | |||
134 | static int kbd_backlight_timeout; /* = 0 */ | ||
135 | module_param(kbd_backlight_timeout, int, 0444); | ||
136 | MODULE_PARM_DESC(kbd_backlight_timeout, | ||
137 | "set this to 0 to set the default 10 seconds timeout, " | ||
138 | "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " | ||
139 | "(default: 0)"); | ||
140 | |||
127 | enum sony_nc_rfkill { | 141 | enum sony_nc_rfkill { |
128 | SONY_WIFI, | 142 | SONY_WIFI, |
129 | SONY_BLUETOOTH, | 143 | SONY_BLUETOOTH, |
@@ -402,7 +416,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device) | |||
402 | error = kfifo_alloc(&sony_laptop_input.fifo, | 416 | error = kfifo_alloc(&sony_laptop_input.fifo, |
403 | SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); | 417 | SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); |
404 | if (error) { | 418 | if (error) { |
405 | printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); | 419 | pr_err(DRV_PFX "kfifo_alloc failed\n"); |
406 | goto err_dec_users; | 420 | goto err_dec_users; |
407 | } | 421 | } |
408 | 422 | ||
@@ -591,7 +605,7 @@ struct sony_nc_value { | |||
591 | int value; /* current setting */ | 605 | int value; /* current setting */ |
592 | int valid; /* Has ever been set */ | 606 | int valid; /* Has ever been set */ |
593 | int debug; /* active only in debug mode ? */ | 607 | int debug; /* active only in debug mode ? */ |
594 | struct device_attribute devattr; /* sysfs atribute */ | 608 | struct device_attribute devattr; /* sysfs attribute */ |
595 | }; | 609 | }; |
596 | 610 | ||
597 | #define SNC_HANDLE_NAMES(_name, _values...) \ | 611 | #define SNC_HANDLE_NAMES(_name, _values...) \ |
@@ -686,7 +700,7 @@ static int acpi_callgetfunc(acpi_handle handle, char *name, int *result) | |||
686 | return 0; | 700 | return 0; |
687 | } | 701 | } |
688 | 702 | ||
689 | printk(KERN_WARNING DRV_PFX "acpi_callreadfunc failed\n"); | 703 | pr_warn(DRV_PFX "acpi_callreadfunc failed\n"); |
690 | 704 | ||
691 | return -1; | 705 | return -1; |
692 | } | 706 | } |
@@ -712,7 +726,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value, | |||
712 | if (status == AE_OK) { | 726 | if (status == AE_OK) { |
713 | if (result != NULL) { | 727 | if (result != NULL) { |
714 | if (out_obj.type != ACPI_TYPE_INTEGER) { | 728 | if (out_obj.type != ACPI_TYPE_INTEGER) { |
715 | printk(KERN_WARNING DRV_PFX "acpi_evaluate_object bad " | 729 | pr_warn(DRV_PFX "acpi_evaluate_object bad " |
716 | "return type\n"); | 730 | "return type\n"); |
717 | return -1; | 731 | return -1; |
718 | } | 732 | } |
@@ -721,34 +735,103 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value, | |||
721 | return 0; | 735 | return 0; |
722 | } | 736 | } |
723 | 737 | ||
724 | printk(KERN_WARNING DRV_PFX "acpi_evaluate_object failed\n"); | 738 | pr_warn(DRV_PFX "acpi_evaluate_object failed\n"); |
725 | 739 | ||
726 | return -1; | 740 | return -1; |
727 | } | 741 | } |
728 | 742 | ||
729 | static int sony_find_snc_handle(int handle) | 743 | struct sony_nc_handles { |
744 | u16 cap[0x10]; | ||
745 | struct device_attribute devattr; | ||
746 | }; | ||
747 | |||
748 | static struct sony_nc_handles *handles; | ||
749 | |||
750 | static ssize_t sony_nc_handles_show(struct device *dev, | ||
751 | struct device_attribute *attr, char *buffer) | ||
752 | { | ||
753 | ssize_t len = 0; | ||
754 | int i; | ||
755 | |||
756 | for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { | ||
757 | len += snprintf(buffer + len, PAGE_SIZE - len, "0x%.4x ", | ||
758 | handles->cap[i]); | ||
759 | } | ||
760 | len += snprintf(buffer + len, PAGE_SIZE - len, "\n"); | ||
761 | |||
762 | return len; | ||
763 | } | ||
764 | |||
765 | static int sony_nc_handles_setup(struct platform_device *pd) | ||
730 | { | 766 | { |
731 | int i; | 767 | int i; |
732 | int result; | 768 | int result; |
733 | 769 | ||
734 | for (i = 0x20; i < 0x30; i++) { | 770 | handles = kzalloc(sizeof(*handles), GFP_KERNEL); |
735 | acpi_callsetfunc(sony_nc_acpi_handle, "SN00", i, &result); | 771 | if (!handles) |
736 | if (result == handle) | 772 | return -ENOMEM; |
737 | return i-0x20; | 773 | |
774 | sysfs_attr_init(&handles->devattr.attr); | ||
775 | handles->devattr.attr.name = "handles"; | ||
776 | handles->devattr.attr.mode = S_IRUGO; | ||
777 | handles->devattr.show = sony_nc_handles_show; | ||
778 | |||
779 | for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { | ||
780 | if (!acpi_callsetfunc(sony_nc_acpi_handle, | ||
781 | "SN00", i + 0x20, &result)) { | ||
782 | dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n", | ||
783 | result, i); | ||
784 | handles->cap[i] = result; | ||
785 | } | ||
786 | } | ||
787 | |||
788 | /* allow reading capabilities via sysfs */ | ||
789 | if (device_create_file(&pd->dev, &handles->devattr)) { | ||
790 | kfree(handles); | ||
791 | handles = NULL; | ||
792 | return -1; | ||
793 | } | ||
794 | |||
795 | return 0; | ||
796 | } | ||
797 | |||
798 | static int sony_nc_handles_cleanup(struct platform_device *pd) | ||
799 | { | ||
800 | if (handles) { | ||
801 | device_remove_file(&pd->dev, &handles->devattr); | ||
802 | kfree(handles); | ||
803 | handles = NULL; | ||
738 | } | 804 | } |
805 | return 0; | ||
806 | } | ||
739 | 807 | ||
808 | static int sony_find_snc_handle(int handle) | ||
809 | { | ||
810 | int i; | ||
811 | for (i = 0; i < 0x10; i++) { | ||
812 | if (handles->cap[i] == handle) { | ||
813 | dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", | ||
814 | handle, i); | ||
815 | return i; | ||
816 | } | ||
817 | } | ||
818 | dprintk("handle 0x%.4x not found\n", handle); | ||
740 | return -1; | 819 | return -1; |
741 | } | 820 | } |
742 | 821 | ||
743 | static int sony_call_snc_handle(int handle, int argument, int *result) | 822 | static int sony_call_snc_handle(int handle, int argument, int *result) |
744 | { | 823 | { |
824 | int ret = 0; | ||
745 | int offset = sony_find_snc_handle(handle); | 825 | int offset = sony_find_snc_handle(handle); |
746 | 826 | ||
747 | if (offset < 0) | 827 | if (offset < 0) |
748 | return -1; | 828 | return -1; |
749 | 829 | ||
750 | return acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, | 830 | ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, |
751 | result); | 831 | result); |
832 | dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument, | ||
833 | *result); | ||
834 | return ret; | ||
752 | } | 835 | } |
753 | 836 | ||
754 | /* | 837 | /* |
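sony_nc_handles_setup() above caches the sixteen SN00 capability handles once at probe time and exposes them through a read-only "handles" attribute, so sony_find_snc_handle() can search the cache instead of re-evaluating ACPI methods. A small user-space sketch that reads and parses that attribute might look as follows; the sysfs path is an assumption, since it depends on the name of the registered platform device:

#include <stdio.h>

int main(void)
{
	/* path assumed for illustration; adjust to the actual platform device */
	const char *path = "/sys/devices/platform/sony-laptop/handles";
	unsigned int handle;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* the attribute is a space-separated list of sixteen hex handles */
	while (fscanf(f, "%x", &handle) == 1)
		printf("0x%04x\n", handle);
	fclose(f);
	return 0;
}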
@@ -857,11 +940,39 @@ static int sony_backlight_get_brightness(struct backlight_device *bd) | |||
857 | return value - 1; | 940 | return value - 1; |
858 | } | 941 | } |
859 | 942 | ||
860 | static struct backlight_device *sony_backlight_device; | 943 | static int sony_nc_get_brightness_ng(struct backlight_device *bd) |
944 | { | ||
945 | int result; | ||
946 | int *handle = (int *)bl_get_data(bd); | ||
947 | |||
948 | sony_call_snc_handle(*handle, 0x0200, &result); | ||
949 | |||
950 | return result & 0xff; | ||
951 | } | ||
952 | |||
953 | static int sony_nc_update_status_ng(struct backlight_device *bd) | ||
954 | { | ||
955 | int value, result; | ||
956 | int *handle = (int *)bl_get_data(bd); | ||
957 | |||
958 | value = bd->props.brightness; | ||
959 | sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result); | ||
960 | |||
961 | return sony_nc_get_brightness_ng(bd); | ||
962 | } | ||
963 | |||
861 | static const struct backlight_ops sony_backlight_ops = { | 964 | static const struct backlight_ops sony_backlight_ops = { |
965 | .options = BL_CORE_SUSPENDRESUME, | ||
862 | .update_status = sony_backlight_update_status, | 966 | .update_status = sony_backlight_update_status, |
863 | .get_brightness = sony_backlight_get_brightness, | 967 | .get_brightness = sony_backlight_get_brightness, |
864 | }; | 968 | }; |
969 | static const struct backlight_ops sony_backlight_ng_ops = { | ||
970 | .options = BL_CORE_SUSPENDRESUME, | ||
971 | .update_status = sony_nc_update_status_ng, | ||
972 | .get_brightness = sony_nc_get_brightness_ng, | ||
973 | }; | ||
974 | static int backlight_ng_handle; | ||
975 | static struct backlight_device *sony_backlight_device; | ||
865 | 976 | ||
866 | /* | 977 | /* |
867 | * New SNC-only Vaios event mapping to driver known keys | 978 | * New SNC-only Vaios event mapping to driver known keys |
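For the handle-based ("ng") backlight above, the brightness level travels in the upper 16 bits of the SN07 argument: 0x0100 selects the set operation, 0x0200 the get, and the current level comes back in the low byte of the result. A small illustrative sketch of that packing, outside the driver and only as a reading aid:

#include <stdint.h>
#include <stdio.h>

static uint32_t snc_brightness_set_arg(uint8_t level)
{
	/* 0x0100 selects the set operation; the level sits in bits 16-23 */
	return 0x0100u | ((uint32_t)level << 16);
}

static uint8_t snc_brightness_from_result(uint32_t result)
{
	/* the get call (argument 0x0200) returns the level in the low byte */
	return (uint8_t)(result & 0xff);
}

int main(void)
{
	uint32_t arg = snc_brightness_set_arg(200);

	printf("set argument = 0x%08x\n", arg);
	printf("level from result 0x12c8 = %u\n",
	       snc_brightness_from_result(0x12c8));
	return 0;
}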
@@ -972,7 +1083,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) | |||
972 | } | 1083 | } |
973 | 1084 | ||
974 | if (!key_event->data) | 1085 | if (!key_event->data) |
975 | printk(KERN_INFO DRV_PFX | 1086 | pr_info(DRV_PFX |
976 | "Unknown event: 0x%x 0x%x\n", | 1087 | "Unknown event: 0x%x 0x%x\n", |
977 | key_handle, | 1088 | key_handle, |
978 | ev); | 1089 | ev); |
@@ -996,7 +1107,7 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level, | |||
996 | struct acpi_device_info *info; | 1107 | struct acpi_device_info *info; |
997 | 1108 | ||
998 | if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { | 1109 | if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { |
999 | printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", | 1110 | pr_warn(DRV_PFX "method: name: %4.4s, args %X\n", |
1000 | (char *)&info->name, info->param_count); | 1111 | (char *)&info->name, info->param_count); |
1001 | 1112 | ||
1002 | kfree(info); | 1113 | kfree(info); |
@@ -1037,7 +1148,7 @@ static int sony_nc_resume(struct acpi_device *device) | |||
1037 | ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, | 1148 | ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, |
1038 | item->value, NULL); | 1149 | item->value, NULL); |
1039 | if (ret < 0) { | 1150 | if (ret < 0) { |
1040 | printk("%s: %d\n", __func__, ret); | 1151 | pr_err(DRV_PFX "%s: %d\n", __func__, ret); |
1041 | break; | 1152 | break; |
1042 | } | 1153 | } |
1043 | } | 1154 | } |
@@ -1054,11 +1165,6 @@ static int sony_nc_resume(struct acpi_device *device) | |||
1054 | sony_nc_function_setup(device); | 1165 | sony_nc_function_setup(device); |
1055 | } | 1166 | } |
1056 | 1167 | ||
1057 | /* set the last requested brightness level */ | ||
1058 | if (sony_backlight_device && | ||
1059 | sony_backlight_update_status(sony_backlight_device) < 0) | ||
1060 | printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n"); | ||
1061 | |||
1062 | /* re-read rfkill state */ | 1168 | /* re-read rfkill state */ |
1063 | sony_nc_rfkill_update(); | 1169 | sony_nc_rfkill_update(); |
1064 | 1170 | ||
@@ -1206,12 +1312,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device) | |||
1206 | 1312 | ||
1207 | device_enum = (union acpi_object *) buffer.pointer; | 1313 | device_enum = (union acpi_object *) buffer.pointer; |
1208 | if (!device_enum) { | 1314 | if (!device_enum) { |
1209 | pr_err("Invalid SN06 return object\n"); | 1315 | pr_err(DRV_PFX "No SN06 return object\n"); |
1210 | goto out_no_enum; | 1316 | goto out_no_enum; |
1211 | } | 1317 | } |
1212 | if (device_enum->type != ACPI_TYPE_BUFFER) { | 1318 | if (device_enum->type != ACPI_TYPE_BUFFER) { |
1213 | pr_err("Invalid SN06 return object type 0x%.2x\n", | 1319 | pr_err(DRV_PFX "Invalid SN06 return object type 0x%.2x\n", |
1214 | device_enum->type); | 1320 | device_enum->type); |
1215 | goto out_no_enum; | 1321 | goto out_no_enum; |
1216 | } | 1322 | } |
1217 | 1323 | ||
@@ -1245,6 +1351,209 @@ out_no_enum: | |||
1245 | return; | 1351 | return; |
1246 | } | 1352 | } |
1247 | 1353 | ||
1354 | /* Keyboard backlight feature */ | ||
1355 | #define KBDBL_HANDLER 0x137 | ||
1356 | #define KBDBL_PRESENT 0xB00 | ||
1357 | #define SET_MODE 0xC00 | ||
1358 | #define SET_TIMEOUT 0xE00 | ||
1359 | |||
1360 | struct kbd_backlight { | ||
1361 | int mode; | ||
1362 | int timeout; | ||
1363 | struct device_attribute mode_attr; | ||
1364 | struct device_attribute timeout_attr; | ||
1365 | }; | ||
1366 | |||
1367 | static struct kbd_backlight *kbdbl_handle; | ||
1368 | |||
1369 | static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) | ||
1370 | { | ||
1371 | int result; | ||
1372 | |||
1373 | if (value > 1) | ||
1374 | return -EINVAL; | ||
1375 | |||
1376 | if (sony_call_snc_handle(KBDBL_HANDLER, | ||
1377 | (value << 0x10) | SET_MODE, &result)) | ||
1378 | return -EIO; | ||
1379 | |||
1380 | kbdbl_handle->mode = value; | ||
1381 | |||
1382 | return 0; | ||
1383 | } | ||
1384 | |||
1385 | static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev, | ||
1386 | struct device_attribute *attr, | ||
1387 | const char *buffer, size_t count) | ||
1388 | { | ||
1389 | int ret = 0; | ||
1390 | unsigned long value; | ||
1391 | |||
1392 | if (count > 31) | ||
1393 | return -EINVAL; | ||
1394 | |||
1395 | if (strict_strtoul(buffer, 10, &value)) | ||
1396 | return -EINVAL; | ||
1397 | |||
1398 | ret = __sony_nc_kbd_backlight_mode_set(value); | ||
1399 | if (ret < 0) | ||
1400 | return ret; | ||
1401 | |||
1402 | return count; | ||
1403 | } | ||
1404 | |||
1405 | static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev, | ||
1406 | struct device_attribute *attr, char *buffer) | ||
1407 | { | ||
1408 | ssize_t count = 0; | ||
1409 | count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode); | ||
1410 | return count; | ||
1411 | } | ||
1412 | |||
1413 | static int __sony_nc_kbd_backlight_timeout_set(u8 value) | ||
1414 | { | ||
1415 | int result; | ||
1416 | |||
1417 | if (value > 3) | ||
1418 | return -EINVAL; | ||
1419 | |||
1420 | if (sony_call_snc_handle(KBDBL_HANDLER, | ||
1421 | (value << 0x10) | SET_TIMEOUT, &result)) | ||
1422 | return -EIO; | ||
1423 | |||
1424 | kbdbl_handle->timeout = value; | ||
1425 | |||
1426 | return 0; | ||
1427 | } | ||
1428 | |||
1429 | static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev, | ||
1430 | struct device_attribute *attr, | ||
1431 | const char *buffer, size_t count) | ||
1432 | { | ||
1433 | int ret = 0; | ||
1434 | unsigned long value; | ||
1435 | |||
1436 | if (count > 31) | ||
1437 | return -EINVAL; | ||
1438 | |||
1439 | if (strict_strtoul(buffer, 10, &value)) | ||
1440 | return -EINVAL; | ||
1441 | |||
1442 | ret = __sony_nc_kbd_backlight_timeout_set(value); | ||
1443 | if (ret < 0) | ||
1444 | return ret; | ||
1445 | |||
1446 | return count; | ||
1447 | } | ||
1448 | |||
1449 | static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev, | ||
1450 | struct device_attribute *attr, char *buffer) | ||
1451 | { | ||
1452 | ssize_t count = 0; | ||
1453 | count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout); | ||
1454 | return count; | ||
1455 | } | ||
1456 | |||
1457 | static int sony_nc_kbd_backlight_setup(struct platform_device *pd) | ||
1458 | { | ||
1459 | int result; | ||
1460 | |||
1461 | if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result)) | ||
1462 | return 0; | ||
1463 | if (!(result & 0x02)) | ||
1464 | return 0; | ||
1465 | |||
1466 | kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL); | ||
1467 | if (!kbdbl_handle) | ||
1468 | return -ENOMEM; | ||
1469 | |||
1470 | sysfs_attr_init(&kbdbl_handle->mode_attr.attr); | ||
1471 | kbdbl_handle->mode_attr.attr.name = "kbd_backlight"; | ||
1472 | kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
1473 | kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show; | ||
1474 | kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store; | ||
1475 | |||
1476 | sysfs_attr_init(&kbdbl_handle->timeout_attr.attr); | ||
1477 | kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout"; | ||
1478 | kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
1479 | kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show; | ||
1480 | kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store; | ||
1481 | |||
1482 | if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr)) | ||
1483 | goto outkzalloc; | ||
1484 | |||
1485 | if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr)) | ||
1486 | goto outmode; | ||
1487 | |||
1488 | __sony_nc_kbd_backlight_mode_set(kbd_backlight); | ||
1489 | __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout); | ||
1490 | |||
1491 | return 0; | ||
1492 | |||
1493 | outmode: | ||
1494 | device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); | ||
1495 | outkzalloc: | ||
1496 | kfree(kbdbl_handle); | ||
1497 | kbdbl_handle = NULL; | ||
1498 | return -1; | ||
1499 | } | ||
1500 | |||
1501 | static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) | ||
1502 | { | ||
1503 | if (kbdbl_handle) { | ||
1504 | device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); | ||
1505 | device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); | ||
1506 | kfree(kbdbl_handle); | ||
1507 | } | ||
1508 | return 0; | ||
1509 | } | ||
1510 | |||
1511 | static void sony_nc_backlight_setup(void) | ||
1512 | { | ||
1513 | acpi_handle unused; | ||
1514 | int max_brightness = 0; | ||
1515 | const struct backlight_ops *ops = NULL; | ||
1516 | struct backlight_properties props; | ||
1517 | |||
1518 | if (sony_find_snc_handle(0x12f) != -1) { | ||
1519 | backlight_ng_handle = 0x12f; | ||
1520 | ops = &sony_backlight_ng_ops; | ||
1521 | max_brightness = 0xff; | ||
1522 | |||
1523 | } else if (sony_find_snc_handle(0x137) != -1) { | ||
1524 | backlight_ng_handle = 0x137; | ||
1525 | ops = &sony_backlight_ng_ops; | ||
1526 | max_brightness = 0xff; | ||
1527 | |||
1528 | } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", | ||
1529 | &unused))) { | ||
1530 | ops = &sony_backlight_ops; | ||
1531 | max_brightness = SONY_MAX_BRIGHTNESS - 1; | ||
1532 | |||
1533 | } else | ||
1534 | return; | ||
1535 | |||
1536 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
1537 | props.type = BACKLIGHT_PLATFORM; | ||
1538 | props.max_brightness = max_brightness; | ||
1539 | sony_backlight_device = backlight_device_register("sony", NULL, | ||
1540 | &backlight_ng_handle, | ||
1541 | ops, &props); | ||
1542 | |||
1543 | if (IS_ERR(sony_backlight_device)) { | ||
1544 | pr_warn(DRV_PFX "unable to register backlight device\n"); | ||
1545 | sony_backlight_device = NULL; | ||
1546 | } else | ||
1547 | sony_backlight_device->props.brightness = | ||
1548 | ops->get_brightness(sony_backlight_device); | ||
1549 | } | ||
1550 | |||
1551 | static void sony_nc_backlight_cleanup(void) | ||
1552 | { | ||
1553 | if (sony_backlight_device) | ||
1554 | backlight_device_unregister(sony_backlight_device); | ||
1555 | } | ||
1556 | |||
1248 | static int sony_nc_add(struct acpi_device *device) | 1557 | static int sony_nc_add(struct acpi_device *device) |
1249 | { | 1558 | { |
1250 | acpi_status status; | 1559 | acpi_status status; |
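The kbd_backlight (0 or 1) and kbd_backlight_timeout (0 to 3) attributes created above are plain sysfs files under the sony platform device, so user space can drive them with ordinary writes; out-of-range values are rejected with -EINVAL. A minimal sketch, where the directory name is an assumption:

#include <stdio.h>

/* Write a single integer value to a sysfs attribute; returns 0 on success. */
static int write_attr(const char *path, int value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", value);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	/* base path assumed for illustration; it depends on the platform device name */
	const char *base = "/sys/devices/platform/sony-laptop";
	char path[128];

	snprintf(path, sizeof(path), "%s/kbd_backlight", base);
	if (write_attr(path, 1))		/* 0 = off, 1 = on */
		perror("kbd_backlight");

	snprintf(path, sizeof(path), "%s/kbd_backlight_timeout", base);
	if (write_attr(path, 2))		/* 0=10s, 1=30s, 2=60s, 3=never */
		perror("kbd_backlight_timeout");

	return 0;
}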
@@ -1252,8 +1561,8 @@ static int sony_nc_add(struct acpi_device *device) | |||
1252 | acpi_handle handle; | 1561 | acpi_handle handle; |
1253 | struct sony_nc_value *item; | 1562 | struct sony_nc_value *item; |
1254 | 1563 | ||
1255 | printk(KERN_INFO DRV_PFX "%s v%s.\n", | 1564 | pr_info(DRV_PFX "%s v%s.\n", SONY_NC_DRIVER_NAME, |
1256 | SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); | 1565 | SONY_LAPTOP_DRIVER_VERSION); |
1257 | 1566 | ||
1258 | sony_nc_acpi_device = device; | 1567 | sony_nc_acpi_device = device; |
1259 | strcpy(acpi_device_class(device), "sony/hotkey"); | 1568 | strcpy(acpi_device_class(device), "sony/hotkey"); |
@@ -1269,13 +1578,18 @@ static int sony_nc_add(struct acpi_device *device) | |||
1269 | goto outwalk; | 1578 | goto outwalk; |
1270 | } | 1579 | } |
1271 | 1580 | ||
1581 | result = sony_pf_add(); | ||
1582 | if (result) | ||
1583 | goto outpresent; | ||
1584 | |||
1272 | if (debug) { | 1585 | if (debug) { |
1273 | status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_nc_acpi_handle, | 1586 | status = acpi_walk_namespace(ACPI_TYPE_METHOD, |
1274 | 1, sony_walk_callback, NULL, NULL, NULL); | 1587 | sony_nc_acpi_handle, 1, sony_walk_callback, |
1588 | NULL, NULL, NULL); | ||
1275 | if (ACPI_FAILURE(status)) { | 1589 | if (ACPI_FAILURE(status)) { |
1276 | printk(KERN_WARNING DRV_PFX "unable to walk acpi resources\n"); | 1590 | pr_warn(DRV_PFX "unable to walk acpi resources\n"); |
1277 | result = -ENODEV; | 1591 | result = -ENODEV; |
1278 | goto outwalk; | 1592 | goto outpresent; |
1279 | } | 1593 | } |
1280 | } | 1594 | } |
1281 | 1595 | ||
@@ -1288,6 +1602,12 @@ static int sony_nc_add(struct acpi_device *device) | |||
1288 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", | 1602 | if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", |
1289 | &handle))) { | 1603 | &handle))) { |
1290 | dprintk("Doing SNC setup\n"); | 1604 | dprintk("Doing SNC setup\n"); |
1605 | result = sony_nc_handles_setup(sony_pf_device); | ||
1606 | if (result) | ||
1607 | goto outpresent; | ||
1608 | result = sony_nc_kbd_backlight_setup(sony_pf_device); | ||
1609 | if (result) | ||
1610 | goto outsnc; | ||
1291 | sony_nc_function_setup(device); | 1611 | sony_nc_function_setup(device); |
1292 | sony_nc_rfkill_setup(device); | 1612 | sony_nc_rfkill_setup(device); |
1293 | } | 1613 | } |
@@ -1295,40 +1615,17 @@ static int sony_nc_add(struct acpi_device *device) | |||
1295 | /* setup input devices and helper fifo */ | 1615 | /* setup input devices and helper fifo */ |
1296 | result = sony_laptop_setup_input(device); | 1616 | result = sony_laptop_setup_input(device); |
1297 | if (result) { | 1617 | if (result) { |
1298 | printk(KERN_ERR DRV_PFX | 1618 | pr_err(DRV_PFX "Unable to create input devices.\n"); |
1299 | "Unable to create input devices.\n"); | 1619 | goto outkbdbacklight; |
1300 | goto outwalk; | ||
1301 | } | 1620 | } |
1302 | 1621 | ||
1303 | if (acpi_video_backlight_support()) { | 1622 | if (acpi_video_backlight_support()) { |
1304 | printk(KERN_INFO DRV_PFX "brightness ignored, must be " | 1623 | pr_info(DRV_PFX "brightness ignored, must be " |
1305 | "controlled by ACPI video driver\n"); | 1624 | "controlled by ACPI video driver\n"); |
1306 | } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", | 1625 | } else { |
1307 | &handle))) { | 1626 | sony_nc_backlight_setup(); |
1308 | struct backlight_properties props; | ||
1309 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
1310 | props.type = BACKLIGHT_PLATFORM; | ||
1311 | props.max_brightness = SONY_MAX_BRIGHTNESS - 1; | ||
1312 | sony_backlight_device = backlight_device_register("sony", NULL, | ||
1313 | NULL, | ||
1314 | &sony_backlight_ops, | ||
1315 | &props); | ||
1316 | |||
1317 | if (IS_ERR(sony_backlight_device)) { | ||
1318 | printk(KERN_WARNING DRV_PFX "unable to register backlight device\n"); | ||
1319 | sony_backlight_device = NULL; | ||
1320 | } else { | ||
1321 | sony_backlight_device->props.brightness = | ||
1322 | sony_backlight_get_brightness | ||
1323 | (sony_backlight_device); | ||
1324 | } | ||
1325 | |||
1326 | } | 1627 | } |
1327 | 1628 | ||
1328 | result = sony_pf_add(); | ||
1329 | if (result) | ||
1330 | goto outbacklight; | ||
1331 | |||
1332 | /* create sony_pf sysfs attributes related to the SNC device */ | 1629 | /* create sony_pf sysfs attributes related to the SNC device */ |
1333 | for (item = sony_nc_values; item->name; ++item) { | 1630 | for (item = sony_nc_values; item->name; ++item) { |
1334 | 1631 | ||
@@ -1374,14 +1671,19 @@ static int sony_nc_add(struct acpi_device *device) | |||
1374 | for (item = sony_nc_values; item->name; ++item) { | 1671 | for (item = sony_nc_values; item->name; ++item) { |
1375 | device_remove_file(&sony_pf_device->dev, &item->devattr); | 1672 | device_remove_file(&sony_pf_device->dev, &item->devattr); |
1376 | } | 1673 | } |
1377 | sony_pf_remove(); | 1674 | sony_nc_backlight_cleanup(); |
1378 | |||
1379 | outbacklight: | ||
1380 | if (sony_backlight_device) | ||
1381 | backlight_device_unregister(sony_backlight_device); | ||
1382 | 1675 | ||
1383 | sony_laptop_remove_input(); | 1676 | sony_laptop_remove_input(); |
1384 | 1677 | ||
1678 | outkbdbacklight: | ||
1679 | sony_nc_kbd_backlight_cleanup(sony_pf_device); | ||
1680 | |||
1681 | outsnc: | ||
1682 | sony_nc_handles_cleanup(sony_pf_device); | ||
1683 | |||
1684 | outpresent: | ||
1685 | sony_pf_remove(); | ||
1686 | |||
1385 | outwalk: | 1687 | outwalk: |
1386 | sony_nc_rfkill_cleanup(); | 1688 | sony_nc_rfkill_cleanup(); |
1387 | return result; | 1689 | return result; |
@@ -1391,8 +1693,7 @@ static int sony_nc_remove(struct acpi_device *device, int type) | |||
1391 | { | 1693 | { |
1392 | struct sony_nc_value *item; | 1694 | struct sony_nc_value *item; |
1393 | 1695 | ||
1394 | if (sony_backlight_device) | 1696 | sony_nc_backlight_cleanup(); |
1395 | backlight_device_unregister(sony_backlight_device); | ||
1396 | 1697 | ||
1397 | sony_nc_acpi_device = NULL; | 1698 | sony_nc_acpi_device = NULL; |
1398 | 1699 | ||
@@ -1400,6 +1701,8 @@ static int sony_nc_remove(struct acpi_device *device, int type) | |||
1400 | device_remove_file(&sony_pf_device->dev, &item->devattr); | 1701 | device_remove_file(&sony_pf_device->dev, &item->devattr); |
1401 | } | 1702 | } |
1402 | 1703 | ||
1704 | sony_nc_kbd_backlight_cleanup(sony_pf_device); | ||
1705 | sony_nc_handles_cleanup(sony_pf_device); | ||
1403 | sony_pf_remove(); | 1706 | sony_pf_remove(); |
1404 | sony_laptop_remove_input(); | 1707 | sony_laptop_remove_input(); |
1405 | sony_nc_rfkill_cleanup(); | 1708 | sony_nc_rfkill_cleanup(); |
@@ -1438,7 +1741,6 @@ static struct acpi_driver sony_nc_driver = { | |||
1438 | #define SONYPI_DEVICE_TYPE1 0x00000001 | 1741 | #define SONYPI_DEVICE_TYPE1 0x00000001 |
1439 | #define SONYPI_DEVICE_TYPE2 0x00000002 | 1742 | #define SONYPI_DEVICE_TYPE2 0x00000002 |
1440 | #define SONYPI_DEVICE_TYPE3 0x00000004 | 1743 | #define SONYPI_DEVICE_TYPE3 0x00000004 |
1441 | #define SONYPI_DEVICE_TYPE4 0x00000008 | ||
1442 | 1744 | ||
1443 | #define SONYPI_TYPE1_OFFSET 0x04 | 1745 | #define SONYPI_TYPE1_OFFSET 0x04 |
1444 | #define SONYPI_TYPE2_OFFSET 0x12 | 1746 | #define SONYPI_TYPE2_OFFSET 0x12 |
@@ -1584,8 +1886,8 @@ static struct sonypi_event sonypi_blueev[] = { | |||
1584 | 1886 | ||
1585 | /* The set of possible wireless events */ | 1887 | /* The set of possible wireless events */ |
1586 | static struct sonypi_event sonypi_wlessev[] = { | 1888 | static struct sonypi_event sonypi_wlessev[] = { |
1587 | { 0x59, SONYPI_EVENT_WIRELESS_ON }, | 1889 | { 0x59, SONYPI_EVENT_IGNORE }, |
1588 | { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, | 1890 | { 0x5a, SONYPI_EVENT_IGNORE }, |
1589 | { 0, 0 } | 1891 | { 0, 0 } |
1590 | }; | 1892 | }; |
1591 | 1893 | ||
@@ -1842,7 +2144,7 @@ out: | |||
1842 | if (pcidev) | 2144 | if (pcidev) |
1843 | pci_dev_put(pcidev); | 2145 | pci_dev_put(pcidev); |
1844 | 2146 | ||
1845 | printk(KERN_INFO DRV_PFX "detected Type%d model\n", | 2147 | pr_info(DRV_PFX "detected Type%d model\n", |
1846 | dev->model == SONYPI_DEVICE_TYPE1 ? 1 : | 2148 | dev->model == SONYPI_DEVICE_TYPE1 ? 1 : |
1847 | dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); | 2149 | dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); |
1848 | } | 2150 | } |
@@ -1890,7 +2192,7 @@ static int __sony_pic_camera_ready(void) | |||
1890 | static int __sony_pic_camera_off(void) | 2192 | static int __sony_pic_camera_off(void) |
1891 | { | 2193 | { |
1892 | if (!camera) { | 2194 | if (!camera) { |
1893 | printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); | 2195 | pr_warn(DRV_PFX "camera control not enabled\n"); |
1894 | return -ENODEV; | 2196 | return -ENODEV; |
1895 | } | 2197 | } |
1896 | 2198 | ||
@@ -1910,7 +2212,7 @@ static int __sony_pic_camera_on(void) | |||
1910 | int i, j, x; | 2212 | int i, j, x; |
1911 | 2213 | ||
1912 | if (!camera) { | 2214 | if (!camera) { |
1913 | printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); | 2215 | pr_warn(DRV_PFX "camera control not enabled\n"); |
1914 | return -ENODEV; | 2216 | return -ENODEV; |
1915 | } | 2217 | } |
1916 | 2218 | ||
@@ -1933,7 +2235,7 @@ static int __sony_pic_camera_on(void) | |||
1933 | } | 2235 | } |
1934 | 2236 | ||
1935 | if (j == 0) { | 2237 | if (j == 0) { |
1936 | printk(KERN_WARNING DRV_PFX "failed to power on camera\n"); | 2238 | pr_warn(DRV_PFX "failed to power on camera\n"); |
1937 | return -ENODEV; | 2239 | return -ENODEV; |
1938 | } | 2240 | } |
1939 | 2241 | ||
@@ -1989,7 +2291,7 @@ int sony_pic_camera_command(int command, u8 value) | |||
1989 | ITERATIONS_SHORT); | 2291 | ITERATIONS_SHORT); |
1990 | break; | 2292 | break; |
1991 | default: | 2293 | default: |
1992 | printk(KERN_ERR DRV_PFX "sony_pic_camera_command invalid: %d\n", | 2294 | pr_err(DRV_PFX "sony_pic_camera_command invalid: %d\n", |
1993 | command); | 2295 | command); |
1994 | break; | 2296 | break; |
1995 | } | 2297 | } |
@@ -2396,7 +2698,7 @@ static int sonypi_compat_init(void) | |||
2396 | error = | 2698 | error = |
2397 | kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); | 2699 | kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); |
2398 | if (error) { | 2700 | if (error) { |
2399 | printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); | 2701 | pr_err(DRV_PFX "kfifo_alloc failed\n"); |
2400 | return error; | 2702 | return error; |
2401 | } | 2703 | } |
2402 | 2704 | ||
@@ -2406,11 +2708,11 @@ static int sonypi_compat_init(void) | |||
2406 | sonypi_misc_device.minor = minor; | 2708 | sonypi_misc_device.minor = minor; |
2407 | error = misc_register(&sonypi_misc_device); | 2709 | error = misc_register(&sonypi_misc_device); |
2408 | if (error) { | 2710 | if (error) { |
2409 | printk(KERN_ERR DRV_PFX "misc_register failed\n"); | 2711 | pr_err(DRV_PFX "misc_register failed\n"); |
2410 | goto err_free_kfifo; | 2712 | goto err_free_kfifo; |
2411 | } | 2713 | } |
2412 | if (minor == -1) | 2714 | if (minor == -1) |
2413 | printk(KERN_INFO DRV_PFX "device allocated minor is %d\n", | 2715 | pr_info(DRV_PFX "device allocated minor is %d\n", |
2414 | sonypi_misc_device.minor); | 2716 | sonypi_misc_device.minor); |
2415 | 2717 | ||
2416 | return 0; | 2718 | return 0; |
@@ -2470,8 +2772,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) | |||
2470 | } | 2772 | } |
2471 | for (i = 0; i < p->interrupt_count; i++) { | 2773 | for (i = 0; i < p->interrupt_count; i++) { |
2472 | if (!p->interrupts[i]) { | 2774 | if (!p->interrupts[i]) { |
2473 | printk(KERN_WARNING DRV_PFX | 2775 | pr_warn(DRV_PFX "Invalid IRQ %d\n", |
2474 | "Invalid IRQ %d\n", | ||
2475 | p->interrupts[i]); | 2776 | p->interrupts[i]); |
2476 | continue; | 2777 | continue; |
2477 | } | 2778 | } |
@@ -2510,7 +2811,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) | |||
2510 | ioport->io2.address_length); | 2811 | ioport->io2.address_length); |
2511 | } | 2812 | } |
2512 | else { | 2813 | else { |
2513 | printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); | 2814 | pr_err(DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); |
2514 | return AE_ERROR; | 2815 | return AE_ERROR; |
2515 | } | 2816 | } |
2516 | return AE_OK; | 2817 | return AE_OK; |
@@ -2538,7 +2839,7 @@ static int sony_pic_possible_resources(struct acpi_device *device) | |||
2538 | dprintk("Evaluating _STA\n"); | 2839 | dprintk("Evaluating _STA\n"); |
2539 | result = acpi_bus_get_status(device); | 2840 | result = acpi_bus_get_status(device); |
2540 | if (result) { | 2841 | if (result) { |
2541 | printk(KERN_WARNING DRV_PFX "Unable to read status\n"); | 2842 | pr_warn(DRV_PFX "Unable to read status\n"); |
2542 | goto end; | 2843 | goto end; |
2543 | } | 2844 | } |
2544 | 2845 | ||
@@ -2554,8 +2855,7 @@ static int sony_pic_possible_resources(struct acpi_device *device) | |||
2554 | status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, | 2855 | status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, |
2555 | sony_pic_read_possible_resource, &spic_dev); | 2856 | sony_pic_read_possible_resource, &spic_dev); |
2556 | if (ACPI_FAILURE(status)) { | 2857 | if (ACPI_FAILURE(status)) { |
2557 | printk(KERN_WARNING DRV_PFX | 2858 | pr_warn(DRV_PFX "Failure evaluating %s\n", |
2558 | "Failure evaluating %s\n", | ||
2559 | METHOD_NAME__PRS); | 2859 | METHOD_NAME__PRS); |
2560 | result = -ENODEV; | 2860 | result = -ENODEV; |
2561 | } | 2861 | } |
@@ -2669,7 +2969,7 @@ static int sony_pic_enable(struct acpi_device *device, | |||
2669 | 2969 | ||
2670 | /* check for total failure */ | 2970 | /* check for total failure */ |
2671 | if (ACPI_FAILURE(status)) { | 2971 | if (ACPI_FAILURE(status)) { |
2672 | printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n"); | 2972 | pr_err(DRV_PFX "Error evaluating _SRS\n"); |
2673 | result = -ENODEV; | 2973 | result = -ENODEV; |
2674 | goto end; | 2974 | goto end; |
2675 | } | 2975 | } |
@@ -2725,6 +3025,9 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) | |||
2725 | if (ev == dev->event_types[i].events[j].data) { | 3025 | if (ev == dev->event_types[i].events[j].data) { |
2726 | device_event = | 3026 | device_event = |
2727 | dev->event_types[i].events[j].event; | 3027 | dev->event_types[i].events[j].event; |
3028 | /* some events may require ignoring */ | ||
3029 | if (!device_event) | ||
3030 | return IRQ_HANDLED; | ||
2728 | goto found; | 3031 | goto found; |
2729 | } | 3032 | } |
2730 | } | 3033 | } |
@@ -2744,7 +3047,6 @@ found: | |||
2744 | sony_laptop_report_input_event(device_event); | 3047 | sony_laptop_report_input_event(device_event); |
2745 | acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event); | 3048 | acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event); |
2746 | sonypi_compat_report_event(device_event); | 3049 | sonypi_compat_report_event(device_event); |
2747 | |||
2748 | return IRQ_HANDLED; | 3050 | return IRQ_HANDLED; |
2749 | } | 3051 | } |
2750 | 3052 | ||
@@ -2759,7 +3061,7 @@ static int sony_pic_remove(struct acpi_device *device, int type) | |||
2759 | struct sony_pic_irq *irq, *tmp_irq; | 3061 | struct sony_pic_irq *irq, *tmp_irq; |
2760 | 3062 | ||
2761 | if (sony_pic_disable(device)) { | 3063 | if (sony_pic_disable(device)) { |
2762 | printk(KERN_ERR DRV_PFX "Couldn't disable device.\n"); | 3064 | pr_err(DRV_PFX "Couldn't disable device.\n"); |
2763 | return -ENXIO; | 3065 | return -ENXIO; |
2764 | } | 3066 | } |
2765 | 3067 | ||
@@ -2799,8 +3101,8 @@ static int sony_pic_add(struct acpi_device *device) | |||
2799 | struct sony_pic_ioport *io, *tmp_io; | 3101 | struct sony_pic_ioport *io, *tmp_io; |
2800 | struct sony_pic_irq *irq, *tmp_irq; | 3102 | struct sony_pic_irq *irq, *tmp_irq; |
2801 | 3103 | ||
2802 | printk(KERN_INFO DRV_PFX "%s v%s.\n", | 3104 | pr_info(DRV_PFX "%s v%s.\n", SONY_PIC_DRIVER_NAME, |
2803 | SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); | 3105 | SONY_LAPTOP_DRIVER_VERSION); |
2804 | 3106 | ||
2805 | spic_dev.acpi_dev = device; | 3107 | spic_dev.acpi_dev = device; |
2806 | strcpy(acpi_device_class(device), "sony/hotkey"); | 3108 | strcpy(acpi_device_class(device), "sony/hotkey"); |
@@ -2810,16 +3112,14 @@ static int sony_pic_add(struct acpi_device *device) | |||
2810 | /* read _PRS resources */ | 3112 | /* read _PRS resources */ |
2811 | result = sony_pic_possible_resources(device); | 3113 | result = sony_pic_possible_resources(device); |
2812 | if (result) { | 3114 | if (result) { |
2813 | printk(KERN_ERR DRV_PFX | 3115 | pr_err(DRV_PFX "Unable to read possible resources.\n"); |
2814 | "Unable to read possible resources.\n"); | ||
2815 | goto err_free_resources; | 3116 | goto err_free_resources; |
2816 | } | 3117 | } |
2817 | 3118 | ||
2818 | /* setup input devices and helper fifo */ | 3119 | /* setup input devices and helper fifo */ |
2819 | result = sony_laptop_setup_input(device); | 3120 | result = sony_laptop_setup_input(device); |
2820 | if (result) { | 3121 | if (result) { |
2821 | printk(KERN_ERR DRV_PFX | 3122 | pr_err(DRV_PFX "Unable to create input devices.\n"); |
2822 | "Unable to create input devices.\n"); | ||
2823 | goto err_free_resources; | 3123 | goto err_free_resources; |
2824 | } | 3124 | } |
2825 | 3125 | ||
@@ -2829,7 +3129,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2829 | /* request io port */ | 3129 | /* request io port */ |
2830 | list_for_each_entry_reverse(io, &spic_dev.ioports, list) { | 3130 | list_for_each_entry_reverse(io, &spic_dev.ioports, list) { |
2831 | if (request_region(io->io1.minimum, io->io1.address_length, | 3131 | if (request_region(io->io1.minimum, io->io1.address_length, |
2832 | "Sony Programable I/O Device")) { | 3132 | "Sony Programmable I/O Device")) { |
2833 | dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", | 3133 | dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", |
2834 | io->io1.minimum, io->io1.maximum, | 3134 | io->io1.minimum, io->io1.maximum, |
2835 | io->io1.address_length); | 3135 | io->io1.address_length); |
@@ -2837,7 +3137,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2837 | if (io->io2.minimum) { | 3137 | if (io->io2.minimum) { |
2838 | if (request_region(io->io2.minimum, | 3138 | if (request_region(io->io2.minimum, |
2839 | io->io2.address_length, | 3139 | io->io2.address_length, |
2840 | "Sony Programable I/O Device")) { | 3140 | "Sony Programmable I/O Device")) { |
2841 | dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", | 3141 | dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", |
2842 | io->io2.minimum, io->io2.maximum, | 3142 | io->io2.minimum, io->io2.maximum, |
2843 | io->io2.address_length); | 3143 | io->io2.address_length); |
@@ -2860,7 +3160,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2860 | } | 3160 | } |
2861 | } | 3161 | } |
2862 | if (!spic_dev.cur_ioport) { | 3162 | if (!spic_dev.cur_ioport) { |
2863 | printk(KERN_ERR DRV_PFX "Failed to request_region.\n"); | 3163 | pr_err(DRV_PFX "Failed to request_region.\n"); |
2864 | result = -ENODEV; | 3164 | result = -ENODEV; |
2865 | goto err_remove_compat; | 3165 | goto err_remove_compat; |
2866 | } | 3166 | } |
@@ -2880,7 +3180,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2880 | } | 3180 | } |
2881 | } | 3181 | } |
2882 | if (!spic_dev.cur_irq) { | 3182 | if (!spic_dev.cur_irq) { |
2883 | printk(KERN_ERR DRV_PFX "Failed to request_irq.\n"); | 3183 | pr_err(DRV_PFX "Failed to request_irq.\n"); |
2884 | result = -ENODEV; | 3184 | result = -ENODEV; |
2885 | goto err_release_region; | 3185 | goto err_release_region; |
2886 | } | 3186 | } |
@@ -2888,7 +3188,7 @@ static int sony_pic_add(struct acpi_device *device) | |||
2888 | /* set resource status _SRS */ | 3188 | /* set resource status _SRS */ |
2889 | result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); | 3189 | result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); |
2890 | if (result) { | 3190 | if (result) { |
2891 | printk(KERN_ERR DRV_PFX "Couldn't enable device.\n"); | 3191 | pr_err(DRV_PFX "Couldn't enable device.\n"); |
2892 | goto err_free_irq; | 3192 | goto err_free_irq; |
2893 | } | 3193 | } |
2894 | 3194 | ||
@@ -2997,8 +3297,7 @@ static int __init sony_laptop_init(void) | |||
2997 | if (!no_spic && dmi_check_system(sonypi_dmi_table)) { | 3297 | if (!no_spic && dmi_check_system(sonypi_dmi_table)) { |
2998 | result = acpi_bus_register_driver(&sony_pic_driver); | 3298 | result = acpi_bus_register_driver(&sony_pic_driver); |
2999 | if (result) { | 3299 | if (result) { |
3000 | printk(KERN_ERR DRV_PFX | 3300 | pr_err(DRV_PFX "Unable to register SPIC driver.\n"); |
3001 | "Unable to register SPIC driver."); | ||
3002 | goto out; | 3301 | goto out; |
3003 | } | 3302 | } |
3004 | spic_drv_registered = 1; | 3303 | spic_drv_registered = 1; |
@@ -3006,7 +3305,7 @@ static int __init sony_laptop_init(void) | |||
3006 | 3305 | ||
3007 | result = acpi_bus_register_driver(&sony_nc_driver); | 3306 | result = acpi_bus_register_driver(&sony_nc_driver); |
3008 | if (result) { | 3307 | if (result) { |
3009 | printk(KERN_ERR DRV_PFX "Unable to register SNC driver."); | 3308 | pr_err(DRV_PFX "Unable to register SNC driver.\n"); |
3010 | goto out_unregister_pic; | 3309 | goto out_unregister_pic; |
3011 | } | 3310 | } |
3012 | 3311 | ||
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 947bdcaa0ce9..a08561f5349e 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -2407,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, | |||
2407 | * This code is supposed to duplicate the IBM firmware behaviour: | 2407 | * This code is supposed to duplicate the IBM firmware behaviour: |
2408 | * - Pressing MUTE issues mute hotkey message, even when already mute | 2408 | * - Pressing MUTE issues mute hotkey message, even when already mute |
2409 | * - Pressing Volume up/down issues volume up/down hotkey messages, | 2409 | * - Pressing Volume up/down issues volume up/down hotkey messages, |
2410 | * even when already at maximum or minumum volume | 2410 | * even when already at maximum or minimum volume |
2411 | * - The act of unmuting issues volume up/down notification, | 2411 | * - The act of unmuting issues volume up/down notification, |
2412 | * depending which key was used to unmute | 2412 | * depending which key was used to unmute |
2413 | * | 2413 | * |
@@ -2990,7 +2990,7 @@ static void tpacpi_send_radiosw_update(void) | |||
2990 | * rfkill input events, or we will race the rfkill core input | 2990 | * rfkill input events, or we will race the rfkill core input |
2991 | * handler. | 2991 | * handler. |
2992 | * | 2992 | * |
2993 | * tpacpi_inputdev_send_mutex works as a syncronization point | 2993 | * tpacpi_inputdev_send_mutex works as a synchronization point |
2994 | * for the above. | 2994 | * for the above. |
2995 | * | 2995 | * |
2996 | * We optimize to avoid numerous calls to hotkey_get_wlsw. | 2996 | * We optimize to avoid numerous calls to hotkey_get_wlsw. |
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c new file mode 100644 index 000000000000..c1372ed9d2e9 --- /dev/null +++ b/drivers/platform/x86/xo15-ebook.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * OLPC XO-1.5 ebook switch driver | ||
3 | * (based on generic ACPI button driver) | ||
4 | * | ||
5 | * Copyright (C) 2009 Paul Fox <pgf@laptop.org> | ||
6 | * Copyright (C) 2010 One Laptop per Child | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or (at | ||
11 | * your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/input.h> | ||
19 | #include <acpi/acpi_bus.h> | ||
20 | #include <acpi/acpi_drivers.h> | ||
21 | |||
22 | #define MODULE_NAME "xo15-ebook" | ||
23 | #define PREFIX MODULE_NAME ": " | ||
24 | |||
25 | #define XO15_EBOOK_CLASS MODULE_NAME | ||
26 | #define XO15_EBOOK_TYPE_UNKNOWN 0x00 | ||
27 | #define XO15_EBOOK_NOTIFY_STATUS 0x80 | ||
28 | |||
29 | #define XO15_EBOOK_SUBCLASS "ebook" | ||
30 | #define XO15_EBOOK_HID "XO15EBK" | ||
31 | #define XO15_EBOOK_DEVICE_NAME "EBook Switch" | ||
32 | |||
33 | ACPI_MODULE_NAME(MODULE_NAME); | ||
34 | |||
35 | MODULE_DESCRIPTION("OLPC XO-1.5 ebook switch driver"); | ||
36 | MODULE_LICENSE("GPL"); | ||
37 | |||
38 | static const struct acpi_device_id ebook_device_ids[] = { | ||
39 | { XO15_EBOOK_HID, 0 }, | ||
40 | { "", 0 }, | ||
41 | }; | ||
42 | MODULE_DEVICE_TABLE(acpi, ebook_device_ids); | ||
43 | |||
44 | struct ebook_switch { | ||
45 | struct input_dev *input; | ||
46 | char phys[32]; /* for input device */ | ||
47 | }; | ||
48 | |||
49 | static int ebook_send_state(struct acpi_device *device) | ||
50 | { | ||
51 | struct ebook_switch *button = acpi_driver_data(device); | ||
52 | unsigned long long state; | ||
53 | acpi_status status; | ||
54 | |||
55 | status = acpi_evaluate_integer(device->handle, "EBK", NULL, &state); | ||
56 | if (ACPI_FAILURE(status)) | ||
57 | return -EIO; | ||
58 | |||
59 | /* input layer checks if event is redundant */ | ||
60 | input_report_switch(button->input, SW_TABLET_MODE, !state); | ||
61 | input_sync(button->input); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static void ebook_switch_notify(struct acpi_device *device, u32 event) | ||
66 | { | ||
67 | switch (event) { | ||
68 | case ACPI_FIXED_HARDWARE_EVENT: | ||
69 | case XO15_EBOOK_NOTIFY_STATUS: | ||
70 | ebook_send_state(device); | ||
71 | break; | ||
72 | default: | ||
73 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
74 | "Unsupported event [0x%x]\n", event)); | ||
75 | break; | ||
76 | } | ||
77 | } | ||
78 | |||
79 | static int ebook_switch_resume(struct acpi_device *device) | ||
80 | { | ||
81 | return ebook_send_state(device); | ||
82 | } | ||
83 | |||
84 | static int ebook_switch_add(struct acpi_device *device) | ||
85 | { | ||
86 | struct ebook_switch *button; | ||
87 | struct input_dev *input; | ||
88 | const char *hid = acpi_device_hid(device); | ||
89 | char *name, *class; | ||
90 | int error; | ||
91 | |||
92 | button = kzalloc(sizeof(struct ebook_switch), GFP_KERNEL); | ||
93 | if (!button) | ||
94 | return -ENOMEM; | ||
95 | |||
96 | device->driver_data = button; | ||
97 | |||
98 | button->input = input = input_allocate_device(); | ||
99 | if (!input) { | ||
100 | error = -ENOMEM; | ||
101 | goto err_free_button; | ||
102 | } | ||
103 | |||
104 | name = acpi_device_name(device); | ||
105 | class = acpi_device_class(device); | ||
106 | |||
107 | if (strcmp(hid, XO15_EBOOK_HID)) { | ||
108 | printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid); | ||
109 | error = -ENODEV; | ||
110 | goto err_free_input; | ||
111 | } | ||
112 | |||
113 | strcpy(name, XO15_EBOOK_DEVICE_NAME); | ||
114 | sprintf(class, "%s/%s", XO15_EBOOK_CLASS, XO15_EBOOK_SUBCLASS); | ||
115 | |||
116 | snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid); | ||
117 | |||
118 | input->name = name; | ||
119 | input->phys = button->phys; | ||
120 | input->id.bustype = BUS_HOST; | ||
121 | input->dev.parent = &device->dev; | ||
122 | |||
123 | input->evbit[0] = BIT_MASK(EV_SW); | ||
124 | set_bit(SW_TABLET_MODE, input->swbit); | ||
125 | |||
126 | error = input_register_device(input); | ||
127 | if (error) | ||
128 | goto err_free_input; | ||
129 | |||
130 | ebook_send_state(device); | ||
131 | |||
132 | if (device->wakeup.flags.valid) { | ||
133 | /* Button's GPE is run-wake GPE */ | ||
134 | acpi_enable_gpe(device->wakeup.gpe_device, | ||
135 | device->wakeup.gpe_number); | ||
136 | device_set_wakeup_enable(&device->dev, true); | ||
137 | } | ||
138 | |||
139 | return 0; | ||
140 | |||
141 | err_free_input: | ||
142 | input_free_device(input); | ||
143 | err_free_button: | ||
144 | kfree(button); | ||
145 | return error; | ||
146 | } | ||
147 | |||
148 | static int ebook_switch_remove(struct acpi_device *device, int type) | ||
149 | { | ||
150 | struct ebook_switch *button = acpi_driver_data(device); | ||
151 | |||
152 | input_unregister_device(button->input); | ||
153 | kfree(button); | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static struct acpi_driver xo15_ebook_driver = { | ||
158 | .name = MODULE_NAME, | ||
159 | .class = XO15_EBOOK_CLASS, | ||
160 | .ids = ebook_device_ids, | ||
161 | .ops = { | ||
162 | .add = ebook_switch_add, | ||
163 | .resume = ebook_switch_resume, | ||
164 | .remove = ebook_switch_remove, | ||
165 | .notify = ebook_switch_notify, | ||
166 | }, | ||
167 | }; | ||
168 | |||
169 | static int __init xo15_ebook_init(void) | ||
170 | { | ||
171 | return acpi_bus_register_driver(&xo15_ebook_driver); | ||
172 | } | ||
173 | |||
174 | static void __exit xo15_ebook_exit(void) | ||
175 | { | ||
176 | acpi_bus_unregister_driver(&xo15_ebook_driver); | ||
177 | } | ||
178 | |||
179 | module_init(xo15_ebook_init); | ||
180 | module_exit(xo15_ebook_exit); | ||
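The xo15-ebook driver reports the ebook position as a SW_TABLET_MODE switch on an input device named "EBook Switch", so user space can query the current state with the standard EVIOCGSW ioctl. A hedged sketch follows; the event node path is an assumption, and in practice the device should be located by name via /proc/bus/input/devices:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	/* device node assumed for illustration */
	const char *node = "/dev/input/event3";
	unsigned long sw[SW_MAX / (8 * sizeof(long)) + 1];
	int fd = open(node, O_RDONLY);
	int on;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(sw, 0, sizeof(sw));
	if (ioctl(fd, EVIOCGSW(sizeof(sw)), sw) < 0) {
		perror("EVIOCGSW");
		close(fd);
		return 1;
	}

	/* SW_TABLET_MODE is set when the machine is folded into ebook mode */
	on = !!(sw[SW_TABLET_MODE / (8 * sizeof(long))] &
		(1UL << (SW_TABLET_MODE % (8 * sizeof(long)))));
	printf("ebook mode: %s\n", on ? "yes" : "no");

	close(fd);
	return 0;
}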
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c index 2a9ab89f83b8..e5ced3a4c1ed 100644 --- a/drivers/power/z2_battery.c +++ b/drivers/power/z2_battery.c | |||
@@ -215,8 +215,8 @@ static int __devinit z2_batt_probe(struct i2c_client *client, | |||
215 | if (ret) | 215 | if (ret) |
216 | goto err2; | 216 | goto err2; |
217 | 217 | ||
218 | set_irq_type(gpio_to_irq(info->charge_gpio), | 218 | irq_set_irq_type(gpio_to_irq(info->charge_gpio), |
219 | IRQ_TYPE_EDGE_BOTH); | 219 | IRQ_TYPE_EDGE_BOTH); |
220 | ret = request_irq(gpio_to_irq(info->charge_gpio), | 220 | ret = request_irq(gpio_to_irq(info->charge_gpio), |
221 | z2_charge_switch_irq, IRQF_DISABLED, | 221 | z2_charge_switch_irq, IRQF_DISABLED, |
222 | "AC Detect", charger); | 222 | "AC Detect", charger); |
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index e55dc1ac83ab..6ac55fd48413 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
@@ -782,11 +782,11 @@ static void sh_rtc_set_irq_wake(struct device *dev, int enabled) | |||
782 | struct platform_device *pdev = to_platform_device(dev); | 782 | struct platform_device *pdev = to_platform_device(dev); |
783 | struct sh_rtc *rtc = platform_get_drvdata(pdev); | 783 | struct sh_rtc *rtc = platform_get_drvdata(pdev); |
784 | 784 | ||
785 | set_irq_wake(rtc->periodic_irq, enabled); | 785 | irq_set_irq_wake(rtc->periodic_irq, enabled); |
786 | 786 | ||
787 | if (rtc->carry_irq > 0) { | 787 | if (rtc->carry_irq > 0) { |
788 | set_irq_wake(rtc->carry_irq, enabled); | 788 | irq_set_irq_wake(rtc->carry_irq, enabled); |
789 | set_irq_wake(rtc->alarm_irq, enabled); | 789 | irq_set_irq_wake(rtc->alarm_irq, enabled); |
790 | } | 790 | } |
791 | } | 791 | } |
792 | 792 | ||
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 5833afbf08d7..c6ca115c71df 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c | |||
@@ -63,7 +63,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level) | |||
63 | 63 | ||
64 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) | 64 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) |
65 | { | 65 | { |
66 | generic_handle_irq((unsigned int)get_irq_data(irq)); | 66 | generic_handle_irq((unsigned int)irq_get_handler_data(irq)); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void __init intc_register_irq(struct intc_desc *desc, | 69 | static void __init intc_register_irq(struct intc_desc *desc, |
@@ -116,9 +116,9 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
116 | irq_data = irq_get_irq_data(irq); | 116 | irq_data = irq_get_irq_data(irq); |
117 | 117 | ||
118 | disable_irq_nosync(irq); | 118 | disable_irq_nosync(irq); |
119 | set_irq_chip_and_handler_name(irq, &d->chip, | 119 | irq_set_chip_and_handler_name(irq, &d->chip, handle_level_irq, |
120 | handle_level_irq, "level"); | 120 | "level"); |
121 | set_irq_chip_data(irq, (void *)data[primary]); | 121 | irq_set_chip_data(irq, (void *)data[primary]); |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * set priority level | 124 | * set priority level |
@@ -340,9 +340,9 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
340 | vect2->enum_id = 0; | 340 | vect2->enum_id = 0; |
341 | 341 | ||
342 | /* redirect this interrupts to the first one */ | 342 | /* redirect this interrupts to the first one */ |
343 | set_irq_chip(irq2, &dummy_irq_chip); | 343 | irq_set_chip(irq2, &dummy_irq_chip); |
344 | set_irq_chained_handler(irq2, intc_redirect_irq); | 344 | irq_set_chained_handler(irq2, intc_redirect_irq); |
345 | set_irq_data(irq2, (void *)irq); | 345 | irq_set_handler_data(irq2, (void *)irq); |
346 | } | 346 | } |
347 | } | 347 | } |
348 | 348 | ||
@@ -387,19 +387,16 @@ static int intc_suspend(void) | |||
387 | /* enable wakeup irqs belonging to this intc controller */ | 387 | /* enable wakeup irqs belonging to this intc controller */ |
388 | for_each_active_irq(irq) { | 388 | for_each_active_irq(irq) { |
389 | struct irq_data *data; | 389 | struct irq_data *data; |
390 | struct irq_desc *desc; | ||
391 | struct irq_chip *chip; | 390 | struct irq_chip *chip; |
392 | 391 | ||
393 | data = irq_get_irq_data(irq); | 392 | data = irq_get_irq_data(irq); |
394 | chip = irq_data_get_irq_chip(data); | 393 | chip = irq_data_get_irq_chip(data); |
395 | if (chip != &d->chip) | 394 | if (chip != &d->chip) |
396 | continue; | 395 | continue; |
397 | desc = irq_to_desc(irq); | 396 | if (irqd_is_wakeup_set(data)) |
398 | if ((desc->status & IRQ_WAKEUP)) | ||
399 | chip->irq_enable(data); | 397 | chip->irq_enable(data); |
400 | } | 398 | } |
401 | } | 399 | } |
402 | |||
403 | return 0; | 400 | return 0; |
404 | } | 401 | } |
405 | 402 | ||
@@ -412,7 +409,6 @@ static void intc_resume(void) | |||
412 | 409 | ||
413 | for_each_active_irq(irq) { | 410 | for_each_active_irq(irq) { |
414 | struct irq_data *data; | 411 | struct irq_data *data; |
415 | struct irq_desc *desc; | ||
416 | struct irq_chip *chip; | 412 | struct irq_chip *chip; |
417 | 413 | ||
418 | data = irq_get_irq_data(irq); | 414 | data = irq_get_irq_data(irq); |
@@ -423,8 +419,7 @@ static void intc_resume(void) | |||
423 | */ | 419 | */ |
424 | if (chip != &d->chip) | 420 | if (chip != &d->chip) |
425 | continue; | 421 | continue; |
426 | desc = irq_to_desc(irq); | 422 | if (irqd_irq_disabled(data)) |
427 | if (desc->status & IRQ_DISABLED) | ||
428 | chip->irq_disable(data); | 423 | chip->irq_disable(data); |
429 | else | 424 | else |
430 | chip->irq_enable(data); | 425 | chip->irq_enable(data); |
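The intc suspend/resume hunks above also replace direct desc->status flag tests (IRQ_WAKEUP, IRQ_DISABLED) with the irq_data state accessors irqd_is_wakeup_set() and irqd_irq_disabled(), so the separate irq_desc lookup disappears. A hedged sketch of the resulting style; only the function name is invented, the calls mirror the resume path in the hunk.

/* Sketch of the irqd_* state accessors used above; the function name is
 * hypothetical, the body follows the resume path in the hunk.
 */
#include <linux/irq.h>

static void example_resync_irq(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (irqd_irq_disabled(data))	/* was: desc->status & IRQ_DISABLED */
		chip->irq_disable(data);
	else
		chip->irq_enable(data);
}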
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index 4e0ff7181164..ce5f81d7cc6b 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c | |||
@@ -110,7 +110,7 @@ static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) | |||
110 | { | 110 | { |
111 | struct irq_data *data = irq_get_irq_data(irq); | 111 | struct irq_data *data = irq_get_irq_data(irq); |
112 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 112 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
113 | struct intc_virq_list *entry, *vlist = irq_data_get_irq_data(data); | 113 | struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data); |
114 | struct intc_desc_int *d = get_intc_desc(irq); | 114 | struct intc_desc_int *d = get_intc_desc(irq); |
115 | 115 | ||
116 | chip->irq_mask_ack(data); | 116 | chip->irq_mask_ack(data); |
@@ -118,7 +118,7 @@ static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) | |||
118 | for_each_virq(entry, vlist) { | 118 | for_each_virq(entry, vlist) { |
119 | unsigned long addr, handle; | 119 | unsigned long addr, handle; |
120 | 120 | ||
121 | handle = (unsigned long)get_irq_data(entry->irq); | 121 | handle = (unsigned long)irq_get_handler_data(entry->irq); |
122 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); | 122 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); |
123 | 123 | ||
124 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) | 124 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) |
@@ -229,13 +229,13 @@ restart: | |||
229 | 229 | ||
230 | intc_irq_xlate_set(irq, entry->enum_id, d); | 230 | intc_irq_xlate_set(irq, entry->enum_id, d); |
231 | 231 | ||
232 | set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq), | 232 | irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq), |
233 | handle_simple_irq, "virq"); | 233 | handle_simple_irq, "virq"); |
234 | set_irq_chip_data(irq, get_irq_chip_data(entry->pirq)); | 234 | irq_set_chip_data(irq, irq_get_chip_data(entry->pirq)); |
235 | 235 | ||
236 | set_irq_data(irq, (void *)entry->handle); | 236 | irq_set_handler_data(irq, (void *)entry->handle); |
237 | 237 | ||
238 | set_irq_chained_handler(entry->pirq, intc_virq_handler); | 238 | irq_set_chained_handler(entry->pirq, intc_virq_handler); |
239 | add_virq_to_pirq(entry->pirq, irq); | 239 | add_virq_to_pirq(entry->pirq, irq); |
240 | 240 | ||
241 | radix_tree_tag_clear(&d->tree, entry->enum_id, | 241 | radix_tree_tag_clear(&d->tree, entry->enum_id, |
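The virq.c hunk above also shows the companion rename for per-IRQ cookies: set_irq_data()/get_irq_data() become irq_set_handler_data()/irq_get_handler_data(), with the chained handler fetching the cookie by IRQ number. A minimal sketch of that pairing, using the chained-handler signature of this kernel era; the cookie contents and handler body are placeholders.

/* Sketch of the handler-data accessors and chained-handler wiring used
 * above; the cookie and handler body are hypothetical.
 */
#include <linux/irq.h>

static void example_chained_handler(unsigned int irq, struct irq_desc *desc)
{
	void *cookie = irq_get_handler_data(irq);	/* was get_irq_data() */

	/* dispatch to the interrupts described by the cookie ... */
	(void)cookie;
}

static void example_wire_up(unsigned int parent_irq, void *cookie)
{
	irq_set_chip(parent_irq, &dummy_irq_chip);
	irq_set_handler_data(parent_irq, cookie);	/* was set_irq_data() */
	irq_set_chained_handler(parent_irq, example_chained_handler);
}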
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c index e3556ff43bb9..ac5bbc8722e5 100644 --- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c +++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c | |||
@@ -341,7 +341,7 @@ int bcmsdh_register_oob_intr(void *dhdp) | |||
341 | if (error) | 341 | if (error) |
342 | return -ENODEV; | 342 | return -ENODEV; |
343 | 343 | ||
344 | set_irq_wake(sdhcinfo->oob_irq, 1); | 344 | irq_set_irq_wake(sdhcinfo->oob_irq, 1); |
345 | sdhcinfo->oob_irq_registered = true; | 345 | sdhcinfo->oob_irq_registered = true; |
346 | } | 346 | } |
347 | 347 | ||
@@ -352,7 +352,7 @@ void bcmsdh_unregister_oob_intr(void) | |||
352 | { | 352 | { |
353 | SDLX_MSG(("%s: Enter\n", __func__)); | 353 | SDLX_MSG(("%s: Enter\n", __func__)); |
354 | 354 | ||
355 | set_irq_wake(sdhcinfo->oob_irq, 0); | 355 | irq_set_irq_wake(sdhcinfo->oob_irq, 0); |
356 | disable_irq(sdhcinfo->oob_irq); /* just in case.. */ | 356 | disable_irq(sdhcinfo->oob_irq); /* just in case.. */ |
357 | free_irq(sdhcinfo->oob_irq, NULL); | 357 | free_irq(sdhcinfo->oob_irq, NULL); |
358 | sdhcinfo->oob_irq_registered = false; | 358 | sdhcinfo->oob_irq_registered = false; |
diff --git a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c index ea9b733c3926..21cdb0637beb 100644 --- a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c +++ b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c | |||
@@ -597,7 +597,7 @@ static int cy_as_hal_configure_interrupts(void *dev_p) | |||
597 | int result; | 597 | int result; |
598 | int irq_pin = AST_INT; | 598 | int irq_pin = AST_INT; |
599 | 599 | ||
600 | set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW); | 600 | irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW); |
601 | 601 | ||
602 | /* | 602 | /* |
603 | * for shared IRQS must provide non NULL device ptr | 603 | * for shared IRQS must provide non NULL device ptr |
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index c35f1a73bc8b..52fdf60bdbe2 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -178,7 +178,7 @@ static int __init xen_hvc_init(void) | |||
178 | if (xencons_irq < 0) | 178 | if (xencons_irq < 0) |
179 | xencons_irq = 0; /* NO_IRQ */ | 179 | xencons_irq = 0; /* NO_IRQ */ |
180 | else | 180 | else |
181 | set_irq_noprobe(xencons_irq); | 181 | irq_set_noprobe(xencons_irq); |
182 | 182 | ||
183 | hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); | 183 | hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); |
184 | if (IS_ERR(hp)) | 184 | if (IS_ERR(hp)) |
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c index 2e7fc9cee9cc..b906f11f7c1a 100644 --- a/drivers/tty/serial/msm_serial_hs.c +++ b/drivers/tty/serial/msm_serial_hs.c | |||
@@ -1644,7 +1644,7 @@ static int __devinit msm_hs_probe(struct platform_device *pdev) | |||
1644 | if (unlikely(uport->irq < 0)) | 1644 | if (unlikely(uport->irq < 0)) |
1645 | return -ENXIO; | 1645 | return -ENXIO; |
1646 | 1646 | ||
1647 | if (unlikely(set_irq_wake(uport->irq, 1))) | 1647 | if (unlikely(irq_set_irq_wake(uport->irq, 1))) |
1648 | return -ENXIO; | 1648 | return -ENXIO; |
1649 | 1649 | ||
1650 | if (pdata == NULL || pdata->rx_wakeup_irq < 0) | 1650 | if (pdata == NULL || pdata->rx_wakeup_irq < 0) |
@@ -1658,7 +1658,7 @@ static int __devinit msm_hs_probe(struct platform_device *pdev) | |||
1658 | if (unlikely(msm_uport->rx_wakeup.irq < 0)) | 1658 | if (unlikely(msm_uport->rx_wakeup.irq < 0)) |
1659 | return -ENXIO; | 1659 | return -ENXIO; |
1660 | 1660 | ||
1661 | if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1))) | 1661 | if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1))) |
1662 | return -ENXIO; | 1662 | return -ENXIO; |
1663 | } | 1663 | } |
1664 | 1664 | ||
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 38193f4e980e..44e4deb362e1 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c | |||
@@ -3832,7 +3832,7 @@ static int oxu_drv_probe(struct platform_device *pdev) | |||
3832 | return -EBUSY; | 3832 | return -EBUSY; |
3833 | } | 3833 | } |
3834 | 3834 | ||
3835 | ret = set_irq_type(irq, IRQF_TRIGGER_FALLING); | 3835 | ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING); |
3836 | if (ret) { | 3836 | if (ret) { |
3837 | dev_err(&pdev->dev, "error setting irq type\n"); | 3837 | dev_err(&pdev->dev, "error setting irq type\n"); |
3838 | ret = -EFAULT; | 3838 | ret = -EFAULT; |
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 2ba3b070ed0b..c47aac4a1f98 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c | |||
@@ -943,7 +943,7 @@ static void tusb_musb_enable(struct musb *musb) | |||
943 | musb_writel(tbase, TUSB_INT_CTRL_CONF, | 943 | musb_writel(tbase, TUSB_INT_CTRL_CONF, |
944 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); | 944 | TUSB_INT_CTRL_CONF_INT_RELCYC(0)); |
945 | 945 | ||
946 | set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); | 946 | irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); |
947 | 947 | ||
948 | /* maybe force into the Default-A OTG state machine */ | 948 | /* maybe force into the Default-A OTG state machine */ |
949 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) | 949 | if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) |
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c index f885c868a04d..aa250cebecd2 100644 --- a/drivers/vlynq/vlynq.c +++ b/drivers/vlynq/vlynq.c | |||
@@ -135,40 +135,40 @@ static void vlynq_reset(struct vlynq_device *dev) | |||
135 | msleep(5); | 135 | msleep(5); |
136 | } | 136 | } |
137 | 137 | ||
138 | static void vlynq_irq_unmask(unsigned int irq) | 138 | static void vlynq_irq_unmask(struct irq_data *d) |
139 | { | 139 | { |
140 | u32 val; | 140 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
141 | struct vlynq_device *dev = get_irq_chip_data(irq); | ||
142 | int virq; | 141 | int virq; |
142 | u32 val; | ||
143 | 143 | ||
144 | BUG_ON(!dev); | 144 | BUG_ON(!dev); |
145 | virq = irq - dev->irq_start; | 145 | virq = d->irq - dev->irq_start; |
146 | val = readl(&dev->remote->int_device[virq >> 2]); | 146 | val = readl(&dev->remote->int_device[virq >> 2]); |
147 | val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq); | 147 | val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq); |
148 | writel(val, &dev->remote->int_device[virq >> 2]); | 148 | writel(val, &dev->remote->int_device[virq >> 2]); |
149 | } | 149 | } |
150 | 150 | ||
151 | static void vlynq_irq_mask(unsigned int irq) | 151 | static void vlynq_irq_mask(struct irq_data *d) |
152 | { | 152 | { |
153 | u32 val; | 153 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
154 | struct vlynq_device *dev = get_irq_chip_data(irq); | ||
155 | int virq; | 154 | int virq; |
155 | u32 val; | ||
156 | 156 | ||
157 | BUG_ON(!dev); | 157 | BUG_ON(!dev); |
158 | virq = irq - dev->irq_start; | 158 | virq = d->irq - dev->irq_start; |
159 | val = readl(&dev->remote->int_device[virq >> 2]); | 159 | val = readl(&dev->remote->int_device[virq >> 2]); |
160 | val &= ~(VINT_ENABLE << VINT_OFFSET(virq)); | 160 | val &= ~(VINT_ENABLE << VINT_OFFSET(virq)); |
161 | writel(val, &dev->remote->int_device[virq >> 2]); | 161 | writel(val, &dev->remote->int_device[virq >> 2]); |
162 | } | 162 | } |
163 | 163 | ||
164 | static int vlynq_irq_type(unsigned int irq, unsigned int flow_type) | 164 | static int vlynq_irq_type(struct irq_data *d, unsigned int flow_type) |
165 | { | 165 | { |
166 | u32 val; | 166 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
167 | struct vlynq_device *dev = get_irq_chip_data(irq); | ||
168 | int virq; | 167 | int virq; |
168 | u32 val; | ||
169 | 169 | ||
170 | BUG_ON(!dev); | 170 | BUG_ON(!dev); |
171 | virq = irq - dev->irq_start; | 171 | virq = d->irq - dev->irq_start; |
172 | val = readl(&dev->remote->int_device[virq >> 2]); | 172 | val = readl(&dev->remote->int_device[virq >> 2]); |
173 | switch (flow_type & IRQ_TYPE_SENSE_MASK) { | 173 | switch (flow_type & IRQ_TYPE_SENSE_MASK) { |
174 | case IRQ_TYPE_EDGE_RISING: | 174 | case IRQ_TYPE_EDGE_RISING: |
@@ -192,10 +192,9 @@ static int vlynq_irq_type(unsigned int irq, unsigned int flow_type) | |||
192 | return 0; | 192 | return 0; |
193 | } | 193 | } |
194 | 194 | ||
195 | static void vlynq_local_ack(unsigned int irq) | 195 | static void vlynq_local_ack(struct irq_data *d) |
196 | { | 196 | { |
197 | struct vlynq_device *dev = get_irq_chip_data(irq); | 197 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
198 | |||
199 | u32 status = readl(&dev->local->status); | 198 | u32 status = readl(&dev->local->status); |
200 | 199 | ||
201 | pr_debug("%s: local status: 0x%08x\n", | 200 | pr_debug("%s: local status: 0x%08x\n", |
@@ -203,10 +202,9 @@ static void vlynq_local_ack(unsigned int irq) | |||
203 | writel(status, &dev->local->status); | 202 | writel(status, &dev->local->status); |
204 | } | 203 | } |
205 | 204 | ||
206 | static void vlynq_remote_ack(unsigned int irq) | 205 | static void vlynq_remote_ack(struct irq_data *d) |
207 | { | 206 | { |
208 | struct vlynq_device *dev = get_irq_chip_data(irq); | 207 | struct vlynq_device *dev = irq_data_get_irq_chip_data(d); |
209 | |||
210 | u32 status = readl(&dev->remote->status); | 208 | u32 status = readl(&dev->remote->status); |
211 | 209 | ||
212 | pr_debug("%s: remote status: 0x%08x\n", | 210 | pr_debug("%s: remote status: 0x%08x\n", |
@@ -238,23 +236,23 @@ static irqreturn_t vlynq_irq(int irq, void *dev_id) | |||
238 | 236 | ||
239 | static struct irq_chip vlynq_irq_chip = { | 237 | static struct irq_chip vlynq_irq_chip = { |
240 | .name = "vlynq", | 238 | .name = "vlynq", |
241 | .unmask = vlynq_irq_unmask, | 239 | .irq_unmask = vlynq_irq_unmask, |
242 | .mask = vlynq_irq_mask, | 240 | .irq_mask = vlynq_irq_mask, |
243 | .set_type = vlynq_irq_type, | 241 | .irq_set_type = vlynq_irq_type, |
244 | }; | 242 | }; |
245 | 243 | ||
246 | static struct irq_chip vlynq_local_chip = { | 244 | static struct irq_chip vlynq_local_chip = { |
247 | .name = "vlynq local error", | 245 | .name = "vlynq local error", |
248 | .unmask = vlynq_irq_unmask, | 246 | .irq_unmask = vlynq_irq_unmask, |
249 | .mask = vlynq_irq_mask, | 247 | .irq_mask = vlynq_irq_mask, |
250 | .ack = vlynq_local_ack, | 248 | .irq_ack = vlynq_local_ack, |
251 | }; | 249 | }; |
252 | 250 | ||
253 | static struct irq_chip vlynq_remote_chip = { | 251 | static struct irq_chip vlynq_remote_chip = { |
254 | .name = "vlynq local error", | 252 | .name = "vlynq local error", |
255 | .unmask = vlynq_irq_unmask, | 253 | .irq_unmask = vlynq_irq_unmask, |
256 | .mask = vlynq_irq_mask, | 254 | .irq_mask = vlynq_irq_mask, |
257 | .ack = vlynq_remote_ack, | 255 | .irq_ack = vlynq_remote_ack, |
258 | }; | 256 | }; |
259 | 257 | ||
260 | static int vlynq_setup_irq(struct vlynq_device *dev) | 258 | static int vlynq_setup_irq(struct vlynq_device *dev) |
@@ -291,17 +289,17 @@ static int vlynq_setup_irq(struct vlynq_device *dev) | |||
291 | for (i = dev->irq_start; i <= dev->irq_end; i++) { | 289 | for (i = dev->irq_start; i <= dev->irq_end; i++) { |
292 | virq = i - dev->irq_start; | 290 | virq = i - dev->irq_start; |
293 | if (virq == dev->local_irq) { | 291 | if (virq == dev->local_irq) { |
294 | set_irq_chip_and_handler(i, &vlynq_local_chip, | 292 | irq_set_chip_and_handler(i, &vlynq_local_chip, |
295 | handle_level_irq); | 293 | handle_level_irq); |
296 | set_irq_chip_data(i, dev); | 294 | irq_set_chip_data(i, dev); |
297 | } else if (virq == dev->remote_irq) { | 295 | } else if (virq == dev->remote_irq) { |
298 | set_irq_chip_and_handler(i, &vlynq_remote_chip, | 296 | irq_set_chip_and_handler(i, &vlynq_remote_chip, |
299 | handle_level_irq); | 297 | handle_level_irq); |
300 | set_irq_chip_data(i, dev); | 298 | irq_set_chip_data(i, dev); |
301 | } else { | 299 | } else { |
302 | set_irq_chip_and_handler(i, &vlynq_irq_chip, | 300 | irq_set_chip_and_handler(i, &vlynq_irq_chip, |
303 | handle_simple_irq); | 301 | handle_simple_irq); |
304 | set_irq_chip_data(i, dev); | 302 | irq_set_chip_data(i, dev); |
305 | writel(0, &dev->remote->int_device[virq >> 2]); | 303 | writel(0, &dev->remote->int_device[virq >> 2]); |
306 | } | 304 | } |
307 | } | 305 | } |
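The vlynq conversion above illustrates the newer irq_chip interface: the mask/unmask/ack/set_type callbacks take a struct irq_data *, recover their private state with irq_data_get_irq_chip_data(), read the interrupt number from d->irq, and are wired up through the .irq_mask/.irq_unmask/.irq_ack/.irq_set_type fields. A compact sketch under those assumptions; the device structure and register layout are invented.

/* Sketch of a struct irq_data based irq_chip, following the vlynq
 * conversion above. The device structure and register layout are
 * hypothetical.
 */
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>

struct example_intc {
	void __iomem	*enable_reg;
	unsigned int	irq_base;
};

static void example_irq_unmask(struct irq_data *d)
{
	struct example_intc *intc = irq_data_get_irq_chip_data(d);
	unsigned int hwirq = d->irq - intc->irq_base;	/* d->irq replaces the old argument */

	writel(readl(intc->enable_reg) | BIT(hwirq), intc->enable_reg);
}

static void example_irq_mask(struct irq_data *d)
{
	struct example_intc *intc = irq_data_get_irq_chip_data(d);
	unsigned int hwirq = d->irq - intc->irq_base;

	writel(readl(intc->enable_reg) & ~BIT(hwirq), intc->enable_reg);
}

static struct irq_chip example_chip = {
	.name		= "example",
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
};

static void example_setup(struct example_intc *intc, unsigned int irq)
{
	irq_set_chip_and_handler(irq, &example_chip, handle_level_irq);
	irq_set_chip_data(irq, intc);
}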
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 95921b77cf86..2f4fa02744a5 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c | |||
@@ -368,9 +368,9 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
368 | ds1wm_data->active_high = plat->active_high; | 368 | ds1wm_data->active_high = plat->active_high; |
369 | 369 | ||
370 | if (res->flags & IORESOURCE_IRQ_HIGHEDGE) | 370 | if (res->flags & IORESOURCE_IRQ_HIGHEDGE) |
371 | set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); | 371 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); |
372 | if (res->flags & IORESOURCE_IRQ_LOWEDGE) | 372 | if (res->flags & IORESOURCE_IRQ_LOWEDGE) |
373 | set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); | 373 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); |
374 | 374 | ||
375 | ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, | 375 | ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, |
376 | "ds1wm", ds1wm_data); | 376 | "ds1wm", ds1wm_data); |
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index 596ba604e78d..51b5551b4e3f 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c | |||
@@ -202,7 +202,6 @@ static struct miscdevice davinci_wdt_miscdev = { | |||
202 | static int __devinit davinci_wdt_probe(struct platform_device *pdev) | 202 | static int __devinit davinci_wdt_probe(struct platform_device *pdev) |
203 | { | 203 | { |
204 | int ret = 0, size; | 204 | int ret = 0, size; |
205 | struct resource *res; | ||
206 | struct device *dev = &pdev->dev; | 205 | struct device *dev = &pdev->dev; |
207 | 206 | ||
208 | wdt_clk = clk_get(dev, NULL); | 207 | wdt_clk = clk_get(dev, NULL); |
@@ -216,31 +215,31 @@ static int __devinit davinci_wdt_probe(struct platform_device *pdev) | |||
216 | 215 | ||
217 | dev_info(dev, "heartbeat %d sec\n", heartbeat); | 216 | dev_info(dev, "heartbeat %d sec\n", heartbeat); |
218 | 217 | ||
219 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 218 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
220 | if (res == NULL) { | 219 | if (wdt_mem == NULL) { |
221 | dev_err(dev, "failed to get memory region resource\n"); | 220 | dev_err(dev, "failed to get memory region resource\n"); |
222 | return -ENOENT; | 221 | return -ENOENT; |
223 | } | 222 | } |
224 | 223 | ||
225 | size = resource_size(res); | 224 | size = resource_size(wdt_mem); |
226 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 225 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
227 | |||
228 | if (wdt_mem == NULL) { | ||
229 | dev_err(dev, "failed to get memory region\n"); | 226 | dev_err(dev, "failed to get memory region\n"); |
230 | return -ENOENT; | 227 | return -ENOENT; |
231 | } | 228 | } |
232 | 229 | ||
233 | wdt_base = ioremap(res->start, size); | 230 | wdt_base = ioremap(wdt_mem->start, size); |
234 | if (!wdt_base) { | 231 | if (!wdt_base) { |
235 | dev_err(dev, "failed to map memory region\n"); | 232 | dev_err(dev, "failed to map memory region\n"); |
233 | release_mem_region(wdt_mem->start, size); | ||
234 | wdt_mem = NULL; | ||
236 | return -ENOMEM; | 235 | return -ENOMEM; |
237 | } | 236 | } |
238 | 237 | ||
239 | ret = misc_register(&davinci_wdt_miscdev); | 238 | ret = misc_register(&davinci_wdt_miscdev); |
240 | if (ret < 0) { | 239 | if (ret < 0) { |
241 | dev_err(dev, "cannot register misc device\n"); | 240 | dev_err(dev, "cannot register misc device\n"); |
242 | release_resource(wdt_mem); | 241 | release_mem_region(wdt_mem->start, size); |
243 | kfree(wdt_mem); | 242 | wdt_mem = NULL; |
244 | } else { | 243 | } else { |
245 | set_bit(WDT_DEVICE_INITED, &wdt_status); | 244 | set_bit(WDT_DEVICE_INITED, &wdt_status); |
246 | } | 245 | } |
@@ -253,8 +252,7 @@ static int __devexit davinci_wdt_remove(struct platform_device *pdev) | |||
253 | { | 252 | { |
254 | misc_deregister(&davinci_wdt_miscdev); | 253 | misc_deregister(&davinci_wdt_miscdev); |
255 | if (wdt_mem) { | 254 | if (wdt_mem) { |
256 | release_resource(wdt_mem); | 255 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
257 | kfree(wdt_mem); | ||
258 | wdt_mem = NULL; | 256 | wdt_mem = NULL; |
259 | } | 257 | } |
260 | 258 | ||
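The davinci_wdt hunks above (and the max63xx, pnx4008 and s3c2410 conversions that follow) stop keeping the struct resource returned by request_mem_region() and releasing it with release_resource()+kfree(); instead they hold on to the platform resource and pair request_mem_region() directly with release_mem_region(). A hedged probe/remove sketch of that pattern with invented names.

/* Sketch of the request_mem_region()/release_mem_region() pairing used
 * in the watchdog conversions; the globals and driver name are made up.
 */
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource *ex_mem;		/* platform-owned resource, never freed here */
static void __iomem *ex_base;

static int ex_probe(struct platform_device *pdev)
{
	resource_size_t size;

	ex_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (ex_mem == NULL)
		return -ENOENT;

	size = resource_size(ex_mem);
	if (!request_mem_region(ex_mem->start, size, pdev->name))
		return -EBUSY;

	ex_base = ioremap(ex_mem->start, size);
	if (!ex_base) {
		release_mem_region(ex_mem->start, size);
		ex_mem = NULL;
		return -ENOMEM;
	}
	return 0;
}

static int ex_remove(struct platform_device *pdev)
{
	iounmap(ex_base);
	release_mem_region(ex_mem->start, resource_size(ex_mem));
	ex_mem = NULL;
	return 0;
}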
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 7a82ce5a6337..73ba2fd8e591 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c | |||
@@ -270,7 +270,6 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
270 | { | 270 | { |
271 | int ret = 0; | 271 | int ret = 0; |
272 | int size; | 272 | int size; |
273 | struct resource *res; | ||
274 | struct device *dev = &pdev->dev; | 273 | struct device *dev = &pdev->dev; |
275 | struct max63xx_timeout *table; | 274 | struct max63xx_timeout *table; |
276 | 275 | ||
@@ -294,21 +293,19 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
294 | 293 | ||
295 | max63xx_pdev = pdev; | 294 | max63xx_pdev = pdev; |
296 | 295 | ||
297 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 296 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
298 | if (res == NULL) { | 297 | if (wdt_mem == NULL) { |
299 | dev_err(dev, "failed to get memory region resource\n"); | 298 | dev_err(dev, "failed to get memory region resource\n"); |
300 | return -ENOENT; | 299 | return -ENOENT; |
301 | } | 300 | } |
302 | 301 | ||
303 | size = resource_size(res); | 302 | size = resource_size(wdt_mem); |
304 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 303 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
305 | |||
306 | if (wdt_mem == NULL) { | ||
307 | dev_err(dev, "failed to get memory region\n"); | 304 | dev_err(dev, "failed to get memory region\n"); |
308 | return -ENOENT; | 305 | return -ENOENT; |
309 | } | 306 | } |
310 | 307 | ||
311 | wdt_base = ioremap(res->start, size); | 308 | wdt_base = ioremap(wdt_mem->start, size); |
312 | if (!wdt_base) { | 309 | if (!wdt_base) { |
313 | dev_err(dev, "failed to map memory region\n"); | 310 | dev_err(dev, "failed to map memory region\n"); |
314 | ret = -ENOMEM; | 311 | ret = -ENOMEM; |
@@ -326,8 +323,8 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) | |||
326 | out_unmap: | 323 | out_unmap: |
327 | iounmap(wdt_base); | 324 | iounmap(wdt_base); |
328 | out_request: | 325 | out_request: |
329 | release_resource(wdt_mem); | 326 | release_mem_region(wdt_mem->start, size); |
330 | kfree(wdt_mem); | 327 | wdt_mem = NULL; |
331 | 328 | ||
332 | return ret; | 329 | return ret; |
333 | } | 330 | } |
@@ -336,8 +333,7 @@ static int __devexit max63xx_wdt_remove(struct platform_device *pdev) | |||
336 | { | 333 | { |
337 | misc_deregister(&max63xx_wdt_miscdev); | 334 | misc_deregister(&max63xx_wdt_miscdev); |
338 | if (wdt_mem) { | 335 | if (wdt_mem) { |
339 | release_resource(wdt_mem); | 336 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
340 | kfree(wdt_mem); | ||
341 | wdt_mem = NULL; | 337 | wdt_mem = NULL; |
342 | } | 338 | } |
343 | 339 | ||
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index 267377a5a83e..afa78a54711e 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c | |||
@@ -302,7 +302,7 @@ MODULE_DEVICE_TABLE(pci, tco_pci_tbl); | |||
302 | * Init & exit routines | 302 | * Init & exit routines |
303 | */ | 303 | */ |
304 | 304 | ||
305 | static unsigned char __init nv_tco_getdevice(void) | 305 | static unsigned char __devinit nv_tco_getdevice(void) |
306 | { | 306 | { |
307 | struct pci_dev *dev = NULL; | 307 | struct pci_dev *dev = NULL; |
308 | u32 val; | 308 | u32 val; |
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index c7cf4cbf8ab3..614933225560 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c | |||
@@ -254,7 +254,6 @@ static struct miscdevice pnx4008_wdt_miscdev = { | |||
254 | static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) | 254 | static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) |
255 | { | 255 | { |
256 | int ret = 0, size; | 256 | int ret = 0, size; |
257 | struct resource *res; | ||
258 | 257 | ||
259 | if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) | 258 | if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) |
260 | heartbeat = DEFAULT_HEARTBEAT; | 259 | heartbeat = DEFAULT_HEARTBEAT; |
@@ -262,42 +261,42 @@ static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) | |||
262 | printk(KERN_INFO MODULE_NAME | 261 | printk(KERN_INFO MODULE_NAME |
263 | "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); | 262 | "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); |
264 | 263 | ||
265 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 264 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
266 | if (res == NULL) { | 265 | if (wdt_mem == NULL) { |
267 | printk(KERN_INFO MODULE_NAME | 266 | printk(KERN_INFO MODULE_NAME |
268 | "failed to get memory region resouce\n"); | 267 | "failed to get memory region resouce\n"); |
269 | return -ENOENT; | 268 | return -ENOENT; |
270 | } | 269 | } |
271 | 270 | ||
272 | size = resource_size(res); | 271 | size = resource_size(wdt_mem); |
273 | wdt_mem = request_mem_region(res->start, size, pdev->name); | ||
274 | 272 | ||
275 | if (wdt_mem == NULL) { | 273 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
276 | printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); | 274 | printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); |
277 | return -ENOENT; | 275 | return -ENOENT; |
278 | } | 276 | } |
279 | wdt_base = (void __iomem *)IO_ADDRESS(res->start); | 277 | wdt_base = (void __iomem *)IO_ADDRESS(wdt_mem->start); |
280 | 278 | ||
281 | wdt_clk = clk_get(&pdev->dev, NULL); | 279 | wdt_clk = clk_get(&pdev->dev, NULL); |
282 | if (IS_ERR(wdt_clk)) { | 280 | if (IS_ERR(wdt_clk)) { |
283 | ret = PTR_ERR(wdt_clk); | 281 | ret = PTR_ERR(wdt_clk); |
284 | release_resource(wdt_mem); | 282 | release_mem_region(wdt_mem->start, size); |
285 | kfree(wdt_mem); | 283 | wdt_mem = NULL; |
286 | goto out; | 284 | goto out; |
287 | } | 285 | } |
288 | 286 | ||
289 | ret = clk_enable(wdt_clk); | 287 | ret = clk_enable(wdt_clk); |
290 | if (ret) { | 288 | if (ret) { |
291 | release_resource(wdt_mem); | 289 | release_mem_region(wdt_mem->start, size); |
292 | kfree(wdt_mem); | 290 | wdt_mem = NULL; |
291 | clk_put(wdt_clk); | ||
293 | goto out; | 292 | goto out; |
294 | } | 293 | } |
295 | 294 | ||
296 | ret = misc_register(&pnx4008_wdt_miscdev); | 295 | ret = misc_register(&pnx4008_wdt_miscdev); |
297 | if (ret < 0) { | 296 | if (ret < 0) { |
298 | printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); | 297 | printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); |
299 | release_resource(wdt_mem); | 298 | release_mem_region(wdt_mem->start, size); |
300 | kfree(wdt_mem); | 299 | wdt_mem = NULL; |
301 | clk_disable(wdt_clk); | 300 | clk_disable(wdt_clk); |
302 | clk_put(wdt_clk); | 301 | clk_put(wdt_clk); |
303 | } else { | 302 | } else { |
@@ -320,8 +319,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev) | |||
320 | clk_put(wdt_clk); | 319 | clk_put(wdt_clk); |
321 | 320 | ||
322 | if (wdt_mem) { | 321 | if (wdt_mem) { |
323 | release_resource(wdt_mem); | 322 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
324 | kfree(wdt_mem); | ||
325 | wdt_mem = NULL; | 323 | wdt_mem = NULL; |
326 | } | 324 | } |
327 | return 0; | 325 | return 0; |
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 25b39bf35925..f7f5aa00df60 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c | |||
@@ -402,7 +402,6 @@ static inline void s3c2410wdt_cpufreq_deregister(void) | |||
402 | 402 | ||
403 | static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | 403 | static int __devinit s3c2410wdt_probe(struct platform_device *pdev) |
404 | { | 404 | { |
405 | struct resource *res; | ||
406 | struct device *dev; | 405 | struct device *dev; |
407 | unsigned int wtcon; | 406 | unsigned int wtcon; |
408 | int started = 0; | 407 | int started = 0; |
@@ -416,20 +415,19 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | |||
416 | 415 | ||
417 | /* get the memory region for the watchdog timer */ | 416 | /* get the memory region for the watchdog timer */ |
418 | 417 | ||
419 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 418 | wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
420 | if (res == NULL) { | 419 | if (wdt_mem == NULL) { |
421 | dev_err(dev, "no memory resource specified\n"); | 420 | dev_err(dev, "no memory resource specified\n"); |
422 | return -ENOENT; | 421 | return -ENOENT; |
423 | } | 422 | } |
424 | 423 | ||
425 | size = resource_size(res); | 424 | size = resource_size(wdt_mem); |
426 | wdt_mem = request_mem_region(res->start, size, pdev->name); | 425 | if (!request_mem_region(wdt_mem->start, size, pdev->name)) { |
427 | if (wdt_mem == NULL) { | ||
428 | dev_err(dev, "failed to get memory region\n"); | 426 | dev_err(dev, "failed to get memory region\n"); |
429 | return -EBUSY; | 427 | return -EBUSY; |
430 | } | 428 | } |
431 | 429 | ||
432 | wdt_base = ioremap(res->start, size); | 430 | wdt_base = ioremap(wdt_mem->start, size); |
433 | if (wdt_base == NULL) { | 431 | if (wdt_base == NULL) { |
434 | dev_err(dev, "failed to ioremap() region\n"); | 432 | dev_err(dev, "failed to ioremap() region\n"); |
435 | ret = -EINVAL; | 433 | ret = -EINVAL; |
@@ -524,8 +522,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) | |||
524 | iounmap(wdt_base); | 522 | iounmap(wdt_base); |
525 | 523 | ||
526 | err_req: | 524 | err_req: |
527 | release_resource(wdt_mem); | 525 | release_mem_region(wdt_mem->start, size); |
528 | kfree(wdt_mem); | 526 | wdt_mem = NULL; |
529 | 527 | ||
530 | return ret; | 528 | return ret; |
531 | } | 529 | } |
@@ -545,8 +543,7 @@ static int __devexit s3c2410wdt_remove(struct platform_device *dev) | |||
545 | 543 | ||
546 | iounmap(wdt_base); | 544 | iounmap(wdt_base); |
547 | 545 | ||
548 | release_resource(wdt_mem); | 546 | release_mem_region(wdt_mem->start, resource_size(wdt_mem)); |
549 | kfree(wdt_mem); | ||
550 | wdt_mem = NULL; | 547 | wdt_mem = NULL; |
551 | return 0; | 548 | return 0; |
552 | } | 549 | } |
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index 100b114e3c3c..bf16ffb4d21e 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/init.h> | 48 | #include <linux/init.h> |
49 | #include <linux/jiffies.h> | 49 | #include <linux/jiffies.h> |
50 | #include <linux/uaccess.h> | 50 | #include <linux/uaccess.h> |
51 | #include <linux/kernel.h> | ||
51 | 52 | ||
52 | #define PFX "SoftDog: " | 53 | #define PFX "SoftDog: " |
53 | 54 | ||
@@ -75,6 +76,11 @@ MODULE_PARM_DESC(soft_noboot, | |||
75 | "Softdog action, set to 1 to ignore reboots, 0 to reboot " | 76 | "Softdog action, set to 1 to ignore reboots, 0 to reboot " |
76 | "(default depends on ONLY_TESTING)"); | 77 | "(default depends on ONLY_TESTING)"); |
77 | 78 | ||
79 | static int soft_panic; | ||
80 | module_param(soft_panic, int, 0); | ||
81 | MODULE_PARM_DESC(soft_panic, | ||
82 | "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); | ||
83 | |||
78 | /* | 84 | /* |
79 | * Our timer | 85 | * Our timer |
80 | */ | 86 | */ |
@@ -98,7 +104,10 @@ static void watchdog_fire(unsigned long data) | |||
98 | 104 | ||
99 | if (soft_noboot) | 105 | if (soft_noboot) |
100 | printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); | 106 | printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n"); |
101 | else { | 107 | else if (soft_panic) { |
108 | printk(KERN_CRIT PFX "Initiating panic.\n"); | ||
109 | panic("Software Watchdog Timer expired."); | ||
110 | } else { | ||
102 | printk(KERN_CRIT PFX "Initiating system reboot.\n"); | 111 | printk(KERN_CRIT PFX "Initiating system reboot.\n"); |
103 | emergency_restart(); | 112 | emergency_restart(); |
104 | printk(KERN_CRIT PFX "Reboot didn't ?????\n"); | 113 | printk(KERN_CRIT PFX "Reboot didn't ?????\n"); |
@@ -267,7 +276,8 @@ static struct notifier_block softdog_notifier = { | |||
267 | }; | 276 | }; |
268 | 277 | ||
269 | static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " | 278 | static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " |
270 | "initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n"; | 279 | "initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d " |
280 | "(nowayout= %d)\n"; | ||
271 | 281 | ||
272 | static int __init watchdog_init(void) | 282 | static int __init watchdog_init(void) |
273 | { | 283 | { |
@@ -298,7 +308,7 @@ static int __init watchdog_init(void) | |||
298 | return ret; | 308 | return ret; |
299 | } | 309 | } |
300 | 310 | ||
301 | printk(banner, soft_noboot, soft_margin, nowayout); | 311 | printk(banner, soft_noboot, soft_margin, soft_panic, nowayout); |
302 | 312 | ||
303 | return 0; | 313 | return 0; |
304 | } | 314 | } |
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 1bc493848ed4..87e0527669d8 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #define PFX TCO_MODULE_NAME ": " | 42 | #define PFX TCO_MODULE_NAME ": " |
43 | 43 | ||
44 | /* internal variables */ | 44 | /* internal variables */ |
45 | static u32 tcobase_phys; | ||
45 | static void __iomem *tcobase; | 46 | static void __iomem *tcobase; |
46 | static unsigned int pm_iobase; | 47 | static unsigned int pm_iobase; |
47 | static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ | 48 | static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ |
@@ -305,10 +306,18 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) | |||
305 | /* Low three bits of BASE0 are reserved. */ | 306 | /* Low three bits of BASE0 are reserved. */ |
306 | val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); | 307 | val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8); |
307 | 308 | ||
309 | if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, | ||
310 | "SP5100 TCO")) { | ||
311 | printk(KERN_ERR PFX "mmio address 0x%04x already in use\n", | ||
312 | val); | ||
313 | goto unreg_region; | ||
314 | } | ||
315 | tcobase_phys = val; | ||
316 | |||
308 | tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); | 317 | tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE); |
309 | if (tcobase == 0) { | 318 | if (tcobase == 0) { |
310 | printk(KERN_ERR PFX "failed to get tcobase address\n"); | 319 | printk(KERN_ERR PFX "failed to get tcobase address\n"); |
311 | goto unreg_region; | 320 | goto unreg_mem_region; |
312 | } | 321 | } |
313 | 322 | ||
314 | /* Enable watchdog decode bit */ | 323 | /* Enable watchdog decode bit */ |
@@ -346,7 +355,8 @@ static unsigned char __devinit sp5100_tco_setupdevice(void) | |||
346 | /* Done */ | 355 | /* Done */ |
347 | return 1; | 356 | return 1; |
348 | 357 | ||
349 | iounmap(tcobase); | 358 | unreg_mem_region: |
359 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
350 | unreg_region: | 360 | unreg_region: |
351 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 361 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
352 | exit: | 362 | exit: |
@@ -401,6 +411,7 @@ static int __devinit sp5100_tco_init(struct platform_device *dev) | |||
401 | 411 | ||
402 | exit: | 412 | exit: |
403 | iounmap(tcobase); | 413 | iounmap(tcobase); |
414 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
404 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 415 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
405 | return ret; | 416 | return ret; |
406 | } | 417 | } |
@@ -414,6 +425,7 @@ static void __devexit sp5100_tco_cleanup(void) | |||
414 | /* Deregister */ | 425 | /* Deregister */ |
415 | misc_deregister(&sp5100_tco_miscdev); | 426 | misc_deregister(&sp5100_tco_miscdev); |
416 | iounmap(tcobase); | 427 | iounmap(tcobase); |
428 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | ||
417 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 429 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
418 | } | 430 | } |
419 | 431 | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 02b5a9c05cfa..036343ba204e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -122,7 +122,7 @@ static struct irq_chip xen_pirq_chip; | |||
122 | /* Get info for IRQ */ | 122 | /* Get info for IRQ */ |
123 | static struct irq_info *info_for_irq(unsigned irq) | 123 | static struct irq_info *info_for_irq(unsigned irq) |
124 | { | 124 | { |
125 | return get_irq_data(irq); | 125 | return irq_get_handler_data(irq); |
126 | } | 126 | } |
127 | 127 | ||
128 | /* Constructors for packed IRQ information. */ | 128 | /* Constructors for packed IRQ information. */ |
@@ -403,7 +403,7 @@ static void xen_irq_init(unsigned irq) | |||
403 | 403 | ||
404 | info->type = IRQT_UNBOUND; | 404 | info->type = IRQT_UNBOUND; |
405 | 405 | ||
406 | set_irq_data(irq, info); | 406 | irq_set_handler_data(irq, info); |
407 | 407 | ||
408 | list_add_tail(&info->list, &xen_irq_list_head); | 408 | list_add_tail(&info->list, &xen_irq_list_head); |
409 | } | 409 | } |
@@ -458,11 +458,11 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) | |||
458 | 458 | ||
459 | static void xen_free_irq(unsigned irq) | 459 | static void xen_free_irq(unsigned irq) |
460 | { | 460 | { |
461 | struct irq_info *info = get_irq_data(irq); | 461 | struct irq_info *info = irq_get_handler_data(irq); |
462 | 462 | ||
463 | list_del(&info->list); | 463 | list_del(&info->list); |
464 | 464 | ||
465 | set_irq_data(irq, NULL); | 465 | irq_set_handler_data(irq, NULL); |
466 | 466 | ||
467 | kfree(info); | 467 | kfree(info); |
468 | 468 | ||
@@ -585,7 +585,7 @@ static void ack_pirq(struct irq_data *data) | |||
585 | { | 585 | { |
586 | int evtchn = evtchn_from_irq(data->irq); | 586 | int evtchn = evtchn_from_irq(data->irq); |
587 | 587 | ||
588 | move_native_irq(data->irq); | 588 | irq_move_irq(data); |
589 | 589 | ||
590 | if (VALID_EVTCHN(evtchn)) { | 590 | if (VALID_EVTCHN(evtchn)) { |
591 | mask_evtchn(evtchn); | 591 | mask_evtchn(evtchn); |
@@ -639,8 +639,8 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
639 | if (irq < 0) | 639 | if (irq < 0) |
640 | goto out; | 640 | goto out; |
641 | 641 | ||
642 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 642 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, |
643 | handle_level_irq, name); | 643 | name); |
644 | 644 | ||
645 | irq_op.irq = irq; | 645 | irq_op.irq = irq; |
646 | irq_op.vector = 0; | 646 | irq_op.vector = 0; |
@@ -690,8 +690,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, | |||
690 | if (irq == -1) | 690 | if (irq == -1) |
691 | goto out; | 691 | goto out; |
692 | 692 | ||
693 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 693 | irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, |
694 | handle_level_irq, name); | 694 | name); |
695 | 695 | ||
696 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); | 696 | xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); |
697 | ret = irq_set_msi_desc(irq, msidesc); | 697 | ret = irq_set_msi_desc(irq, msidesc); |
@@ -772,7 +772,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
772 | if (irq == -1) | 772 | if (irq == -1) |
773 | goto out; | 773 | goto out; |
774 | 774 | ||
775 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 775 | irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
776 | handle_fasteoi_irq, "event"); | 776 | handle_fasteoi_irq, "event"); |
777 | 777 | ||
778 | xen_irq_info_evtchn_init(irq, evtchn); | 778 | xen_irq_info_evtchn_init(irq, evtchn); |
@@ -799,7 +799,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
799 | if (irq < 0) | 799 | if (irq < 0) |
800 | goto out; | 800 | goto out; |
801 | 801 | ||
802 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 802 | irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
803 | handle_percpu_irq, "ipi"); | 803 | handle_percpu_irq, "ipi"); |
804 | 804 | ||
805 | bind_ipi.vcpu = cpu; | 805 | bind_ipi.vcpu = cpu; |
@@ -848,7 +848,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
848 | if (irq == -1) | 848 | if (irq == -1) |
849 | goto out; | 849 | goto out; |
850 | 850 | ||
851 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 851 | irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
852 | handle_percpu_irq, "virq"); | 852 | handle_percpu_irq, "virq"); |
853 | 853 | ||
854 | bind_virq.virq = virq; | 854 | bind_virq.virq = virq; |
@@ -1339,7 +1339,7 @@ static void ack_dynirq(struct irq_data *data) | |||
1339 | { | 1339 | { |
1340 | int evtchn = evtchn_from_irq(data->irq); | 1340 | int evtchn = evtchn_from_irq(data->irq); |
1341 | 1341 | ||
1342 | move_masked_irq(data->irq); | 1342 | irq_move_masked_irq(data); |
1343 | 1343 | ||
1344 | if (VALID_EVTCHN(evtchn)) | 1344 | if (VALID_EVTCHN(evtchn)) |
1345 | unmask_evtchn(evtchn); | 1345 | unmask_evtchn(evtchn); |
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 017ce600fbc6..b0f9e8fb0052 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -273,7 +273,7 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
273 | map->vma->vm_start + map->notify.addr; | 273 | map->vma->vm_start + map->notify.addr; |
274 | err = copy_to_user(tmp, &err, 1); | 274 | err = copy_to_user(tmp, &err, 1); |
275 | if (err) | 275 | if (err) |
276 | return err; | 276 | return -EFAULT; |
277 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; | 277 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; |
278 | } else if (pgno >= offset && pgno < offset + pages) { | 278 | } else if (pgno >= offset && pgno < offset + pages) { |
279 | uint8_t *tmp = kmap(map->pages[pgno]); | 279 | uint8_t *tmp = kmap(map->pages[pgno]); |
@@ -662,7 +662,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
662 | if (map->flags) { | 662 | if (map->flags) { |
663 | if ((vma->vm_flags & VM_WRITE) && | 663 | if ((vma->vm_flags & VM_WRITE) && |
664 | (map->flags & GNTMAP_readonly)) | 664 | (map->flags & GNTMAP_readonly)) |
665 | return -EINVAL; | 665 | goto out_unlock_put; |
666 | } else { | 666 | } else { |
667 | map->flags = GNTMAP_host_map; | 667 | map->flags = GNTMAP_host_map; |
668 | if (!(vma->vm_flags & VM_WRITE)) | 668 | if (!(vma->vm_flags & VM_WRITE)) |
@@ -700,6 +700,8 @@ unlock_out: | |||
700 | spin_unlock(&priv->lock); | 700 | spin_unlock(&priv->lock); |
701 | return err; | 701 | return err; |
702 | 702 | ||
703 | out_unlock_put: | ||
704 | spin_unlock(&priv->lock); | ||
703 | out_put_map: | 705 | out_put_map: |
704 | if (use_ptemod) | 706 | if (use_ptemod) |
705 | map->vma = NULL; | 707 | map->vma = NULL; |
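The gntdev fix above corrects two error paths: copy_to_user() returns the number of bytes it failed to copy, so a nonzero result is now translated into -EFAULT instead of being returned as-is, and the mmap error exit drops priv->lock through a new out_unlock_put label before the map reference is released. A tiny sketch of the copy_to_user() convention; the helper name is hypothetical.

/* Sketch of the copy_to_user() error convention applied above; the
 * helper name is invented.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int ex_clear_notify_byte(void __user *addr)
{
	u8 zero = 0;

	if (copy_to_user(addr, &zero, 1))
		return -EFAULT;		/* not the residual byte count */
	return 0;
}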
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index ccc991c542df..57c3bb2884ce 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
@@ -136,9 +136,8 @@ struct btrfs_inode { | |||
136 | * items we think we'll end up using, and reserved_extents is the number | 136 | * items we think we'll end up using, and reserved_extents is the number |
137 | * of extent items we've reserved metadata for. | 137 | * of extent items we've reserved metadata for. |
138 | */ | 138 | */ |
139 | spinlock_t accounting_lock; | ||
140 | atomic_t outstanding_extents; | 139 | atomic_t outstanding_extents; |
141 | int reserved_extents; | 140 | atomic_t reserved_extents; |
142 | 141 | ||
143 | /* | 142 | /* |
144 | * ordered_data_close is set by truncate when a file that used | 143 | * ordered_data_close is set by truncate when a file that used |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 4d2110eafe29..41d1d7c70e29 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -340,6 +340,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
340 | 340 | ||
341 | WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); | 341 | WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); |
342 | cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); | 342 | cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); |
343 | if (!cb) | ||
344 | return -ENOMEM; | ||
343 | atomic_set(&cb->pending_bios, 0); | 345 | atomic_set(&cb->pending_bios, 0); |
344 | cb->errors = 0; | 346 | cb->errors = 0; |
345 | cb->inode = inode; | 347 | cb->inode = inode; |
@@ -354,6 +356,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
354 | bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; | 356 | bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; |
355 | 357 | ||
356 | bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); | 358 | bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); |
359 | if(!bio) { | ||
360 | kfree(cb); | ||
361 | return -ENOMEM; | ||
362 | } | ||
357 | bio->bi_private = cb; | 363 | bio->bi_private = cb; |
358 | bio->bi_end_io = end_compressed_bio_write; | 364 | bio->bi_end_io = end_compressed_bio_write; |
359 | atomic_inc(&cb->pending_bios); | 365 | atomic_inc(&cb->pending_bios); |
@@ -657,8 +663,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
657 | atomic_inc(&cb->pending_bios); | 663 | atomic_inc(&cb->pending_bios); |
658 | 664 | ||
659 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { | 665 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { |
660 | btrfs_lookup_bio_sums(root, inode, comp_bio, | 666 | ret = btrfs_lookup_bio_sums(root, inode, |
661 | sums); | 667 | comp_bio, sums); |
668 | BUG_ON(ret); | ||
662 | } | 669 | } |
663 | sums += (comp_bio->bi_size + root->sectorsize - 1) / | 670 | sums += (comp_bio->bi_size + root->sectorsize - 1) / |
664 | root->sectorsize; | 671 | root->sectorsize; |
@@ -683,8 +690,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
683 | ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); | 690 | ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); |
684 | BUG_ON(ret); | 691 | BUG_ON(ret); |
685 | 692 | ||
686 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) | 693 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { |
687 | btrfs_lookup_bio_sums(root, inode, comp_bio, sums); | 694 | ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums); |
695 | BUG_ON(ret); | ||
696 | } | ||
688 | 697 | ||
689 | ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); | 698 | ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); |
690 | BUG_ON(ret); | 699 | BUG_ON(ret); |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index b5baff0dccfe..84d7ca1fe0ba 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -147,10 +147,11 @@ noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) | |||
147 | struct extent_buffer *btrfs_root_node(struct btrfs_root *root) | 147 | struct extent_buffer *btrfs_root_node(struct btrfs_root *root) |
148 | { | 148 | { |
149 | struct extent_buffer *eb; | 149 | struct extent_buffer *eb; |
150 | spin_lock(&root->node_lock); | 150 | |
151 | eb = root->node; | 151 | rcu_read_lock(); |
152 | eb = rcu_dereference(root->node); | ||
152 | extent_buffer_get(eb); | 153 | extent_buffer_get(eb); |
153 | spin_unlock(&root->node_lock); | 154 | rcu_read_unlock(); |
154 | return eb; | 155 | return eb; |
155 | } | 156 | } |
156 | 157 | ||
@@ -165,14 +166,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) | |||
165 | while (1) { | 166 | while (1) { |
166 | eb = btrfs_root_node(root); | 167 | eb = btrfs_root_node(root); |
167 | btrfs_tree_lock(eb); | 168 | btrfs_tree_lock(eb); |
168 | 169 | if (eb == root->node) | |
169 | spin_lock(&root->node_lock); | ||
170 | if (eb == root->node) { | ||
171 | spin_unlock(&root->node_lock); | ||
172 | break; | 170 | break; |
173 | } | ||
174 | spin_unlock(&root->node_lock); | ||
175 | |||
176 | btrfs_tree_unlock(eb); | 171 | btrfs_tree_unlock(eb); |
177 | free_extent_buffer(eb); | 172 | free_extent_buffer(eb); |
178 | } | 173 | } |
@@ -458,10 +453,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, | |||
458 | else | 453 | else |
459 | parent_start = 0; | 454 | parent_start = 0; |
460 | 455 | ||
461 | spin_lock(&root->node_lock); | ||
462 | root->node = cow; | ||
463 | extent_buffer_get(cow); | 456 | extent_buffer_get(cow); |
464 | spin_unlock(&root->node_lock); | 457 | rcu_assign_pointer(root->node, cow); |
465 | 458 | ||
466 | btrfs_free_tree_block(trans, root, buf, parent_start, | 459 | btrfs_free_tree_block(trans, root, buf, parent_start, |
467 | last_ref); | 460 | last_ref); |
@@ -542,6 +535,9 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, | |||
542 | 535 | ||
543 | ret = __btrfs_cow_block(trans, root, buf, parent, | 536 | ret = __btrfs_cow_block(trans, root, buf, parent, |
544 | parent_slot, cow_ret, search_start, 0); | 537 | parent_slot, cow_ret, search_start, 0); |
538 | |||
539 | trace_btrfs_cow_block(root, buf, *cow_ret); | ||
540 | |||
545 | return ret; | 541 | return ret; |
546 | } | 542 | } |
547 | 543 | ||
@@ -686,6 +682,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, | |||
686 | if (!cur) { | 682 | if (!cur) { |
687 | cur = read_tree_block(root, blocknr, | 683 | cur = read_tree_block(root, blocknr, |
688 | blocksize, gen); | 684 | blocksize, gen); |
685 | if (!cur) | ||
686 | return -EIO; | ||
689 | } else if (!uptodate) { | 687 | } else if (!uptodate) { |
690 | btrfs_read_buffer(cur, gen); | 688 | btrfs_read_buffer(cur, gen); |
691 | } | 689 | } |
@@ -732,122 +730,6 @@ static inline unsigned int leaf_data_end(struct btrfs_root *root, | |||
732 | return btrfs_item_offset_nr(leaf, nr - 1); | 730 | return btrfs_item_offset_nr(leaf, nr - 1); |
733 | } | 731 | } |
734 | 732 | ||
735 | /* | ||
736 | * extra debugging checks to make sure all the items in a key are | ||
737 | * well formed and in the proper order | ||
738 | */ | ||
739 | static int check_node(struct btrfs_root *root, struct btrfs_path *path, | ||
740 | int level) | ||
741 | { | ||
742 | struct extent_buffer *parent = NULL; | ||
743 | struct extent_buffer *node = path->nodes[level]; | ||
744 | struct btrfs_disk_key parent_key; | ||
745 | struct btrfs_disk_key node_key; | ||
746 | int parent_slot; | ||
747 | int slot; | ||
748 | struct btrfs_key cpukey; | ||
749 | u32 nritems = btrfs_header_nritems(node); | ||
750 | |||
751 | if (path->nodes[level + 1]) | ||
752 | parent = path->nodes[level + 1]; | ||
753 | |||
754 | slot = path->slots[level]; | ||
755 | BUG_ON(nritems == 0); | ||
756 | if (parent) { | ||
757 | parent_slot = path->slots[level + 1]; | ||
758 | btrfs_node_key(parent, &parent_key, parent_slot); | ||
759 | btrfs_node_key(node, &node_key, 0); | ||
760 | BUG_ON(memcmp(&parent_key, &node_key, | ||
761 | sizeof(struct btrfs_disk_key))); | ||
762 | BUG_ON(btrfs_node_blockptr(parent, parent_slot) != | ||
763 | btrfs_header_bytenr(node)); | ||
764 | } | ||
765 | BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root)); | ||
766 | if (slot != 0) { | ||
767 | btrfs_node_key_to_cpu(node, &cpukey, slot - 1); | ||
768 | btrfs_node_key(node, &node_key, slot); | ||
769 | BUG_ON(comp_keys(&node_key, &cpukey) <= 0); | ||
770 | } | ||
771 | if (slot < nritems - 1) { | ||
772 | btrfs_node_key_to_cpu(node, &cpukey, slot + 1); | ||
773 | btrfs_node_key(node, &node_key, slot); | ||
774 | BUG_ON(comp_keys(&node_key, &cpukey) >= 0); | ||
775 | } | ||
776 | return 0; | ||
777 | } | ||
778 | |||
779 | /* | ||
780 | * extra checking to make sure all the items in a leaf are | ||
781 | * well formed and in the proper order | ||
782 | */ | ||
783 | static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, | ||
784 | int level) | ||
785 | { | ||
786 | struct extent_buffer *leaf = path->nodes[level]; | ||
787 | struct extent_buffer *parent = NULL; | ||
788 | int parent_slot; | ||
789 | struct btrfs_key cpukey; | ||
790 | struct btrfs_disk_key parent_key; | ||
791 | struct btrfs_disk_key leaf_key; | ||
792 | int slot = path->slots[0]; | ||
793 | |||
794 | u32 nritems = btrfs_header_nritems(leaf); | ||
795 | |||
796 | if (path->nodes[level + 1]) | ||
797 | parent = path->nodes[level + 1]; | ||
798 | |||
799 | if (nritems == 0) | ||
800 | return 0; | ||
801 | |||
802 | if (parent) { | ||
803 | parent_slot = path->slots[level + 1]; | ||
804 | btrfs_node_key(parent, &parent_key, parent_slot); | ||
805 | btrfs_item_key(leaf, &leaf_key, 0); | ||
806 | |||
807 | BUG_ON(memcmp(&parent_key, &leaf_key, | ||
808 | sizeof(struct btrfs_disk_key))); | ||
809 | BUG_ON(btrfs_node_blockptr(parent, parent_slot) != | ||
810 | btrfs_header_bytenr(leaf)); | ||
811 | } | ||
812 | if (slot != 0 && slot < nritems - 1) { | ||
813 | btrfs_item_key(leaf, &leaf_key, slot); | ||
814 | btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1); | ||
815 | if (comp_keys(&leaf_key, &cpukey) <= 0) { | ||
816 | btrfs_print_leaf(root, leaf); | ||
817 | printk(KERN_CRIT "slot %d offset bad key\n", slot); | ||
818 | BUG_ON(1); | ||
819 | } | ||
820 | if (btrfs_item_offset_nr(leaf, slot - 1) != | ||
821 | btrfs_item_end_nr(leaf, slot)) { | ||
822 | btrfs_print_leaf(root, leaf); | ||
823 | printk(KERN_CRIT "slot %d offset bad\n", slot); | ||
824 | BUG_ON(1); | ||
825 | } | ||
826 | } | ||
827 | if (slot < nritems - 1) { | ||
828 | btrfs_item_key(leaf, &leaf_key, slot); | ||
829 | btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1); | ||
830 | BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0); | ||
831 | if (btrfs_item_offset_nr(leaf, slot) != | ||
832 | btrfs_item_end_nr(leaf, slot + 1)) { | ||
833 | btrfs_print_leaf(root, leaf); | ||
834 | printk(KERN_CRIT "slot %d offset bad\n", slot); | ||
835 | BUG_ON(1); | ||
836 | } | ||
837 | } | ||
838 | BUG_ON(btrfs_item_offset_nr(leaf, 0) + | ||
839 | btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root)); | ||
840 | return 0; | ||
841 | } | ||
842 | |||
843 | static noinline int check_block(struct btrfs_root *root, | ||
844 | struct btrfs_path *path, int level) | ||
845 | { | ||
846 | return 0; | ||
847 | if (level == 0) | ||
848 | return check_leaf(root, path, level); | ||
849 | return check_node(root, path, level); | ||
850 | } | ||
851 | 733 | ||
852 | /* | 734 | /* |
853 | * search for key in the extent_buffer. The items start at offset p, | 735 | * search for key in the extent_buffer. The items start at offset p, |
@@ -1046,9 +928,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1046 | goto enospc; | 928 | goto enospc; |
1047 | } | 929 | } |
1048 | 930 | ||
1049 | spin_lock(&root->node_lock); | 931 | rcu_assign_pointer(root->node, child); |
1050 | root->node = child; | ||
1051 | spin_unlock(&root->node_lock); | ||
1052 | 932 | ||
1053 | add_root_to_dirty_list(root); | 933 | add_root_to_dirty_list(root); |
1054 | btrfs_tree_unlock(child); | 934 | btrfs_tree_unlock(child); |
@@ -1188,7 +1068,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1188 | } | 1068 | } |
1189 | } | 1069 | } |
1190 | /* double check we haven't messed things up */ | 1070 | /* double check we haven't messed things up */ |
1191 | check_block(root, path, level); | ||
1192 | if (orig_ptr != | 1071 | if (orig_ptr != |
1193 | btrfs_node_blockptr(path->nodes[level], path->slots[level])) | 1072 | btrfs_node_blockptr(path->nodes[level], path->slots[level])) |
1194 | BUG(); | 1073 | BUG(); |
@@ -1798,12 +1677,6 @@ cow_done: | |||
1798 | if (!cow) | 1677 | if (!cow) |
1799 | btrfs_unlock_up_safe(p, level + 1); | 1678 | btrfs_unlock_up_safe(p, level + 1); |
1800 | 1679 | ||
1801 | ret = check_block(root, p, level); | ||
1802 | if (ret) { | ||
1803 | ret = -1; | ||
1804 | goto done; | ||
1805 | } | ||
1806 | |||
1807 | ret = bin_search(b, key, level, &slot); | 1680 | ret = bin_search(b, key, level, &slot); |
1808 | 1681 | ||
1809 | if (level != 0) { | 1682 | if (level != 0) { |
@@ -2130,10 +2003,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, | |||
2130 | 2003 | ||
2131 | btrfs_mark_buffer_dirty(c); | 2004 | btrfs_mark_buffer_dirty(c); |
2132 | 2005 | ||
2133 | spin_lock(&root->node_lock); | ||
2134 | old = root->node; | 2006 | old = root->node; |
2135 | root->node = c; | 2007 | rcu_assign_pointer(root->node, c); |
2136 | spin_unlock(&root->node_lock); | ||
2137 | 2008 | ||
2138 | /* the super has an extra ref to root->node */ | 2009 | /* the super has an extra ref to root->node */ |
2139 | free_extent_buffer(old); | 2010 | free_extent_buffer(old); |
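The two hunks above drop root->node_lock and publish the new root node with rcu_assign_pointer(), so lockless readers that dereference root->node under rcu_read_lock() always observe a fully initialized extent buffer. A minimal sketch of that publish/read pairing using the stock RCU primitives; the struct and function names here are illustrative, not the btrfs ones:

#include <linux/rcupdate.h>

struct node;
struct root_like {
        struct node __rcu *node;
};

/* writer side: all stores that initialize 'child' are ordered before
 * the pointer itself becomes visible to RCU readers */
static void publish_node(struct root_like *r, struct node *child)
{
        rcu_assign_pointer(r->node, child);
}

/* reader side: anything that must outlive the read section needs a
 * reference taken before rcu_read_unlock() */
static struct node *peek_node(struct root_like *r)
{
        struct node *n;

        rcu_read_lock();
        n = rcu_dereference(r->node);
        /* ... grab a refcount on n here if it is used after unlock ... */
        rcu_read_unlock();
        return n;
}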
@@ -3840,7 +3711,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root | |||
3840 | unsigned long ptr; | 3711 | unsigned long ptr; |
3841 | 3712 | ||
3842 | path = btrfs_alloc_path(); | 3713 | path = btrfs_alloc_path(); |
3843 | BUG_ON(!path); | 3714 | if (!path) |
3715 | return -ENOMEM; | ||
3844 | ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); | 3716 | ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); |
3845 | if (!ret) { | 3717 | if (!ret) { |
3846 | leaf = path->nodes[0]; | 3718 | leaf = path->nodes[0]; |
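This hunk is one of several in the series that replace BUG_ON(!path) after btrfs_alloc_path() with an -ENOMEM return. A minimal sketch of the pattern; example_op() and do_work() are hypothetical stand-ins for the real callers:

static int example_op(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)                      /* was: BUG_ON(!path) */
                return -ENOMEM;

        ret = do_work(trans, root, path);       /* hypothetical helper */

        btrfs_free_path(path);
        return ret;
}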
@@ -4217,6 +4089,7 @@ find_next_key: | |||
4217 | } | 4089 | } |
4218 | btrfs_set_path_blocking(path); | 4090 | btrfs_set_path_blocking(path); |
4219 | cur = read_node_slot(root, cur, slot); | 4091 | cur = read_node_slot(root, cur, slot); |
4092 | BUG_ON(!cur); | ||
4220 | 4093 | ||
4221 | btrfs_tree_lock(cur); | 4094 | btrfs_tree_lock(cur); |
4222 | 4095 | ||
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 7f78cc78fdd0..d47ce8307854 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/wait.h> | 28 | #include <linux/wait.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/kobject.h> | 30 | #include <linux/kobject.h> |
31 | #include <trace/events/btrfs.h> | ||
31 | #include <asm/kmap_types.h> | 32 | #include <asm/kmap_types.h> |
32 | #include "extent_io.h" | 33 | #include "extent_io.h" |
33 | #include "extent_map.h" | 34 | #include "extent_map.h" |
@@ -40,6 +41,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep; | |||
40 | extern struct kmem_cache *btrfs_transaction_cachep; | 41 | extern struct kmem_cache *btrfs_transaction_cachep; |
41 | extern struct kmem_cache *btrfs_bit_radix_cachep; | 42 | extern struct kmem_cache *btrfs_bit_radix_cachep; |
42 | extern struct kmem_cache *btrfs_path_cachep; | 43 | extern struct kmem_cache *btrfs_path_cachep; |
44 | extern struct kmem_cache *btrfs_free_space_cachep; | ||
43 | struct btrfs_ordered_sum; | 45 | struct btrfs_ordered_sum; |
44 | 46 | ||
45 | #define BTRFS_MAGIC "_BHRfS_M" | 47 | #define BTRFS_MAGIC "_BHRfS_M" |
@@ -782,9 +784,6 @@ struct btrfs_free_cluster { | |||
782 | /* first extent starting offset */ | 784 | /* first extent starting offset */ |
783 | u64 window_start; | 785 | u64 window_start; |
784 | 786 | ||
785 | /* if this cluster simply points at a bitmap in the block group */ | ||
786 | bool points_to_bitmap; | ||
787 | |||
788 | struct btrfs_block_group_cache *block_group; | 787 | struct btrfs_block_group_cache *block_group; |
789 | /* | 788 | /* |
790 | * when a cluster is allocated from a block group, we put the | 789 | * when a cluster is allocated from a block group, we put the |
@@ -1283,6 +1282,7 @@ struct btrfs_root { | |||
1283 | #define BTRFS_INODE_NODUMP (1 << 8) | 1282 | #define BTRFS_INODE_NODUMP (1 << 8) |
1284 | #define BTRFS_INODE_NOATIME (1 << 9) | 1283 | #define BTRFS_INODE_NOATIME (1 << 9) |
1285 | #define BTRFS_INODE_DIRSYNC (1 << 10) | 1284 | #define BTRFS_INODE_DIRSYNC (1 << 10) |
1285 | #define BTRFS_INODE_COMPRESS (1 << 11) | ||
1286 | 1286 | ||
1287 | /* some macros to generate set/get funcs for the struct fields. This | 1287 | /* some macros to generate set/get funcs for the struct fields. This |
1288 | * assumes there is a lefoo_to_cpu for every type, so lets make a simple | 1288 | * assumes there is a lefoo_to_cpu for every type, so lets make a simple |
@@ -2157,6 +2157,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
2157 | u64 root_objectid, u64 owner, u64 offset); | 2157 | u64 root_objectid, u64 owner, u64 offset); |
2158 | 2158 | ||
2159 | int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); | 2159 | int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); |
2160 | int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, | ||
2161 | u64 num_bytes, int reserve, int sinfo); | ||
2160 | int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, | 2162 | int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, |
2161 | struct btrfs_root *root); | 2163 | struct btrfs_root *root); |
2162 | int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | 2164 | int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, |
@@ -2227,10 +2229,12 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); | |||
2227 | int btrfs_error_unpin_extent_range(struct btrfs_root *root, | 2229 | int btrfs_error_unpin_extent_range(struct btrfs_root *root, |
2228 | u64 start, u64 end); | 2230 | u64 start, u64 end); |
2229 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, | 2231 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, |
2230 | u64 num_bytes); | 2232 | u64 num_bytes, u64 *actual_bytes); |
2231 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, | 2233 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, |
2232 | struct btrfs_root *root, u64 type); | 2234 | struct btrfs_root *root, u64 type); |
2235 | int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range); | ||
2233 | 2236 | ||
2237 | int btrfs_init_space_info(struct btrfs_fs_info *fs_info); | ||
2234 | /* ctree.c */ | 2238 | /* ctree.c */ |
2235 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, | 2239 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, |
2236 | int level, int *slot); | 2240 | int level, int *slot); |
@@ -2392,6 +2396,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, | |||
2392 | struct btrfs_path *path, u64 dir, | 2396 | struct btrfs_path *path, u64 dir, |
2393 | const char *name, u16 name_len, | 2397 | const char *name, u16 name_len, |
2394 | int mod); | 2398 | int mod); |
2399 | int verify_dir_item(struct btrfs_root *root, | ||
2400 | struct extent_buffer *leaf, | ||
2401 | struct btrfs_dir_item *dir_item); | ||
2395 | 2402 | ||
2396 | /* orphan.c */ | 2403 | /* orphan.c */ |
2397 | int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, | 2404 | int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, |
@@ -2528,7 +2535,7 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans, | |||
2528 | struct inode *inode); | 2535 | struct inode *inode); |
2529 | int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); | 2536 | int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); |
2530 | int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); | 2537 | int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); |
2531 | void btrfs_orphan_cleanup(struct btrfs_root *root); | 2538 | int btrfs_orphan_cleanup(struct btrfs_root *root); |
2532 | void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans, | 2539 | void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans, |
2533 | struct btrfs_pending_snapshot *pending, | 2540 | struct btrfs_pending_snapshot *pending, |
2534 | u64 *bytes_to_reserve); | 2541 | u64 *bytes_to_reserve); |
@@ -2536,7 +2543,7 @@ void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans, | |||
2536 | struct btrfs_pending_snapshot *pending); | 2543 | struct btrfs_pending_snapshot *pending); |
2537 | void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, | 2544 | void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, |
2538 | struct btrfs_root *root); | 2545 | struct btrfs_root *root); |
2539 | int btrfs_cont_expand(struct inode *inode, loff_t size); | 2546 | int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); |
2540 | int btrfs_invalidate_inodes(struct btrfs_root *root); | 2547 | int btrfs_invalidate_inodes(struct btrfs_root *root); |
2541 | void btrfs_add_delayed_iput(struct inode *inode); | 2548 | void btrfs_add_delayed_iput(struct inode *inode); |
2542 | void btrfs_run_delayed_iputs(struct btrfs_root *root); | 2549 | void btrfs_run_delayed_iputs(struct btrfs_root *root); |
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index e807b143b857..bce28f653899 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c | |||
@@ -483,6 +483,8 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans, | |||
483 | INIT_LIST_HEAD(&head_ref->cluster); | 483 | INIT_LIST_HEAD(&head_ref->cluster); |
484 | mutex_init(&head_ref->mutex); | 484 | mutex_init(&head_ref->mutex); |
485 | 485 | ||
486 | trace_btrfs_delayed_ref_head(ref, head_ref, action); | ||
487 | |||
486 | existing = tree_insert(&delayed_refs->root, &ref->rb_node); | 488 | existing = tree_insert(&delayed_refs->root, &ref->rb_node); |
487 | 489 | ||
488 | if (existing) { | 490 | if (existing) { |
@@ -537,6 +539,8 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans, | |||
537 | } | 539 | } |
538 | full_ref->level = level; | 540 | full_ref->level = level; |
539 | 541 | ||
542 | trace_btrfs_delayed_tree_ref(ref, full_ref, action); | ||
543 | |||
540 | existing = tree_insert(&delayed_refs->root, &ref->rb_node); | 544 | existing = tree_insert(&delayed_refs->root, &ref->rb_node); |
541 | 545 | ||
542 | if (existing) { | 546 | if (existing) { |
@@ -591,6 +595,8 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans, | |||
591 | full_ref->objectid = owner; | 595 | full_ref->objectid = owner; |
592 | full_ref->offset = offset; | 596 | full_ref->offset = offset; |
593 | 597 | ||
598 | trace_btrfs_delayed_data_ref(ref, full_ref, action); | ||
599 | |||
594 | existing = tree_insert(&delayed_refs->root, &ref->rb_node); | 600 | existing = tree_insert(&delayed_refs->root, &ref->rb_node); |
595 | 601 | ||
596 | if (existing) { | 602 | if (existing) { |
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index f0cad5ae5be7..c62f02f6ae69 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c | |||
@@ -151,7 +151,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root | |||
151 | ret = PTR_ERR(dir_item); | 151 | ret = PTR_ERR(dir_item); |
152 | if (ret == -EEXIST) | 152 | if (ret == -EEXIST) |
153 | goto second_insert; | 153 | goto second_insert; |
154 | goto out; | 154 | goto out_free; |
155 | } | 155 | } |
156 | 156 | ||
157 | leaf = path->nodes[0]; | 157 | leaf = path->nodes[0]; |
@@ -170,7 +170,7 @@ second_insert: | |||
170 | /* FIXME, use some real flag for selecting the extra index */ | 170 | /* FIXME, use some real flag for selecting the extra index */ |
171 | if (root == root->fs_info->tree_root) { | 171 | if (root == root->fs_info->tree_root) { |
172 | ret = 0; | 172 | ret = 0; |
173 | goto out; | 173 | goto out_free; |
174 | } | 174 | } |
175 | btrfs_release_path(root, path); | 175 | btrfs_release_path(root, path); |
176 | 176 | ||
@@ -180,7 +180,7 @@ second_insert: | |||
180 | name, name_len); | 180 | name, name_len); |
181 | if (IS_ERR(dir_item)) { | 181 | if (IS_ERR(dir_item)) { |
182 | ret2 = PTR_ERR(dir_item); | 182 | ret2 = PTR_ERR(dir_item); |
183 | goto out; | 183 | goto out_free; |
184 | } | 184 | } |
185 | leaf = path->nodes[0]; | 185 | leaf = path->nodes[0]; |
186 | btrfs_cpu_key_to_disk(&disk_key, location); | 186 | btrfs_cpu_key_to_disk(&disk_key, location); |
@@ -192,7 +192,9 @@ second_insert: | |||
192 | name_ptr = (unsigned long)(dir_item + 1); | 192 | name_ptr = (unsigned long)(dir_item + 1); |
193 | write_extent_buffer(leaf, name, name_ptr, name_len); | 193 | write_extent_buffer(leaf, name, name_ptr, name_len); |
194 | btrfs_mark_buffer_dirty(leaf); | 194 | btrfs_mark_buffer_dirty(leaf); |
195 | out: | 195 | |
196 | out_free: | ||
197 | |||
196 | btrfs_free_path(path); | 198 | btrfs_free_path(path); |
197 | if (ret) | 199 | if (ret) |
198 | return ret; | 200 | return ret; |
@@ -377,6 +379,9 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, | |||
377 | 379 | ||
378 | leaf = path->nodes[0]; | 380 | leaf = path->nodes[0]; |
379 | dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); | 381 | dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); |
382 | if (verify_dir_item(root, leaf, dir_item)) | ||
383 | return NULL; | ||
384 | |||
380 | total_len = btrfs_item_size_nr(leaf, path->slots[0]); | 385 | total_len = btrfs_item_size_nr(leaf, path->slots[0]); |
381 | while (cur < total_len) { | 386 | while (cur < total_len) { |
382 | this_len = sizeof(*dir_item) + | 387 | this_len = sizeof(*dir_item) + |
@@ -429,3 +434,35 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, | |||
429 | } | 434 | } |
430 | return ret; | 435 | return ret; |
431 | } | 436 | } |
437 | |||
438 | int verify_dir_item(struct btrfs_root *root, | ||
439 | struct extent_buffer *leaf, | ||
440 | struct btrfs_dir_item *dir_item) | ||
441 | { | ||
442 | u16 namelen = BTRFS_NAME_LEN; | ||
443 | u8 type = btrfs_dir_type(leaf, dir_item); | ||
444 | |||
445 | if (type >= BTRFS_FT_MAX) { | ||
446 | printk(KERN_CRIT "btrfs: invalid dir item type: %d\n", | ||
447 | (int)type); | ||
448 | return 1; | ||
449 | } | ||
450 | |||
451 | if (type == BTRFS_FT_XATTR) | ||
452 | namelen = XATTR_NAME_MAX; | ||
453 | |||
454 | if (btrfs_dir_name_len(leaf, dir_item) > namelen) { | ||
455 | printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n", | ||
456 | (unsigned)btrfs_dir_name_len(leaf, dir_item)); | ||
457 | return 1; | ||
458 | } | ||
459 | |||
460 | /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */ | ||
461 | if (btrfs_dir_data_len(leaf, dir_item) > BTRFS_MAX_XATTR_SIZE(root)) { | ||
462 | printk(KERN_CRIT "btrfs: invalid dir item data len: %u\n", | ||
463 | (unsigned)btrfs_dir_data_len(leaf, dir_item)); | ||
464 | return 1; | ||
465 | } | ||
466 | |||
467 | return 0; | ||
468 | } | ||
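verify_dir_item() returns nonzero when the on-disk item is malformed, and callers such as btrfs_match_dir_item_name() (hunk above) bail out before trusting the stored lengths. A hedged sketch of how a reader might use it; read_one_dir_name() is a made-up function:

static int read_one_dir_name(struct btrfs_root *root,
                             struct extent_buffer *leaf, int slot,
                             char *buf, u32 buf_len)
{
        struct btrfs_dir_item *di;
        u32 name_len;

        di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
        if (verify_dir_item(root, leaf, di))
                return -EIO;            /* don't parse a corrupt item */

        name_len = btrfs_dir_name_len(leaf, di);
        if (name_len > buf_len)
                return -ENAMETOOLONG;
        /* the name bytes immediately follow the fixed-size item header */
        read_extent_buffer(leaf, buf, (unsigned long)(di + 1), name_len);
        return name_len;
}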
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 830d261d0e6b..d7a7315bd031 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/crc32c.h> | 29 | #include <linux/crc32c.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/migrate.h> | 31 | #include <linux/migrate.h> |
32 | #include <asm/unaligned.h> | ||
32 | #include "compat.h" | 33 | #include "compat.h" |
33 | #include "ctree.h" | 34 | #include "ctree.h" |
34 | #include "disk-io.h" | 35 | #include "disk-io.h" |
@@ -198,7 +199,7 @@ u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len) | |||
198 | 199 | ||
199 | void btrfs_csum_final(u32 crc, char *result) | 200 | void btrfs_csum_final(u32 crc, char *result) |
200 | { | 201 | { |
201 | *(__le32 *)result = ~cpu_to_le32(crc); | 202 | put_unaligned_le32(~crc, result); |
202 | } | 203 | } |
203 | 204 | ||
204 | /* | 205 | /* |
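btrfs_csum_final() now stores the inverted CRC with put_unaligned_le32() because the destination may sit at any byte offset inside the block header and the on-disk checksum is little-endian; the old cast-and-store could fault on strict-alignment architectures. The same effect written as portable byte stores, purely for illustration:

#include <stdint.h>

static void put_le32(uint32_t v, unsigned char *out)
{
        out[0] =  v        & 0xff;
        out[1] = (v >> 8)  & 0xff;
        out[2] = (v >> 16) & 0xff;
        out[3] = (v >> 24) & 0xff;
}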
@@ -323,6 +324,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, | |||
323 | int num_copies = 0; | 324 | int num_copies = 0; |
324 | int mirror_num = 0; | 325 | int mirror_num = 0; |
325 | 326 | ||
327 | clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); | ||
326 | io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; | 328 | io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; |
327 | while (1) { | 329 | while (1) { |
328 | ret = read_extent_buffer_pages(io_tree, eb, start, 1, | 330 | ret = read_extent_buffer_pages(io_tree, eb, start, 1, |
@@ -331,6 +333,14 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, | |||
331 | !verify_parent_transid(io_tree, eb, parent_transid)) | 333 | !verify_parent_transid(io_tree, eb, parent_transid)) |
332 | return ret; | 334 | return ret; |
333 | 335 | ||
336 | /* | ||
337 | * This buffer's crc is fine, but its contents are corrupted, so | ||
338 | * there is no reason to read the other copies; they won't be | ||
339 | * any less wrong. | ||
340 | */ | ||
341 | if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) | ||
342 | return ret; | ||
343 | |||
334 | num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, | 344 | num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, |
335 | eb->start, eb->len); | 345 | eb->start, eb->len); |
336 | if (num_copies == 1) | 346 | if (num_copies == 1) |
@@ -419,6 +429,73 @@ static int check_tree_block_fsid(struct btrfs_root *root, | |||
419 | return ret; | 429 | return ret; |
420 | } | 430 | } |
421 | 431 | ||
432 | #define CORRUPT(reason, eb, root, slot) \ | ||
433 | printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu, " \ | ||
434 | "root=%llu, slot=%d\n", reason, \ | ||
435 | (unsigned long long)btrfs_header_bytenr(eb), \ | ||
436 | (unsigned long long)root->objectid, slot) | ||
437 | |||
438 | static noinline int check_leaf(struct btrfs_root *root, | ||
439 | struct extent_buffer *leaf) | ||
440 | { | ||
441 | struct btrfs_key key; | ||
442 | struct btrfs_key leaf_key; | ||
443 | u32 nritems = btrfs_header_nritems(leaf); | ||
444 | int slot; | ||
445 | |||
446 | if (nritems == 0) | ||
447 | return 0; | ||
448 | |||
449 | /* Check the 0 item */ | ||
450 | if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) != | ||
451 | BTRFS_LEAF_DATA_SIZE(root)) { | ||
452 | CORRUPT("invalid item offset size pair", leaf, root, 0); | ||
453 | return -EIO; | ||
454 | } | ||
455 | |||
456 | /* | ||
457 | * Check to make sure each item's keys are in the correct order and their | ||
458 | * offsets make sense. We only have to loop through nritems-1 because | ||
459 | * we check the current slot against the next slot, which verifies that the | ||
460 | * next slot's offset+size makes sense and that the current slot's | ||
461 | * offset is correct. | ||
462 | */ | ||
463 | for (slot = 0; slot < nritems - 1; slot++) { | ||
464 | btrfs_item_key_to_cpu(leaf, &leaf_key, slot); | ||
465 | btrfs_item_key_to_cpu(leaf, &key, slot + 1); | ||
466 | |||
467 | /* Make sure the keys are in the right order */ | ||
468 | if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) { | ||
469 | CORRUPT("bad key order", leaf, root, slot); | ||
470 | return -EIO; | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * Make sure the offsets and ends are right; remember that the | ||
475 | * item data starts at the end of the leaf and grows towards the | ||
476 | * front. | ||
477 | */ | ||
478 | if (btrfs_item_offset_nr(leaf, slot) != | ||
479 | btrfs_item_end_nr(leaf, slot + 1)) { | ||
480 | CORRUPT("slot offset bad", leaf, root, slot); | ||
481 | return -EIO; | ||
482 | } | ||
483 | |||
484 | /* | ||
485 | * Check to make sure that we don't point outside of the leaf, | ||
486 | * just in case all the items are consistent with each other, but | ||
487 | * all point outside of the leaf. | ||
488 | */ | ||
489 | if (btrfs_item_end_nr(leaf, slot) > | ||
490 | BTRFS_LEAF_DATA_SIZE(root)) { | ||
491 | CORRUPT("slot end outside of leaf", leaf, root, slot); | ||
492 | return -EIO; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | return 0; | ||
497 | } | ||
498 | |||
422 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 499 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
423 | void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level) | 500 | void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level) |
424 | { | 501 | { |
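The check_leaf() hunk above relies on the btrfs leaf layout: item headers grow forward from the start of the leaf while item data grows backward from the end, so item 0's data must end exactly at BTRFS_LEAF_DATA_SIZE and each item's data must start where the next item's data ends. A worked example with made-up sizes:

/* Assume BTRFS_LEAF_DATA_SIZE(root) == 3995 (hypothetical value):
 *
 *   slot   item_offset_nr   item_size_nr   item_end_nr (= offset + size)
 *    0          3895             100            3995  == leaf data size    ok
 *    1          3845              50            3895  == offset of slot 0  ok
 *    2          3825              20            3845  == offset of slot 1  ok
 *
 * Any gap or overlap breaks one of these equalities and check_leaf()
 * returns -EIO instead of letting the caller parse the leaf.
 */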
@@ -485,8 +562,20 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, | |||
485 | btrfs_set_buffer_lockdep_class(eb, found_level); | 562 | btrfs_set_buffer_lockdep_class(eb, found_level); |
486 | 563 | ||
487 | ret = csum_tree_block(root, eb, 1); | 564 | ret = csum_tree_block(root, eb, 1); |
488 | if (ret) | 565 | if (ret) { |
489 | ret = -EIO; | 566 | ret = -EIO; |
567 | goto err; | ||
568 | } | ||
569 | |||
570 | /* | ||
571 | * If this is a leaf block and it is corrupt, set the corrupt bit so | ||
572 | * that we don't try to read the other copies of this block; just | ||
573 | * return -EIO. | ||
574 | */ | ||
575 | if (found_level == 0 && check_leaf(root, eb)) { | ||
576 | set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); | ||
577 | ret = -EIO; | ||
578 | } | ||
490 | 579 | ||
491 | end = min_t(u64, eb->len, PAGE_CACHE_SIZE); | 580 | end = min_t(u64, eb->len, PAGE_CACHE_SIZE); |
492 | end = eb->start + end - 1; | 581 | end = eb->start + end - 1; |
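The three disk-io.c hunks above work together: when a block's checksum verifies but check_leaf() rejects its contents, the end_io hook sets EXTENT_BUFFER_CORRUPT, and the read-retry loop gives up instead of reading other mirrors, since every copy would decode to the same bad structure. A small illustrative helper capturing that decision (not the actual retry loop):

/* Decide whether another mirror is worth reading; mirrors the bit test
 * added to btree_read_extent_buffer_pages(). Illustrative only. */
static bool retry_other_mirror(struct extent_buffer *eb, int ret,
                               int mirror_num, int num_copies)
{
        if (!ret)
                return false;           /* this copy was good */
        if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                return false;   /* contents bad despite a good csum: all copies will be */
        return mirror_num < num_copies; /* try the next mirror */
}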
@@ -1159,7 +1248,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, | |||
1159 | root, fs_info, location->objectid); | 1248 | root, fs_info, location->objectid); |
1160 | 1249 | ||
1161 | path = btrfs_alloc_path(); | 1250 | path = btrfs_alloc_path(); |
1162 | BUG_ON(!path); | 1251 | if (!path) { |
1252 | kfree(root); | ||
1253 | return ERR_PTR(-ENOMEM); | ||
1254 | } | ||
1163 | ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0); | 1255 | ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0); |
1164 | if (ret == 0) { | 1256 | if (ret == 0) { |
1165 | l = path->nodes[0]; | 1257 | l = path->nodes[0]; |
@@ -1553,6 +1645,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1553 | goto fail_bdi; | 1645 | goto fail_bdi; |
1554 | } | 1646 | } |
1555 | 1647 | ||
1648 | fs_info->btree_inode->i_mapping->flags &= ~__GFP_FS; | ||
1649 | |||
1556 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); | 1650 | INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); |
1557 | INIT_LIST_HEAD(&fs_info->trans_list); | 1651 | INIT_LIST_HEAD(&fs_info->trans_list); |
1558 | INIT_LIST_HEAD(&fs_info->dead_roots); | 1652 | INIT_LIST_HEAD(&fs_info->dead_roots); |
@@ -1683,6 +1777,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1683 | 1777 | ||
1684 | btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); | 1778 | btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); |
1685 | 1779 | ||
1780 | /* | ||
1781 | * In the long term, we'll store the compression type in the super | ||
1782 | * block, and it'll be used for per file compression control. | ||
1783 | */ | ||
1784 | fs_info->compress_type = BTRFS_COMPRESS_ZLIB; | ||
1785 | |||
1686 | ret = btrfs_parse_options(tree_root, options); | 1786 | ret = btrfs_parse_options(tree_root, options); |
1687 | if (ret) { | 1787 | if (ret) { |
1688 | err = ret; | 1788 | err = ret; |
@@ -1888,6 +1988,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1888 | fs_info->metadata_alloc_profile = (u64)-1; | 1988 | fs_info->metadata_alloc_profile = (u64)-1; |
1889 | fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; | 1989 | fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; |
1890 | 1990 | ||
1991 | ret = btrfs_init_space_info(fs_info); | ||
1992 | if (ret) { | ||
1993 | printk(KERN_ERR "Failed to initial space info: %d\n", ret); | ||
1994 | goto fail_block_groups; | ||
1995 | } | ||
1996 | |||
1891 | ret = btrfs_read_block_groups(extent_root); | 1997 | ret = btrfs_read_block_groups(extent_root); |
1892 | if (ret) { | 1998 | if (ret) { |
1893 | printk(KERN_ERR "Failed to read block groups: %d\n", ret); | 1999 | printk(KERN_ERR "Failed to read block groups: %d\n", ret); |
@@ -1979,9 +2085,14 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1979 | 2085 | ||
1980 | if (!(sb->s_flags & MS_RDONLY)) { | 2086 | if (!(sb->s_flags & MS_RDONLY)) { |
1981 | down_read(&fs_info->cleanup_work_sem); | 2087 | down_read(&fs_info->cleanup_work_sem); |
1982 | btrfs_orphan_cleanup(fs_info->fs_root); | 2088 | err = btrfs_orphan_cleanup(fs_info->fs_root); |
1983 | btrfs_orphan_cleanup(fs_info->tree_root); | 2089 | if (!err) |
2090 | err = btrfs_orphan_cleanup(fs_info->tree_root); | ||
1984 | up_read(&fs_info->cleanup_work_sem); | 2091 | up_read(&fs_info->cleanup_work_sem); |
2092 | if (err) { | ||
2093 | close_ctree(tree_root); | ||
2094 | return ERR_PTR(err); | ||
2095 | } | ||
1985 | } | 2096 | } |
1986 | 2097 | ||
1987 | return tree_root; | 2098 | return tree_root; |
@@ -2356,8 +2467,12 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) | |||
2356 | 2467 | ||
2357 | root_objectid = gang[ret - 1]->root_key.objectid + 1; | 2468 | root_objectid = gang[ret - 1]->root_key.objectid + 1; |
2358 | for (i = 0; i < ret; i++) { | 2469 | for (i = 0; i < ret; i++) { |
2470 | int err; | ||
2471 | |||
2359 | root_objectid = gang[i]->root_key.objectid; | 2472 | root_objectid = gang[i]->root_key.objectid; |
2360 | btrfs_orphan_cleanup(gang[i]); | 2473 | err = btrfs_orphan_cleanup(gang[i]); |
2474 | if (err) | ||
2475 | return err; | ||
2361 | } | 2476 | } |
2362 | root_objectid++; | 2477 | root_objectid++; |
2363 | } | 2478 | } |
@@ -2868,7 +2983,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
2868 | break; | 2983 | break; |
2869 | 2984 | ||
2870 | /* opt_discard */ | 2985 | /* opt_discard */ |
2871 | ret = btrfs_error_discard_extent(root, start, end + 1 - start); | 2986 | if (btrfs_test_opt(root, DISCARD)) |
2987 | ret = btrfs_error_discard_extent(root, start, | ||
2988 | end + 1 - start, | ||
2989 | NULL); | ||
2872 | 2990 | ||
2873 | clear_extent_dirty(unpin, start, end, GFP_NOFS); | 2991 | clear_extent_dirty(unpin, start, end, GFP_NOFS); |
2874 | btrfs_error_unpin_extent_range(root, start, end); | 2992 | btrfs_error_unpin_extent_range(root, start, end); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7b3089b5c2df..f619c3cb13b7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -36,8 +36,6 @@ | |||
36 | static int update_block_group(struct btrfs_trans_handle *trans, | 36 | static int update_block_group(struct btrfs_trans_handle *trans, |
37 | struct btrfs_root *root, | 37 | struct btrfs_root *root, |
38 | u64 bytenr, u64 num_bytes, int alloc); | 38 | u64 bytenr, u64 num_bytes, int alloc); |
39 | static int update_reserved_bytes(struct btrfs_block_group_cache *cache, | ||
40 | u64 num_bytes, int reserve, int sinfo); | ||
41 | static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | 39 | static int __btrfs_free_extent(struct btrfs_trans_handle *trans, |
42 | struct btrfs_root *root, | 40 | struct btrfs_root *root, |
43 | u64 bytenr, u64 num_bytes, u64 parent, | 41 | u64 bytenr, u64 num_bytes, u64 parent, |
@@ -442,7 +440,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, | |||
442 | * allocate blocks for the tree root we can't do the fast caching since | 440 | * allocate blocks for the tree root we can't do the fast caching since |
443 | * we likely hold important locks. | 441 | * we likely hold important locks. |
444 | */ | 442 | */ |
445 | if (!trans->transaction->in_commit && | 443 | if (trans && (!trans->transaction->in_commit) && |
446 | (root && root != root->fs_info->tree_root)) { | 444 | (root && root != root->fs_info->tree_root)) { |
447 | spin_lock(&cache->lock); | 445 | spin_lock(&cache->lock); |
448 | if (cache->cached != BTRFS_CACHE_NO) { | 446 | if (cache->cached != BTRFS_CACHE_NO) { |
@@ -471,7 +469,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, | |||
471 | if (load_cache_only) | 469 | if (load_cache_only) |
472 | return 0; | 470 | return 0; |
473 | 471 | ||
474 | caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL); | 472 | caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); |
475 | BUG_ON(!caching_ctl); | 473 | BUG_ON(!caching_ctl); |
476 | 474 | ||
477 | INIT_LIST_HEAD(&caching_ctl->list); | 475 | INIT_LIST_HEAD(&caching_ctl->list); |
@@ -1740,39 +1738,45 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans, | |||
1740 | return ret; | 1738 | return ret; |
1741 | } | 1739 | } |
1742 | 1740 | ||
1743 | static void btrfs_issue_discard(struct block_device *bdev, | 1741 | static int btrfs_issue_discard(struct block_device *bdev, |
1744 | u64 start, u64 len) | 1742 | u64 start, u64 len) |
1745 | { | 1743 | { |
1746 | blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0); | 1744 | return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0); |
1747 | } | 1745 | } |
1748 | 1746 | ||
1749 | static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | 1747 | static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, |
1750 | u64 num_bytes) | 1748 | u64 num_bytes, u64 *actual_bytes) |
1751 | { | 1749 | { |
1752 | int ret; | 1750 | int ret; |
1753 | u64 map_length = num_bytes; | 1751 | u64 discarded_bytes = 0; |
1754 | struct btrfs_multi_bio *multi = NULL; | 1752 | struct btrfs_multi_bio *multi = NULL; |
1755 | 1753 | ||
1756 | if (!btrfs_test_opt(root, DISCARD)) | ||
1757 | return 0; | ||
1758 | 1754 | ||
1759 | /* Tell the block device(s) that the sectors can be discarded */ | 1755 | /* Tell the block device(s) that the sectors can be discarded */ |
1760 | ret = btrfs_map_block(&root->fs_info->mapping_tree, READ, | 1756 | ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD, |
1761 | bytenr, &map_length, &multi, 0); | 1757 | bytenr, &num_bytes, &multi, 0); |
1762 | if (!ret) { | 1758 | if (!ret) { |
1763 | struct btrfs_bio_stripe *stripe = multi->stripes; | 1759 | struct btrfs_bio_stripe *stripe = multi->stripes; |
1764 | int i; | 1760 | int i; |
1765 | 1761 | ||
1766 | if (map_length > num_bytes) | ||
1767 | map_length = num_bytes; | ||
1768 | 1762 | ||
1769 | for (i = 0; i < multi->num_stripes; i++, stripe++) { | 1763 | for (i = 0; i < multi->num_stripes; i++, stripe++) { |
1770 | btrfs_issue_discard(stripe->dev->bdev, | 1764 | ret = btrfs_issue_discard(stripe->dev->bdev, |
1771 | stripe->physical, | 1765 | stripe->physical, |
1772 | map_length); | 1766 | stripe->length); |
1767 | if (!ret) | ||
1768 | discarded_bytes += stripe->length; | ||
1769 | else if (ret != -EOPNOTSUPP) | ||
1770 | break; | ||
1773 | } | 1771 | } |
1774 | kfree(multi); | 1772 | kfree(multi); |
1775 | } | 1773 | } |
1774 | if (discarded_bytes && ret == -EOPNOTSUPP) | ||
1775 | ret = 0; | ||
1776 | |||
1777 | if (actual_bytes) | ||
1778 | *actual_bytes = discarded_bytes; | ||
1779 | |||
1776 | 1780 | ||
1777 | return ret; | 1781 | return ret; |
1778 | } | 1782 | } |
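btrfs_discard_extent() now maps the logical range onto per-device stripes, sums the bytes each device actually discarded into *actual_bytes, and treats -EOPNOTSUPP as non-fatal as long as at least one device accepted the request. A hedged sketch of that accumulation pattern; struct stripe_range and device_discard() are hypothetical stand-ins for the real types and for btrfs_issue_discard():

struct stripe_range {
        struct block_device *bdev;
        u64 physical;
        u64 length;
};

static int discard_all_stripes(struct stripe_range *stripes, int nr,
                               u64 *actual_bytes)
{
        u64 done = 0;
        int i, ret = 0;

        for (i = 0; i < nr; i++) {
                ret = device_discard(stripes[i].bdev, stripes[i].physical,
                                     stripes[i].length);
                if (!ret)
                        done += stripes[i].length;
                else if (ret != -EOPNOTSUPP)
                        break;          /* a real error: stop early */
        }
        if (done && ret == -EOPNOTSUPP)
                ret = 0;        /* some devices simply lack TRIM support */
        if (actual_bytes)
                *actual_bytes = done;
        return ret;
}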
@@ -3996,6 +4000,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
3996 | struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; | 4000 | struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; |
3997 | u64 to_reserve; | 4001 | u64 to_reserve; |
3998 | int nr_extents; | 4002 | int nr_extents; |
4003 | int reserved_extents; | ||
3999 | int ret; | 4004 | int ret; |
4000 | 4005 | ||
4001 | if (btrfs_transaction_in_commit(root->fs_info)) | 4006 | if (btrfs_transaction_in_commit(root->fs_info)) |
@@ -4003,25 +4008,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
4003 | 4008 | ||
4004 | num_bytes = ALIGN(num_bytes, root->sectorsize); | 4009 | num_bytes = ALIGN(num_bytes, root->sectorsize); |
4005 | 4010 | ||
4006 | spin_lock(&BTRFS_I(inode)->accounting_lock); | ||
4007 | nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; | 4011 | nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; |
4008 | if (nr_extents > BTRFS_I(inode)->reserved_extents) { | 4012 | reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents); |
4009 | nr_extents -= BTRFS_I(inode)->reserved_extents; | 4013 | |
4014 | if (nr_extents > reserved_extents) { | ||
4015 | nr_extents -= reserved_extents; | ||
4010 | to_reserve = calc_trans_metadata_size(root, nr_extents); | 4016 | to_reserve = calc_trans_metadata_size(root, nr_extents); |
4011 | } else { | 4017 | } else { |
4012 | nr_extents = 0; | 4018 | nr_extents = 0; |
4013 | to_reserve = 0; | 4019 | to_reserve = 0; |
4014 | } | 4020 | } |
4015 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | 4021 | |
4016 | to_reserve += calc_csum_metadata_size(inode, num_bytes); | 4022 | to_reserve += calc_csum_metadata_size(inode, num_bytes); |
4017 | ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); | 4023 | ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); |
4018 | if (ret) | 4024 | if (ret) |
4019 | return ret; | 4025 | return ret; |
4020 | 4026 | ||
4021 | spin_lock(&BTRFS_I(inode)->accounting_lock); | 4027 | atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents); |
4022 | BTRFS_I(inode)->reserved_extents += nr_extents; | ||
4023 | atomic_inc(&BTRFS_I(inode)->outstanding_extents); | 4028 | atomic_inc(&BTRFS_I(inode)->outstanding_extents); |
4024 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | ||
4025 | 4029 | ||
4026 | block_rsv_add_bytes(block_rsv, to_reserve, 1); | 4030 | block_rsv_add_bytes(block_rsv, to_reserve, 1); |
4027 | 4031 | ||
@@ -4036,20 +4040,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) | |||
4036 | struct btrfs_root *root = BTRFS_I(inode)->root; | 4040 | struct btrfs_root *root = BTRFS_I(inode)->root; |
4037 | u64 to_free; | 4041 | u64 to_free; |
4038 | int nr_extents; | 4042 | int nr_extents; |
4043 | int reserved_extents; | ||
4039 | 4044 | ||
4040 | num_bytes = ALIGN(num_bytes, root->sectorsize); | 4045 | num_bytes = ALIGN(num_bytes, root->sectorsize); |
4041 | atomic_dec(&BTRFS_I(inode)->outstanding_extents); | 4046 | atomic_dec(&BTRFS_I(inode)->outstanding_extents); |
4042 | WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); | 4047 | WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); |
4043 | 4048 | ||
4044 | spin_lock(&BTRFS_I(inode)->accounting_lock); | 4049 | reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents); |
4045 | nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); | 4050 | do { |
4046 | if (nr_extents < BTRFS_I(inode)->reserved_extents) { | 4051 | int old, new; |
4047 | nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents; | 4052 | |
4048 | BTRFS_I(inode)->reserved_extents -= nr_extents; | 4053 | nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); |
4049 | } else { | 4054 | if (nr_extents >= reserved_extents) { |
4050 | nr_extents = 0; | 4055 | nr_extents = 0; |
4051 | } | 4056 | break; |
4052 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | 4057 | } |
4058 | old = reserved_extents; | ||
4059 | nr_extents = reserved_extents - nr_extents; | ||
4060 | new = reserved_extents - nr_extents; | ||
4061 | old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents, | ||
4062 | reserved_extents, new); | ||
4063 | if (likely(old == reserved_extents)) | ||
4064 | break; | ||
4065 | reserved_extents = old; | ||
4066 | } while (1); | ||
4053 | 4067 | ||
4054 | to_free = calc_csum_metadata_size(inode, num_bytes); | 4068 | to_free = calc_csum_metadata_size(inode, num_bytes); |
4055 | if (nr_extents > 0) | 4069 | if (nr_extents > 0) |
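btrfs_delalloc_release_metadata() above stops taking accounting_lock and instead shrinks reserved_extents with an atomic_cmpxchg() loop, re-reading the counter and retrying whenever another task updated it in between. The generic shape of that lock-free update, as a standalone sketch of the pattern rather than the btrfs code:

#include <linux/atomic.h>

/* Atomically subtract 'delta' from 'counter' without a lock and return
 * the new value. */
static int atomic_sub_retry(atomic_t *counter, int delta)
{
        int old = atomic_read(counter);

        for (;;) {
                int new = old - delta;
                int seen = atomic_cmpxchg(counter, old, new);

                if (seen == old)        /* nobody raced with us: new value is live */
                        return new;
                old = seen;             /* lost the race: retry with the fresh value */
        }
}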
@@ -4223,8 +4237,8 @@ int btrfs_pin_extent(struct btrfs_root *root, | |||
4223 | * update size of reserved extents. this function may return -EAGAIN | 4237 | * update size of reserved extents. this function may return -EAGAIN |
4224 | * if 'reserve' is true or 'sinfo' is false. | 4238 | * if 'reserve' is true or 'sinfo' is false. |
4225 | */ | 4239 | */ |
4226 | static int update_reserved_bytes(struct btrfs_block_group_cache *cache, | 4240 | int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, |
4227 | u64 num_bytes, int reserve, int sinfo) | 4241 | u64 num_bytes, int reserve, int sinfo) |
4228 | { | 4242 | { |
4229 | int ret = 0; | 4243 | int ret = 0; |
4230 | if (sinfo) { | 4244 | if (sinfo) { |
@@ -4363,7 +4377,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | |||
4363 | if (ret) | 4377 | if (ret) |
4364 | break; | 4378 | break; |
4365 | 4379 | ||
4366 | ret = btrfs_discard_extent(root, start, end + 1 - start); | 4380 | if (btrfs_test_opt(root, DISCARD)) |
4381 | ret = btrfs_discard_extent(root, start, | ||
4382 | end + 1 - start, NULL); | ||
4367 | 4383 | ||
4368 | clear_extent_dirty(unpin, start, end, GFP_NOFS); | 4384 | clear_extent_dirty(unpin, start, end, GFP_NOFS); |
4369 | unpin_extent_range(root, start, end); | 4385 | unpin_extent_range(root, start, end); |
@@ -4704,10 +4720,10 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, | |||
4704 | WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); | 4720 | WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); |
4705 | 4721 | ||
4706 | btrfs_add_free_space(cache, buf->start, buf->len); | 4722 | btrfs_add_free_space(cache, buf->start, buf->len); |
4707 | ret = update_reserved_bytes(cache, buf->len, 0, 0); | 4723 | ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0); |
4708 | if (ret == -EAGAIN) { | 4724 | if (ret == -EAGAIN) { |
4709 | /* block group became read-only */ | 4725 | /* block group became read-only */ |
4710 | update_reserved_bytes(cache, buf->len, 0, 1); | 4726 | btrfs_update_reserved_bytes(cache, buf->len, 0, 1); |
4711 | goto out; | 4727 | goto out; |
4712 | } | 4728 | } |
4713 | 4729 | ||
@@ -4744,6 +4760,11 @@ pin: | |||
4744 | } | 4760 | } |
4745 | } | 4761 | } |
4746 | out: | 4762 | out: |
4763 | /* | ||
4764 | * We are deleting the buffer; clear the corrupt flag since it doesn't | ||
4765 | * matter anymore. | ||
4766 | */ | ||
4767 | clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); | ||
4747 | btrfs_put_block_group(cache); | 4768 | btrfs_put_block_group(cache); |
4748 | } | 4769 | } |
4749 | 4770 | ||
@@ -5191,7 +5212,7 @@ checks: | |||
5191 | search_start - offset); | 5212 | search_start - offset); |
5192 | BUG_ON(offset > search_start); | 5213 | BUG_ON(offset > search_start); |
5193 | 5214 | ||
5194 | ret = update_reserved_bytes(block_group, num_bytes, 1, | 5215 | ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1, |
5195 | (data & BTRFS_BLOCK_GROUP_DATA)); | 5216 | (data & BTRFS_BLOCK_GROUP_DATA)); |
5196 | if (ret == -EAGAIN) { | 5217 | if (ret == -EAGAIN) { |
5197 | btrfs_add_free_space(block_group, offset, num_bytes); | 5218 | btrfs_add_free_space(block_group, offset, num_bytes); |
@@ -5397,6 +5418,8 @@ again: | |||
5397 | dump_space_info(sinfo, num_bytes, 1); | 5418 | dump_space_info(sinfo, num_bytes, 1); |
5398 | } | 5419 | } |
5399 | 5420 | ||
5421 | trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset); | ||
5422 | |||
5400 | return ret; | 5423 | return ret; |
5401 | } | 5424 | } |
5402 | 5425 | ||
@@ -5412,12 +5435,15 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) | |||
5412 | return -ENOSPC; | 5435 | return -ENOSPC; |
5413 | } | 5436 | } |
5414 | 5437 | ||
5415 | ret = btrfs_discard_extent(root, start, len); | 5438 | if (btrfs_test_opt(root, DISCARD)) |
5439 | ret = btrfs_discard_extent(root, start, len, NULL); | ||
5416 | 5440 | ||
5417 | btrfs_add_free_space(cache, start, len); | 5441 | btrfs_add_free_space(cache, start, len); |
5418 | update_reserved_bytes(cache, len, 0, 1); | 5442 | btrfs_update_reserved_bytes(cache, len, 0, 1); |
5419 | btrfs_put_block_group(cache); | 5443 | btrfs_put_block_group(cache); |
5420 | 5444 | ||
5445 | trace_btrfs_reserved_extent_free(root, start, len); | ||
5446 | |||
5421 | return ret; | 5447 | return ret; |
5422 | } | 5448 | } |
5423 | 5449 | ||
@@ -5444,7 +5470,8 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
5444 | size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); | 5470 | size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); |
5445 | 5471 | ||
5446 | path = btrfs_alloc_path(); | 5472 | path = btrfs_alloc_path(); |
5447 | BUG_ON(!path); | 5473 | if (!path) |
5474 | return -ENOMEM; | ||
5448 | 5475 | ||
5449 | path->leave_spinning = 1; | 5476 | path->leave_spinning = 1; |
5450 | ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, | 5477 | ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, |
@@ -5614,7 +5641,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, | |||
5614 | put_caching_control(caching_ctl); | 5641 | put_caching_control(caching_ctl); |
5615 | } | 5642 | } |
5616 | 5643 | ||
5617 | ret = update_reserved_bytes(block_group, ins->offset, 1, 1); | 5644 | ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1); |
5618 | BUG_ON(ret); | 5645 | BUG_ON(ret); |
5619 | btrfs_put_block_group(block_group); | 5646 | btrfs_put_block_group(block_group); |
5620 | ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, | 5647 | ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, |
@@ -6047,6 +6074,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans, | |||
6047 | if (reada && level == 1) | 6074 | if (reada && level == 1) |
6048 | reada_walk_down(trans, root, wc, path); | 6075 | reada_walk_down(trans, root, wc, path); |
6049 | next = read_tree_block(root, bytenr, blocksize, generation); | 6076 | next = read_tree_block(root, bytenr, blocksize, generation); |
6077 | if (!next) | ||
6078 | return -EIO; | ||
6050 | btrfs_tree_lock(next); | 6079 | btrfs_tree_lock(next); |
6051 | btrfs_set_lock_blocking(next); | 6080 | btrfs_set_lock_blocking(next); |
6052 | } | 6081 | } |
@@ -6438,10 +6467,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | |||
6438 | BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); | 6467 | BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); |
6439 | 6468 | ||
6440 | path = btrfs_alloc_path(); | 6469 | path = btrfs_alloc_path(); |
6441 | BUG_ON(!path); | 6470 | if (!path) |
6471 | return -ENOMEM; | ||
6442 | 6472 | ||
6443 | wc = kzalloc(sizeof(*wc), GFP_NOFS); | 6473 | wc = kzalloc(sizeof(*wc), GFP_NOFS); |
6444 | BUG_ON(!wc); | 6474 | if (!wc) { |
6475 | btrfs_free_path(path); | ||
6476 | return -ENOMEM; | ||
6477 | } | ||
6445 | 6478 | ||
6446 | btrfs_assert_tree_locked(parent); | 6479 | btrfs_assert_tree_locked(parent); |
6447 | parent_level = btrfs_header_level(parent); | 6480 | parent_level = btrfs_header_level(parent); |
@@ -6899,7 +6932,11 @@ static noinline int get_new_locations(struct inode *reloc_inode, | |||
6899 | } | 6932 | } |
6900 | 6933 | ||
6901 | path = btrfs_alloc_path(); | 6934 | path = btrfs_alloc_path(); |
6902 | BUG_ON(!path); | 6935 | if (!path) { |
6936 | if (exts != *extents) | ||
6937 | kfree(exts); | ||
6938 | return -ENOMEM; | ||
6939 | } | ||
6903 | 6940 | ||
6904 | cur_pos = extent_key->objectid - offset; | 6941 | cur_pos = extent_key->objectid - offset; |
6905 | last_byte = extent_key->objectid + extent_key->offset; | 6942 | last_byte = extent_key->objectid + extent_key->offset; |
@@ -6941,6 +6978,10 @@ static noinline int get_new_locations(struct inode *reloc_inode, | |||
6941 | struct disk_extent *old = exts; | 6978 | struct disk_extent *old = exts; |
6942 | max *= 2; | 6979 | max *= 2; |
6943 | exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); | 6980 | exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); |
6981 | if (!exts) { | ||
6982 | ret = -ENOMEM; | ||
6983 | goto out; | ||
6984 | } | ||
6944 | memcpy(exts, old, sizeof(*exts) * nr); | 6985 | memcpy(exts, old, sizeof(*exts) * nr); |
6945 | if (old != *extents) | 6986 | if (old != *extents) |
6946 | kfree(old); | 6987 | kfree(old); |
@@ -7423,7 +7464,8 @@ static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, | |||
7423 | int ret; | 7464 | int ret; |
7424 | 7465 | ||
7425 | new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); | 7466 | new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); |
7426 | BUG_ON(!new_extent); | 7467 | if (!new_extent) |
7468 | return -ENOMEM; | ||
7427 | 7469 | ||
7428 | ref = btrfs_lookup_leaf_ref(root, leaf->start); | 7470 | ref = btrfs_lookup_leaf_ref(root, leaf->start); |
7429 | BUG_ON(!ref); | 7471 | BUG_ON(!ref); |
@@ -7609,7 +7651,8 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) | |||
7609 | 7651 | ||
7610 | reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); | 7652 | reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); |
7611 | BUG_ON(!reloc_root); | 7653 | BUG_ON(!reloc_root); |
7612 | btrfs_orphan_cleanup(reloc_root); | 7654 | ret = btrfs_orphan_cleanup(reloc_root); |
7655 | BUG_ON(ret); | ||
7613 | return 0; | 7656 | return 0; |
7614 | } | 7657 | } |
7615 | 7658 | ||
@@ -7627,7 +7670,8 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, | |||
7627 | return 0; | 7670 | return 0; |
7628 | 7671 | ||
7629 | root_item = kmalloc(sizeof(*root_item), GFP_NOFS); | 7672 | root_item = kmalloc(sizeof(*root_item), GFP_NOFS); |
7630 | BUG_ON(!root_item); | 7673 | if (!root_item) |
7674 | return -ENOMEM; | ||
7631 | 7675 | ||
7632 | ret = btrfs_copy_root(trans, root, root->commit_root, | 7676 | ret = btrfs_copy_root(trans, root, root->commit_root, |
7633 | &eb, BTRFS_TREE_RELOC_OBJECTID); | 7677 | &eb, BTRFS_TREE_RELOC_OBJECTID); |
@@ -7653,7 +7697,7 @@ static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, | |||
7653 | 7697 | ||
7654 | reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, | 7698 | reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, |
7655 | &root_key); | 7699 | &root_key); |
7656 | BUG_ON(!reloc_root); | 7700 | BUG_ON(IS_ERR(reloc_root)); |
7657 | reloc_root->last_trans = trans->transid; | 7701 | reloc_root->last_trans = trans->transid; |
7658 | reloc_root->commit_root = NULL; | 7702 | reloc_root->commit_root = NULL; |
7659 | reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; | 7703 | reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; |
@@ -7906,6 +7950,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, | |||
7906 | 7950 | ||
7907 | eb = read_tree_block(found_root, block_start, | 7951 | eb = read_tree_block(found_root, block_start, |
7908 | block_size, 0); | 7952 | block_size, 0); |
7953 | if (!eb) { | ||
7954 | ret = -EIO; | ||
7955 | goto out; | ||
7956 | } | ||
7909 | btrfs_tree_lock(eb); | 7957 | btrfs_tree_lock(eb); |
7910 | BUG_ON(level != btrfs_header_level(eb)); | 7958 | BUG_ON(level != btrfs_header_level(eb)); |
7911 | 7959 | ||
@@ -8621,6 +8669,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | |||
8621 | BUG_ON(!block_group); | 8669 | BUG_ON(!block_group); |
8622 | BUG_ON(!block_group->ro); | 8670 | BUG_ON(!block_group->ro); |
8623 | 8671 | ||
8672 | /* | ||
8673 | * Free the reserved super bytes from this block group before | ||
8674 | * removing it. | ||
8675 | */ | ||
8676 | free_excluded_extents(root, block_group); | ||
8677 | |||
8624 | memcpy(&key, &block_group->key, sizeof(key)); | 8678 | memcpy(&key, &block_group->key, sizeof(key)); |
8625 | if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP | | 8679 | if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP | |
8626 | BTRFS_BLOCK_GROUP_RAID1 | | 8680 | BTRFS_BLOCK_GROUP_RAID1 | |
@@ -8724,13 +8778,84 @@ out: | |||
8724 | return ret; | 8778 | return ret; |
8725 | } | 8779 | } |
8726 | 8780 | ||
8781 | int btrfs_init_space_info(struct btrfs_fs_info *fs_info) | ||
8782 | { | ||
8783 | struct btrfs_space_info *space_info; | ||
8784 | int ret; | ||
8785 | |||
8786 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, | ||
8787 | &space_info); | ||
8788 | if (ret) | ||
8789 | return ret; | ||
8790 | |||
8791 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, | ||
8792 | &space_info); | ||
8793 | if (ret) | ||
8794 | return ret; | ||
8795 | |||
8796 | ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, | ||
8797 | &space_info); | ||
8798 | if (ret) | ||
8799 | return ret; | ||
8800 | |||
8801 | return ret; | ||
8802 | } | ||
8803 | |||
8727 | int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) | 8804 | int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) |
8728 | { | 8805 | { |
8729 | return unpin_extent_range(root, start, end); | 8806 | return unpin_extent_range(root, start, end); |
8730 | } | 8807 | } |
8731 | 8808 | ||
8732 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, | 8809 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, |
8733 | u64 num_bytes) | 8810 | u64 num_bytes, u64 *actual_bytes) |
8811 | { | ||
8812 | return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes); | ||
8813 | } | ||
8814 | |||
8815 | int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range) | ||
8734 | { | 8816 | { |
8735 | return btrfs_discard_extent(root, bytenr, num_bytes); | 8817 | struct btrfs_fs_info *fs_info = root->fs_info; |
8818 | struct btrfs_block_group_cache *cache = NULL; | ||
8819 | u64 group_trimmed; | ||
8820 | u64 start; | ||
8821 | u64 end; | ||
8822 | u64 trimmed = 0; | ||
8823 | int ret = 0; | ||
8824 | |||
8825 | cache = btrfs_lookup_block_group(fs_info, range->start); | ||
8826 | |||
8827 | while (cache) { | ||
8828 | if (cache->key.objectid >= (range->start + range->len)) { | ||
8829 | btrfs_put_block_group(cache); | ||
8830 | break; | ||
8831 | } | ||
8832 | |||
8833 | start = max(range->start, cache->key.objectid); | ||
8834 | end = min(range->start + range->len, | ||
8835 | cache->key.objectid + cache->key.offset); | ||
8836 | |||
8837 | if (end - start >= range->minlen) { | ||
8838 | if (!block_group_cache_done(cache)) { | ||
8839 | ret = cache_block_group(cache, NULL, root, 0); | ||
8840 | if (!ret) | ||
8841 | wait_block_group_cache_done(cache); | ||
8842 | } | ||
8843 | ret = btrfs_trim_block_group(cache, | ||
8844 | &group_trimmed, | ||
8845 | start, | ||
8846 | end, | ||
8847 | range->minlen); | ||
8848 | |||
8849 | trimmed += group_trimmed; | ||
8850 | if (ret) { | ||
8851 | btrfs_put_block_group(cache); | ||
8852 | break; | ||
8853 | } | ||
8854 | } | ||
8855 | |||
8856 | cache = next_block_group(fs_info->tree_root, cache); | ||
8857 | } | ||
8858 | |||
8859 | range->len = trimmed; | ||
8860 | return ret; | ||
8736 | } | 8861 | } |
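btrfs_trim_fs() walks every block group overlapping the requested range, trims its free space via btrfs_trim_block_group(), and reports the total back through range->len; it is presumably wired up to the FITRIM ioctl elsewhere in this merge (not shown in this hunk). A userspace sketch of driving it through plain FITRIM usage, nothing btrfs-specific assumed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
        struct fstrim_range range;
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY);   /* any path on the filesystem */
        if (fd < 0)
                return 1;

        memset(&range, 0, sizeof(range));
        range.len = (__u64)-1;          /* whole filesystem */
        range.minlen = 0;               /* trim every free extent */
        if (ioctl(fd, FITRIM, &range) < 0) {
                perror("FITRIM");
                return 1;
        }
        /* the kernel writes back how many bytes were actually trimmed */
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        return 0;
}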
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b5b92824a271..20ddb28602a8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2192,6 +2192,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, | |||
2192 | else | 2192 | else |
2193 | write_flags = WRITE; | 2193 | write_flags = WRITE; |
2194 | 2194 | ||
2195 | trace___extent_writepage(page, inode, wbc); | ||
2196 | |||
2195 | WARN_ON(!PageLocked(page)); | 2197 | WARN_ON(!PageLocked(page)); |
2196 | pg_offset = i_size & (PAGE_CACHE_SIZE - 1); | 2198 | pg_offset = i_size & (PAGE_CACHE_SIZE - 1); |
2197 | if (page->index > end_index || | 2199 | if (page->index > end_index || |
@@ -3690,6 +3692,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, | |||
3690 | "wanted %lu %lu\n", (unsigned long long)eb->start, | 3692 | "wanted %lu %lu\n", (unsigned long long)eb->start, |
3691 | eb->len, start, min_len); | 3693 | eb->len, start, min_len); |
3692 | WARN_ON(1); | 3694 | WARN_ON(1); |
3695 | return -EINVAL; | ||
3693 | } | 3696 | } |
3694 | 3697 | ||
3695 | p = extent_buffer_page(eb, i); | 3698 | p = extent_buffer_page(eb, i); |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 9318dfefd59c..f62c5442835d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -31,6 +31,7 @@ | |||
31 | #define EXTENT_BUFFER_UPTODATE 0 | 31 | #define EXTENT_BUFFER_UPTODATE 0 |
32 | #define EXTENT_BUFFER_BLOCKING 1 | 32 | #define EXTENT_BUFFER_BLOCKING 1 |
33 | #define EXTENT_BUFFER_DIRTY 2 | 33 | #define EXTENT_BUFFER_DIRTY 2 |
34 | #define EXTENT_BUFFER_CORRUPT 3 | ||
34 | 35 | ||
35 | /* these are flags for extent_clear_unlock_delalloc */ | 36 | /* these are flags for extent_clear_unlock_delalloc */ |
36 | #define EXTENT_CLEAR_UNLOCK_PAGE 0x1 | 37 | #define EXTENT_CLEAR_UNLOCK_PAGE 0x1 |
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 4f19a3e1bf32..a6a9d4e8b491 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
@@ -48,7 +48,8 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, | |||
48 | struct extent_buffer *leaf; | 48 | struct extent_buffer *leaf; |
49 | 49 | ||
50 | path = btrfs_alloc_path(); | 50 | path = btrfs_alloc_path(); |
51 | BUG_ON(!path); | 51 | if (!path) |
52 | return -ENOMEM; | ||
52 | file_key.objectid = objectid; | 53 | file_key.objectid = objectid; |
53 | file_key.offset = pos; | 54 | file_key.offset = pos; |
54 | btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); | 55 | btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY); |
@@ -169,6 +170,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, | |||
169 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 170 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
170 | 171 | ||
171 | path = btrfs_alloc_path(); | 172 | path = btrfs_alloc_path(); |
173 | if (!path) | ||
174 | return -ENOMEM; | ||
172 | if (bio->bi_size > PAGE_CACHE_SIZE * 8) | 175 | if (bio->bi_size > PAGE_CACHE_SIZE * 8) |
173 | path->reada = 2; | 176 | path->reada = 2; |
174 | 177 | ||
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f447b783bb84..656bc0a892b1 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -45,14 +45,14 @@ | |||
45 | * and be replaced with calls into generic code. | 45 | * and be replaced with calls into generic code. |
46 | */ | 46 | */ |
47 | static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, | 47 | static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, |
48 | int write_bytes, | 48 | size_t write_bytes, |
49 | struct page **prepared_pages, | 49 | struct page **prepared_pages, |
50 | struct iov_iter *i) | 50 | struct iov_iter *i) |
51 | { | 51 | { |
52 | size_t copied = 0; | 52 | size_t copied = 0; |
53 | size_t total_copied = 0; | ||
53 | int pg = 0; | 54 | int pg = 0; |
54 | int offset = pos & (PAGE_CACHE_SIZE - 1); | 55 | int offset = pos & (PAGE_CACHE_SIZE - 1); |
55 | int total_copied = 0; | ||
56 | 56 | ||
57 | while (write_bytes > 0) { | 57 | while (write_bytes > 0) { |
58 | size_t count = min_t(size_t, | 58 | size_t count = min_t(size_t, |
@@ -88,9 +88,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, | |||
88 | total_copied += copied; | 88 | total_copied += copied; |
89 | 89 | ||
90 | /* Return to btrfs_file_aio_write to fault page */ | 90 | /* Return to btrfs_file_aio_write to fault page */ |
91 | if (unlikely(copied == 0)) { | 91 | if (unlikely(copied == 0)) |
92 | break; | 92 | break; |
93 | } | ||
94 | 93 | ||
95 | if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { | 94 | if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { |
96 | offset += copied; | 95 | offset += copied; |
@@ -109,8 +108,6 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) | |||
109 | { | 108 | { |
110 | size_t i; | 109 | size_t i; |
111 | for (i = 0; i < num_pages; i++) { | 110 | for (i = 0; i < num_pages; i++) { |
112 | if (!pages[i]) | ||
113 | break; | ||
114 | /* page checked is some magic around finding pages that | 111 | /* page checked is some magic around finding pages that |
115 | * have been modified without going through btrfs_set_page_dirty | 112 | * have been modified without going through btrfs_set_page_dirty |
116 | * clear it here | 113 | * clear it here |
@@ -130,13 +127,12 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) | |||
130 | * this also makes the decision about creating an inline extent vs | 127 | * this also makes the decision about creating an inline extent vs |
131 | * doing real data extents, marking pages dirty and delalloc as required. | 128 | * doing real data extents, marking pages dirty and delalloc as required. |
132 | */ | 129 | */ |
133 | static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, | 130 | static noinline int dirty_and_release_pages(struct btrfs_root *root, |
134 | struct btrfs_root *root, | 131 | struct file *file, |
135 | struct file *file, | 132 | struct page **pages, |
136 | struct page **pages, | 133 | size_t num_pages, |
137 | size_t num_pages, | 134 | loff_t pos, |
138 | loff_t pos, | 135 | size_t write_bytes) |
139 | size_t write_bytes) | ||
140 | { | 136 | { |
141 | int err = 0; | 137 | int err = 0; |
142 | int i; | 138 | int i; |
@@ -154,7 +150,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, | |||
154 | end_of_last_block = start_pos + num_bytes - 1; | 150 | end_of_last_block = start_pos + num_bytes - 1; |
155 | err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, | 151 | err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, |
156 | NULL); | 152 | NULL); |
157 | BUG_ON(err); | 153 | if (err) |
154 | return err; | ||
158 | 155 | ||
159 | for (i = 0; i < num_pages; i++) { | 156 | for (i = 0; i < num_pages; i++) { |
160 | struct page *p = pages[i]; | 157 | struct page *p = pages[i]; |
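
The hunk above stops crashing the machine with BUG_ON(err) after btrfs_set_extent_delalloc() and instead propagates the error to the caller. A minimal sketch of that pattern, with the delalloc step abstracted into a callback so the snippet stays self-contained:

#include <linux/fs.h>
#include <linux/types.h>

/* Hedged sketch: propagate the failure instead of BUG_ON()ing on it. */
static int prepare_range(struct inode *inode, u64 start, u64 end,
			 int (*set_delalloc)(struct inode *, u64, u64))
{
	int err;

	err = set_delalloc(inode, start, end);	/* hypothetical callback */
	if (err)				/* was: BUG_ON(err); */
		return err;

	return 0;
}
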
@@ -162,13 +159,14 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, | |||
162 | ClearPageChecked(p); | 159 | ClearPageChecked(p); |
163 | set_page_dirty(p); | 160 | set_page_dirty(p); |
164 | } | 161 | } |
165 | if (end_pos > isize) { | 162 | |
163 | /* | ||
164 | * we've only changed i_size in ram, and we haven't updated | ||
165 | * the disk i_size. There is no need to log the inode | ||
166 | * at this time. | ||
167 | */ | ||
168 | if (end_pos > isize) | ||
166 | i_size_write(inode, end_pos); | 169 | i_size_write(inode, end_pos); |
167 | /* we've only changed i_size in ram, and we haven't updated | ||
168 | * the disk i_size. There is no need to log the inode | ||
169 | * at this time. | ||
170 | */ | ||
171 | } | ||
172 | return 0; | 170 | return 0; |
173 | } | 171 | } |
174 | 172 | ||
@@ -610,6 +608,8 @@ again: | |||
610 | key.offset = split; | 608 | key.offset = split; |
611 | 609 | ||
612 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | 610 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
611 | if (ret < 0) | ||
612 | goto out; | ||
613 | if (ret > 0 && path->slots[0] > 0) | 613 | if (ret > 0 && path->slots[0] > 0) |
614 | path->slots[0]--; | 614 | path->slots[0]--; |
615 | 615 | ||
@@ -819,12 +819,11 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, | |||
819 | last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; | 819 | last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; |
820 | 820 | ||
821 | if (start_pos > inode->i_size) { | 821 | if (start_pos > inode->i_size) { |
822 | err = btrfs_cont_expand(inode, start_pos); | 822 | err = btrfs_cont_expand(inode, i_size_read(inode), start_pos); |
823 | if (err) | 823 | if (err) |
824 | return err; | 824 | return err; |
825 | } | 825 | } |
826 | 826 | ||
827 | memset(pages, 0, num_pages * sizeof(struct page *)); | ||
828 | again: | 827 | again: |
829 | for (i = 0; i < num_pages; i++) { | 828 | for (i = 0; i < num_pages; i++) { |
830 | pages[i] = grab_cache_page(inode->i_mapping, index + i); | 829 | pages[i] = grab_cache_page(inode->i_mapping, index + i); |
@@ -896,156 +895,71 @@ fail: | |||
896 | 895 | ||
897 | } | 896 | } |
898 | 897 | ||
899 | static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | 898 | static noinline ssize_t __btrfs_buffered_write(struct file *file, |
900 | const struct iovec *iov, | 899 | struct iov_iter *i, |
901 | unsigned long nr_segs, loff_t pos) | 900 | loff_t pos) |
902 | { | 901 | { |
903 | struct file *file = iocb->ki_filp; | ||
904 | struct inode *inode = fdentry(file)->d_inode; | 902 | struct inode *inode = fdentry(file)->d_inode; |
905 | struct btrfs_root *root = BTRFS_I(inode)->root; | 903 | struct btrfs_root *root = BTRFS_I(inode)->root; |
906 | struct page **pages = NULL; | 904 | struct page **pages = NULL; |
907 | struct iov_iter i; | ||
908 | loff_t *ppos = &iocb->ki_pos; | ||
909 | loff_t start_pos; | ||
910 | ssize_t num_written = 0; | ||
911 | ssize_t err = 0; | ||
912 | size_t count; | ||
913 | size_t ocount; | ||
914 | int ret = 0; | ||
915 | int nrptrs; | ||
916 | unsigned long first_index; | 905 | unsigned long first_index; |
917 | unsigned long last_index; | 906 | unsigned long last_index; |
918 | int will_write; | 907 | size_t num_written = 0; |
919 | int buffered = 0; | 908 | int nrptrs; |
920 | int copied = 0; | 909 | int ret; |
921 | int dirty_pages = 0; | ||
922 | |||
923 | will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || | ||
924 | (file->f_flags & O_DIRECT)); | ||
925 | |||
926 | start_pos = pos; | ||
927 | |||
928 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); | ||
929 | |||
930 | mutex_lock(&inode->i_mutex); | ||
931 | |||
932 | err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); | ||
933 | if (err) | ||
934 | goto out; | ||
935 | count = ocount; | ||
936 | |||
937 | current->backing_dev_info = inode->i_mapping->backing_dev_info; | ||
938 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | ||
939 | if (err) | ||
940 | goto out; | ||
941 | |||
942 | if (count == 0) | ||
943 | goto out; | ||
944 | |||
945 | err = file_remove_suid(file); | ||
946 | if (err) | ||
947 | goto out; | ||
948 | |||
949 | /* | ||
950 | * If BTRFS flips readonly due to some impossible error | ||
951 | * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), | ||
952 | * although we have opened a file as writable, we have | ||
953 | * to stop this write operation to ensure FS consistency. | ||
954 | */ | ||
955 | if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | ||
956 | err = -EROFS; | ||
957 | goto out; | ||
958 | } | ||
959 | |||
960 | file_update_time(file); | ||
961 | BTRFS_I(inode)->sequence++; | ||
962 | |||
963 | if (unlikely(file->f_flags & O_DIRECT)) { | ||
964 | num_written = generic_file_direct_write(iocb, iov, &nr_segs, | ||
965 | pos, ppos, count, | ||
966 | ocount); | ||
967 | /* | ||
968 | * the generic O_DIRECT will update in-memory i_size after the | ||
969 | * DIOs are done. But our endio handlers that update the on | ||
970 | * disk i_size never update past the in memory i_size. So we | ||
971 | * need one more update here to catch any additions to the | ||
972 | * file | ||
973 | */ | ||
974 | if (inode->i_size != BTRFS_I(inode)->disk_i_size) { | ||
975 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); | ||
976 | mark_inode_dirty(inode); | ||
977 | } | ||
978 | |||
979 | if (num_written < 0) { | ||
980 | ret = num_written; | ||
981 | num_written = 0; | ||
982 | goto out; | ||
983 | } else if (num_written == count) { | ||
984 | /* pick up pos changes done by the generic code */ | ||
985 | pos = *ppos; | ||
986 | goto out; | ||
987 | } | ||
988 | /* | ||
989 | * We are going to do buffered for the rest of the range, so we | ||
990 | * need to make sure to invalidate the buffered pages when we're | ||
991 | * done. | ||
992 | */ | ||
993 | buffered = 1; | ||
994 | pos += num_written; | ||
995 | } | ||
996 | 910 | ||
997 | iov_iter_init(&i, iov, nr_segs, count, num_written); | 911 | nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / |
998 | nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) / | ||
999 | PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / | 912 | PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / |
1000 | (sizeof(struct page *))); | 913 | (sizeof(struct page *))); |
1001 | pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); | 914 | pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); |
1002 | if (!pages) { | 915 | if (!pages) |
1003 | ret = -ENOMEM; | 916 | return -ENOMEM; |
1004 | goto out; | ||
1005 | } | ||
1006 | |||
1007 | /* generic_write_checks can change our pos */ | ||
1008 | start_pos = pos; | ||
1009 | 917 | ||
1010 | first_index = pos >> PAGE_CACHE_SHIFT; | 918 | first_index = pos >> PAGE_CACHE_SHIFT; |
1011 | last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; | 919 | last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT; |
1012 | 920 | ||
1013 | while (iov_iter_count(&i) > 0) { | 921 | while (iov_iter_count(i) > 0) { |
1014 | size_t offset = pos & (PAGE_CACHE_SIZE - 1); | 922 | size_t offset = pos & (PAGE_CACHE_SIZE - 1); |
1015 | size_t write_bytes = min(iov_iter_count(&i), | 923 | size_t write_bytes = min(iov_iter_count(i), |
1016 | nrptrs * (size_t)PAGE_CACHE_SIZE - | 924 | nrptrs * (size_t)PAGE_CACHE_SIZE - |
1017 | offset); | 925 | offset); |
1018 | size_t num_pages = (write_bytes + offset + | 926 | size_t num_pages = (write_bytes + offset + |
1019 | PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 927 | PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
928 | size_t dirty_pages; | ||
929 | size_t copied; | ||
1020 | 930 | ||
1021 | WARN_ON(num_pages > nrptrs); | 931 | WARN_ON(num_pages > nrptrs); |
1022 | memset(pages, 0, sizeof(struct page *) * nrptrs); | ||
1023 | 932 | ||
1024 | /* | 933 | /* |
1025 | * Fault pages before locking them in prepare_pages | 934 | * Fault pages before locking them in prepare_pages |
1026 | * to avoid recursive lock | 935 | * to avoid recursive lock |
1027 | */ | 936 | */ |
1028 | if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) { | 937 | if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) { |
1029 | ret = -EFAULT; | 938 | ret = -EFAULT; |
1030 | goto out; | 939 | break; |
1031 | } | 940 | } |
1032 | 941 | ||
1033 | ret = btrfs_delalloc_reserve_space(inode, | 942 | ret = btrfs_delalloc_reserve_space(inode, |
1034 | num_pages << PAGE_CACHE_SHIFT); | 943 | num_pages << PAGE_CACHE_SHIFT); |
1035 | if (ret) | 944 | if (ret) |
1036 | goto out; | 945 | break; |
1037 | 946 | ||
947 | /* | ||
948 | * This is going to setup the pages array with the number of | ||
949 | * pages we want, so we don't really need to worry about the | ||
950 | * contents of pages from loop to loop | ||
951 | */ | ||
1038 | ret = prepare_pages(root, file, pages, num_pages, | 952 | ret = prepare_pages(root, file, pages, num_pages, |
1039 | pos, first_index, last_index, | 953 | pos, first_index, last_index, |
1040 | write_bytes); | 954 | write_bytes); |
1041 | if (ret) { | 955 | if (ret) { |
1042 | btrfs_delalloc_release_space(inode, | 956 | btrfs_delalloc_release_space(inode, |
1043 | num_pages << PAGE_CACHE_SHIFT); | 957 | num_pages << PAGE_CACHE_SHIFT); |
1044 | goto out; | 958 | break; |
1045 | } | 959 | } |
1046 | 960 | ||
1047 | copied = btrfs_copy_from_user(pos, num_pages, | 961 | copied = btrfs_copy_from_user(pos, num_pages, |
1048 | write_bytes, pages, &i); | 962 | write_bytes, pages, i); |
1049 | 963 | ||
1050 | /* | 964 | /* |
1051 | * if we have trouble faulting in the pages, fall | 965 | * if we have trouble faulting in the pages, fall |
@@ -1061,6 +975,13 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
1061 | PAGE_CACHE_SIZE - 1) >> | 975 | PAGE_CACHE_SIZE - 1) >> |
1062 | PAGE_CACHE_SHIFT; | 976 | PAGE_CACHE_SHIFT; |
1063 | 977 | ||
978 | /* | ||
979 | * If we had a short copy we need to release the excess delalloc | ||
980 | * bytes we reserved. We need to increment outstanding_extents | ||
981 | * because btrfs_delalloc_release_space will decrement it, but | ||
982 | * we still have an outstanding extent for the chunk we actually | ||
983 | * managed to copy. | ||
984 | */ | ||
1064 | if (num_pages > dirty_pages) { | 985 | if (num_pages > dirty_pages) { |
1065 | if (copied > 0) | 986 | if (copied > 0) |
1066 | atomic_inc( | 987 | atomic_inc( |
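
The comment added above explains why the write loop hands back part of its delalloc reservation after a short copy. A rough, hedged sketch of that reserve-then-trim-back accounting follows; space_reserve()/space_release() are made-up stand-ins for btrfs_delalloc_reserve_space()/btrfs_delalloc_release_space(), and the outstanding_extents bookkeeping the real code does is left out.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical reservation helpers used only for this sketch. */
static int space_reserve(struct inode *inode, u64 bytes);
static void space_release(struct inode *inode, u64 bytes);

/* Hedged sketch: reserve for every page we intend to dirty, then give
 * back the tail a short copy_from_user() never touched. */
static int reserve_for_copy(struct inode *inode, u64 num_pages,
			    u64 dirty_pages)
{
	int ret;

	ret = space_reserve(inode, num_pages << PAGE_SHIFT);
	if (ret)
		return ret;

	if (num_pages > dirty_pages)
		space_release(inode, (num_pages - dirty_pages) << PAGE_SHIFT);

	return 0;
}
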
@@ -1071,39 +992,157 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
1071 | } | 992 | } |
1072 | 993 | ||
1073 | if (copied > 0) { | 994 | if (copied > 0) { |
1074 | dirty_and_release_pages(NULL, root, file, pages, | 995 | ret = dirty_and_release_pages(root, file, pages, |
1075 | dirty_pages, pos, copied); | 996 | dirty_pages, pos, |
997 | copied); | ||
998 | if (ret) { | ||
999 | btrfs_delalloc_release_space(inode, | ||
1000 | dirty_pages << PAGE_CACHE_SHIFT); | ||
1001 | btrfs_drop_pages(pages, num_pages); | ||
1002 | break; | ||
1003 | } | ||
1076 | } | 1004 | } |
1077 | 1005 | ||
1078 | btrfs_drop_pages(pages, num_pages); | 1006 | btrfs_drop_pages(pages, num_pages); |
1079 | 1007 | ||
1080 | if (copied > 0) { | 1008 | cond_resched(); |
1081 | if (will_write) { | 1009 | |
1082 | filemap_fdatawrite_range(inode->i_mapping, pos, | 1010 | balance_dirty_pages_ratelimited_nr(inode->i_mapping, |
1083 | pos + copied - 1); | 1011 | dirty_pages); |
1084 | } else { | 1012 | if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1) |
1085 | balance_dirty_pages_ratelimited_nr( | 1013 | btrfs_btree_balance_dirty(root, 1); |
1086 | inode->i_mapping, | 1014 | btrfs_throttle(root); |
1087 | dirty_pages); | ||
1088 | if (dirty_pages < | ||
1089 | (root->leafsize >> PAGE_CACHE_SHIFT) + 1) | ||
1090 | btrfs_btree_balance_dirty(root, 1); | ||
1091 | btrfs_throttle(root); | ||
1092 | } | ||
1093 | } | ||
1094 | 1015 | ||
1095 | pos += copied; | 1016 | pos += copied; |
1096 | num_written += copied; | 1017 | num_written += copied; |
1018 | } | ||
1097 | 1019 | ||
1098 | cond_resched(); | 1020 | kfree(pages); |
1021 | |||
1022 | return num_written ? num_written : ret; | ||
1023 | } | ||
1024 | |||
1025 | static ssize_t __btrfs_direct_write(struct kiocb *iocb, | ||
1026 | const struct iovec *iov, | ||
1027 | unsigned long nr_segs, loff_t pos, | ||
1028 | loff_t *ppos, size_t count, size_t ocount) | ||
1029 | { | ||
1030 | struct file *file = iocb->ki_filp; | ||
1031 | struct inode *inode = fdentry(file)->d_inode; | ||
1032 | struct iov_iter i; | ||
1033 | ssize_t written; | ||
1034 | ssize_t written_buffered; | ||
1035 | loff_t endbyte; | ||
1036 | int err; | ||
1037 | |||
1038 | written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos, | ||
1039 | count, ocount); | ||
1040 | |||
1041 | /* | ||
1042 | * the generic O_DIRECT will update in-memory i_size after the | ||
1043 | * DIOs are done. But our endio handlers that update the on | ||
1044 | * disk i_size never update past the in memory i_size. So we | ||
1045 | * need one more update here to catch any additions to the | ||
1046 | * file | ||
1047 | */ | ||
1048 | if (inode->i_size != BTRFS_I(inode)->disk_i_size) { | ||
1049 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); | ||
1050 | mark_inode_dirty(inode); | ||
1099 | } | 1051 | } |
1052 | |||
1053 | if (written < 0 || written == count) | ||
1054 | return written; | ||
1055 | |||
1056 | pos += written; | ||
1057 | count -= written; | ||
1058 | iov_iter_init(&i, iov, nr_segs, count, written); | ||
1059 | written_buffered = __btrfs_buffered_write(file, &i, pos); | ||
1060 | if (written_buffered < 0) { | ||
1061 | err = written_buffered; | ||
1062 | goto out; | ||
1063 | } | ||
1064 | endbyte = pos + written_buffered - 1; | ||
1065 | err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); | ||
1066 | if (err) | ||
1067 | goto out; | ||
1068 | written += written_buffered; | ||
1069 | *ppos = pos + written_buffered; | ||
1070 | invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT, | ||
1071 | endbyte >> PAGE_CACHE_SHIFT); | ||
1100 | out: | 1072 | out: |
1101 | mutex_unlock(&inode->i_mutex); | 1073 | return written ? written : err; |
1102 | if (ret) | 1074 | } |
1103 | err = ret; | ||
1104 | 1075 | ||
1105 | kfree(pages); | 1076 | static ssize_t btrfs_file_aio_write(struct kiocb *iocb, |
1106 | *ppos = pos; | 1077 | const struct iovec *iov, |
1078 | unsigned long nr_segs, loff_t pos) | ||
1079 | { | ||
1080 | struct file *file = iocb->ki_filp; | ||
1081 | struct inode *inode = fdentry(file)->d_inode; | ||
1082 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
1083 | loff_t *ppos = &iocb->ki_pos; | ||
1084 | ssize_t num_written = 0; | ||
1085 | ssize_t err = 0; | ||
1086 | size_t count, ocount; | ||
1087 | |||
1088 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); | ||
1089 | |||
1090 | mutex_lock(&inode->i_mutex); | ||
1091 | |||
1092 | err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); | ||
1093 | if (err) { | ||
1094 | mutex_unlock(&inode->i_mutex); | ||
1095 | goto out; | ||
1096 | } | ||
1097 | count = ocount; | ||
1098 | |||
1099 | current->backing_dev_info = inode->i_mapping->backing_dev_info; | ||
1100 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | ||
1101 | if (err) { | ||
1102 | mutex_unlock(&inode->i_mutex); | ||
1103 | goto out; | ||
1104 | } | ||
1105 | |||
1106 | if (count == 0) { | ||
1107 | mutex_unlock(&inode->i_mutex); | ||
1108 | goto out; | ||
1109 | } | ||
1110 | |||
1111 | err = file_remove_suid(file); | ||
1112 | if (err) { | ||
1113 | mutex_unlock(&inode->i_mutex); | ||
1114 | goto out; | ||
1115 | } | ||
1116 | |||
1117 | /* | ||
1118 | * If BTRFS flips readonly due to some impossible error | ||
1119 | * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), | ||
1120 | * although we have opened a file as writable, we have | ||
1121 | * to stop this write operation to ensure FS consistency. | ||
1122 | */ | ||
1123 | if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | ||
1124 | mutex_unlock(&inode->i_mutex); | ||
1125 | err = -EROFS; | ||
1126 | goto out; | ||
1127 | } | ||
1128 | |||
1129 | file_update_time(file); | ||
1130 | BTRFS_I(inode)->sequence++; | ||
1131 | |||
1132 | if (unlikely(file->f_flags & O_DIRECT)) { | ||
1133 | num_written = __btrfs_direct_write(iocb, iov, nr_segs, | ||
1134 | pos, ppos, count, ocount); | ||
1135 | } else { | ||
1136 | struct iov_iter i; | ||
1137 | |||
1138 | iov_iter_init(&i, iov, nr_segs, count, num_written); | ||
1139 | |||
1140 | num_written = __btrfs_buffered_write(file, &i, pos); | ||
1141 | if (num_written > 0) | ||
1142 | *ppos = pos + num_written; | ||
1143 | } | ||
1144 | |||
1145 | mutex_unlock(&inode->i_mutex); | ||
1107 | 1146 | ||
1108 | /* | 1147 | /* |
1109 | * we want to make sure fsync finds this change | 1148 | * we want to make sure fsync finds this change |
@@ -1118,43 +1157,12 @@ out: | |||
1118 | * one running right now. | 1157 | * one running right now. |
1119 | */ | 1158 | */ |
1120 | BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; | 1159 | BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; |
1121 | 1160 | if (num_written > 0 || num_written == -EIOCBQUEUED) { | |
1122 | if (num_written > 0 && will_write) { | 1161 | err = generic_write_sync(file, pos, num_written); |
1123 | struct btrfs_trans_handle *trans; | 1162 | if (err < 0 && num_written > 0) |
1124 | |||
1125 | err = btrfs_wait_ordered_range(inode, start_pos, num_written); | ||
1126 | if (err) | ||
1127 | num_written = err; | 1163 | num_written = err; |
1128 | |||
1129 | if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { | ||
1130 | trans = btrfs_start_transaction(root, 0); | ||
1131 | if (IS_ERR(trans)) { | ||
1132 | num_written = PTR_ERR(trans); | ||
1133 | goto done; | ||
1134 | } | ||
1135 | mutex_lock(&inode->i_mutex); | ||
1136 | ret = btrfs_log_dentry_safe(trans, root, | ||
1137 | file->f_dentry); | ||
1138 | mutex_unlock(&inode->i_mutex); | ||
1139 | if (ret == 0) { | ||
1140 | ret = btrfs_sync_log(trans, root); | ||
1141 | if (ret == 0) | ||
1142 | btrfs_end_transaction(trans, root); | ||
1143 | else | ||
1144 | btrfs_commit_transaction(trans, root); | ||
1145 | } else if (ret != BTRFS_NO_LOG_SYNC) { | ||
1146 | btrfs_commit_transaction(trans, root); | ||
1147 | } else { | ||
1148 | btrfs_end_transaction(trans, root); | ||
1149 | } | ||
1150 | } | ||
1151 | if (file->f_flags & O_DIRECT && buffered) { | ||
1152 | invalidate_mapping_pages(inode->i_mapping, | ||
1153 | start_pos >> PAGE_CACHE_SHIFT, | ||
1154 | (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); | ||
1155 | } | ||
1156 | } | 1164 | } |
1157 | done: | 1165 | out: |
1158 | current->backing_dev_info = NULL; | 1166 | current->backing_dev_info = NULL; |
1159 | return num_written ? num_written : err; | 1167 | return num_written ? num_written : err; |
1160 | } | 1168 | } |
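
The hunk above removes the hand-rolled wait-plus-tree-log-commit path and relies on generic_write_sync(), which only issues the flush when O_DSYNC or IS_SYNC applies. A minimal sketch of the caller-side pattern, using the generic_write_sync(file, pos, count) form shown in the diff and ignoring the -EIOCBQUEUED case for brevity:

#include <linux/fs.h>

/* Hedged sketch: fold a sync failure into the return value only when
 * some data was actually written, as the new code above does. */
static ssize_t finish_write(struct file *file, loff_t pos, ssize_t written)
{
	if (written > 0) {
		int err = generic_write_sync(file, pos, written);

		if (err < 0)
			written = err;
	}
	return written;
}
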
@@ -1197,6 +1205,7 @@ int btrfs_sync_file(struct file *file, int datasync) | |||
1197 | int ret = 0; | 1205 | int ret = 0; |
1198 | struct btrfs_trans_handle *trans; | 1206 | struct btrfs_trans_handle *trans; |
1199 | 1207 | ||
1208 | trace_btrfs_sync_file(file, datasync); | ||
1200 | 1209 | ||
1201 | /* we wait first, since the writeback may change the inode */ | 1210 | /* we wait first, since the writeback may change the inode */ |
1202 | root->log_batch++; | 1211 | root->log_batch++; |
@@ -1324,7 +1333,8 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
1324 | goto out; | 1333 | goto out; |
1325 | 1334 | ||
1326 | if (alloc_start > inode->i_size) { | 1335 | if (alloc_start > inode->i_size) { |
1327 | ret = btrfs_cont_expand(inode, alloc_start); | 1336 | ret = btrfs_cont_expand(inode, i_size_read(inode), |
1337 | alloc_start); | ||
1328 | if (ret) | 1338 | if (ret) |
1329 | goto out; | 1339 | goto out; |
1330 | } | 1340 | } |
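
Taken together, the file.c changes split the old monolithic btrfs_file_aio_write() into __btrfs_buffered_write() and __btrfs_direct_write(), with the direct path falling back to the buffered path for any bytes O_DIRECT could not complete and then flushing and invalidating that tail range. A hedged sketch of that fallback shape, with do_direct()/do_buffered() as stand-ins for the real helpers:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical write helpers standing in for the direct and buffered paths. */
static ssize_t do_direct(struct file *file, loff_t pos, size_t count);
static ssize_t do_buffered(struct file *file, loff_t pos, size_t count);

/* Hedged sketch of the direct-write-with-buffered-fallback shape. */
static ssize_t direct_then_buffered(struct file *file, loff_t pos, size_t count)
{
	ssize_t written, more;
	loff_t end;
	int err;

	written = do_direct(file, pos, count);
	if (written < 0 || written == (ssize_t)count)
		return written;

	pos += written;
	count -= written;

	more = do_buffered(file, pos, count);
	if (more < 0)
		return written ? written : more;

	/* Flush the buffered tail and drop it from the page cache so a
	 * later O_DIRECT access sees coherent data, as the diff does. */
	end = pos + more - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, end);
	if (err)
		return written ? written : err;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 end >> PAGE_SHIFT);

	return written + more;
}
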
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index a0390657451b..0037427d8a9d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -393,7 +393,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
393 | break; | 393 | break; |
394 | 394 | ||
395 | need_loop = 1; | 395 | need_loop = 1; |
396 | e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); | 396 | e = kmem_cache_zalloc(btrfs_free_space_cachep, |
397 | GFP_NOFS); | ||
397 | if (!e) { | 398 | if (!e) { |
398 | kunmap(page); | 399 | kunmap(page); |
399 | unlock_page(page); | 400 | unlock_page(page); |
@@ -405,7 +406,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
405 | e->bytes = le64_to_cpu(entry->bytes); | 406 | e->bytes = le64_to_cpu(entry->bytes); |
406 | if (!e->bytes) { | 407 | if (!e->bytes) { |
407 | kunmap(page); | 408 | kunmap(page); |
408 | kfree(e); | 409 | kmem_cache_free(btrfs_free_space_cachep, e); |
409 | unlock_page(page); | 410 | unlock_page(page); |
410 | page_cache_release(page); | 411 | page_cache_release(page); |
411 | goto free_cache; | 412 | goto free_cache; |
@@ -420,7 +421,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
420 | e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | 421 | e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); |
421 | if (!e->bitmap) { | 422 | if (!e->bitmap) { |
422 | kunmap(page); | 423 | kunmap(page); |
423 | kfree(e); | 424 | kmem_cache_free( |
425 | btrfs_free_space_cachep, e); | ||
424 | unlock_page(page); | 426 | unlock_page(page); |
425 | page_cache_release(page); | 427 | page_cache_release(page); |
426 | goto free_cache; | 428 | goto free_cache; |
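
The free-space-cache.c hunks swap kzalloc()/kfree() of struct btrfs_free_space for a dedicated slab cache, btrfs_free_space_cachep, which this patch defines further down in inode.c. A minimal, hedged sketch of that slab pattern with a made-up struct and cache name:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

struct example_entry {		/* stand-in for struct btrfs_free_space */
	u64 offset;
	u64 bytes;
};

static struct kmem_cache *example_cachep;

static int example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_entry_cache",
					   sizeof(struct example_entry), 0,
					   SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					   NULL);
	return example_cachep ? 0 : -ENOMEM;
}

static struct example_entry *example_alloc(void)
{
	/* zeroed allocation from the dedicated cache, GFP_NOFS as in the diff */
	return kmem_cache_zalloc(example_cachep, GFP_NOFS);
}

static void example_free(struct example_entry *e)
{
	kmem_cache_free(example_cachep, e);
}

A dedicated cache keeps these small, frequently recycled objects out of the generic kmalloc buckets and makes their memory usage visible in slabinfo.
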
@@ -1187,7 +1189,7 @@ static void free_bitmap(struct btrfs_block_group_cache *block_group, | |||
1187 | { | 1189 | { |
1188 | unlink_free_space(block_group, bitmap_info); | 1190 | unlink_free_space(block_group, bitmap_info); |
1189 | kfree(bitmap_info->bitmap); | 1191 | kfree(bitmap_info->bitmap); |
1190 | kfree(bitmap_info); | 1192 | kmem_cache_free(btrfs_free_space_cachep, bitmap_info); |
1191 | block_group->total_bitmaps--; | 1193 | block_group->total_bitmaps--; |
1192 | recalculate_thresholds(block_group); | 1194 | recalculate_thresholds(block_group); |
1193 | } | 1195 | } |
@@ -1285,9 +1287,22 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, | |||
1285 | * If we are below the extents threshold then we can add this as an | 1287 | * If we are below the extents threshold then we can add this as an |
1286 | * extent, and don't have to deal with the bitmap | 1288 | * extent, and don't have to deal with the bitmap |
1287 | */ | 1289 | */ |
1288 | if (block_group->free_extents < block_group->extents_thresh && | 1290 | if (block_group->free_extents < block_group->extents_thresh) { |
1289 | info->bytes > block_group->sectorsize * 4) | 1291 | /* |
1290 | return 0; | 1292 | * If this block group has some small extents we don't want to |
1293 | * use up all of our free slots in the cache with them, we want | ||
1294 | * to reserve them to larger extents, however if we have plent | ||
1295 | * of cache left then go ahead an dadd them, no sense in adding | ||
1296 | * the overhead of a bitmap if we don't have to. | ||
1297 | */ | ||
1298 | if (info->bytes <= block_group->sectorsize * 4) { | ||
1299 | if (block_group->free_extents * 2 <= | ||
1300 | block_group->extents_thresh) | ||
1301 | return 0; | ||
1302 | } else { | ||
1303 | return 0; | ||
1304 | } | ||
1305 | } | ||
1291 | 1306 | ||
1292 | /* | 1307 | /* |
1293 | * some block groups are so tiny they can't be enveloped by a bitmap, so | 1308 | * some block groups are so tiny they can't be enveloped by a bitmap, so |
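
The insert_into_bitmap() hunk above replaces a single size check with a two-level heuristic: while the block group is below its extents threshold, small extents stay as plain extents only if at least half of the extent slots are still free; otherwise they get pushed into a bitmap, and large extents always stay as plain extents. A hedged restatement of that decision as a stand-alone helper (the names and the factor of four are taken from the diff, the helper itself is illustrative):

#include <linux/types.h>

/* Hedged sketch of the "should this free range stay a plain extent?"
 * decision added above: returns true to keep it as an extent entry. */
static bool keep_as_extent(u64 bytes, u64 sectorsize,
			   u32 free_extents, u32 extents_thresh)
{
	if (free_extents >= extents_thresh)
		return false;		/* over threshold: use a bitmap */

	if (bytes <= sectorsize * 4)	/* small extent */
		return free_extents * 2 <= extents_thresh;

	return true;			/* large extent, plenty of room left */
}
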
@@ -1342,8 +1357,8 @@ new_bitmap: | |||
1342 | 1357 | ||
1343 | /* no pre-allocated info, allocate a new one */ | 1358 | /* no pre-allocated info, allocate a new one */ |
1344 | if (!info) { | 1359 | if (!info) { |
1345 | info = kzalloc(sizeof(struct btrfs_free_space), | 1360 | info = kmem_cache_zalloc(btrfs_free_space_cachep, |
1346 | GFP_NOFS); | 1361 | GFP_NOFS); |
1347 | if (!info) { | 1362 | if (!info) { |
1348 | spin_lock(&block_group->tree_lock); | 1363 | spin_lock(&block_group->tree_lock); |
1349 | ret = -ENOMEM; | 1364 | ret = -ENOMEM; |
@@ -1365,7 +1380,7 @@ out: | |||
1365 | if (info) { | 1380 | if (info) { |
1366 | if (info->bitmap) | 1381 | if (info->bitmap) |
1367 | kfree(info->bitmap); | 1382 | kfree(info->bitmap); |
1368 | kfree(info); | 1383 | kmem_cache_free(btrfs_free_space_cachep, info); |
1369 | } | 1384 | } |
1370 | 1385 | ||
1371 | return ret; | 1386 | return ret; |
@@ -1398,7 +1413,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, | |||
1398 | else | 1413 | else |
1399 | __unlink_free_space(block_group, right_info); | 1414 | __unlink_free_space(block_group, right_info); |
1400 | info->bytes += right_info->bytes; | 1415 | info->bytes += right_info->bytes; |
1401 | kfree(right_info); | 1416 | kmem_cache_free(btrfs_free_space_cachep, right_info); |
1402 | merged = true; | 1417 | merged = true; |
1403 | } | 1418 | } |
1404 | 1419 | ||
@@ -1410,7 +1425,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, | |||
1410 | __unlink_free_space(block_group, left_info); | 1425 | __unlink_free_space(block_group, left_info); |
1411 | info->offset = left_info->offset; | 1426 | info->offset = left_info->offset; |
1412 | info->bytes += left_info->bytes; | 1427 | info->bytes += left_info->bytes; |
1413 | kfree(left_info); | 1428 | kmem_cache_free(btrfs_free_space_cachep, left_info); |
1414 | merged = true; | 1429 | merged = true; |
1415 | } | 1430 | } |
1416 | 1431 | ||
@@ -1423,7 +1438,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1423 | struct btrfs_free_space *info; | 1438 | struct btrfs_free_space *info; |
1424 | int ret = 0; | 1439 | int ret = 0; |
1425 | 1440 | ||
1426 | info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); | 1441 | info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); |
1427 | if (!info) | 1442 | if (!info) |
1428 | return -ENOMEM; | 1443 | return -ENOMEM; |
1429 | 1444 | ||
@@ -1450,7 +1465,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1450 | link: | 1465 | link: |
1451 | ret = link_free_space(block_group, info); | 1466 | ret = link_free_space(block_group, info); |
1452 | if (ret) | 1467 | if (ret) |
1453 | kfree(info); | 1468 | kmem_cache_free(btrfs_free_space_cachep, info); |
1454 | out: | 1469 | out: |
1455 | spin_unlock(&block_group->tree_lock); | 1470 | spin_unlock(&block_group->tree_lock); |
1456 | 1471 | ||
@@ -1520,7 +1535,7 @@ again: | |||
1520 | kfree(info->bitmap); | 1535 | kfree(info->bitmap); |
1521 | block_group->total_bitmaps--; | 1536 | block_group->total_bitmaps--; |
1522 | } | 1537 | } |
1523 | kfree(info); | 1538 | kmem_cache_free(btrfs_free_space_cachep, info); |
1524 | goto out_lock; | 1539 | goto out_lock; |
1525 | } | 1540 | } |
1526 | 1541 | ||
@@ -1556,7 +1571,7 @@ again: | |||
1556 | /* the hole we're creating ends at the end | 1571 | /* the hole we're creating ends at the end |
1557 | * of the info struct, just free the info | 1572 | * of the info struct, just free the info |
1558 | */ | 1573 | */ |
1559 | kfree(info); | 1574 | kmem_cache_free(btrfs_free_space_cachep, info); |
1560 | } | 1575 | } |
1561 | spin_unlock(&block_group->tree_lock); | 1576 | spin_unlock(&block_group->tree_lock); |
1562 | 1577 | ||
@@ -1629,30 +1644,28 @@ __btrfs_return_cluster_to_free_space( | |||
1629 | { | 1644 | { |
1630 | struct btrfs_free_space *entry; | 1645 | struct btrfs_free_space *entry; |
1631 | struct rb_node *node; | 1646 | struct rb_node *node; |
1632 | bool bitmap; | ||
1633 | 1647 | ||
1634 | spin_lock(&cluster->lock); | 1648 | spin_lock(&cluster->lock); |
1635 | if (cluster->block_group != block_group) | 1649 | if (cluster->block_group != block_group) |
1636 | goto out; | 1650 | goto out; |
1637 | 1651 | ||
1638 | bitmap = cluster->points_to_bitmap; | ||
1639 | cluster->block_group = NULL; | 1652 | cluster->block_group = NULL; |
1640 | cluster->window_start = 0; | 1653 | cluster->window_start = 0; |
1641 | list_del_init(&cluster->block_group_list); | 1654 | list_del_init(&cluster->block_group_list); |
1642 | cluster->points_to_bitmap = false; | ||
1643 | |||
1644 | if (bitmap) | ||
1645 | goto out; | ||
1646 | 1655 | ||
1647 | node = rb_first(&cluster->root); | 1656 | node = rb_first(&cluster->root); |
1648 | while (node) { | 1657 | while (node) { |
1658 | bool bitmap; | ||
1659 | |||
1649 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 1660 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
1650 | node = rb_next(&entry->offset_index); | 1661 | node = rb_next(&entry->offset_index); |
1651 | rb_erase(&entry->offset_index, &cluster->root); | 1662 | rb_erase(&entry->offset_index, &cluster->root); |
1652 | BUG_ON(entry->bitmap); | 1663 | |
1653 | try_merge_free_space(block_group, entry, false); | 1664 | bitmap = (entry->bitmap != NULL); |
1665 | if (!bitmap) | ||
1666 | try_merge_free_space(block_group, entry, false); | ||
1654 | tree_insert_offset(&block_group->free_space_offset, | 1667 | tree_insert_offset(&block_group->free_space_offset, |
1655 | entry->offset, &entry->offset_index, 0); | 1668 | entry->offset, &entry->offset_index, bitmap); |
1656 | } | 1669 | } |
1657 | cluster->root = RB_ROOT; | 1670 | cluster->root = RB_ROOT; |
1658 | 1671 | ||
@@ -1689,7 +1702,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
1689 | unlink_free_space(block_group, info); | 1702 | unlink_free_space(block_group, info); |
1690 | if (info->bitmap) | 1703 | if (info->bitmap) |
1691 | kfree(info->bitmap); | 1704 | kfree(info->bitmap); |
1692 | kfree(info); | 1705 | kmem_cache_free(btrfs_free_space_cachep, info); |
1693 | if (need_resched()) { | 1706 | if (need_resched()) { |
1694 | spin_unlock(&block_group->tree_lock); | 1707 | spin_unlock(&block_group->tree_lock); |
1695 | cond_resched(); | 1708 | cond_resched(); |
@@ -1722,7 +1735,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | |||
1722 | entry->offset += bytes; | 1735 | entry->offset += bytes; |
1723 | entry->bytes -= bytes; | 1736 | entry->bytes -= bytes; |
1724 | if (!entry->bytes) | 1737 | if (!entry->bytes) |
1725 | kfree(entry); | 1738 | kmem_cache_free(btrfs_free_space_cachep, entry); |
1726 | else | 1739 | else |
1727 | link_free_space(block_group, entry); | 1740 | link_free_space(block_group, entry); |
1728 | } | 1741 | } |
@@ -1775,50 +1788,24 @@ int btrfs_return_cluster_to_free_space( | |||
1775 | 1788 | ||
1776 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | 1789 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, |
1777 | struct btrfs_free_cluster *cluster, | 1790 | struct btrfs_free_cluster *cluster, |
1791 | struct btrfs_free_space *entry, | ||
1778 | u64 bytes, u64 min_start) | 1792 | u64 bytes, u64 min_start) |
1779 | { | 1793 | { |
1780 | struct btrfs_free_space *entry; | ||
1781 | int err; | 1794 | int err; |
1782 | u64 search_start = cluster->window_start; | 1795 | u64 search_start = cluster->window_start; |
1783 | u64 search_bytes = bytes; | 1796 | u64 search_bytes = bytes; |
1784 | u64 ret = 0; | 1797 | u64 ret = 0; |
1785 | 1798 | ||
1786 | spin_lock(&block_group->tree_lock); | ||
1787 | spin_lock(&cluster->lock); | ||
1788 | |||
1789 | if (!cluster->points_to_bitmap) | ||
1790 | goto out; | ||
1791 | |||
1792 | if (cluster->block_group != block_group) | ||
1793 | goto out; | ||
1794 | |||
1795 | /* | ||
1796 | * search_start is the beginning of the bitmap, but at some point it may | ||
1797 | * be a good idea to point to the actual start of the free area in the | ||
1798 | * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only | ||
1799 | * to 1 to make sure we get the bitmap entry | ||
1800 | */ | ||
1801 | entry = tree_search_offset(block_group, | ||
1802 | offset_to_bitmap(block_group, search_start), | ||
1803 | 1, 0); | ||
1804 | if (!entry || !entry->bitmap) | ||
1805 | goto out; | ||
1806 | |||
1807 | search_start = min_start; | 1799 | search_start = min_start; |
1808 | search_bytes = bytes; | 1800 | search_bytes = bytes; |
1809 | 1801 | ||
1810 | err = search_bitmap(block_group, entry, &search_start, | 1802 | err = search_bitmap(block_group, entry, &search_start, |
1811 | &search_bytes); | 1803 | &search_bytes); |
1812 | if (err) | 1804 | if (err) |
1813 | goto out; | 1805 | return 0; |
1814 | 1806 | ||
1815 | ret = search_start; | 1807 | ret = search_start; |
1816 | bitmap_clear_bits(block_group, entry, ret, bytes); | 1808 | bitmap_clear_bits(block_group, entry, ret, bytes); |
1817 | if (entry->bytes == 0) | ||
1818 | free_bitmap(block_group, entry); | ||
1819 | out: | ||
1820 | spin_unlock(&cluster->lock); | ||
1821 | spin_unlock(&block_group->tree_lock); | ||
1822 | 1809 | ||
1823 | return ret; | 1810 | return ret; |
1824 | } | 1811 | } |
@@ -1836,10 +1823,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1836 | struct rb_node *node; | 1823 | struct rb_node *node; |
1837 | u64 ret = 0; | 1824 | u64 ret = 0; |
1838 | 1825 | ||
1839 | if (cluster->points_to_bitmap) | ||
1840 | return btrfs_alloc_from_bitmap(block_group, cluster, bytes, | ||
1841 | min_start); | ||
1842 | |||
1843 | spin_lock(&cluster->lock); | 1826 | spin_lock(&cluster->lock); |
1844 | if (bytes > cluster->max_size) | 1827 | if (bytes > cluster->max_size) |
1845 | goto out; | 1828 | goto out; |
@@ -1852,9 +1835,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1852 | goto out; | 1835 | goto out; |
1853 | 1836 | ||
1854 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 1837 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
1855 | |||
1856 | while(1) { | 1838 | while(1) { |
1857 | if (entry->bytes < bytes || entry->offset < min_start) { | 1839 | if (entry->bytes < bytes || |
1840 | (!entry->bitmap && entry->offset < min_start)) { | ||
1858 | struct rb_node *node; | 1841 | struct rb_node *node; |
1859 | 1842 | ||
1860 | node = rb_next(&entry->offset_index); | 1843 | node = rb_next(&entry->offset_index); |
@@ -1864,10 +1847,27 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1864 | offset_index); | 1847 | offset_index); |
1865 | continue; | 1848 | continue; |
1866 | } | 1849 | } |
1867 | ret = entry->offset; | ||
1868 | 1850 | ||
1869 | entry->offset += bytes; | 1851 | if (entry->bitmap) { |
1870 | entry->bytes -= bytes; | 1852 | ret = btrfs_alloc_from_bitmap(block_group, |
1853 | cluster, entry, bytes, | ||
1854 | min_start); | ||
1855 | if (ret == 0) { | ||
1856 | struct rb_node *node; | ||
1857 | node = rb_next(&entry->offset_index); | ||
1858 | if (!node) | ||
1859 | break; | ||
1860 | entry = rb_entry(node, struct btrfs_free_space, | ||
1861 | offset_index); | ||
1862 | continue; | ||
1863 | } | ||
1864 | } else { | ||
1865 | |||
1866 | ret = entry->offset; | ||
1867 | |||
1868 | entry->offset += bytes; | ||
1869 | entry->bytes -= bytes; | ||
1870 | } | ||
1871 | 1871 | ||
1872 | if (entry->bytes == 0) | 1872 | if (entry->bytes == 0) |
1873 | rb_erase(&entry->offset_index, &cluster->root); | 1873 | rb_erase(&entry->offset_index, &cluster->root); |
@@ -1884,7 +1884,12 @@ out: | |||
1884 | block_group->free_space -= bytes; | 1884 | block_group->free_space -= bytes; |
1885 | if (entry->bytes == 0) { | 1885 | if (entry->bytes == 0) { |
1886 | block_group->free_extents--; | 1886 | block_group->free_extents--; |
1887 | kfree(entry); | 1887 | if (entry->bitmap) { |
1888 | kfree(entry->bitmap); | ||
1889 | block_group->total_bitmaps--; | ||
1890 | recalculate_thresholds(block_group); | ||
1891 | } | ||
1892 | kmem_cache_free(btrfs_free_space_cachep, entry); | ||
1888 | } | 1893 | } |
1889 | 1894 | ||
1890 | spin_unlock(&block_group->tree_lock); | 1895 | spin_unlock(&block_group->tree_lock); |
@@ -1904,12 +1909,13 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | |||
1904 | unsigned long found_bits; | 1909 | unsigned long found_bits; |
1905 | unsigned long start = 0; | 1910 | unsigned long start = 0; |
1906 | unsigned long total_found = 0; | 1911 | unsigned long total_found = 0; |
1912 | int ret; | ||
1907 | bool found = false; | 1913 | bool found = false; |
1908 | 1914 | ||
1909 | i = offset_to_bit(entry->offset, block_group->sectorsize, | 1915 | i = offset_to_bit(entry->offset, block_group->sectorsize, |
1910 | max_t(u64, offset, entry->offset)); | 1916 | max_t(u64, offset, entry->offset)); |
1911 | search_bits = bytes_to_bits(min_bytes, block_group->sectorsize); | 1917 | search_bits = bytes_to_bits(bytes, block_group->sectorsize); |
1912 | total_bits = bytes_to_bits(bytes, block_group->sectorsize); | 1918 | total_bits = bytes_to_bits(min_bytes, block_group->sectorsize); |
1913 | 1919 | ||
1914 | again: | 1920 | again: |
1915 | found_bits = 0; | 1921 | found_bits = 0; |
@@ -1926,7 +1932,7 @@ again: | |||
1926 | } | 1932 | } |
1927 | 1933 | ||
1928 | if (!found_bits) | 1934 | if (!found_bits) |
1929 | return -1; | 1935 | return -ENOSPC; |
1930 | 1936 | ||
1931 | if (!found) { | 1937 | if (!found) { |
1932 | start = i; | 1938 | start = i; |
@@ -1950,189 +1956,208 @@ again: | |||
1950 | 1956 | ||
1951 | cluster->window_start = start * block_group->sectorsize + | 1957 | cluster->window_start = start * block_group->sectorsize + |
1952 | entry->offset; | 1958 | entry->offset; |
1953 | cluster->points_to_bitmap = true; | 1959 | rb_erase(&entry->offset_index, &block_group->free_space_offset); |
1960 | ret = tree_insert_offset(&cluster->root, entry->offset, | ||
1961 | &entry->offset_index, 1); | ||
1962 | BUG_ON(ret); | ||
1954 | 1963 | ||
1955 | return 0; | 1964 | return 0; |
1956 | } | 1965 | } |
1957 | 1966 | ||
1958 | /* | 1967 | /* |
1959 | * here we try to find a cluster of blocks in a block group. The goal | 1968 | * This searches the block group for just extents to fill the cluster with. |
1960 | * is to find at least bytes free and up to empty_size + bytes free. | ||
1961 | * We might not find them all in one contiguous area. | ||
1962 | * | ||
1963 | * returns zero and sets up cluster if things worked out, otherwise | ||
1964 | * it returns -enospc | ||
1965 | */ | 1969 | */ |
1966 | int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | 1970 | static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, |
1967 | struct btrfs_root *root, | 1971 | struct btrfs_free_cluster *cluster, |
1968 | struct btrfs_block_group_cache *block_group, | 1972 | u64 offset, u64 bytes, u64 min_bytes) |
1969 | struct btrfs_free_cluster *cluster, | ||
1970 | u64 offset, u64 bytes, u64 empty_size) | ||
1971 | { | 1973 | { |
1974 | struct btrfs_free_space *first = NULL; | ||
1972 | struct btrfs_free_space *entry = NULL; | 1975 | struct btrfs_free_space *entry = NULL; |
1976 | struct btrfs_free_space *prev = NULL; | ||
1977 | struct btrfs_free_space *last; | ||
1973 | struct rb_node *node; | 1978 | struct rb_node *node; |
1974 | struct btrfs_free_space *next; | ||
1975 | struct btrfs_free_space *last = NULL; | ||
1976 | u64 min_bytes; | ||
1977 | u64 window_start; | 1979 | u64 window_start; |
1978 | u64 window_free; | 1980 | u64 window_free; |
1979 | u64 max_extent = 0; | 1981 | u64 max_extent; |
1980 | bool found_bitmap = false; | 1982 | u64 max_gap = 128 * 1024; |
1981 | int ret; | ||
1982 | 1983 | ||
1983 | /* for metadata, allow allocates with more holes */ | 1984 | entry = tree_search_offset(block_group, offset, 0, 1); |
1984 | if (btrfs_test_opt(root, SSD_SPREAD)) { | 1985 | if (!entry) |
1985 | min_bytes = bytes + empty_size; | 1986 | return -ENOSPC; |
1986 | } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { | ||
1987 | /* | ||
1988 | * we want to do larger allocations when we are | ||
1989 | * flushing out the delayed refs, it helps prevent | ||
1990 | * making more work as we go along. | ||
1991 | */ | ||
1992 | if (trans->transaction->delayed_refs.flushing) | ||
1993 | min_bytes = max(bytes, (bytes + empty_size) >> 1); | ||
1994 | else | ||
1995 | min_bytes = max(bytes, (bytes + empty_size) >> 4); | ||
1996 | } else | ||
1997 | min_bytes = max(bytes, (bytes + empty_size) >> 2); | ||
1998 | |||
1999 | spin_lock(&block_group->tree_lock); | ||
2000 | spin_lock(&cluster->lock); | ||
2001 | |||
2002 | /* someone already found a cluster, hooray */ | ||
2003 | if (cluster->block_group) { | ||
2004 | ret = 0; | ||
2005 | goto out; | ||
2006 | } | ||
2007 | again: | ||
2008 | entry = tree_search_offset(block_group, offset, found_bitmap, 1); | ||
2009 | if (!entry) { | ||
2010 | ret = -ENOSPC; | ||
2011 | goto out; | ||
2012 | } | ||
2013 | 1987 | ||
2014 | /* | 1988 | /* |
2015 | * If found_bitmap is true, we exhausted our search for extent entries, | 1989 | * We don't want bitmaps, so just move along until we find a normal |
2016 | * and we just want to search all of the bitmaps that we can find, and | 1990 | * extent entry. |
2017 | * ignore any extent entries we find. | ||
2018 | */ | 1991 | */ |
2019 | while (entry->bitmap || found_bitmap || | 1992 | while (entry->bitmap) { |
2020 | (!entry->bitmap && entry->bytes < min_bytes)) { | 1993 | node = rb_next(&entry->offset_index); |
2021 | struct rb_node *node = rb_next(&entry->offset_index); | 1994 | if (!node) |
2022 | 1995 | return -ENOSPC; | |
2023 | if (entry->bitmap && entry->bytes > bytes + empty_size) { | ||
2024 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, | ||
2025 | offset, bytes + empty_size, | ||
2026 | min_bytes); | ||
2027 | if (!ret) | ||
2028 | goto got_it; | ||
2029 | } | ||
2030 | |||
2031 | if (!node) { | ||
2032 | ret = -ENOSPC; | ||
2033 | goto out; | ||
2034 | } | ||
2035 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 1996 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
2036 | } | 1997 | } |
2037 | 1998 | ||
2038 | /* | ||
2039 | * We already searched all the extent entries from the passed in offset | ||
2040 | * to the end and didn't find enough space for the cluster, and we also | ||
2041 | * didn't find any bitmaps that met our criteria, just go ahead and exit | ||
2042 | */ | ||
2043 | if (found_bitmap) { | ||
2044 | ret = -ENOSPC; | ||
2045 | goto out; | ||
2046 | } | ||
2047 | |||
2048 | cluster->points_to_bitmap = false; | ||
2049 | window_start = entry->offset; | 1999 | window_start = entry->offset; |
2050 | window_free = entry->bytes; | 2000 | window_free = entry->bytes; |
2051 | last = entry; | ||
2052 | max_extent = entry->bytes; | 2001 | max_extent = entry->bytes; |
2002 | first = entry; | ||
2003 | last = entry; | ||
2004 | prev = entry; | ||
2053 | 2005 | ||
2054 | while (1) { | 2006 | while (window_free <= min_bytes) { |
2055 | /* out window is just right, lets fill it */ | 2007 | node = rb_next(&entry->offset_index); |
2056 | if (window_free >= bytes + empty_size) | 2008 | if (!node) |
2057 | break; | 2009 | return -ENOSPC; |
2058 | 2010 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
2059 | node = rb_next(&last->offset_index); | ||
2060 | if (!node) { | ||
2061 | if (found_bitmap) | ||
2062 | goto again; | ||
2063 | ret = -ENOSPC; | ||
2064 | goto out; | ||
2065 | } | ||
2066 | next = rb_entry(node, struct btrfs_free_space, offset_index); | ||
2067 | 2011 | ||
2068 | /* | 2012 | if (entry->bitmap) |
2069 | * we found a bitmap, so if this search doesn't result in a | ||
2070 | * cluster, we know to go and search again for the bitmaps and | ||
2071 | * start looking for space there | ||
2072 | */ | ||
2073 | if (next->bitmap) { | ||
2074 | if (!found_bitmap) | ||
2075 | offset = next->offset; | ||
2076 | found_bitmap = true; | ||
2077 | last = next; | ||
2078 | continue; | 2013 | continue; |
2079 | } | ||
2080 | |||
2081 | /* | 2014 | /* |
2082 | * we haven't filled the empty size and the window is | 2015 | * we haven't filled the empty size and the window is |
2083 | * very large. reset and try again | 2016 | * very large. reset and try again |
2084 | */ | 2017 | */ |
2085 | if (next->offset - (last->offset + last->bytes) > 128 * 1024 || | 2018 | if (entry->offset - (prev->offset + prev->bytes) > max_gap || |
2086 | next->offset - window_start > (bytes + empty_size) * 2) { | 2019 | entry->offset - window_start > (min_bytes * 2)) { |
2087 | entry = next; | 2020 | first = entry; |
2088 | window_start = entry->offset; | 2021 | window_start = entry->offset; |
2089 | window_free = entry->bytes; | 2022 | window_free = entry->bytes; |
2090 | last = entry; | 2023 | last = entry; |
2091 | max_extent = entry->bytes; | 2024 | max_extent = entry->bytes; |
2092 | } else { | 2025 | } else { |
2093 | last = next; | 2026 | last = entry; |
2094 | window_free += next->bytes; | 2027 | window_free += entry->bytes; |
2095 | if (entry->bytes > max_extent) | 2028 | if (entry->bytes > max_extent) |
2096 | max_extent = entry->bytes; | 2029 | max_extent = entry->bytes; |
2097 | } | 2030 | } |
2031 | prev = entry; | ||
2098 | } | 2032 | } |
2099 | 2033 | ||
2100 | cluster->window_start = entry->offset; | 2034 | cluster->window_start = first->offset; |
2035 | |||
2036 | node = &first->offset_index; | ||
2101 | 2037 | ||
2102 | /* | 2038 | /* |
2103 | * now we've found our entries, pull them out of the free space | 2039 | * now we've found our entries, pull them out of the free space |
2104 | * cache and put them into the cluster rbtree | 2040 | * cache and put them into the cluster rbtree |
2105 | * | ||
2106 | * The cluster includes an rbtree, but only uses the offset index | ||
2107 | * of each free space cache entry. | ||
2108 | */ | 2041 | */ |
2109 | while (1) { | 2042 | do { |
2043 | int ret; | ||
2044 | |||
2045 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
2110 | node = rb_next(&entry->offset_index); | 2046 | node = rb_next(&entry->offset_index); |
2111 | if (entry->bitmap && node) { | 2047 | if (entry->bitmap) |
2112 | entry = rb_entry(node, struct btrfs_free_space, | ||
2113 | offset_index); | ||
2114 | continue; | 2048 | continue; |
2115 | } else if (entry->bitmap && !node) { | ||
2116 | break; | ||
2117 | } | ||
2118 | 2049 | ||
2119 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | 2050 | rb_erase(&entry->offset_index, &block_group->free_space_offset); |
2120 | ret = tree_insert_offset(&cluster->root, entry->offset, | 2051 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2121 | &entry->offset_index, 0); | 2052 | &entry->offset_index, 0); |
2122 | BUG_ON(ret); | 2053 | BUG_ON(ret); |
2054 | } while (node && entry != last); | ||
2123 | 2055 | ||
2124 | if (!node || entry == last) | 2056 | cluster->max_size = max_extent; |
2125 | break; | 2057 | |
2058 | return 0; | ||
2059 | } | ||
2060 | |||
2061 | /* | ||
2062 | * This specifically looks for bitmaps that may work in the cluster, we assume | ||
2063 | * that we have already failed to find extents that will work. | ||
2064 | */ | ||
2065 | static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | ||
2066 | struct btrfs_free_cluster *cluster, | ||
2067 | u64 offset, u64 bytes, u64 min_bytes) | ||
2068 | { | ||
2069 | struct btrfs_free_space *entry; | ||
2070 | struct rb_node *node; | ||
2071 | int ret = -ENOSPC; | ||
2072 | |||
2073 | if (block_group->total_bitmaps == 0) | ||
2074 | return -ENOSPC; | ||
2126 | 2075 | ||
2076 | entry = tree_search_offset(block_group, | ||
2077 | offset_to_bitmap(block_group, offset), | ||
2078 | 0, 1); | ||
2079 | if (!entry) | ||
2080 | return -ENOSPC; | ||
2081 | |||
2082 | node = &entry->offset_index; | ||
2083 | do { | ||
2127 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 2084 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
2085 | node = rb_next(&entry->offset_index); | ||
2086 | if (!entry->bitmap) | ||
2087 | continue; | ||
2088 | if (entry->bytes < min_bytes) | ||
2089 | continue; | ||
2090 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | ||
2091 | bytes, min_bytes); | ||
2092 | } while (ret && node); | ||
2093 | |||
2094 | return ret; | ||
2095 | } | ||
2096 | |||
2097 | /* | ||
2098 | * here we try to find a cluster of blocks in a block group. The goal | ||
2099 | * is to find at least bytes free and up to empty_size + bytes free. | ||
2100 | * We might not find them all in one contiguous area. | ||
2101 | * | ||
2102 | * returns zero and sets up cluster if things worked out, otherwise | ||
2103 | * it returns -enospc | ||
2104 | */ | ||
2105 | int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | ||
2106 | struct btrfs_root *root, | ||
2107 | struct btrfs_block_group_cache *block_group, | ||
2108 | struct btrfs_free_cluster *cluster, | ||
2109 | u64 offset, u64 bytes, u64 empty_size) | ||
2110 | { | ||
2111 | u64 min_bytes; | ||
2112 | int ret; | ||
2113 | |||
2114 | /* for metadata, allow allocates with more holes */ | ||
2115 | if (btrfs_test_opt(root, SSD_SPREAD)) { | ||
2116 | min_bytes = bytes + empty_size; | ||
2117 | } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { | ||
2118 | /* | ||
2119 | * we want to do larger allocations when we are | ||
2120 | * flushing out the delayed refs, it helps prevent | ||
2121 | * making more work as we go along. | ||
2122 | */ | ||
2123 | if (trans->transaction->delayed_refs.flushing) | ||
2124 | min_bytes = max(bytes, (bytes + empty_size) >> 1); | ||
2125 | else | ||
2126 | min_bytes = max(bytes, (bytes + empty_size) >> 4); | ||
2127 | } else | ||
2128 | min_bytes = max(bytes, (bytes + empty_size) >> 2); | ||
2129 | |||
2130 | spin_lock(&block_group->tree_lock); | ||
2131 | |||
2132 | /* | ||
2133 | * If we know we don't have enough space to make a cluster don't even | ||
2134 | * bother doing all the work to try and find one. | ||
2135 | */ | ||
2136 | if (block_group->free_space < min_bytes) { | ||
2137 | spin_unlock(&block_group->tree_lock); | ||
2138 | return -ENOSPC; | ||
2128 | } | 2139 | } |
2129 | 2140 | ||
2130 | cluster->max_size = max_extent; | 2141 | spin_lock(&cluster->lock); |
2131 | got_it: | 2142 | |
2132 | ret = 0; | 2143 | /* someone already found a cluster, hooray */ |
2133 | atomic_inc(&block_group->count); | 2144 | if (cluster->block_group) { |
2134 | list_add_tail(&cluster->block_group_list, &block_group->cluster_list); | 2145 | ret = 0; |
2135 | cluster->block_group = block_group; | 2146 | goto out; |
2147 | } | ||
2148 | |||
2149 | ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes, | ||
2150 | min_bytes); | ||
2151 | if (ret) | ||
2152 | ret = setup_cluster_bitmap(block_group, cluster, offset, | ||
2153 | bytes, min_bytes); | ||
2154 | |||
2155 | if (!ret) { | ||
2156 | atomic_inc(&block_group->count); | ||
2157 | list_add_tail(&cluster->block_group_list, | ||
2158 | &block_group->cluster_list); | ||
2159 | cluster->block_group = block_group; | ||
2160 | } | ||
2136 | out: | 2161 | out: |
2137 | spin_unlock(&cluster->lock); | 2162 | spin_unlock(&cluster->lock); |
2138 | spin_unlock(&block_group->tree_lock); | 2163 | spin_unlock(&block_group->tree_lock); |
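
setup_cluster_no_bitmap() above walks the free-space rbtree with a sliding window: it keeps extending the window with the next non-bitmap extent, and restarts it whenever the gap to the previous entry exceeds max_gap (128K) or the window span grows past twice min_bytes without filling up. A simplified, hedged sketch of the same window logic over a plain sorted array of (offset, bytes) extents:

#include <linux/types.h>

struct ext { u64 offset; u64 bytes; };

/* Hedged sketch: return the index of the first extent of a window whose
 * summed free space reaches min_bytes, or -1 if no such window exists.
 * The reset conditions mirror the ones used above. */
static int find_cluster_window(const struct ext *e, int nr,
			       u64 min_bytes, u64 max_gap)
{
	u64 window_start, window_free;
	int first, i;

	if (nr == 0)
		return -1;

	first = 0;
	window_start = e[0].offset;
	window_free = e[0].bytes;

	for (i = 1; window_free <= min_bytes; i++) {
		if (i >= nr)
			return -1;

		if (e[i].offset - (e[i - 1].offset + e[i - 1].bytes) > max_gap ||
		    e[i].offset - window_start > min_bytes * 2) {
			/* too sparse: restart the window at this extent */
			first = i;
			window_start = e[i].offset;
			window_free = e[i].bytes;
		} else {
			window_free += e[i].bytes;
		}
	}
	return first;
}
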
@@ -2149,8 +2174,99 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |||
2149 | spin_lock_init(&cluster->refill_lock); | 2174 | spin_lock_init(&cluster->refill_lock); |
2150 | cluster->root = RB_ROOT; | 2175 | cluster->root = RB_ROOT; |
2151 | cluster->max_size = 0; | 2176 | cluster->max_size = 0; |
2152 | cluster->points_to_bitmap = false; | ||
2153 | INIT_LIST_HEAD(&cluster->block_group_list); | 2177 | INIT_LIST_HEAD(&cluster->block_group_list); |
2154 | cluster->block_group = NULL; | 2178 | cluster->block_group = NULL; |
2155 | } | 2179 | } |
2156 | 2180 | ||
2181 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | ||
2182 | u64 *trimmed, u64 start, u64 end, u64 minlen) | ||
2183 | { | ||
2184 | struct btrfs_free_space *entry = NULL; | ||
2185 | struct btrfs_fs_info *fs_info = block_group->fs_info; | ||
2186 | u64 bytes = 0; | ||
2187 | u64 actually_trimmed; | ||
2188 | int ret = 0; | ||
2189 | |||
2190 | *trimmed = 0; | ||
2191 | |||
2192 | while (start < end) { | ||
2193 | spin_lock(&block_group->tree_lock); | ||
2194 | |||
2195 | if (block_group->free_space < minlen) { | ||
2196 | spin_unlock(&block_group->tree_lock); | ||
2197 | break; | ||
2198 | } | ||
2199 | |||
2200 | entry = tree_search_offset(block_group, start, 0, 1); | ||
2201 | if (!entry) | ||
2202 | entry = tree_search_offset(block_group, | ||
2203 | offset_to_bitmap(block_group, | ||
2204 | start), | ||
2205 | 1, 1); | ||
2206 | |||
2207 | if (!entry || entry->offset >= end) { | ||
2208 | spin_unlock(&block_group->tree_lock); | ||
2209 | break; | ||
2210 | } | ||
2211 | |||
2212 | if (entry->bitmap) { | ||
2213 | ret = search_bitmap(block_group, entry, &start, &bytes); | ||
2214 | if (!ret) { | ||
2215 | if (start >= end) { | ||
2216 | spin_unlock(&block_group->tree_lock); | ||
2217 | break; | ||
2218 | } | ||
2219 | bytes = min(bytes, end - start); | ||
2220 | bitmap_clear_bits(block_group, entry, | ||
2221 | start, bytes); | ||
2222 | if (entry->bytes == 0) | ||
2223 | free_bitmap(block_group, entry); | ||
2224 | } else { | ||
2225 | start = entry->offset + BITS_PER_BITMAP * | ||
2226 | block_group->sectorsize; | ||
2227 | spin_unlock(&block_group->tree_lock); | ||
2228 | ret = 0; | ||
2229 | continue; | ||
2230 | } | ||
2231 | } else { | ||
2232 | start = entry->offset; | ||
2233 | bytes = min(entry->bytes, end - start); | ||
2234 | unlink_free_space(block_group, entry); | ||
2235 | kfree(entry); | ||
2236 | } | ||
2237 | |||
2238 | spin_unlock(&block_group->tree_lock); | ||
2239 | |||
2240 | if (bytes >= minlen) { | ||
2241 | int update_ret; | ||
2242 | update_ret = btrfs_update_reserved_bytes(block_group, | ||
2243 | bytes, 1, 1); | ||
2244 | |||
2245 | ret = btrfs_error_discard_extent(fs_info->extent_root, | ||
2246 | start, | ||
2247 | bytes, | ||
2248 | &actually_trimmed); | ||
2249 | |||
2250 | btrfs_add_free_space(block_group, | ||
2251 | start, bytes); | ||
2252 | if (!update_ret) | ||
2253 | btrfs_update_reserved_bytes(block_group, | ||
2254 | bytes, 0, 1); | ||
2255 | |||
2256 | if (ret) | ||
2257 | break; | ||
2258 | *trimmed += actually_trimmed; | ||
2259 | } | ||
2260 | start += bytes; | ||
2261 | bytes = 0; | ||
2262 | |||
2263 | if (fatal_signal_pending(current)) { | ||
2264 | ret = -ERESTARTSYS; | ||
2265 | break; | ||
2266 | } | ||
2267 | |||
2268 | cond_resched(); | ||
2269 | } | ||
2270 | |||
2271 | return ret; | ||
2272 | } | ||
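
btrfs_trim_block_group() above is a long-running loop, so it re-takes the tree lock each pass, bails out when a fatal signal is pending, and calls cond_resched() between iterations. A hedged sketch of that long-loop etiquette, independent of the btrfs specifics (trim_one_range() is a hypothetical callback that returns how many bytes it handled):

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hedged sketch: walk [start, end) in chunks, stay preemption-friendly
 * and let a fatal signal abort the walk with -ERESTARTSYS. */
static int trim_range(u64 start, u64 end,
		      u64 (*trim_one_range)(u64 start, u64 max))
{
	int ret = 0;

	while (start < end) {
		u64 done = trim_one_range(start, end - start);	/* hypothetical */

		if (!done)
			break;		/* nothing left worth trimming */
		start += done;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		cond_resched();
	}
	return ret;
}
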
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index e49ca5c321b5..65c3b935289f 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h | |||
@@ -68,4 +68,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
68 | int btrfs_return_cluster_to_free_space( | 68 | int btrfs_return_cluster_to_free_space( |
69 | struct btrfs_block_group_cache *block_group, | 69 | struct btrfs_block_group_cache *block_group, |
70 | struct btrfs_free_cluster *cluster); | 70 | struct btrfs_free_cluster *cluster); |
71 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | ||
72 | u64 *trimmed, u64 start, u64 end, u64 minlen); | ||
71 | #endif | 73 | #endif |
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c56eb5909172..c05a08f4c411 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c | |||
@@ -30,7 +30,8 @@ int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) | |||
30 | int slot; | 30 | int slot; |
31 | 31 | ||
32 | path = btrfs_alloc_path(); | 32 | path = btrfs_alloc_path(); |
33 | BUG_ON(!path); | 33 | if (!path) |
34 | return -ENOMEM; | ||
34 | 35 | ||
35 | search_key.objectid = BTRFS_LAST_FREE_OBJECTID; | 36 | search_key.objectid = BTRFS_LAST_FREE_OBJECTID; |
36 | search_key.type = -1; | 37 | search_key.type = -1; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 119520bdb9a5..93c28a1d6bdc 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include "tree-log.h" | 50 | #include "tree-log.h" |
51 | #include "compression.h" | 51 | #include "compression.h" |
52 | #include "locking.h" | 52 | #include "locking.h" |
53 | #include "free-space-cache.h" | ||
53 | 54 | ||
54 | struct btrfs_iget_args { | 55 | struct btrfs_iget_args { |
55 | u64 ino; | 56 | u64 ino; |
@@ -70,6 +71,7 @@ static struct kmem_cache *btrfs_inode_cachep; | |||
70 | struct kmem_cache *btrfs_trans_handle_cachep; | 71 | struct kmem_cache *btrfs_trans_handle_cachep; |
71 | struct kmem_cache *btrfs_transaction_cachep; | 72 | struct kmem_cache *btrfs_transaction_cachep; |
72 | struct kmem_cache *btrfs_path_cachep; | 73 | struct kmem_cache *btrfs_path_cachep; |
74 | struct kmem_cache *btrfs_free_space_cachep; | ||
73 | 75 | ||
74 | #define S_SHIFT 12 | 76 | #define S_SHIFT 12 |
75 | static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { | 77 | static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { |
@@ -82,7 +84,8 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { | |||
82 | [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, | 84 | [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, |
83 | }; | 85 | }; |
84 | 86 | ||
85 | static void btrfs_truncate(struct inode *inode); | 87 | static int btrfs_setsize(struct inode *inode, loff_t newsize); |
88 | static int btrfs_truncate(struct inode *inode); | ||
86 | static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); | 89 | static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); |
87 | static noinline int cow_file_range(struct inode *inode, | 90 | static noinline int cow_file_range(struct inode *inode, |
88 | struct page *locked_page, | 91 | struct page *locked_page, |
@@ -288,6 +291,7 @@ static noinline int add_async_extent(struct async_cow *cow, | |||
288 | struct async_extent *async_extent; | 291 | struct async_extent *async_extent; |
289 | 292 | ||
290 | async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); | 293 | async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); |
294 | BUG_ON(!async_extent); | ||
291 | async_extent->start = start; | 295 | async_extent->start = start; |
292 | async_extent->ram_size = ram_size; | 296 | async_extent->ram_size = ram_size; |
293 | async_extent->compressed_size = compressed_size; | 297 | async_extent->compressed_size = compressed_size; |
@@ -382,9 +386,11 @@ again: | |||
382 | */ | 386 | */ |
383 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && | 387 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && |
384 | (btrfs_test_opt(root, COMPRESS) || | 388 | (btrfs_test_opt(root, COMPRESS) || |
385 | (BTRFS_I(inode)->force_compress))) { | 389 | (BTRFS_I(inode)->force_compress) || |
390 | (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) { | ||
386 | WARN_ON(pages); | 391 | WARN_ON(pages); |
387 | pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); | 392 | pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); |
393 | BUG_ON(!pages); | ||
388 | 394 | ||
389 | if (BTRFS_I(inode)->force_compress) | 395 | if (BTRFS_I(inode)->force_compress) |
390 | compress_type = BTRFS_I(inode)->force_compress; | 396 | compress_type = BTRFS_I(inode)->force_compress; |
@@ -1254,7 +1260,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, | |||
1254 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1260 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
1255 | page_started, 0, nr_written); | 1261 | page_started, 0, nr_written); |
1256 | else if (!btrfs_test_opt(root, COMPRESS) && | 1262 | else if (!btrfs_test_opt(root, COMPRESS) && |
1257 | !(BTRFS_I(inode)->force_compress)) | 1263 | !(BTRFS_I(inode)->force_compress) && |
1264 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) | ||
1258 | ret = cow_file_range(inode, locked_page, start, end, | 1265 | ret = cow_file_range(inode, locked_page, start, end, |
1259 | page_started, nr_written, 1); | 1266 | page_started, nr_written, 1); |
1260 | else | 1267 | else |
@@ -1461,8 +1468,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
1461 | if (bio_flags & EXTENT_BIO_COMPRESSED) { | 1468 | if (bio_flags & EXTENT_BIO_COMPRESSED) { |
1462 | return btrfs_submit_compressed_read(inode, bio, | 1469 | return btrfs_submit_compressed_read(inode, bio, |
1463 | mirror_num, bio_flags); | 1470 | mirror_num, bio_flags); |
1464 | } else if (!skip_sum) | 1471 | } else if (!skip_sum) { |
1465 | btrfs_lookup_bio_sums(root, inode, bio, NULL); | 1472 | ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); |
1473 | if (ret) | ||
1474 | return ret; | ||
1475 | } | ||
1466 | goto mapit; | 1476 | goto mapit; |
1467 | } else if (!skip_sum) { | 1477 | } else if (!skip_sum) { |
1468 | /* csum items have already been cloned */ | 1478 | /* csum items have already been cloned */ |
@@ -1785,6 +1795,8 @@ out: | |||
1785 | static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, | 1795 | static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, |
1786 | struct extent_state *state, int uptodate) | 1796 | struct extent_state *state, int uptodate) |
1787 | { | 1797 | { |
1798 | trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); | ||
1799 | |||
1788 | ClearPagePrivate2(page); | 1800 | ClearPagePrivate2(page); |
1789 | return btrfs_finish_ordered_io(page->mapping->host, start, end); | 1801 | return btrfs_finish_ordered_io(page->mapping->host, start, end); |
1790 | } | 1802 | } |
@@ -1895,10 +1907,10 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, | |||
1895 | else | 1907 | else |
1896 | rw = READ; | 1908 | rw = READ; |
1897 | 1909 | ||
1898 | BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, | 1910 | ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, |
1899 | failrec->last_mirror, | 1911 | failrec->last_mirror, |
1900 | failrec->bio_flags, 0); | 1912 | failrec->bio_flags, 0); |
1901 | return 0; | 1913 | return ret; |
1902 | } | 1914 | } |
1903 | 1915 | ||
1904 | /* | 1916 | /* |
@@ -2282,7 +2294,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) | |||
2282 | * this cleans up any orphans that may be left on the list from the last use | 2294 | * this cleans up any orphans that may be left on the list from the last use |
2283 | * of this root. | 2295 | * of this root. |
2284 | */ | 2296 | */ |
2285 | void btrfs_orphan_cleanup(struct btrfs_root *root) | 2297 | int btrfs_orphan_cleanup(struct btrfs_root *root) |
2286 | { | 2298 | { |
2287 | struct btrfs_path *path; | 2299 | struct btrfs_path *path; |
2288 | struct extent_buffer *leaf; | 2300 | struct extent_buffer *leaf; |
@@ -2292,10 +2304,13 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2292 | int ret = 0, nr_unlink = 0, nr_truncate = 0; | 2304 | int ret = 0, nr_unlink = 0, nr_truncate = 0; |
2293 | 2305 | ||
2294 | if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) | 2306 | if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) |
2295 | return; | 2307 | return 0; |
2296 | 2308 | ||
2297 | path = btrfs_alloc_path(); | 2309 | path = btrfs_alloc_path(); |
2298 | BUG_ON(!path); | 2310 | if (!path) { |
2311 | ret = -ENOMEM; | ||
2312 | goto out; | ||
2313 | } | ||
2299 | path->reada = -1; | 2314 | path->reada = -1; |
2300 | 2315 | ||
2301 | key.objectid = BTRFS_ORPHAN_OBJECTID; | 2316 | key.objectid = BTRFS_ORPHAN_OBJECTID; |
@@ -2304,11 +2319,8 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2304 | 2319 | ||
2305 | while (1) { | 2320 | while (1) { |
2306 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 2321 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
2307 | if (ret < 0) { | 2322 | if (ret < 0) |
2308 | printk(KERN_ERR "Error searching slot for orphan: %d" | 2323 | goto out; |
2309 | "\n", ret); | ||
2310 | break; | ||
2311 | } | ||
2312 | 2324 | ||
2313 | /* | 2325 | /* |
2314 | * if ret == 0 means we found what we were searching for, which | 2326 | * if ret == 0 means we found what we were searching for, which |
@@ -2316,6 +2328,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2316 | * find the key and see if we have stuff that matches | 2328 | * find the key and see if we have stuff that matches |
2317 | */ | 2329 | */ |
2318 | if (ret > 0) { | 2330 | if (ret > 0) { |
2331 | ret = 0; | ||
2319 | if (path->slots[0] == 0) | 2332 | if (path->slots[0] == 0) |
2320 | break; | 2333 | break; |
2321 | path->slots[0]--; | 2334 | path->slots[0]--; |
@@ -2343,7 +2356,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2343 | found_key.type = BTRFS_INODE_ITEM_KEY; | 2356 | found_key.type = BTRFS_INODE_ITEM_KEY; |
2344 | found_key.offset = 0; | 2357 | found_key.offset = 0; |
2345 | inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); | 2358 | inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); |
2346 | BUG_ON(IS_ERR(inode)); | 2359 | if (IS_ERR(inode)) { |
2360 | ret = PTR_ERR(inode); | ||
2361 | goto out; | ||
2362 | } | ||
2347 | 2363 | ||
2348 | /* | 2364 | /* |
2349 | * add this inode to the orphan list so btrfs_orphan_del does | 2365 | * add this inode to the orphan list so btrfs_orphan_del does |
@@ -2361,7 +2377,10 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2361 | */ | 2377 | */ |
2362 | if (is_bad_inode(inode)) { | 2378 | if (is_bad_inode(inode)) { |
2363 | trans = btrfs_start_transaction(root, 0); | 2379 | trans = btrfs_start_transaction(root, 0); |
2364 | BUG_ON(IS_ERR(trans)); | 2380 | if (IS_ERR(trans)) { |
2381 | ret = PTR_ERR(trans); | ||
2382 | goto out; | ||
2383 | } | ||
2365 | btrfs_orphan_del(trans, inode); | 2384 | btrfs_orphan_del(trans, inode); |
2366 | btrfs_end_transaction(trans, root); | 2385 | btrfs_end_transaction(trans, root); |
2367 | iput(inode); | 2386 | iput(inode); |
@@ -2370,17 +2389,22 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2370 | 2389 | ||
2371 | /* if we have links, this was a truncate, let's do that */ | 2390 | /* if we have links, this was a truncate, let's do that */
2372 | if (inode->i_nlink) { | 2391 | if (inode->i_nlink) { |
2392 | if (!S_ISREG(inode->i_mode)) { | ||
2393 | WARN_ON(1); | ||
2394 | iput(inode); | ||
2395 | continue; | ||
2396 | } | ||
2373 | nr_truncate++; | 2397 | nr_truncate++; |
2374 | btrfs_truncate(inode); | 2398 | ret = btrfs_truncate(inode); |
2375 | } else { | 2399 | } else { |
2376 | nr_unlink++; | 2400 | nr_unlink++; |
2377 | } | 2401 | } |
2378 | 2402 | ||
2379 | /* this will do delete_inode and everything for us */ | 2403 | /* this will do delete_inode and everything for us */ |
2380 | iput(inode); | 2404 | iput(inode); |
2405 | if (ret) | ||
2406 | goto out; | ||
2381 | } | 2407 | } |
2382 | btrfs_free_path(path); | ||
2383 | |||
2384 | root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; | 2408 | root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; |
2385 | 2409 | ||
2386 | if (root->orphan_block_rsv) | 2410 | if (root->orphan_block_rsv) |
@@ -2389,14 +2413,20 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2389 | 2413 | ||
2390 | if (root->orphan_block_rsv || root->orphan_item_inserted) { | 2414 | if (root->orphan_block_rsv || root->orphan_item_inserted) { |
2391 | trans = btrfs_join_transaction(root, 1); | 2415 | trans = btrfs_join_transaction(root, 1); |
2392 | BUG_ON(IS_ERR(trans)); | 2416 | if (!IS_ERR(trans)) |
2393 | btrfs_end_transaction(trans, root); | 2417 | btrfs_end_transaction(trans, root); |
2394 | } | 2418 | } |
2395 | 2419 | ||
2396 | if (nr_unlink) | 2420 | if (nr_unlink) |
2397 | printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); | 2421 | printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); |
2398 | if (nr_truncate) | 2422 | if (nr_truncate) |
2399 | printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); | 2423 | printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); |
2424 | |||
2425 | out: | ||
2426 | if (ret) | ||
2427 | printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret); | ||
2428 | btrfs_free_path(path); | ||
2429 | return ret; | ||
2400 | } | 2430 | } |
2401 | 2431 | ||
2402 | /* | 2432 | /* |
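btrfs_orphan_cleanup() now returns an error instead of BUG()ing, with every failure path funnelled through a single out: label that logs and frees the path. A stripped-down skeleton of that shape (the scan body is elided; the NULL guard before btrfs_free_path() is added here only because the sketch can reach out: before the path is allocated):

    int cleanup(struct btrfs_root *root)
    {
    	struct btrfs_path *path = NULL;
    	int ret = 0;

    	/* only the first caller ever runs the scan */
    	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
    		return 0;

    	path = btrfs_alloc_path();
    	if (!path) {
    		ret = -ENOMEM;
    		goto out;
    	}

    	/* ... walk the orphan items; any error does "goto out" ... */

    	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
    out:
    	if (ret)
    		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
    	if (path)
    		btrfs_free_path(path);
    	return ret;
    }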
@@ -2507,6 +2537,8 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
2507 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); | 2537 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); |
2508 | 2538 | ||
2509 | alloc_group_block = btrfs_inode_block_group(leaf, inode_item); | 2539 | alloc_group_block = btrfs_inode_block_group(leaf, inode_item); |
2540 | if (location.objectid == BTRFS_FREE_SPACE_OBJECTID) | ||
2541 | inode->i_mapping->flags &= ~__GFP_FS; | ||
2510 | 2542 | ||
2511 | /* | 2543 | /* |
2512 | * try to precache a NULL acl entry for files that don't have | 2544 | * try to precache a NULL acl entry for files that don't have |
@@ -2635,10 +2667,10 @@ failed: | |||
2635 | * recovery code. It removes a link in a directory with a given name, and | 2667 | * recovery code. It removes a link in a directory with a given name, and
2636 | * also drops the back refs in the inode to the directory | 2668 | * also drops the back refs in the inode to the directory |
2637 | */ | 2669 | */ |
2638 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, | 2670 | static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
2639 | struct btrfs_root *root, | 2671 | struct btrfs_root *root, |
2640 | struct inode *dir, struct inode *inode, | 2672 | struct inode *dir, struct inode *inode, |
2641 | const char *name, int name_len) | 2673 | const char *name, int name_len) |
2642 | { | 2674 | { |
2643 | struct btrfs_path *path; | 2675 | struct btrfs_path *path; |
2644 | int ret = 0; | 2676 | int ret = 0; |
@@ -2710,12 +2742,25 @@ err: | |||
2710 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | 2742 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); |
2711 | inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; | 2743 | inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; |
2712 | btrfs_update_inode(trans, root, dir); | 2744 | btrfs_update_inode(trans, root, dir); |
2713 | btrfs_drop_nlink(inode); | ||
2714 | ret = btrfs_update_inode(trans, root, inode); | ||
2715 | out: | 2745 | out: |
2716 | return ret; | 2746 | return ret; |
2717 | } | 2747 | } |
2718 | 2748 | ||
2749 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, | ||
2750 | struct btrfs_root *root, | ||
2751 | struct inode *dir, struct inode *inode, | ||
2752 | const char *name, int name_len) | ||
2753 | { | ||
2754 | int ret; | ||
2755 | ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); | ||
2756 | if (!ret) { | ||
2757 | btrfs_drop_nlink(inode); | ||
2758 | ret = btrfs_update_inode(trans, root, inode); | ||
2759 | } | ||
2760 | return ret; | ||
2761 | } | ||
2762 | |||
2763 | |||
2719 | /* helper to check if there is any shared block in the path */ | 2764 | /* helper to check if there is any shared block in the path */ |
2720 | static int check_path_shared(struct btrfs_root *root, | 2765 | static int check_path_shared(struct btrfs_root *root, |
2721 | struct btrfs_path *path) | 2766 | struct btrfs_path *path) |
@@ -3537,7 +3582,13 @@ out: | |||
3537 | return ret; | 3582 | return ret; |
3538 | } | 3583 | } |
3539 | 3584 | ||
3540 | int btrfs_cont_expand(struct inode *inode, loff_t size) | 3585 | /* |
3586 | * This function puts in dummy file extents for the area we're creating a hole | ||
3587 | * for. So if we are truncating this file to a larger size, we need to insert | ||
3588 | * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for | ||
3589 | * the range between oldsize and size | ||
3590 | */ | ||
3591 | int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) | ||
3541 | { | 3592 | { |
3542 | struct btrfs_trans_handle *trans; | 3593 | struct btrfs_trans_handle *trans; |
3543 | struct btrfs_root *root = BTRFS_I(inode)->root; | 3594 | struct btrfs_root *root = BTRFS_I(inode)->root; |
@@ -3545,7 +3596,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) | |||
3545 | struct extent_map *em = NULL; | 3596 | struct extent_map *em = NULL; |
3546 | struct extent_state *cached_state = NULL; | 3597 | struct extent_state *cached_state = NULL; |
3547 | u64 mask = root->sectorsize - 1; | 3598 | u64 mask = root->sectorsize - 1; |
3548 | u64 hole_start = (inode->i_size + mask) & ~mask; | 3599 | u64 hole_start = (oldsize + mask) & ~mask; |
3549 | u64 block_end = (size + mask) & ~mask; | 3600 | u64 block_end = (size + mask) & ~mask; |
3550 | u64 last_byte; | 3601 | u64 last_byte; |
3551 | u64 cur_offset; | 3602 | u64 cur_offset; |
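The functional change here is that the hole now starts at the old size handed in by btrfs_setsize() rather than at the current i_size. The mask arithmetic rounds both ends up to the next sector boundary; for example, with a 4096-byte sectorsize:

    u64 mask = 4096 - 1;			/* root->sectorsize - 1 */
    u64 oldsize = 10000, size = 20000;	/* example values */

    u64 hole_start = (oldsize + mask) & ~mask;	/* 12288: first sector past old EOF */
    u64 block_end  = (size + mask) & ~mask;	/* 20480: new end, rounded up */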
@@ -3590,13 +3641,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) | |||
3590 | err = btrfs_drop_extents(trans, inode, cur_offset, | 3641 | err = btrfs_drop_extents(trans, inode, cur_offset, |
3591 | cur_offset + hole_size, | 3642 | cur_offset + hole_size, |
3592 | &hint_byte, 1); | 3643 | &hint_byte, 1); |
3593 | BUG_ON(err); | 3644 | if (err) |
3645 | break; | ||
3594 | 3646 | ||
3595 | err = btrfs_insert_file_extent(trans, root, | 3647 | err = btrfs_insert_file_extent(trans, root, |
3596 | inode->i_ino, cur_offset, 0, | 3648 | inode->i_ino, cur_offset, 0, |
3597 | 0, hole_size, 0, hole_size, | 3649 | 0, hole_size, 0, hole_size, |
3598 | 0, 0, 0); | 3650 | 0, 0, 0); |
3599 | BUG_ON(err); | 3651 | if (err) |
3652 | break; | ||
3600 | 3653 | ||
3601 | btrfs_drop_extent_cache(inode, hole_start, | 3654 | btrfs_drop_extent_cache(inode, hole_start, |
3602 | last_byte - 1, 0); | 3655 | last_byte - 1, 0); |
@@ -3616,81 +3669,41 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) | |||
3616 | return err; | 3669 | return err; |
3617 | } | 3670 | } |
3618 | 3671 | ||
3619 | static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) | 3672 | static int btrfs_setsize(struct inode *inode, loff_t newsize) |
3620 | { | 3673 | { |
3621 | struct btrfs_root *root = BTRFS_I(inode)->root; | 3674 | loff_t oldsize = i_size_read(inode); |
3622 | struct btrfs_trans_handle *trans; | ||
3623 | unsigned long nr; | ||
3624 | int ret; | 3675 | int ret; |
3625 | 3676 | ||
3626 | if (attr->ia_size == inode->i_size) | 3677 | if (newsize == oldsize) |
3627 | return 0; | 3678 | return 0; |
3628 | 3679 | ||
3629 | if (attr->ia_size > inode->i_size) { | 3680 | if (newsize > oldsize) { |
3630 | unsigned long limit; | 3681 | i_size_write(inode, newsize); |
3631 | limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; | 3682 | btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); |
3632 | if (attr->ia_size > inode->i_sb->s_maxbytes) | 3683 | truncate_pagecache(inode, oldsize, newsize); |
3633 | return -EFBIG; | 3684 | ret = btrfs_cont_expand(inode, oldsize, newsize); |
3634 | if (limit != RLIM_INFINITY && attr->ia_size > limit) { | ||
3635 | send_sig(SIGXFSZ, current, 0); | ||
3636 | return -EFBIG; | ||
3637 | } | ||
3638 | } | ||
3639 | |||
3640 | trans = btrfs_start_transaction(root, 5); | ||
3641 | if (IS_ERR(trans)) | ||
3642 | return PTR_ERR(trans); | ||
3643 | |||
3644 | btrfs_set_trans_block_group(trans, inode); | ||
3645 | |||
3646 | ret = btrfs_orphan_add(trans, inode); | ||
3647 | BUG_ON(ret); | ||
3648 | |||
3649 | nr = trans->blocks_used; | ||
3650 | btrfs_end_transaction(trans, root); | ||
3651 | btrfs_btree_balance_dirty(root, nr); | ||
3652 | |||
3653 | if (attr->ia_size > inode->i_size) { | ||
3654 | ret = btrfs_cont_expand(inode, attr->ia_size); | ||
3655 | if (ret) { | 3685 | if (ret) { |
3656 | btrfs_truncate(inode); | 3686 | btrfs_setsize(inode, oldsize); |
3657 | return ret; | 3687 | return ret; |
3658 | } | 3688 | } |
3659 | 3689 | ||
3660 | i_size_write(inode, attr->ia_size); | 3690 | mark_inode_dirty(inode); |
3661 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); | 3691 | } else { |
3662 | 3692 | ||
3663 | trans = btrfs_start_transaction(root, 0); | 3693 | /* |
3664 | BUG_ON(IS_ERR(trans)); | 3694 | * We're truncating a file that used to have good data down to |
3665 | btrfs_set_trans_block_group(trans, inode); | 3695 | * zero. Make sure it gets into the ordered flush list so that |
3666 | trans->block_rsv = root->orphan_block_rsv; | 3696 | * any new writes get down to disk quickly. |
3667 | BUG_ON(!trans->block_rsv); | 3697 | */ |
3698 | if (newsize == 0) | ||
3699 | BTRFS_I(inode)->ordered_data_close = 1; | ||
3668 | 3700 | ||
3669 | ret = btrfs_update_inode(trans, root, inode); | 3701 | /* we don't support swapfiles, so vmtruncate shouldn't fail */ |
3670 | BUG_ON(ret); | 3702 | truncate_setsize(inode, newsize); |
3671 | if (inode->i_nlink > 0) { | 3703 | ret = btrfs_truncate(inode); |
3672 | ret = btrfs_orphan_del(trans, inode); | ||
3673 | BUG_ON(ret); | ||
3674 | } | ||
3675 | nr = trans->blocks_used; | ||
3676 | btrfs_end_transaction(trans, root); | ||
3677 | btrfs_btree_balance_dirty(root, nr); | ||
3678 | return 0; | ||
3679 | } | 3704 | } |
3680 | 3705 | ||
3681 | /* | 3706 | return ret; |
3682 | * We're truncating a file that used to have good data down to | ||
3683 | * zero. Make sure it gets into the ordered flush list so that | ||
3684 | * any new writes get down to disk quickly. | ||
3685 | */ | ||
3686 | if (attr->ia_size == 0) | ||
3687 | BTRFS_I(inode)->ordered_data_close = 1; | ||
3688 | |||
3689 | /* we don't support swapfiles, so vmtruncate shouldn't fail */ | ||
3690 | ret = vmtruncate(inode, attr->ia_size); | ||
3691 | BUG_ON(ret); | ||
3692 | |||
3693 | return 0; | ||
3694 | } | 3707 | } |
3695 | 3708 | ||
3696 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) | 3709 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) |
@@ -3707,7 +3720,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) | |||
3707 | return err; | 3720 | return err; |
3708 | 3721 | ||
3709 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { | 3722 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
3710 | err = btrfs_setattr_size(inode, attr); | 3723 | err = btrfs_setsize(inode, attr->ia_size); |
3711 | if (err) | 3724 | if (err) |
3712 | return err; | 3725 | return err; |
3713 | } | 3726 | } |
@@ -3730,6 +3743,8 @@ void btrfs_evict_inode(struct inode *inode) | |||
3730 | unsigned long nr; | 3743 | unsigned long nr; |
3731 | int ret; | 3744 | int ret; |
3732 | 3745 | ||
3746 | trace_btrfs_inode_evict(inode); | ||
3747 | |||
3733 | truncate_inode_pages(&inode->i_data, 0); | 3748 | truncate_inode_pages(&inode->i_data, 0); |
3734 | if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || | 3749 | if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || |
3735 | root == root->fs_info->tree_root)) | 3750 | root == root->fs_info->tree_root)) |
@@ -4072,7 +4087,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | |||
4072 | BTRFS_I(inode)->root = root; | 4087 | BTRFS_I(inode)->root = root; |
4073 | memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); | 4088 | memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); |
4074 | btrfs_read_locked_inode(inode); | 4089 | btrfs_read_locked_inode(inode); |
4075 | |||
4076 | inode_tree_add(inode); | 4090 | inode_tree_add(inode); |
4077 | unlock_new_inode(inode); | 4091 | unlock_new_inode(inode); |
4078 | if (new) | 4092 | if (new) |
@@ -4147,8 +4161,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) | |||
4147 | if (!IS_ERR(inode) && root != sub_root) { | 4161 | if (!IS_ERR(inode) && root != sub_root) { |
4148 | down_read(&root->fs_info->cleanup_work_sem); | 4162 | down_read(&root->fs_info->cleanup_work_sem); |
4149 | if (!(inode->i_sb->s_flags & MS_RDONLY)) | 4163 | if (!(inode->i_sb->s_flags & MS_RDONLY)) |
4150 | btrfs_orphan_cleanup(sub_root); | 4164 | ret = btrfs_orphan_cleanup(sub_root); |
4151 | up_read(&root->fs_info->cleanup_work_sem); | 4165 | up_read(&root->fs_info->cleanup_work_sem); |
4166 | if (ret) | ||
4167 | inode = ERR_PTR(ret); | ||
4152 | } | 4168 | } |
4153 | 4169 | ||
4154 | return inode; | 4170 | return inode; |
@@ -4282,6 +4298,9 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
4282 | while (di_cur < di_total) { | 4298 | while (di_cur < di_total) { |
4283 | struct btrfs_key location; | 4299 | struct btrfs_key location; |
4284 | 4300 | ||
4301 | if (verify_dir_item(root, leaf, di)) | ||
4302 | break; | ||
4303 | |||
4285 | name_len = btrfs_dir_name_len(leaf, di); | 4304 | name_len = btrfs_dir_name_len(leaf, di); |
4286 | if (name_len <= sizeof(tmp_name)) { | 4305 | if (name_len <= sizeof(tmp_name)) { |
4287 | name_ptr = tmp_name; | 4306 | name_ptr = tmp_name; |
@@ -4517,6 +4536,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
4517 | return ERR_PTR(-ENOMEM); | 4536 | return ERR_PTR(-ENOMEM); |
4518 | 4537 | ||
4519 | if (dir) { | 4538 | if (dir) { |
4539 | trace_btrfs_inode_request(dir); | ||
4540 | |||
4520 | ret = btrfs_set_inode_index(dir, index); | 4541 | ret = btrfs_set_inode_index(dir, index); |
4521 | if (ret) { | 4542 | if (ret) { |
4522 | iput(inode); | 4543 | iput(inode); |
@@ -4585,12 +4606,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
4585 | if ((mode & S_IFREG)) { | 4606 | if ((mode & S_IFREG)) { |
4586 | if (btrfs_test_opt(root, NODATASUM)) | 4607 | if (btrfs_test_opt(root, NODATASUM)) |
4587 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; | 4608 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; |
4588 | if (btrfs_test_opt(root, NODATACOW)) | 4609 | if (btrfs_test_opt(root, NODATACOW) || |
4610 | (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW)) | ||
4589 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; | 4611 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; |
4590 | } | 4612 | } |
4591 | 4613 | ||
4592 | insert_inode_hash(inode); | 4614 | insert_inode_hash(inode); |
4593 | inode_tree_add(inode); | 4615 | inode_tree_add(inode); |
4616 | |||
4617 | trace_btrfs_inode_new(inode); | ||
4618 | |||
4594 | return inode; | 4619 | return inode; |
4595 | fail: | 4620 | fail: |
4596 | if (dir) | 4621 | if (dir) |
@@ -4809,7 +4834,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |||
4809 | 4834 | ||
4810 | /* do not allow sys_link's with other subvols of the same device */ | 4835 | /* do not allow sys_link's with other subvols of the same device */ |
4811 | if (root->objectid != BTRFS_I(inode)->root->objectid) | 4836 | if (root->objectid != BTRFS_I(inode)->root->objectid) |
4812 | return -EPERM; | 4837 | return -EXDEV; |
4838 | |||
4839 | if (inode->i_nlink == ~0U) | ||
4840 | return -EMLINK; | ||
4813 | 4841 | ||
4814 | btrfs_inc_nlink(inode); | 4842 | btrfs_inc_nlink(inode); |
4815 | inode->i_ctime = CURRENT_TIME; | 4843 | inode->i_ctime = CURRENT_TIME; |
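With this hunk, a hard link across subvolumes of the same filesystem fails with EXDEV (the error userspace already expects for cross-mount links, so tools fall back to copying) instead of EPERM, and a saturated link count fails with EMLINK. A small userspace illustration; the paths are hypothetical:

    /* cc -o linktest linktest.c */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	/* hypothetical paths: source and target are in different subvolumes */
    	if (link("/mnt/btrfs/subvol_a/file", "/mnt/btrfs/subvol_b/link") != 0) {
    		if (errno == EXDEV)
    			puts("cross-subvolume link refused; fall back to copying");
    		else if (errno == EMLINK)
    			puts("source already has the maximum number of links");
    		else
    			printf("link failed: %s\n", strerror(errno));
    	}
    	return 0;
    }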
@@ -5265,6 +5293,9 @@ insert: | |||
5265 | } | 5293 | } |
5266 | write_unlock(&em_tree->lock); | 5294 | write_unlock(&em_tree->lock); |
5267 | out: | 5295 | out: |
5296 | |||
5297 | trace_btrfs_get_extent(root, em); | ||
5298 | |||
5268 | if (path) | 5299 | if (path) |
5269 | btrfs_free_path(path); | 5300 | btrfs_free_path(path); |
5270 | if (trans) { | 5301 | if (trans) { |
@@ -5748,6 +5779,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) | |||
5748 | 5779 | ||
5749 | kfree(dip->csums); | 5780 | kfree(dip->csums); |
5750 | kfree(dip); | 5781 | kfree(dip); |
5782 | |||
5783 | /* If we had a csum failure make sure to clear the uptodate flag */ | ||
5784 | if (err) | ||
5785 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | ||
5751 | dio_end_io(bio, err); | 5786 | dio_end_io(bio, err); |
5752 | } | 5787 | } |
5753 | 5788 | ||
@@ -5849,6 +5884,10 @@ out_done: | |||
5849 | 5884 | ||
5850 | kfree(dip->csums); | 5885 | kfree(dip->csums); |
5851 | kfree(dip); | 5886 | kfree(dip); |
5887 | |||
5888 | /* If we had an error make sure to clear the uptodate flag */ | ||
5889 | if (err) | ||
5890 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | ||
5852 | dio_end_io(bio, err); | 5891 | dio_end_io(bio, err); |
5853 | } | 5892 | } |
5854 | 5893 | ||
@@ -5922,9 +5961,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |||
5922 | __btrfs_submit_bio_start_direct_io, | 5961 | __btrfs_submit_bio_start_direct_io, |
5923 | __btrfs_submit_bio_done); | 5962 | __btrfs_submit_bio_done); |
5924 | goto err; | 5963 | goto err; |
5925 | } else if (!skip_sum) | 5964 | } else if (!skip_sum) { |
5926 | btrfs_lookup_bio_sums_dio(root, inode, bio, | 5965 | ret = btrfs_lookup_bio_sums_dio(root, inode, bio, |
5927 | file_offset, csums); | 5966 | file_offset, csums); |
5967 | if (ret) | ||
5968 | goto err; | ||
5969 | } | ||
5928 | 5970 | ||
5929 | ret = btrfs_map_bio(root, rw, bio, 0, 1); | 5971 | ret = btrfs_map_bio(root, rw, bio, 0, 1); |
5930 | err: | 5972 | err: |
@@ -5948,6 +5990,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
5948 | int nr_pages = 0; | 5990 | int nr_pages = 0; |
5949 | u32 *csums = dip->csums; | 5991 | u32 *csums = dip->csums; |
5950 | int ret = 0; | 5992 | int ret = 0; |
5993 | int write = rw & REQ_WRITE; | ||
5951 | 5994 | ||
5952 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); | 5995 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); |
5953 | if (!bio) | 5996 | if (!bio) |
@@ -5984,7 +6027,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
5984 | goto out_err; | 6027 | goto out_err; |
5985 | } | 6028 | } |
5986 | 6029 | ||
5987 | if (!skip_sum) | 6030 | /* Writes use the ordered csums */
6031 | if (!write && !skip_sum) | ||
5988 | csums = csums + nr_pages; | 6032 | csums = csums + nr_pages; |
5989 | start_sector += submit_len >> 9; | 6033 | start_sector += submit_len >> 9; |
5990 | file_offset += submit_len; | 6034 | file_offset += submit_len; |
@@ -6052,7 +6096,8 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, | |||
6052 | } | 6096 | } |
6053 | dip->csums = NULL; | 6097 | dip->csums = NULL; |
6054 | 6098 | ||
6055 | if (!skip_sum) { | 6099 | /* Writes use the ordered csum stuff, so we don't need dip->csums */
6100 | if (!write && !skip_sum) { | ||
6056 | dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); | 6101 | dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); |
6057 | if (!dip->csums) { | 6102 | if (!dip->csums) { |
6058 | kfree(dip); | 6103 | kfree(dip); |
@@ -6474,28 +6519,42 @@ out: | |||
6474 | return ret; | 6519 | return ret; |
6475 | } | 6520 | } |
6476 | 6521 | ||
6477 | static void btrfs_truncate(struct inode *inode) | 6522 | static int btrfs_truncate(struct inode *inode) |
6478 | { | 6523 | { |
6479 | struct btrfs_root *root = BTRFS_I(inode)->root; | 6524 | struct btrfs_root *root = BTRFS_I(inode)->root; |
6480 | int ret; | 6525 | int ret; |
6526 | int err = 0; | ||
6481 | struct btrfs_trans_handle *trans; | 6527 | struct btrfs_trans_handle *trans; |
6482 | unsigned long nr; | 6528 | unsigned long nr; |
6483 | u64 mask = root->sectorsize - 1; | 6529 | u64 mask = root->sectorsize - 1; |
6484 | 6530 | ||
6485 | if (!S_ISREG(inode->i_mode)) { | ||
6486 | WARN_ON(1); | ||
6487 | return; | ||
6488 | } | ||
6489 | |||
6490 | ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); | 6531 | ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); |
6491 | if (ret) | 6532 | if (ret) |
6492 | return; | 6533 | return ret; |
6493 | 6534 | ||
6494 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); | 6535 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); |
6495 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); | 6536 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); |
6496 | 6537 | ||
6538 | trans = btrfs_start_transaction(root, 5); | ||
6539 | if (IS_ERR(trans)) | ||
6540 | return PTR_ERR(trans); | ||
6541 | |||
6542 | btrfs_set_trans_block_group(trans, inode); | ||
6543 | |||
6544 | ret = btrfs_orphan_add(trans, inode); | ||
6545 | if (ret) { | ||
6546 | btrfs_end_transaction(trans, root); | ||
6547 | return ret; | ||
6548 | } | ||
6549 | |||
6550 | nr = trans->blocks_used; | ||
6551 | btrfs_end_transaction(trans, root); | ||
6552 | btrfs_btree_balance_dirty(root, nr); | ||
6553 | |||
6554 | /* Now start a transaction for the truncate */ | ||
6497 | trans = btrfs_start_transaction(root, 0); | 6555 | trans = btrfs_start_transaction(root, 0); |
6498 | BUG_ON(IS_ERR(trans)); | 6556 | if (IS_ERR(trans)) |
6557 | return PTR_ERR(trans); | ||
6499 | btrfs_set_trans_block_group(trans, inode); | 6558 | btrfs_set_trans_block_group(trans, inode); |
6500 | trans->block_rsv = root->orphan_block_rsv; | 6559 | trans->block_rsv = root->orphan_block_rsv; |
6501 | 6560 | ||
@@ -6522,29 +6581,38 @@ static void btrfs_truncate(struct inode *inode) | |||
6522 | while (1) { | 6581 | while (1) { |
6523 | if (!trans) { | 6582 | if (!trans) { |
6524 | trans = btrfs_start_transaction(root, 0); | 6583 | trans = btrfs_start_transaction(root, 0); |
6525 | BUG_ON(IS_ERR(trans)); | 6584 | if (IS_ERR(trans)) |
6585 | return PTR_ERR(trans); | ||
6526 | btrfs_set_trans_block_group(trans, inode); | 6586 | btrfs_set_trans_block_group(trans, inode); |
6527 | trans->block_rsv = root->orphan_block_rsv; | 6587 | trans->block_rsv = root->orphan_block_rsv; |
6528 | } | 6588 | } |
6529 | 6589 | ||
6530 | ret = btrfs_block_rsv_check(trans, root, | 6590 | ret = btrfs_block_rsv_check(trans, root, |
6531 | root->orphan_block_rsv, 0, 5); | 6591 | root->orphan_block_rsv, 0, 5); |
6532 | if (ret) { | 6592 | if (ret == -EAGAIN) { |
6533 | BUG_ON(ret != -EAGAIN); | ||
6534 | ret = btrfs_commit_transaction(trans, root); | 6593 | ret = btrfs_commit_transaction(trans, root); |
6535 | BUG_ON(ret); | 6594 | if (ret) |
6595 | return ret; | ||
6536 | trans = NULL; | 6596 | trans = NULL; |
6537 | continue; | 6597 | continue; |
6598 | } else if (ret) { | ||
6599 | err = ret; | ||
6600 | break; | ||
6538 | } | 6601 | } |
6539 | 6602 | ||
6540 | ret = btrfs_truncate_inode_items(trans, root, inode, | 6603 | ret = btrfs_truncate_inode_items(trans, root, inode, |
6541 | inode->i_size, | 6604 | inode->i_size, |
6542 | BTRFS_EXTENT_DATA_KEY); | 6605 | BTRFS_EXTENT_DATA_KEY); |
6543 | if (ret != -EAGAIN) | 6606 | if (ret != -EAGAIN) { |
6607 | err = ret; | ||
6544 | break; | 6608 | break; |
6609 | } | ||
6545 | 6610 | ||
6546 | ret = btrfs_update_inode(trans, root, inode); | 6611 | ret = btrfs_update_inode(trans, root, inode); |
6547 | BUG_ON(ret); | 6612 | if (ret) { |
6613 | err = ret; | ||
6614 | break; | ||
6615 | } | ||
6548 | 6616 | ||
6549 | nr = trans->blocks_used; | 6617 | nr = trans->blocks_used; |
6550 | btrfs_end_transaction(trans, root); | 6618 | btrfs_end_transaction(trans, root); |
@@ -6554,16 +6622,27 @@ static void btrfs_truncate(struct inode *inode) | |||
6554 | 6622 | ||
6555 | if (ret == 0 && inode->i_nlink > 0) { | 6623 | if (ret == 0 && inode->i_nlink > 0) { |
6556 | ret = btrfs_orphan_del(trans, inode); | 6624 | ret = btrfs_orphan_del(trans, inode); |
6557 | BUG_ON(ret); | 6625 | if (ret) |
6626 | err = ret; | ||
6627 | } else if (ret && inode->i_nlink > 0) { | ||
6628 | /* | ||
6629 | * Failed to do the truncate, remove us from the in memory | ||
6630 | * orphan list. | ||
6631 | */ | ||
6632 | ret = btrfs_orphan_del(NULL, inode); | ||
6558 | } | 6633 | } |
6559 | 6634 | ||
6560 | ret = btrfs_update_inode(trans, root, inode); | 6635 | ret = btrfs_update_inode(trans, root, inode); |
6561 | BUG_ON(ret); | 6636 | if (ret && !err) |
6637 | err = ret; | ||
6562 | 6638 | ||
6563 | nr = trans->blocks_used; | 6639 | nr = trans->blocks_used; |
6564 | ret = btrfs_end_transaction_throttle(trans, root); | 6640 | ret = btrfs_end_transaction_throttle(trans, root); |
6565 | BUG_ON(ret); | 6641 | if (ret && !err) |
6642 | err = ret; | ||
6566 | btrfs_btree_balance_dirty(root, nr); | 6643 | btrfs_btree_balance_dirty(root, nr); |
6644 | |||
6645 | return err; | ||
6567 | } | 6646 | } |
6568 | 6647 | ||
6569 | /* | 6648 | /* |
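The rewritten btrfs_truncate() keeps the same loop but turns every BUG_ON() into error propagation: -EAGAIN from the reservation check means commit and retry, -EAGAIN from the item drop means end the transaction and go around again, and anything else is recorded in err and breaks out. The control flow as a skeleton (drop_some_items() stands in for btrfs_truncate_inode_items(); the block-group, block_rsv and inode-update calls are elided):

    while (1) {
    	if (!trans) {
    		trans = btrfs_start_transaction(root, 0);
    		if (IS_ERR(trans))
    			return PTR_ERR(trans);
    	}

    	ret = btrfs_block_rsv_check(trans, root, root->orphan_block_rsv, 0, 5);
    	if (ret == -EAGAIN) {		/* reservation too low: commit, retry */
    		ret = btrfs_commit_transaction(trans, root);
    		if (ret)
    			return ret;
    		trans = NULL;
    		continue;
    	} else if (ret) {
    		err = ret;
    		break;
    	}

    	ret = drop_some_items(trans);	/* btrfs_truncate_inode_items() here */
    	if (ret != -EAGAIN) {		/* finished, or a hard error */
    		err = ret;
    		break;
    	}

    	nr = trans->blocks_used;
    	btrfs_end_transaction(trans, root);
    	trans = NULL;
    	btrfs_btree_balance_dirty(root, nr);
    }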
@@ -6630,9 +6709,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) | |||
6630 | ei->index_cnt = (u64)-1; | 6709 | ei->index_cnt = (u64)-1; |
6631 | ei->last_unlink_trans = 0; | 6710 | ei->last_unlink_trans = 0; |
6632 | 6711 | ||
6633 | spin_lock_init(&ei->accounting_lock); | ||
6634 | atomic_set(&ei->outstanding_extents, 0); | 6712 | atomic_set(&ei->outstanding_extents, 0); |
6635 | ei->reserved_extents = 0; | 6713 | atomic_set(&ei->reserved_extents, 0); |
6636 | 6714 | ||
6637 | ei->ordered_data_close = 0; | 6715 | ei->ordered_data_close = 0; |
6638 | ei->orphan_meta_reserved = 0; | 6716 | ei->orphan_meta_reserved = 0; |
@@ -6668,7 +6746,7 @@ void btrfs_destroy_inode(struct inode *inode) | |||
6668 | WARN_ON(!list_empty(&inode->i_dentry)); | 6746 | WARN_ON(!list_empty(&inode->i_dentry)); |
6669 | WARN_ON(inode->i_data.nrpages); | 6747 | WARN_ON(inode->i_data.nrpages); |
6670 | WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents)); | 6748 | WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents)); |
6671 | WARN_ON(BTRFS_I(inode)->reserved_extents); | 6749 | WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents)); |
6672 | 6750 | ||
6673 | /* | 6751 | /* |
6674 | * This can happen where we create an inode, but somebody else also | 6752 | * This can happen where we create an inode, but somebody else also |
@@ -6760,6 +6838,8 @@ void btrfs_destroy_cachep(void) | |||
6760 | kmem_cache_destroy(btrfs_transaction_cachep); | 6838 | kmem_cache_destroy(btrfs_transaction_cachep); |
6761 | if (btrfs_path_cachep) | 6839 | if (btrfs_path_cachep) |
6762 | kmem_cache_destroy(btrfs_path_cachep); | 6840 | kmem_cache_destroy(btrfs_path_cachep); |
6841 | if (btrfs_free_space_cachep) | ||
6842 | kmem_cache_destroy(btrfs_free_space_cachep); | ||
6763 | } | 6843 | } |
6764 | 6844 | ||
6765 | int btrfs_init_cachep(void) | 6845 | int btrfs_init_cachep(void) |
@@ -6788,6 +6868,12 @@ int btrfs_init_cachep(void) | |||
6788 | if (!btrfs_path_cachep) | 6868 | if (!btrfs_path_cachep) |
6789 | goto fail; | 6869 | goto fail; |
6790 | 6870 | ||
6871 | btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache", | ||
6872 | sizeof(struct btrfs_free_space), 0, | ||
6873 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | ||
6874 | if (!btrfs_free_space_cachep) | ||
6875 | goto fail; | ||
6876 | |||
6791 | return 0; | 6877 | return 0; |
6792 | fail: | 6878 | fail: |
6793 | btrfs_destroy_cachep(); | 6879 | btrfs_destroy_cachep(); |
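The new cache presumably lets free-space entries (struct btrfs_free_space) come from their own slab instead of generic kmalloc(), which helps reclaim accounting and slab debugging. The usual lifecycle of such a cache, as an illustrative module-style sketch (the struct and names here are made up, not the btrfs ones):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_entry {
    	u64 offset;
    	u64 bytes;
    };

    static struct kmem_cache *my_entry_cachep;

    static int my_cache_init(void)
    {
    	my_entry_cachep = kmem_cache_create("my_entry_cache",
    			sizeof(struct my_entry), 0,
    			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
    	return my_entry_cachep ? 0 : -ENOMEM;
    }

    static void my_cache_exit(void)
    {
    	if (my_entry_cachep)
    		kmem_cache_destroy(my_entry_cachep);
    }

    /* allocations then pair up as:
     *	e = kmem_cache_zalloc(my_entry_cachep, GFP_NOFS);
     *	kmem_cache_free(my_entry_cachep, e);
     */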
@@ -6806,6 +6892,26 @@ static int btrfs_getattr(struct vfsmount *mnt, | |||
6806 | return 0; | 6892 | return 0; |
6807 | } | 6893 | } |
6808 | 6894 | ||
6895 | /* | ||
6896 | * If a file is moved, it will inherit the cow and compression flags of the new | ||
6897 | * directory. | ||
6898 | */ | ||
6899 | static void fixup_inode_flags(struct inode *dir, struct inode *inode) | ||
6900 | { | ||
6901 | struct btrfs_inode *b_dir = BTRFS_I(dir); | ||
6902 | struct btrfs_inode *b_inode = BTRFS_I(inode); | ||
6903 | |||
6904 | if (b_dir->flags & BTRFS_INODE_NODATACOW) | ||
6905 | b_inode->flags |= BTRFS_INODE_NODATACOW; | ||
6906 | else | ||
6907 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; | ||
6908 | |||
6909 | if (b_dir->flags & BTRFS_INODE_COMPRESS) | ||
6910 | b_inode->flags |= BTRFS_INODE_COMPRESS; | ||
6911 | else | ||
6912 | b_inode->flags &= ~BTRFS_INODE_COMPRESS; | ||
6913 | } | ||
6914 | |||
6809 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | 6915 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
6810 | struct inode *new_dir, struct dentry *new_dentry) | 6916 | struct inode *new_dir, struct dentry *new_dentry) |
6811 | { | 6917 | { |
@@ -6908,11 +7014,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
6908 | old_dentry->d_name.name, | 7014 | old_dentry->d_name.name, |
6909 | old_dentry->d_name.len); | 7015 | old_dentry->d_name.len); |
6910 | } else { | 7016 | } else { |
6911 | btrfs_inc_nlink(old_dentry->d_inode); | 7017 | ret = __btrfs_unlink_inode(trans, root, old_dir, |
6912 | ret = btrfs_unlink_inode(trans, root, old_dir, | 7018 | old_dentry->d_inode, |
6913 | old_dentry->d_inode, | 7019 | old_dentry->d_name.name, |
6914 | old_dentry->d_name.name, | 7020 | old_dentry->d_name.len); |
6915 | old_dentry->d_name.len); | 7021 | if (!ret) |
7022 | ret = btrfs_update_inode(trans, root, old_inode); | ||
6916 | } | 7023 | } |
6917 | BUG_ON(ret); | 7024 | BUG_ON(ret); |
6918 | 7025 | ||
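The old rename path bumped the link count with btrfs_inc_nlink() only so that btrfs_unlink_inode() could drop it again; with the __btrfs_unlink_inode()/btrfs_unlink_inode() split shown earlier, rename now calls the core helper directly, which removes the directory entry without touching i_nlink, and then writes the inode back once. In outline:

    /* before: net-zero i_nlink dance */
    btrfs_inc_nlink(old_inode);				/* +1 */
    ret = btrfs_unlink_inode(trans, root, old_dir, old_inode,
    			 name, name_len);		/* removes entry, then -1 */

    /* after: remove the entry only, then persist the inode */
    ret = __btrfs_unlink_inode(trans, root, old_dir, old_inode,
    			   name, name_len);
    if (!ret)
    	ret = btrfs_update_inode(trans, root, old_inode);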
@@ -6939,6 +7046,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
6939 | } | 7046 | } |
6940 | } | 7047 | } |
6941 | 7048 | ||
7049 | fixup_inode_flags(new_dir, old_inode); | ||
7050 | |||
6942 | ret = btrfs_add_link(trans, new_dir, old_inode, | 7051 | ret = btrfs_add_link(trans, new_dir, old_inode, |
6943 | new_dentry->d_name.name, | 7052 | new_dentry->d_name.name, |
6944 | new_dentry->d_name.len, 0, index); | 7053 | new_dentry->d_name.len, 0, index); |
@@ -7355,7 +7464,6 @@ static const struct address_space_operations btrfs_symlink_aops = { | |||
7355 | }; | 7464 | }; |
7356 | 7465 | ||
7357 | static const struct inode_operations btrfs_file_inode_operations = { | 7466 | static const struct inode_operations btrfs_file_inode_operations = { |
7358 | .truncate = btrfs_truncate, | ||
7359 | .getattr = btrfs_getattr, | 7467 | .getattr = btrfs_getattr, |
7360 | .setattr = btrfs_setattr, | 7468 | .setattr = btrfs_setattr, |
7361 | .setxattr = btrfs_setxattr, | 7469 | .setxattr = btrfs_setxattr, |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d1bace3df9b6..7c07fe26b7cf 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/xattr.h> | 40 | #include <linux/xattr.h> |
41 | #include <linux/vmalloc.h> | 41 | #include <linux/vmalloc.h> |
42 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
43 | #include <linux/blkdev.h> | ||
43 | #include "compat.h" | 44 | #include "compat.h" |
44 | #include "ctree.h" | 45 | #include "ctree.h" |
45 | #include "disk-io.h" | 46 | #include "disk-io.h" |
@@ -138,6 +139,24 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg) | |||
138 | return 0; | 139 | return 0; |
139 | } | 140 | } |
140 | 141 | ||
142 | static int check_flags(unsigned int flags) | ||
143 | { | ||
144 | if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ | ||
145 | FS_NOATIME_FL | FS_NODUMP_FL | \ | ||
146 | FS_SYNC_FL | FS_DIRSYNC_FL | \ | ||
147 | FS_NOCOMP_FL | FS_COMPR_FL | \ | ||
148 | FS_NOCOW_FL | FS_COW_FL)) | ||
149 | return -EOPNOTSUPP; | ||
150 | |||
151 | if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) | ||
152 | return -EINVAL; | ||
153 | |||
154 | if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) | ||
155 | return -EINVAL; | ||
156 | |||
157 | return 0; | ||
158 | } | ||
159 | |||
141 | static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | 160 | static int btrfs_ioctl_setflags(struct file *file, void __user *arg) |
142 | { | 161 | { |
143 | struct inode *inode = file->f_path.dentry->d_inode; | 162 | struct inode *inode = file->f_path.dentry->d_inode; |
@@ -153,10 +172,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
153 | if (copy_from_user(&flags, arg, sizeof(flags))) | 172 | if (copy_from_user(&flags, arg, sizeof(flags))) |
154 | return -EFAULT; | 173 | return -EFAULT; |
155 | 174 | ||
156 | if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ | 175 | ret = check_flags(flags); |
157 | FS_NOATIME_FL | FS_NODUMP_FL | \ | 176 | if (ret) |
158 | FS_SYNC_FL | FS_DIRSYNC_FL)) | 177 | return ret; |
159 | return -EOPNOTSUPP; | ||
160 | 178 | ||
161 | if (!inode_owner_or_capable(inode)) | 179 | if (!inode_owner_or_capable(inode)) |
162 | return -EACCES; | 180 | return -EACCES; |
@@ -201,6 +219,22 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
201 | else | 219 | else |
202 | ip->flags &= ~BTRFS_INODE_DIRSYNC; | 220 | ip->flags &= ~BTRFS_INODE_DIRSYNC; |
203 | 221 | ||
222 | /* | ||
223 | * The COMPRESS flag can only be changed by users, while the NOCOMPRESS | ||
224 | * flag may be changed automatically if the compression code won't make | ||
225 | * things smaller. | ||
226 | */ | ||
227 | if (flags & FS_NOCOMP_FL) { | ||
228 | ip->flags &= ~BTRFS_INODE_COMPRESS; | ||
229 | ip->flags |= BTRFS_INODE_NOCOMPRESS; | ||
230 | } else if (flags & FS_COMPR_FL) { | ||
231 | ip->flags |= BTRFS_INODE_COMPRESS; | ||
232 | ip->flags &= ~BTRFS_INODE_NOCOMPRESS; | ||
233 | } | ||
234 | if (flags & FS_NOCOW_FL) | ||
235 | ip->flags |= BTRFS_INODE_NODATACOW; | ||
236 | else if (flags & FS_COW_FL) | ||
237 | ip->flags &= ~BTRFS_INODE_NODATACOW; | ||
204 | 238 | ||
205 | trans = btrfs_join_transaction(root, 1); | 239 | trans = btrfs_join_transaction(root, 1); |
206 | BUG_ON(IS_ERR(trans)); | 240 | BUG_ON(IS_ERR(trans)); |
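Together with check_flags() above, this lets ordinary userspace toggle per-file compression and data COW through the standard FS_IOC_SETFLAGS interface (the same path chattr uses). A sketch of setting the compression bit from userspace; FS_COMPR_FL and FS_NOCOMP_FL are long-standing flags in linux/fs.h, while the FS_NOCOW_FL/FS_COW_FL names come from this series and may not be present in every header, so they are left out of the code:

    /* cc -o setcompr setcompr.c; point it at a file on btrfs */
    #include <fcntl.h>
    #include <linux/fs.h>		/* FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_*_FL */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
    	int fd, flags;

    	if (argc != 2)
    		return 1;
    	fd = open(argv[1], O_RDONLY);
    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
    		flags |= FS_COMPR_FL;	/* request compression */
    		flags &= ~FS_NOCOMP_FL;	/* both bits set would be -EINVAL */
    		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
    			perror("FS_IOC_SETFLAGS");
    	}
    	close(fd);
    	return 0;
    }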
@@ -213,9 +247,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) | |||
213 | btrfs_end_transaction(trans, root); | 247 | btrfs_end_transaction(trans, root); |
214 | 248 | ||
215 | mnt_drop_write(file->f_path.mnt); | 249 | mnt_drop_write(file->f_path.mnt); |
250 | |||
251 | ret = 0; | ||
216 | out_unlock: | 252 | out_unlock: |
217 | mutex_unlock(&inode->i_mutex); | 253 | mutex_unlock(&inode->i_mutex); |
218 | return 0; | 254 | return ret; |
219 | } | 255 | } |
220 | 256 | ||
221 | static int btrfs_ioctl_getversion(struct file *file, int __user *arg) | 257 | static int btrfs_ioctl_getversion(struct file *file, int __user *arg) |
@@ -225,6 +261,49 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg) | |||
225 | return put_user(inode->i_generation, arg); | 261 | return put_user(inode->i_generation, arg); |
226 | } | 262 | } |
227 | 263 | ||
264 | static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) | ||
265 | { | ||
266 | struct btrfs_root *root = fdentry(file)->d_sb->s_fs_info; | ||
267 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
268 | struct btrfs_device *device; | ||
269 | struct request_queue *q; | ||
270 | struct fstrim_range range; | ||
271 | u64 minlen = ULLONG_MAX; | ||
272 | u64 num_devices = 0; | ||
273 | int ret; | ||
274 | |||
275 | if (!capable(CAP_SYS_ADMIN)) | ||
276 | return -EPERM; | ||
277 | |||
278 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | ||
279 | list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { | ||
280 | if (!device->bdev) | ||
281 | continue; | ||
282 | q = bdev_get_queue(device->bdev); | ||
283 | if (blk_queue_discard(q)) { | ||
284 | num_devices++; | ||
285 | minlen = min((u64)q->limits.discard_granularity, | ||
286 | minlen); | ||
287 | } | ||
288 | } | ||
289 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | ||
290 | if (!num_devices) | ||
291 | return -EOPNOTSUPP; | ||
292 | |||
293 | if (copy_from_user(&range, arg, sizeof(range))) | ||
294 | return -EFAULT; | ||
295 | |||
296 | range.minlen = max(range.minlen, minlen); | ||
297 | ret = btrfs_trim_fs(root, &range); | ||
298 | if (ret < 0) | ||
299 | return ret; | ||
300 | |||
301 | if (copy_to_user(arg, &range, sizeof(range))) | ||
302 | return -EFAULT; | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
228 | static noinline int create_subvol(struct btrfs_root *root, | 307 | static noinline int create_subvol(struct btrfs_root *root, |
229 | struct dentry *dentry, | 308 | struct dentry *dentry, |
230 | char *name, int namelen, | 309 | char *name, int namelen, |
@@ -409,7 +488,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, | |||
409 | if (ret) | 488 | if (ret) |
410 | goto fail; | 489 | goto fail; |
411 | 490 | ||
412 | btrfs_orphan_cleanup(pending_snapshot->snap); | 491 | ret = btrfs_orphan_cleanup(pending_snapshot->snap); |
492 | if (ret) | ||
493 | goto fail; | ||
413 | 494 | ||
414 | parent = dget_parent(dentry); | 495 | parent = dget_parent(dentry); |
415 | inode = btrfs_lookup_dentry(parent->d_inode, dentry); | 496 | inode = btrfs_lookup_dentry(parent->d_inode, dentry); |
@@ -2348,12 +2429,15 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp | |||
2348 | struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root; | 2429 | struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root; |
2349 | struct btrfs_trans_handle *trans; | 2430 | struct btrfs_trans_handle *trans; |
2350 | u64 transid; | 2431 | u64 transid; |
2432 | int ret; | ||
2351 | 2433 | ||
2352 | trans = btrfs_start_transaction(root, 0); | 2434 | trans = btrfs_start_transaction(root, 0); |
2353 | if (IS_ERR(trans)) | 2435 | if (IS_ERR(trans)) |
2354 | return PTR_ERR(trans); | 2436 | return PTR_ERR(trans); |
2355 | transid = trans->transid; | 2437 | transid = trans->transid; |
2356 | btrfs_commit_transaction_async(trans, root, 0); | 2438 | ret = btrfs_commit_transaction_async(trans, root, 0); |
2439 | if (ret) | ||
2440 | return ret; | ||
2357 | 2441 | ||
2358 | if (argp) | 2442 | if (argp) |
2359 | if (copy_to_user(argp, &transid, sizeof(transid))) | 2443 | if (copy_to_user(argp, &transid, sizeof(transid))) |
@@ -2388,6 +2472,8 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
2388 | return btrfs_ioctl_setflags(file, argp); | 2472 | return btrfs_ioctl_setflags(file, argp); |
2389 | case FS_IOC_GETVERSION: | 2473 | case FS_IOC_GETVERSION: |
2390 | return btrfs_ioctl_getversion(file, argp); | 2474 | return btrfs_ioctl_getversion(file, argp); |
2475 | case FITRIM: | ||
2476 | return btrfs_ioctl_fitrim(file, argp); | ||
2391 | case BTRFS_IOC_SNAP_CREATE: | 2477 | case BTRFS_IOC_SNAP_CREATE: |
2392 | return btrfs_ioctl_snap_create(file, argp, 0); | 2478 | return btrfs_ioctl_snap_create(file, argp, 0); |
2393 | case BTRFS_IOC_SNAP_CREATE_V2: | 2479 | case BTRFS_IOC_SNAP_CREATE_V2: |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 083a55477375..a1c940425307 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
@@ -202,6 +202,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, | |||
202 | INIT_LIST_HEAD(&entry->list); | 202 | INIT_LIST_HEAD(&entry->list); |
203 | INIT_LIST_HEAD(&entry->root_extent_list); | 203 | INIT_LIST_HEAD(&entry->root_extent_list); |
204 | 204 | ||
205 | trace_btrfs_ordered_extent_add(inode, entry); | ||
206 | |||
205 | spin_lock(&tree->lock); | 207 | spin_lock(&tree->lock); |
206 | node = tree_insert(&tree->tree, file_offset, | 208 | node = tree_insert(&tree->tree, file_offset, |
207 | &entry->rb_node); | 209 | &entry->rb_node); |
@@ -387,6 +389,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) | |||
387 | struct list_head *cur; | 389 | struct list_head *cur; |
388 | struct btrfs_ordered_sum *sum; | 390 | struct btrfs_ordered_sum *sum; |
389 | 391 | ||
392 | trace_btrfs_ordered_extent_put(entry->inode, entry); | ||
393 | |||
390 | if (atomic_dec_and_test(&entry->refs)) { | 394 | if (atomic_dec_and_test(&entry->refs)) { |
391 | while (!list_empty(&entry->list)) { | 395 | while (!list_empty(&entry->list)) { |
392 | cur = entry->list.next; | 396 | cur = entry->list.next; |
@@ -420,6 +424,8 @@ static int __btrfs_remove_ordered_extent(struct inode *inode, | |||
420 | spin_lock(&root->fs_info->ordered_extent_lock); | 424 | spin_lock(&root->fs_info->ordered_extent_lock); |
421 | list_del_init(&entry->root_extent_list); | 425 | list_del_init(&entry->root_extent_list); |
422 | 426 | ||
427 | trace_btrfs_ordered_extent_remove(inode, entry); | ||
428 | |||
423 | /* | 429 | /* |
424 | * we have no more ordered extents for this inode and | 430 | * we have no more ordered extents for this inode and |
425 | * no dirty pages. We can safely remove it from the | 431 | * no dirty pages. We can safely remove it from the |
@@ -585,6 +591,8 @@ void btrfs_start_ordered_extent(struct inode *inode, | |||
585 | u64 start = entry->file_offset; | 591 | u64 start = entry->file_offset; |
586 | u64 end = start + entry->len - 1; | 592 | u64 end = start + entry->len - 1; |
587 | 593 | ||
594 | trace_btrfs_ordered_extent_start(inode, entry); | ||
595 | |||
588 | /* | 596 | /* |
589 | * pages in the range can be dirty, clean or writeback. We | 597 | * pages in the range can be dirty, clean or writeback. We |
590 | * start IO on any dirty ones so the wait doesn't stall waiting | 598 | * start IO on any dirty ones so the wait doesn't stall waiting |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 31ade5802ae8..58250e09eb05 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -1724,6 +1724,7 @@ again: | |||
1724 | 1724 | ||
1725 | eb = read_tree_block(dest, old_bytenr, blocksize, | 1725 | eb = read_tree_block(dest, old_bytenr, blocksize, |
1726 | old_ptr_gen); | 1726 | old_ptr_gen); |
1727 | BUG_ON(!eb); | ||
1727 | btrfs_tree_lock(eb); | 1728 | btrfs_tree_lock(eb); |
1728 | if (cow) { | 1729 | if (cow) { |
1729 | ret = btrfs_cow_block(trans, dest, eb, parent, | 1730 | ret = btrfs_cow_block(trans, dest, eb, parent, |
@@ -2513,6 +2514,10 @@ static int do_relocation(struct btrfs_trans_handle *trans, | |||
2513 | blocksize = btrfs_level_size(root, node->level); | 2514 | blocksize = btrfs_level_size(root, node->level); |
2514 | generation = btrfs_node_ptr_generation(upper->eb, slot); | 2515 | generation = btrfs_node_ptr_generation(upper->eb, slot); |
2515 | eb = read_tree_block(root, bytenr, blocksize, generation); | 2516 | eb = read_tree_block(root, bytenr, blocksize, generation); |
2517 | if (!eb) { | ||
2518 | err = -EIO; | ||
2519 | goto next; | ||
2520 | } | ||
2516 | btrfs_tree_lock(eb); | 2521 | btrfs_tree_lock(eb); |
2517 | btrfs_set_lock_blocking(eb); | 2522 | btrfs_set_lock_blocking(eb); |
2518 | 2523 | ||
@@ -2670,6 +2675,7 @@ static int get_tree_block_key(struct reloc_control *rc, | |||
2670 | BUG_ON(block->key_ready); | 2675 | BUG_ON(block->key_ready); |
2671 | eb = read_tree_block(rc->extent_root, block->bytenr, | 2676 | eb = read_tree_block(rc->extent_root, block->bytenr, |
2672 | block->key.objectid, block->key.offset); | 2677 | block->key.objectid, block->key.offset); |
2678 | BUG_ON(!eb); | ||
2673 | WARN_ON(btrfs_header_level(eb) != block->level); | 2679 | WARN_ON(btrfs_header_level(eb) != block->level); |
2674 | if (block->level == 0) | 2680 | if (block->level == 0) |
2675 | btrfs_item_key_to_cpu(eb, &block->key, 0); | 2681 | btrfs_item_key_to_cpu(eb, &block->key, 0); |
@@ -4209,7 +4215,7 @@ out: | |||
4209 | if (IS_ERR(fs_root)) | 4215 | if (IS_ERR(fs_root)) |
4210 | err = PTR_ERR(fs_root); | 4216 | err = PTR_ERR(fs_root); |
4211 | else | 4217 | else |
4212 | btrfs_orphan_cleanup(fs_root); | 4218 | err = btrfs_orphan_cleanup(fs_root); |
4213 | } | 4219 | } |
4214 | return err; | 4220 | return err; |
4215 | } | 4221 | } |
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6a1086e83ffc..29b2d7c930eb 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c | |||
@@ -88,7 +88,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, | |||
88 | search_key.offset = (u64)-1; | 88 | search_key.offset = (u64)-1; |
89 | 89 | ||
90 | path = btrfs_alloc_path(); | 90 | path = btrfs_alloc_path(); |
91 | BUG_ON(!path); | 91 | if (!path) |
92 | return -ENOMEM; | ||
92 | ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); | 93 | ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); |
93 | if (ret < 0) | 94 | if (ret < 0) |
94 | goto out; | 95 | goto out; |
@@ -332,7 +333,8 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
332 | struct extent_buffer *leaf; | 333 | struct extent_buffer *leaf; |
333 | 334 | ||
334 | path = btrfs_alloc_path(); | 335 | path = btrfs_alloc_path(); |
335 | BUG_ON(!path); | 336 | if (!path) |
337 | return -ENOMEM; | ||
336 | ret = btrfs_search_slot(trans, root, key, path, -1, 1); | 338 | ret = btrfs_search_slot(trans, root, key, path, -1, 1); |
337 | if (ret < 0) | 339 | if (ret < 0) |
338 | goto out; | 340 | goto out; |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index d39a9895d932..2edfc039f098 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -52,6 +52,9 @@ | |||
52 | #include "export.h" | 52 | #include "export.h" |
53 | #include "compression.h" | 53 | #include "compression.h" |
54 | 54 | ||
55 | #define CREATE_TRACE_POINTS | ||
56 | #include <trace/events/btrfs.h> | ||
57 | |||
55 | static const struct super_operations btrfs_super_ops; | 58 | static const struct super_operations btrfs_super_ops; |
56 | 59 | ||
57 | static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno, | 60 | static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno, |
@@ -620,6 +623,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) | |||
620 | struct btrfs_root *root = btrfs_sb(sb); | 623 | struct btrfs_root *root = btrfs_sb(sb); |
621 | int ret; | 624 | int ret; |
622 | 625 | ||
626 | trace_btrfs_sync_fs(wait); | ||
627 | |||
623 | if (!wait) { | 628 | if (!wait) { |
624 | filemap_flush(root->fs_info->btree_inode->i_mapping); | 629 | filemap_flush(root->fs_info->btree_inode->i_mapping); |
625 | return 0; | 630 | return 0; |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3d73c8d93bbb..ce48eb59d615 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -57,7 +57,8 @@ static noinline int join_transaction(struct btrfs_root *root) | |||
57 | if (!cur_trans) { | 57 | if (!cur_trans) { |
58 | cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, | 58 | cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, |
59 | GFP_NOFS); | 59 | GFP_NOFS); |
60 | BUG_ON(!cur_trans); | 60 | if (!cur_trans) |
61 | return -ENOMEM; | ||
61 | root->fs_info->generation++; | 62 | root->fs_info->generation++; |
62 | cur_trans->num_writers = 1; | 63 | cur_trans->num_writers = 1; |
63 | cur_trans->num_joined = 0; | 64 | cur_trans->num_joined = 0; |
@@ -195,7 +196,11 @@ again: | |||
195 | wait_current_trans(root); | 196 | wait_current_trans(root); |
196 | 197 | ||
197 | ret = join_transaction(root); | 198 | ret = join_transaction(root); |
198 | BUG_ON(ret); | 199 | if (ret < 0) { |
200 | if (type != TRANS_JOIN_NOLOCK) | ||
201 | mutex_unlock(&root->fs_info->trans_mutex); | ||
202 | return ERR_PTR(ret); | ||
203 | } | ||
199 | 204 | ||
200 | cur_trans = root->fs_info->running_transaction; | 205 | cur_trans = root->fs_info->running_transaction; |
201 | cur_trans->use_count++; | 206 | cur_trans->use_count++; |
@@ -1156,7 +1161,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
1156 | struct btrfs_transaction *cur_trans; | 1161 | struct btrfs_transaction *cur_trans; |
1157 | 1162 | ||
1158 | ac = kmalloc(sizeof(*ac), GFP_NOFS); | 1163 | ac = kmalloc(sizeof(*ac), GFP_NOFS); |
1159 | BUG_ON(!ac); | 1164 | if (!ac) |
1165 | return -ENOMEM; | ||
1160 | 1166 | ||
1161 | INIT_DELAYED_WORK(&ac->work, do_async_commit); | 1167 | INIT_DELAYED_WORK(&ac->work, do_async_commit); |
1162 | ac->root = root; | 1168 | ac->root = root; |
@@ -1389,6 +1395,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1389 | put_transaction(cur_trans); | 1395 | put_transaction(cur_trans); |
1390 | put_transaction(cur_trans); | 1396 | put_transaction(cur_trans); |
1391 | 1397 | ||
1398 | trace_btrfs_transaction_commit(root); | ||
1399 | |||
1392 | mutex_unlock(&root->fs_info->trans_mutex); | 1400 | mutex_unlock(&root->fs_info->trans_mutex); |
1393 | 1401 | ||
1394 | if (current->journal_info == trans) | 1402 | if (current->journal_info == trans) |
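join_transaction() can now fail with -ENOMEM, and its caller converts that into an ERR_PTR()-encoded transaction handle after dropping trans_mutex, so start_transaction() keeps its pointer-returning signature. The ERR_PTR/IS_ERR/PTR_ERR convention in a compact, self-contained sketch (struct handle and the helpers are illustrative names, not btrfs symbols):

#include <linux/err.h>

struct handle { int dummy; };

static struct handle *get_handle(int fail)
{
	static struct handle h;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return &h;
}

static int use_handle(int fail)
{
	struct handle *hp = get_handle(fail);

	if (IS_ERR(hp))
		return PTR_ERR(hp);		/* decode back to -ENOMEM */
	return 0;
}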
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index a4bbb854dfd2..c50271ad3157 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -799,12 +799,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | |||
799 | struct inode *dir; | 799 | struct inode *dir; |
800 | int ret; | 800 | int ret; |
801 | struct btrfs_inode_ref *ref; | 801 | struct btrfs_inode_ref *ref; |
802 | struct btrfs_dir_item *di; | ||
803 | struct inode *inode; | 802 | struct inode *inode; |
804 | char *name; | 803 | char *name; |
805 | int namelen; | 804 | int namelen; |
806 | unsigned long ref_ptr; | 805 | unsigned long ref_ptr; |
807 | unsigned long ref_end; | 806 | unsigned long ref_end; |
807 | int search_done = 0; | ||
808 | 808 | ||
809 | /* | 809 | /* |
810 | * it is possible that we didn't log all the parent directories | 810 | * it is possible that we didn't log all the parent directories |
@@ -845,7 +845,10 @@ again: | |||
845 | * existing back reference, and we don't want to create | 845 | * existing back reference, and we don't want to create |
846 | * dangling pointers in the directory. | 846 | * dangling pointers in the directory. |
847 | */ | 847 | */ |
848 | conflict_again: | 848 | |
849 | if (search_done) | ||
850 | goto insert; | ||
851 | |||
849 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); | 852 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); |
850 | if (ret == 0) { | 853 | if (ret == 0) { |
851 | char *victim_name; | 854 | char *victim_name; |
@@ -886,37 +889,21 @@ conflict_again: | |||
886 | ret = btrfs_unlink_inode(trans, root, dir, | 889 | ret = btrfs_unlink_inode(trans, root, dir, |
887 | inode, victim_name, | 890 | inode, victim_name, |
888 | victim_name_len); | 891 | victim_name_len); |
889 | kfree(victim_name); | ||
890 | btrfs_release_path(root, path); | ||
891 | goto conflict_again; | ||
892 | } | 892 | } |
893 | kfree(victim_name); | 893 | kfree(victim_name); |
894 | ptr = (unsigned long)(victim_ref + 1) + victim_name_len; | 894 | ptr = (unsigned long)(victim_ref + 1) + victim_name_len; |
895 | } | 895 | } |
896 | BUG_ON(ret); | 896 | BUG_ON(ret); |
897 | } | ||
898 | btrfs_release_path(root, path); | ||
899 | |||
900 | /* look for a conflicting sequence number */ | ||
901 | di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, | ||
902 | btrfs_inode_ref_index(eb, ref), | ||
903 | name, namelen, 0); | ||
904 | if (di && !IS_ERR(di)) { | ||
905 | ret = drop_one_dir_item(trans, root, path, dir, di); | ||
906 | BUG_ON(ret); | ||
907 | } | ||
908 | btrfs_release_path(root, path); | ||
909 | 897 | ||
910 | 898 | /* | |
911 | /* look for a conflicting name */ | 899 | * NOTE: we have searched the root tree and checked the |
912 | di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, | 900 | * corresponding ref, so there is no need to check it again. |
913 | name, namelen, 0); | 901 | */ |
914 | if (di && !IS_ERR(di)) { | 902 | search_done = 1; |
915 | ret = drop_one_dir_item(trans, root, path, dir, di); | ||
916 | BUG_ON(ret); | ||
917 | } | 903 | } |
918 | btrfs_release_path(root, path); | 904 | btrfs_release_path(root, path); |
919 | 905 | ||
906 | insert: | ||
920 | /* insert our name */ | 907 | /* insert our name */ |
921 | ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, | 908 | ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, |
922 | btrfs_inode_ref_index(eb, ref)); | 909 | btrfs_inode_ref_index(eb, ref)); |
@@ -1286,6 +1273,8 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, | |||
1286 | ptr_end = ptr + item_size; | 1273 | ptr_end = ptr + item_size; |
1287 | while (ptr < ptr_end) { | 1274 | while (ptr < ptr_end) { |
1288 | di = (struct btrfs_dir_item *)ptr; | 1275 | di = (struct btrfs_dir_item *)ptr; |
1276 | if (verify_dir_item(root, eb, di)) | ||
1277 | return -EIO; | ||
1289 | name_len = btrfs_dir_name_len(eb, di); | 1278 | name_len = btrfs_dir_name_len(eb, di); |
1290 | ret = replay_one_name(trans, root, path, eb, di, key); | 1279 | ret = replay_one_name(trans, root, path, eb, di, key); |
1291 | BUG_ON(ret); | 1280 | BUG_ON(ret); |
@@ -1412,6 +1401,11 @@ again: | |||
1412 | ptr_end = ptr + item_size; | 1401 | ptr_end = ptr + item_size; |
1413 | while (ptr < ptr_end) { | 1402 | while (ptr < ptr_end) { |
1414 | di = (struct btrfs_dir_item *)ptr; | 1403 | di = (struct btrfs_dir_item *)ptr; |
1404 | if (verify_dir_item(root, eb, di)) { | ||
1405 | ret = -EIO; | ||
1406 | goto out; | ||
1407 | } | ||
1408 | |||
1415 | name_len = btrfs_dir_name_len(eb, di); | 1409 | name_len = btrfs_dir_name_len(eb, di); |
1416 | name = kmalloc(name_len, GFP_NOFS); | 1410 | name = kmalloc(name_len, GFP_NOFS); |
1417 | if (!name) { | 1411 | if (!name) { |
@@ -1821,7 +1815,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, | |||
1821 | int orig_level; | 1815 | int orig_level; |
1822 | 1816 | ||
1823 | path = btrfs_alloc_path(); | 1817 | path = btrfs_alloc_path(); |
1824 | BUG_ON(!path); | 1818 | if (!path) |
1819 | return -ENOMEM; | ||
1825 | 1820 | ||
1826 | level = btrfs_header_level(log->node); | 1821 | level = btrfs_header_level(log->node); |
1827 | orig_level = level; | 1822 | orig_level = level; |
@@ -3107,9 +3102,11 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) | |||
3107 | .stage = 0, | 3102 | .stage = 0, |
3108 | }; | 3103 | }; |
3109 | 3104 | ||
3110 | fs_info->log_root_recovering = 1; | ||
3111 | path = btrfs_alloc_path(); | 3105 | path = btrfs_alloc_path(); |
3112 | BUG_ON(!path); | 3106 | if (!path) |
3107 | return -ENOMEM; | ||
3108 | |||
3109 | fs_info->log_root_recovering = 1; | ||
3113 | 3110 | ||
3114 | trans = btrfs_start_transaction(fs_info->tree_root, 0); | 3111 | trans = btrfs_start_transaction(fs_info->tree_root, 0); |
3115 | BUG_ON(IS_ERR(trans)); | 3112 | BUG_ON(IS_ERR(trans)); |
@@ -3117,7 +3114,8 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) | |||
3117 | wc.trans = trans; | 3114 | wc.trans = trans; |
3118 | wc.pin = 1; | 3115 | wc.pin = 1; |
3119 | 3116 | ||
3120 | walk_log_tree(trans, log_root_tree, &wc); | 3117 | ret = walk_log_tree(trans, log_root_tree, &wc); |
3118 | BUG_ON(ret); | ||
3121 | 3119 | ||
3122 | again: | 3120 | again: |
3123 | key.objectid = BTRFS_TREE_LOG_OBJECTID; | 3121 | key.objectid = BTRFS_TREE_LOG_OBJECTID; |
@@ -3141,8 +3139,7 @@ again: | |||
3141 | 3139 | ||
3142 | log = btrfs_read_fs_root_no_radix(log_root_tree, | 3140 | log = btrfs_read_fs_root_no_radix(log_root_tree, |
3143 | &found_key); | 3141 | &found_key); |
3144 | BUG_ON(!log); | 3142 | BUG_ON(IS_ERR(log)); |
3145 | |||
3146 | 3143 | ||
3147 | tmp_key.objectid = found_key.offset; | 3144 | tmp_key.objectid = found_key.offset; |
3148 | tmp_key.type = BTRFS_ROOT_ITEM_KEY; | 3145 | tmp_key.type = BTRFS_ROOT_ITEM_KEY; |
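Both replay loops above now run verify_dir_item() before trusting the on-disk btrfs_dir_name_len(), so a corrupted log item fails replay with -EIO instead of walking past the end of the leaf. The guard, reduced to its essentials (eb, di and the loop bounds are the same names used in the hunks):

	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
			return -EIO;		/* item is corrupt, stop replay */

		name_len = btrfs_dir_name_len(eb, di);
		/* replay this name, then step past the item and its name */
		ptr = (unsigned long)(di + 1) + name_len;
	}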
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 9d554e8e6583..309a57b9fc85 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -33,17 +33,6 @@ | |||
33 | #include "volumes.h" | 33 | #include "volumes.h" |
34 | #include "async-thread.h" | 34 | #include "async-thread.h" |
35 | 35 | ||
36 | struct map_lookup { | ||
37 | u64 type; | ||
38 | int io_align; | ||
39 | int io_width; | ||
40 | int stripe_len; | ||
41 | int sector_size; | ||
42 | int num_stripes; | ||
43 | int sub_stripes; | ||
44 | struct btrfs_bio_stripe stripes[]; | ||
45 | }; | ||
46 | |||
47 | static int init_first_rw_device(struct btrfs_trans_handle *trans, | 36 | static int init_first_rw_device(struct btrfs_trans_handle *trans, |
48 | struct btrfs_root *root, | 37 | struct btrfs_root *root, |
49 | struct btrfs_device *device); | 38 | struct btrfs_device *device); |
@@ -1879,6 +1868,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, | |||
1879 | 1868 | ||
1880 | BUG_ON(ret); | 1869 | BUG_ON(ret); |
1881 | 1870 | ||
1871 | trace_btrfs_chunk_free(root, map, chunk_offset, em->len); | ||
1872 | |||
1882 | if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { | 1873 | if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { |
1883 | ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); | 1874 | ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); |
1884 | BUG_ON(ret); | 1875 | BUG_ON(ret); |
@@ -2606,6 +2597,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
2606 | *num_bytes = chunk_bytes_by_type(type, calc_size, | 2597 | *num_bytes = chunk_bytes_by_type(type, calc_size, |
2607 | map->num_stripes, sub_stripes); | 2598 | map->num_stripes, sub_stripes); |
2608 | 2599 | ||
2600 | trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); | ||
2601 | |||
2609 | em = alloc_extent_map(GFP_NOFS); | 2602 | em = alloc_extent_map(GFP_NOFS); |
2610 | if (!em) { | 2603 | if (!em) { |
2611 | ret = -ENOMEM; | 2604 | ret = -ENOMEM; |
@@ -2714,6 +2707,7 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans, | |||
2714 | item_size); | 2707 | item_size); |
2715 | BUG_ON(ret); | 2708 | BUG_ON(ret); |
2716 | } | 2709 | } |
2710 | |||
2717 | kfree(chunk); | 2711 | kfree(chunk); |
2718 | return 0; | 2712 | return 0; |
2719 | } | 2713 | } |
@@ -2918,7 +2912,10 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, | |||
2918 | struct extent_map_tree *em_tree = &map_tree->map_tree; | 2912 | struct extent_map_tree *em_tree = &map_tree->map_tree; |
2919 | u64 offset; | 2913 | u64 offset; |
2920 | u64 stripe_offset; | 2914 | u64 stripe_offset; |
2915 | u64 stripe_end_offset; | ||
2921 | u64 stripe_nr; | 2916 | u64 stripe_nr; |
2917 | u64 stripe_nr_orig; | ||
2918 | u64 stripe_nr_end; | ||
2922 | int stripes_allocated = 8; | 2919 | int stripes_allocated = 8; |
2923 | int stripes_required = 1; | 2920 | int stripes_required = 1; |
2924 | int stripe_index; | 2921 | int stripe_index; |
@@ -2927,7 +2924,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, | |||
2927 | int max_errors = 0; | 2924 | int max_errors = 0; |
2928 | struct btrfs_multi_bio *multi = NULL; | 2925 | struct btrfs_multi_bio *multi = NULL; |
2929 | 2926 | ||
2930 | if (multi_ret && !(rw & REQ_WRITE)) | 2927 | if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD))) |
2931 | stripes_allocated = 1; | 2928 | stripes_allocated = 1; |
2932 | again: | 2929 | again: |
2933 | if (multi_ret) { | 2930 | if (multi_ret) { |
@@ -2968,7 +2965,15 @@ again: | |||
2968 | max_errors = 1; | 2965 | max_errors = 1; |
2969 | } | 2966 | } |
2970 | } | 2967 | } |
2971 | if (multi_ret && (rw & REQ_WRITE) && | 2968 | if (rw & REQ_DISCARD) { |
2969 | if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | | ||
2970 | BTRFS_BLOCK_GROUP_RAID1 | | ||
2971 | BTRFS_BLOCK_GROUP_DUP | | ||
2972 | BTRFS_BLOCK_GROUP_RAID10)) { | ||
2973 | stripes_required = map->num_stripes; | ||
2974 | } | ||
2975 | } | ||
2976 | if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) && | ||
2972 | stripes_allocated < stripes_required) { | 2977 | stripes_allocated < stripes_required) { |
2973 | stripes_allocated = map->num_stripes; | 2978 | stripes_allocated = map->num_stripes; |
2974 | free_extent_map(em); | 2979 | free_extent_map(em); |
@@ -2988,12 +2993,15 @@ again: | |||
2988 | /* stripe_offset is the offset of this block in its stripe*/ | 2993 | /* stripe_offset is the offset of this block in its stripe*/ |
2989 | stripe_offset = offset - stripe_offset; | 2994 | stripe_offset = offset - stripe_offset; |
2990 | 2995 | ||
2991 | if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | | 2996 | if (rw & REQ_DISCARD) |
2992 | BTRFS_BLOCK_GROUP_RAID10 | | 2997 | *length = min_t(u64, em->len - offset, *length); |
2993 | BTRFS_BLOCK_GROUP_DUP)) { | 2998 | else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | |
2999 | BTRFS_BLOCK_GROUP_RAID1 | | ||
3000 | BTRFS_BLOCK_GROUP_RAID10 | | ||
3001 | BTRFS_BLOCK_GROUP_DUP)) { | ||
2994 | /* we limit the length of each bio to what fits in a stripe */ | 3002 | /* we limit the length of each bio to what fits in a stripe */ |
2995 | *length = min_t(u64, em->len - offset, | 3003 | *length = min_t(u64, em->len - offset, |
2996 | map->stripe_len - stripe_offset); | 3004 | map->stripe_len - stripe_offset); |
2997 | } else { | 3005 | } else { |
2998 | *length = em->len - offset; | 3006 | *length = em->len - offset; |
2999 | } | 3007 | } |
@@ -3003,8 +3011,19 @@ again: | |||
3003 | 3011 | ||
3004 | num_stripes = 1; | 3012 | num_stripes = 1; |
3005 | stripe_index = 0; | 3013 | stripe_index = 0; |
3006 | if (map->type & BTRFS_BLOCK_GROUP_RAID1) { | 3014 | stripe_nr_orig = stripe_nr; |
3007 | if (rw & REQ_WRITE) | 3015 | stripe_nr_end = (offset + *length + map->stripe_len - 1) & |
3016 | (~(map->stripe_len - 1)); | ||
3017 | do_div(stripe_nr_end, map->stripe_len); | ||
3018 | stripe_end_offset = stripe_nr_end * map->stripe_len - | ||
3019 | (offset + *length); | ||
3020 | if (map->type & BTRFS_BLOCK_GROUP_RAID0) { | ||
3021 | if (rw & REQ_DISCARD) | ||
3022 | num_stripes = min_t(u64, map->num_stripes, | ||
3023 | stripe_nr_end - stripe_nr_orig); | ||
3024 | stripe_index = do_div(stripe_nr, map->num_stripes); | ||
3025 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { | ||
3026 | if (rw & (REQ_WRITE | REQ_DISCARD)) | ||
3008 | num_stripes = map->num_stripes; | 3027 | num_stripes = map->num_stripes; |
3009 | else if (mirror_num) | 3028 | else if (mirror_num) |
3010 | stripe_index = mirror_num - 1; | 3029 | stripe_index = mirror_num - 1; |
@@ -3015,7 +3034,7 @@ again: | |||
3015 | } | 3034 | } |
3016 | 3035 | ||
3017 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { | 3036 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { |
3018 | if (rw & REQ_WRITE) | 3037 | if (rw & (REQ_WRITE | REQ_DISCARD)) |
3019 | num_stripes = map->num_stripes; | 3038 | num_stripes = map->num_stripes; |
3020 | else if (mirror_num) | 3039 | else if (mirror_num) |
3021 | stripe_index = mirror_num - 1; | 3040 | stripe_index = mirror_num - 1; |
@@ -3028,6 +3047,10 @@ again: | |||
3028 | 3047 | ||
3029 | if (rw & REQ_WRITE) | 3048 | if (rw & REQ_WRITE) |
3030 | num_stripes = map->sub_stripes; | 3049 | num_stripes = map->sub_stripes; |
3050 | else if (rw & REQ_DISCARD) | ||
3051 | num_stripes = min_t(u64, map->sub_stripes * | ||
3052 | (stripe_nr_end - stripe_nr_orig), | ||
3053 | map->num_stripes); | ||
3031 | else if (mirror_num) | 3054 | else if (mirror_num) |
3032 | stripe_index += mirror_num - 1; | 3055 | stripe_index += mirror_num - 1; |
3033 | else { | 3056 | else { |
@@ -3045,12 +3068,101 @@ again: | |||
3045 | } | 3068 | } |
3046 | BUG_ON(stripe_index >= map->num_stripes); | 3069 | BUG_ON(stripe_index >= map->num_stripes); |
3047 | 3070 | ||
3048 | for (i = 0; i < num_stripes; i++) { | 3071 | if (rw & REQ_DISCARD) { |
3049 | multi->stripes[i].physical = | 3072 | for (i = 0; i < num_stripes; i++) { |
3050 | map->stripes[stripe_index].physical + | 3073 | multi->stripes[i].physical = |
3051 | stripe_offset + stripe_nr * map->stripe_len; | 3074 | map->stripes[stripe_index].physical + |
3052 | multi->stripes[i].dev = map->stripes[stripe_index].dev; | 3075 | stripe_offset + stripe_nr * map->stripe_len; |
3053 | stripe_index++; | 3076 | multi->stripes[i].dev = map->stripes[stripe_index].dev; |
3077 | |||
3078 | if (map->type & BTRFS_BLOCK_GROUP_RAID0) { | ||
3079 | u64 stripes; | ||
3080 | u32 last_stripe = 0; | ||
3081 | int j; | ||
3082 | |||
3083 | div_u64_rem(stripe_nr_end - 1, | ||
3084 | map->num_stripes, | ||
3085 | &last_stripe); | ||
3086 | |||
3087 | for (j = 0; j < map->num_stripes; j++) { | ||
3088 | u32 test; | ||
3089 | |||
3090 | div_u64_rem(stripe_nr_end - 1 - j, | ||
3091 | map->num_stripes, &test); | ||
3092 | if (test == stripe_index) | ||
3093 | break; | ||
3094 | } | ||
3095 | stripes = stripe_nr_end - 1 - j; | ||
3096 | do_div(stripes, map->num_stripes); | ||
3097 | multi->stripes[i].length = map->stripe_len * | ||
3098 | (stripes - stripe_nr + 1); | ||
3099 | |||
3100 | if (i == 0) { | ||
3101 | multi->stripes[i].length -= | ||
3102 | stripe_offset; | ||
3103 | stripe_offset = 0; | ||
3104 | } | ||
3105 | if (stripe_index == last_stripe) | ||
3106 | multi->stripes[i].length -= | ||
3107 | stripe_end_offset; | ||
3108 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { | ||
3109 | u64 stripes; | ||
3110 | int j; | ||
3111 | int factor = map->num_stripes / | ||
3112 | map->sub_stripes; | ||
3113 | u32 last_stripe = 0; | ||
3114 | |||
3115 | div_u64_rem(stripe_nr_end - 1, | ||
3116 | factor, &last_stripe); | ||
3117 | last_stripe *= map->sub_stripes; | ||
3118 | |||
3119 | for (j = 0; j < factor; j++) { | ||
3120 | u32 test; | ||
3121 | |||
3122 | div_u64_rem(stripe_nr_end - 1 - j, | ||
3123 | factor, &test); | ||
3124 | |||
3125 | if (test == | ||
3126 | stripe_index / map->sub_stripes) | ||
3127 | break; | ||
3128 | } | ||
3129 | stripes = stripe_nr_end - 1 - j; | ||
3130 | do_div(stripes, factor); | ||
3131 | multi->stripes[i].length = map->stripe_len * | ||
3132 | (stripes - stripe_nr + 1); | ||
3133 | |||
3134 | if (i < map->sub_stripes) { | ||
3135 | multi->stripes[i].length -= | ||
3136 | stripe_offset; | ||
3137 | if (i == map->sub_stripes - 1) | ||
3138 | stripe_offset = 0; | ||
3139 | } | ||
3140 | if (stripe_index >= last_stripe && | ||
3141 | stripe_index <= (last_stripe + | ||
3142 | map->sub_stripes - 1)) { | ||
3143 | multi->stripes[i].length -= | ||
3144 | stripe_end_offset; | ||
3145 | } | ||
3146 | } else | ||
3147 | multi->stripes[i].length = *length; | ||
3148 | |||
3149 | stripe_index++; | ||
3150 | if (stripe_index == map->num_stripes) { | ||
3151 | /* This could only happen for RAID0/10 */ | ||
3152 | stripe_index = 0; | ||
3153 | stripe_nr++; | ||
3154 | } | ||
3155 | } | ||
3156 | } else { | ||
3157 | for (i = 0; i < num_stripes; i++) { | ||
3158 | multi->stripes[i].physical = | ||
3159 | map->stripes[stripe_index].physical + | ||
3160 | stripe_offset + | ||
3161 | stripe_nr * map->stripe_len; | ||
3162 | multi->stripes[i].dev = | ||
3163 | map->stripes[stripe_index].dev; | ||
3164 | stripe_index++; | ||
3165 | } | ||
3054 | } | 3166 | } |
3055 | if (multi_ret) { | 3167 | if (multi_ret) { |
3056 | *multi_ret = multi; | 3168 | *multi_ret = multi; |
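For discards, __btrfs_map_block() has to cover a whole byte range rather than a single block, so the hunk above first turns the range into stripe numbers and per-stripe offsets. The arithmetic, pulled out into a stand-alone sketch with plain 64-bit division in place of the kernel's do_div() (stripe_len is assumed to be a power of two, as it is in the code above):

#include <stdint.h>

struct discard_span {
	uint64_t stripe_nr;		/* first stripe touched */
	uint64_t stripe_offset;		/* offset into that first stripe */
	uint64_t stripe_nr_end;		/* one past the last stripe touched */
	uint64_t stripe_end_offset;	/* unused tail of the last stripe */
};

static struct discard_span map_discard(uint64_t offset, uint64_t length,
				       uint64_t stripe_len)
{
	struct discard_span s;

	s.stripe_nr = offset / stripe_len;
	s.stripe_offset = offset - s.stripe_nr * stripe_len;

	/* round the end of the range up to a stripe boundary, then divide */
	s.stripe_nr_end = (offset + length + stripe_len - 1) &
			  ~(stripe_len - 1);
	s.stripe_nr_end /= stripe_len;
	s.stripe_end_offset = s.stripe_nr_end * stripe_len - (offset + length);

	return s;
}

For RAID0 the number of stripes to discard is then min(map->num_stripes, stripe_nr_end - stripe_nr), which is exactly what the hunk computes before filling in each stripe's physical start and length.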
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 7fb59d45fe8c..cc2eadaf7a27 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -126,6 +126,7 @@ struct btrfs_fs_devices { | |||
126 | struct btrfs_bio_stripe { | 126 | struct btrfs_bio_stripe { |
127 | struct btrfs_device *dev; | 127 | struct btrfs_device *dev; |
128 | u64 physical; | 128 | u64 physical; |
129 | u64 length; /* only used for discard mappings */ | ||
129 | }; | 130 | }; |
130 | 131 | ||
131 | struct btrfs_multi_bio { | 132 | struct btrfs_multi_bio { |
@@ -145,6 +146,17 @@ struct btrfs_device_info { | |||
145 | u64 max_avail; | 146 | u64 max_avail; |
146 | }; | 147 | }; |
147 | 148 | ||
149 | struct map_lookup { | ||
150 | u64 type; | ||
151 | int io_align; | ||
152 | int io_width; | ||
153 | int stripe_len; | ||
154 | int sector_size; | ||
155 | int num_stripes; | ||
156 | int sub_stripes; | ||
157 | struct btrfs_bio_stripe stripes[]; | ||
158 | }; | ||
159 | |||
148 | /* Used to sort the devices by max_avail(descending sort) */ | 160 | /* Used to sort the devices by max_avail(descending sort) */ |
149 | int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); | 161 | int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); |
150 | 162 | ||
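The new length field is only meaningful for discard mappings; a caller that asked __btrfs_map_block() for a REQ_DISCARD mapping is expected to issue one discard per returned stripe. A hedged sketch of what such a consumer could look like -- the real call site lives in the extent allocator, outside this diff, and blkdev_issue_discard() is the generic block-layer helper assumed here:

	int i, ret = 0;

	for (i = 0; i < multi->num_stripes; i++) {
		struct btrfs_bio_stripe *stripe = &multi->stripes[i];

		/* physical and length are bytes, the block layer wants sectors */
		ret = blkdev_issue_discard(stripe->dev->bdev,
					   stripe->physical >> 9,
					   stripe->length >> 9,
					   GFP_NOFS, 0);
		if (ret)
			break;
	}
	kfree(multi);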
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index d779cefcfd7d..a5303b871b13 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c | |||
@@ -242,6 +242,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) | |||
242 | break; | 242 | break; |
243 | 243 | ||
244 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); | 244 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); |
245 | if (verify_dir_item(root, leaf, di)) | ||
246 | continue; | ||
245 | 247 | ||
246 | name_len = btrfs_dir_name_len(leaf, di); | 248 | name_len = btrfs_dir_name_len(leaf, di); |
247 | total_size += name_len + 1; | 249 | total_size += name_len + 1; |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 561438b6a50c..37368ba2e67c 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -92,7 +92,7 @@ static int ceph_set_page_dirty(struct page *page) | |||
92 | ci->i_head_snapc = ceph_get_snap_context(snapc); | 92 | ci->i_head_snapc = ceph_get_snap_context(snapc); |
93 | ++ci->i_wrbuffer_ref_head; | 93 | ++ci->i_wrbuffer_ref_head; |
94 | if (ci->i_wrbuffer_ref == 0) | 94 | if (ci->i_wrbuffer_ref == 0) |
95 | igrab(inode); | 95 | ihold(inode); |
96 | ++ci->i_wrbuffer_ref; | 96 | ++ci->i_wrbuffer_ref; |
97 | dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " | 97 | dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " |
98 | "snapc %p seq %lld (%d snaps)\n", | 98 | "snapc %p seq %lld (%d snaps)\n", |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index f40b9139e437..0aee66b92af3 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -463,8 +463,8 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) | |||
463 | 463 | ||
464 | dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode, | 464 | dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode, |
465 | capsnap, snapc); | 465 | capsnap, snapc); |
466 | igrab(inode); | 466 | ihold(inode); |
467 | 467 | ||
468 | atomic_set(&capsnap->nref, 1); | 468 | atomic_set(&capsnap->nref, 1); |
469 | capsnap->ci = ci; | 469 | capsnap->ci = ci; |
470 | INIT_LIST_HEAD(&capsnap->ci_item); | 470 | INIT_LIST_HEAD(&capsnap->ci_item); |
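Both ceph hunks replace igrab() with ihold(). The distinction: igrab() can return NULL when the inode is already being evicted, while ihold() unconditionally bumps i_count and is only legal when the caller is known to hold a reference, which is true at both call sites here. In sketch form:

	/* old style: the caller must cope with failure */
	if (!igrab(inode))
		return;			/* inode was on its way out */

	/* new style: caller already owns a reference, so this cannot fail */
	ihold(inode);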
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index bfd8b680e648..d2a70a4561f9 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -266,7 +266,6 @@ void ecryptfs_destroy_mount_crypt_stat( | |||
266 | &mount_crypt_stat->global_auth_tok_list, | 266 | &mount_crypt_stat->global_auth_tok_list, |
267 | mount_crypt_stat_list) { | 267 | mount_crypt_stat_list) { |
268 | list_del(&auth_tok->mount_crypt_stat_list); | 268 | list_del(&auth_tok->mount_crypt_stat_list); |
269 | mount_crypt_stat->num_global_auth_toks--; | ||
270 | if (auth_tok->global_auth_tok_key | 269 | if (auth_tok->global_auth_tok_key |
271 | && !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID)) | 270 | && !(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID)) |
272 | key_put(auth_tok->global_auth_tok_key); | 271 | key_put(auth_tok->global_auth_tok_key); |
@@ -1389,6 +1388,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1389 | rc = -ENOMEM; | 1388 | rc = -ENOMEM; |
1390 | goto out; | 1389 | goto out; |
1391 | } | 1390 | } |
1391 | /* Zeroed page ensures the in-header unencrypted i_size is set to 0 */ | ||
1392 | rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat, | 1392 | rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat, |
1393 | ecryptfs_dentry); | 1393 | ecryptfs_dentry); |
1394 | if (unlikely(rc)) { | 1394 | if (unlikely(rc)) { |
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index e00753496e3e..bd3cafd0949d 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -233,7 +233,7 @@ ecryptfs_get_key_payload_data(struct key *key) | |||
233 | 233 | ||
234 | struct ecryptfs_key_sig { | 234 | struct ecryptfs_key_sig { |
235 | struct list_head crypt_stat_list; | 235 | struct list_head crypt_stat_list; |
236 | char keysig[ECRYPTFS_SIG_SIZE_HEX]; | 236 | char keysig[ECRYPTFS_SIG_SIZE_HEX + 1]; |
237 | }; | 237 | }; |
238 | 238 | ||
239 | struct ecryptfs_filename { | 239 | struct ecryptfs_filename { |
@@ -257,19 +257,18 @@ struct ecryptfs_filename { | |||
257 | struct ecryptfs_crypt_stat { | 257 | struct ecryptfs_crypt_stat { |
258 | #define ECRYPTFS_STRUCT_INITIALIZED 0x00000001 | 258 | #define ECRYPTFS_STRUCT_INITIALIZED 0x00000001 |
259 | #define ECRYPTFS_POLICY_APPLIED 0x00000002 | 259 | #define ECRYPTFS_POLICY_APPLIED 0x00000002 |
260 | #define ECRYPTFS_NEW_FILE 0x00000004 | 260 | #define ECRYPTFS_ENCRYPTED 0x00000004 |
261 | #define ECRYPTFS_ENCRYPTED 0x00000008 | 261 | #define ECRYPTFS_SECURITY_WARNING 0x00000008 |
262 | #define ECRYPTFS_SECURITY_WARNING 0x00000010 | 262 | #define ECRYPTFS_ENABLE_HMAC 0x00000010 |
263 | #define ECRYPTFS_ENABLE_HMAC 0x00000020 | 263 | #define ECRYPTFS_ENCRYPT_IV_PAGES 0x00000020 |
264 | #define ECRYPTFS_ENCRYPT_IV_PAGES 0x00000040 | 264 | #define ECRYPTFS_KEY_VALID 0x00000040 |
265 | #define ECRYPTFS_KEY_VALID 0x00000080 | 265 | #define ECRYPTFS_METADATA_IN_XATTR 0x00000080 |
266 | #define ECRYPTFS_METADATA_IN_XATTR 0x00000100 | 266 | #define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000100 |
267 | #define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200 | 267 | #define ECRYPTFS_KEY_SET 0x00000200 |
268 | #define ECRYPTFS_KEY_SET 0x00000400 | 268 | #define ECRYPTFS_ENCRYPT_FILENAMES 0x00000400 |
269 | #define ECRYPTFS_ENCRYPT_FILENAMES 0x00000800 | 269 | #define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00000800 |
270 | #define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000 | 270 | #define ECRYPTFS_ENCFN_USE_FEK 0x00001000 |
271 | #define ECRYPTFS_ENCFN_USE_FEK 0x00002000 | 271 | #define ECRYPTFS_UNLINK_SIGS 0x00002000 |
272 | #define ECRYPTFS_UNLINK_SIGS 0x00004000 | ||
273 | u32 flags; | 272 | u32 flags; |
274 | unsigned int file_version; | 273 | unsigned int file_version; |
275 | size_t iv_bytes; | 274 | size_t iv_bytes; |
@@ -297,7 +296,6 @@ struct ecryptfs_inode_info { | |||
297 | struct inode vfs_inode; | 296 | struct inode vfs_inode; |
298 | struct inode *wii_inode; | 297 | struct inode *wii_inode; |
299 | struct file *lower_file; | 298 | struct file *lower_file; |
300 | struct mutex lower_file_mutex; | ||
301 | struct ecryptfs_crypt_stat crypt_stat; | 299 | struct ecryptfs_crypt_stat crypt_stat; |
302 | }; | 300 | }; |
303 | 301 | ||
@@ -333,7 +331,6 @@ struct ecryptfs_global_auth_tok { | |||
333 | u32 flags; | 331 | u32 flags; |
334 | struct list_head mount_crypt_stat_list; | 332 | struct list_head mount_crypt_stat_list; |
335 | struct key *global_auth_tok_key; | 333 | struct key *global_auth_tok_key; |
336 | struct ecryptfs_auth_tok *global_auth_tok; | ||
337 | unsigned char sig[ECRYPTFS_SIG_SIZE_HEX + 1]; | 334 | unsigned char sig[ECRYPTFS_SIG_SIZE_HEX + 1]; |
338 | }; | 335 | }; |
339 | 336 | ||
@@ -380,7 +377,6 @@ struct ecryptfs_mount_crypt_stat { | |||
380 | u32 flags; | 377 | u32 flags; |
381 | struct list_head global_auth_tok_list; | 378 | struct list_head global_auth_tok_list; |
382 | struct mutex global_auth_tok_list_mutex; | 379 | struct mutex global_auth_tok_list_mutex; |
383 | size_t num_global_auth_toks; | ||
384 | size_t global_default_cipher_key_size; | 380 | size_t global_default_cipher_key_size; |
385 | size_t global_default_fn_cipher_key_bytes; | 381 | size_t global_default_fn_cipher_key_bytes; |
386 | unsigned char global_default_cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE | 382 | unsigned char global_default_cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE |
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 7d1050e254f9..cedc913d11ba 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -273,7 +273,14 @@ static int ecryptfs_release(struct inode *inode, struct file *file) | |||
273 | static int | 273 | static int |
274 | ecryptfs_fsync(struct file *file, int datasync) | 274 | ecryptfs_fsync(struct file *file, int datasync) |
275 | { | 275 | { |
276 | return vfs_fsync(ecryptfs_file_to_lower(file), datasync); | 276 | int rc = 0; |
277 | |||
278 | rc = generic_file_fsync(file, datasync); | ||
279 | if (rc) | ||
280 | goto out; | ||
281 | rc = vfs_fsync(ecryptfs_file_to_lower(file), datasync); | ||
282 | out: | ||
283 | return rc; | ||
277 | } | 284 | } |
278 | 285 | ||
279 | static int ecryptfs_fasync(int fd, struct file *file, int flag) | 286 | static int ecryptfs_fasync(int fd, struct file *file, int flag) |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index b592938a84bc..f99051b7adab 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -143,26 +143,6 @@ out: | |||
143 | } | 143 | } |
144 | 144 | ||
145 | /** | 145 | /** |
146 | * grow_file | ||
147 | * @ecryptfs_dentry: the eCryptfs dentry | ||
148 | * | ||
149 | * This is the code which will grow the file to its correct size. | ||
150 | */ | ||
151 | static int grow_file(struct dentry *ecryptfs_dentry) | ||
152 | { | ||
153 | struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode; | ||
154 | char zero_virt[] = { 0x00 }; | ||
155 | int rc = 0; | ||
156 | |||
157 | rc = ecryptfs_write(ecryptfs_inode, zero_virt, 0, 1); | ||
158 | i_size_write(ecryptfs_inode, 0); | ||
159 | rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); | ||
160 | ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat.flags |= | ||
161 | ECRYPTFS_NEW_FILE; | ||
162 | return rc; | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * ecryptfs_initialize_file | 146 | * ecryptfs_initialize_file |
167 | * | 147 | * |
168 | * Cause the file to be changed from a basic empty file to an ecryptfs | 148 | * Cause the file to be changed from a basic empty file to an ecryptfs |
@@ -181,7 +161,6 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) | |||
181 | crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); | 161 | crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); |
182 | goto out; | 162 | goto out; |
183 | } | 163 | } |
184 | crypt_stat->flags |= ECRYPTFS_NEW_FILE; | ||
185 | ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); | 164 | ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); |
186 | rc = ecryptfs_new_file_context(ecryptfs_dentry); | 165 | rc = ecryptfs_new_file_context(ecryptfs_dentry); |
187 | if (rc) { | 166 | if (rc) { |
@@ -202,9 +181,6 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) | |||
202 | printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); | 181 | printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); |
203 | goto out; | 182 | goto out; |
204 | } | 183 | } |
205 | rc = grow_file(ecryptfs_dentry); | ||
206 | if (rc) | ||
207 | printk(KERN_ERR "Error growing file; rc = [%d]\n", rc); | ||
208 | out: | 184 | out: |
209 | return rc; | 185 | return rc; |
210 | } | 186 | } |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index c1436cff6f2d..03e609c45012 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -65,6 +65,24 @@ static int process_request_key_err(long err_code) | |||
65 | return rc; | 65 | return rc; |
66 | } | 66 | } |
67 | 67 | ||
68 | static int process_find_global_auth_tok_for_sig_err(int err_code) | ||
69 | { | ||
70 | int rc = err_code; | ||
71 | |||
72 | switch (err_code) { | ||
73 | case -ENOENT: | ||
74 | ecryptfs_printk(KERN_WARNING, "Missing auth tok\n"); | ||
75 | break; | ||
76 | case -EINVAL: | ||
77 | ecryptfs_printk(KERN_WARNING, "Invalid auth tok\n"); | ||
78 | break; | ||
79 | default: | ||
80 | rc = process_request_key_err(err_code); | ||
81 | break; | ||
82 | } | ||
83 | return rc; | ||
84 | } | ||
85 | |||
68 | /** | 86 | /** |
69 | * ecryptfs_parse_packet_length | 87 | * ecryptfs_parse_packet_length |
70 | * @data: Pointer to memory containing length at offset | 88 | * @data: Pointer to memory containing length at offset |
@@ -403,27 +421,120 @@ out: | |||
403 | return rc; | 421 | return rc; |
404 | } | 422 | } |
405 | 423 | ||
424 | /** | ||
425 | * ecryptfs_verify_version | ||
426 | * @version: The version number to confirm | ||
427 | * | ||
428 | * Returns zero on good version; non-zero otherwise | ||
429 | */ | ||
430 | static int ecryptfs_verify_version(u16 version) | ||
431 | { | ||
432 | int rc = 0; | ||
433 | unsigned char major; | ||
434 | unsigned char minor; | ||
435 | |||
436 | major = ((version >> 8) & 0xFF); | ||
437 | minor = (version & 0xFF); | ||
438 | if (major != ECRYPTFS_VERSION_MAJOR) { | ||
439 | ecryptfs_printk(KERN_ERR, "Major version number mismatch. " | ||
440 | "Expected [%d]; got [%d]\n", | ||
441 | ECRYPTFS_VERSION_MAJOR, major); | ||
442 | rc = -EINVAL; | ||
443 | goto out; | ||
444 | } | ||
445 | if (minor != ECRYPTFS_VERSION_MINOR) { | ||
446 | ecryptfs_printk(KERN_ERR, "Minor version number mismatch. " | ||
447 | "Expected [%d]; got [%d]\n", | ||
448 | ECRYPTFS_VERSION_MINOR, minor); | ||
449 | rc = -EINVAL; | ||
450 | goto out; | ||
451 | } | ||
452 | out: | ||
453 | return rc; | ||
454 | } | ||
455 | |||
456 | /** | ||
457 | * ecryptfs_verify_auth_tok_from_key | ||
458 | * @auth_tok_key: key containing the authentication token | ||
459 | * @auth_tok: authentication token | ||
460 | * | ||
461 | * Returns zero on valid auth tok; -EINVAL otherwise | ||
462 | */ | ||
463 | static int | ||
464 | ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key, | ||
465 | struct ecryptfs_auth_tok **auth_tok) | ||
466 | { | ||
467 | int rc = 0; | ||
468 | |||
469 | (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key); | ||
470 | if (ecryptfs_verify_version((*auth_tok)->version)) { | ||
471 | printk(KERN_ERR "Data structure version mismatch. Userspace " | ||
472 | "tools must match eCryptfs kernel module with major " | ||
473 | "version [%d] and minor version [%d]\n", | ||
474 | ECRYPTFS_VERSION_MAJOR, ECRYPTFS_VERSION_MINOR); | ||
475 | rc = -EINVAL; | ||
476 | goto out; | ||
477 | } | ||
478 | if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD | ||
479 | && (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) { | ||
480 | printk(KERN_ERR "Invalid auth_tok structure " | ||
481 | "returned from key query\n"); | ||
482 | rc = -EINVAL; | ||
483 | goto out; | ||
484 | } | ||
485 | out: | ||
486 | return rc; | ||
487 | } | ||
488 | |||
406 | static int | 489 | static int |
407 | ecryptfs_find_global_auth_tok_for_sig( | 490 | ecryptfs_find_global_auth_tok_for_sig( |
408 | struct ecryptfs_global_auth_tok **global_auth_tok, | 491 | struct key **auth_tok_key, |
492 | struct ecryptfs_auth_tok **auth_tok, | ||
409 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat, char *sig) | 493 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat, char *sig) |
410 | { | 494 | { |
411 | struct ecryptfs_global_auth_tok *walker; | 495 | struct ecryptfs_global_auth_tok *walker; |
412 | int rc = 0; | 496 | int rc = 0; |
413 | 497 | ||
414 | (*global_auth_tok) = NULL; | 498 | (*auth_tok_key) = NULL; |
499 | (*auth_tok) = NULL; | ||
415 | mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); | 500 | mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); |
416 | list_for_each_entry(walker, | 501 | list_for_each_entry(walker, |
417 | &mount_crypt_stat->global_auth_tok_list, | 502 | &mount_crypt_stat->global_auth_tok_list, |
418 | mount_crypt_stat_list) { | 503 | mount_crypt_stat_list) { |
419 | if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX) == 0) { | 504 | if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX)) |
420 | rc = key_validate(walker->global_auth_tok_key); | 505 | continue; |
421 | if (!rc) | 506 | |
422 | (*global_auth_tok) = walker; | 507 | if (walker->flags & ECRYPTFS_AUTH_TOK_INVALID) { |
508 | rc = -EINVAL; | ||
423 | goto out; | 509 | goto out; |
424 | } | 510 | } |
511 | |||
512 | rc = key_validate(walker->global_auth_tok_key); | ||
513 | if (rc) { | ||
514 | if (rc == -EKEYEXPIRED) | ||
515 | goto out; | ||
516 | goto out_invalid_auth_tok; | ||
517 | } | ||
518 | |||
519 | down_write(&(walker->global_auth_tok_key->sem)); | ||
520 | rc = ecryptfs_verify_auth_tok_from_key( | ||
521 | walker->global_auth_tok_key, auth_tok); | ||
522 | if (rc) | ||
523 | goto out_invalid_auth_tok_unlock; | ||
524 | |||
525 | (*auth_tok_key) = walker->global_auth_tok_key; | ||
526 | key_get(*auth_tok_key); | ||
527 | goto out; | ||
425 | } | 528 | } |
426 | rc = -EINVAL; | 529 | rc = -ENOENT; |
530 | goto out; | ||
531 | out_invalid_auth_tok_unlock: | ||
532 | up_write(&(walker->global_auth_tok_key->sem)); | ||
533 | out_invalid_auth_tok: | ||
534 | printk(KERN_WARNING "Invalidating auth tok with sig = [%s]\n", sig); | ||
535 | walker->flags |= ECRYPTFS_AUTH_TOK_INVALID; | ||
536 | key_put(walker->global_auth_tok_key); | ||
537 | walker->global_auth_tok_key = NULL; | ||
427 | out: | 538 | out: |
428 | mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); | 539 | mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); |
429 | return rc; | 540 | return rc; |
@@ -451,14 +562,11 @@ ecryptfs_find_auth_tok_for_sig( | |||
451 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat, | 562 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat, |
452 | char *sig) | 563 | char *sig) |
453 | { | 564 | { |
454 | struct ecryptfs_global_auth_tok *global_auth_tok; | ||
455 | int rc = 0; | 565 | int rc = 0; |
456 | 566 | ||
457 | (*auth_tok_key) = NULL; | 567 | rc = ecryptfs_find_global_auth_tok_for_sig(auth_tok_key, auth_tok, |
458 | (*auth_tok) = NULL; | 568 | mount_crypt_stat, sig); |
459 | if (ecryptfs_find_global_auth_tok_for_sig(&global_auth_tok, | 569 | if (rc == -ENOENT) { |
460 | mount_crypt_stat, sig)) { | ||
461 | |||
462 | /* if the flag ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY is set in the | 570 | /* if the flag ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY is set in the |
463 | * mount_crypt_stat structure, we prevent the use of auth toks that | 571 |
464 | * are not inserted through the ecryptfs_add_global_auth_tok | 572 | * are not inserted through the ecryptfs_add_global_auth_tok |
@@ -470,8 +578,7 @@ ecryptfs_find_auth_tok_for_sig( | |||
470 | 578 | ||
471 | rc = ecryptfs_keyring_auth_tok_for_sig(auth_tok_key, auth_tok, | 579 | rc = ecryptfs_keyring_auth_tok_for_sig(auth_tok_key, auth_tok, |
472 | sig); | 580 | sig); |
473 | } else | 581 | } |
474 | (*auth_tok) = global_auth_tok->global_auth_tok; | ||
475 | return rc; | 582 | return rc; |
476 | } | 583 | } |
477 | 584 | ||
@@ -531,6 +638,16 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, | |||
531 | } | 638 | } |
532 | s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 639 | s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
533 | (*packet_size) = 0; | 640 | (*packet_size) = 0; |
641 | rc = ecryptfs_find_auth_tok_for_sig( | ||
642 | &auth_tok_key, | ||
643 | &s->auth_tok, mount_crypt_stat, | ||
644 | mount_crypt_stat->global_default_fnek_sig); | ||
645 | if (rc) { | ||
646 | printk(KERN_ERR "%s: Error attempting to find auth tok for " | ||
647 | "fnek sig [%s]; rc = [%d]\n", __func__, | ||
648 | mount_crypt_stat->global_default_fnek_sig, rc); | ||
649 | goto out; | ||
650 | } | ||
534 | rc = ecryptfs_get_tfm_and_mutex_for_cipher_name( | 651 | rc = ecryptfs_get_tfm_and_mutex_for_cipher_name( |
535 | &s->desc.tfm, | 652 | &s->desc.tfm, |
536 | &s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name); | 653 | &s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name); |
@@ -616,16 +733,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, | |||
616 | goto out_free_unlock; | 733 | goto out_free_unlock; |
617 | } | 734 | } |
618 | dest[s->i++] = s->cipher_code; | 735 | dest[s->i++] = s->cipher_code; |
619 | rc = ecryptfs_find_auth_tok_for_sig( | ||
620 | &auth_tok_key, | ||
621 | &s->auth_tok, mount_crypt_stat, | ||
622 | mount_crypt_stat->global_default_fnek_sig); | ||
623 | if (rc) { | ||
624 | printk(KERN_ERR "%s: Error attempting to find auth tok for " | ||
625 | "fnek sig [%s]; rc = [%d]\n", __func__, | ||
626 | mount_crypt_stat->global_default_fnek_sig, rc); | ||
627 | goto out_free_unlock; | ||
628 | } | ||
629 | /* TODO: Support other key modules than passphrase for | 736 | /* TODO: Support other key modules than passphrase for |
630 | * filename encryption */ | 737 | * filename encryption */ |
631 | if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) { | 738 | if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) { |
@@ -765,8 +872,10 @@ out_free_unlock: | |||
765 | out_unlock: | 872 | out_unlock: |
766 | mutex_unlock(s->tfm_mutex); | 873 | mutex_unlock(s->tfm_mutex); |
767 | out: | 874 | out: |
768 | if (auth_tok_key) | 875 | if (auth_tok_key) { |
876 | up_write(&(auth_tok_key->sem)); | ||
769 | key_put(auth_tok_key); | 877 | key_put(auth_tok_key); |
878 | } | ||
770 | kfree(s); | 879 | kfree(s); |
771 | return rc; | 880 | return rc; |
772 | } | 881 | } |
@@ -879,6 +988,15 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, | |||
879 | __func__, s->cipher_code); | 988 | __func__, s->cipher_code); |
880 | goto out; | 989 | goto out; |
881 | } | 990 | } |
991 | rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key, | ||
992 | &s->auth_tok, mount_crypt_stat, | ||
993 | s->fnek_sig_hex); | ||
994 | if (rc) { | ||
995 | printk(KERN_ERR "%s: Error attempting to find auth tok for " | ||
996 | "fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex, | ||
997 | rc); | ||
998 | goto out; | ||
999 | } | ||
882 | rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->desc.tfm, | 1000 | rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->desc.tfm, |
883 | &s->tfm_mutex, | 1001 | &s->tfm_mutex, |
884 | s->cipher_string); | 1002 | s->cipher_string); |
@@ -925,15 +1043,6 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, | |||
925 | * >= ECRYPTFS_MAX_IV_BYTES. */ | 1043 | * >= ECRYPTFS_MAX_IV_BYTES. */ |
926 | memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES); | 1044 | memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES); |
927 | s->desc.info = s->iv; | 1045 | s->desc.info = s->iv; |
928 | rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key, | ||
929 | &s->auth_tok, mount_crypt_stat, | ||
930 | s->fnek_sig_hex); | ||
931 | if (rc) { | ||
932 | printk(KERN_ERR "%s: Error attempting to find auth tok for " | ||
933 | "fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex, | ||
934 | rc); | ||
935 | goto out_free_unlock; | ||
936 | } | ||
937 | /* TODO: Support other key modules than passphrase for | 1046 | /* TODO: Support other key modules than passphrase for |
938 | * filename encryption */ | 1047 | * filename encryption */ |
939 | if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) { | 1048 | if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) { |
@@ -1002,8 +1111,10 @@ out: | |||
1002 | (*filename_size) = 0; | 1111 | (*filename_size) = 0; |
1003 | (*filename) = NULL; | 1112 | (*filename) = NULL; |
1004 | } | 1113 | } |
1005 | if (auth_tok_key) | 1114 | if (auth_tok_key) { |
1115 | up_write(&(auth_tok_key->sem)); | ||
1006 | key_put(auth_tok_key); | 1116 | key_put(auth_tok_key); |
1117 | } | ||
1007 | kfree(s); | 1118 | kfree(s); |
1008 | return rc; | 1119 | return rc; |
1009 | } | 1120 | } |
@@ -1520,38 +1631,6 @@ out: | |||
1520 | return rc; | 1631 | return rc; |
1521 | } | 1632 | } |
1522 | 1633 | ||
1523 | /** | ||
1524 | * ecryptfs_verify_version | ||
1525 | * @version: The version number to confirm | ||
1526 | * | ||
1527 | * Returns zero on good version; non-zero otherwise | ||
1528 | */ | ||
1529 | static int ecryptfs_verify_version(u16 version) | ||
1530 | { | ||
1531 | int rc = 0; | ||
1532 | unsigned char major; | ||
1533 | unsigned char minor; | ||
1534 | |||
1535 | major = ((version >> 8) & 0xFF); | ||
1536 | minor = (version & 0xFF); | ||
1537 | if (major != ECRYPTFS_VERSION_MAJOR) { | ||
1538 | ecryptfs_printk(KERN_ERR, "Major version number mismatch. " | ||
1539 | "Expected [%d]; got [%d]\n", | ||
1540 | ECRYPTFS_VERSION_MAJOR, major); | ||
1541 | rc = -EINVAL; | ||
1542 | goto out; | ||
1543 | } | ||
1544 | if (minor != ECRYPTFS_VERSION_MINOR) { | ||
1545 | ecryptfs_printk(KERN_ERR, "Minor version number mismatch. " | ||
1546 | "Expected [%d]; got [%d]\n", | ||
1547 | ECRYPTFS_VERSION_MINOR, minor); | ||
1548 | rc = -EINVAL; | ||
1549 | goto out; | ||
1550 | } | ||
1551 | out: | ||
1552 | return rc; | ||
1553 | } | ||
1554 | |||
1555 | int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, | 1634 | int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, |
1556 | struct ecryptfs_auth_tok **auth_tok, | 1635 | struct ecryptfs_auth_tok **auth_tok, |
1557 | char *sig) | 1636 | char *sig) |
@@ -1563,31 +1642,16 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, | |||
1563 | printk(KERN_ERR "Could not find key with description: [%s]\n", | 1642 | printk(KERN_ERR "Could not find key with description: [%s]\n", |
1564 | sig); | 1643 | sig); |
1565 | rc = process_request_key_err(PTR_ERR(*auth_tok_key)); | 1644 | rc = process_request_key_err(PTR_ERR(*auth_tok_key)); |
1645 | (*auth_tok_key) = NULL; | ||
1566 | goto out; | 1646 | goto out; |
1567 | } | 1647 | } |
1568 | (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key); | 1648 | down_write(&(*auth_tok_key)->sem); |
1569 | if (ecryptfs_verify_version((*auth_tok)->version)) { | 1649 | rc = ecryptfs_verify_auth_tok_from_key(*auth_tok_key, auth_tok); |
1570 | printk(KERN_ERR | ||
1571 | "Data structure version mismatch. " | ||
1572 | "Userspace tools must match eCryptfs " | ||
1573 | "kernel module with major version [%d] " | ||
1574 | "and minor version [%d]\n", | ||
1575 | ECRYPTFS_VERSION_MAJOR, | ||
1576 | ECRYPTFS_VERSION_MINOR); | ||
1577 | rc = -EINVAL; | ||
1578 | goto out_release_key; | ||
1579 | } | ||
1580 | if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD | ||
1581 | && (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) { | ||
1582 | printk(KERN_ERR "Invalid auth_tok structure " | ||
1583 | "returned from key query\n"); | ||
1584 | rc = -EINVAL; | ||
1585 | goto out_release_key; | ||
1586 | } | ||
1587 | out_release_key: | ||
1588 | if (rc) { | 1650 | if (rc) { |
1651 | up_write(&(*auth_tok_key)->sem); | ||
1589 | key_put(*auth_tok_key); | 1652 | key_put(*auth_tok_key); |
1590 | (*auth_tok_key) = NULL; | 1653 | (*auth_tok_key) = NULL; |
1654 | goto out; | ||
1591 | } | 1655 | } |
1592 | out: | 1656 | out: |
1593 | return rc; | 1657 | return rc; |
@@ -1809,6 +1873,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat, | |||
1809 | find_next_matching_auth_tok: | 1873 | find_next_matching_auth_tok: |
1810 | found_auth_tok = 0; | 1874 | found_auth_tok = 0; |
1811 | if (auth_tok_key) { | 1875 | if (auth_tok_key) { |
1876 | up_write(&(auth_tok_key->sem)); | ||
1812 | key_put(auth_tok_key); | 1877 | key_put(auth_tok_key); |
1813 | auth_tok_key = NULL; | 1878 | auth_tok_key = NULL; |
1814 | } | 1879 | } |
@@ -1895,8 +1960,10 @@ found_matching_auth_tok: | |||
1895 | out_wipe_list: | 1960 | out_wipe_list: |
1896 | wipe_auth_tok_list(&auth_tok_list); | 1961 | wipe_auth_tok_list(&auth_tok_list); |
1897 | out: | 1962 | out: |
1898 | if (auth_tok_key) | 1963 | if (auth_tok_key) { |
1964 | up_write(&(auth_tok_key->sem)); | ||
1899 | key_put(auth_tok_key); | 1965 | key_put(auth_tok_key); |
1966 | } | ||
1900 | return rc; | 1967 | return rc; |
1901 | } | 1968 | } |
1902 | 1969 | ||
@@ -2324,7 +2391,7 @@ ecryptfs_generate_key_packet_set(char *dest_base, | |||
2324 | size_t max) | 2391 | size_t max) |
2325 | { | 2392 | { |
2326 | struct ecryptfs_auth_tok *auth_tok; | 2393 | struct ecryptfs_auth_tok *auth_tok; |
2327 | struct ecryptfs_global_auth_tok *global_auth_tok; | 2394 | struct key *auth_tok_key = NULL; |
2328 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = | 2395 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = |
2329 | &ecryptfs_superblock_to_private( | 2396 | &ecryptfs_superblock_to_private( |
2330 | ecryptfs_dentry->d_sb)->mount_crypt_stat; | 2397 | ecryptfs_dentry->d_sb)->mount_crypt_stat; |
@@ -2343,21 +2410,16 @@ ecryptfs_generate_key_packet_set(char *dest_base, | |||
2343 | list_for_each_entry(key_sig, &crypt_stat->keysig_list, | 2410 | list_for_each_entry(key_sig, &crypt_stat->keysig_list, |
2344 | crypt_stat_list) { | 2411 | crypt_stat_list) { |
2345 | memset(key_rec, 0, sizeof(*key_rec)); | 2412 | memset(key_rec, 0, sizeof(*key_rec)); |
2346 | rc = ecryptfs_find_global_auth_tok_for_sig(&global_auth_tok, | 2413 | rc = ecryptfs_find_global_auth_tok_for_sig(&auth_tok_key, |
2414 | &auth_tok, | ||
2347 | mount_crypt_stat, | 2415 | mount_crypt_stat, |
2348 | key_sig->keysig); | 2416 | key_sig->keysig); |
2349 | if (rc) { | 2417 | if (rc) { |
2350 | printk(KERN_ERR "Error attempting to get the global " | 2418 | printk(KERN_WARNING "Unable to retrieve auth tok with " |
2351 | "auth_tok; rc = [%d]\n", rc); | 2419 | "sig = [%s]\n", key_sig->keysig); |
2420 | rc = process_find_global_auth_tok_for_sig_err(rc); | ||
2352 | goto out_free; | 2421 | goto out_free; |
2353 | } | 2422 | } |
2354 | if (global_auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID) { | ||
2355 | printk(KERN_WARNING | ||
2356 | "Skipping invalid auth tok with sig = [%s]\n", | ||
2357 | global_auth_tok->sig); | ||
2358 | continue; | ||
2359 | } | ||
2360 | auth_tok = global_auth_tok->global_auth_tok; | ||
2361 | if (auth_tok->token_type == ECRYPTFS_PASSWORD) { | 2423 | if (auth_tok->token_type == ECRYPTFS_PASSWORD) { |
2362 | rc = write_tag_3_packet((dest_base + (*len)), | 2424 | rc = write_tag_3_packet((dest_base + (*len)), |
2363 | &max, auth_tok, | 2425 | &max, auth_tok, |
@@ -2395,6 +2457,9 @@ ecryptfs_generate_key_packet_set(char *dest_base, | |||
2395 | rc = -EINVAL; | 2457 | rc = -EINVAL; |
2396 | goto out_free; | 2458 | goto out_free; |
2397 | } | 2459 | } |
2460 | up_write(&(auth_tok_key->sem)); | ||
2461 | key_put(auth_tok_key); | ||
2462 | auth_tok_key = NULL; | ||
2398 | } | 2463 | } |
2399 | if (likely(max > 0)) { | 2464 | if (likely(max > 0)) { |
2400 | dest_base[(*len)] = 0x00; | 2465 | dest_base[(*len)] = 0x00; |
@@ -2407,6 +2472,11 @@ out_free: | |||
2407 | out: | 2472 | out: |
2408 | if (rc) | 2473 | if (rc) |
2409 | (*len) = 0; | 2474 | (*len) = 0; |
2475 | if (auth_tok_key) { | ||
2476 | up_write(&(auth_tok_key->sem)); | ||
2477 | key_put(auth_tok_key); | ||
2478 | } | ||
2479 | |||
2410 | mutex_unlock(&crypt_stat->keysig_list_mutex); | 2480 | mutex_unlock(&crypt_stat->keysig_list_mutex); |
2411 | return rc; | 2481 | return rc; |
2412 | } | 2482 | } |
@@ -2424,6 +2494,7 @@ int ecryptfs_add_keysig(struct ecryptfs_crypt_stat *crypt_stat, char *sig) | |||
2424 | return -ENOMEM; | 2494 | return -ENOMEM; |
2425 | } | 2495 | } |
2426 | memcpy(new_key_sig->keysig, sig, ECRYPTFS_SIG_SIZE_HEX); | 2496 | memcpy(new_key_sig->keysig, sig, ECRYPTFS_SIG_SIZE_HEX); |
2497 | new_key_sig->keysig[ECRYPTFS_SIG_SIZE_HEX] = '\0'; | ||
2427 | /* Caller must hold keysig_list_mutex */ | 2498 | /* Caller must hold keysig_list_mutex */ |
2428 | list_add(&new_key_sig->crypt_stat_list, &crypt_stat->keysig_list); | 2499 | list_add(&new_key_sig->crypt_stat_list, &crypt_stat->keysig_list); |
2429 | 2500 | ||
@@ -2453,7 +2524,6 @@ ecryptfs_add_global_auth_tok(struct ecryptfs_mount_crypt_stat *mount_crypt_stat, | |||
2453 | mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); | 2524 | mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); |
2454 | list_add(&new_auth_tok->mount_crypt_stat_list, | 2525 | list_add(&new_auth_tok->mount_crypt_stat_list, |
2455 | &mount_crypt_stat->global_auth_tok_list); | 2526 | &mount_crypt_stat->global_auth_tok_list); |
2456 | mount_crypt_stat->num_global_auth_toks++; | ||
2457 | mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); | 2527 | mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); |
2458 | out: | 2528 | out: |
2459 | return rc; | 2529 | return rc; |
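After this rework, ecryptfs_find_auth_tok_for_sig() (and the keyring lookup behind it) returns the key with a reference taken and its semaphore held for write, so the auth tok payload cannot be updated or revoked while it is in use; every caller is then responsible for up_write() followed by key_put(). The caller-side pattern, reduced to a sketch using the names from the hunks above:

	struct key *auth_tok_key = NULL;
	struct ecryptfs_auth_tok *auth_tok;
	int rc;

	rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key, &auth_tok,
					    mount_crypt_stat, sig);
	if (rc)
		goto out;

	/* auth_tok is stable here: the key's sem is write-held */

out:
	if (auth_tok_key) {
		up_write(&auth_tok_key->sem);
		key_put(auth_tok_key);
	}
	return rc;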
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 758323a0f09a..c27c0ecf90bc 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -122,7 +122,6 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) | |||
122 | ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); | 122 | ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); |
123 | int rc = 0; | 123 | int rc = 0; |
124 | 124 | ||
125 | mutex_lock(&inode_info->lower_file_mutex); | ||
126 | if (!inode_info->lower_file) { | 125 | if (!inode_info->lower_file) { |
127 | struct dentry *lower_dentry; | 126 | struct dentry *lower_dentry; |
128 | struct vfsmount *lower_mnt = | 127 | struct vfsmount *lower_mnt = |
@@ -138,7 +137,6 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) | |||
138 | inode_info->lower_file = NULL; | 137 | inode_info->lower_file = NULL; |
139 | } | 138 | } |
140 | } | 139 | } |
141 | mutex_unlock(&inode_info->lower_file_mutex); | ||
142 | return rc; | 140 | return rc; |
143 | } | 141 | } |
144 | 142 | ||
@@ -241,14 +239,14 @@ static int ecryptfs_init_global_auth_toks( | |||
241 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat) | 239 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat) |
242 | { | 240 | { |
243 | struct ecryptfs_global_auth_tok *global_auth_tok; | 241 | struct ecryptfs_global_auth_tok *global_auth_tok; |
242 | struct ecryptfs_auth_tok *auth_tok; | ||
244 | int rc = 0; | 243 | int rc = 0; |
245 | 244 | ||
246 | list_for_each_entry(global_auth_tok, | 245 | list_for_each_entry(global_auth_tok, |
247 | &mount_crypt_stat->global_auth_tok_list, | 246 | &mount_crypt_stat->global_auth_tok_list, |
248 | mount_crypt_stat_list) { | 247 | mount_crypt_stat_list) { |
249 | rc = ecryptfs_keyring_auth_tok_for_sig( | 248 | rc = ecryptfs_keyring_auth_tok_for_sig( |
250 | &global_auth_tok->global_auth_tok_key, | 249 | &global_auth_tok->global_auth_tok_key, &auth_tok, |
251 | &global_auth_tok->global_auth_tok, | ||
252 | global_auth_tok->sig); | 250 | global_auth_tok->sig); |
253 | if (rc) { | 251 | if (rc) { |
254 | printk(KERN_ERR "Could not find valid key in user " | 252 | printk(KERN_ERR "Could not find valid key in user " |
@@ -256,8 +254,10 @@ static int ecryptfs_init_global_auth_toks( | |||
256 | "option: [%s]\n", global_auth_tok->sig); | 254 | "option: [%s]\n", global_auth_tok->sig); |
257 | global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID; | 255 | global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID; |
258 | goto out; | 256 | goto out; |
259 | } else | 257 | } else { |
260 | global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID; | 258 | global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID; |
259 | up_write(&(global_auth_tok->global_auth_tok_key)->sem); | ||
260 | } | ||
261 | } | 261 | } |
262 | out: | 262 | out: |
263 | return rc; | 263 | return rc; |
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index cc64fca89f8d..6a44148c5fb9 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -62,6 +62,18 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) | |||
62 | { | 62 | { |
63 | int rc; | 63 | int rc; |
64 | 64 | ||
65 | /* | ||
66 | * Refuse to write the page out if we are called from reclaim context | ||
67 | * since our writepage() path may potentially allocate memory when | ||
68 | * calling into the lower fs vfs_write() which may in turn invoke | ||
69 | * us again. | ||
70 | */ | ||
71 | if (current->flags & PF_MEMALLOC) { | ||
72 | redirty_page_for_writepage(wbc, page); | ||
73 | rc = 0; | ||
74 | goto out; | ||
75 | } | ||
76 | |||
65 | rc = ecryptfs_encrypt_page(page); | 77 | rc = ecryptfs_encrypt_page(page); |
66 | if (rc) { | 78 | if (rc) { |
67 | ecryptfs_printk(KERN_WARNING, "Error encrypting " | 79 | ecryptfs_printk(KERN_WARNING, "Error encrypting " |
@@ -70,8 +82,8 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) | |||
70 | goto out; | 82 | goto out; |
71 | } | 83 | } |
72 | SetPageUptodate(page); | 84 | SetPageUptodate(page); |
73 | unlock_page(page); | ||
74 | out: | 85 | out: |
86 | unlock_page(page); | ||
75 | return rc; | 87 | return rc; |
76 | } | 88 | } |
77 | 89 | ||
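The writepage() change above guards against re-entering the filesystem from memory reclaim: when PF_MEMALLOC is set, the page is redirtied and left for a later, safe writeback pass, because writing it now could allocate memory in the lower filesystem and recurse back into this writepage. The sketch below shows that guard in isolation for a generic stacked filesystem whose write path may allocate; it is illustrative, not the eCryptfs function itself.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/sched.h>

/* Stand-in for a write path (e.g. encrypt + write to lower fs) that may
 * allocate memory. */
int demo_encrypt_and_write(struct page *page);

static int demo_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = 0;

	if (current->flags & PF_MEMALLOC) {
		/* Called from reclaim: don't risk allocating and recursing.
		 * Leave the page dirty so normal writeback retries it. */
		redirty_page_for_writepage(wbc, page);
		goto out;
	}

	rc = demo_encrypt_and_write(page);
	if (!rc)
		SetPageUptodate(page);
out:
	unlock_page(page);	/* ->writepage must unlock on every path */
	return rc;
}

Moving unlock_page() under the out: label, as the hunk that follows does, is part of the same fix: every exit, including the new early return, must leave the page unlocked exactly once.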
@@ -193,11 +205,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page) | |||
193 | &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat; | 205 | &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat; |
194 | int rc = 0; | 206 | int rc = 0; |
195 | 207 | ||
196 | if (!crypt_stat | 208 | if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { |
197 | || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED) | ||
198 | || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) { | ||
199 | ecryptfs_printk(KERN_DEBUG, | ||
200 | "Passing through unencrypted page\n"); | ||
201 | rc = ecryptfs_read_lower_page_segment(page, page->index, 0, | 209 | rc = ecryptfs_read_lower_page_segment(page, page->index, 0, |
202 | PAGE_CACHE_SIZE, | 210 | PAGE_CACHE_SIZE, |
203 | page->mapping->host); | 211 | page->mapping->host); |
@@ -295,8 +303,7 @@ static int ecryptfs_write_begin(struct file *file, | |||
295 | struct ecryptfs_crypt_stat *crypt_stat = | 303 | struct ecryptfs_crypt_stat *crypt_stat = |
296 | &ecryptfs_inode_to_private(mapping->host)->crypt_stat; | 304 | &ecryptfs_inode_to_private(mapping->host)->crypt_stat; |
297 | 305 | ||
298 | if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED) | 306 | if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { |
299 | || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) { | ||
300 | rc = ecryptfs_read_lower_page_segment( | 307 | rc = ecryptfs_read_lower_page_segment( |
301 | page, index, 0, PAGE_CACHE_SIZE, mapping->host); | 308 | page, index, 0, PAGE_CACHE_SIZE, mapping->host); |
302 | if (rc) { | 309 | if (rc) { |
@@ -374,6 +381,11 @@ static int ecryptfs_write_begin(struct file *file, | |||
374 | && (pos != 0)) | 381 | && (pos != 0)) |
375 | zero_user(page, 0, PAGE_CACHE_SIZE); | 382 | zero_user(page, 0, PAGE_CACHE_SIZE); |
376 | out: | 383 | out: |
384 | if (unlikely(rc)) { | ||
385 | unlock_page(page); | ||
386 | page_cache_release(page); | ||
387 | *pagep = NULL; | ||
388 | } | ||
377 | return rc; | 389 | return rc; |
378 | } | 390 | } |
379 | 391 | ||
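The write_begin hunk above makes the error path release everything ->write_begin had acquired: on failure the locked page is unlocked and dropped, and *pagep is cleared so the generic write path does not hand a stale page to ->write_end. A compressed sketch of that contract, assuming a grab_cache_page_write_begin()-style allocation and a hypothetical demo_prepare_page() helper, is:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Stand-in for the fs-specific page preparation (read-in, zeroing, etc.). */
int demo_prepare_page(struct page *page, loff_t pos, unsigned len);

static int demo_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	int rc;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	rc = demo_prepare_page(page, pos, len);
	if (unlikely(rc)) {
		/* Undo what we grabbed so the caller sees nothing. */
		unlock_page(page);
		page_cache_release(page);
		*pagep = NULL;
	}
	return rc;
}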
@@ -486,13 +498,8 @@ static int ecryptfs_write_end(struct file *file, | |||
486 | struct ecryptfs_crypt_stat *crypt_stat = | 498 | struct ecryptfs_crypt_stat *crypt_stat = |
487 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; | 499 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; |
488 | int rc; | 500 | int rc; |
501 | int need_unlock_page = 1; | ||
489 | 502 | ||
490 | if (crypt_stat->flags & ECRYPTFS_NEW_FILE) { | ||
491 | ecryptfs_printk(KERN_DEBUG, "ECRYPTFS_NEW_FILE flag set in " | ||
492 | "crypt_stat at memory location [%p]\n", crypt_stat); | ||
493 | crypt_stat->flags &= ~(ECRYPTFS_NEW_FILE); | ||
494 | } else | ||
495 | ecryptfs_printk(KERN_DEBUG, "Not a new file\n"); | ||
496 | ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" | 503 | ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" |
497 | "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); | 504 | "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); |
498 | if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { | 505 | if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { |
@@ -512,26 +519,26 @@ static int ecryptfs_write_end(struct file *file, | |||
512 | "zeros in page with index = [0x%.16lx]\n", index); | 519 | "zeros in page with index = [0x%.16lx]\n", index); |
513 | goto out; | 520 | goto out; |
514 | } | 521 | } |
515 | rc = ecryptfs_encrypt_page(page); | 522 | set_page_dirty(page); |
516 | if (rc) { | 523 | unlock_page(page); |
517 | ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " | 524 | need_unlock_page = 0; |
518 | "index [0x%.16lx])\n", index); | ||
519 | goto out; | ||
520 | } | ||
521 | if (pos + copied > i_size_read(ecryptfs_inode)) { | 525 | if (pos + copied > i_size_read(ecryptfs_inode)) { |
522 | i_size_write(ecryptfs_inode, pos + copied); | 526 | i_size_write(ecryptfs_inode, pos + copied); |
523 | ecryptfs_printk(KERN_DEBUG, "Expanded file size to " | 527 | ecryptfs_printk(KERN_DEBUG, "Expanded file size to " |
524 | "[0x%.16llx]\n", | 528 | "[0x%.16llx]\n", |
525 | (unsigned long long)i_size_read(ecryptfs_inode)); | 529 | (unsigned long long)i_size_read(ecryptfs_inode)); |
530 | balance_dirty_pages_ratelimited(mapping); | ||
531 | rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); | ||
532 | if (rc) { | ||
533 | printk(KERN_ERR "Error writing inode size to metadata; " | ||
534 | "rc = [%d]\n", rc); | ||
535 | goto out; | ||
536 | } | ||
526 | } | 537 | } |
527 | rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); | 538 | rc = copied; |
528 | if (rc) | ||
529 | printk(KERN_ERR "Error writing inode size to metadata; " | ||
530 | "rc = [%d]\n", rc); | ||
531 | else | ||
532 | rc = copied; | ||
533 | out: | 539 | out: |
534 | unlock_page(page); | 540 | if (need_unlock_page) |
541 | unlock_page(page); | ||
535 | page_cache_release(page); | 542 | page_cache_release(page); |
536 | return rc; | 543 | return rc; |
537 | } | 544 | } |
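The write_end changes above stop encrypting synchronously on every write: the page is marked dirty and unlocked, encryption is deferred to writepage(), and balance_dirty_pages_ratelimited() throttles the writer when dirty pages pile up. The fragment below sketches that ordering with the eCryptfs-specific zero-filling and metadata update reduced to comments; it is a shape, not the actual function.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static int demo_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	/* Defer the expensive transform (here: encryption) to ->writepage;
	 * just record that the page now carries new data. */
	set_page_dirty(page);
	unlock_page(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		/* Keep dirty memory bounded while the file keeps growing. */
		balance_dirty_pages_ratelimited(mapping);
		/* ... persist the new size in fs metadata here ... */
	}

	page_cache_release(page);
	return copied;
}

The need_unlock_page flag in the real hunk exists because the original function still has earlier error paths that reach out: with the page lock held; once the page is unlocked on the success path, the label must not unlock it a second time.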
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index db184ef15d3d..85d430963116 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c | |||
@@ -44,15 +44,11 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, | |||
44 | ssize_t rc; | 44 | ssize_t rc; |
45 | 45 | ||
46 | inode_info = ecryptfs_inode_to_private(ecryptfs_inode); | 46 | inode_info = ecryptfs_inode_to_private(ecryptfs_inode); |
47 | mutex_lock(&inode_info->lower_file_mutex); | ||
48 | BUG_ON(!inode_info->lower_file); | 47 | BUG_ON(!inode_info->lower_file); |
49 | inode_info->lower_file->f_pos = offset; | ||
50 | fs_save = get_fs(); | 48 | fs_save = get_fs(); |
51 | set_fs(get_ds()); | 49 | set_fs(get_ds()); |
52 | rc = vfs_write(inode_info->lower_file, data, size, | 50 | rc = vfs_write(inode_info->lower_file, data, size, &offset); |
53 | &inode_info->lower_file->f_pos); | ||
54 | set_fs(fs_save); | 51 | set_fs(fs_save); |
55 | mutex_unlock(&inode_info->lower_file_mutex); | ||
56 | mark_inode_dirty_sync(ecryptfs_inode); | 52 | mark_inode_dirty_sync(ecryptfs_inode); |
57 | return rc; | 53 | return rc; |
58 | } | 54 | } |
@@ -234,15 +230,11 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size, | |||
234 | mm_segment_t fs_save; | 230 | mm_segment_t fs_save; |
235 | ssize_t rc; | 231 | ssize_t rc; |
236 | 232 | ||
237 | mutex_lock(&inode_info->lower_file_mutex); | ||
238 | BUG_ON(!inode_info->lower_file); | 233 | BUG_ON(!inode_info->lower_file); |
239 | inode_info->lower_file->f_pos = offset; | ||
240 | fs_save = get_fs(); | 234 | fs_save = get_fs(); |
241 | set_fs(get_ds()); | 235 | set_fs(get_ds()); |
242 | rc = vfs_read(inode_info->lower_file, data, size, | 236 | rc = vfs_read(inode_info->lower_file, data, size, &offset); |
243 | &inode_info->lower_file->f_pos); | ||
244 | set_fs(fs_save); | 237 | set_fs(fs_save); |
245 | mutex_unlock(&inode_info->lower_file_mutex); | ||
246 | return rc; | 238 | return rc; |
247 | } | 239 | } |
248 | 240 | ||
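In read_write.c the lower file is no longer positioned by writing lower_file->f_pos; the caller's offset is passed by address straight into vfs_read()/vfs_write(). Each call then carries its own position, so the lower_file_mutex that serialized the seek-then-I/O pair can be removed. The user-space analogue is the difference between lseek()+read() and pread(); the short, runnable program below shows the pread() form, which is the same idea outside the kernel.

/* pread() carries its own offset, so concurrent readers of one fd do not
 * race on a shared file position the way lseek()+read() would. */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0)
		return 1;

	/* No lseek(), no shared f_pos update: the offset travels with the call. */
	n = pread(fd, buf, sizeof(buf) - 1, 0);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);
	}
	close(fd);
	return 0;
}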
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index 3042fe123a34..bacc882e1ae4 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c | |||
@@ -55,7 +55,6 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb) | |||
55 | if (unlikely(!inode_info)) | 55 | if (unlikely(!inode_info)) |
56 | goto out; | 56 | goto out; |
57 | ecryptfs_init_crypt_stat(&inode_info->crypt_stat); | 57 | ecryptfs_init_crypt_stat(&inode_info->crypt_stat); |
58 | mutex_init(&inode_info->lower_file_mutex); | ||
59 | inode_info->lower_file = NULL; | 58 | inode_info->lower_file = NULL; |
60 | inode = &inode_info->vfs_inode; | 59 | inode = &inode_info->vfs_inode; |
61 | out: | 60 | out: |
@@ -198,7 +197,7 @@ static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt) | |||
198 | const struct super_operations ecryptfs_sops = { | 197 | const struct super_operations ecryptfs_sops = { |
199 | .alloc_inode = ecryptfs_alloc_inode, | 198 | .alloc_inode = ecryptfs_alloc_inode, |
200 | .destroy_inode = ecryptfs_destroy_inode, | 199 | .destroy_inode = ecryptfs_destroy_inode, |
201 | .drop_inode = generic_delete_inode, | 200 | .drop_inode = generic_drop_inode, |
202 | .statfs = ecryptfs_statfs, | 201 | .statfs = ecryptfs_statfs, |
203 | .remount_fs = NULL, | 202 | .remount_fs = NULL, |
204 | .evict_inode = ecryptfs_evict_inode, | 203 | .evict_inode = ecryptfs_evict_inode, |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index ad92bf731ff5..9166fcb66da2 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -192,13 +192,15 @@ static rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server, struct de | |||
192 | auth = rpcauth_create(flavor, clone); | 192 | auth = rpcauth_create(flavor, clone); |
193 | if (!auth) { | 193 | if (!auth) { |
194 | flavor = -EIO; | 194 | flavor = -EIO; |
195 | goto out; | 195 | goto out_shutdown; |
196 | } | 196 | } |
197 | err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode, | 197 | err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode, |
198 | &path->dentry->d_name, | 198 | &path->dentry->d_name, |
199 | fh, fattr); | 199 | fh, fattr); |
200 | if (err < 0) | 200 | if (err < 0) |
201 | flavor = err; | 201 | flavor = err; |
202 | out_shutdown: | ||
203 | rpc_shutdown_client(clone); | ||
202 | out: | 204 | out: |
203 | return flavor; | 205 | return flavor; |
204 | } | 206 | } |
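The namespace.c hunk plugs an RPC client leak: the rpcauth_create() failure path used to jump straight to out and skip rpc_shutdown_client(), so the fix routes both the failure and the normal fall-through over a new out_shutdown label. The general goto-ladder shape, with placeholder names standing in for the RPC calls, looks like this:

#include <linux/errno.h>

/* Hypothetical stand-ins for rpc_clone_client() / rpcauth_create() /
 * rpc_shutdown_client(); the point is the label ordering, not the API. */
void *demo_clone_client(void);
void *demo_create_auth(void *clone);
int   demo_do_lookup(void *clone);
void  demo_shutdown_client(void *clone);

static int demo_lookup_with_sec(void)
{
	void *clone, *auth;
	int err;

	clone = demo_clone_client();
	if (!clone)
		return -EIO;

	auth = demo_create_auth(clone);
	if (!auth) {
		err = -EIO;
		goto out_shutdown;	/* was "goto out": leaked clone */
	}

	err = demo_do_lookup(clone);

out_shutdown:
	demo_shutdown_client(clone);	/* now runs on every exit path */
	return err;
}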
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index ab1bf5bb021f..a6804f704d9d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -590,7 +590,8 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) | |||
590 | state->owner = owner; | 590 | state->owner = owner; |
591 | atomic_inc(&owner->so_count); | 591 | atomic_inc(&owner->so_count); |
592 | list_add(&state->inode_states, &nfsi->open_states); | 592 | list_add(&state->inode_states, &nfsi->open_states); |
593 | state->inode = igrab(inode); | 593 | ihold(inode); |
594 | state->inode = inode; | ||
594 | spin_unlock(&inode->i_lock); | 595 | spin_unlock(&inode->i_lock); |
595 | /* Note: The reclaim code dictates that we add stateless | 596 | /* Note: The reclaim code dictates that we add stateless |
596 | * and read-only stateids to the end of the list */ | 597 | * and read-only stateids to the end of the list */ |
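nfs4state.c switches from igrab() to ihold() plus a plain assignment. The inode here is already referenced and known to be live, and the surrounding code holds inode->i_lock, so an unconditional reference bump is both sufficient and cheaper than igrab(), which takes i_lock itself and re-checks the freeing state. A small illustrative pattern (not the NFS code) is:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Take an extra reference on an inode that the caller already knows is
 * live, while its i_lock is held for the list manipulation.  igrab() would
 * redo checks (and locking) that are redundant in this situation. */
static void demo_track_inode(struct inode *inode, struct list_head *item,
			     struct list_head *list)
{
	spin_lock(&inode->i_lock);
	list_add(item, list);
	ihold(inode);		/* plain reference bump, no re-validation */
	spin_unlock(&inode->i_lock);
}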
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 87a593c2b055..c80add6e2213 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c | |||
@@ -135,14 +135,14 @@ void nfs_clear_page_tag_locked(struct nfs_page *req) | |||
135 | nfs_unlock_request(req); | 135 | nfs_unlock_request(req); |
136 | } | 136 | } |
137 | 137 | ||
138 | /** | 138 | /* |
139 | * nfs_clear_request - Free up all resources allocated to the request | 139 | * nfs_clear_request - Free up all resources allocated to the request |
140 | * @req: | 140 | * @req: |
141 | * | 141 | * |
142 | * Release page and open context resources associated with a read/write | 142 | * Release page and open context resources associated with a read/write |
143 | * request after it has completed. | 143 | * request after it has completed. |
144 | */ | 144 | */ |
145 | void nfs_clear_request(struct nfs_page *req) | 145 | static void nfs_clear_request(struct nfs_page *req) |
146 | { | 146 | { |
147 | struct page *page = req->wb_page; | 147 | struct page *page = req->wb_page; |
148 | struct nfs_open_context *ctx = req->wb_context; | 148 | struct nfs_open_context *ctx = req->wb_context; |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 85d75254328e..af0c6279a4a7 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -389,11 +389,8 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) | |||
389 | spin_lock(&inode->i_lock); | 389 | spin_lock(&inode->i_lock); |
390 | error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req); | 390 | error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req); |
391 | BUG_ON(error); | 391 | BUG_ON(error); |
392 | if (!nfsi->npages) { | 392 | if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE)) |
393 | igrab(inode); | 393 | nfsi->change_attr++; |
394 | if (nfs_have_delegation(inode, FMODE_WRITE)) | ||
395 | nfsi->change_attr++; | ||
396 | } | ||
397 | set_bit(PG_MAPPED, &req->wb_flags); | 394 | set_bit(PG_MAPPED, &req->wb_flags); |
398 | SetPagePrivate(req->wb_page); | 395 | SetPagePrivate(req->wb_page); |
399 | set_page_private(req->wb_page, (unsigned long)req); | 396 | set_page_private(req->wb_page, (unsigned long)req); |
@@ -423,11 +420,7 @@ static void nfs_inode_remove_request(struct nfs_page *req) | |||
423 | clear_bit(PG_MAPPED, &req->wb_flags); | 420 | clear_bit(PG_MAPPED, &req->wb_flags); |
424 | radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); | 421 | radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); |
425 | nfsi->npages--; | 422 | nfsi->npages--; |
426 | if (!nfsi->npages) { | 423 | spin_unlock(&inode->i_lock); |
427 | spin_unlock(&inode->i_lock); | ||
428 | iput(inode); | ||
429 | } else | ||
430 | spin_unlock(&inode->i_lock); | ||
431 | nfs_release_request(req); | 424 | nfs_release_request(req); |
432 | } | 425 | } |
433 | 426 | ||
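The fs/nfs/write.c hunks drop the per-inode igrab()/iput() pair that used to bracket the first and last page in the radix tree, which in turn lets nfs_inode_remove_request() release i_lock unconditionally instead of branching on npages. The resulting structure, with the request bookkeeping reduced to hypothetical helpers, is:

#include <linux/fs.h>
#include <linux/spinlock.h>

struct demo_req;
struct demo_inode_info;
struct demo_inode_info *demo_inode_info(struct inode *inode);
void demo_detach_page(struct demo_inode_info *nfsi, struct demo_req *req);
void demo_release_request(struct demo_req *req);

/* After the change: no last-page inode reference to drop, so the unlock
 * no longer needs to branch on whether this was the final page. */
static void demo_remove_request(struct inode *inode, struct demo_req *req)
{
	struct demo_inode_info *nfsi = demo_inode_info(inode);

	spin_lock(&inode->i_lock);
	demo_detach_page(nfsi, req);	/* radix_tree_delete() + npages-- */
	spin_unlock(&inode->i_lock);	/* unconditional: no iput() here */

	demo_release_request(req);
}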
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 90f2729b7a5b..e913ad130fdd 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/string.h> | 25 | #include <linux/string.h> |
26 | 26 | ||
27 | #define MLOG_MASK_PREFIX ML_INODE | ||
28 | #include <cluster/masklog.h> | 27 | #include <cluster/masklog.h> |
29 | 28 | ||
30 | #include "ocfs2.h" | 29 | #include "ocfs2.h" |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index e4984e259cb6..b27a0d86f8c5 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/swap.h> | 30 | #include <linux/swap.h> |
31 | #include <linux/quotaops.h> | 31 | #include <linux/quotaops.h> |
32 | 32 | ||
33 | #define MLOG_MASK_PREFIX ML_DISK_ALLOC | ||
34 | #include <cluster/masklog.h> | 33 | #include <cluster/masklog.h> |
35 | 34 | ||
36 | #include "ocfs2.h" | 35 | #include "ocfs2.h" |
@@ -50,6 +49,7 @@ | |||
50 | #include "uptodate.h" | 49 | #include "uptodate.h" |
51 | #include "xattr.h" | 50 | #include "xattr.h" |
52 | #include "refcounttree.h" | 51 | #include "refcounttree.h" |
52 | #include "ocfs2_trace.h" | ||
53 | 53 | ||
54 | #include "buffer_head_io.h" | 54 | #include "buffer_head_io.h" |
55 | 55 | ||
@@ -886,8 +886,7 @@ static int ocfs2_validate_extent_block(struct super_block *sb, | |||
886 | struct ocfs2_extent_block *eb = | 886 | struct ocfs2_extent_block *eb = |
887 | (struct ocfs2_extent_block *)bh->b_data; | 887 | (struct ocfs2_extent_block *)bh->b_data; |
888 | 888 | ||
889 | mlog(0, "Validating extent block %llu\n", | 889 | trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr); |
890 | (unsigned long long)bh->b_blocknr); | ||
891 | 890 | ||
892 | BUG_ON(!buffer_uptodate(bh)); | 891 | BUG_ON(!buffer_uptodate(bh)); |
893 | 892 | ||
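From here on, the ocfs2 hunks replace mlog(0, ...) debug prints with static tracepoints (trace_ocfs2_*), which compile in but cost essentially nothing until enabled through the tracing interface under debugfs. A one-argument event in that style is declared roughly as below; this is an illustrative TRACE_EVENT sketch with the usual trace-header boilerplate (header guards, TRACE_INCLUDE_FILE, CREATE_TRACE_POINTS) omitted, and the real definitions in fs/ocfs2/ocfs2_trace.h mostly use shared DECLARE_EVENT_CLASS templates instead of standalone events.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2_demo

#include <linux/tracepoint.h>

TRACE_EVENT(demo_validate_extent_block,

	TP_PROTO(unsigned long long blkno),

	TP_ARGS(blkno),

	TP_STRUCT__entry(
		__field(unsigned long long, blkno)
	),

	TP_fast_assign(
		__entry->blkno = blkno;
	),

	TP_printk("blkno %llu", __entry->blkno)
);

Call sites then invoke trace_demo_validate_extent_block(blkno); the event is off by default and can be toggled at runtime, which is the usual motivation for converting always-compiled debug printf-style logging into tracepoints.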
@@ -965,8 +964,6 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb, | |||
965 | struct buffer_head *eb_bh = NULL; | 964 | struct buffer_head *eb_bh = NULL; |
966 | u64 last_eb_blk = 0; | 965 | u64 last_eb_blk = 0; |
967 | 966 | ||
968 | mlog_entry_void(); | ||
969 | |||
970 | el = et->et_root_el; | 967 | el = et->et_root_el; |
971 | last_eb_blk = ocfs2_et_get_last_eb_blk(et); | 968 | last_eb_blk = ocfs2_et_get_last_eb_blk(et); |
972 | 969 | ||
@@ -987,7 +984,7 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb, | |||
987 | bail: | 984 | bail: |
988 | brelse(eb_bh); | 985 | brelse(eb_bh); |
989 | 986 | ||
990 | mlog_exit(retval); | 987 | trace_ocfs2_num_free_extents(retval); |
991 | return retval; | 988 | return retval; |
992 | } | 989 | } |
993 | 990 | ||
@@ -1010,8 +1007,6 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle, | |||
1010 | OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci)); | 1007 | OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci)); |
1011 | struct ocfs2_extent_block *eb; | 1008 | struct ocfs2_extent_block *eb; |
1012 | 1009 | ||
1013 | mlog_entry_void(); | ||
1014 | |||
1015 | count = 0; | 1010 | count = 0; |
1016 | while (count < wanted) { | 1011 | while (count < wanted) { |
1017 | status = ocfs2_claim_metadata(handle, | 1012 | status = ocfs2_claim_metadata(handle, |
@@ -1074,8 +1069,8 @@ bail: | |||
1074 | brelse(bhs[i]); | 1069 | brelse(bhs[i]); |
1075 | bhs[i] = NULL; | 1070 | bhs[i] = NULL; |
1076 | } | 1071 | } |
1072 | mlog_errno(status); | ||
1077 | } | 1073 | } |
1078 | mlog_exit(status); | ||
1079 | return status; | 1074 | return status; |
1080 | } | 1075 | } |
1081 | 1076 | ||
@@ -1173,8 +1168,6 @@ static int ocfs2_add_branch(handle_t *handle, | |||
1173 | struct ocfs2_extent_list *el; | 1168 | struct ocfs2_extent_list *el; |
1174 | u32 new_cpos, root_end; | 1169 | u32 new_cpos, root_end; |
1175 | 1170 | ||
1176 | mlog_entry_void(); | ||
1177 | |||
1178 | BUG_ON(!last_eb_bh || !*last_eb_bh); | 1171 | BUG_ON(!last_eb_bh || !*last_eb_bh); |
1179 | 1172 | ||
1180 | if (eb_bh) { | 1173 | if (eb_bh) { |
@@ -1200,8 +1193,11 @@ static int ocfs2_add_branch(handle_t *handle, | |||
1200 | * from new_cpos). | 1193 | * from new_cpos). |
1201 | */ | 1194 | */ |
1202 | if (root_end > new_cpos) { | 1195 | if (root_end > new_cpos) { |
1203 | mlog(0, "adjust the cluster end from %u to %u\n", | 1196 | trace_ocfs2_adjust_rightmost_branch( |
1204 | root_end, new_cpos); | 1197 | (unsigned long long) |
1198 | ocfs2_metadata_cache_owner(et->et_ci), | ||
1199 | root_end, new_cpos); | ||
1200 | |||
1205 | status = ocfs2_adjust_rightmost_branch(handle, et); | 1201 | status = ocfs2_adjust_rightmost_branch(handle, et); |
1206 | if (status) { | 1202 | if (status) { |
1207 | mlog_errno(status); | 1203 | mlog_errno(status); |
@@ -1332,7 +1328,6 @@ bail: | |||
1332 | kfree(new_eb_bhs); | 1328 | kfree(new_eb_bhs); |
1333 | } | 1329 | } |
1334 | 1330 | ||
1335 | mlog_exit(status); | ||
1336 | return status; | 1331 | return status; |
1337 | } | 1332 | } |
1338 | 1333 | ||
@@ -1353,8 +1348,6 @@ static int ocfs2_shift_tree_depth(handle_t *handle, | |||
1353 | struct ocfs2_extent_list *root_el; | 1348 | struct ocfs2_extent_list *root_el; |
1354 | struct ocfs2_extent_list *eb_el; | 1349 | struct ocfs2_extent_list *eb_el; |
1355 | 1350 | ||
1356 | mlog_entry_void(); | ||
1357 | |||
1358 | status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac, | 1351 | status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac, |
1359 | &new_eb_bh); | 1352 | &new_eb_bh); |
1360 | if (status < 0) { | 1353 | if (status < 0) { |
@@ -1415,7 +1408,6 @@ static int ocfs2_shift_tree_depth(handle_t *handle, | |||
1415 | bail: | 1408 | bail: |
1416 | brelse(new_eb_bh); | 1409 | brelse(new_eb_bh); |
1417 | 1410 | ||
1418 | mlog_exit(status); | ||
1419 | return status; | 1411 | return status; |
1420 | } | 1412 | } |
1421 | 1413 | ||
@@ -1446,8 +1438,6 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et, | |||
1446 | struct buffer_head *bh = NULL; | 1438 | struct buffer_head *bh = NULL; |
1447 | struct buffer_head *lowest_bh = NULL; | 1439 | struct buffer_head *lowest_bh = NULL; |
1448 | 1440 | ||
1449 | mlog_entry_void(); | ||
1450 | |||
1451 | *target_bh = NULL; | 1441 | *target_bh = NULL; |
1452 | 1442 | ||
1453 | el = et->et_root_el; | 1443 | el = et->et_root_el; |
@@ -1503,7 +1493,6 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et, | |||
1503 | bail: | 1493 | bail: |
1504 | brelse(bh); | 1494 | brelse(bh); |
1505 | 1495 | ||
1506 | mlog_exit(status); | ||
1507 | return status; | 1496 | return status; |
1508 | } | 1497 | } |
1509 | 1498 | ||
@@ -1540,7 +1529,10 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et, | |||
1540 | * another tree level */ | 1529 | * another tree level */ |
1541 | if (shift) { | 1530 | if (shift) { |
1542 | BUG_ON(bh); | 1531 | BUG_ON(bh); |
1543 | mlog(0, "need to shift tree depth (current = %d)\n", depth); | 1532 | trace_ocfs2_grow_tree( |
1533 | (unsigned long long) | ||
1534 | ocfs2_metadata_cache_owner(et->et_ci), | ||
1535 | depth); | ||
1544 | 1536 | ||
1545 | /* ocfs2_shift_tree_depth will return us a buffer with | 1537 | /* ocfs2_shift_tree_depth will return us a buffer with |
1546 | * the new extent block (so we can pass that to | 1538 | * the new extent block (so we can pass that to |
@@ -1570,7 +1562,6 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et, | |||
1570 | 1562 | ||
1571 | /* call ocfs2_add_branch to add the final part of the tree with | 1563 | /* call ocfs2_add_branch to add the final part of the tree with |
1572 | * the new data. */ | 1564 | * the new data. */ |
1573 | mlog(0, "add branch. bh = %p\n", bh); | ||
1574 | ret = ocfs2_add_branch(handle, et, bh, last_eb_bh, | 1565 | ret = ocfs2_add_branch(handle, et, bh, last_eb_bh, |
1575 | meta_ac); | 1566 | meta_ac); |
1576 | if (ret < 0) { | 1567 | if (ret < 0) { |
@@ -1645,8 +1636,9 @@ static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el, | |||
1645 | } | 1636 | } |
1646 | insert_index = i; | 1637 | insert_index = i; |
1647 | 1638 | ||
1648 | mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n", | 1639 | trace_ocfs2_rotate_leaf(insert_cpos, insert_index, |
1649 | insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count)); | 1640 | has_empty, next_free, |
1641 | le16_to_cpu(el->l_count)); | ||
1650 | 1642 | ||
1651 | BUG_ON(insert_index < 0); | 1643 | BUG_ON(insert_index < 0); |
1652 | BUG_ON(insert_index >= le16_to_cpu(el->l_count)); | 1644 | BUG_ON(insert_index >= le16_to_cpu(el->l_count)); |
@@ -2059,7 +2051,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle, | |||
2059 | left_el = path_leaf_el(left_path); | 2051 | left_el = path_leaf_el(left_path); |
2060 | right_el = path_leaf_el(right_path); | 2052 | right_el = path_leaf_el(right_path); |
2061 | for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) { | 2053 | for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) { |
2062 | mlog(0, "Adjust records at index %u\n", i); | 2054 | trace_ocfs2_complete_edge_insert(i); |
2063 | 2055 | ||
2064 | /* | 2056 | /* |
2065 | * One nice property of knowing that all of these | 2057 | * One nice property of knowing that all of these |
@@ -2389,7 +2381,9 @@ static int ocfs2_rotate_tree_right(handle_t *handle, | |||
2389 | goto out; | 2381 | goto out; |
2390 | } | 2382 | } |
2391 | 2383 | ||
2392 | mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos); | 2384 | trace_ocfs2_rotate_tree_right( |
2385 | (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), | ||
2386 | insert_cpos, cpos); | ||
2393 | 2387 | ||
2394 | /* | 2388 | /* |
2395 | * What we want to do here is: | 2389 | * What we want to do here is: |
@@ -2418,8 +2412,10 @@ static int ocfs2_rotate_tree_right(handle_t *handle, | |||
2418 | * rotating subtrees. | 2412 | * rotating subtrees. |
2419 | */ | 2413 | */ |
2420 | while (cpos && insert_cpos <= cpos) { | 2414 | while (cpos && insert_cpos <= cpos) { |
2421 | mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n", | 2415 | trace_ocfs2_rotate_tree_right( |
2422 | insert_cpos, cpos); | 2416 | (unsigned long long) |
2417 | ocfs2_metadata_cache_owner(et->et_ci), | ||
2418 | insert_cpos, cpos); | ||
2423 | 2419 | ||
2424 | ret = ocfs2_find_path(et->et_ci, left_path, cpos); | 2420 | ret = ocfs2_find_path(et->et_ci, left_path, cpos); |
2425 | if (ret) { | 2421 | if (ret) { |
@@ -2461,10 +2457,10 @@ static int ocfs2_rotate_tree_right(handle_t *handle, | |||
2461 | 2457 | ||
2462 | start = ocfs2_find_subtree_root(et, left_path, right_path); | 2458 | start = ocfs2_find_subtree_root(et, left_path, right_path); |
2463 | 2459 | ||
2464 | mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", | 2460 | trace_ocfs2_rotate_subtree(start, |
2465 | start, | 2461 | (unsigned long long) |
2466 | (unsigned long long) right_path->p_node[start].bh->b_blocknr, | 2462 | right_path->p_node[start].bh->b_blocknr, |
2467 | right_path->p_tree_depth); | 2463 | right_path->p_tree_depth); |
2468 | 2464 | ||
2469 | ret = ocfs2_extend_rotate_transaction(handle, start, | 2465 | ret = ocfs2_extend_rotate_transaction(handle, start, |
2470 | orig_credits, right_path); | 2466 | orig_credits, right_path); |
@@ -2964,8 +2960,7 @@ static int __ocfs2_rotate_tree_left(handle_t *handle, | |||
2964 | subtree_root = ocfs2_find_subtree_root(et, left_path, | 2960 | subtree_root = ocfs2_find_subtree_root(et, left_path, |
2965 | right_path); | 2961 | right_path); |
2966 | 2962 | ||
2967 | mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", | 2963 | trace_ocfs2_rotate_subtree(subtree_root, |
2968 | subtree_root, | ||
2969 | (unsigned long long) | 2964 | (unsigned long long) |
2970 | right_path->p_node[subtree_root].bh->b_blocknr, | 2965 | right_path->p_node[subtree_root].bh->b_blocknr, |
2971 | right_path->p_tree_depth); | 2966 | right_path->p_tree_depth); |
@@ -3989,9 +3984,11 @@ static int ocfs2_append_rec_to_path(handle_t *handle, | |||
3989 | goto out; | 3984 | goto out; |
3990 | } | 3985 | } |
3991 | 3986 | ||
3992 | mlog(0, "Append may need a left path update. cpos: %u, " | 3987 | trace_ocfs2_append_rec_to_path( |
3993 | "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos), | 3988 | (unsigned long long) |
3994 | left_cpos); | 3989 | ocfs2_metadata_cache_owner(et->et_ci), |
3990 | le32_to_cpu(insert_rec->e_cpos), | ||
3991 | left_cpos); | ||
3995 | 3992 | ||
3996 | /* | 3993 | /* |
3997 | * No need to worry if the append is already in the | 3994 | * No need to worry if the append is already in the |
@@ -4562,7 +4559,7 @@ static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et, | |||
4562 | ocfs2_et_get_last_eb_blk(et), | 4559 | ocfs2_et_get_last_eb_blk(et), |
4563 | &bh); | 4560 | &bh); |
4564 | if (ret) { | 4561 | if (ret) { |
4565 | mlog_exit(ret); | 4562 | mlog_errno(ret); |
4566 | goto out; | 4563 | goto out; |
4567 | } | 4564 | } |
4568 | eb = (struct ocfs2_extent_block *) bh->b_data; | 4565 | eb = (struct ocfs2_extent_block *) bh->b_data; |
@@ -4678,9 +4675,9 @@ int ocfs2_insert_extent(handle_t *handle, | |||
4678 | struct ocfs2_insert_type insert = {0, }; | 4675 | struct ocfs2_insert_type insert = {0, }; |
4679 | struct ocfs2_extent_rec rec; | 4676 | struct ocfs2_extent_rec rec; |
4680 | 4677 | ||
4681 | mlog(0, "add %u clusters at position %u to owner %llu\n", | 4678 | trace_ocfs2_insert_extent_start( |
4682 | new_clusters, cpos, | 4679 | (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), |
4683 | (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); | 4680 | cpos, new_clusters); |
4684 | 4681 | ||
4685 | memset(&rec, 0, sizeof(rec)); | 4682 | memset(&rec, 0, sizeof(rec)); |
4686 | rec.e_cpos = cpu_to_le32(cpos); | 4683 | rec.e_cpos = cpu_to_le32(cpos); |
@@ -4700,11 +4697,9 @@ int ocfs2_insert_extent(handle_t *handle, | |||
4700 | goto bail; | 4697 | goto bail; |
4701 | } | 4698 | } |
4702 | 4699 | ||
4703 | mlog(0, "Insert.appending: %u, Insert.Contig: %u, " | 4700 | trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig, |
4704 | "Insert.contig_index: %d, Insert.free_records: %d, " | 4701 | insert.ins_contig_index, free_records, |
4705 | "Insert.tree_depth: %d\n", | 4702 | insert.ins_tree_depth); |
4706 | insert.ins_appending, insert.ins_contig, insert.ins_contig_index, | ||
4707 | free_records, insert.ins_tree_depth); | ||
4708 | 4703 | ||
4709 | if (insert.ins_contig == CONTIG_NONE && free_records == 0) { | 4704 | if (insert.ins_contig == CONTIG_NONE && free_records == 0) { |
4710 | status = ocfs2_grow_tree(handle, et, | 4705 | status = ocfs2_grow_tree(handle, et, |
@@ -4726,7 +4721,6 @@ int ocfs2_insert_extent(handle_t *handle, | |||
4726 | bail: | 4721 | bail: |
4727 | brelse(last_eb_bh); | 4722 | brelse(last_eb_bh); |
4728 | 4723 | ||
4729 | mlog_exit(status); | ||
4730 | return status; | 4724 | return status; |
4731 | } | 4725 | } |
4732 | 4726 | ||
@@ -4746,7 +4740,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle, | |||
4746 | struct ocfs2_alloc_context *meta_ac, | 4740 | struct ocfs2_alloc_context *meta_ac, |
4747 | enum ocfs2_alloc_restarted *reason_ret) | 4741 | enum ocfs2_alloc_restarted *reason_ret) |
4748 | { | 4742 | { |
4749 | int status = 0; | 4743 | int status = 0, err = 0; |
4750 | int free_extents; | 4744 | int free_extents; |
4751 | enum ocfs2_alloc_restarted reason = RESTART_NONE; | 4745 | enum ocfs2_alloc_restarted reason = RESTART_NONE; |
4752 | u32 bit_off, num_bits; | 4746 | u32 bit_off, num_bits; |
@@ -4773,14 +4767,14 @@ int ocfs2_add_clusters_in_btree(handle_t *handle, | |||
4773 | * 2) we are so fragmented, we've needed to add metadata too | 4767 | * 2) we are so fragmented, we've needed to add metadata too |
4774 | * many times. */ | 4768 | * many times. */ |
4775 | if (!free_extents && !meta_ac) { | 4769 | if (!free_extents && !meta_ac) { |
4776 | mlog(0, "we haven't reserved any metadata!\n"); | 4770 | err = -1; |
4777 | status = -EAGAIN; | 4771 | status = -EAGAIN; |
4778 | reason = RESTART_META; | 4772 | reason = RESTART_META; |
4779 | goto leave; | 4773 | goto leave; |
4780 | } else if ((!free_extents) | 4774 | } else if ((!free_extents) |
4781 | && (ocfs2_alloc_context_bits_left(meta_ac) | 4775 | && (ocfs2_alloc_context_bits_left(meta_ac) |
4782 | < ocfs2_extend_meta_needed(et->et_root_el))) { | 4776 | < ocfs2_extend_meta_needed(et->et_root_el))) { |
4783 | mlog(0, "filesystem is really fragmented...\n"); | 4777 | err = -2; |
4784 | status = -EAGAIN; | 4778 | status = -EAGAIN; |
4785 | reason = RESTART_META; | 4779 | reason = RESTART_META; |
4786 | goto leave; | 4780 | goto leave; |
@@ -4805,9 +4799,9 @@ int ocfs2_add_clusters_in_btree(handle_t *handle, | |||
4805 | } | 4799 | } |
4806 | 4800 | ||
4807 | block = ocfs2_clusters_to_blocks(osb->sb, bit_off); | 4801 | block = ocfs2_clusters_to_blocks(osb->sb, bit_off); |
4808 | mlog(0, "Allocating %u clusters at block %u for owner %llu\n", | 4802 | trace_ocfs2_add_clusters_in_btree( |
4809 | num_bits, bit_off, | 4803 | (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), |
4810 | (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); | 4804 | bit_off, num_bits); |
4811 | status = ocfs2_insert_extent(handle, et, *logical_offset, block, | 4805 | status = ocfs2_insert_extent(handle, et, *logical_offset, block, |
4812 | num_bits, flags, meta_ac); | 4806 | num_bits, flags, meta_ac); |
4813 | if (status < 0) { | 4807 | if (status < 0) { |
@@ -4821,16 +4815,15 @@ int ocfs2_add_clusters_in_btree(handle_t *handle, | |||
4821 | *logical_offset += num_bits; | 4815 | *logical_offset += num_bits; |
4822 | 4816 | ||
4823 | if (clusters_to_add) { | 4817 | if (clusters_to_add) { |
4824 | mlog(0, "need to alloc once more, wanted = %u\n", | 4818 | err = clusters_to_add; |
4825 | clusters_to_add); | ||
4826 | status = -EAGAIN; | 4819 | status = -EAGAIN; |
4827 | reason = RESTART_TRANS; | 4820 | reason = RESTART_TRANS; |
4828 | } | 4821 | } |
4829 | 4822 | ||
4830 | leave: | 4823 | leave: |
4831 | mlog_exit(status); | ||
4832 | if (reason_ret) | 4824 | if (reason_ret) |
4833 | *reason_ret = reason; | 4825 | *reason_ret = reason; |
4826 | trace_ocfs2_add_clusters_in_btree_ret(status, reason, err); | ||
4834 | return status; | 4827 | return status; |
4835 | } | 4828 | } |
4836 | 4829 | ||
@@ -5039,7 +5032,7 @@ int ocfs2_split_extent(handle_t *handle, | |||
5039 | ocfs2_et_get_last_eb_blk(et), | 5032 | ocfs2_et_get_last_eb_blk(et), |
5040 | &last_eb_bh); | 5033 | &last_eb_bh); |
5041 | if (ret) { | 5034 | if (ret) { |
5042 | mlog_exit(ret); | 5035 | mlog_errno(ret); |
5043 | goto out; | 5036 | goto out; |
5044 | } | 5037 | } |
5045 | 5038 | ||
@@ -5056,9 +5049,9 @@ int ocfs2_split_extent(handle_t *handle, | |||
5056 | 5049 | ||
5057 | ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); | 5050 | ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); |
5058 | 5051 | ||
5059 | mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n", | 5052 | trace_ocfs2_split_extent(split_index, ctxt.c_contig_type, |
5060 | split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent, | 5053 | ctxt.c_has_empty_extent, |
5061 | ctxt.c_split_covers_rec); | 5054 | ctxt.c_split_covers_rec); |
5062 | 5055 | ||
5063 | if (ctxt.c_contig_type == CONTIG_NONE) { | 5056 | if (ctxt.c_contig_type == CONTIG_NONE) { |
5064 | if (ctxt.c_split_covers_rec) | 5057 | if (ctxt.c_split_covers_rec) |
@@ -5192,8 +5185,9 @@ int ocfs2_mark_extent_written(struct inode *inode, | |||
5192 | { | 5185 | { |
5193 | int ret; | 5186 | int ret; |
5194 | 5187 | ||
5195 | mlog(0, "Inode %lu cpos %u, len %u, phys clusters %u\n", | 5188 | trace_ocfs2_mark_extent_written( |
5196 | inode->i_ino, cpos, len, phys); | 5189 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
5190 | cpos, len, phys); | ||
5197 | 5191 | ||
5198 | if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { | 5192 | if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { |
5199 | ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " | 5193 | ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " |
@@ -5512,11 +5506,10 @@ int ocfs2_remove_extent(handle_t *handle, | |||
5512 | 5506 | ||
5513 | BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); | 5507 | BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); |
5514 | 5508 | ||
5515 | mlog(0, "Owner %llu, remove (cpos %u, len %u). Existing index %d " | 5509 | trace_ocfs2_remove_extent( |
5516 | "(cpos %u, len %u)\n", | 5510 | (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), |
5517 | (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), | 5511 | cpos, len, index, le32_to_cpu(rec->e_cpos), |
5518 | cpos, len, index, | 5512 | ocfs2_rec_clusters(el, rec)); |
5519 | le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); | ||
5520 | 5513 | ||
5521 | if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { | 5514 | if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { |
5522 | ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, | 5515 | ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, |
@@ -5795,9 +5788,6 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, | |||
5795 | struct ocfs2_dinode *di; | 5788 | struct ocfs2_dinode *di; |
5796 | struct ocfs2_truncate_log *tl; | 5789 | struct ocfs2_truncate_log *tl; |
5797 | 5790 | ||
5798 | mlog_entry("start_blk = %llu, num_clusters = %u\n", | ||
5799 | (unsigned long long)start_blk, num_clusters); | ||
5800 | |||
5801 | BUG_ON(mutex_trylock(&tl_inode->i_mutex)); | 5791 | BUG_ON(mutex_trylock(&tl_inode->i_mutex)); |
5802 | 5792 | ||
5803 | start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); | 5793 | start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); |
@@ -5834,10 +5824,9 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, | |||
5834 | goto bail; | 5824 | goto bail; |
5835 | } | 5825 | } |
5836 | 5826 | ||
5837 | mlog(0, "Log truncate of %u clusters starting at cluster %u to " | 5827 | trace_ocfs2_truncate_log_append( |
5838 | "%llu (index = %d)\n", num_clusters, start_cluster, | 5828 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index, |
5839 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index); | 5829 | start_cluster, num_clusters); |
5840 | |||
5841 | if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) { | 5830 | if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) { |
5842 | /* | 5831 | /* |
5843 | * Move index back to the record we are coalescing with. | 5832 | * Move index back to the record we are coalescing with. |
@@ -5846,9 +5835,10 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, | |||
5846 | index--; | 5835 | index--; |
5847 | 5836 | ||
5848 | num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters); | 5837 | num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters); |
5849 | mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n", | 5838 | trace_ocfs2_truncate_log_append( |
5850 | index, le32_to_cpu(tl->tl_recs[index].t_start), | 5839 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, |
5851 | num_clusters); | 5840 | index, le32_to_cpu(tl->tl_recs[index].t_start), |
5841 | num_clusters); | ||
5852 | } else { | 5842 | } else { |
5853 | tl->tl_recs[index].t_start = cpu_to_le32(start_cluster); | 5843 | tl->tl_recs[index].t_start = cpu_to_le32(start_cluster); |
5854 | tl->tl_used = cpu_to_le16(index + 1); | 5844 | tl->tl_used = cpu_to_le16(index + 1); |
@@ -5859,7 +5849,6 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, | |||
5859 | 5849 | ||
5860 | osb->truncated_clusters += num_clusters; | 5850 | osb->truncated_clusters += num_clusters; |
5861 | bail: | 5851 | bail: |
5862 | mlog_exit(status); | ||
5863 | return status; | 5852 | return status; |
5864 | } | 5853 | } |
5865 | 5854 | ||
@@ -5878,8 +5867,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, | |||
5878 | struct inode *tl_inode = osb->osb_tl_inode; | 5867 | struct inode *tl_inode = osb->osb_tl_inode; |
5879 | struct buffer_head *tl_bh = osb->osb_tl_bh; | 5868 | struct buffer_head *tl_bh = osb->osb_tl_bh; |
5880 | 5869 | ||
5881 | mlog_entry_void(); | ||
5882 | |||
5883 | di = (struct ocfs2_dinode *) tl_bh->b_data; | 5870 | di = (struct ocfs2_dinode *) tl_bh->b_data; |
5884 | tl = &di->id2.i_dealloc; | 5871 | tl = &di->id2.i_dealloc; |
5885 | i = le16_to_cpu(tl->tl_used) - 1; | 5872 | i = le16_to_cpu(tl->tl_used) - 1; |
@@ -5915,8 +5902,9 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, | |||
5915 | /* if start_blk is not set, we ignore the record as | 5902 | /* if start_blk is not set, we ignore the record as |
5916 | * invalid. */ | 5903 | * invalid. */ |
5917 | if (start_blk) { | 5904 | if (start_blk) { |
5918 | mlog(0, "free record %d, start = %u, clusters = %u\n", | 5905 | trace_ocfs2_replay_truncate_records( |
5919 | i, le32_to_cpu(rec.t_start), num_clusters); | 5906 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, |
5907 | i, le32_to_cpu(rec.t_start), num_clusters); | ||
5920 | 5908 | ||
5921 | status = ocfs2_free_clusters(handle, data_alloc_inode, | 5909 | status = ocfs2_free_clusters(handle, data_alloc_inode, |
5922 | data_alloc_bh, start_blk, | 5910 | data_alloc_bh, start_blk, |
@@ -5932,7 +5920,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, | |||
5932 | osb->truncated_clusters = 0; | 5920 | osb->truncated_clusters = 0; |
5933 | 5921 | ||
5934 | bail: | 5922 | bail: |
5935 | mlog_exit(status); | ||
5936 | return status; | 5923 | return status; |
5937 | } | 5924 | } |
5938 | 5925 | ||
@@ -5949,8 +5936,6 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) | |||
5949 | struct ocfs2_dinode *di; | 5936 | struct ocfs2_dinode *di; |
5950 | struct ocfs2_truncate_log *tl; | 5937 | struct ocfs2_truncate_log *tl; |
5951 | 5938 | ||
5952 | mlog_entry_void(); | ||
5953 | |||
5954 | BUG_ON(mutex_trylock(&tl_inode->i_mutex)); | 5939 | BUG_ON(mutex_trylock(&tl_inode->i_mutex)); |
5955 | 5940 | ||
5956 | di = (struct ocfs2_dinode *) tl_bh->b_data; | 5941 | di = (struct ocfs2_dinode *) tl_bh->b_data; |
@@ -5962,8 +5947,9 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) | |||
5962 | 5947 | ||
5963 | tl = &di->id2.i_dealloc; | 5948 | tl = &di->id2.i_dealloc; |
5964 | num_to_flush = le16_to_cpu(tl->tl_used); | 5949 | num_to_flush = le16_to_cpu(tl->tl_used); |
5965 | mlog(0, "Flush %u records from truncate log #%llu\n", | 5950 | trace_ocfs2_flush_truncate_log( |
5966 | num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno); | 5951 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, |
5952 | num_to_flush); | ||
5967 | if (!num_to_flush) { | 5953 | if (!num_to_flush) { |
5968 | status = 0; | 5954 | status = 0; |
5969 | goto out; | 5955 | goto out; |
@@ -6009,7 +5995,6 @@ out_mutex: | |||
6009 | iput(data_alloc_inode); | 5995 | iput(data_alloc_inode); |
6010 | 5996 | ||
6011 | out: | 5997 | out: |
6012 | mlog_exit(status); | ||
6013 | return status; | 5998 | return status; |
6014 | } | 5999 | } |
6015 | 6000 | ||
@@ -6032,15 +6017,11 @@ static void ocfs2_truncate_log_worker(struct work_struct *work) | |||
6032 | container_of(work, struct ocfs2_super, | 6017 | container_of(work, struct ocfs2_super, |
6033 | osb_truncate_log_wq.work); | 6018 | osb_truncate_log_wq.work); |
6034 | 6019 | ||
6035 | mlog_entry_void(); | ||
6036 | |||
6037 | status = ocfs2_flush_truncate_log(osb); | 6020 | status = ocfs2_flush_truncate_log(osb); |
6038 | if (status < 0) | 6021 | if (status < 0) |
6039 | mlog_errno(status); | 6022 | mlog_errno(status); |
6040 | else | 6023 | else |
6041 | ocfs2_init_steal_slots(osb); | 6024 | ocfs2_init_steal_slots(osb); |
6042 | |||
6043 | mlog_exit(status); | ||
6044 | } | 6025 | } |
6045 | 6026 | ||
6046 | #define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) | 6027 | #define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) |
@@ -6086,7 +6067,6 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, | |||
6086 | *tl_inode = inode; | 6067 | *tl_inode = inode; |
6087 | *tl_bh = bh; | 6068 | *tl_bh = bh; |
6088 | bail: | 6069 | bail: |
6089 | mlog_exit(status); | ||
6090 | return status; | 6070 | return status; |
6091 | } | 6071 | } |
6092 | 6072 | ||
@@ -6106,7 +6086,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, | |||
6106 | 6086 | ||
6107 | *tl_copy = NULL; | 6087 | *tl_copy = NULL; |
6108 | 6088 | ||
6109 | mlog(0, "recover truncate log from slot %d\n", slot_num); | 6089 | trace_ocfs2_begin_truncate_log_recovery(slot_num); |
6110 | 6090 | ||
6111 | status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); | 6091 | status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); |
6112 | if (status < 0) { | 6092 | if (status < 0) { |
@@ -6123,8 +6103,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, | |||
6123 | 6103 | ||
6124 | tl = &di->id2.i_dealloc; | 6104 | tl = &di->id2.i_dealloc; |
6125 | if (le16_to_cpu(tl->tl_used)) { | 6105 | if (le16_to_cpu(tl->tl_used)) { |
6126 | mlog(0, "We'll have %u logs to recover\n", | 6106 | trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used)); |
6127 | le16_to_cpu(tl->tl_used)); | ||
6128 | 6107 | ||
6129 | *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); | 6108 | *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); |
6130 | if (!(*tl_copy)) { | 6109 | if (!(*tl_copy)) { |
@@ -6157,9 +6136,9 @@ bail: | |||
6157 | if (status < 0 && (*tl_copy)) { | 6136 | if (status < 0 && (*tl_copy)) { |
6158 | kfree(*tl_copy); | 6137 | kfree(*tl_copy); |
6159 | *tl_copy = NULL; | 6138 | *tl_copy = NULL; |
6139 | mlog_errno(status); | ||
6160 | } | 6140 | } |
6161 | 6141 | ||
6162 | mlog_exit(status); | ||
6163 | return status; | 6142 | return status; |
6164 | } | 6143 | } |
6165 | 6144 | ||
@@ -6174,8 +6153,6 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, | |||
6174 | struct inode *tl_inode = osb->osb_tl_inode; | 6153 | struct inode *tl_inode = osb->osb_tl_inode; |
6175 | struct ocfs2_truncate_log *tl; | 6154 | struct ocfs2_truncate_log *tl; |
6176 | 6155 | ||
6177 | mlog_entry_void(); | ||
6178 | |||
6179 | if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { | 6156 | if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { |
6180 | mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); | 6157 | mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); |
6181 | return -EINVAL; | 6158 | return -EINVAL; |
@@ -6183,8 +6160,9 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, | |||
6183 | 6160 | ||
6184 | tl = &tl_copy->id2.i_dealloc; | 6161 | tl = &tl_copy->id2.i_dealloc; |
6185 | num_recs = le16_to_cpu(tl->tl_used); | 6162 | num_recs = le16_to_cpu(tl->tl_used); |
6186 | mlog(0, "cleanup %u records from %llu\n", num_recs, | 6163 | trace_ocfs2_complete_truncate_log_recovery( |
6187 | (unsigned long long)le64_to_cpu(tl_copy->i_blkno)); | 6164 | (unsigned long long)le64_to_cpu(tl_copy->i_blkno), |
6165 | num_recs); | ||
6188 | 6166 | ||
6189 | mutex_lock(&tl_inode->i_mutex); | 6167 | mutex_lock(&tl_inode->i_mutex); |
6190 | for(i = 0; i < num_recs; i++) { | 6168 | for(i = 0; i < num_recs; i++) { |
@@ -6219,7 +6197,6 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, | |||
6219 | bail_up: | 6197 | bail_up: |
6220 | mutex_unlock(&tl_inode->i_mutex); | 6198 | mutex_unlock(&tl_inode->i_mutex); |
6221 | 6199 | ||
6222 | mlog_exit(status); | ||
6223 | return status; | 6200 | return status; |
6224 | } | 6201 | } |
6225 | 6202 | ||
@@ -6228,8 +6205,6 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) | |||
6228 | int status; | 6205 | int status; |
6229 | struct inode *tl_inode = osb->osb_tl_inode; | 6206 | struct inode *tl_inode = osb->osb_tl_inode; |
6230 | 6207 | ||
6231 | mlog_entry_void(); | ||
6232 | |||
6233 | if (tl_inode) { | 6208 | if (tl_inode) { |
6234 | cancel_delayed_work(&osb->osb_truncate_log_wq); | 6209 | cancel_delayed_work(&osb->osb_truncate_log_wq); |
6235 | flush_workqueue(ocfs2_wq); | 6210 | flush_workqueue(ocfs2_wq); |
@@ -6241,8 +6216,6 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) | |||
6241 | brelse(osb->osb_tl_bh); | 6216 | brelse(osb->osb_tl_bh); |
6242 | iput(osb->osb_tl_inode); | 6217 | iput(osb->osb_tl_inode); |
6243 | } | 6218 | } |
6244 | |||
6245 | mlog_exit_void(); | ||
6246 | } | 6219 | } |
6247 | 6220 | ||
6248 | int ocfs2_truncate_log_init(struct ocfs2_super *osb) | 6221 | int ocfs2_truncate_log_init(struct ocfs2_super *osb) |
@@ -6251,8 +6224,6 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb) | |||
6251 | struct inode *tl_inode = NULL; | 6224 | struct inode *tl_inode = NULL; |
6252 | struct buffer_head *tl_bh = NULL; | 6225 | struct buffer_head *tl_bh = NULL; |
6253 | 6226 | ||
6254 | mlog_entry_void(); | ||
6255 | |||
6256 | status = ocfs2_get_truncate_log_info(osb, | 6227 | status = ocfs2_get_truncate_log_info(osb, |
6257 | osb->slot_num, | 6228 | osb->slot_num, |
6258 | &tl_inode, | 6229 | &tl_inode, |
@@ -6268,7 +6239,6 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb) | |||
6268 | osb->osb_tl_bh = tl_bh; | 6239 | osb->osb_tl_bh = tl_bh; |
6269 | osb->osb_tl_inode = tl_inode; | 6240 | osb->osb_tl_inode = tl_inode; |
6270 | 6241 | ||
6271 | mlog_exit(status); | ||
6272 | return status; | 6242 | return status; |
6273 | } | 6243 | } |
6274 | 6244 | ||
@@ -6350,8 +6320,8 @@ static int ocfs2_free_cached_blocks(struct ocfs2_super *osb, | |||
6350 | else | 6320 | else |
6351 | bg_blkno = ocfs2_which_suballoc_group(head->free_blk, | 6321 | bg_blkno = ocfs2_which_suballoc_group(head->free_blk, |
6352 | head->free_bit); | 6322 | head->free_bit); |
6353 | mlog(0, "Free bit: (bit %u, blkno %llu)\n", | 6323 | trace_ocfs2_free_cached_blocks( |
6354 | head->free_bit, (unsigned long long)head->free_blk); | 6324 | (unsigned long long)head->free_blk, head->free_bit); |
6355 | 6325 | ||
6356 | ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, | 6326 | ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, |
6357 | head->free_bit, bg_blkno, 1); | 6327 | head->free_bit, bg_blkno, 1); |
@@ -6404,8 +6374,7 @@ int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, | |||
6404 | return ret; | 6374 | return ret; |
6405 | } | 6375 | } |
6406 | 6376 | ||
6407 | mlog(0, "Insert clusters: (bit %u, blk %llu)\n", | 6377 | trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit); |
6408 | bit, (unsigned long long)blkno); | ||
6409 | 6378 | ||
6410 | item->free_blk = blkno; | 6379 | item->free_blk = blkno; |
6411 | item->free_bit = bit; | 6380 | item->free_bit = bit; |
@@ -6480,8 +6449,8 @@ int ocfs2_run_deallocs(struct ocfs2_super *osb, | |||
6480 | fl = ctxt->c_first_suballocator; | 6449 | fl = ctxt->c_first_suballocator; |
6481 | 6450 | ||
6482 | if (fl->f_first) { | 6451 | if (fl->f_first) { |
6483 | mlog(0, "Free items: (type %u, slot %d)\n", | 6452 | trace_ocfs2_run_deallocs(fl->f_inode_type, |
6484 | fl->f_inode_type, fl->f_slot); | 6453 | fl->f_slot); |
6485 | ret2 = ocfs2_free_cached_blocks(osb, | 6454 | ret2 = ocfs2_free_cached_blocks(osb, |
6486 | fl->f_inode_type, | 6455 | fl->f_inode_type, |
6487 | fl->f_slot, | 6456 | fl->f_slot, |
@@ -6558,8 +6527,9 @@ int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, | |||
6558 | goto out; | 6527 | goto out; |
6559 | } | 6528 | } |
6560 | 6529 | ||
6561 | mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n", | 6530 | trace_ocfs2_cache_block_dealloc(type, slot, |
6562 | type, slot, bit, (unsigned long long)blkno); | 6531 | (unsigned long long)suballoc, |
6532 | (unsigned long long)blkno, bit); | ||
6563 | 6533 | ||
6564 | item->free_bg = suballoc; | 6534 | item->free_bg = suballoc; |
6565 | item->free_blk = blkno; | 6535 | item->free_blk = blkno; |
@@ -7005,8 +6975,6 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb, | |||
7005 | struct ocfs2_extent_tree et; | 6975 | struct ocfs2_extent_tree et; |
7006 | struct ocfs2_cached_dealloc_ctxt dealloc; | 6976 | struct ocfs2_cached_dealloc_ctxt dealloc; |
7007 | 6977 | ||
7008 | mlog_entry_void(); | ||
7009 | |||
7010 | ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); | 6978 | ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); |
7011 | ocfs2_init_dealloc_ctxt(&dealloc); | 6979 | ocfs2_init_dealloc_ctxt(&dealloc); |
7012 | 6980 | ||
@@ -7041,8 +7009,11 @@ start: | |||
7041 | goto bail; | 7009 | goto bail; |
7042 | } | 7010 | } |
7043 | 7011 | ||
7044 | mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n", | 7012 | trace_ocfs2_commit_truncate( |
7045 | OCFS2_I(inode)->ip_clusters, path->p_tree_depth); | 7013 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
7014 | new_highest_cpos, | ||
7015 | OCFS2_I(inode)->ip_clusters, | ||
7016 | path->p_tree_depth); | ||
7046 | 7017 | ||
7047 | /* | 7018 | /* |
7048 | * By now, el will point to the extent list on the bottom most | 7019 | * By now, el will point to the extent list on the bottom most |
@@ -7136,7 +7107,6 @@ bail: | |||
7136 | 7107 | ||
7137 | ocfs2_free_path(path); | 7108 | ocfs2_free_path(path); |
7138 | 7109 | ||
7139 | mlog_exit(status); | ||
7140 | return status; | 7110 | return status; |
7141 | } | 7111 | } |
7142 | 7112 | ||
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index daea0359e974..ac97bca282d2 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/mpage.h> | 29 | #include <linux/mpage.h> |
30 | #include <linux/quotaops.h> | 30 | #include <linux/quotaops.h> |
31 | 31 | ||
32 | #define MLOG_MASK_PREFIX ML_FILE_IO | ||
33 | #include <cluster/masklog.h> | 32 | #include <cluster/masklog.h> |
34 | 33 | ||
35 | #include "ocfs2.h" | 34 | #include "ocfs2.h" |
@@ -45,6 +44,7 @@ | |||
45 | #include "super.h" | 44 | #include "super.h" |
46 | #include "symlink.h" | 45 | #include "symlink.h" |
47 | #include "refcounttree.h" | 46 | #include "refcounttree.h" |
47 | #include "ocfs2_trace.h" | ||
48 | 48 | ||
49 | #include "buffer_head_io.h" | 49 | #include "buffer_head_io.h" |
50 | 50 | ||
@@ -59,8 +59,9 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, | |||
59 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 59 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
60 | void *kaddr; | 60 | void *kaddr; |
61 | 61 | ||
62 | mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, | 62 | trace_ocfs2_symlink_get_block( |
63 | (unsigned long long)iblock, bh_result, create); | 63 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
64 | (unsigned long long)iblock, bh_result, create); | ||
64 | 65 | ||
65 | BUG_ON(ocfs2_inode_is_fast_symlink(inode)); | 66 | BUG_ON(ocfs2_inode_is_fast_symlink(inode)); |
66 | 67 | ||
@@ -123,7 +124,6 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, | |||
123 | bail: | 124 | bail: |
124 | brelse(bh); | 125 | brelse(bh); |
125 | 126 | ||
126 | mlog_exit(err); | ||
127 | return err; | 127 | return err; |
128 | } | 128 | } |
129 | 129 | ||
@@ -136,8 +136,8 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock, | |||
136 | u64 p_blkno, count, past_eof; | 136 | u64 p_blkno, count, past_eof; |
137 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 137 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
138 | 138 | ||
139 | mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, | 139 | trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno, |
140 | (unsigned long long)iblock, bh_result, create); | 140 | (unsigned long long)iblock, bh_result, create); |
141 | 141 | ||
142 | if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) | 142 | if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) |
143 | mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n", | 143 | mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n", |
@@ -199,8 +199,9 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock, | |||
199 | } | 199 | } |
200 | 200 | ||
201 | past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); | 201 | past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); |
202 | mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino, | 202 | |
203 | (unsigned long long)past_eof); | 203 | trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno, |
204 | (unsigned long long)past_eof); | ||
204 | if (create && (iblock >= past_eof)) | 205 | if (create && (iblock >= past_eof)) |
205 | set_buffer_new(bh_result); | 206 | set_buffer_new(bh_result); |
206 | 207 | ||
@@ -208,7 +209,6 @@ bail: | |||
208 | if (err < 0) | 209 | if (err < 0) |
209 | err = -EIO; | 210 | err = -EIO; |
210 | 211 | ||
211 | mlog_exit(err); | ||
212 | return err; | 212 | return err; |
213 | } | 213 | } |
214 | 214 | ||
@@ -278,7 +278,8 @@ static int ocfs2_readpage(struct file *file, struct page *page) | |||
278 | loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT; | 278 | loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT; |
279 | int ret, unlock = 1; | 279 | int ret, unlock = 1; |
280 | 280 | ||
281 | mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0)); | 281 | trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, |
282 | (page ? page->index : 0)); | ||
282 | 283 | ||
283 | ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page); | 284 | ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page); |
284 | if (ret != 0) { | 285 | if (ret != 0) { |
@@ -323,7 +324,6 @@ out_inode_unlock: | |||
323 | out: | 324 | out: |
324 | if (unlock) | 325 | if (unlock) |
325 | unlock_page(page); | 326 | unlock_page(page); |
326 | mlog_exit(ret); | ||
327 | return ret; | 327 | return ret; |
328 | } | 328 | } |
329 | 329 | ||
@@ -396,15 +396,11 @@ out_unlock: | |||
396 | */ | 396 | */ |
397 | static int ocfs2_writepage(struct page *page, struct writeback_control *wbc) | 397 | static int ocfs2_writepage(struct page *page, struct writeback_control *wbc) |
398 | { | 398 | { |
399 | int ret; | 399 | trace_ocfs2_writepage( |
400 | 400 | (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno, | |
401 | mlog_entry("(0x%p)\n", page); | 401 | page->index); |
402 | |||
403 | ret = block_write_full_page(page, ocfs2_get_block, wbc); | ||
404 | 402 | ||
405 | mlog_exit(ret); | 403 | return block_write_full_page(page, ocfs2_get_block, wbc); |
406 | |||
407 | return ret; | ||
408 | } | 404 | } |
409 | 405 | ||
410 | /* Taken from ext3. We don't necessarily need the full blown | 406 | /* Taken from ext3. We don't necessarily need the full blown |
@@ -450,7 +446,8 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) | |||
450 | int err = 0; | 446 | int err = 0; |
451 | struct inode *inode = mapping->host; | 447 | struct inode *inode = mapping->host; |
452 | 448 | ||
453 | mlog_entry("(block = %llu)\n", (unsigned long long)block); | 449 | trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno, |
450 | (unsigned long long)block); | ||
454 | 451 | ||
455 | /* We don't need to lock journal system files, since they aren't | 452 | /* We don't need to lock journal system files, since they aren't |
456 | * accessed concurrently from multiple nodes. | 453 | * accessed concurrently from multiple nodes. |
@@ -484,8 +481,6 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) | |||
484 | bail: | 481 | bail: |
485 | status = err ? 0 : p_blkno; | 482 | status = err ? 0 : p_blkno; |
486 | 483 | ||
487 | mlog_exit((int)status); | ||
488 | |||
489 | return status; | 484 | return status; |
490 | } | 485 | } |
491 | 486 | ||
@@ -616,9 +611,6 @@ static ssize_t ocfs2_direct_IO(int rw, | |||
616 | { | 611 | { |
617 | struct file *file = iocb->ki_filp; | 612 | struct file *file = iocb->ki_filp; |
618 | struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; | 613 | struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; |
619 | int ret; | ||
620 | |||
621 | mlog_entry_void(); | ||
622 | 614 | ||
623 | /* | 615 | /* |
624 | * Fallback to buffered I/O if we see an inode without | 616 | * Fallback to buffered I/O if we see an inode without |
@@ -631,13 +623,10 @@ static ssize_t ocfs2_direct_IO(int rw, | |||
631 | if (i_size_read(inode) <= offset) | 623 | if (i_size_read(inode) <= offset) |
632 | return 0; | 624 | return 0; |
633 | 625 | ||
634 | ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, | 626 | return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, |
635 | iov, offset, nr_segs, | 627 | iov, offset, nr_segs, |
636 | ocfs2_direct_IO_get_blocks, | 628 | ocfs2_direct_IO_get_blocks, |
637 | ocfs2_dio_end_io, NULL, 0); | 629 | ocfs2_dio_end_io, NULL, 0); |
638 | |||
639 | mlog_exit(ret); | ||
640 | return ret; | ||
641 | } | 630 | } |
642 | 631 | ||
643 | static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, | 632 | static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, |
@@ -1026,6 +1015,12 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, | |||
1026 | ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, | 1015 | ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, |
1027 | &cluster_start, &cluster_end); | 1016 | &cluster_start, &cluster_end); |
1028 | 1017 | ||
1018 | /* treat the write as new if a hole/lseek spanned across | ||
1019 | * the page boundary. | ||
1020 | */ | ||
1021 | new = new | ((i_size_read(inode) <= page_offset(page)) && | ||
1022 | (page_offset(page) <= user_pos)); | ||
1023 | |||
1029 | if (page == wc->w_target_page) { | 1024 | if (page == wc->w_target_page) { |
1030 | map_from = user_pos & (PAGE_CACHE_SIZE - 1); | 1025 | map_from = user_pos & (PAGE_CACHE_SIZE - 1); |
1031 | map_to = map_from + user_len; | 1026 | map_to = map_from + user_len; |
@@ -1534,9 +1529,9 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping, | |||
1534 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 1529 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
1535 | struct ocfs2_dinode *di = NULL; | 1530 | struct ocfs2_dinode *di = NULL; |
1536 | 1531 | ||
1537 | mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n", | 1532 | trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno, |
1538 | (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos, | 1533 | len, (unsigned long long)pos, |
1539 | oi->ip_dyn_features); | 1534 | oi->ip_dyn_features); |
1540 | 1535 | ||
1541 | /* | 1536 | /* |
1542 | * Handle inodes which already have inline data 1st. | 1537 | * Handle inodes which already have inline data 1st. |
@@ -1739,6 +1734,13 @@ try_again: | |||
1739 | 1734 | ||
1740 | di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; | 1735 | di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; |
1741 | 1736 | ||
1737 | trace_ocfs2_write_begin_nolock( | ||
1738 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
1739 | (long long)i_size_read(inode), | ||
1740 | le32_to_cpu(di->i_clusters), | ||
1741 | pos, len, flags, mmap_page, | ||
1742 | clusters_to_alloc, extents_to_split); | ||
1743 | |||
1742 | /* | 1744 | /* |
1743 | * We set w_target_from, w_target_to here so that | 1745 | * We set w_target_from, w_target_to here so that |
1744 | * ocfs2_write_end() knows which range in the target page to | 1746 | * ocfs2_write_end() knows which range in the target page to |
@@ -1751,12 +1753,6 @@ try_again: | |||
1751 | * ocfs2_lock_allocators(). It greatly over-estimates | 1753 | * ocfs2_lock_allocators(). It greatly over-estimates |
1752 | * the work to be done. | 1754 | * the work to be done. |
1753 | */ | 1755 | */ |
1754 | mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u," | ||
1755 | " clusters_to_add = %u, extents_to_split = %u\n", | ||
1756 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
1757 | (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), | ||
1758 | clusters_to_alloc, extents_to_split); | ||
1759 | |||
1760 | ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), | 1756 | ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), |
1761 | wc->w_di_bh); | 1757 | wc->w_di_bh); |
1762 | ret = ocfs2_lock_allocators(inode, &et, | 1758 | ret = ocfs2_lock_allocators(inode, &et, |
@@ -1938,8 +1934,8 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, | |||
1938 | memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); | 1934 | memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); |
1939 | kunmap_atomic(kaddr, KM_USER0); | 1935 | kunmap_atomic(kaddr, KM_USER0); |
1940 | 1936 | ||
1941 | mlog(0, "Data written to inode at offset %llu. " | 1937 | trace_ocfs2_write_end_inline( |
1942 | "id_count = %u, copied = %u, i_dyn_features = 0x%x\n", | 1938 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1943 | (unsigned long long)pos, *copied, | 1939 | (unsigned long long)pos, *copied, |
1944 | le16_to_cpu(di->id2.i_data.id_count), | 1940 | le16_to_cpu(di->id2.i_data.id_count), |
1945 | le16_to_cpu(di->i_dyn_features)); | 1941 | le16_to_cpu(di->i_dyn_features)); |
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index f9d5d3ffc75a..5d18ad10c27f 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c | |||
@@ -35,8 +35,8 @@ | |||
35 | #include "inode.h" | 35 | #include "inode.h" |
36 | #include "journal.h" | 36 | #include "journal.h" |
37 | #include "uptodate.h" | 37 | #include "uptodate.h" |
38 | |||
39 | #include "buffer_head_io.h" | 38 | #include "buffer_head_io.h" |
39 | #include "ocfs2_trace.h" | ||
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Bits on bh->b_state used by ocfs2. | 42 | * Bits on bh->b_state used by ocfs2. |
@@ -55,8 +55,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, | |||
55 | { | 55 | { |
56 | int ret = 0; | 56 | int ret = 0; |
57 | 57 | ||
58 | mlog_entry("(bh->b_blocknr = %llu, ci=%p)\n", | 58 | trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci); |
59 | (unsigned long long)bh->b_blocknr, ci); | ||
60 | 59 | ||
61 | BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO); | 60 | BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO); |
62 | BUG_ON(buffer_jbd(bh)); | 61 | BUG_ON(buffer_jbd(bh)); |
@@ -66,6 +65,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, | |||
66 | * can get modified during recovery even if read-only. */ | 65 | * can get modified during recovery even if read-only. */ |
67 | if (ocfs2_is_hard_readonly(osb)) { | 66 | if (ocfs2_is_hard_readonly(osb)) { |
68 | ret = -EROFS; | 67 | ret = -EROFS; |
68 | mlog_errno(ret); | ||
69 | goto out; | 69 | goto out; |
70 | } | 70 | } |
71 | 71 | ||
@@ -91,11 +91,11 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, | |||
91 | * uptodate. */ | 91 | * uptodate. */ |
92 | ret = -EIO; | 92 | ret = -EIO; |
93 | put_bh(bh); | 93 | put_bh(bh); |
94 | mlog_errno(ret); | ||
94 | } | 95 | } |
95 | 96 | ||
96 | ocfs2_metadata_cache_io_unlock(ci); | 97 | ocfs2_metadata_cache_io_unlock(ci); |
97 | out: | 98 | out: |
98 | mlog_exit(ret); | ||
99 | return ret; | 99 | return ret; |
100 | } | 100 | } |
101 | 101 | ||
@@ -106,10 +106,10 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, | |||
106 | unsigned int i; | 106 | unsigned int i; |
107 | struct buffer_head *bh; | 107 | struct buffer_head *bh; |
108 | 108 | ||
109 | if (!nr) { | 109 | trace_ocfs2_read_blocks_sync((unsigned long long)block, nr); |
110 | mlog(ML_BH_IO, "No buffers will be read!\n"); | 110 | |
111 | if (!nr) | ||
111 | goto bail; | 112 | goto bail; |
112 | } | ||
113 | 113 | ||
114 | for (i = 0 ; i < nr ; i++) { | 114 | for (i = 0 ; i < nr ; i++) { |
115 | if (bhs[i] == NULL) { | 115 | if (bhs[i] == NULL) { |
@@ -123,10 +123,8 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, | |||
123 | bh = bhs[i]; | 123 | bh = bhs[i]; |
124 | 124 | ||
125 | if (buffer_jbd(bh)) { | 125 | if (buffer_jbd(bh)) { |
126 | mlog(ML_BH_IO, | 126 | trace_ocfs2_read_blocks_sync_jbd( |
127 | "trying to sync read a jbd " | 127 | (unsigned long long)bh->b_blocknr); |
128 | "managed bh (blocknr = %llu), skipping\n", | ||
129 | (unsigned long long)bh->b_blocknr); | ||
130 | continue; | 128 | continue; |
131 | } | 129 | } |
132 | 130 | ||
@@ -186,8 +184,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
186 | struct buffer_head *bh; | 184 | struct buffer_head *bh; |
187 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | 185 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); |
188 | 186 | ||
189 | mlog_entry("(ci=%p, block=(%llu), nr=(%d), flags=%d)\n", | 187 | trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags); |
190 | ci, (unsigned long long)block, nr, flags); | ||
191 | 188 | ||
192 | BUG_ON(!ci); | 189 | BUG_ON(!ci); |
193 | BUG_ON((flags & OCFS2_BH_READAHEAD) && | 190 | BUG_ON((flags & OCFS2_BH_READAHEAD) && |
@@ -207,7 +204,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
207 | } | 204 | } |
208 | 205 | ||
209 | if (nr == 0) { | 206 | if (nr == 0) { |
210 | mlog(ML_BH_IO, "No buffers will be read!\n"); | ||
211 | status = 0; | 207 | status = 0; |
212 | goto bail; | 208 | goto bail; |
213 | } | 209 | } |
@@ -251,8 +247,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
251 | */ | 247 | */ |
252 | 248 | ||
253 | if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) { | 249 | if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) { |
254 | mlog(ML_UPTODATE, | 250 | trace_ocfs2_read_blocks_from_disk( |
255 | "bh (%llu), owner %llu not uptodate\n", | ||
256 | (unsigned long long)bh->b_blocknr, | 251 | (unsigned long long)bh->b_blocknr, |
257 | (unsigned long long)ocfs2_metadata_cache_owner(ci)); | 252 | (unsigned long long)ocfs2_metadata_cache_owner(ci)); |
258 | /* We're using ignore_cache here to say | 253 | /* We're using ignore_cache here to say |
@@ -260,11 +255,10 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
260 | ignore_cache = 1; | 255 | ignore_cache = 1; |
261 | } | 256 | } |
262 | 257 | ||
258 | trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr, | ||
259 | ignore_cache, buffer_jbd(bh), buffer_dirty(bh)); | ||
260 | |||
263 | if (buffer_jbd(bh)) { | 261 | if (buffer_jbd(bh)) { |
264 | if (ignore_cache) | ||
265 | mlog(ML_BH_IO, "trying to sync read a jbd " | ||
266 | "managed bh (blocknr = %llu)\n", | ||
267 | (unsigned long long)bh->b_blocknr); | ||
268 | continue; | 262 | continue; |
269 | } | 263 | } |
270 | 264 | ||
@@ -272,9 +266,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
272 | if (buffer_dirty(bh)) { | 266 | if (buffer_dirty(bh)) { |
273 | /* This should probably be a BUG, or | 267 | /* This should probably be a BUG, or |
274 | * at least return an error. */ | 268 | * at least return an error. */ |
275 | mlog(ML_BH_IO, "asking me to sync read a dirty " | ||
276 | "buffer! (blocknr = %llu)\n", | ||
277 | (unsigned long long)bh->b_blocknr); | ||
278 | continue; | 269 | continue; |
279 | } | 270 | } |
280 | 271 | ||
@@ -367,14 +358,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
367 | } | 358 | } |
368 | ocfs2_metadata_cache_io_unlock(ci); | 359 | ocfs2_metadata_cache_io_unlock(ci); |
369 | 360 | ||
370 | mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", | 361 | trace_ocfs2_read_blocks_end((unsigned long long)block, nr, |
371 | (unsigned long long)block, nr, | 362 | flags, ignore_cache); |
372 | ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", | ||
373 | flags); | ||
374 | 363 | ||
375 | bail: | 364 | bail: |
376 | 365 | ||
377 | mlog_exit(status); | ||
378 | return status; | 366 | return status; |
379 | } | 367 | } |
380 | 368 | ||
@@ -408,13 +396,12 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, | |||
408 | int ret = 0; | 396 | int ret = 0; |
409 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; | 397 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; |
410 | 398 | ||
411 | mlog_entry_void(); | ||
412 | |||
413 | BUG_ON(buffer_jbd(bh)); | 399 | BUG_ON(buffer_jbd(bh)); |
414 | ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr); | 400 | ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr); |
415 | 401 | ||
416 | if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) { | 402 | if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) { |
417 | ret = -EROFS; | 403 | ret = -EROFS; |
404 | mlog_errno(ret); | ||
418 | goto out; | 405 | goto out; |
419 | } | 406 | } |
420 | 407 | ||
@@ -434,9 +421,9 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, | |||
434 | if (!buffer_uptodate(bh)) { | 421 | if (!buffer_uptodate(bh)) { |
435 | ret = -EIO; | 422 | ret = -EIO; |
436 | put_bh(bh); | 423 | put_bh(bh); |
424 | mlog_errno(ret); | ||
437 | } | 425 | } |
438 | 426 | ||
439 | out: | 427 | out: |
440 | mlog_exit(ret); | ||
441 | return ret; | 428 | return ret; |
442 | } | 429 | } |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 1adab287bd24..2461eb3272ed 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -1654,8 +1654,6 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg) | |||
1654 | struct o2hb_disk_slot *slot; | 1654 | struct o2hb_disk_slot *slot; |
1655 | struct o2hb_disk_heartbeat_block *hb_block; | 1655 | struct o2hb_disk_heartbeat_block *hb_block; |
1656 | 1656 | ||
1657 | mlog_entry_void(); | ||
1658 | |||
1659 | ret = o2hb_read_slots(reg, reg->hr_blocks); | 1657 | ret = o2hb_read_slots(reg, reg->hr_blocks); |
1660 | if (ret) { | 1658 | if (ret) { |
1661 | mlog_errno(ret); | 1659 | mlog_errno(ret); |
@@ -1677,7 +1675,6 @@ static int o2hb_populate_slot_data(struct o2hb_region *reg) | |||
1677 | } | 1675 | } |
1678 | 1676 | ||
1679 | out: | 1677 | out: |
1680 | mlog_exit(ret); | ||
1681 | return ret; | 1678 | return ret; |
1682 | } | 1679 | } |
1683 | 1680 | ||
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c index 6c61771469af..07ac24fd9252 100644 --- a/fs/ocfs2/cluster/masklog.c +++ b/fs/ocfs2/cluster/masklog.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); | 31 | struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); |
32 | EXPORT_SYMBOL_GPL(mlog_and_bits); | 32 | EXPORT_SYMBOL_GPL(mlog_and_bits); |
33 | struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(MLOG_INITIAL_NOT_MASK); | 33 | struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0); |
34 | EXPORT_SYMBOL_GPL(mlog_not_bits); | 34 | EXPORT_SYMBOL_GPL(mlog_not_bits); |
35 | 35 | ||
36 | static ssize_t mlog_mask_show(u64 mask, char *buf) | 36 | static ssize_t mlog_mask_show(u64 mask, char *buf) |
@@ -80,8 +80,6 @@ struct mlog_attribute { | |||
80 | } | 80 | } |
81 | 81 | ||
82 | static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { | 82 | static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { |
83 | define_mask(ENTRY), | ||
84 | define_mask(EXIT), | ||
85 | define_mask(TCP), | 83 | define_mask(TCP), |
86 | define_mask(MSG), | 84 | define_mask(MSG), |
87 | define_mask(SOCKET), | 85 | define_mask(SOCKET), |
@@ -93,27 +91,11 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { | |||
93 | define_mask(DLM_THREAD), | 91 | define_mask(DLM_THREAD), |
94 | define_mask(DLM_MASTER), | 92 | define_mask(DLM_MASTER), |
95 | define_mask(DLM_RECOVERY), | 93 | define_mask(DLM_RECOVERY), |
96 | define_mask(AIO), | ||
97 | define_mask(JOURNAL), | ||
98 | define_mask(DISK_ALLOC), | ||
99 | define_mask(SUPER), | ||
100 | define_mask(FILE_IO), | ||
101 | define_mask(EXTENT_MAP), | ||
102 | define_mask(DLM_GLUE), | 94 | define_mask(DLM_GLUE), |
103 | define_mask(BH_IO), | ||
104 | define_mask(UPTODATE), | ||
105 | define_mask(NAMEI), | ||
106 | define_mask(INODE), | ||
107 | define_mask(VOTE), | 95 | define_mask(VOTE), |
108 | define_mask(DCACHE), | ||
109 | define_mask(CONN), | 96 | define_mask(CONN), |
110 | define_mask(QUORUM), | 97 | define_mask(QUORUM), |
111 | define_mask(EXPORT), | ||
112 | define_mask(XATTR), | ||
113 | define_mask(QUOTA), | ||
114 | define_mask(REFCOUNT), | ||
115 | define_mask(BASTS), | 98 | define_mask(BASTS), |
116 | define_mask(RESERVATIONS), | ||
117 | define_mask(CLUSTER), | 99 | define_mask(CLUSTER), |
118 | define_mask(ERROR), | 100 | define_mask(ERROR), |
119 | define_mask(NOTICE), | 101 | define_mask(NOTICE), |
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index 34d6544357d9..baa2b9ef7eef 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h | |||
@@ -82,41 +82,23 @@ | |||
82 | 82 | ||
83 | /* bits that are frequently given and infrequently matched in the low word */ | 83 | /* bits that are frequently given and infrequently matched in the low word */ |
84 | /* NOTE: If you add a flag, you need to also update masklog.c! */ | 84 | /* NOTE: If you add a flag, you need to also update masklog.c! */ |
85 | #define ML_ENTRY 0x0000000000000001ULL /* func call entry */ | 85 | #define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */ |
86 | #define ML_EXIT 0x0000000000000002ULL /* func call exit */ | 86 | #define ML_MSG 0x0000000000000002ULL /* net network messages */ |
87 | #define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */ | 87 | #define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */ |
88 | #define ML_MSG 0x0000000000000008ULL /* net network messages */ | 88 | #define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */ |
89 | #define ML_SOCKET 0x0000000000000010ULL /* net socket lifetime */ | 89 | #define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */ |
90 | #define ML_HEARTBEAT 0x0000000000000020ULL /* hb all heartbeat tracking */ | 90 | #define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */ |
91 | #define ML_HB_BIO 0x0000000000000040ULL /* hb io tracing */ | 91 | #define ML_DLM 0x0000000000000040ULL /* dlm general debugging */ |
92 | #define ML_DLMFS 0x0000000000000080ULL /* dlm user dlmfs */ | 92 | #define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */ |
93 | #define ML_DLM 0x0000000000000100ULL /* dlm general debugging */ | 93 | #define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */ |
94 | #define ML_DLM_DOMAIN 0x0000000000000200ULL /* dlm domain debugging */ | 94 | #define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */ |
95 | #define ML_DLM_THREAD 0x0000000000000400ULL /* dlm domain thread */ | 95 | #define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm master functions */ |
96 | #define ML_DLM_MASTER 0x0000000000000800ULL /* dlm master functions */ | 96 | #define ML_DLM_GLUE 0x0000000000000800ULL /* ocfs2 dlm glue layer */ |
97 | #define ML_DLM_RECOVERY 0x0000000000001000ULL /* dlm master functions */ | 97 | #define ML_VOTE 0x0000000000001000ULL /* ocfs2 node messaging */ |
98 | #define ML_AIO 0x0000000000002000ULL /* ocfs2 aio read and write */ | 98 | #define ML_CONN 0x0000000000002000ULL /* net connection management */ |
99 | #define ML_JOURNAL 0x0000000000004000ULL /* ocfs2 journalling functions */ | 99 | #define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */ |
100 | #define ML_DISK_ALLOC 0x0000000000008000ULL /* ocfs2 disk allocation */ | 100 | #define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */ |
101 | #define ML_SUPER 0x0000000000010000ULL /* ocfs2 mount / umount */ | 101 | #define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */ |
102 | #define ML_FILE_IO 0x0000000000020000ULL /* ocfs2 file I/O */ | ||
103 | #define ML_EXTENT_MAP 0x0000000000040000ULL /* ocfs2 extent map caching */ | ||
104 | #define ML_DLM_GLUE 0x0000000000080000ULL /* ocfs2 dlm glue layer */ | ||
105 | #define ML_BH_IO 0x0000000000100000ULL /* ocfs2 buffer I/O */ | ||
106 | #define ML_UPTODATE 0x0000000000200000ULL /* ocfs2 caching sequence #'s */ | ||
107 | #define ML_NAMEI 0x0000000000400000ULL /* ocfs2 directory / namespace */ | ||
108 | #define ML_INODE 0x0000000000800000ULL /* ocfs2 inode manipulation */ | ||
109 | #define ML_VOTE 0x0000000001000000ULL /* ocfs2 node messaging */ | ||
110 | #define ML_DCACHE 0x0000000002000000ULL /* ocfs2 dcache operations */ | ||
111 | #define ML_CONN 0x0000000004000000ULL /* net connection management */ | ||
112 | #define ML_QUORUM 0x0000000008000000ULL /* net connection quorum */ | ||
113 | #define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */ | ||
114 | #define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */ | ||
115 | #define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */ | ||
116 | #define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */ | ||
117 | #define ML_BASTS 0x0000000100000000ULL /* dlmglue asts and basts */ | ||
118 | #define ML_RESERVATIONS 0x0000000200000000ULL /* ocfs2 alloc reservations */ | ||
119 | #define ML_CLUSTER 0x0000000400000000ULL /* cluster stack */ | ||
120 | 102 | ||
121 | /* bits that are infrequently given and frequently matched in the high word */ | 103 | /* bits that are infrequently given and frequently matched in the high word */ |
122 | #define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */ | 104 | #define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */ |
@@ -124,7 +106,6 @@ | |||
124 | #define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */ | 106 | #define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */ |
125 | 107 | ||
126 | #define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE) | 108 | #define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE) |
127 | #define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT) | ||
128 | #ifndef MLOG_MASK_PREFIX | 109 | #ifndef MLOG_MASK_PREFIX |
129 | #define MLOG_MASK_PREFIX 0 | 110 | #define MLOG_MASK_PREFIX 0 |
130 | #endif | 111 | #endif |
@@ -222,58 +203,6 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits; | |||
222 | mlog(ML_ERROR, "status = %lld\n", (long long)_st); \ | 203 | mlog(ML_ERROR, "status = %lld\n", (long long)_st); \ |
223 | } while (0) | 204 | } while (0) |
224 | 205 | ||
225 | #if defined(CONFIG_OCFS2_DEBUG_MASKLOG) | ||
226 | #define mlog_entry(fmt, args...) do { \ | ||
227 | mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \ | ||
228 | } while (0) | ||
229 | |||
230 | #define mlog_entry_void() do { \ | ||
231 | mlog(ML_ENTRY, "ENTRY:\n"); \ | ||
232 | } while (0) | ||
233 | |||
234 | /* | ||
235 | * We disable this for sparse. | ||
236 | */ | ||
237 | #if !defined(__CHECKER__) | ||
238 | #define mlog_exit(st) do { \ | ||
239 | if (__builtin_types_compatible_p(typeof(st), unsigned long)) \ | ||
240 | mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \ | ||
241 | else if (__builtin_types_compatible_p(typeof(st), signed long)) \ | ||
242 | mlog(ML_EXIT, "EXIT: %ld\n", (signed long) (st)); \ | ||
243 | else if (__builtin_types_compatible_p(typeof(st), unsigned int) \ | ||
244 | || __builtin_types_compatible_p(typeof(st), unsigned short) \ | ||
245 | || __builtin_types_compatible_p(typeof(st), unsigned char)) \ | ||
246 | mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \ | ||
247 | else if (__builtin_types_compatible_p(typeof(st), signed int) \ | ||
248 | || __builtin_types_compatible_p(typeof(st), signed short) \ | ||
249 | || __builtin_types_compatible_p(typeof(st), signed char)) \ | ||
250 | mlog(ML_EXIT, "EXIT: %d\n", (signed int) (st)); \ | ||
251 | else if (__builtin_types_compatible_p(typeof(st), long long)) \ | ||
252 | mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \ | ||
253 | else \ | ||
254 | mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \ | ||
255 | } while (0) | ||
256 | #else | ||
257 | #define mlog_exit(st) do { \ | ||
258 | mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \ | ||
259 | } while (0) | ||
260 | #endif | ||
261 | |||
262 | #define mlog_exit_ptr(ptr) do { \ | ||
263 | mlog(ML_EXIT, "EXIT: %p\n", ptr); \ | ||
264 | } while (0) | ||
265 | |||
266 | #define mlog_exit_void() do { \ | ||
267 | mlog(ML_EXIT, "EXIT\n"); \ | ||
268 | } while (0) | ||
269 | #else | ||
270 | #define mlog_entry(...) do { } while (0) | ||
271 | #define mlog_entry_void(...) do { } while (0) | ||
272 | #define mlog_exit(...) do { } while (0) | ||
273 | #define mlog_exit_ptr(...) do { } while (0) | ||
274 | #define mlog_exit_void(...) do { } while (0) | ||
275 | #endif /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */ | ||
276 | |||
277 | #define mlog_bug_on_msg(cond, fmt, args...) do { \ | 206 | #define mlog_bug_on_msg(cond, fmt, args...) do { \ |
278 | if (cond) { \ | 207 | if (cond) { \ |
279 | mlog(ML_ERROR, "bug expression: " #cond "\n"); \ | 208 | mlog(ML_ERROR, "bug expression: " #cond "\n"); \ |
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 3b11cb1e38fc..ee04ff5ee603 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -210,10 +210,6 @@ static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc) | |||
210 | sc->sc_tv_func_stop = ktime_get(); | 210 | sc->sc_tv_func_stop = ktime_get(); |
211 | } | 211 | } |
212 | 212 | ||
213 | static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc) | ||
214 | { | ||
215 | return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start); | ||
216 | } | ||
217 | #else /* CONFIG_DEBUG_FS */ | 213 | #else /* CONFIG_DEBUG_FS */ |
218 | # define o2net_init_nst(a, b, c, d, e) | 214 | # define o2net_init_nst(a, b, c, d, e) |
219 | # define o2net_set_nst_sock_time(a) | 215 | # define o2net_set_nst_sock_time(a) |
@@ -227,10 +223,14 @@ static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc) | |||
227 | # define o2net_set_advance_stop_time(a) | 223 | # define o2net_set_advance_stop_time(a) |
228 | # define o2net_set_func_start_time(a) | 224 | # define o2net_set_func_start_time(a) |
229 | # define o2net_set_func_stop_time(a) | 225 | # define o2net_set_func_stop_time(a) |
230 | # define o2net_get_func_run_time(a) (ktime_t)0 | ||
231 | #endif /* CONFIG_DEBUG_FS */ | 226 | #endif /* CONFIG_DEBUG_FS */ |
232 | 227 | ||
233 | #ifdef CONFIG_OCFS2_FS_STATS | 228 | #ifdef CONFIG_OCFS2_FS_STATS |
229 | static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc) | ||
230 | { | ||
231 | return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start); | ||
232 | } | ||
233 | |||
234 | static void o2net_update_send_stats(struct o2net_send_tracking *nst, | 234 | static void o2net_update_send_stats(struct o2net_send_tracking *nst, |
235 | struct o2net_sock_container *sc) | 235 | struct o2net_sock_container *sc) |
236 | { | 236 | { |
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 7eb90403fc8a..e5ba34818332 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/namei.h> | 29 | #include <linux/namei.h> |
30 | 30 | ||
31 | #define MLOG_MASK_PREFIX ML_DCACHE | ||
32 | #include <cluster/masklog.h> | 31 | #include <cluster/masklog.h> |
33 | 32 | ||
34 | #include "ocfs2.h" | 33 | #include "ocfs2.h" |
@@ -39,6 +38,7 @@ | |||
39 | #include "file.h" | 38 | #include "file.h" |
40 | #include "inode.h" | 39 | #include "inode.h" |
41 | #include "super.h" | 40 | #include "super.h" |
41 | #include "ocfs2_trace.h" | ||
42 | 42 | ||
43 | void ocfs2_dentry_attach_gen(struct dentry *dentry) | 43 | void ocfs2_dentry_attach_gen(struct dentry *dentry) |
44 | { | 44 | { |
@@ -62,8 +62,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, | |||
62 | inode = dentry->d_inode; | 62 | inode = dentry->d_inode; |
63 | osb = OCFS2_SB(dentry->d_sb); | 63 | osb = OCFS2_SB(dentry->d_sb); |
64 | 64 | ||
65 | mlog_entry("(0x%p, '%.*s')\n", dentry, | 65 | trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len, |
66 | dentry->d_name.len, dentry->d_name.name); | 66 | dentry->d_name.name); |
67 | 67 | ||
68 | /* For a negative dentry - | 68 | /* For a negative dentry - |
69 | * check the generation number of the parent and compare with the | 69 | * check the generation number of the parent and compare with the |
@@ -73,9 +73,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, | |||
73 | unsigned long gen = (unsigned long) dentry->d_fsdata; | 73 | unsigned long gen = (unsigned long) dentry->d_fsdata; |
74 | unsigned long pgen = | 74 | unsigned long pgen = |
75 | OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; | 75 | OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; |
76 | mlog(0, "negative dentry: %.*s parent gen: %lu " | 76 | |
77 | "dentry gen: %lu\n", | 77 | trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len, |
78 | dentry->d_name.len, dentry->d_name.name, pgen, gen); | 78 | dentry->d_name.name, |
79 | pgen, gen); | ||
79 | if (gen != pgen) | 80 | if (gen != pgen) |
80 | goto bail; | 81 | goto bail; |
81 | goto valid; | 82 | goto valid; |
@@ -90,8 +91,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, | |||
90 | /* did we or someone else delete this inode? */ | 91 | /* did we or someone else delete this inode? */ |
91 | if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { | 92 | if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { |
92 | spin_unlock(&OCFS2_I(inode)->ip_lock); | 93 | spin_unlock(&OCFS2_I(inode)->ip_lock); |
93 | mlog(0, "inode (%llu) deleted, returning false\n", | 94 | trace_ocfs2_dentry_revalidate_delete( |
94 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | 95 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
95 | goto bail; | 96 | goto bail; |
96 | } | 97 | } |
97 | spin_unlock(&OCFS2_I(inode)->ip_lock); | 98 | spin_unlock(&OCFS2_I(inode)->ip_lock); |
@@ -101,10 +102,9 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, | |||
101 | * inode nlink hits zero, it never goes back. | 102 | * inode nlink hits zero, it never goes back. |
102 | */ | 103 | */ |
103 | if (inode->i_nlink == 0) { | 104 | if (inode->i_nlink == 0) { |
104 | mlog(0, "Inode %llu orphaned, returning false " | 105 | trace_ocfs2_dentry_revalidate_orphaned( |
105 | "dir = %d\n", | 106 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
106 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 107 | S_ISDIR(inode->i_mode)); |
107 | S_ISDIR(inode->i_mode)); | ||
108 | goto bail; | 108 | goto bail; |
109 | } | 109 | } |
110 | 110 | ||
@@ -113,9 +113,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, | |||
113 | * redo it. | 113 | * redo it. |
114 | */ | 114 | */ |
115 | if (!dentry->d_fsdata) { | 115 | if (!dentry->d_fsdata) { |
116 | mlog(0, "Inode %llu doesn't have dentry lock, " | 116 | trace_ocfs2_dentry_revalidate_nofsdata( |
117 | "returning false\n", | 117 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
118 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
119 | goto bail; | 118 | goto bail; |
120 | } | 119 | } |
121 | 120 | ||
@@ -123,8 +122,7 @@ valid: | |||
123 | ret = 1; | 122 | ret = 1; |
124 | 123 | ||
125 | bail: | 124 | bail: |
126 | mlog_exit(ret); | 125 | trace_ocfs2_dentry_revalidate_ret(ret); |
127 | |||
128 | return ret; | 126 | return ret; |
129 | } | 127 | } |
130 | 128 | ||
@@ -181,8 +179,8 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode, | |||
181 | 179 | ||
182 | spin_lock(&dentry->d_lock); | 180 | spin_lock(&dentry->d_lock); |
183 | if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { | 181 | if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { |
184 | mlog(0, "dentry found: %.*s\n", | 182 | trace_ocfs2_find_local_alias(dentry->d_name.len, |
185 | dentry->d_name.len, dentry->d_name.name); | 183 | dentry->d_name.name); |
186 | 184 | ||
187 | dget_dlock(dentry); | 185 | dget_dlock(dentry); |
188 | spin_unlock(&dentry->d_lock); | 186 | spin_unlock(&dentry->d_lock); |
@@ -240,9 +238,8 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, | |||
240 | struct dentry *alias; | 238 | struct dentry *alias; |
241 | struct ocfs2_dentry_lock *dl = dentry->d_fsdata; | 239 | struct ocfs2_dentry_lock *dl = dentry->d_fsdata; |
242 | 240 | ||
243 | mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n", | 241 | trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name, |
244 | dentry->d_name.len, dentry->d_name.name, | 242 | (unsigned long long)parent_blkno, dl); |
245 | (unsigned long long)parent_blkno, dl); | ||
246 | 243 | ||
247 | /* | 244 | /* |
248 | * Negative dentry. We ignore these for now. | 245 | * Negative dentry. We ignore these for now. |
@@ -292,7 +289,9 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, | |||
292 | (unsigned long long)parent_blkno, | 289 | (unsigned long long)parent_blkno, |
293 | (unsigned long long)dl->dl_parent_blkno); | 290 | (unsigned long long)dl->dl_parent_blkno); |
294 | 291 | ||
295 | mlog(0, "Found: %s\n", dl->dl_lockres.l_name); | 292 | trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name, |
293 | (unsigned long long)parent_blkno, | ||
294 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
296 | 295 | ||
297 | goto out_attach; | 296 | goto out_attach; |
298 | } | 297 | } |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index f97b6f1c61dd..9fe5b8fd658f 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <linux/quotaops.h> | 43 | #include <linux/quotaops.h> |
44 | #include <linux/sort.h> | 44 | #include <linux/sort.h> |
45 | 45 | ||
46 | #define MLOG_MASK_PREFIX ML_NAMEI | ||
47 | #include <cluster/masklog.h> | 46 | #include <cluster/masklog.h> |
48 | 47 | ||
49 | #include "ocfs2.h" | 48 | #include "ocfs2.h" |
@@ -61,6 +60,7 @@ | |||
61 | #include "super.h" | 60 | #include "super.h" |
62 | #include "sysfile.h" | 61 | #include "sysfile.h" |
63 | #include "uptodate.h" | 62 | #include "uptodate.h" |
63 | #include "ocfs2_trace.h" | ||
64 | 64 | ||
65 | #include "buffer_head_io.h" | 65 | #include "buffer_head_io.h" |
66 | 66 | ||
@@ -322,21 +322,23 @@ static int ocfs2_check_dir_entry(struct inode * dir, | |||
322 | const char *error_msg = NULL; | 322 | const char *error_msg = NULL; |
323 | const int rlen = le16_to_cpu(de->rec_len); | 323 | const int rlen = le16_to_cpu(de->rec_len); |
324 | 324 | ||
325 | if (rlen < OCFS2_DIR_REC_LEN(1)) | 325 | if (unlikely(rlen < OCFS2_DIR_REC_LEN(1))) |
326 | error_msg = "rec_len is smaller than minimal"; | 326 | error_msg = "rec_len is smaller than minimal"; |
327 | else if (rlen % 4 != 0) | 327 | else if (unlikely(rlen % 4 != 0)) |
328 | error_msg = "rec_len % 4 != 0"; | 328 | error_msg = "rec_len % 4 != 0"; |
329 | else if (rlen < OCFS2_DIR_REC_LEN(de->name_len)) | 329 | else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len))) |
330 | error_msg = "rec_len is too small for name_len"; | 330 | error_msg = "rec_len is too small for name_len"; |
331 | else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) | 331 | else if (unlikely( |
332 | ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)) | ||
332 | error_msg = "directory entry across blocks"; | 333 | error_msg = "directory entry across blocks"; |
333 | 334 | ||
334 | if (error_msg != NULL) | 335 | if (unlikely(error_msg != NULL)) |
335 | mlog(ML_ERROR, "bad entry in directory #%llu: %s - " | 336 | mlog(ML_ERROR, "bad entry in directory #%llu: %s - " |
336 | "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n", | 337 | "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n", |
337 | (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg, | 338 | (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg, |
338 | offset, (unsigned long long)le64_to_cpu(de->inode), rlen, | 339 | offset, (unsigned long long)le64_to_cpu(de->inode), rlen, |
339 | de->name_len); | 340 | de->name_len); |
341 | |||
340 | return error_msg == NULL ? 1 : 0; | 342 | return error_msg == NULL ? 1 : 0; |
341 | } | 343 | } |
342 | 344 | ||
@@ -367,8 +369,6 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh, | |||
367 | int de_len; | 369 | int de_len; |
368 | int ret = 0; | 370 | int ret = 0; |
369 | 371 | ||
370 | mlog_entry_void(); | ||
371 | |||
372 | de_buf = first_de; | 372 | de_buf = first_de; |
373 | dlimit = de_buf + bytes; | 373 | dlimit = de_buf + bytes; |
374 | 374 | ||
@@ -402,7 +402,7 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh, | |||
402 | } | 402 | } |
403 | 403 | ||
404 | bail: | 404 | bail: |
405 | mlog_exit(ret); | 405 | trace_ocfs2_search_dirblock(ret); |
406 | return ret; | 406 | return ret; |
407 | } | 407 | } |
408 | 408 | ||
@@ -447,8 +447,7 @@ static int ocfs2_validate_dir_block(struct super_block *sb, | |||
447 | * We don't validate dirents here, that's handled | 447 | * We don't validate dirents here, that's handled |
448 | * in-place when the code walks them. | 448 | * in-place when the code walks them. |
449 | */ | 449 | */ |
450 | mlog(0, "Validating dirblock %llu\n", | 450 | trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr); |
451 | (unsigned long long)bh->b_blocknr); | ||
452 | 451 | ||
453 | BUG_ON(!buffer_uptodate(bh)); | 452 | BUG_ON(!buffer_uptodate(bh)); |
454 | 453 | ||
@@ -706,8 +705,6 @@ static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen, | |||
706 | int num = 0; | 705 | int num = 0; |
707 | int nblocks, i, err; | 706 | int nblocks, i, err; |
708 | 707 | ||
709 | mlog_entry_void(); | ||
710 | |||
711 | sb = dir->i_sb; | 708 | sb = dir->i_sb; |
712 | 709 | ||
713 | nblocks = i_size_read(dir) >> sb->s_blocksize_bits; | 710 | nblocks = i_size_read(dir) >> sb->s_blocksize_bits; |
@@ -788,7 +785,7 @@ cleanup_and_exit: | |||
788 | for (; ra_ptr < ra_max; ra_ptr++) | 785 | for (; ra_ptr < ra_max; ra_ptr++) |
789 | brelse(bh_use[ra_ptr]); | 786 | brelse(bh_use[ra_ptr]); |
790 | 787 | ||
791 | mlog_exit_ptr(ret); | 788 | trace_ocfs2_find_entry_el(ret); |
792 | return ret; | 789 | return ret; |
793 | } | 790 | } |
794 | 791 | ||
@@ -950,11 +947,9 @@ static int ocfs2_dx_dir_search(const char *name, int namelen, | |||
950 | goto out; | 947 | goto out; |
951 | } | 948 | } |
952 | 949 | ||
953 | mlog(0, "Dir %llu: name: \"%.*s\", lookup of hash: %u.0x%x " | 950 | trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno, |
954 | "returns: %llu\n", | 951 | namelen, name, hinfo->major_hash, |
955 | (unsigned long long)OCFS2_I(dir)->ip_blkno, | 952 | hinfo->minor_hash, (unsigned long long)phys); |
956 | namelen, name, hinfo->major_hash, hinfo->minor_hash, | ||
957 | (unsigned long long)phys); | ||
958 | 953 | ||
959 | ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh); | 954 | ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh); |
960 | if (ret) { | 955 | if (ret) { |
@@ -964,9 +959,9 @@ static int ocfs2_dx_dir_search(const char *name, int namelen, | |||
964 | 959 | ||
965 | dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data; | 960 | dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data; |
966 | 961 | ||
967 | mlog(0, "leaf info: num_used: %d, count: %d\n", | 962 | trace_ocfs2_dx_dir_search_leaf_info( |
968 | le16_to_cpu(dx_leaf->dl_list.de_num_used), | 963 | le16_to_cpu(dx_leaf->dl_list.de_num_used), |
969 | le16_to_cpu(dx_leaf->dl_list.de_count)); | 964 | le16_to_cpu(dx_leaf->dl_list.de_count)); |
970 | 965 | ||
971 | entry_list = &dx_leaf->dl_list; | 966 | entry_list = &dx_leaf->dl_list; |
972 | 967 | ||
@@ -1166,8 +1161,6 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, | |||
1166 | int i, status = -ENOENT; | 1161 | int i, status = -ENOENT; |
1167 | ocfs2_journal_access_func access = ocfs2_journal_access_db; | 1162 | ocfs2_journal_access_func access = ocfs2_journal_access_db; |
1168 | 1163 | ||
1169 | mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh); | ||
1170 | |||
1171 | if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) | 1164 | if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) |
1172 | access = ocfs2_journal_access_di; | 1165 | access = ocfs2_journal_access_di; |
1173 | 1166 | ||
@@ -1202,7 +1195,6 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, | |||
1202 | de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len)); | 1195 | de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len)); |
1203 | } | 1196 | } |
1204 | bail: | 1197 | bail: |
1205 | mlog_exit(status); | ||
1206 | return status; | 1198 | return status; |
1207 | } | 1199 | } |
1208 | 1200 | ||
@@ -1348,8 +1340,8 @@ static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir, | |||
1348 | } | 1340 | } |
1349 | } | 1341 | } |
1350 | 1342 | ||
1351 | mlog(0, "Dir %llu: delete entry at index: %d\n", | 1343 | trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno, |
1352 | (unsigned long long)OCFS2_I(dir)->ip_blkno, index); | 1344 | index); |
1353 | 1345 | ||
1354 | ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry, | 1346 | ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry, |
1355 | leaf_bh, leaf_bh->b_data, leaf_bh->b_size); | 1347 | leaf_bh, leaf_bh->b_data, leaf_bh->b_size); |
@@ -1632,8 +1624,6 @@ int __ocfs2_add_entry(handle_t *handle, | |||
1632 | struct buffer_head *insert_bh = lookup->dl_leaf_bh; | 1624 | struct buffer_head *insert_bh = lookup->dl_leaf_bh; |
1633 | char *data_start = insert_bh->b_data; | 1625 | char *data_start = insert_bh->b_data; |
1634 | 1626 | ||
1635 | mlog_entry_void(); | ||
1636 | |||
1637 | if (!namelen) | 1627 | if (!namelen) |
1638 | return -EINVAL; | 1628 | return -EINVAL; |
1639 | 1629 | ||
@@ -1765,8 +1755,9 @@ int __ocfs2_add_entry(handle_t *handle, | |||
1765 | * from ever getting here. */ | 1755 | * from ever getting here. */ |
1766 | retval = -ENOSPC; | 1756 | retval = -ENOSPC; |
1767 | bail: | 1757 | bail: |
1758 | if (retval) | ||
1759 | mlog_errno(retval); | ||
1768 | 1760 | ||
1769 | mlog_exit(retval); | ||
1770 | return retval; | 1761 | return retval; |
1771 | } | 1762 | } |
1772 | 1763 | ||
@@ -2028,8 +2019,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) | |||
2028 | struct inode *inode = filp->f_path.dentry->d_inode; | 2019 | struct inode *inode = filp->f_path.dentry->d_inode; |
2029 | int lock_level = 0; | 2020 | int lock_level = 0; |
2030 | 2021 | ||
2031 | mlog_entry("dirino=%llu\n", | 2022 | trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno); |
2032 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
2033 | 2023 | ||
2034 | error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); | 2024 | error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); |
2035 | if (lock_level && error >= 0) { | 2025 | if (lock_level && error >= 0) { |
@@ -2051,9 +2041,10 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) | |||
2051 | dirent, filldir, NULL); | 2041 | dirent, filldir, NULL); |
2052 | 2042 | ||
2053 | ocfs2_inode_unlock(inode, lock_level); | 2043 | ocfs2_inode_unlock(inode, lock_level); |
2044 | if (error) | ||
2045 | mlog_errno(error); | ||
2054 | 2046 | ||
2055 | bail_nolock: | 2047 | bail_nolock: |
2056 | mlog_exit(error); | ||
2057 | 2048 | ||
2058 | return error; | 2049 | return error; |
2059 | } | 2050 | } |
@@ -2069,8 +2060,8 @@ int ocfs2_find_files_on_disk(const char *name, | |||
2069 | { | 2060 | { |
2070 | int status = -ENOENT; | 2061 | int status = -ENOENT; |
2071 | 2062 | ||
2072 | mlog(0, "name=%.*s, blkno=%p, inode=%llu\n", namelen, name, blkno, | 2063 | trace_ocfs2_find_files_on_disk(namelen, name, blkno, |
2073 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | 2064 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
2074 | 2065 | ||
2075 | status = ocfs2_find_entry(name, namelen, inode, lookup); | 2066 | status = ocfs2_find_entry(name, namelen, inode, lookup); |
2076 | if (status) | 2067 | if (status) |
@@ -2114,8 +2105,8 @@ int ocfs2_check_dir_for_entry(struct inode *dir, | |||
2114 | int ret; | 2105 | int ret; |
2115 | struct ocfs2_dir_lookup_result lookup = { NULL, }; | 2106 | struct ocfs2_dir_lookup_result lookup = { NULL, }; |
2116 | 2107 | ||
2117 | mlog_entry("dir %llu, name '%.*s'\n", | 2108 | trace_ocfs2_check_dir_for_entry( |
2118 | (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); | 2109 | (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); |
2119 | 2110 | ||
2120 | ret = -EEXIST; | 2111 | ret = -EEXIST; |
2121 | if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) | 2112 | if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) |
@@ -2125,7 +2116,8 @@ int ocfs2_check_dir_for_entry(struct inode *dir, | |||
2125 | bail: | 2116 | bail: |
2126 | ocfs2_free_dir_lookup_result(&lookup); | 2117 | ocfs2_free_dir_lookup_result(&lookup); |
2127 | 2118 | ||
2128 | mlog_exit(ret); | 2119 | if (ret) |
2120 | mlog_errno(ret); | ||
2129 | return ret; | 2121 | return ret; |
2130 | } | 2122 | } |
2131 | 2123 | ||
@@ -2324,8 +2316,6 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, | |||
2324 | struct buffer_head *new_bh = NULL; | 2316 | struct buffer_head *new_bh = NULL; |
2325 | struct ocfs2_dir_entry *de; | 2317 | struct ocfs2_dir_entry *de; |
2326 | 2318 | ||
2327 | mlog_entry_void(); | ||
2328 | |||
2329 | if (ocfs2_new_dir_wants_trailer(inode)) | 2319 | if (ocfs2_new_dir_wants_trailer(inode)) |
2330 | size = ocfs2_dir_trailer_blk_off(parent->i_sb); | 2320 | size = ocfs2_dir_trailer_blk_off(parent->i_sb); |
2331 | 2321 | ||
@@ -2380,7 +2370,6 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, | |||
2380 | bail: | 2370 | bail: |
2381 | brelse(new_bh); | 2371 | brelse(new_bh); |
2382 | 2372 | ||
2383 | mlog_exit(status); | ||
2384 | return status; | 2373 | return status; |
2385 | } | 2374 | } |
2386 | 2375 | ||
@@ -2409,9 +2398,9 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb, | |||
2409 | goto out; | 2398 | goto out; |
2410 | } | 2399 | } |
2411 | 2400 | ||
2412 | mlog(0, "Dir %llu, attach new index block: %llu\n", | 2401 | trace_ocfs2_dx_dir_attach_index( |
2413 | (unsigned long long)OCFS2_I(dir)->ip_blkno, | 2402 | (unsigned long long)OCFS2_I(dir)->ip_blkno, |
2414 | (unsigned long long)dr_blkno); | 2403 | (unsigned long long)dr_blkno); |
2415 | 2404 | ||
2416 | dx_root_bh = sb_getblk(osb->sb, dr_blkno); | 2405 | dx_root_bh = sb_getblk(osb->sb, dr_blkno); |
2417 | if (dx_root_bh == NULL) { | 2406 | if (dx_root_bh == NULL) { |
@@ -2511,11 +2500,10 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb, | |||
2511 | dx_leaf->dl_list.de_count = | 2500 | dx_leaf->dl_list.de_count = |
2512 | cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb)); | 2501 | cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb)); |
2513 | 2502 | ||
2514 | mlog(0, | 2503 | trace_ocfs2_dx_dir_format_cluster( |
2515 | "Dir %llu, format dx_leaf: %llu, entry count: %u\n", | 2504 | (unsigned long long)OCFS2_I(dir)->ip_blkno, |
2516 | (unsigned long long)OCFS2_I(dir)->ip_blkno, | 2505 | (unsigned long long)bh->b_blocknr, |
2517 | (unsigned long long)bh->b_blocknr, | 2506 | le16_to_cpu(dx_leaf->dl_list.de_count)); |
2518 | le16_to_cpu(dx_leaf->dl_list.de_count)); | ||
2519 | 2507 | ||
2520 | ocfs2_journal_dirty(handle, bh); | 2508 | ocfs2_journal_dirty(handle, bh); |
2521 | } | 2509 | } |
@@ -2759,12 +2747,11 @@ static void ocfs2_dx_dir_index_root_block(struct inode *dir, | |||
2759 | 2747 | ||
2760 | ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo); | 2748 | ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo); |
2761 | 2749 | ||
2762 | mlog(0, | 2750 | trace_ocfs2_dx_dir_index_root_block( |
2763 | "dir: %llu, major: 0x%x minor: 0x%x, index: %u, name: %.*s\n", | 2751 | (unsigned long long)dir->i_ino, |
2764 | (unsigned long long)dir->i_ino, hinfo.major_hash, | 2752 | hinfo.major_hash, hinfo.minor_hash, |
2765 | hinfo.minor_hash, | 2753 | de->name_len, de->name, |
2766 | le16_to_cpu(dx_root->dr_entries.de_num_used), | 2754 | le16_to_cpu(dx_root->dr_entries.de_num_used)); |
2767 | de->name_len, de->name); | ||
2768 | 2755 | ||
2769 | ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo, | 2756 | ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo, |
2770 | dirent_blk); | 2757 | dirent_blk); |
@@ -3235,7 +3222,6 @@ static int ocfs2_do_extend_dir(struct super_block *sb, | |||
3235 | bail: | 3222 | bail: |
3236 | if (did_quota && status < 0) | 3223 | if (did_quota && status < 0) |
3237 | dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); | 3224 | dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); |
3238 | mlog_exit(status); | ||
3239 | return status; | 3225 | return status; |
3240 | } | 3226 | } |
3241 | 3227 | ||
@@ -3270,8 +3256,6 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, | |||
3270 | struct ocfs2_extent_tree et; | 3256 | struct ocfs2_extent_tree et; |
3271 | struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; | 3257 | struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; |
3272 | 3258 | ||
3273 | mlog_entry_void(); | ||
3274 | |||
3275 | if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { | 3259 | if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { |
3276 | /* | 3260 | /* |
3277 | * This would be a code error as an inline directory should | 3261 | * This would be a code error as an inline directory should |
@@ -3320,8 +3304,8 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, | |||
3320 | down_write(&OCFS2_I(dir)->ip_alloc_sem); | 3304 | down_write(&OCFS2_I(dir)->ip_alloc_sem); |
3321 | drop_alloc_sem = 1; | 3305 | drop_alloc_sem = 1; |
3322 | dir_i_size = i_size_read(dir); | 3306 | dir_i_size = i_size_read(dir); |
3323 | mlog(0, "extending dir %llu (i_size = %lld)\n", | 3307 | trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno, |
3324 | (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size); | 3308 | dir_i_size); |
3325 | 3309 | ||
3326 | /* dir->i_size is always block aligned. */ | 3310 | /* dir->i_size is always block aligned. */ |
3327 | spin_lock(&OCFS2_I(dir)->ip_lock); | 3311 | spin_lock(&OCFS2_I(dir)->ip_lock); |
@@ -3436,7 +3420,6 @@ bail: | |||
3436 | 3420 | ||
3437 | brelse(new_bh); | 3421 | brelse(new_bh); |
3438 | 3422 | ||
3439 | mlog_exit(status); | ||
3440 | return status; | 3423 | return status; |
3441 | } | 3424 | } |
3442 | 3425 | ||
@@ -3583,8 +3566,9 @@ next: | |||
3583 | status = 0; | 3566 | status = 0; |
3584 | bail: | 3567 | bail: |
3585 | brelse(bh); | 3568 | brelse(bh); |
3569 | if (status) | ||
3570 | mlog_errno(status); | ||
3586 | 3571 | ||
3587 | mlog_exit(status); | ||
3588 | return status; | 3572 | return status; |
3589 | } | 3573 | } |
3590 | 3574 | ||
@@ -3815,9 +3799,9 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, | |||
3815 | struct ocfs2_dx_root_block *dx_root; | 3799 | struct ocfs2_dx_root_block *dx_root; |
3816 | struct ocfs2_dx_leaf *tmp_dx_leaf = NULL; | 3800 | struct ocfs2_dx_leaf *tmp_dx_leaf = NULL; |
3817 | 3801 | ||
3818 | mlog(0, "DX Dir: %llu, rebalance leaf leaf_blkno: %llu insert: %u\n", | 3802 | trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno, |
3819 | (unsigned long long)OCFS2_I(dir)->ip_blkno, | 3803 | (unsigned long long)leaf_blkno, |
3820 | (unsigned long long)leaf_blkno, insert_hash); | 3804 | insert_hash); |
3821 | 3805 | ||
3822 | ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); | 3806 | ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); |
3823 | 3807 | ||
@@ -3897,8 +3881,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, | |||
3897 | goto out_commit; | 3881 | goto out_commit; |
3898 | } | 3882 | } |
3899 | 3883 | ||
3900 | mlog(0, "Split leaf (%u) at %u, insert major hash is %u\n", | 3884 | trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash); |
3901 | leaf_cpos, split_hash, insert_hash); | ||
3902 | 3885 | ||
3903 | /* | 3886 | /* |
3904 | * We have to carefully order operations here. There are items | 3887 | * We have to carefully order operations here. There are items |
@@ -4355,8 +4338,8 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb, | |||
4355 | unsigned int blocks_wanted = 1; | 4338 | unsigned int blocks_wanted = 1; |
4356 | struct buffer_head *bh = NULL; | 4339 | struct buffer_head *bh = NULL; |
4357 | 4340 | ||
4358 | mlog(0, "getting ready to insert namelen %d into dir %llu\n", | 4341 | trace_ocfs2_prepare_dir_for_insert( |
4359 | namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno); | 4342 | (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen); |
4360 | 4343 | ||
4361 | if (!namelen) { | 4344 | if (!namelen) { |
4362 | ret = -EINVAL; | 4345 | ret = -EINVAL; |
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index 9f30491e5e88..29a886d1e82c 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c | |||
@@ -128,8 +128,8 @@ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, | |||
128 | 128 | ||
129 | assert_spin_locked(&res->spinlock); | 129 | assert_spin_locked(&res->spinlock); |
130 | 130 | ||
131 | mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n", | 131 | mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n", |
132 | lock->ml.type, lock->ml.convert_type, type); | 132 | lock->ml.type, lock->ml.convert_type, type); |
133 | 133 | ||
134 | spin_lock(&lock->spinlock); | 134 | spin_lock(&lock->spinlock); |
135 | 135 | ||
@@ -353,7 +353,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, | |||
353 | struct kvec vec[2]; | 353 | struct kvec vec[2]; |
354 | size_t veclen = 1; | 354 | size_t veclen = 1; |
355 | 355 | ||
356 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | 356 | mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); |
357 | 357 | ||
358 | memset(&convert, 0, sizeof(struct dlm_convert_lock)); | 358 | memset(&convert, 0, sizeof(struct dlm_convert_lock)); |
359 | convert.node_idx = dlm->node_num; | 359 | convert.node_idx = dlm->node_num; |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 7e38a072d720..7540a492eaba 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -188,7 +188,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, | |||
188 | struct hlist_head *bucket; | 188 | struct hlist_head *bucket; |
189 | struct hlist_node *list; | 189 | struct hlist_node *list; |
190 | 190 | ||
191 | mlog_entry("%.*s\n", len, name); | 191 | mlog(0, "%.*s\n", len, name); |
192 | 192 | ||
193 | assert_spin_locked(&dlm->spinlock); | 193 | assert_spin_locked(&dlm->spinlock); |
194 | 194 | ||
@@ -222,7 +222,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, | |||
222 | { | 222 | { |
223 | struct dlm_lock_resource *res = NULL; | 223 | struct dlm_lock_resource *res = NULL; |
224 | 224 | ||
225 | mlog_entry("%.*s\n", len, name); | 225 | mlog(0, "%.*s\n", len, name); |
226 | 226 | ||
227 | assert_spin_locked(&dlm->spinlock); | 227 | assert_spin_locked(&dlm->spinlock); |
228 | 228 | ||
@@ -531,7 +531,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, | |||
531 | unsigned int node; | 531 | unsigned int node; |
532 | struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; | 532 | struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; |
533 | 533 | ||
534 | mlog_entry("%p %u %p", msg, len, data); | 534 | mlog(0, "%p %u %p", msg, len, data); |
535 | 535 | ||
536 | if (!dlm_grab(dlm)) | 536 | if (!dlm_grab(dlm)) |
537 | return 0; | 537 | return 0; |
@@ -926,9 +926,10 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, | |||
926 | } | 926 | } |
927 | 927 | ||
928 | static int dlm_match_regions(struct dlm_ctxt *dlm, | 928 | static int dlm_match_regions(struct dlm_ctxt *dlm, |
929 | struct dlm_query_region *qr) | 929 | struct dlm_query_region *qr, |
930 | char *local, int locallen) | ||
930 | { | 931 | { |
931 | char *local = NULL, *remote = qr->qr_regions; | 932 | char *remote = qr->qr_regions; |
932 | char *l, *r; | 933 | char *l, *r; |
933 | int localnr, i, j, foundit; | 934 | int localnr, i, j, foundit; |
934 | int status = 0; | 935 | int status = 0; |
@@ -957,13 +958,8 @@ static int dlm_match_regions(struct dlm_ctxt *dlm, | |||
957 | r += O2HB_MAX_REGION_NAME_LEN; | 958 | r += O2HB_MAX_REGION_NAME_LEN; |
958 | } | 959 | } |
959 | 960 | ||
960 | local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC); | 961 | localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN); |
961 | if (!local) { | 962 | localnr = o2hb_get_all_regions(local, (u8)localnr); |
962 | status = -ENOMEM; | ||
963 | goto bail; | ||
964 | } | ||
965 | |||
966 | localnr = o2hb_get_all_regions(local, O2NM_MAX_REGIONS); | ||
967 | 963 | ||
968 | /* compare local regions with remote */ | 964 | /* compare local regions with remote */ |
969 | l = local; | 965 | l = local; |
@@ -1012,8 +1008,6 @@ static int dlm_match_regions(struct dlm_ctxt *dlm, | |||
1012 | } | 1008 | } |
1013 | 1009 | ||
1014 | bail: | 1010 | bail: |
1015 | kfree(local); | ||
1016 | |||
1017 | return status; | 1011 | return status; |
1018 | } | 1012 | } |
1019 | 1013 | ||
@@ -1075,6 +1069,7 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, | |||
1075 | { | 1069 | { |
1076 | struct dlm_query_region *qr; | 1070 | struct dlm_query_region *qr; |
1077 | struct dlm_ctxt *dlm = NULL; | 1071 | struct dlm_ctxt *dlm = NULL; |
1072 | char *local = NULL; | ||
1078 | int status = 0; | 1073 | int status = 0; |
1079 | int locked = 0; | 1074 | int locked = 0; |
1080 | 1075 | ||
@@ -1083,6 +1078,13 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, | |||
1083 | mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node, | 1078 | mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node, |
1084 | qr->qr_domain); | 1079 | qr->qr_domain); |
1085 | 1080 | ||
1081 | /* buffer used in dlm_match_regions() */ | ||
1082 | local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL); | ||
1083 | if (!local) { | ||
1084 | status = -ENOMEM; | ||
1085 | goto bail; | ||
1086 | } | ||
1087 | |||
1086 | status = -EINVAL; | 1088 | status = -EINVAL; |
1087 | 1089 | ||
1088 | spin_lock(&dlm_domain_lock); | 1090 | spin_lock(&dlm_domain_lock); |
@@ -1112,13 +1114,15 @@ static int dlm_query_region_handler(struct o2net_msg *msg, u32 len, | |||
1112 | goto bail; | 1114 | goto bail; |
1113 | } | 1115 | } |
1114 | 1116 | ||
1115 | status = dlm_match_regions(dlm, qr); | 1117 | status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions)); |
1116 | 1118 | ||
1117 | bail: | 1119 | bail: |
1118 | if (locked) | 1120 | if (locked) |
1119 | spin_unlock(&dlm->spinlock); | 1121 | spin_unlock(&dlm->spinlock); |
1120 | spin_unlock(&dlm_domain_lock); | 1122 | spin_unlock(&dlm_domain_lock); |
1121 | 1123 | ||
1124 | kfree(local); | ||
1125 | |||
1122 | return status; | 1126 | return status; |
1123 | } | 1127 | } |
1124 | 1128 | ||
@@ -1553,7 +1557,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | |||
1553 | struct domain_join_ctxt *ctxt; | 1557 | struct domain_join_ctxt *ctxt; |
1554 | enum dlm_query_join_response_code response = JOIN_DISALLOW; | 1558 | enum dlm_query_join_response_code response = JOIN_DISALLOW; |
1555 | 1559 | ||
1556 | mlog_entry("%p", dlm); | 1560 | mlog(0, "%p", dlm); |
1557 | 1561 | ||
1558 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); | 1562 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
1559 | if (!ctxt) { | 1563 | if (!ctxt) { |
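The dlmdomain.c hunks above move the scratch buffer for heartbeat-region comparison out of dlm_match_regions() and into dlm_query_region_handler(), so it can be allocated with GFP_KERNEL before dlm_domain_lock and dlm->spinlock are taken instead of with GFP_ATOMIC under the spinlock. A minimal sketch of that allocate-outside-the-lock pattern is below; example_handler(), compare_regions() and example_lock are illustrative names, not ocfs2 symbols, while kmalloc/kfree and the spinlock calls are the real kernel API.

/*
 * Allocate-before-locking sketch: the scratch buffer is obtained with
 * GFP_KERNEL while no locks are held, handed to the non-sleeping
 * worker that runs under the spinlock, and freed after unlock.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static DEFINE_SPINLOCK(example_lock);

/* Runs with example_lock held, so it must not sleep or allocate. */
static int compare_regions(char *scratch, size_t len)
{
	memset(scratch, 0, len);
	return 0;
}

static int example_handler(size_t scratch_len)
{
	char *scratch;
	int ret;

	scratch = kmalloc(scratch_len, GFP_KERNEL);	/* may sleep: no locks yet */
	if (!scratch)
		return -ENOMEM;

	spin_lock(&example_lock);
	ret = compare_regions(scratch, scratch_len);
	spin_unlock(&example_lock);

	kfree(scratch);
	return ret;
}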
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 7009292aac5a..8d39e0fd66f7 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -128,7 +128,7 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, | |||
128 | int call_ast = 0, kick_thread = 0; | 128 | int call_ast = 0, kick_thread = 0; |
129 | enum dlm_status status = DLM_NORMAL; | 129 | enum dlm_status status = DLM_NORMAL; |
130 | 130 | ||
131 | mlog_entry("type=%d\n", lock->ml.type); | 131 | mlog(0, "type=%d\n", lock->ml.type); |
132 | 132 | ||
133 | spin_lock(&res->spinlock); | 133 | spin_lock(&res->spinlock); |
134 | /* if called from dlm_create_lock_handler, need to | 134 | /* if called from dlm_create_lock_handler, need to |
@@ -227,8 +227,8 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, | |||
227 | enum dlm_status status = DLM_DENIED; | 227 | enum dlm_status status = DLM_DENIED; |
228 | int lockres_changed = 1; | 228 | int lockres_changed = 1; |
229 | 229 | ||
230 | mlog_entry("type=%d\n", lock->ml.type); | 230 | mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n", |
231 | mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len, | 231 | lock->ml.type, res->lockname.len, |
232 | res->lockname.name, flags); | 232 | res->lockname.name, flags); |
233 | 233 | ||
234 | spin_lock(&res->spinlock); | 234 | spin_lock(&res->spinlock); |
@@ -308,8 +308,6 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, | |||
308 | int tmpret, status = 0; | 308 | int tmpret, status = 0; |
309 | enum dlm_status ret; | 309 | enum dlm_status ret; |
310 | 310 | ||
311 | mlog_entry_void(); | ||
312 | |||
313 | memset(&create, 0, sizeof(create)); | 311 | memset(&create, 0, sizeof(create)); |
314 | create.node_idx = dlm->node_num; | 312 | create.node_idx = dlm->node_num; |
315 | create.requested_type = lock->ml.type; | 313 | create.requested_type = lock->ml.type; |
@@ -477,8 +475,6 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data, | |||
477 | 475 | ||
478 | BUG_ON(!dlm); | 476 | BUG_ON(!dlm); |
479 | 477 | ||
480 | mlog_entry_void(); | ||
481 | |||
482 | if (!dlm_grab(dlm)) | 478 | if (!dlm_grab(dlm)) |
483 | return DLM_REJECTED; | 479 | return DLM_REJECTED; |
484 | 480 | ||
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 59f0f6bdfc62..9d67610dfc74 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -426,8 +426,6 @@ static void dlm_mle_release(struct kref *kref) | |||
426 | struct dlm_master_list_entry *mle; | 426 | struct dlm_master_list_entry *mle; |
427 | struct dlm_ctxt *dlm; | 427 | struct dlm_ctxt *dlm; |
428 | 428 | ||
429 | mlog_entry_void(); | ||
430 | |||
431 | mle = container_of(kref, struct dlm_master_list_entry, mle_refs); | 429 | mle = container_of(kref, struct dlm_master_list_entry, mle_refs); |
432 | dlm = mle->dlm; | 430 | dlm = mle->dlm; |
433 | 431 | ||
@@ -3120,8 +3118,6 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm, | |||
3120 | 3118 | ||
3121 | *oldmle = NULL; | 3119 | *oldmle = NULL; |
3122 | 3120 | ||
3123 | mlog_entry_void(); | ||
3124 | |||
3125 | assert_spin_locked(&dlm->spinlock); | 3121 | assert_spin_locked(&dlm->spinlock); |
3126 | assert_spin_locked(&dlm->master_lock); | 3122 | assert_spin_locked(&dlm->master_lock); |
3127 | 3123 | ||
@@ -3261,7 +3257,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) | |||
3261 | struct hlist_node *list; | 3257 | struct hlist_node *list; |
3262 | unsigned int i; | 3258 | unsigned int i; |
3263 | 3259 | ||
3264 | mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); | 3260 | mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); |
3265 | top: | 3261 | top: |
3266 | assert_spin_locked(&dlm->spinlock); | 3262 | assert_spin_locked(&dlm->spinlock); |
3267 | 3263 | ||
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index aaaffbcbe916..f1beb6fc254d 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -727,7 +727,6 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | |||
727 | if (destroy) | 727 | if (destroy) |
728 | dlm_destroy_recovery_area(dlm, dead_node); | 728 | dlm_destroy_recovery_area(dlm, dead_node); |
729 | 729 | ||
730 | mlog_exit(status); | ||
731 | return status; | 730 | return status; |
732 | } | 731 | } |
733 | 732 | ||
@@ -1496,9 +1495,9 @@ leave: | |||
1496 | kfree(buf); | 1495 | kfree(buf); |
1497 | if (item) | 1496 | if (item) |
1498 | kfree(item); | 1497 | kfree(item); |
1498 | mlog_errno(ret); | ||
1499 | } | 1499 | } |
1500 | 1500 | ||
1501 | mlog_exit(ret); | ||
1502 | return ret; | 1501 | return ret; |
1503 | } | 1502 | } |
1504 | 1503 | ||
@@ -1567,7 +1566,6 @@ leave: | |||
1567 | dlm_lockres_put(res); | 1566 | dlm_lockres_put(res); |
1568 | } | 1567 | } |
1569 | kfree(data); | 1568 | kfree(data); |
1570 | mlog_exit(ret); | ||
1571 | } | 1569 | } |
1572 | 1570 | ||
1573 | 1571 | ||
@@ -1986,7 +1984,6 @@ leave: | |||
1986 | dlm_lock_put(newlock); | 1984 | dlm_lock_put(newlock); |
1987 | } | 1985 | } |
1988 | 1986 | ||
1989 | mlog_exit(ret); | ||
1990 | return ret; | 1987 | return ret; |
1991 | } | 1988 | } |
1992 | 1989 | ||
@@ -2083,8 +2080,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | |||
2083 | struct hlist_head *bucket; | 2080 | struct hlist_head *bucket; |
2084 | struct dlm_lock_resource *res, *next; | 2081 | struct dlm_lock_resource *res, *next; |
2085 | 2082 | ||
2086 | mlog_entry_void(); | ||
2087 | |||
2088 | assert_spin_locked(&dlm->spinlock); | 2083 | assert_spin_locked(&dlm->spinlock); |
2089 | 2084 | ||
2090 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { | 2085 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { |
@@ -2607,8 +2602,6 @@ static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) | |||
2607 | int nodenum; | 2602 | int nodenum; |
2608 | int status; | 2603 | int status; |
2609 | 2604 | ||
2610 | mlog_entry("%u\n", dead_node); | ||
2611 | |||
2612 | mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); | 2605 | mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); |
2613 | 2606 | ||
2614 | spin_lock(&dlm->spinlock); | 2607 | spin_lock(&dlm->spinlock); |
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 817287c6a6db..850aa7e87537 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c | |||
@@ -317,7 +317,7 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, | |||
317 | struct kvec vec[2]; | 317 | struct kvec vec[2]; |
318 | size_t veclen = 1; | 318 | size_t veclen = 1; |
319 | 319 | ||
320 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | 320 | mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); |
321 | 321 | ||
322 | if (owner == dlm->node_num) { | 322 | if (owner == dlm->node_num) { |
323 | /* ended up trying to contact ourself. this means | 323 | /* ended up trying to contact ourself. this means |
@@ -588,8 +588,6 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb, | |||
588 | struct dlm_lock *lock = NULL; | 588 | struct dlm_lock *lock = NULL; |
589 | int call_ast, is_master; | 589 | int call_ast, is_master; |
590 | 590 | ||
591 | mlog_entry_void(); | ||
592 | |||
593 | if (!lksb) { | 591 | if (!lksb) { |
594 | dlm_error(DLM_BADARGS); | 592 | dlm_error(DLM_BADARGS); |
595 | return DLM_BADARGS; | 593 | return DLM_BADARGS; |
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index e8d94d722ecb..7642d7ca73e5 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
@@ -64,7 +64,7 @@ struct ocfs2_mask_waiter { | |||
64 | unsigned long mw_mask; | 64 | unsigned long mw_mask; |
65 | unsigned long mw_goal; | 65 | unsigned long mw_goal; |
66 | #ifdef CONFIG_OCFS2_FS_STATS | 66 | #ifdef CONFIG_OCFS2_FS_STATS |
67 | unsigned long long mw_lock_start; | 67 | ktime_t mw_lock_start; |
68 | #endif | 68 | #endif |
69 | }; | 69 | }; |
70 | 70 | ||
@@ -397,8 +397,6 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type, | |||
397 | { | 397 | { |
398 | int len; | 398 | int len; |
399 | 399 | ||
400 | mlog_entry_void(); | ||
401 | |||
402 | BUG_ON(type >= OCFS2_NUM_LOCK_TYPES); | 400 | BUG_ON(type >= OCFS2_NUM_LOCK_TYPES); |
403 | 401 | ||
404 | len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x", | 402 | len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x", |
@@ -408,8 +406,6 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type, | |||
408 | BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1)); | 406 | BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1)); |
409 | 407 | ||
410 | mlog(0, "built lock resource with name: %s\n", name); | 408 | mlog(0, "built lock resource with name: %s\n", name); |
411 | |||
412 | mlog_exit_void(); | ||
413 | } | 409 | } |
414 | 410 | ||
415 | static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock); | 411 | static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock); |
@@ -435,44 +431,41 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res) | |||
435 | #ifdef CONFIG_OCFS2_FS_STATS | 431 | #ifdef CONFIG_OCFS2_FS_STATS |
436 | static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) | 432 | static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) |
437 | { | 433 | { |
438 | res->l_lock_num_prmode = 0; | ||
439 | res->l_lock_num_prmode_failed = 0; | ||
440 | res->l_lock_total_prmode = 0; | ||
441 | res->l_lock_max_prmode = 0; | ||
442 | res->l_lock_num_exmode = 0; | ||
443 | res->l_lock_num_exmode_failed = 0; | ||
444 | res->l_lock_total_exmode = 0; | ||
445 | res->l_lock_max_exmode = 0; | ||
446 | res->l_lock_refresh = 0; | 434 | res->l_lock_refresh = 0; |
435 | memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats)); | ||
436 | memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats)); | ||
447 | } | 437 | } |
448 | 438 | ||
449 | static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, | 439 | static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, |
450 | struct ocfs2_mask_waiter *mw, int ret) | 440 | struct ocfs2_mask_waiter *mw, int ret) |
451 | { | 441 | { |
452 | unsigned long long *num, *sum; | 442 | u32 usec; |
453 | unsigned int *max, *failed; | 443 | ktime_t kt; |
454 | struct timespec ts = current_kernel_time(); | 444 | struct ocfs2_lock_stats *stats; |
455 | unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start; | 445 | |
456 | 446 | if (level == LKM_PRMODE) | |
457 | if (level == LKM_PRMODE) { | 447 | stats = &res->l_lock_prmode; |
458 | num = &res->l_lock_num_prmode; | 448 | else if (level == LKM_EXMODE) |
459 | sum = &res->l_lock_total_prmode; | 449 | stats = &res->l_lock_exmode; |
460 | max = &res->l_lock_max_prmode; | 450 | else |
461 | failed = &res->l_lock_num_prmode_failed; | ||
462 | } else if (level == LKM_EXMODE) { | ||
463 | num = &res->l_lock_num_exmode; | ||
464 | sum = &res->l_lock_total_exmode; | ||
465 | max = &res->l_lock_max_exmode; | ||
466 | failed = &res->l_lock_num_exmode_failed; | ||
467 | } else | ||
468 | return; | 451 | return; |
469 | 452 | ||
470 | (*num)++; | 453 | kt = ktime_sub(ktime_get(), mw->mw_lock_start); |
471 | (*sum) += time; | 454 | usec = ktime_to_us(kt); |
472 | if (time > *max) | 455 | |
473 | *max = time; | 456 | stats->ls_gets++; |
457 | stats->ls_total += ktime_to_ns(kt); | ||
458 | /* overflow */ | ||
459 | if (unlikely(stats->ls_gets == 0)) { | ||
460 | stats->ls_gets++; | ||
461 | stats->ls_total = ktime_to_ns(kt); | ||
462 | } | ||
463 | |||
464 | if (stats->ls_max < usec) | ||
465 | stats->ls_max = usec; | ||
466 | |||
474 | if (ret) | 467 | if (ret) |
475 | (*failed)++; | 468 | stats->ls_fail++; |
476 | } | 469 | } |
477 | 470 | ||
478 | static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) | 471 | static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) |
@@ -482,8 +475,7 @@ static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) | |||
482 | 475 | ||
483 | static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) | 476 | static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) |
484 | { | 477 | { |
485 | struct timespec ts = current_kernel_time(); | 478 | mw->mw_lock_start = ktime_get(); |
486 | mw->mw_lock_start = timespec_to_ns(&ts); | ||
487 | } | 479 | } |
488 | #else | 480 | #else |
489 | static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) | 481 | static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) |
@@ -729,8 +721,6 @@ void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres, | |||
729 | 721 | ||
730 | void ocfs2_lock_res_free(struct ocfs2_lock_res *res) | 722 | void ocfs2_lock_res_free(struct ocfs2_lock_res *res) |
731 | { | 723 | { |
732 | mlog_entry_void(); | ||
733 | |||
734 | if (!(res->l_flags & OCFS2_LOCK_INITIALIZED)) | 724 | if (!(res->l_flags & OCFS2_LOCK_INITIALIZED)) |
735 | return; | 725 | return; |
736 | 726 | ||
@@ -756,14 +746,11 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res) | |||
756 | memset(&res->l_lksb, 0, sizeof(res->l_lksb)); | 746 | memset(&res->l_lksb, 0, sizeof(res->l_lksb)); |
757 | 747 | ||
758 | res->l_flags = 0UL; | 748 | res->l_flags = 0UL; |
759 | mlog_exit_void(); | ||
760 | } | 749 | } |
761 | 750 | ||
762 | static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres, | 751 | static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres, |
763 | int level) | 752 | int level) |
764 | { | 753 | { |
765 | mlog_entry_void(); | ||
766 | |||
767 | BUG_ON(!lockres); | 754 | BUG_ON(!lockres); |
768 | 755 | ||
769 | switch(level) { | 756 | switch(level) { |
@@ -776,15 +763,11 @@ static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres, | |||
776 | default: | 763 | default: |
777 | BUG(); | 764 | BUG(); |
778 | } | 765 | } |
779 | |||
780 | mlog_exit_void(); | ||
781 | } | 766 | } |
782 | 767 | ||
783 | static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres, | 768 | static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres, |
784 | int level) | 769 | int level) |
785 | { | 770 | { |
786 | mlog_entry_void(); | ||
787 | |||
788 | BUG_ON(!lockres); | 771 | BUG_ON(!lockres); |
789 | 772 | ||
790 | switch(level) { | 773 | switch(level) { |
@@ -799,7 +782,6 @@ static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres, | |||
799 | default: | 782 | default: |
800 | BUG(); | 783 | BUG(); |
801 | } | 784 | } |
802 | mlog_exit_void(); | ||
803 | } | 785 | } |
804 | 786 | ||
805 | /* WARNING: This function lives in a world where the only three lock | 787 | /* WARNING: This function lives in a world where the only three lock |
@@ -846,8 +828,6 @@ static void lockres_clear_flags(struct ocfs2_lock_res *lockres, | |||
846 | 828 | ||
847 | static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres) | 829 | static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres) |
848 | { | 830 | { |
849 | mlog_entry_void(); | ||
850 | |||
851 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); | 831 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); |
852 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED)); | 832 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED)); |
853 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); | 833 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); |
@@ -860,14 +840,10 @@ static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res | |||
860 | lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); | 840 | lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); |
861 | } | 841 | } |
862 | lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); | 842 | lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); |
863 | |||
864 | mlog_exit_void(); | ||
865 | } | 843 | } |
866 | 844 | ||
867 | static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres) | 845 | static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres) |
868 | { | 846 | { |
869 | mlog_entry_void(); | ||
870 | |||
871 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); | 847 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); |
872 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED)); | 848 | BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED)); |
873 | 849 | ||
@@ -889,14 +865,10 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo | |||
889 | lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); | 865 | lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); |
890 | 866 | ||
891 | lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); | 867 | lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); |
892 | |||
893 | mlog_exit_void(); | ||
894 | } | 868 | } |
895 | 869 | ||
896 | static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres) | 870 | static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres) |
897 | { | 871 | { |
898 | mlog_entry_void(); | ||
899 | |||
900 | BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY))); | 872 | BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY))); |
901 | BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED); | 873 | BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED); |
902 | 874 | ||
@@ -908,15 +880,12 @@ static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *loc | |||
908 | lockres->l_level = lockres->l_requested; | 880 | lockres->l_level = lockres->l_requested; |
909 | lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED); | 881 | lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED); |
910 | lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); | 882 | lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); |
911 | |||
912 | mlog_exit_void(); | ||
913 | } | 883 | } |
914 | 884 | ||
915 | static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, | 885 | static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, |
916 | int level) | 886 | int level) |
917 | { | 887 | { |
918 | int needs_downconvert = 0; | 888 | int needs_downconvert = 0; |
919 | mlog_entry_void(); | ||
920 | 889 | ||
921 | assert_spin_locked(&lockres->l_lock); | 890 | assert_spin_locked(&lockres->l_lock); |
922 | 891 | ||
@@ -938,8 +907,7 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, | |||
938 | 907 | ||
939 | if (needs_downconvert) | 908 | if (needs_downconvert) |
940 | lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); | 909 | lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); |
941 | 910 | mlog(0, "needs_downconvert = %d\n", needs_downconvert); | |
942 | mlog_exit(needs_downconvert); | ||
943 | return needs_downconvert; | 911 | return needs_downconvert; |
944 | } | 912 | } |
945 | 913 | ||
@@ -1151,8 +1119,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error) | |||
1151 | struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); | 1119 | struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); |
1152 | unsigned long flags; | 1120 | unsigned long flags; |
1153 | 1121 | ||
1154 | mlog_entry_void(); | ||
1155 | |||
1156 | mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n", | 1122 | mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n", |
1157 | lockres->l_name, lockres->l_unlock_action); | 1123 | lockres->l_name, lockres->l_unlock_action); |
1158 | 1124 | ||
@@ -1162,7 +1128,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error) | |||
1162 | "unlock_action %d\n", error, lockres->l_name, | 1128 | "unlock_action %d\n", error, lockres->l_name, |
1163 | lockres->l_unlock_action); | 1129 | lockres->l_unlock_action); |
1164 | spin_unlock_irqrestore(&lockres->l_lock, flags); | 1130 | spin_unlock_irqrestore(&lockres->l_lock, flags); |
1165 | mlog_exit_void(); | ||
1166 | return; | 1131 | return; |
1167 | } | 1132 | } |
1168 | 1133 | ||
@@ -1186,8 +1151,6 @@ static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error) | |||
1186 | lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; | 1151 | lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; |
1187 | wake_up(&lockres->l_event); | 1152 | wake_up(&lockres->l_event); |
1188 | spin_unlock_irqrestore(&lockres->l_lock, flags); | 1153 | spin_unlock_irqrestore(&lockres->l_lock, flags); |
1189 | |||
1190 | mlog_exit_void(); | ||
1191 | } | 1154 | } |
1192 | 1155 | ||
1193 | /* | 1156 | /* |
@@ -1233,7 +1196,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, | |||
1233 | { | 1196 | { |
1234 | unsigned long flags; | 1197 | unsigned long flags; |
1235 | 1198 | ||
1236 | mlog_entry_void(); | ||
1237 | spin_lock_irqsave(&lockres->l_lock, flags); | 1199 | spin_lock_irqsave(&lockres->l_lock, flags); |
1238 | lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); | 1200 | lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); |
1239 | lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); | 1201 | lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); |
@@ -1244,7 +1206,6 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, | |||
1244 | spin_unlock_irqrestore(&lockres->l_lock, flags); | 1206 | spin_unlock_irqrestore(&lockres->l_lock, flags); |
1245 | 1207 | ||
1246 | wake_up(&lockres->l_event); | 1208 | wake_up(&lockres->l_event); |
1247 | mlog_exit_void(); | ||
1248 | } | 1209 | } |
1249 | 1210 | ||
1250 | /* Note: If we detect another process working on the lock (i.e., | 1211 | /* Note: If we detect another process working on the lock (i.e., |
@@ -1260,8 +1221,6 @@ static int ocfs2_lock_create(struct ocfs2_super *osb, | |||
1260 | unsigned long flags; | 1221 | unsigned long flags; |
1261 | unsigned int gen; | 1222 | unsigned int gen; |
1262 | 1223 | ||
1263 | mlog_entry_void(); | ||
1264 | |||
1265 | mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level, | 1224 | mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level, |
1266 | dlm_flags); | 1225 | dlm_flags); |
1267 | 1226 | ||
@@ -1293,7 +1252,6 @@ static int ocfs2_lock_create(struct ocfs2_super *osb, | |||
1293 | mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name); | 1252 | mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name); |
1294 | 1253 | ||
1295 | bail: | 1254 | bail: |
1296 | mlog_exit(ret); | ||
1297 | return ret; | 1255 | return ret; |
1298 | } | 1256 | } |
1299 | 1257 | ||
@@ -1416,8 +1374,6 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, | |||
1416 | unsigned int gen; | 1374 | unsigned int gen; |
1417 | int noqueue_attempted = 0; | 1375 | int noqueue_attempted = 0; |
1418 | 1376 | ||
1419 | mlog_entry_void(); | ||
1420 | |||
1421 | ocfs2_init_mask_waiter(&mw); | 1377 | ocfs2_init_mask_waiter(&mw); |
1422 | 1378 | ||
1423 | if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) | 1379 | if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) |
@@ -1583,7 +1539,6 @@ out: | |||
1583 | caller_ip); | 1539 | caller_ip); |
1584 | } | 1540 | } |
1585 | #endif | 1541 | #endif |
1586 | mlog_exit(ret); | ||
1587 | return ret; | 1542 | return ret; |
1588 | } | 1543 | } |
1589 | 1544 | ||
@@ -1605,7 +1560,6 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb, | |||
1605 | { | 1560 | { |
1606 | unsigned long flags; | 1561 | unsigned long flags; |
1607 | 1562 | ||
1608 | mlog_entry_void(); | ||
1609 | spin_lock_irqsave(&lockres->l_lock, flags); | 1563 | spin_lock_irqsave(&lockres->l_lock, flags); |
1610 | ocfs2_dec_holders(lockres, level); | 1564 | ocfs2_dec_holders(lockres, level); |
1611 | ocfs2_downconvert_on_unlock(osb, lockres); | 1565 | ocfs2_downconvert_on_unlock(osb, lockres); |
@@ -1614,7 +1568,6 @@ static void __ocfs2_cluster_unlock(struct ocfs2_super *osb, | |||
1614 | if (lockres->l_lockdep_map.key != NULL) | 1568 | if (lockres->l_lockdep_map.key != NULL) |
1615 | rwsem_release(&lockres->l_lockdep_map, 1, caller_ip); | 1569 | rwsem_release(&lockres->l_lockdep_map, 1, caller_ip); |
1616 | #endif | 1570 | #endif |
1617 | mlog_exit_void(); | ||
1618 | } | 1571 | } |
1619 | 1572 | ||
1620 | static int ocfs2_create_new_lock(struct ocfs2_super *osb, | 1573 | static int ocfs2_create_new_lock(struct ocfs2_super *osb, |
@@ -1648,8 +1601,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode) | |||
1648 | BUG_ON(!inode); | 1601 | BUG_ON(!inode); |
1649 | BUG_ON(!ocfs2_inode_is_new(inode)); | 1602 | BUG_ON(!ocfs2_inode_is_new(inode)); |
1650 | 1603 | ||
1651 | mlog_entry_void(); | ||
1652 | |||
1653 | mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); | 1604 | mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); |
1654 | 1605 | ||
1655 | /* NOTE: That we don't increment any of the holder counts, nor | 1606 | /* NOTE: That we don't increment any of the holder counts, nor |
@@ -1683,7 +1634,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode) | |||
1683 | } | 1634 | } |
1684 | 1635 | ||
1685 | bail: | 1636 | bail: |
1686 | mlog_exit(ret); | ||
1687 | return ret; | 1637 | return ret; |
1688 | } | 1638 | } |
1689 | 1639 | ||
@@ -1695,16 +1645,12 @@ int ocfs2_rw_lock(struct inode *inode, int write) | |||
1695 | 1645 | ||
1696 | BUG_ON(!inode); | 1646 | BUG_ON(!inode); |
1697 | 1647 | ||
1698 | mlog_entry_void(); | ||
1699 | |||
1700 | mlog(0, "inode %llu take %s RW lock\n", | 1648 | mlog(0, "inode %llu take %s RW lock\n", |
1701 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 1649 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1702 | write ? "EXMODE" : "PRMODE"); | 1650 | write ? "EXMODE" : "PRMODE"); |
1703 | 1651 | ||
1704 | if (ocfs2_mount_local(osb)) { | 1652 | if (ocfs2_mount_local(osb)) |
1705 | mlog_exit(0); | ||
1706 | return 0; | 1653 | return 0; |
1707 | } | ||
1708 | 1654 | ||
1709 | lockres = &OCFS2_I(inode)->ip_rw_lockres; | 1655 | lockres = &OCFS2_I(inode)->ip_rw_lockres; |
1710 | 1656 | ||
@@ -1715,7 +1661,6 @@ int ocfs2_rw_lock(struct inode *inode, int write) | |||
1715 | if (status < 0) | 1661 | if (status < 0) |
1716 | mlog_errno(status); | 1662 | mlog_errno(status); |
1717 | 1663 | ||
1718 | mlog_exit(status); | ||
1719 | return status; | 1664 | return status; |
1720 | } | 1665 | } |
1721 | 1666 | ||
@@ -1725,16 +1670,12 @@ void ocfs2_rw_unlock(struct inode *inode, int write) | |||
1725 | struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres; | 1670 | struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres; |
1726 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 1671 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
1727 | 1672 | ||
1728 | mlog_entry_void(); | ||
1729 | |||
1730 | mlog(0, "inode %llu drop %s RW lock\n", | 1673 | mlog(0, "inode %llu drop %s RW lock\n", |
1731 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 1674 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1732 | write ? "EXMODE" : "PRMODE"); | 1675 | write ? "EXMODE" : "PRMODE"); |
1733 | 1676 | ||
1734 | if (!ocfs2_mount_local(osb)) | 1677 | if (!ocfs2_mount_local(osb)) |
1735 | ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); | 1678 | ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); |
1736 | |||
1737 | mlog_exit_void(); | ||
1738 | } | 1679 | } |
1739 | 1680 | ||
1740 | /* | 1681 | /* |
@@ -1748,8 +1689,6 @@ int ocfs2_open_lock(struct inode *inode) | |||
1748 | 1689 | ||
1749 | BUG_ON(!inode); | 1690 | BUG_ON(!inode); |
1750 | 1691 | ||
1751 | mlog_entry_void(); | ||
1752 | |||
1753 | mlog(0, "inode %llu take PRMODE open lock\n", | 1692 | mlog(0, "inode %llu take PRMODE open lock\n", |
1754 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | 1693 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
1755 | 1694 | ||
@@ -1764,7 +1703,6 @@ int ocfs2_open_lock(struct inode *inode) | |||
1764 | mlog_errno(status); | 1703 | mlog_errno(status); |
1765 | 1704 | ||
1766 | out: | 1705 | out: |
1767 | mlog_exit(status); | ||
1768 | return status; | 1706 | return status; |
1769 | } | 1707 | } |
1770 | 1708 | ||
@@ -1776,8 +1714,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write) | |||
1776 | 1714 | ||
1777 | BUG_ON(!inode); | 1715 | BUG_ON(!inode); |
1778 | 1716 | ||
1779 | mlog_entry_void(); | ||
1780 | |||
1781 | mlog(0, "inode %llu try to take %s open lock\n", | 1717 | mlog(0, "inode %llu try to take %s open lock\n", |
1782 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 1718 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1783 | write ? "EXMODE" : "PRMODE"); | 1719 | write ? "EXMODE" : "PRMODE"); |
@@ -1799,7 +1735,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write) | |||
1799 | level, DLM_LKF_NOQUEUE, 0); | 1735 | level, DLM_LKF_NOQUEUE, 0); |
1800 | 1736 | ||
1801 | out: | 1737 | out: |
1802 | mlog_exit(status); | ||
1803 | return status; | 1738 | return status; |
1804 | } | 1739 | } |
1805 | 1740 | ||
@@ -1811,8 +1746,6 @@ void ocfs2_open_unlock(struct inode *inode) | |||
1811 | struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres; | 1746 | struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres; |
1812 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 1747 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
1813 | 1748 | ||
1814 | mlog_entry_void(); | ||
1815 | |||
1816 | mlog(0, "inode %llu drop open lock\n", | 1749 | mlog(0, "inode %llu drop open lock\n", |
1817 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | 1750 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
1818 | 1751 | ||
@@ -1827,7 +1760,7 @@ void ocfs2_open_unlock(struct inode *inode) | |||
1827 | DLM_LOCK_EX); | 1760 | DLM_LOCK_EX); |
1828 | 1761 | ||
1829 | out: | 1762 | out: |
1830 | mlog_exit_void(); | 1763 | return; |
1831 | } | 1764 | } |
1832 | 1765 | ||
1833 | static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres, | 1766 | static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres, |
@@ -2043,8 +1976,6 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb, | |||
2043 | { | 1976 | { |
2044 | int kick = 0; | 1977 | int kick = 0; |
2045 | 1978 | ||
2046 | mlog_entry_void(); | ||
2047 | |||
2048 | /* If we know that another node is waiting on our lock, kick | 1979 | /* If we know that another node is waiting on our lock, kick |
2049 | * the downconvert thread * pre-emptively when we reach a release | 1980 | * the downconvert thread * pre-emptively when we reach a release |
2050 | * condition. */ | 1981 | * condition. */ |
@@ -2065,8 +1996,6 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb, | |||
2065 | 1996 | ||
2066 | if (kick) | 1997 | if (kick) |
2067 | ocfs2_wake_downconvert_thread(osb); | 1998 | ocfs2_wake_downconvert_thread(osb); |
2068 | |||
2069 | mlog_exit_void(); | ||
2070 | } | 1999 | } |
2071 | 2000 | ||
2072 | #define OCFS2_SEC_BITS 34 | 2001 | #define OCFS2_SEC_BITS 34 |
@@ -2095,8 +2024,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode) | |||
2095 | struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; | 2024 | struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; |
2096 | struct ocfs2_meta_lvb *lvb; | 2025 | struct ocfs2_meta_lvb *lvb; |
2097 | 2026 | ||
2098 | mlog_entry_void(); | ||
2099 | |||
2100 | lvb = ocfs2_dlm_lvb(&lockres->l_lksb); | 2027 | lvb = ocfs2_dlm_lvb(&lockres->l_lksb); |
2101 | 2028 | ||
2102 | /* | 2029 | /* |
@@ -2128,8 +2055,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode) | |||
2128 | 2055 | ||
2129 | out: | 2056 | out: |
2130 | mlog_meta_lvb(0, lockres); | 2057 | mlog_meta_lvb(0, lockres); |
2131 | |||
2132 | mlog_exit_void(); | ||
2133 | } | 2058 | } |
2134 | 2059 | ||
2135 | static void ocfs2_unpack_timespec(struct timespec *spec, | 2060 | static void ocfs2_unpack_timespec(struct timespec *spec, |
@@ -2145,8 +2070,6 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode) | |||
2145 | struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; | 2070 | struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; |
2146 | struct ocfs2_meta_lvb *lvb; | 2071 | struct ocfs2_meta_lvb *lvb; |
2147 | 2072 | ||
2148 | mlog_entry_void(); | ||
2149 | |||
2150 | mlog_meta_lvb(0, lockres); | 2073 | mlog_meta_lvb(0, lockres); |
2151 | 2074 | ||
2152 | lvb = ocfs2_dlm_lvb(&lockres->l_lksb); | 2075 | lvb = ocfs2_dlm_lvb(&lockres->l_lksb); |
@@ -2177,8 +2100,6 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode) | |||
2177 | ocfs2_unpack_timespec(&inode->i_ctime, | 2100 | ocfs2_unpack_timespec(&inode->i_ctime, |
2178 | be64_to_cpu(lvb->lvb_ictime_packed)); | 2101 | be64_to_cpu(lvb->lvb_ictime_packed)); |
2179 | spin_unlock(&oi->ip_lock); | 2102 | spin_unlock(&oi->ip_lock); |
2180 | |||
2181 | mlog_exit_void(); | ||
2182 | } | 2103 | } |
2183 | 2104 | ||
2184 | static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode, | 2105 | static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode, |
@@ -2205,8 +2126,6 @@ static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres) | |||
2205 | unsigned long flags; | 2126 | unsigned long flags; |
2206 | int status = 0; | 2127 | int status = 0; |
2207 | 2128 | ||
2208 | mlog_entry_void(); | ||
2209 | |||
2210 | refresh_check: | 2129 | refresh_check: |
2211 | spin_lock_irqsave(&lockres->l_lock, flags); | 2130 | spin_lock_irqsave(&lockres->l_lock, flags); |
2212 | if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) { | 2131 | if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) { |
@@ -2227,7 +2146,7 @@ refresh_check: | |||
2227 | 2146 | ||
2228 | status = 1; | 2147 | status = 1; |
2229 | bail: | 2148 | bail: |
2230 | mlog_exit(status); | 2149 | mlog(0, "status %d\n", status); |
2231 | return status; | 2150 | return status; |
2232 | } | 2151 | } |
2233 | 2152 | ||
@@ -2237,7 +2156,6 @@ static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockre | |||
2237 | int status) | 2156 | int status) |
2238 | { | 2157 | { |
2239 | unsigned long flags; | 2158 | unsigned long flags; |
2240 | mlog_entry_void(); | ||
2241 | 2159 | ||
2242 | spin_lock_irqsave(&lockres->l_lock, flags); | 2160 | spin_lock_irqsave(&lockres->l_lock, flags); |
2243 | lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING); | 2161 | lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING); |
@@ -2246,8 +2164,6 @@ static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockre | |||
2246 | spin_unlock_irqrestore(&lockres->l_lock, flags); | 2164 | spin_unlock_irqrestore(&lockres->l_lock, flags); |
2247 | 2165 | ||
2248 | wake_up(&lockres->l_event); | 2166 | wake_up(&lockres->l_event); |
2249 | |||
2250 | mlog_exit_void(); | ||
2251 | } | 2167 | } |
2252 | 2168 | ||
2253 | /* may or may not return a bh if it went to disk. */ | 2169 | /* may or may not return a bh if it went to disk. */ |
@@ -2260,8 +2176,6 @@ static int ocfs2_inode_lock_update(struct inode *inode, | |||
2260 | struct ocfs2_dinode *fe; | 2176 | struct ocfs2_dinode *fe; |
2261 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 2177 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
2262 | 2178 | ||
2263 | mlog_entry_void(); | ||
2264 | |||
2265 | if (ocfs2_mount_local(osb)) | 2179 | if (ocfs2_mount_local(osb)) |
2266 | goto bail; | 2180 | goto bail; |
2267 | 2181 | ||
@@ -2330,7 +2244,6 @@ static int ocfs2_inode_lock_update(struct inode *inode, | |||
2330 | bail_refresh: | 2244 | bail_refresh: |
2331 | ocfs2_complete_lock_res_refresh(lockres, status); | 2245 | ocfs2_complete_lock_res_refresh(lockres, status); |
2332 | bail: | 2246 | bail: |
2333 | mlog_exit(status); | ||
2334 | return status; | 2247 | return status; |
2335 | } | 2248 | } |
2336 | 2249 | ||
@@ -2374,8 +2287,6 @@ int ocfs2_inode_lock_full_nested(struct inode *inode, | |||
2374 | 2287 | ||
2375 | BUG_ON(!inode); | 2288 | BUG_ON(!inode); |
2376 | 2289 | ||
2377 | mlog_entry_void(); | ||
2378 | |||
2379 | mlog(0, "inode %llu, take %s META lock\n", | 2290 | mlog(0, "inode %llu, take %s META lock\n", |
2380 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 2291 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
2381 | ex ? "EXMODE" : "PRMODE"); | 2292 | ex ? "EXMODE" : "PRMODE"); |
@@ -2467,7 +2378,6 @@ bail: | |||
2467 | if (local_bh) | 2378 | if (local_bh) |
2468 | brelse(local_bh); | 2379 | brelse(local_bh); |
2469 | 2380 | ||
2470 | mlog_exit(status); | ||
2471 | return status; | 2381 | return status; |
2472 | } | 2382 | } |
2473 | 2383 | ||
@@ -2517,7 +2427,6 @@ int ocfs2_inode_lock_atime(struct inode *inode, | |||
2517 | { | 2427 | { |
2518 | int ret; | 2428 | int ret; |
2519 | 2429 | ||
2520 | mlog_entry_void(); | ||
2521 | ret = ocfs2_inode_lock(inode, NULL, 0); | 2430 | ret = ocfs2_inode_lock(inode, NULL, 0); |
2522 | if (ret < 0) { | 2431 | if (ret < 0) { |
2523 | mlog_errno(ret); | 2432 | mlog_errno(ret); |
@@ -2545,7 +2454,6 @@ int ocfs2_inode_lock_atime(struct inode *inode, | |||
2545 | } else | 2454 | } else |
2546 | *level = 0; | 2455 | *level = 0; |
2547 | 2456 | ||
2548 | mlog_exit(ret); | ||
2549 | return ret; | 2457 | return ret; |
2550 | } | 2458 | } |
2551 | 2459 | ||
@@ -2556,8 +2464,6 @@ void ocfs2_inode_unlock(struct inode *inode, | |||
2556 | struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres; | 2464 | struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres; |
2557 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 2465 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
2558 | 2466 | ||
2559 | mlog_entry_void(); | ||
2560 | |||
2561 | mlog(0, "inode %llu drop %s META lock\n", | 2467 | mlog(0, "inode %llu drop %s META lock\n", |
2562 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 2468 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
2563 | ex ? "EXMODE" : "PRMODE"); | 2469 | ex ? "EXMODE" : "PRMODE"); |
@@ -2565,8 +2471,6 @@ void ocfs2_inode_unlock(struct inode *inode, | |||
2565 | if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) && | 2471 | if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) && |
2566 | !ocfs2_mount_local(osb)) | 2472 | !ocfs2_mount_local(osb)) |
2567 | ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); | 2473 | ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); |
2568 | |||
2569 | mlog_exit_void(); | ||
2570 | } | 2474 | } |
2571 | 2475 | ||
2572 | int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno) | 2476 | int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno) |
@@ -2617,8 +2521,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb, | |||
2617 | int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; | 2521 | int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; |
2618 | struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; | 2522 | struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; |
2619 | 2523 | ||
2620 | mlog_entry_void(); | ||
2621 | |||
2622 | if (ocfs2_is_hard_readonly(osb)) | 2524 | if (ocfs2_is_hard_readonly(osb)) |
2623 | return -EROFS; | 2525 | return -EROFS; |
2624 | 2526 | ||
@@ -2650,7 +2552,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb, | |||
2650 | ocfs2_track_lock_refresh(lockres); | 2552 | ocfs2_track_lock_refresh(lockres); |
2651 | } | 2553 | } |
2652 | bail: | 2554 | bail: |
2653 | mlog_exit(status); | ||
2654 | return status; | 2555 | return status; |
2655 | } | 2556 | } |
2656 | 2557 | ||
@@ -2869,8 +2770,15 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos) | |||
2869 | return iter; | 2770 | return iter; |
2870 | } | 2771 | } |
2871 | 2772 | ||
2872 | /* So that debugfs.ocfs2 can determine which format is being used */ | 2773 | /* |
2873 | #define OCFS2_DLM_DEBUG_STR_VERSION 2 | 2774 | * Version is used by debugfs.ocfs2 to determine the format being used |
2775 | * | ||
2776 | * New in version 2 | ||
2777 | * - Lock stats printed | ||
2778 | * New in version 3 | ||
2779 | * - Max time in lock stats is in usecs (instead of nsecs) | ||
2780 | */ | ||
2781 | #define OCFS2_DLM_DEBUG_STR_VERSION 3 | ||
2874 | static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) | 2782 | static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) |
2875 | { | 2783 | { |
2876 | int i; | 2784 | int i; |
@@ -2912,18 +2820,18 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) | |||
2912 | seq_printf(m, "0x%x\t", lvb[i]); | 2820 | seq_printf(m, "0x%x\t", lvb[i]); |
2913 | 2821 | ||
2914 | #ifdef CONFIG_OCFS2_FS_STATS | 2822 | #ifdef CONFIG_OCFS2_FS_STATS |
2915 | # define lock_num_prmode(_l) (_l)->l_lock_num_prmode | 2823 | # define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets) |
2916 | # define lock_num_exmode(_l) (_l)->l_lock_num_exmode | 2824 | # define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets) |
2917 | # define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed | 2825 | # define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail) |
2918 | # define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed | 2826 | # define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail) |
2919 | # define lock_total_prmode(_l) (_l)->l_lock_total_prmode | 2827 | # define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total) |
2920 | # define lock_total_exmode(_l) (_l)->l_lock_total_exmode | 2828 | # define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total) |
2921 | # define lock_max_prmode(_l) (_l)->l_lock_max_prmode | 2829 | # define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max) |
2922 | # define lock_max_exmode(_l) (_l)->l_lock_max_exmode | 2830 | # define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max) |
2923 | # define lock_refresh(_l) (_l)->l_lock_refresh | 2831 | # define lock_refresh(_l) ((_l)->l_lock_refresh) |
2924 | #else | 2832 | #else |
2925 | # define lock_num_prmode(_l) (0ULL) | 2833 | # define lock_num_prmode(_l) (0) |
2926 | # define lock_num_exmode(_l) (0ULL) | 2834 | # define lock_num_exmode(_l) (0) |
2927 | # define lock_num_prmode_failed(_l) (0) | 2835 | # define lock_num_prmode_failed(_l) (0) |
2928 | # define lock_num_exmode_failed(_l) (0) | 2836 | # define lock_num_exmode_failed(_l) (0) |
2929 | # define lock_total_prmode(_l) (0ULL) | 2837 | # define lock_total_prmode(_l) (0ULL) |
@@ -2933,8 +2841,8 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) | |||
2933 | # define lock_refresh(_l) (0) | 2841 | # define lock_refresh(_l) (0) |
2934 | #endif | 2842 | #endif |
2935 | /* The following seq_print was added in version 2 of this output */ | 2843 | /* The following seq_print was added in version 2 of this output */ |
2936 | seq_printf(m, "%llu\t" | 2844 | seq_printf(m, "%u\t" |
2937 | "%llu\t" | 2845 | "%u\t" |
2938 | "%u\t" | 2846 | "%u\t" |
2939 | "%u\t" | 2847 | "%u\t" |
2940 | "%llu\t" | 2848 | "%llu\t" |
@@ -3054,8 +2962,6 @@ int ocfs2_dlm_init(struct ocfs2_super *osb) | |||
3054 | int status = 0; | 2962 | int status = 0; |
3055 | struct ocfs2_cluster_connection *conn = NULL; | 2963 | struct ocfs2_cluster_connection *conn = NULL; |
3056 | 2964 | ||
3057 | mlog_entry_void(); | ||
3058 | |||
3059 | if (ocfs2_mount_local(osb)) { | 2965 | if (ocfs2_mount_local(osb)) { |
3060 | osb->node_num = 0; | 2966 | osb->node_num = 0; |
3061 | goto local; | 2967 | goto local; |
@@ -3112,15 +3018,12 @@ bail: | |||
3112 | kthread_stop(osb->dc_task); | 3018 | kthread_stop(osb->dc_task); |
3113 | } | 3019 | } |
3114 | 3020 | ||
3115 | mlog_exit(status); | ||
3116 | return status; | 3021 | return status; |
3117 | } | 3022 | } |
3118 | 3023 | ||
3119 | void ocfs2_dlm_shutdown(struct ocfs2_super *osb, | 3024 | void ocfs2_dlm_shutdown(struct ocfs2_super *osb, |
3120 | int hangup_pending) | 3025 | int hangup_pending) |
3121 | { | 3026 | { |
3122 | mlog_entry_void(); | ||
3123 | |||
3124 | ocfs2_drop_osb_locks(osb); | 3027 | ocfs2_drop_osb_locks(osb); |
3125 | 3028 | ||
3126 | /* | 3029 | /* |
@@ -3143,8 +3046,6 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb, | |||
3143 | osb->cconn = NULL; | 3046 | osb->cconn = NULL; |
3144 | 3047 | ||
3145 | ocfs2_dlm_shutdown_debug(osb); | 3048 | ocfs2_dlm_shutdown_debug(osb); |
3146 | |||
3147 | mlog_exit_void(); | ||
3148 | } | 3049 | } |
3149 | 3050 | ||
3150 | static int ocfs2_drop_lock(struct ocfs2_super *osb, | 3051 | static int ocfs2_drop_lock(struct ocfs2_super *osb, |
@@ -3226,7 +3127,6 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb, | |||
3226 | 3127 | ||
3227 | ocfs2_wait_on_busy_lock(lockres); | 3128 | ocfs2_wait_on_busy_lock(lockres); |
3228 | out: | 3129 | out: |
3229 | mlog_exit(0); | ||
3230 | return 0; | 3130 | return 0; |
3231 | } | 3131 | } |
3232 | 3132 | ||
@@ -3284,8 +3184,6 @@ int ocfs2_drop_inode_locks(struct inode *inode) | |||
3284 | { | 3184 | { |
3285 | int status, err; | 3185 | int status, err; |
3286 | 3186 | ||
3287 | mlog_entry_void(); | ||
3288 | |||
3289 | /* No need to call ocfs2_mark_lockres_freeing here - | 3187 | /* No need to call ocfs2_mark_lockres_freeing here - |
3290 | * ocfs2_clear_inode has done it for us. */ | 3188 | * ocfs2_clear_inode has done it for us. */ |
3291 | 3189 | ||
@@ -3310,7 +3208,6 @@ int ocfs2_drop_inode_locks(struct inode *inode) | |||
3310 | if (err < 0 && !status) | 3208 | if (err < 0 && !status) |
3311 | status = err; | 3209 | status = err; |
3312 | 3210 | ||
3313 | mlog_exit(status); | ||
3314 | return status; | 3211 | return status; |
3315 | } | 3212 | } |
3316 | 3213 | ||
@@ -3352,8 +3249,6 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, | |||
3352 | int ret; | 3249 | int ret; |
3353 | u32 dlm_flags = DLM_LKF_CONVERT; | 3250 | u32 dlm_flags = DLM_LKF_CONVERT; |
3354 | 3251 | ||
3355 | mlog_entry_void(); | ||
3356 | |||
3357 | mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, | 3252 | mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, |
3358 | lockres->l_level, new_level); | 3253 | lockres->l_level, new_level); |
3359 | 3254 | ||
@@ -3375,7 +3270,6 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, | |||
3375 | 3270 | ||
3376 | ret = 0; | 3271 | ret = 0; |
3377 | bail: | 3272 | bail: |
3378 | mlog_exit(ret); | ||
3379 | return ret; | 3273 | return ret; |
3380 | } | 3274 | } |
3381 | 3275 | ||
@@ -3385,8 +3279,6 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb, | |||
3385 | { | 3279 | { |
3386 | assert_spin_locked(&lockres->l_lock); | 3280 | assert_spin_locked(&lockres->l_lock); |
3387 | 3281 | ||
3388 | mlog_entry_void(); | ||
3389 | |||
3390 | if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) { | 3282 | if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) { |
3391 | /* If we're already trying to cancel a lock conversion | 3283 | /* If we're already trying to cancel a lock conversion |
3392 | * then just drop the spinlock and allow the caller to | 3284 | * then just drop the spinlock and allow the caller to |
@@ -3416,8 +3308,6 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb, | |||
3416 | { | 3308 | { |
3417 | int ret; | 3309 | int ret; |
3418 | 3310 | ||
3419 | mlog_entry_void(); | ||
3420 | |||
3421 | ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, | 3311 | ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, |
3422 | DLM_LKF_CANCEL); | 3312 | DLM_LKF_CANCEL); |
3423 | if (ret) { | 3313 | if (ret) { |
@@ -3427,7 +3317,6 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb, | |||
3427 | 3317 | ||
3428 | mlog(ML_BASTS, "lockres %s\n", lockres->l_name); | 3318 | mlog(ML_BASTS, "lockres %s\n", lockres->l_name); |
3429 | 3319 | ||
3430 | mlog_exit(ret); | ||
3431 | return ret; | 3320 | return ret; |
3432 | } | 3321 | } |
3433 | 3322 | ||
@@ -3443,8 +3332,6 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, | |||
3443 | int set_lvb = 0; | 3332 | int set_lvb = 0; |
3444 | unsigned int gen; | 3333 | unsigned int gen; |
3445 | 3334 | ||
3446 | mlog_entry_void(); | ||
3447 | |||
3448 | spin_lock_irqsave(&lockres->l_lock, flags); | 3335 | spin_lock_irqsave(&lockres->l_lock, flags); |
3449 | 3336 | ||
3450 | recheck: | 3337 | recheck: |
@@ -3619,14 +3506,14 @@ downconvert: | |||
3619 | gen); | 3506 | gen); |
3620 | 3507 | ||
3621 | leave: | 3508 | leave: |
3622 | mlog_exit(ret); | 3509 | if (ret) |
3510 | mlog_errno(ret); | ||
3623 | return ret; | 3511 | return ret; |
3624 | 3512 | ||
3625 | leave_requeue: | 3513 | leave_requeue: |
3626 | spin_unlock_irqrestore(&lockres->l_lock, flags); | 3514 | spin_unlock_irqrestore(&lockres->l_lock, flags); |
3627 | ctl->requeue = 1; | 3515 | ctl->requeue = 1; |
3628 | 3516 | ||
3629 | mlog_exit(0); | ||
3630 | return 0; | 3517 | return 0; |
3631 | } | 3518 | } |
3632 | 3519 | ||
@@ -3859,8 +3746,6 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres) | |||
3859 | struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb, | 3746 | struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb, |
3860 | oinfo->dqi_gi.dqi_type); | 3747 | oinfo->dqi_gi.dqi_type); |
3861 | 3748 | ||
3862 | mlog_entry_void(); | ||
3863 | |||
3864 | lvb = ocfs2_dlm_lvb(&lockres->l_lksb); | 3749 | lvb = ocfs2_dlm_lvb(&lockres->l_lksb); |
3865 | lvb->lvb_version = OCFS2_QINFO_LVB_VERSION; | 3750 | lvb->lvb_version = OCFS2_QINFO_LVB_VERSION; |
3866 | lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace); | 3751 | lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace); |
@@ -3869,8 +3754,6 @@ static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres) | |||
3869 | lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks); | 3754 | lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks); |
3870 | lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk); | 3755 | lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk); |
3871 | lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry); | 3756 | lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry); |
3872 | |||
3873 | mlog_exit_void(); | ||
3874 | } | 3757 | } |
3875 | 3758 | ||
3876 | void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex) | 3759 | void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex) |
@@ -3879,10 +3762,8 @@ void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex) | |||
3879 | struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb); | 3762 | struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb); |
3880 | int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; | 3763 | int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; |
3881 | 3764 | ||
3882 | mlog_entry_void(); | ||
3883 | if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) | 3765 | if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) |
3884 | ocfs2_cluster_unlock(osb, lockres, level); | 3766 | ocfs2_cluster_unlock(osb, lockres, level); |
3885 | mlog_exit_void(); | ||
3886 | } | 3767 | } |
3887 | 3768 | ||
3888 | static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo) | 3769 | static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo) |
@@ -3937,8 +3818,6 @@ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex) | |||
3937 | int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; | 3818 | int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; |
3938 | int status = 0; | 3819 | int status = 0; |
3939 | 3820 | ||
3940 | mlog_entry_void(); | ||
3941 | |||
3942 | /* On RO devices, locking really isn't needed... */ | 3821 | /* On RO devices, locking really isn't needed... */ |
3943 | if (ocfs2_is_hard_readonly(osb)) { | 3822 | if (ocfs2_is_hard_readonly(osb)) { |
3944 | if (ex) | 3823 | if (ex) |
@@ -3961,7 +3840,6 @@ int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex) | |||
3961 | ocfs2_qinfo_unlock(oinfo, ex); | 3840 | ocfs2_qinfo_unlock(oinfo, ex); |
3962 | ocfs2_complete_lock_res_refresh(lockres, status); | 3841 | ocfs2_complete_lock_res_refresh(lockres, status); |
3963 | bail: | 3842 | bail: |
3964 | mlog_exit(status); | ||
3965 | return status; | 3843 | return status; |
3966 | } | 3844 | } |
3967 | 3845 | ||
@@ -4007,8 +3885,6 @@ static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, | |||
4007 | * considered valid until we remove the OCFS2_LOCK_QUEUED | 3885 | * considered valid until we remove the OCFS2_LOCK_QUEUED |
4008 | * flag. */ | 3886 | * flag. */ |
4009 | 3887 | ||
4010 | mlog_entry_void(); | ||
4011 | |||
4012 | BUG_ON(!lockres); | 3888 | BUG_ON(!lockres); |
4013 | BUG_ON(!lockres->l_ops); | 3889 | BUG_ON(!lockres->l_ops); |
4014 | 3890 | ||
@@ -4042,15 +3918,11 @@ unqueue: | |||
4042 | if (ctl.unblock_action != UNBLOCK_CONTINUE | 3918 | if (ctl.unblock_action != UNBLOCK_CONTINUE |
4043 | && lockres->l_ops->post_unlock) | 3919 | && lockres->l_ops->post_unlock) |
4044 | lockres->l_ops->post_unlock(osb, lockres); | 3920 | lockres->l_ops->post_unlock(osb, lockres); |
4045 | |||
4046 | mlog_exit_void(); | ||
4047 | } | 3921 | } |
4048 | 3922 | ||
4049 | static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, | 3923 | static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, |
4050 | struct ocfs2_lock_res *lockres) | 3924 | struct ocfs2_lock_res *lockres) |
4051 | { | 3925 | { |
4052 | mlog_entry_void(); | ||
4053 | |||
4054 | assert_spin_locked(&lockres->l_lock); | 3926 | assert_spin_locked(&lockres->l_lock); |
4055 | 3927 | ||
4056 | if (lockres->l_flags & OCFS2_LOCK_FREEING) { | 3928 | if (lockres->l_flags & OCFS2_LOCK_FREEING) { |
@@ -4071,8 +3943,6 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, | |||
4071 | osb->blocked_lock_count++; | 3943 | osb->blocked_lock_count++; |
4072 | } | 3944 | } |
4073 | spin_unlock(&osb->dc_task_lock); | 3945 | spin_unlock(&osb->dc_task_lock); |
4074 | |||
4075 | mlog_exit_void(); | ||
4076 | } | 3946 | } |
4077 | 3947 | ||
4078 | static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) | 3948 | static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) |
@@ -4080,8 +3950,6 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) | |||
4080 | unsigned long processed; | 3950 | unsigned long processed; |
4081 | struct ocfs2_lock_res *lockres; | 3951 | struct ocfs2_lock_res *lockres; |
4082 | 3952 | ||
4083 | mlog_entry_void(); | ||
4084 | |||
4085 | spin_lock(&osb->dc_task_lock); | 3953 | spin_lock(&osb->dc_task_lock); |
4086 | /* grab this early so we know to try again if a state change and | 3954 | /* grab this early so we know to try again if a state change and |
4087 | * wake happens part-way through our work */ | 3955 | * wake happens part-way through our work */ |
@@ -4105,8 +3973,6 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) | |||
4105 | spin_lock(&osb->dc_task_lock); | 3973 | spin_lock(&osb->dc_task_lock); |
4106 | } | 3974 | } |
4107 | spin_unlock(&osb->dc_task_lock); | 3975 | spin_unlock(&osb->dc_task_lock); |
4108 | |||
4109 | mlog_exit_void(); | ||
4110 | } | 3976 | } |
4111 | 3977 | ||
4112 | static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb) | 3978 | static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb) |
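The dlmglue.c stats hunks above replace the timespec-based timing (current_kernel_time()/timespec_to_ns()) with the monotonic ktime API: the wait start is taken with ktime_get(), the delta with ktime_sub(), and the per-lock maximum is kept in microseconds via ktime_to_us(), which is why the debugfs output version is bumped to 3. A minimal sketch of that measurement pattern follows; struct example_stats and example_record() are illustrative names, only the ktime helpers are real kernel API.

/*
 * ktime-based wait accounting sketch: accumulate total wait time in
 * nanoseconds and track the longest single wait in microseconds.
 */
#include <linux/ktime.h>
#include <linux/types.h>

struct example_stats {
	u64 total_ns;	/* sum of all waits, in ns */
	u32 max_us;	/* longest single wait, in us */
	u32 gets;	/* number of completed waits */
};

static void example_record(struct example_stats *st, ktime_t start)
{
	ktime_t delta = ktime_sub(ktime_get(), start);
	u32 usec = ktime_to_us(delta);

	st->gets++;
	st->total_ns += ktime_to_ns(delta);
	if (usec > st->max_us)
		st->max_us = usec;
}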
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 254652a9b542..745db42528d5 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | 28 | ||
29 | #define MLOG_MASK_PREFIX ML_EXPORT | ||
30 | #include <cluster/masklog.h> | 29 | #include <cluster/masklog.h> |
31 | 30 | ||
32 | #include "ocfs2.h" | 31 | #include "ocfs2.h" |
@@ -40,6 +39,7 @@ | |||
40 | 39 | ||
41 | #include "buffer_head_io.h" | 40 | #include "buffer_head_io.h" |
42 | #include "suballoc.h" | 41 | #include "suballoc.h" |
42 | #include "ocfs2_trace.h" | ||
43 | 43 | ||
44 | struct ocfs2_inode_handle | 44 | struct ocfs2_inode_handle |
45 | { | 45 | { |
@@ -56,10 +56,9 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb, | |||
56 | int status, set; | 56 | int status, set; |
57 | struct dentry *result; | 57 | struct dentry *result; |
58 | 58 | ||
59 | mlog_entry("(0x%p, 0x%p)\n", sb, handle); | 59 | trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno); |
60 | 60 | ||
61 | if (blkno == 0) { | 61 | if (blkno == 0) { |
62 | mlog(0, "nfs wants inode with blkno: 0\n"); | ||
63 | result = ERR_PTR(-ESTALE); | 62 | result = ERR_PTR(-ESTALE); |
64 | goto bail; | 63 | goto bail; |
65 | } | 64 | } |
@@ -83,6 +82,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb, | |||
83 | } | 82 | } |
84 | 83 | ||
85 | status = ocfs2_test_inode_bit(osb, blkno, &set); | 84 | status = ocfs2_test_inode_bit(osb, blkno, &set); |
85 | trace_ocfs2_get_dentry_test_bit(status, set); | ||
86 | if (status < 0) { | 86 | if (status < 0) { |
87 | if (status == -EINVAL) { | 87 | if (status == -EINVAL) { |
88 | /* | 88 | /* |
@@ -90,18 +90,14 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb, | |||
90 | * as an inode, we return -ESTALE to be | 90 | * as an inode, we return -ESTALE to be |
91 | * nice | 91 | * nice |
92 | */ | 92 | */ |
93 | mlog(0, "test inode bit failed %d\n", status); | ||
94 | status = -ESTALE; | 93 | status = -ESTALE; |
95 | } else { | 94 | } else |
96 | mlog(ML_ERROR, "test inode bit failed %d\n", status); | 95 | mlog(ML_ERROR, "test inode bit failed %d\n", status); |
97 | } | ||
98 | goto unlock_nfs_sync; | 96 | goto unlock_nfs_sync; |
99 | } | 97 | } |
100 | 98 | ||
101 | /* If the inode allocator bit is clear, this inode must be stale */ | 99 | /* If the inode allocator bit is clear, this inode must be stale */ |
102 | if (!set) { | 100 | if (!set) { |
103 | mlog(0, "inode %llu suballoc bit is clear\n", | ||
104 | (unsigned long long)blkno); | ||
105 | status = -ESTALE; | 101 | status = -ESTALE; |
106 | goto unlock_nfs_sync; | 102 | goto unlock_nfs_sync; |
107 | } | 103 | } |
@@ -114,8 +110,8 @@ unlock_nfs_sync: | |||
114 | check_err: | 110 | check_err: |
115 | if (status < 0) { | 111 | if (status < 0) { |
116 | if (status == -ESTALE) { | 112 | if (status == -ESTALE) { |
117 | mlog(0, "stale inode ino: %llu generation: %u\n", | 113 | trace_ocfs2_get_dentry_stale((unsigned long long)blkno, |
118 | (unsigned long long)blkno, handle->ih_generation); | 114 | handle->ih_generation); |
119 | } | 115 | } |
120 | result = ERR_PTR(status); | 116 | result = ERR_PTR(status); |
121 | goto bail; | 117 | goto bail; |
@@ -130,8 +126,9 @@ check_err: | |||
130 | check_gen: | 126 | check_gen: |
131 | if (handle->ih_generation != inode->i_generation) { | 127 | if (handle->ih_generation != inode->i_generation) { |
132 | iput(inode); | 128 | iput(inode); |
133 | mlog(0, "stale inode ino: %llu generation: %u\n", | 129 | trace_ocfs2_get_dentry_generation((unsigned long long)blkno, |
134 | (unsigned long long)blkno, handle->ih_generation); | 130 | handle->ih_generation, |
131 | inode->i_generation); | ||
135 | result = ERR_PTR(-ESTALE); | 132 | result = ERR_PTR(-ESTALE); |
136 | goto bail; | 133 | goto bail; |
137 | } | 134 | } |
@@ -141,7 +138,7 @@ check_gen: | |||
141 | mlog_errno(PTR_ERR(result)); | 138 | mlog_errno(PTR_ERR(result)); |
142 | 139 | ||
143 | bail: | 140 | bail: |
144 | mlog_exit_ptr(result); | 141 | trace_ocfs2_get_dentry_end(result); |
145 | return result; | 142 | return result; |
146 | } | 143 | } |
147 | 144 | ||
@@ -152,11 +149,8 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) | |||
152 | struct dentry *parent; | 149 | struct dentry *parent; |
153 | struct inode *dir = child->d_inode; | 150 | struct inode *dir = child->d_inode; |
154 | 151 | ||
155 | mlog_entry("(0x%p, '%.*s')\n", child, | 152 | trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name, |
156 | child->d_name.len, child->d_name.name); | 153 | (unsigned long long)OCFS2_I(dir)->ip_blkno); |
157 | |||
158 | mlog(0, "find parent of directory %llu\n", | ||
159 | (unsigned long long)OCFS2_I(dir)->ip_blkno); | ||
160 | 154 | ||
161 | status = ocfs2_inode_lock(dir, NULL, 0); | 155 | status = ocfs2_inode_lock(dir, NULL, 0); |
162 | if (status < 0) { | 156 | if (status < 0) { |
@@ -178,7 +172,7 @@ bail_unlock: | |||
178 | ocfs2_inode_unlock(dir, 0); | 172 | ocfs2_inode_unlock(dir, 0); |
179 | 173 | ||
180 | bail: | 174 | bail: |
181 | mlog_exit_ptr(parent); | 175 | trace_ocfs2_get_parent_end(parent); |
182 | 176 | ||
183 | return parent; | 177 | return parent; |
184 | } | 178 | } |
@@ -193,9 +187,9 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, | |||
193 | u32 generation; | 187 | u32 generation; |
194 | __le32 *fh = (__force __le32 *) fh_in; | 188 | __le32 *fh = (__force __le32 *) fh_in; |
195 | 189 | ||
196 | mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry, | 190 | trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len, |
197 | dentry->d_name.len, dentry->d_name.name, | 191 | dentry->d_name.name, |
198 | fh, len, connectable); | 192 | fh, len, connectable); |
199 | 193 | ||
200 | if (connectable && (len < 6)) { | 194 | if (connectable && (len < 6)) { |
201 | *max_len = 6; | 195 | *max_len = 6; |
@@ -210,8 +204,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, | |||
210 | blkno = OCFS2_I(inode)->ip_blkno; | 204 | blkno = OCFS2_I(inode)->ip_blkno; |
211 | generation = inode->i_generation; | 205 | generation = inode->i_generation; |
212 | 206 | ||
213 | mlog(0, "Encoding fh: blkno: %llu, generation: %u\n", | 207 | trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation); |
214 | (unsigned long long)blkno, generation); | ||
215 | 208 | ||
216 | len = 3; | 209 | len = 3; |
217 | fh[0] = cpu_to_le32((u32)(blkno >> 32)); | 210 | fh[0] = cpu_to_le32((u32)(blkno >> 32)); |
@@ -236,14 +229,14 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, | |||
236 | len = 6; | 229 | len = 6; |
237 | type = 2; | 230 | type = 2; |
238 | 231 | ||
239 | mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", | 232 | trace_ocfs2_encode_fh_parent((unsigned long long)blkno, |
240 | (unsigned long long)blkno, generation); | 233 | generation); |
241 | } | 234 | } |
242 | 235 | ||
243 | *max_len = len; | 236 | *max_len = len; |
244 | 237 | ||
245 | bail: | 238 | bail: |
246 | mlog_exit(type); | 239 | trace_ocfs2_encode_fh_type(type); |
247 | return type; | 240 | return type; |
248 | } | 241 | } |
249 | 242 | ||
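The encode path above packs the inode's block number and generation into the first three 32-bit words of the NFS file handle (only the fh[0] assignment is visible in this hunk; the low word and generation slots are assumed below), with three more words for the parent when a connectable handle is requested. A hypothetical decode helper, just to make the assumed layout explicit:

    /* Hypothetical helper, not part of the patch: unpack the 3-word "self"
     * portion built by ocfs2_encode_fh().  fh[1] and fh[2] are assumed to
     * hold the low 32 bits of blkno and the generation, respectively. */
    #include <linux/types.h>
    #include <asm/byteorder.h>

    static inline void ocfs2_example_decode_fh(const __le32 *fh,
                                               u64 *blkno, u32 *generation)
    {
            *blkno  = (u64)le32_to_cpu(fh[0]) << 32;
            *blkno |= le32_to_cpu(fh[1]);
            *generation = le32_to_cpu(fh[2]);
    }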
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 09e3fdfa6d33..23457b491e8c 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/fiemap.h> | 29 | #include <linux/fiemap.h> |
30 | 30 | ||
31 | #define MLOG_MASK_PREFIX ML_EXTENT_MAP | ||
32 | #include <cluster/masklog.h> | 31 | #include <cluster/masklog.h> |
33 | 32 | ||
34 | #include "ocfs2.h" | 33 | #include "ocfs2.h" |
@@ -39,6 +38,7 @@ | |||
39 | #include "inode.h" | 38 | #include "inode.h" |
40 | #include "super.h" | 39 | #include "super.h" |
41 | #include "symlink.h" | 40 | #include "symlink.h" |
41 | #include "ocfs2_trace.h" | ||
42 | 42 | ||
43 | #include "buffer_head_io.h" | 43 | #include "buffer_head_io.h" |
44 | 44 | ||
@@ -841,10 +841,9 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, | |||
841 | u64 p_block, p_count; | 841 | u64 p_block, p_count; |
842 | int i, count, done = 0; | 842 | int i, count, done = 0; |
843 | 843 | ||
844 | mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, " | 844 | trace_ocfs2_read_virt_blocks( |
845 | "flags = %x, validate = %p)\n", | 845 | inode, (unsigned long long)v_block, nr, bhs, flags, |
846 | inode, (unsigned long long)v_block, nr, bhs, flags, | 846 | validate); |
847 | validate); | ||
848 | 847 | ||
849 | if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >= | 848 | if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >= |
850 | i_size_read(inode)) { | 849 | i_size_read(inode)) { |
@@ -897,7 +896,6 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, | |||
897 | } | 896 | } |
898 | 897 | ||
899 | out: | 898 | out: |
900 | mlog_exit(rc); | ||
901 | return rc; | 899 | return rc; |
902 | } | 900 | } |
903 | 901 | ||
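Besides the per-event declarations, the conversion relies on the usual tracepoint header plumbing: every converted .c file in this diff gains #include "ocfs2_trace.h", and exactly one compilation unit must define CREATE_TRACE_POINTS before that include so the event bodies are emitted once. Which ocfs2 file does that is not visible in these hunks, so the placement below is an assumption; the macro names themselves are the standard <trace/define_trace.h> mechanism.

    /* Tail of a trace header like fs/ocfs2/ocfs2_trace.h (sketch): */
    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_FILE ocfs2_trace
    /* This part must stay outside the multi-read include guard. */
    #include <trace/define_trace.h>

    /* In exactly one ocfs2 .c file (assumed, not shown in these hunks): */
    #define CREATE_TRACE_POINTS
    #include "ocfs2_trace.h"

    /* In every other converted file, as the diff shows repeatedly: */
    #include "ocfs2_trace.h"

Once built, the events appear under the ocfs2 subsystem in the tracing debugfs hierarchy and can be enabled individually at run time, which is what replaces the old per-file MLOG_MASK_PREFIX switching.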
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index a6651956482e..41565ae52856 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/quotaops.h> | 38 | #include <linux/quotaops.h> |
39 | #include <linux/blkdev.h> | 39 | #include <linux/blkdev.h> |
40 | 40 | ||
41 | #define MLOG_MASK_PREFIX ML_INODE | ||
42 | #include <cluster/masklog.h> | 41 | #include <cluster/masklog.h> |
43 | 42 | ||
44 | #include "ocfs2.h" | 43 | #include "ocfs2.h" |
@@ -61,6 +60,7 @@ | |||
61 | #include "acl.h" | 60 | #include "acl.h" |
62 | #include "quota.h" | 61 | #include "quota.h" |
63 | #include "refcounttree.h" | 62 | #include "refcounttree.h" |
63 | #include "ocfs2_trace.h" | ||
64 | 64 | ||
65 | #include "buffer_head_io.h" | 65 | #include "buffer_head_io.h" |
66 | 66 | ||
@@ -99,8 +99,10 @@ static int ocfs2_file_open(struct inode *inode, struct file *file) | |||
99 | int mode = file->f_flags; | 99 | int mode = file->f_flags; |
100 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 100 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
101 | 101 | ||
102 | mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, | 102 | trace_ocfs2_file_open(inode, file, file->f_path.dentry, |
103 | file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name); | 103 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
104 | file->f_path.dentry->d_name.len, | ||
105 | file->f_path.dentry->d_name.name, mode); | ||
104 | 106 | ||
105 | if (file->f_mode & FMODE_WRITE) | 107 | if (file->f_mode & FMODE_WRITE) |
106 | dquot_initialize(inode); | 108 | dquot_initialize(inode); |
@@ -135,7 +137,6 @@ static int ocfs2_file_open(struct inode *inode, struct file *file) | |||
135 | } | 137 | } |
136 | 138 | ||
137 | leave: | 139 | leave: |
138 | mlog_exit(status); | ||
139 | return status; | 140 | return status; |
140 | } | 141 | } |
141 | 142 | ||
@@ -143,19 +144,19 @@ static int ocfs2_file_release(struct inode *inode, struct file *file) | |||
143 | { | 144 | { |
144 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 145 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
145 | 146 | ||
146 | mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, | ||
147 | file->f_path.dentry->d_name.len, | ||
148 | file->f_path.dentry->d_name.name); | ||
149 | |||
150 | spin_lock(&oi->ip_lock); | 147 | spin_lock(&oi->ip_lock); |
151 | if (!--oi->ip_open_count) | 148 | if (!--oi->ip_open_count) |
152 | oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT; | 149 | oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT; |
150 | |||
151 | trace_ocfs2_file_release(inode, file, file->f_path.dentry, | ||
152 | oi->ip_blkno, | ||
153 | file->f_path.dentry->d_name.len, | ||
154 | file->f_path.dentry->d_name.name, | ||
155 | oi->ip_open_count); | ||
153 | spin_unlock(&oi->ip_lock); | 156 | spin_unlock(&oi->ip_lock); |
154 | 157 | ||
155 | ocfs2_free_file_private(inode, file); | 158 | ocfs2_free_file_private(inode, file); |
156 | 159 | ||
157 | mlog_exit(0); | ||
158 | |||
159 | return 0; | 160 | return 0; |
160 | } | 161 | } |
161 | 162 | ||
@@ -177,9 +178,11 @@ static int ocfs2_sync_file(struct file *file, int datasync) | |||
177 | struct inode *inode = file->f_mapping->host; | 178 | struct inode *inode = file->f_mapping->host; |
178 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 179 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
179 | 180 | ||
180 | mlog_entry("(0x%p, %d, 0x%p, '%.*s')\n", file, datasync, | 181 | trace_ocfs2_sync_file(inode, file, file->f_path.dentry, |
181 | file->f_path.dentry, file->f_path.dentry->d_name.len, | 182 | OCFS2_I(inode)->ip_blkno, |
182 | file->f_path.dentry->d_name.name); | 183 | file->f_path.dentry->d_name.len, |
184 | file->f_path.dentry->d_name.name, | ||
185 | (unsigned long long)datasync); | ||
183 | 186 | ||
184 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { | 187 | if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { |
185 | /* | 188 | /* |
@@ -195,7 +198,8 @@ static int ocfs2_sync_file(struct file *file, int datasync) | |||
195 | err = jbd2_journal_force_commit(journal); | 198 | err = jbd2_journal_force_commit(journal); |
196 | 199 | ||
197 | bail: | 200 | bail: |
198 | mlog_exit(err); | 201 | if (err) |
202 | mlog_errno(err); | ||
199 | 203 | ||
200 | return (err < 0) ? -EIO : 0; | 204 | return (err < 0) ? -EIO : 0; |
201 | } | 205 | } |
@@ -251,8 +255,6 @@ int ocfs2_update_inode_atime(struct inode *inode, | |||
251 | handle_t *handle; | 255 | handle_t *handle; |
252 | struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data; | 256 | struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data; |
253 | 257 | ||
254 | mlog_entry_void(); | ||
255 | |||
256 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); | 258 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); |
257 | if (IS_ERR(handle)) { | 259 | if (IS_ERR(handle)) { |
258 | ret = PTR_ERR(handle); | 260 | ret = PTR_ERR(handle); |
@@ -280,7 +282,6 @@ int ocfs2_update_inode_atime(struct inode *inode, | |||
280 | out_commit: | 282 | out_commit: |
281 | ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); | 283 | ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); |
282 | out: | 284 | out: |
283 | mlog_exit(ret); | ||
284 | return ret; | 285 | return ret; |
285 | } | 286 | } |
286 | 287 | ||
@@ -291,7 +292,6 @@ static int ocfs2_set_inode_size(handle_t *handle, | |||
291 | { | 292 | { |
292 | int status; | 293 | int status; |
293 | 294 | ||
294 | mlog_entry_void(); | ||
295 | i_size_write(inode, new_i_size); | 295 | i_size_write(inode, new_i_size); |
296 | inode->i_blocks = ocfs2_inode_sector_count(inode); | 296 | inode->i_blocks = ocfs2_inode_sector_count(inode); |
297 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; | 297 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; |
@@ -303,7 +303,6 @@ static int ocfs2_set_inode_size(handle_t *handle, | |||
303 | } | 303 | } |
304 | 304 | ||
305 | bail: | 305 | bail: |
306 | mlog_exit(status); | ||
307 | return status; | 306 | return status; |
308 | } | 307 | } |
309 | 308 | ||
@@ -375,8 +374,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, | |||
375 | struct ocfs2_dinode *di; | 374 | struct ocfs2_dinode *di; |
376 | u64 cluster_bytes; | 375 | u64 cluster_bytes; |
377 | 376 | ||
378 | mlog_entry_void(); | ||
379 | |||
380 | /* | 377 | /* |
381 | * We need to CoW the cluster contains the offset if it is reflinked | 378 | * We need to CoW the cluster contains the offset if it is reflinked |
382 | * since we will call ocfs2_zero_range_for_truncate later which will | 379 | * since we will call ocfs2_zero_range_for_truncate later which will |
@@ -429,8 +426,6 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, | |||
429 | out_commit: | 426 | out_commit: |
430 | ocfs2_commit_trans(osb, handle); | 427 | ocfs2_commit_trans(osb, handle); |
431 | out: | 428 | out: |
432 | |||
433 | mlog_exit(status); | ||
434 | return status; | 429 | return status; |
435 | } | 430 | } |
436 | 431 | ||
@@ -442,14 +437,14 @@ static int ocfs2_truncate_file(struct inode *inode, | |||
442 | struct ocfs2_dinode *fe = NULL; | 437 | struct ocfs2_dinode *fe = NULL; |
443 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 438 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
444 | 439 | ||
445 | mlog_entry("(inode = %llu, new_i_size = %llu\n", | ||
446 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
447 | (unsigned long long)new_i_size); | ||
448 | |||
449 | /* We trust di_bh because it comes from ocfs2_inode_lock(), which | 440 | /* We trust di_bh because it comes from ocfs2_inode_lock(), which |
450 | * already validated it */ | 441 | * already validated it */ |
451 | fe = (struct ocfs2_dinode *) di_bh->b_data; | 442 | fe = (struct ocfs2_dinode *) di_bh->b_data; |
452 | 443 | ||
444 | trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
445 | (unsigned long long)le64_to_cpu(fe->i_size), | ||
446 | (unsigned long long)new_i_size); | ||
447 | |||
453 | mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode), | 448 | mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode), |
454 | "Inode %llu, inode i_size = %lld != di " | 449 | "Inode %llu, inode i_size = %lld != di " |
455 | "i_size = %llu, i_flags = 0x%x\n", | 450 | "i_size = %llu, i_flags = 0x%x\n", |
@@ -459,19 +454,14 @@ static int ocfs2_truncate_file(struct inode *inode, | |||
459 | le32_to_cpu(fe->i_flags)); | 454 | le32_to_cpu(fe->i_flags)); |
460 | 455 | ||
461 | if (new_i_size > le64_to_cpu(fe->i_size)) { | 456 | if (new_i_size > le64_to_cpu(fe->i_size)) { |
462 | mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n", | 457 | trace_ocfs2_truncate_file_error( |
463 | (unsigned long long)le64_to_cpu(fe->i_size), | 458 | (unsigned long long)le64_to_cpu(fe->i_size), |
464 | (unsigned long long)new_i_size); | 459 | (unsigned long long)new_i_size); |
465 | status = -EINVAL; | 460 | status = -EINVAL; |
466 | mlog_errno(status); | 461 | mlog_errno(status); |
467 | goto bail; | 462 | goto bail; |
468 | } | 463 | } |
469 | 464 | ||
470 | mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n", | ||
471 | (unsigned long long)le64_to_cpu(fe->i_blkno), | ||
472 | (unsigned long long)le64_to_cpu(fe->i_size), | ||
473 | (unsigned long long)new_i_size); | ||
474 | |||
475 | /* lets handle the simple truncate cases before doing any more | 465 | /* lets handle the simple truncate cases before doing any more |
476 | * cluster locking. */ | 466 | * cluster locking. */ |
477 | if (new_i_size == le64_to_cpu(fe->i_size)) | 467 | if (new_i_size == le64_to_cpu(fe->i_size)) |
@@ -525,7 +515,6 @@ bail: | |||
525 | if (!status && OCFS2_I(inode)->ip_clusters == 0) | 515 | if (!status && OCFS2_I(inode)->ip_clusters == 0) |
526 | status = ocfs2_try_remove_refcount_tree(inode, di_bh); | 516 | status = ocfs2_try_remove_refcount_tree(inode, di_bh); |
527 | 517 | ||
528 | mlog_exit(status); | ||
529 | return status; | 518 | return status; |
530 | } | 519 | } |
531 | 520 | ||
@@ -578,8 +567,6 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start, | |||
578 | struct ocfs2_extent_tree et; | 567 | struct ocfs2_extent_tree et; |
579 | int did_quota = 0; | 568 | int did_quota = 0; |
580 | 569 | ||
581 | mlog_entry("(clusters_to_add = %u)\n", clusters_to_add); | ||
582 | |||
583 | /* | 570 | /* |
584 | * This function only exists for file systems which don't | 571 | * This function only exists for file systems which don't |
585 | * support holes. | 572 | * support holes. |
@@ -596,11 +583,6 @@ static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start, | |||
596 | restart_all: | 583 | restart_all: |
597 | BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters); | 584 | BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters); |
598 | 585 | ||
599 | mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, " | ||
600 | "clusters_to_add = %u\n", | ||
601 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
602 | (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters), | ||
603 | clusters_to_add); | ||
604 | ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh); | 586 | ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh); |
605 | status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, | 587 | status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, |
606 | &data_ac, &meta_ac); | 588 | &data_ac, &meta_ac); |
@@ -620,6 +602,12 @@ restart_all: | |||
620 | } | 602 | } |
621 | 603 | ||
622 | restarted_transaction: | 604 | restarted_transaction: |
605 | trace_ocfs2_extend_allocation( | ||
606 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
607 | (unsigned long long)i_size_read(inode), | ||
608 | le32_to_cpu(fe->i_clusters), clusters_to_add, | ||
609 | why, restart_func); | ||
610 | |||
623 | status = dquot_alloc_space_nodirty(inode, | 611 | status = dquot_alloc_space_nodirty(inode, |
624 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); | 612 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); |
625 | if (status) | 613 | if (status) |
@@ -666,13 +654,11 @@ restarted_transaction: | |||
666 | 654 | ||
667 | if (why != RESTART_NONE && clusters_to_add) { | 655 | if (why != RESTART_NONE && clusters_to_add) { |
668 | if (why == RESTART_META) { | 656 | if (why == RESTART_META) { |
669 | mlog(0, "restarting function.\n"); | ||
670 | restart_func = 1; | 657 | restart_func = 1; |
671 | status = 0; | 658 | status = 0; |
672 | } else { | 659 | } else { |
673 | BUG_ON(why != RESTART_TRANS); | 660 | BUG_ON(why != RESTART_TRANS); |
674 | 661 | ||
675 | mlog(0, "restarting transaction.\n"); | ||
676 | /* TODO: This can be more intelligent. */ | 662 | /* TODO: This can be more intelligent. */ |
677 | credits = ocfs2_calc_extend_credits(osb->sb, | 663 | credits = ocfs2_calc_extend_credits(osb->sb, |
678 | &fe->id2.i_list, | 664 | &fe->id2.i_list, |
@@ -689,11 +675,11 @@ restarted_transaction: | |||
689 | } | 675 | } |
690 | } | 676 | } |
691 | 677 | ||
692 | mlog(0, "fe: i_clusters = %u, i_size=%llu\n", | 678 | trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno, |
693 | le32_to_cpu(fe->i_clusters), | 679 | le32_to_cpu(fe->i_clusters), |
694 | (unsigned long long)le64_to_cpu(fe->i_size)); | 680 | (unsigned long long)le64_to_cpu(fe->i_size), |
695 | mlog(0, "inode: ip_clusters=%u, i_size=%lld\n", | 681 | OCFS2_I(inode)->ip_clusters, |
696 | OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode)); | 682 | (unsigned long long)i_size_read(inode)); |
697 | 683 | ||
698 | leave: | 684 | leave: |
699 | if (status < 0 && did_quota) | 685 | if (status < 0 && did_quota) |
@@ -718,7 +704,6 @@ leave: | |||
718 | brelse(bh); | 704 | brelse(bh); |
719 | bh = NULL; | 705 | bh = NULL; |
720 | 706 | ||
721 | mlog_exit(status); | ||
722 | return status; | 707 | return status; |
723 | } | 708 | } |
724 | 709 | ||
@@ -785,10 +770,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, | |||
785 | if (!zero_to) | 770 | if (!zero_to) |
786 | zero_to = PAGE_CACHE_SIZE; | 771 | zero_to = PAGE_CACHE_SIZE; |
787 | 772 | ||
788 | mlog(0, | 773 | trace_ocfs2_write_zero_page( |
789 | "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n", | 774 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
790 | (unsigned long long)abs_from, (unsigned long long)abs_to, | 775 | (unsigned long long)abs_from, |
791 | index, zero_from, zero_to); | 776 | (unsigned long long)abs_to, |
777 | index, zero_from, zero_to); | ||
792 | 778 | ||
793 | /* We know that zero_from is block aligned */ | 779 | /* We know that zero_from is block aligned */ |
794 | for (block_start = zero_from; block_start < zero_to; | 780 | for (block_start = zero_from; block_start < zero_to; |
@@ -928,9 +914,10 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start, | |||
928 | u64 next_pos; | 914 | u64 next_pos; |
929 | u64 zero_pos = range_start; | 915 | u64 zero_pos = range_start; |
930 | 916 | ||
931 | mlog(0, "range_start = %llu, range_end = %llu\n", | 917 | trace_ocfs2_zero_extend_range( |
932 | (unsigned long long)range_start, | 918 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
933 | (unsigned long long)range_end); | 919 | (unsigned long long)range_start, |
920 | (unsigned long long)range_end); | ||
934 | BUG_ON(range_start >= range_end); | 921 | BUG_ON(range_start >= range_end); |
935 | 922 | ||
936 | while (zero_pos < range_end) { | 923 | while (zero_pos < range_end) { |
@@ -962,9 +949,9 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh, | |||
962 | struct super_block *sb = inode->i_sb; | 949 | struct super_block *sb = inode->i_sb; |
963 | 950 | ||
964 | zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode)); | 951 | zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode)); |
965 | mlog(0, "zero_start %llu for i_size %llu\n", | 952 | trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno, |
966 | (unsigned long long)zero_start, | 953 | (unsigned long long)zero_start, |
967 | (unsigned long long)i_size_read(inode)); | 954 | (unsigned long long)i_size_read(inode)); |
968 | while (zero_start < zero_to_size) { | 955 | while (zero_start < zero_to_size) { |
969 | ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start, | 956 | ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start, |
970 | zero_to_size, | 957 | zero_to_size, |
@@ -1113,30 +1100,20 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
1113 | struct dquot *transfer_to[MAXQUOTAS] = { }; | 1100 | struct dquot *transfer_to[MAXQUOTAS] = { }; |
1114 | int qtype; | 1101 | int qtype; |
1115 | 1102 | ||
1116 | mlog_entry("(0x%p, '%.*s')\n", dentry, | 1103 | trace_ocfs2_setattr(inode, dentry, |
1117 | dentry->d_name.len, dentry->d_name.name); | 1104 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1105 | dentry->d_name.len, dentry->d_name.name, | ||
1106 | attr->ia_valid, attr->ia_mode, | ||
1107 | attr->ia_uid, attr->ia_gid); | ||
1118 | 1108 | ||
1119 | /* ensuring we don't even attempt to truncate a symlink */ | 1109 | /* ensuring we don't even attempt to truncate a symlink */ |
1120 | if (S_ISLNK(inode->i_mode)) | 1110 | if (S_ISLNK(inode->i_mode)) |
1121 | attr->ia_valid &= ~ATTR_SIZE; | 1111 | attr->ia_valid &= ~ATTR_SIZE; |
1122 | 1112 | ||
1123 | if (attr->ia_valid & ATTR_MODE) | ||
1124 | mlog(0, "mode change: %d\n", attr->ia_mode); | ||
1125 | if (attr->ia_valid & ATTR_UID) | ||
1126 | mlog(0, "uid change: %d\n", attr->ia_uid); | ||
1127 | if (attr->ia_valid & ATTR_GID) | ||
1128 | mlog(0, "gid change: %d\n", attr->ia_gid); | ||
1129 | if (attr->ia_valid & ATTR_SIZE) | ||
1130 | mlog(0, "size change...\n"); | ||
1131 | if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME)) | ||
1132 | mlog(0, "time change...\n"); | ||
1133 | |||
1134 | #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ | 1113 | #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ |
1135 | | ATTR_GID | ATTR_UID | ATTR_MODE) | 1114 | | ATTR_GID | ATTR_UID | ATTR_MODE) |
1136 | if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) { | 1115 | if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) |
1137 | mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid); | ||
1138 | return 0; | 1116 | return 0; |
1139 | } | ||
1140 | 1117 | ||
1141 | status = inode_change_ok(inode, attr); | 1118 | status = inode_change_ok(inode, attr); |
1142 | if (status) | 1119 | if (status) |
@@ -1274,7 +1251,6 @@ bail: | |||
1274 | mlog_errno(status); | 1251 | mlog_errno(status); |
1275 | } | 1252 | } |
1276 | 1253 | ||
1277 | mlog_exit(status); | ||
1278 | return status; | 1254 | return status; |
1279 | } | 1255 | } |
1280 | 1256 | ||
@@ -1287,8 +1263,6 @@ int ocfs2_getattr(struct vfsmount *mnt, | |||
1287 | struct ocfs2_super *osb = sb->s_fs_info; | 1263 | struct ocfs2_super *osb = sb->s_fs_info; |
1288 | int err; | 1264 | int err; |
1289 | 1265 | ||
1290 | mlog_entry_void(); | ||
1291 | |||
1292 | err = ocfs2_inode_revalidate(dentry); | 1266 | err = ocfs2_inode_revalidate(dentry); |
1293 | if (err) { | 1267 | if (err) { |
1294 | if (err != -ENOENT) | 1268 | if (err != -ENOENT) |
@@ -1302,8 +1276,6 @@ int ocfs2_getattr(struct vfsmount *mnt, | |||
1302 | stat->blksize = osb->s_clustersize; | 1276 | stat->blksize = osb->s_clustersize; |
1303 | 1277 | ||
1304 | bail: | 1278 | bail: |
1305 | mlog_exit(err); | ||
1306 | |||
1307 | return err; | 1279 | return err; |
1308 | } | 1280 | } |
1309 | 1281 | ||
@@ -1314,8 +1286,6 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags) | |||
1314 | if (flags & IPERM_FLAG_RCU) | 1286 | if (flags & IPERM_FLAG_RCU) |
1315 | return -ECHILD; | 1287 | return -ECHILD; |
1316 | 1288 | ||
1317 | mlog_entry_void(); | ||
1318 | |||
1319 | ret = ocfs2_inode_lock(inode, NULL, 0); | 1289 | ret = ocfs2_inode_lock(inode, NULL, 0); |
1320 | if (ret) { | 1290 | if (ret) { |
1321 | if (ret != -ENOENT) | 1291 | if (ret != -ENOENT) |
@@ -1327,7 +1297,6 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags) | |||
1327 | 1297 | ||
1328 | ocfs2_inode_unlock(inode, 0); | 1298 | ocfs2_inode_unlock(inode, 0); |
1329 | out: | 1299 | out: |
1330 | mlog_exit(ret); | ||
1331 | return ret; | 1300 | return ret; |
1332 | } | 1301 | } |
1333 | 1302 | ||
@@ -1339,8 +1308,9 @@ static int __ocfs2_write_remove_suid(struct inode *inode, | |||
1339 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 1308 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
1340 | struct ocfs2_dinode *di; | 1309 | struct ocfs2_dinode *di; |
1341 | 1310 | ||
1342 | mlog_entry("(Inode %llu, mode 0%o)\n", | 1311 | trace_ocfs2_write_remove_suid( |
1343 | (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode); | 1312 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1313 | inode->i_mode); | ||
1344 | 1314 | ||
1345 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); | 1315 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); |
1346 | if (IS_ERR(handle)) { | 1316 | if (IS_ERR(handle)) { |
@@ -1368,7 +1338,6 @@ static int __ocfs2_write_remove_suid(struct inode *inode, | |||
1368 | out_trans: | 1338 | out_trans: |
1369 | ocfs2_commit_trans(osb, handle); | 1339 | ocfs2_commit_trans(osb, handle); |
1370 | out: | 1340 | out: |
1371 | mlog_exit(ret); | ||
1372 | return ret; | 1341 | return ret; |
1373 | } | 1342 | } |
1374 | 1343 | ||
@@ -1547,8 +1516,9 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, | |||
1547 | * partial clusters here. There's no need to worry about | 1516 | * partial clusters here. There's no need to worry about |
1548 | * physical allocation - the zeroing code knows to skip holes. | 1517 | * physical allocation - the zeroing code knows to skip holes. |
1549 | */ | 1518 | */ |
1550 | mlog(0, "byte start: %llu, end: %llu\n", | 1519 | trace_ocfs2_zero_partial_clusters( |
1551 | (unsigned long long)start, (unsigned long long)end); | 1520 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1521 | (unsigned long long)start, (unsigned long long)end); | ||
1552 | 1522 | ||
1553 | /* | 1523 | /* |
1554 | * If both edges are on a cluster boundary then there's no | 1524 | * If both edges are on a cluster boundary then there's no |
@@ -1572,8 +1542,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, | |||
1572 | if (tmpend > end) | 1542 | if (tmpend > end) |
1573 | tmpend = end; | 1543 | tmpend = end; |
1574 | 1544 | ||
1575 | mlog(0, "1st range: start: %llu, tmpend: %llu\n", | 1545 | trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start, |
1576 | (unsigned long long)start, (unsigned long long)tmpend); | 1546 | (unsigned long long)tmpend); |
1577 | 1547 | ||
1578 | ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); | 1548 | ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); |
1579 | if (ret) | 1549 | if (ret) |
@@ -1587,8 +1557,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, | |||
1587 | */ | 1557 | */ |
1588 | start = end & ~(osb->s_clustersize - 1); | 1558 | start = end & ~(osb->s_clustersize - 1); |
1589 | 1559 | ||
1590 | mlog(0, "2nd range: start: %llu, end: %llu\n", | 1560 | trace_ocfs2_zero_partial_clusters_range2( |
1591 | (unsigned long long)start, (unsigned long long)end); | 1561 | (unsigned long long)start, (unsigned long long)end); |
1592 | 1562 | ||
1593 | ret = ocfs2_zero_range_for_truncate(inode, handle, start, end); | 1563 | ret = ocfs2_zero_range_for_truncate(inode, handle, start, end); |
1594 | if (ret) | 1564 | if (ret) |
@@ -1688,6 +1658,11 @@ static int ocfs2_remove_inode_range(struct inode *inode, | |||
1688 | ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); | 1658 | ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); |
1689 | ocfs2_init_dealloc_ctxt(&dealloc); | 1659 | ocfs2_init_dealloc_ctxt(&dealloc); |
1690 | 1660 | ||
1661 | trace_ocfs2_remove_inode_range( | ||
1662 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
1663 | (unsigned long long)byte_start, | ||
1664 | (unsigned long long)byte_len); | ||
1665 | |||
1691 | if (byte_len == 0) | 1666 | if (byte_len == 0) |
1692 | return 0; | 1667 | return 0; |
1693 | 1668 | ||
@@ -1734,11 +1709,6 @@ static int ocfs2_remove_inode_range(struct inode *inode, | |||
1734 | trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits; | 1709 | trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits; |
1735 | cluster_in_el = trunc_end; | 1710 | cluster_in_el = trunc_end; |
1736 | 1711 | ||
1737 | mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n", | ||
1738 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
1739 | (unsigned long long)byte_start, | ||
1740 | (unsigned long long)byte_len, trunc_start, trunc_end); | ||
1741 | |||
1742 | ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len); | 1712 | ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len); |
1743 | if (ret) { | 1713 | if (ret) { |
1744 | mlog_errno(ret); | 1714 | mlog_errno(ret); |
@@ -2093,7 +2063,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file, | |||
2093 | int ret = 0, meta_level = 0; | 2063 | int ret = 0, meta_level = 0; |
2094 | struct dentry *dentry = file->f_path.dentry; | 2064 | struct dentry *dentry = file->f_path.dentry; |
2095 | struct inode *inode = dentry->d_inode; | 2065 | struct inode *inode = dentry->d_inode; |
2096 | loff_t saved_pos, end; | 2066 | loff_t saved_pos = 0, end; |
2097 | 2067 | ||
2098 | /* | 2068 | /* |
2099 | * We start with a read level meta lock and only jump to an ex | 2069 | * We start with a read level meta lock and only jump to an ex |
@@ -2132,12 +2102,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file, | |||
2132 | 2102 | ||
2133 | /* work on a copy of ppos until we're sure that we won't have | 2103 | /* work on a copy of ppos until we're sure that we won't have |
2134 | * to recalculate it due to relocking. */ | 2104 | * to recalculate it due to relocking. */ |
2135 | if (appending) { | 2105 | if (appending) |
2136 | saved_pos = i_size_read(inode); | 2106 | saved_pos = i_size_read(inode); |
2137 | mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos); | 2107 | else |
2138 | } else { | ||
2139 | saved_pos = *ppos; | 2108 | saved_pos = *ppos; |
2140 | } | ||
2141 | 2109 | ||
2142 | end = saved_pos + count; | 2110 | end = saved_pos + count; |
2143 | 2111 | ||
@@ -2208,6 +2176,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file, | |||
2208 | *ppos = saved_pos; | 2176 | *ppos = saved_pos; |
2209 | 2177 | ||
2210 | out_unlock: | 2178 | out_unlock: |
2179 | trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, | ||
2180 | saved_pos, appending, count, | ||
2181 | direct_io, has_refcount); | ||
2182 | |||
2211 | if (meta_level >= 0) | 2183 | if (meta_level >= 0) |
2212 | ocfs2_inode_unlock(inode, meta_level); | 2184 | ocfs2_inode_unlock(inode, meta_level); |
2213 | 2185 | ||
@@ -2233,10 +2205,11 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, | |||
2233 | int full_coherency = !(osb->s_mount_opt & | 2205 | int full_coherency = !(osb->s_mount_opt & |
2234 | OCFS2_MOUNT_COHERENCY_BUFFERED); | 2206 | OCFS2_MOUNT_COHERENCY_BUFFERED); |
2235 | 2207 | ||
2236 | mlog_entry("(0x%p, %u, '%.*s')\n", file, | 2208 | trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, |
2237 | (unsigned int)nr_segs, | 2209 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
2238 | file->f_path.dentry->d_name.len, | 2210 | file->f_path.dentry->d_name.len, |
2239 | file->f_path.dentry->d_name.name); | 2211 | file->f_path.dentry->d_name.name, |
2212 | (unsigned int)nr_segs); | ||
2240 | 2213 | ||
2241 | if (iocb->ki_left == 0) | 2214 | if (iocb->ki_left == 0) |
2242 | return 0; | 2215 | return 0; |
@@ -2402,7 +2375,6 @@ out_sems: | |||
2402 | 2375 | ||
2403 | if (written) | 2376 | if (written) |
2404 | ret = written; | 2377 | ret = written; |
2405 | mlog_exit(ret); | ||
2406 | return ret; | 2378 | return ret; |
2407 | } | 2379 | } |
2408 | 2380 | ||
@@ -2438,10 +2410,11 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, | |||
2438 | .u.file = out, | 2410 | .u.file = out, |
2439 | }; | 2411 | }; |
2440 | 2412 | ||
2441 | mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe, | 2413 | |
2442 | (unsigned int)len, | 2414 | trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry, |
2443 | out->f_path.dentry->d_name.len, | 2415 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
2444 | out->f_path.dentry->d_name.name); | 2416 | out->f_path.dentry->d_name.len, |
2417 | out->f_path.dentry->d_name.name, len); | ||
2445 | 2418 | ||
2446 | if (pipe->inode) | 2419 | if (pipe->inode) |
2447 | mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT); | 2420 | mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT); |
@@ -2485,7 +2458,6 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, | |||
2485 | balance_dirty_pages_ratelimited_nr(mapping, nr_pages); | 2458 | balance_dirty_pages_ratelimited_nr(mapping, nr_pages); |
2486 | } | 2459 | } |
2487 | 2460 | ||
2488 | mlog_exit(ret); | ||
2489 | return ret; | 2461 | return ret; |
2490 | } | 2462 | } |
2491 | 2463 | ||
@@ -2498,10 +2470,10 @@ static ssize_t ocfs2_file_splice_read(struct file *in, | |||
2498 | int ret = 0, lock_level = 0; | 2470 | int ret = 0, lock_level = 0; |
2499 | struct inode *inode = in->f_path.dentry->d_inode; | 2471 | struct inode *inode = in->f_path.dentry->d_inode; |
2500 | 2472 | ||
2501 | mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe, | 2473 | trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry, |
2502 | (unsigned int)len, | 2474 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
2503 | in->f_path.dentry->d_name.len, | 2475 | in->f_path.dentry->d_name.len, |
2504 | in->f_path.dentry->d_name.name); | 2476 | in->f_path.dentry->d_name.name, len); |
2505 | 2477 | ||
2506 | /* | 2478 | /* |
2507 | * See the comment in ocfs2_file_aio_read() | 2479 | * See the comment in ocfs2_file_aio_read() |
@@ -2516,7 +2488,6 @@ static ssize_t ocfs2_file_splice_read(struct file *in, | |||
2516 | ret = generic_file_splice_read(in, ppos, pipe, len, flags); | 2488 | ret = generic_file_splice_read(in, ppos, pipe, len, flags); |
2517 | 2489 | ||
2518 | bail: | 2490 | bail: |
2519 | mlog_exit(ret); | ||
2520 | return ret; | 2491 | return ret; |
2521 | } | 2492 | } |
2522 | 2493 | ||
@@ -2529,10 +2500,11 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, | |||
2529 | struct file *filp = iocb->ki_filp; | 2500 | struct file *filp = iocb->ki_filp; |
2530 | struct inode *inode = filp->f_path.dentry->d_inode; | 2501 | struct inode *inode = filp->f_path.dentry->d_inode; |
2531 | 2502 | ||
2532 | mlog_entry("(0x%p, %u, '%.*s')\n", filp, | 2503 | trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry, |
2533 | (unsigned int)nr_segs, | 2504 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
2534 | filp->f_path.dentry->d_name.len, | 2505 | filp->f_path.dentry->d_name.len, |
2535 | filp->f_path.dentry->d_name.name); | 2506 | filp->f_path.dentry->d_name.name, nr_segs); |
2507 | |||
2536 | 2508 | ||
2537 | if (!inode) { | 2509 | if (!inode) { |
2538 | ret = -EINVAL; | 2510 | ret = -EINVAL; |
@@ -2578,8 +2550,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, | |||
2578 | ocfs2_inode_unlock(inode, lock_level); | 2550 | ocfs2_inode_unlock(inode, lock_level); |
2579 | 2551 | ||
2580 | ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos); | 2552 | ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos); |
2581 | if (ret == -EINVAL) | 2553 | trace_generic_file_aio_read_ret(ret); |
2582 | mlog(0, "generic_file_aio_read returned -EINVAL\n"); | ||
2583 | 2554 | ||
2584 | /* buffered aio wouldn't have proper lock coverage today */ | 2555 | /* buffered aio wouldn't have proper lock coverage today */ |
2585 | BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT)); | 2556 | BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT)); |
@@ -2597,7 +2568,6 @@ bail: | |||
2597 | } | 2568 | } |
2598 | if (rw_level != -1) | 2569 | if (rw_level != -1) |
2599 | ocfs2_rw_unlock(inode, rw_level); | 2570 | ocfs2_rw_unlock(inode, rw_level); |
2600 | mlog_exit(ret); | ||
2601 | 2571 | ||
2602 | return ret; | 2572 | return ret; |
2603 | } | 2573 | } |
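One change in this file is not a pure mlog deletion: ocfs2_prepare_inode_for_write() now initializes saved_pos to 0. The new trace_ocfs2_prepare_inode_for_write() call sits at the out_unlock label, and error paths earlier in the function can jump there before saved_pos has been assigned, so without the initializer the tracepoint would read an uninitialized value. A standalone sketch of that control-flow shape (plain C, not ocfs2 code; all names are hypothetical):

    /* Why "loff_t saved_pos = 0": the exit-path hook reads saved_pos even
     * when the function bails out before assigning it. */
    #include <stdio.h>

    static long long traced_pos;                /* stands in for the tracepoint */

    static int prepare_sketch(int lock_ok, int appending, long long pos)
    {
        int ret = 0;
        long long saved_pos = 0;                /* initializer added by the patch */

        if (!lock_ok) {
            ret = -1;
            goto out_unlock;                    /* saved_pos never assigned here */
        }

        saved_pos = appending ? 4096 : pos;     /* i_size vs. *ppos */
        /* ... allocation and refcount checks elided ... */

    out_unlock:
        traced_pos = saved_pos;                 /* trace_ocfs2_prepare_inode_for_write() */
        return ret;
    }

    int main(void)
    {
        prepare_sketch(0, 1, 123);              /* take the early-exit path */
        printf("value recorded on the error path: %lld\n", traced_pos);
        return 0;
    }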
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c index 1aa863dd901f..d8208b20dc53 100644 --- a/fs/ocfs2/heartbeat.c +++ b/fs/ocfs2/heartbeat.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/highmem.h> | 29 | #include <linux/highmem.h> |
30 | 30 | ||
31 | #define MLOG_MASK_PREFIX ML_SUPER | ||
32 | #include <cluster/masklog.h> | 31 | #include <cluster/masklog.h> |
33 | 32 | ||
34 | #include "ocfs2.h" | 33 | #include "ocfs2.h" |
@@ -37,6 +36,7 @@ | |||
37 | #include "heartbeat.h" | 36 | #include "heartbeat.h" |
38 | #include "inode.h" | 37 | #include "inode.h" |
39 | #include "journal.h" | 38 | #include "journal.h" |
39 | #include "ocfs2_trace.h" | ||
40 | 40 | ||
41 | #include "buffer_head_io.h" | 41 | #include "buffer_head_io.h" |
42 | 42 | ||
@@ -66,7 +66,7 @@ void ocfs2_do_node_down(int node_num, void *data) | |||
66 | 66 | ||
67 | BUG_ON(osb->node_num == node_num); | 67 | BUG_ON(osb->node_num == node_num); |
68 | 68 | ||
69 | mlog(0, "ocfs2: node down event for %d\n", node_num); | 69 | trace_ocfs2_do_node_down(node_num); |
70 | 70 | ||
71 | if (!osb->cconn) { | 71 | if (!osb->cconn) { |
72 | /* | 72 | /* |
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 4068c6c4c6f6..177d3a6c2a5f 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c | |||
@@ -31,7 +31,6 @@ | |||
31 | 31 | ||
32 | #include <asm/byteorder.h> | 32 | #include <asm/byteorder.h> |
33 | 33 | ||
34 | #define MLOG_MASK_PREFIX ML_INODE | ||
35 | #include <cluster/masklog.h> | 34 | #include <cluster/masklog.h> |
36 | 35 | ||
37 | #include "ocfs2.h" | 36 | #include "ocfs2.h" |
@@ -53,6 +52,7 @@ | |||
53 | #include "uptodate.h" | 52 | #include "uptodate.h" |
54 | #include "xattr.h" | 53 | #include "xattr.h" |
55 | #include "refcounttree.h" | 54 | #include "refcounttree.h" |
55 | #include "ocfs2_trace.h" | ||
56 | 56 | ||
57 | #include "buffer_head_io.h" | 57 | #include "buffer_head_io.h" |
58 | 58 | ||
@@ -131,7 +131,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags, | |||
131 | struct super_block *sb = osb->sb; | 131 | struct super_block *sb = osb->sb; |
132 | struct ocfs2_find_inode_args args; | 132 | struct ocfs2_find_inode_args args; |
133 | 133 | ||
134 | mlog_entry("(blkno = %llu)\n", (unsigned long long)blkno); | 134 | trace_ocfs2_iget_begin((unsigned long long)blkno, flags, |
135 | sysfile_type); | ||
135 | 136 | ||
136 | /* Ok. By now we've either got the offsets passed to us by the | 137 | /* Ok. By now we've either got the offsets passed to us by the |
137 | * caller, or we just pulled them off the bh. Lets do some | 138 | * caller, or we just pulled them off the bh. Lets do some |
@@ -152,16 +153,16 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags, | |||
152 | /* inode was *not* in the inode cache. 2.6.x requires | 153 | /* inode was *not* in the inode cache. 2.6.x requires |
153 | * us to do our own read_inode call and unlock it | 154 | * us to do our own read_inode call and unlock it |
154 | * afterwards. */ | 155 | * afterwards. */ |
155 | if (inode && inode->i_state & I_NEW) { | ||
156 | mlog(0, "Inode was not in inode cache, reading it.\n"); | ||
157 | ocfs2_read_locked_inode(inode, &args); | ||
158 | unlock_new_inode(inode); | ||
159 | } | ||
160 | if (inode == NULL) { | 156 | if (inode == NULL) { |
161 | inode = ERR_PTR(-ENOMEM); | 157 | inode = ERR_PTR(-ENOMEM); |
162 | mlog_errno(PTR_ERR(inode)); | 158 | mlog_errno(PTR_ERR(inode)); |
163 | goto bail; | 159 | goto bail; |
164 | } | 160 | } |
161 | trace_ocfs2_iget5_locked(inode->i_state); | ||
162 | if (inode->i_state & I_NEW) { | ||
163 | ocfs2_read_locked_inode(inode, &args); | ||
164 | unlock_new_inode(inode); | ||
165 | } | ||
165 | if (is_bad_inode(inode)) { | 166 | if (is_bad_inode(inode)) { |
166 | iput(inode); | 167 | iput(inode); |
167 | inode = ERR_PTR(-ESTALE); | 168 | inode = ERR_PTR(-ESTALE); |
@@ -170,9 +171,8 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags, | |||
170 | 171 | ||
171 | bail: | 172 | bail: |
172 | if (!IS_ERR(inode)) { | 173 | if (!IS_ERR(inode)) { |
173 | mlog(0, "returning inode with number %llu\n", | 174 | trace_ocfs2_iget_end(inode, |
174 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | 175 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
175 | mlog_exit_ptr(inode); | ||
176 | } | 176 | } |
177 | 177 | ||
178 | return inode; | 178 | return inode; |
@@ -192,18 +192,17 @@ static int ocfs2_find_actor(struct inode *inode, void *opaque) | |||
192 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 192 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
193 | int ret = 0; | 193 | int ret = 0; |
194 | 194 | ||
195 | mlog_entry("(0x%p, %lu, 0x%p)\n", inode, inode->i_ino, opaque); | ||
196 | |||
197 | args = opaque; | 195 | args = opaque; |
198 | 196 | ||
199 | mlog_bug_on_msg(!inode, "No inode in find actor!\n"); | 197 | mlog_bug_on_msg(!inode, "No inode in find actor!\n"); |
200 | 198 | ||
199 | trace_ocfs2_find_actor(inode, inode->i_ino, opaque, args->fi_blkno); | ||
200 | |||
201 | if (oi->ip_blkno != args->fi_blkno) | 201 | if (oi->ip_blkno != args->fi_blkno) |
202 | goto bail; | 202 | goto bail; |
203 | 203 | ||
204 | ret = 1; | 204 | ret = 1; |
205 | bail: | 205 | bail: |
206 | mlog_exit(ret); | ||
207 | return ret; | 206 | return ret; |
208 | } | 207 | } |
209 | 208 | ||
@@ -218,8 +217,6 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque) | |||
218 | static struct lock_class_key ocfs2_quota_ip_alloc_sem_key, | 217 | static struct lock_class_key ocfs2_quota_ip_alloc_sem_key, |
219 | ocfs2_file_ip_alloc_sem_key; | 218 | ocfs2_file_ip_alloc_sem_key; |
220 | 219 | ||
221 | mlog_entry("inode = %p, opaque = %p\n", inode, opaque); | ||
222 | |||
223 | inode->i_ino = args->fi_ino; | 220 | inode->i_ino = args->fi_ino; |
224 | OCFS2_I(inode)->ip_blkno = args->fi_blkno; | 221 | OCFS2_I(inode)->ip_blkno = args->fi_blkno; |
225 | if (args->fi_sysfile_type != 0) | 222 | if (args->fi_sysfile_type != 0) |
@@ -235,7 +232,6 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque) | |||
235 | lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem, | 232 | lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem, |
236 | &ocfs2_file_ip_alloc_sem_key); | 233 | &ocfs2_file_ip_alloc_sem_key); |
237 | 234 | ||
238 | mlog_exit(0); | ||
239 | return 0; | 235 | return 0; |
240 | } | 236 | } |
241 | 237 | ||
@@ -246,9 +242,6 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, | |||
246 | struct ocfs2_super *osb; | 242 | struct ocfs2_super *osb; |
247 | int use_plocks = 1; | 243 | int use_plocks = 1; |
248 | 244 | ||
249 | mlog_entry("(0x%p, size:%llu)\n", inode, | ||
250 | (unsigned long long)le64_to_cpu(fe->i_size)); | ||
251 | |||
252 | sb = inode->i_sb; | 245 | sb = inode->i_sb; |
253 | osb = OCFS2_SB(sb); | 246 | osb = OCFS2_SB(sb); |
254 | 247 | ||
@@ -300,20 +293,20 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, | |||
300 | 293 | ||
301 | inode->i_nlink = ocfs2_read_links_count(fe); | 294 | inode->i_nlink = ocfs2_read_links_count(fe); |
302 | 295 | ||
296 | trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno, | ||
297 | le32_to_cpu(fe->i_flags)); | ||
303 | if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) { | 298 | if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) { |
304 | OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE; | 299 | OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE; |
305 | inode->i_flags |= S_NOQUOTA; | 300 | inode->i_flags |= S_NOQUOTA; |
306 | } | 301 | } |
307 | 302 | ||
308 | if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) { | 303 | if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) { |
309 | OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; | 304 | OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; |
310 | mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino); | ||
311 | } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) { | 305 | } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) { |
312 | OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; | 306 | OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; |
313 | } else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) { | 307 | } else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) { |
314 | inode->i_flags |= S_NOQUOTA; | 308 | inode->i_flags |= S_NOQUOTA; |
315 | } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) { | 309 | } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) { |
316 | mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino); | ||
317 | /* we can't actually hit this as read_inode can't | 310 | /* we can't actually hit this as read_inode can't |
318 | * handle superblocks today ;-) */ | 311 | * handle superblocks today ;-) */ |
319 | BUG(); | 312 | BUG(); |
@@ -381,7 +374,6 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, | |||
381 | if (S_ISDIR(inode->i_mode)) | 374 | if (S_ISDIR(inode->i_mode)) |
382 | ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv, | 375 | ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv, |
383 | OCFS2_RESV_FLAG_DIR); | 376 | OCFS2_RESV_FLAG_DIR); |
384 | mlog_exit_void(); | ||
385 | } | 377 | } |
386 | 378 | ||
387 | static int ocfs2_read_locked_inode(struct inode *inode, | 379 | static int ocfs2_read_locked_inode(struct inode *inode, |
@@ -394,8 +386,6 @@ static int ocfs2_read_locked_inode(struct inode *inode, | |||
394 | int status, can_lock; | 386 | int status, can_lock; |
395 | u32 generation = 0; | 387 | u32 generation = 0; |
396 | 388 | ||
397 | mlog_entry("(0x%p, 0x%p)\n", inode, args); | ||
398 | |||
399 | status = -EINVAL; | 389 | status = -EINVAL; |
400 | if (inode == NULL || inode->i_sb == NULL) { | 390 | if (inode == NULL || inode->i_sb == NULL) { |
401 | mlog(ML_ERROR, "bad inode\n"); | 391 | mlog(ML_ERROR, "bad inode\n"); |
@@ -443,6 +433,9 @@ static int ocfs2_read_locked_inode(struct inode *inode, | |||
443 | && !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) | 433 | && !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) |
444 | && !ocfs2_mount_local(osb); | 434 | && !ocfs2_mount_local(osb); |
445 | 435 | ||
436 | trace_ocfs2_read_locked_inode( | ||
437 | (unsigned long long)OCFS2_I(inode)->ip_blkno, can_lock); | ||
438 | |||
446 | /* | 439 | /* |
447 | * To maintain backwards compatibility with older versions of | 440 | * To maintain backwards compatibility with older versions of |
448 | * ocfs2-tools, we still store the generation value for system | 441 | * ocfs2-tools, we still store the generation value for system |
@@ -534,7 +527,6 @@ bail: | |||
534 | if (args && bh) | 527 | if (args && bh) |
535 | brelse(bh); | 528 | brelse(bh); |
536 | 529 | ||
537 | mlog_exit(status); | ||
538 | return status; | 530 | return status; |
539 | } | 531 | } |
540 | 532 | ||
@@ -551,8 +543,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, | |||
551 | struct ocfs2_dinode *fe; | 543 | struct ocfs2_dinode *fe; |
552 | handle_t *handle = NULL; | 544 | handle_t *handle = NULL; |
553 | 545 | ||
554 | mlog_entry_void(); | ||
555 | |||
556 | fe = (struct ocfs2_dinode *) fe_bh->b_data; | 546 | fe = (struct ocfs2_dinode *) fe_bh->b_data; |
557 | 547 | ||
558 | /* | 548 | /* |
@@ -600,7 +590,6 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, | |||
600 | out: | 590 | out: |
601 | if (handle) | 591 | if (handle) |
602 | ocfs2_commit_trans(osb, handle); | 592 | ocfs2_commit_trans(osb, handle); |
603 | mlog_exit(status); | ||
604 | return status; | 593 | return status; |
605 | } | 594 | } |
606 | 595 | ||
@@ -696,8 +685,6 @@ static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb, | |||
696 | 685 | ||
697 | spin_lock(&osb->osb_lock); | 686 | spin_lock(&osb->osb_lock); |
698 | if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) { | 687 | if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) { |
699 | mlog(0, "Recovery is happening on orphan dir %d, will skip " | ||
700 | "this inode\n", slot); | ||
701 | ret = -EDEADLK; | 688 | ret = -EDEADLK; |
702 | goto out; | 689 | goto out; |
703 | } | 690 | } |
@@ -706,6 +693,7 @@ static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb, | |||
706 | osb->osb_orphan_wipes[slot]++; | 693 | osb->osb_orphan_wipes[slot]++; |
707 | out: | 694 | out: |
708 | spin_unlock(&osb->osb_lock); | 695 | spin_unlock(&osb->osb_lock); |
696 | trace_ocfs2_check_orphan_recovery_state(slot, ret); | ||
709 | return ret; | 697 | return ret; |
710 | } | 698 | } |
711 | 699 | ||
@@ -816,6 +804,10 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode) | |||
816 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 804 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
817 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 805 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
818 | 806 | ||
807 | trace_ocfs2_inode_is_valid_to_delete(current, osb->dc_task, | ||
808 | (unsigned long long)oi->ip_blkno, | ||
809 | oi->ip_flags); | ||
810 | |||
819 | /* We shouldn't be getting here for the root directory | 811 | /* We shouldn't be getting here for the root directory |
820 | * inode.. */ | 812 | * inode.. */ |
821 | if (inode == osb->root_inode) { | 813 | if (inode == osb->root_inode) { |
@@ -828,11 +820,8 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode) | |||
828 | * have to skip deleting this guy. That's OK though because | 820 | * have to skip deleting this guy. That's OK though because |
829 | * the node who's doing the actual deleting should handle it | 821 | * the node who's doing the actual deleting should handle it |
830 | * anyway. */ | 822 | * anyway. */ |
831 | if (current == osb->dc_task) { | 823 | if (current == osb->dc_task) |
832 | mlog(0, "Skipping delete of %lu because we're currently " | ||
833 | "in downconvert\n", inode->i_ino); | ||
834 | goto bail; | 824 | goto bail; |
835 | } | ||
836 | 825 | ||
837 | spin_lock(&oi->ip_lock); | 826 | spin_lock(&oi->ip_lock); |
838 | /* OCFS2 *never* deletes system files. This should technically | 827 | /* OCFS2 *never* deletes system files. This should technically |
@@ -847,11 +836,8 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode) | |||
847 | /* If we have allowed wipe of this inode for another node, it | 836 | /* If we have allowed wipe of this inode for another node, it |
848 | * will be marked here so we can safely skip it. Recovery will | 837 | * will be marked here so we can safely skip it. Recovery will |
849 | * cleanup any inodes we might inadvertently skip here. */ | 838 | * cleanup any inodes we might inadvertently skip here. */ |
850 | if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) { | 839 | if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) |
851 | mlog(0, "Skipping delete of %lu because another node " | ||
852 | "has done this for us.\n", inode->i_ino); | ||
853 | goto bail_unlock; | 840 | goto bail_unlock; |
854 | } | ||
855 | 841 | ||
856 | ret = 1; | 842 | ret = 1; |
857 | bail_unlock: | 843 | bail_unlock: |
@@ -868,28 +854,27 @@ static int ocfs2_query_inode_wipe(struct inode *inode, | |||
868 | struct buffer_head *di_bh, | 854 | struct buffer_head *di_bh, |
869 | int *wipe) | 855 | int *wipe) |
870 | { | 856 | { |
871 | int status = 0; | 857 | int status = 0, reason = 0; |
872 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 858 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
873 | struct ocfs2_dinode *di; | 859 | struct ocfs2_dinode *di; |
874 | 860 | ||
875 | *wipe = 0; | 861 | *wipe = 0; |
876 | 862 | ||
863 | trace_ocfs2_query_inode_wipe_begin((unsigned long long)oi->ip_blkno, | ||
864 | inode->i_nlink); | ||
865 | |||
877 | /* While we were waiting for the cluster lock in | 866 | /* While we were waiting for the cluster lock in |
878 | * ocfs2_delete_inode, another node might have asked to delete | 867 | * ocfs2_delete_inode, another node might have asked to delete |
879 | * the inode. Recheck our flags to catch this. */ | 868 | * the inode. Recheck our flags to catch this. */ |
880 | if (!ocfs2_inode_is_valid_to_delete(inode)) { | 869 | if (!ocfs2_inode_is_valid_to_delete(inode)) { |
881 | mlog(0, "Skipping delete of %llu because flags changed\n", | 870 | reason = 1; |
882 | (unsigned long long)oi->ip_blkno); | ||
883 | goto bail; | 871 | goto bail; |
884 | } | 872 | } |
885 | 873 | ||
886 | /* Now that we have an up to date inode, we can double check | 874 | /* Now that we have an up to date inode, we can double check |
887 | * the link count. */ | 875 | * the link count. */ |
888 | if (inode->i_nlink) { | 876 | if (inode->i_nlink) |
889 | mlog(0, "Skipping delete of %llu because nlink = %u\n", | ||
890 | (unsigned long long)oi->ip_blkno, inode->i_nlink); | ||
891 | goto bail; | 877 | goto bail; |
892 | } | ||
893 | 878 | ||
894 | /* Do some basic inode verification... */ | 879 | /* Do some basic inode verification... */ |
895 | di = (struct ocfs2_dinode *) di_bh->b_data; | 880 | di = (struct ocfs2_dinode *) di_bh->b_data; |
@@ -904,9 +889,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode, | |||
904 | * ORPHANED_FL not. | 889 | * ORPHANED_FL not. |
905 | */ | 890 | */ |
906 | if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) { | 891 | if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) { |
907 | mlog(0, "Reflinked inode %llu is no longer orphaned. " | 892 | reason = 2; |
908 | "it shouldn't be deleted\n", | ||
909 | (unsigned long long)oi->ip_blkno); | ||
910 | goto bail; | 893 | goto bail; |
911 | } | 894 | } |
912 | 895 | ||
@@ -943,8 +926,7 @@ static int ocfs2_query_inode_wipe(struct inode *inode, | |||
943 | status = ocfs2_try_open_lock(inode, 1); | 926 | status = ocfs2_try_open_lock(inode, 1); |
944 | if (status == -EAGAIN) { | 927 | if (status == -EAGAIN) { |
945 | status = 0; | 928 | status = 0; |
946 | mlog(0, "Skipping delete of %llu because it is in use on " | 929 | reason = 3; |
947 | "other nodes\n", (unsigned long long)oi->ip_blkno); | ||
948 | goto bail; | 930 | goto bail; |
949 | } | 931 | } |
950 | if (status < 0) { | 932 | if (status < 0) { |
@@ -953,11 +935,10 @@ static int ocfs2_query_inode_wipe(struct inode *inode, | |||
953 | } | 935 | } |
954 | 936 | ||
955 | *wipe = 1; | 937 | *wipe = 1; |
956 | mlog(0, "Inode %llu is ok to wipe from orphan dir %u\n", | 938 | trace_ocfs2_query_inode_wipe_succ(le16_to_cpu(di->i_orphaned_slot)); |
957 | (unsigned long long)oi->ip_blkno, | ||
958 | le16_to_cpu(di->i_orphaned_slot)); | ||
959 | 939 | ||
960 | bail: | 940 | bail: |
941 | trace_ocfs2_query_inode_wipe_end(status, reason); | ||
961 | return status; | 942 | return status; |
962 | } | 943 | } |
963 | 944 | ||
@@ -967,8 +948,8 @@ bail: | |||
967 | static void ocfs2_cleanup_delete_inode(struct inode *inode, | 948 | static void ocfs2_cleanup_delete_inode(struct inode *inode, |
968 | int sync_data) | 949 | int sync_data) |
969 | { | 950 | { |
970 | mlog(0, "Cleanup inode %llu, sync = %d\n", | 951 | trace_ocfs2_cleanup_delete_inode( |
971 | (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); | 952 | (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); |
972 | if (sync_data) | 953 | if (sync_data) |
973 | write_inode_now(inode, 1); | 954 | write_inode_now(inode, 1); |
974 | truncate_inode_pages(&inode->i_data, 0); | 955 | truncate_inode_pages(&inode->i_data, 0); |
@@ -980,15 +961,15 @@ static void ocfs2_delete_inode(struct inode *inode) | |||
980 | sigset_t oldset; | 961 | sigset_t oldset; |
981 | struct buffer_head *di_bh = NULL; | 962 | struct buffer_head *di_bh = NULL; |
982 | 963 | ||
983 | mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); | 964 | trace_ocfs2_delete_inode(inode->i_ino, |
965 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
966 | is_bad_inode(inode)); | ||
984 | 967 | ||
985 | /* When we fail in read_inode() we mark inode as bad. The second test | 968 | /* When we fail in read_inode() we mark inode as bad. The second test |
986 | * catches the case when inode allocation fails before allocating | 969 | * catches the case when inode allocation fails before allocating |
987 | * a block for inode. */ | 970 | * a block for inode. */ |
988 | if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) { | 971 | if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) |
989 | mlog(0, "Skipping delete of bad inode\n"); | ||
990 | goto bail; | 972 | goto bail; |
991 | } | ||
992 | 973 | ||
993 | dquot_initialize(inode); | 974 | dquot_initialize(inode); |
994 | 975 | ||
@@ -1080,7 +1061,7 @@ bail_unlock_nfs_sync: | |||
1080 | bail_unblock: | 1061 | bail_unblock: |
1081 | ocfs2_unblock_signals(&oldset); | 1062 | ocfs2_unblock_signals(&oldset); |
1082 | bail: | 1063 | bail: |
1083 | mlog_exit_void(); | 1064 | return; |
1084 | } | 1065 | } |
1085 | 1066 | ||
1086 | static void ocfs2_clear_inode(struct inode *inode) | 1067 | static void ocfs2_clear_inode(struct inode *inode) |
@@ -1088,11 +1069,9 @@ static void ocfs2_clear_inode(struct inode *inode) | |||
1088 | int status; | 1069 | int status; |
1089 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 1070 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
1090 | 1071 | ||
1091 | mlog_entry_void(); | ||
1092 | |||
1093 | end_writeback(inode); | 1072 | end_writeback(inode); |
1094 | mlog(0, "Clearing inode: %llu, nlink = %u\n", | 1073 | trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno, |
1095 | (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink); | 1074 | inode->i_nlink); |
1096 | 1075 | ||
1097 | mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, | 1076 | mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, |
1098 | "Inode=%lu\n", inode->i_ino); | 1077 | "Inode=%lu\n", inode->i_ino); |
@@ -1181,8 +1160,6 @@ static void ocfs2_clear_inode(struct inode *inode) | |||
1181 | */ | 1160 | */ |
1182 | jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal, | 1161 | jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal, |
1183 | &oi->ip_jinode); | 1162 | &oi->ip_jinode); |
1184 | |||
1185 | mlog_exit_void(); | ||
1186 | } | 1163 | } |
1187 | 1164 | ||
1188 | void ocfs2_evict_inode(struct inode *inode) | 1165 | void ocfs2_evict_inode(struct inode *inode) |
@@ -1204,17 +1181,14 @@ int ocfs2_drop_inode(struct inode *inode) | |||
1204 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 1181 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
1205 | int res; | 1182 | int res; |
1206 | 1183 | ||
1207 | mlog_entry_void(); | 1184 | trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno, |
1208 | 1185 | inode->i_nlink, oi->ip_flags); | |
1209 | mlog(0, "Drop inode %llu, nlink = %u, ip_flags = 0x%x\n", | ||
1210 | (unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags); | ||
1211 | 1186 | ||
1212 | if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) | 1187 | if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) |
1213 | res = 1; | 1188 | res = 1; |
1214 | else | 1189 | else |
1215 | res = generic_drop_inode(inode); | 1190 | res = generic_drop_inode(inode); |
1216 | 1191 | ||
1217 | mlog_exit_void(); | ||
1218 | return res; | 1192 | return res; |
1219 | } | 1193 | } |
1220 | 1194 | ||
@@ -1226,11 +1200,11 @@ int ocfs2_inode_revalidate(struct dentry *dentry) | |||
1226 | struct inode *inode = dentry->d_inode; | 1200 | struct inode *inode = dentry->d_inode; |
1227 | int status = 0; | 1201 | int status = 0; |
1228 | 1202 | ||
1229 | mlog_entry("(inode = 0x%p, ino = %llu)\n", inode, | 1203 | trace_ocfs2_inode_revalidate(inode, |
1230 | inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL); | 1204 | inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL, |
1205 | inode ? (unsigned long long)OCFS2_I(inode)->ip_flags : 0); | ||
1231 | 1206 | ||
1232 | if (!inode) { | 1207 | if (!inode) { |
1233 | mlog(0, "eep, no inode!\n"); | ||
1234 | status = -ENOENT; | 1208 | status = -ENOENT; |
1235 | goto bail; | 1209 | goto bail; |
1236 | } | 1210 | } |
@@ -1238,7 +1212,6 @@ int ocfs2_inode_revalidate(struct dentry *dentry) | |||
1238 | spin_lock(&OCFS2_I(inode)->ip_lock); | 1212 | spin_lock(&OCFS2_I(inode)->ip_lock); |
1239 | if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { | 1213 | if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { |
1240 | spin_unlock(&OCFS2_I(inode)->ip_lock); | 1214 | spin_unlock(&OCFS2_I(inode)->ip_lock); |
1241 | mlog(0, "inode deleted!\n"); | ||
1242 | status = -ENOENT; | 1215 | status = -ENOENT; |
1243 | goto bail; | 1216 | goto bail; |
1244 | } | 1217 | } |
@@ -1254,8 +1227,6 @@ int ocfs2_inode_revalidate(struct dentry *dentry) | |||
1254 | } | 1227 | } |
1255 | ocfs2_inode_unlock(inode, 0); | 1228 | ocfs2_inode_unlock(inode, 0); |
1256 | bail: | 1229 | bail: |
1257 | mlog_exit(status); | ||
1258 | |||
1259 | return status; | 1230 | return status; |
1260 | } | 1231 | } |
1261 | 1232 | ||
@@ -1271,8 +1242,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle, | |||
1271 | int status; | 1242 | int status; |
1272 | struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data; | 1243 | struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data; |
1273 | 1244 | ||
1274 | mlog_entry("(inode %llu)\n", | 1245 | trace_ocfs2_mark_inode_dirty((unsigned long long)OCFS2_I(inode)->ip_blkno); |
1275 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
1276 | 1246 | ||
1277 | status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, | 1247 | status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, |
1278 | OCFS2_JOURNAL_ACCESS_WRITE); | 1248 | OCFS2_JOURNAL_ACCESS_WRITE); |
@@ -1302,7 +1272,6 @@ int ocfs2_mark_inode_dirty(handle_t *handle, | |||
1302 | 1272 | ||
1303 | ocfs2_journal_dirty(handle, bh); | 1273 | ocfs2_journal_dirty(handle, bh); |
1304 | leave: | 1274 | leave: |
1305 | mlog_exit(status); | ||
1306 | return status; | 1275 | return status; |
1307 | } | 1276 | } |
1308 | 1277 | ||
@@ -1345,8 +1314,7 @@ int ocfs2_validate_inode_block(struct super_block *sb, | |||
1345 | int rc; | 1314 | int rc; |
1346 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; | 1315 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; |
1347 | 1316 | ||
1348 | mlog(0, "Validating dinode %llu\n", | 1317 | trace_ocfs2_validate_inode_block((unsigned long long)bh->b_blocknr); |
1349 | (unsigned long long)bh->b_blocknr); | ||
1350 | 1318 | ||
1351 | BUG_ON(!buffer_uptodate(bh)); | 1319 | BUG_ON(!buffer_uptodate(bh)); |
1352 | 1320 | ||
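The fs/ocfs2/inode.c hunks above collapse the per-case mlog(0, ...) "skipping delete" messages in ocfs2_query_inode_wipe() into a single numeric reason that trace_ocfs2_query_inode_wipe_end() reports next to the status. The mapping below is taken straight from the deleted log strings; the enum is only a reading aid, and its identifiers are invented here rather than taken from the kernel tree.

/* Hypothetical names; the numeric values come from the hunks above. */
enum ocfs2_wipe_skip_reason {
        WIPE_REASON_NONE          = 0,  /* wipe allowed, an error status, or i_nlink != 0
                                         * (the link count already appears in the _begin event) */
        WIPE_REASON_FLAGS_CHANGED = 1,  /* ocfs2_inode_is_valid_to_delete() refused */
        WIPE_REASON_REFLINKED     = 2,  /* reflinked inode is no longer orphaned */
        WIPE_REASON_IN_USE        = 3,  /* ocfs2_try_open_lock() returned -EAGAIN: in use on another node */
};

With the ocfs2 trace events enabled, the _begin/_end pair carries the same information the removed mlog(0, ...) lines used to print.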
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 09de77ce002a..8f13c5989eae 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/mount.h> | 9 | #include <linux/mount.h> |
10 | #include <linux/compat.h> | 10 | #include <linux/compat.h> |
11 | 11 | ||
12 | #define MLOG_MASK_PREFIX ML_INODE | ||
13 | #include <cluster/masklog.h> | 12 | #include <cluster/masklog.h> |
14 | 13 | ||
15 | #include "ocfs2.h" | 14 | #include "ocfs2.h" |
@@ -46,6 +45,22 @@ static inline void __o2info_set_request_error(struct ocfs2_info_request *kreq, | |||
46 | #define o2info_set_request_error(a, b) \ | 45 | #define o2info_set_request_error(a, b) \ |
47 | __o2info_set_request_error((struct ocfs2_info_request *)&(a), b) | 46 | __o2info_set_request_error((struct ocfs2_info_request *)&(a), b) |
48 | 47 | ||
48 | static inline void __o2info_set_request_filled(struct ocfs2_info_request *req) | ||
49 | { | ||
50 | req->ir_flags |= OCFS2_INFO_FL_FILLED; | ||
51 | } | ||
52 | |||
53 | #define o2info_set_request_filled(a) \ | ||
54 | __o2info_set_request_filled((struct ocfs2_info_request *)&(a)) | ||
55 | |||
56 | static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req) | ||
57 | { | ||
58 | req->ir_flags &= ~OCFS2_INFO_FL_FILLED; | ||
59 | } | ||
60 | |||
61 | #define o2info_clear_request_filled(a) \ | ||
62 | __o2info_clear_request_filled((struct ocfs2_info_request *)&(a)) | ||
63 | |||
49 | static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) | 64 | static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) |
50 | { | 65 | { |
51 | int status; | 66 | int status; |
@@ -59,7 +74,6 @@ static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) | |||
59 | *flags = OCFS2_I(inode)->ip_attr; | 74 | *flags = OCFS2_I(inode)->ip_attr; |
60 | ocfs2_inode_unlock(inode, 0); | 75 | ocfs2_inode_unlock(inode, 0); |
61 | 76 | ||
62 | mlog_exit(status); | ||
63 | return status; | 77 | return status; |
64 | } | 78 | } |
65 | 79 | ||
@@ -125,7 +139,6 @@ bail: | |||
125 | 139 | ||
126 | brelse(bh); | 140 | brelse(bh); |
127 | 141 | ||
128 | mlog_exit(status); | ||
129 | return status; | 142 | return status; |
130 | } | 143 | } |
131 | 144 | ||
@@ -139,7 +152,8 @@ int ocfs2_info_handle_blocksize(struct inode *inode, | |||
139 | goto bail; | 152 | goto bail; |
140 | 153 | ||
141 | oib.ib_blocksize = inode->i_sb->s_blocksize; | 154 | oib.ib_blocksize = inode->i_sb->s_blocksize; |
142 | oib.ib_req.ir_flags |= OCFS2_INFO_FL_FILLED; | 155 | |
156 | o2info_set_request_filled(oib); | ||
143 | 157 | ||
144 | if (o2info_to_user(oib, req)) | 158 | if (o2info_to_user(oib, req)) |
145 | goto bail; | 159 | goto bail; |
@@ -163,7 +177,8 @@ int ocfs2_info_handle_clustersize(struct inode *inode, | |||
163 | goto bail; | 177 | goto bail; |
164 | 178 | ||
165 | oic.ic_clustersize = osb->s_clustersize; | 179 | oic.ic_clustersize = osb->s_clustersize; |
166 | oic.ic_req.ir_flags |= OCFS2_INFO_FL_FILLED; | 180 | |
181 | o2info_set_request_filled(oic); | ||
167 | 182 | ||
168 | if (o2info_to_user(oic, req)) | 183 | if (o2info_to_user(oic, req)) |
169 | goto bail; | 184 | goto bail; |
@@ -187,7 +202,8 @@ int ocfs2_info_handle_maxslots(struct inode *inode, | |||
187 | goto bail; | 202 | goto bail; |
188 | 203 | ||
189 | oim.im_max_slots = osb->max_slots; | 204 | oim.im_max_slots = osb->max_slots; |
190 | oim.im_req.ir_flags |= OCFS2_INFO_FL_FILLED; | 205 | |
206 | o2info_set_request_filled(oim); | ||
191 | 207 | ||
192 | if (o2info_to_user(oim, req)) | 208 | if (o2info_to_user(oim, req)) |
193 | goto bail; | 209 | goto bail; |
@@ -211,7 +227,8 @@ int ocfs2_info_handle_label(struct inode *inode, | |||
211 | goto bail; | 227 | goto bail; |
212 | 228 | ||
213 | memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); | 229 | memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); |
214 | oil.il_req.ir_flags |= OCFS2_INFO_FL_FILLED; | 230 | |
231 | o2info_set_request_filled(oil); | ||
215 | 232 | ||
216 | if (o2info_to_user(oil, req)) | 233 | if (o2info_to_user(oil, req)) |
217 | goto bail; | 234 | goto bail; |
@@ -235,7 +252,8 @@ int ocfs2_info_handle_uuid(struct inode *inode, | |||
235 | goto bail; | 252 | goto bail; |
236 | 253 | ||
237 | memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); | 254 | memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); |
238 | oiu.iu_req.ir_flags |= OCFS2_INFO_FL_FILLED; | 255 | |
256 | o2info_set_request_filled(oiu); | ||
239 | 257 | ||
240 | if (o2info_to_user(oiu, req)) | 258 | if (o2info_to_user(oiu, req)) |
241 | goto bail; | 259 | goto bail; |
@@ -261,7 +279,8 @@ int ocfs2_info_handle_fs_features(struct inode *inode, | |||
261 | oif.if_compat_features = osb->s_feature_compat; | 279 | oif.if_compat_features = osb->s_feature_compat; |
262 | oif.if_incompat_features = osb->s_feature_incompat; | 280 | oif.if_incompat_features = osb->s_feature_incompat; |
263 | oif.if_ro_compat_features = osb->s_feature_ro_compat; | 281 | oif.if_ro_compat_features = osb->s_feature_ro_compat; |
264 | oif.if_req.ir_flags |= OCFS2_INFO_FL_FILLED; | 282 | |
283 | o2info_set_request_filled(oif); | ||
265 | 284 | ||
266 | if (o2info_to_user(oif, req)) | 285 | if (o2info_to_user(oif, req)) |
267 | goto bail; | 286 | goto bail; |
@@ -286,7 +305,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode, | |||
286 | 305 | ||
287 | oij.ij_journal_size = osb->journal->j_inode->i_size; | 306 | oij.ij_journal_size = osb->journal->j_inode->i_size; |
288 | 307 | ||
289 | oij.ij_req.ir_flags |= OCFS2_INFO_FL_FILLED; | 308 | o2info_set_request_filled(oij); |
290 | 309 | ||
291 | if (o2info_to_user(oij, req)) | 310 | if (o2info_to_user(oij, req)) |
292 | goto bail; | 311 | goto bail; |
@@ -308,7 +327,7 @@ int ocfs2_info_handle_unknown(struct inode *inode, | |||
308 | if (o2info_from_user(oir, req)) | 327 | if (o2info_from_user(oir, req)) |
309 | goto bail; | 328 | goto bail; |
310 | 329 | ||
311 | oir.ir_flags &= ~OCFS2_INFO_FL_FILLED; | 330 | o2info_clear_request_filled(oir); |
312 | 331 | ||
313 | if (o2info_to_user(oir, req)) | 332 | if (o2info_to_user(oir, req)) |
314 | goto bail; | 333 | goto bail; |
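The new o2info_set_request_filled()/o2info_clear_request_filled() helpers in fs/ocfs2/ioctl.c centralize the OCFS2_INFO_FL_FILLED bit manipulation that every ocfs2_info_handle_*() handler previously open-coded on its own ir_flags field. The wrapper macros cast the specific request (oib, oic, oim, oil, oiu, oif, oij) to struct ocfs2_info_request *, which relies on the generic request header being embedded as the first member of each specific request. The sketch below illustrates that layout; the field list is an assumption modelled on the names visible in the hunks (ib_req, ib_blocksize), not a copy of ocfs2_ioctl.h.

#include <linux/types.h>

/* Generic header shared by all OCFS2_IOC_INFO requests (illustrative layout). */
struct ocfs2_info_request {
        __u32 ir_magic;
        __u32 ir_code;
        __u32 ir_size;
        __u32 ir_flags;         /* OCFS2_INFO_FL_FILLED is set/cleared here */
};

/* A specific request embeds the header as its first member, so
 * (struct ocfs2_info_request *)&oib points at the header itself. */
struct ocfs2_info_blocksize {
        struct ocfs2_info_request ib_req;
        __u32 ib_blocksize;
        __u32 ib_reserved;      /* assumed reserved/padding field */
};

Because the header sits at offset zero, one pair of helpers can mark any request type as filled or not filled, which is what lets the handlers above drop their individual "ir_flags |= OCFS2_INFO_FL_FILLED" lines.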
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index faa2303dbf0a..dcc2d9327150 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/time.h> | 31 | #include <linux/time.h> |
32 | #include <linux/random.h> | 32 | #include <linux/random.h> |
33 | 33 | ||
34 | #define MLOG_MASK_PREFIX ML_JOURNAL | ||
35 | #include <cluster/masklog.h> | 34 | #include <cluster/masklog.h> |
36 | 35 | ||
37 | #include "ocfs2.h" | 36 | #include "ocfs2.h" |
@@ -52,6 +51,7 @@ | |||
52 | #include "quota.h" | 51 | #include "quota.h" |
53 | 52 | ||
54 | #include "buffer_head_io.h" | 53 | #include "buffer_head_io.h" |
54 | #include "ocfs2_trace.h" | ||
55 | 55 | ||
56 | DEFINE_SPINLOCK(trans_inc_lock); | 56 | DEFINE_SPINLOCK(trans_inc_lock); |
57 | 57 | ||
@@ -303,16 +303,15 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb) | |||
303 | unsigned int flushed; | 303 | unsigned int flushed; |
304 | struct ocfs2_journal *journal = NULL; | 304 | struct ocfs2_journal *journal = NULL; |
305 | 305 | ||
306 | mlog_entry_void(); | ||
307 | |||
308 | journal = osb->journal; | 306 | journal = osb->journal; |
309 | 307 | ||
310 | /* Flush all pending commits and checkpoint the journal. */ | 308 | /* Flush all pending commits and checkpoint the journal. */ |
311 | down_write(&journal->j_trans_barrier); | 309 | down_write(&journal->j_trans_barrier); |
312 | 310 | ||
313 | if (atomic_read(&journal->j_num_trans) == 0) { | 311 | flushed = atomic_read(&journal->j_num_trans); |
312 | trace_ocfs2_commit_cache_begin(flushed); | ||
313 | if (flushed == 0) { | ||
314 | up_write(&journal->j_trans_barrier); | 314 | up_write(&journal->j_trans_barrier); |
315 | mlog(0, "No transactions for me to flush!\n"); | ||
316 | goto finally; | 315 | goto finally; |
317 | } | 316 | } |
318 | 317 | ||
@@ -331,13 +330,11 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb) | |||
331 | atomic_set(&journal->j_num_trans, 0); | 330 | atomic_set(&journal->j_num_trans, 0); |
332 | up_write(&journal->j_trans_barrier); | 331 | up_write(&journal->j_trans_barrier); |
333 | 332 | ||
334 | mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n", | 333 | trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed); |
335 | journal->j_trans_id, flushed); | ||
336 | 334 | ||
337 | ocfs2_wake_downconvert_thread(osb); | 335 | ocfs2_wake_downconvert_thread(osb); |
338 | wake_up(&journal->j_checkpointed); | 336 | wake_up(&journal->j_checkpointed); |
339 | finally: | 337 | finally: |
340 | mlog_exit(status); | ||
341 | return status; | 338 | return status; |
342 | } | 339 | } |
343 | 340 | ||
@@ -425,9 +422,8 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks) | |||
425 | return 0; | 422 | return 0; |
426 | 423 | ||
427 | old_nblocks = handle->h_buffer_credits; | 424 | old_nblocks = handle->h_buffer_credits; |
428 | mlog_entry_void(); | ||
429 | 425 | ||
430 | mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); | 426 | trace_ocfs2_extend_trans(old_nblocks, nblocks); |
431 | 427 | ||
432 | #ifdef CONFIG_OCFS2_DEBUG_FS | 428 | #ifdef CONFIG_OCFS2_DEBUG_FS |
433 | status = 1; | 429 | status = 1; |
@@ -440,9 +436,7 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks) | |||
440 | #endif | 436 | #endif |
441 | 437 | ||
442 | if (status > 0) { | 438 | if (status > 0) { |
443 | mlog(0, | 439 | trace_ocfs2_extend_trans_restart(old_nblocks + nblocks); |
444 | "jbd2_journal_extend failed, trying " | ||
445 | "jbd2_journal_restart\n"); | ||
446 | status = jbd2_journal_restart(handle, | 440 | status = jbd2_journal_restart(handle, |
447 | old_nblocks + nblocks); | 441 | old_nblocks + nblocks); |
448 | if (status < 0) { | 442 | if (status < 0) { |
@@ -453,8 +447,6 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks) | |||
453 | 447 | ||
454 | status = 0; | 448 | status = 0; |
455 | bail: | 449 | bail: |
456 | |||
457 | mlog_exit(status); | ||
458 | return status; | 450 | return status; |
459 | } | 451 | } |
460 | 452 | ||
@@ -622,12 +614,9 @@ static int __ocfs2_journal_access(handle_t *handle, | |||
622 | BUG_ON(!handle); | 614 | BUG_ON(!handle); |
623 | BUG_ON(!bh); | 615 | BUG_ON(!bh); |
624 | 616 | ||
625 | mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", | 617 | trace_ocfs2_journal_access( |
626 | (unsigned long long)bh->b_blocknr, type, | 618 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
627 | (type == OCFS2_JOURNAL_ACCESS_CREATE) ? | 619 | (unsigned long long)bh->b_blocknr, type, bh->b_size); |
628 | "OCFS2_JOURNAL_ACCESS_CREATE" : | ||
629 | "OCFS2_JOURNAL_ACCESS_WRITE", | ||
630 | bh->b_size); | ||
631 | 620 | ||
632 | /* we can safely remove this assertion after testing. */ | 621 | /* we can safely remove this assertion after testing. */ |
633 | if (!buffer_uptodate(bh)) { | 622 | if (!buffer_uptodate(bh)) { |
@@ -668,7 +657,6 @@ static int __ocfs2_journal_access(handle_t *handle, | |||
668 | mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", | 657 | mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", |
669 | status, type); | 658 | status, type); |
670 | 659 | ||
671 | mlog_exit(status); | ||
672 | return status; | 660 | return status; |
673 | } | 661 | } |
674 | 662 | ||
@@ -737,13 +725,10 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh) | |||
737 | { | 725 | { |
738 | int status; | 726 | int status; |
739 | 727 | ||
740 | mlog_entry("(bh->b_blocknr=%llu)\n", | 728 | trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr); |
741 | (unsigned long long)bh->b_blocknr); | ||
742 | 729 | ||
743 | status = jbd2_journal_dirty_metadata(handle, bh); | 730 | status = jbd2_journal_dirty_metadata(handle, bh); |
744 | BUG_ON(status); | 731 | BUG_ON(status); |
745 | |||
746 | mlog_exit_void(); | ||
747 | } | 732 | } |
748 | 733 | ||
749 | #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) | 734 | #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) |
@@ -775,8 +760,6 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) | |||
775 | struct ocfs2_super *osb; | 760 | struct ocfs2_super *osb; |
776 | int inode_lock = 0; | 761 | int inode_lock = 0; |
777 | 762 | ||
778 | mlog_entry_void(); | ||
779 | |||
780 | BUG_ON(!journal); | 763 | BUG_ON(!journal); |
781 | 764 | ||
782 | osb = journal->j_osb; | 765 | osb = journal->j_osb; |
@@ -820,10 +803,9 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) | |||
820 | goto done; | 803 | goto done; |
821 | } | 804 | } |
822 | 805 | ||
823 | mlog(0, "inode->i_size = %lld\n", inode->i_size); | 806 | trace_ocfs2_journal_init(inode->i_size, |
824 | mlog(0, "inode->i_blocks = %llu\n", | 807 | (unsigned long long)inode->i_blocks, |
825 | (unsigned long long)inode->i_blocks); | 808 | OCFS2_I(inode)->ip_clusters); |
826 | mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); | ||
827 | 809 | ||
828 | /* call the kernel's journal init function now | 810 | /* call the kernel's journal init function now |
829 | j_journal = jbd2_journal_init_inode(inode); | 811 | j_journal = jbd2_journal_init_inode(inode); |
@@ -833,8 +815,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) | |||
833 | goto done; | 815 | goto done; |
834 | } | 816 | } |
835 | 817 | ||
836 | mlog(0, "Returned from jbd2_journal_init_inode\n"); | 818 | trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen); |
837 | mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen); | ||
838 | 819 | ||
839 | *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) & | 820 | *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) & |
840 | OCFS2_JOURNAL_DIRTY_FL); | 821 | OCFS2_JOURNAL_DIRTY_FL); |
@@ -859,7 +840,6 @@ done: | |||
859 | } | 840 | } |
860 | } | 841 | } |
861 | 842 | ||
862 | mlog_exit(status); | ||
863 | return status; | 843 | return status; |
864 | } | 844 | } |
865 | 845 | ||
@@ -882,8 +862,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, | |||
882 | struct buffer_head *bh = journal->j_bh; | 862 | struct buffer_head *bh = journal->j_bh; |
883 | struct ocfs2_dinode *fe; | 863 | struct ocfs2_dinode *fe; |
884 | 864 | ||
885 | mlog_entry_void(); | ||
886 | |||
887 | fe = (struct ocfs2_dinode *)bh->b_data; | 865 | fe = (struct ocfs2_dinode *)bh->b_data; |
888 | 866 | ||
889 | /* The journal bh on the osb always comes from ocfs2_journal_init() | 867 | /* The journal bh on the osb always comes from ocfs2_journal_init() |
@@ -906,7 +884,6 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, | |||
906 | if (status < 0) | 884 | if (status < 0) |
907 | mlog_errno(status); | 885 | mlog_errno(status); |
908 | 886 | ||
909 | mlog_exit(status); | ||
910 | return status; | 887 | return status; |
911 | } | 888 | } |
912 | 889 | ||
@@ -921,8 +898,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb) | |||
921 | struct inode *inode = NULL; | 898 | struct inode *inode = NULL; |
922 | int num_running_trans = 0; | 899 | int num_running_trans = 0; |
923 | 900 | ||
924 | mlog_entry_void(); | ||
925 | |||
926 | BUG_ON(!osb); | 901 | BUG_ON(!osb); |
927 | 902 | ||
928 | journal = osb->journal; | 903 | journal = osb->journal; |
@@ -939,10 +914,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb) | |||
939 | BUG(); | 914 | BUG(); |
940 | 915 | ||
941 | num_running_trans = atomic_read(&(osb->journal->j_num_trans)); | 916 | num_running_trans = atomic_read(&(osb->journal->j_num_trans)); |
942 | if (num_running_trans > 0) | 917 | trace_ocfs2_journal_shutdown(num_running_trans); |
943 | mlog(0, "Shutting down journal: must wait on %d " | ||
944 | "running transactions!\n", | ||
945 | num_running_trans); | ||
946 | 918 | ||
947 | /* Do a commit_cache here. It will flush our journal, *and* | 919 | /* Do a commit_cache here. It will flush our journal, *and* |
948 | * release any locks that are still held. | 920 | * release any locks that are still held. |
@@ -955,7 +927,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb) | |||
955 | * completely destroy the journal. */ | 927 | * completely destroy the journal. */ |
956 | if (osb->commit_task) { | 928 | if (osb->commit_task) { |
957 | /* Wait for the commit thread */ | 929 | /* Wait for the commit thread */ |
958 | mlog(0, "Waiting for ocfs2commit to exit....\n"); | 930 | trace_ocfs2_journal_shutdown_wait(osb->commit_task); |
959 | kthread_stop(osb->commit_task); | 931 | kthread_stop(osb->commit_task); |
960 | osb->commit_task = NULL; | 932 | osb->commit_task = NULL; |
961 | } | 933 | } |
@@ -998,7 +970,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb) | |||
998 | done: | 970 | done: |
999 | if (inode) | 971 | if (inode) |
1000 | iput(inode); | 972 | iput(inode); |
1001 | mlog_exit_void(); | ||
1002 | } | 973 | } |
1003 | 974 | ||
1004 | static void ocfs2_clear_journal_error(struct super_block *sb, | 975 | static void ocfs2_clear_journal_error(struct super_block *sb, |
@@ -1024,8 +995,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed) | |||
1024 | int status = 0; | 995 | int status = 0; |
1025 | struct ocfs2_super *osb; | 996 | struct ocfs2_super *osb; |
1026 | 997 | ||
1027 | mlog_entry_void(); | ||
1028 | |||
1029 | BUG_ON(!journal); | 998 | BUG_ON(!journal); |
1030 | 999 | ||
1031 | osb = journal->j_osb; | 1000 | osb = journal->j_osb; |
@@ -1059,7 +1028,6 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed) | |||
1059 | osb->commit_task = NULL; | 1028 | osb->commit_task = NULL; |
1060 | 1029 | ||
1061 | done: | 1030 | done: |
1062 | mlog_exit(status); | ||
1063 | return status; | 1031 | return status; |
1064 | } | 1032 | } |
1065 | 1033 | ||
@@ -1070,8 +1038,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full) | |||
1070 | { | 1038 | { |
1071 | int status; | 1039 | int status; |
1072 | 1040 | ||
1073 | mlog_entry_void(); | ||
1074 | |||
1075 | BUG_ON(!journal); | 1041 | BUG_ON(!journal); |
1076 | 1042 | ||
1077 | status = jbd2_journal_wipe(journal->j_journal, full); | 1043 | status = jbd2_journal_wipe(journal->j_journal, full); |
@@ -1085,7 +1051,6 @@ int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full) | |||
1085 | mlog_errno(status); | 1051 | mlog_errno(status); |
1086 | 1052 | ||
1087 | bail: | 1053 | bail: |
1088 | mlog_exit(status); | ||
1089 | return status; | 1054 | return status; |
1090 | } | 1055 | } |
1091 | 1056 | ||
@@ -1124,8 +1089,6 @@ static int ocfs2_force_read_journal(struct inode *inode) | |||
1124 | #define CONCURRENT_JOURNAL_FILL 32ULL | 1089 | #define CONCURRENT_JOURNAL_FILL 32ULL |
1125 | struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; | 1090 | struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; |
1126 | 1091 | ||
1127 | mlog_entry_void(); | ||
1128 | |||
1129 | memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); | 1092 | memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); |
1130 | 1093 | ||
1131 | num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size); | 1094 | num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size); |
@@ -1161,7 +1124,6 @@ static int ocfs2_force_read_journal(struct inode *inode) | |||
1161 | bail: | 1124 | bail: |
1162 | for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) | 1125 | for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) |
1163 | brelse(bhs[i]); | 1126 | brelse(bhs[i]); |
1164 | mlog_exit(status); | ||
1165 | return status; | 1127 | return status; |
1166 | } | 1128 | } |
1167 | 1129 | ||
@@ -1185,7 +1147,7 @@ struct ocfs2_la_recovery_item { | |||
1185 | */ | 1147 | */ |
1186 | void ocfs2_complete_recovery(struct work_struct *work) | 1148 | void ocfs2_complete_recovery(struct work_struct *work) |
1187 | { | 1149 | { |
1188 | int ret; | 1150 | int ret = 0; |
1189 | struct ocfs2_journal *journal = | 1151 | struct ocfs2_journal *journal = |
1190 | container_of(work, struct ocfs2_journal, j_recovery_work); | 1152 | container_of(work, struct ocfs2_journal, j_recovery_work); |
1191 | struct ocfs2_super *osb = journal->j_osb; | 1153 | struct ocfs2_super *osb = journal->j_osb; |
@@ -1194,9 +1156,8 @@ void ocfs2_complete_recovery(struct work_struct *work) | |||
1194 | struct ocfs2_quota_recovery *qrec; | 1156 | struct ocfs2_quota_recovery *qrec; |
1195 | LIST_HEAD(tmp_la_list); | 1157 | LIST_HEAD(tmp_la_list); |
1196 | 1158 | ||
1197 | mlog_entry_void(); | 1159 | trace_ocfs2_complete_recovery( |
1198 | 1160 | (unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno); | |
1199 | mlog(0, "completing recovery from keventd\n"); | ||
1200 | 1161 | ||
1201 | spin_lock(&journal->j_lock); | 1162 | spin_lock(&journal->j_lock); |
1202 | list_splice_init(&journal->j_la_cleanups, &tmp_la_list); | 1163 | list_splice_init(&journal->j_la_cleanups, &tmp_la_list); |
@@ -1205,15 +1166,18 @@ void ocfs2_complete_recovery(struct work_struct *work) | |||
1205 | list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { | 1166 | list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { |
1206 | list_del_init(&item->lri_list); | 1167 | list_del_init(&item->lri_list); |
1207 | 1168 | ||
1208 | mlog(0, "Complete recovery for slot %d\n", item->lri_slot); | ||
1209 | |||
1210 | ocfs2_wait_on_quotas(osb); | 1169 | ocfs2_wait_on_quotas(osb); |
1211 | 1170 | ||
1212 | la_dinode = item->lri_la_dinode; | 1171 | la_dinode = item->lri_la_dinode; |
1213 | if (la_dinode) { | 1172 | tl_dinode = item->lri_tl_dinode; |
1214 | mlog(0, "Clean up local alloc %llu\n", | 1173 | qrec = item->lri_qrec; |
1215 | (unsigned long long)le64_to_cpu(la_dinode->i_blkno)); | 1174 | |
1175 | trace_ocfs2_complete_recovery_slot(item->lri_slot, | ||
1176 | la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0, | ||
1177 | tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0, | ||
1178 | qrec); | ||
1216 | 1179 | ||
1180 | if (la_dinode) { | ||
1217 | ret = ocfs2_complete_local_alloc_recovery(osb, | 1181 | ret = ocfs2_complete_local_alloc_recovery(osb, |
1218 | la_dinode); | 1182 | la_dinode); |
1219 | if (ret < 0) | 1183 | if (ret < 0) |
@@ -1222,11 +1186,7 @@ void ocfs2_complete_recovery(struct work_struct *work) | |||
1222 | kfree(la_dinode); | 1186 | kfree(la_dinode); |
1223 | } | 1187 | } |
1224 | 1188 | ||
1225 | tl_dinode = item->lri_tl_dinode; | ||
1226 | if (tl_dinode) { | 1189 | if (tl_dinode) { |
1227 | mlog(0, "Clean up truncate log %llu\n", | ||
1228 | (unsigned long long)le64_to_cpu(tl_dinode->i_blkno)); | ||
1229 | |||
1230 | ret = ocfs2_complete_truncate_log_recovery(osb, | 1190 | ret = ocfs2_complete_truncate_log_recovery(osb, |
1231 | tl_dinode); | 1191 | tl_dinode); |
1232 | if (ret < 0) | 1192 | if (ret < 0) |
@@ -1239,9 +1199,7 @@ void ocfs2_complete_recovery(struct work_struct *work) | |||
1239 | if (ret < 0) | 1199 | if (ret < 0) |
1240 | mlog_errno(ret); | 1200 | mlog_errno(ret); |
1241 | 1201 | ||
1242 | qrec = item->lri_qrec; | ||
1243 | if (qrec) { | 1202 | if (qrec) { |
1244 | mlog(0, "Recovering quota files"); | ||
1245 | ret = ocfs2_finish_quota_recovery(osb, qrec, | 1203 | ret = ocfs2_finish_quota_recovery(osb, qrec, |
1246 | item->lri_slot); | 1204 | item->lri_slot); |
1247 | if (ret < 0) | 1205 | if (ret < 0) |
@@ -1252,8 +1210,7 @@ void ocfs2_complete_recovery(struct work_struct *work) | |||
1252 | kfree(item); | 1210 | kfree(item); |
1253 | } | 1211 | } |
1254 | 1212 | ||
1255 | mlog(0, "Recovery completion\n"); | 1213 | trace_ocfs2_complete_recovery_end(ret); |
1256 | mlog_exit_void(); | ||
1257 | } | 1214 | } |
1258 | 1215 | ||
1259 | /* NOTE: This function always eats your references to la_dinode and | 1216 | /* NOTE: This function always eats your references to la_dinode and |
@@ -1339,8 +1296,6 @@ static int __ocfs2_recovery_thread(void *arg) | |||
1339 | int rm_quota_used = 0, i; | 1296 | int rm_quota_used = 0, i; |
1340 | struct ocfs2_quota_recovery *qrec; | 1297 | struct ocfs2_quota_recovery *qrec; |
1341 | 1298 | ||
1342 | mlog_entry_void(); | ||
1343 | |||
1344 | status = ocfs2_wait_on_mount(osb); | 1299 | status = ocfs2_wait_on_mount(osb); |
1345 | if (status < 0) { | 1300 | if (status < 0) { |
1346 | goto bail; | 1301 | goto bail; |
@@ -1372,15 +1327,12 @@ restart: | |||
1372 | * clear it until ocfs2_recover_node() has succeeded. */ | 1327 | * clear it until ocfs2_recover_node() has succeeded. */ |
1373 | node_num = rm->rm_entries[0]; | 1328 | node_num = rm->rm_entries[0]; |
1374 | spin_unlock(&osb->osb_lock); | 1329 | spin_unlock(&osb->osb_lock); |
1375 | mlog(0, "checking node %d\n", node_num); | ||
1376 | slot_num = ocfs2_node_num_to_slot(osb, node_num); | 1330 | slot_num = ocfs2_node_num_to_slot(osb, node_num); |
1331 | trace_ocfs2_recovery_thread_node(node_num, slot_num); | ||
1377 | if (slot_num == -ENOENT) { | 1332 | if (slot_num == -ENOENT) { |
1378 | status = 0; | 1333 | status = 0; |
1379 | mlog(0, "no slot for this node, so no recovery" | ||
1380 | "required.\n"); | ||
1381 | goto skip_recovery; | 1334 | goto skip_recovery; |
1382 | } | 1335 | } |
1383 | mlog(0, "node %d was using slot %d\n", node_num, slot_num); | ||
1384 | 1336 | ||
1385 | /* It is a bit subtle with quota recovery. We cannot do it | 1337 | /* It is a bit subtle with quota recovery. We cannot do it |
1386 | * immediately because we have to obtain cluster locks from | 1338 | * immediately because we have to obtain cluster locks from |
@@ -1407,7 +1359,7 @@ skip_recovery: | |||
1407 | spin_lock(&osb->osb_lock); | 1359 | spin_lock(&osb->osb_lock); |
1408 | } | 1360 | } |
1409 | spin_unlock(&osb->osb_lock); | 1361 | spin_unlock(&osb->osb_lock); |
1410 | mlog(0, "All nodes recovered\n"); | 1362 | trace_ocfs2_recovery_thread_end(status); |
1411 | 1363 | ||
1412 | /* Refresh all journal recovery generations from disk */ | 1364 | /* Refresh all journal recovery generations from disk */ |
1413 | status = ocfs2_check_journals_nolocks(osb); | 1365 | status = ocfs2_check_journals_nolocks(osb); |
@@ -1451,7 +1403,6 @@ bail: | |||
1451 | if (rm_quota) | 1403 | if (rm_quota) |
1452 | kfree(rm_quota); | 1404 | kfree(rm_quota); |
1453 | 1405 | ||
1454 | mlog_exit(status); | ||
1455 | /* no one is calling kthread_stop() for us so the kthread() api | 1406 | /* no one is calling kthread_stop() for us so the kthread() api |
1456 | * requires that we call do_exit(). And it isn't exported, but | 1407 | * requires that we call do_exit(). And it isn't exported, but |
1457 | * complete_and_exit() seems to be a minimal wrapper around it. */ | 1408 | * complete_and_exit() seems to be a minimal wrapper around it. */ |
@@ -1461,19 +1412,15 @@ bail: | |||
1461 | 1412 | ||
1462 | void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) | 1413 | void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) |
1463 | { | 1414 | { |
1464 | mlog_entry("(node_num=%d, osb->node_num = %d)\n", | ||
1465 | node_num, osb->node_num); | ||
1466 | |||
1467 | mutex_lock(&osb->recovery_lock); | 1415 | mutex_lock(&osb->recovery_lock); |
1468 | if (osb->disable_recovery) | ||
1469 | goto out; | ||
1470 | 1416 | ||
1471 | /* People waiting on recovery will wait on | 1417 | trace_ocfs2_recovery_thread(node_num, osb->node_num, |
1472 | * the recovery map to empty. */ | 1418 | osb->disable_recovery, osb->recovery_thread_task, |
1473 | if (ocfs2_recovery_map_set(osb, node_num)) | 1419 | osb->disable_recovery ? |
1474 | mlog(0, "node %d already in recovery map.\n", node_num); | 1420 | -1 : ocfs2_recovery_map_set(osb, node_num)); |
1475 | 1421 | ||
1476 | mlog(0, "starting recovery thread...\n"); | 1422 | if (osb->disable_recovery) |
1423 | goto out; | ||
1477 | 1424 | ||
1478 | if (osb->recovery_thread_task) | 1425 | if (osb->recovery_thread_task) |
1479 | goto out; | 1426 | goto out; |
@@ -1488,8 +1435,6 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) | |||
1488 | out: | 1435 | out: |
1489 | mutex_unlock(&osb->recovery_lock); | 1436 | mutex_unlock(&osb->recovery_lock); |
1490 | wake_up(&osb->recovery_event); | 1437 | wake_up(&osb->recovery_event); |
1491 | |||
1492 | mlog_exit_void(); | ||
1493 | } | 1438 | } |
1494 | 1439 | ||
1495 | static int ocfs2_read_journal_inode(struct ocfs2_super *osb, | 1440 | static int ocfs2_read_journal_inode(struct ocfs2_super *osb, |
@@ -1563,7 +1508,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1563 | * If not, it needs recovery. | 1508 | * If not, it needs recovery. |
1564 | */ | 1509 | */ |
1565 | if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) { | 1510 | if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) { |
1566 | mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num, | 1511 | trace_ocfs2_replay_journal_recovered(slot_num, |
1567 | osb->slot_recovery_generations[slot_num], slot_reco_gen); | 1512 | osb->slot_recovery_generations[slot_num], slot_reco_gen); |
1568 | osb->slot_recovery_generations[slot_num] = slot_reco_gen; | 1513 | osb->slot_recovery_generations[slot_num] = slot_reco_gen; |
1569 | status = -EBUSY; | 1514 | status = -EBUSY; |
@@ -1574,7 +1519,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1574 | 1519 | ||
1575 | status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); | 1520 | status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); |
1576 | if (status < 0) { | 1521 | if (status < 0) { |
1577 | mlog(0, "status returned from ocfs2_inode_lock=%d\n", status); | 1522 | trace_ocfs2_replay_journal_lock_err(status); |
1578 | if (status != -ERESTARTSYS) | 1523 | if (status != -ERESTARTSYS) |
1579 | mlog(ML_ERROR, "Could not lock journal!\n"); | 1524 | mlog(ML_ERROR, "Could not lock journal!\n"); |
1580 | goto done; | 1525 | goto done; |
@@ -1587,7 +1532,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1587 | slot_reco_gen = ocfs2_get_recovery_generation(fe); | 1532 | slot_reco_gen = ocfs2_get_recovery_generation(fe); |
1588 | 1533 | ||
1589 | if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) { | 1534 | if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) { |
1590 | mlog(0, "No recovery required for node %d\n", node_num); | 1535 | trace_ocfs2_replay_journal_skip(node_num); |
1591 | /* Refresh recovery generation for the slot */ | 1536 | /* Refresh recovery generation for the slot */ |
1592 | osb->slot_recovery_generations[slot_num] = slot_reco_gen; | 1537 | osb->slot_recovery_generations[slot_num] = slot_reco_gen; |
1593 | goto done; | 1538 | goto done; |
@@ -1608,7 +1553,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1608 | goto done; | 1553 | goto done; |
1609 | } | 1554 | } |
1610 | 1555 | ||
1611 | mlog(0, "calling journal_init_inode\n"); | ||
1612 | journal = jbd2_journal_init_inode(inode); | 1556 | journal = jbd2_journal_init_inode(inode); |
1613 | if (journal == NULL) { | 1557 | if (journal == NULL) { |
1614 | mlog(ML_ERROR, "Linux journal layer error\n"); | 1558 | mlog(ML_ERROR, "Linux journal layer error\n"); |
@@ -1628,7 +1572,6 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1628 | ocfs2_clear_journal_error(osb->sb, journal, slot_num); | 1572 | ocfs2_clear_journal_error(osb->sb, journal, slot_num); |
1629 | 1573 | ||
1630 | /* wipe the journal */ | 1574 | /* wipe the journal */ |
1631 | mlog(0, "flushing the journal.\n"); | ||
1632 | jbd2_journal_lock_updates(journal); | 1575 | jbd2_journal_lock_updates(journal); |
1633 | status = jbd2_journal_flush(journal); | 1576 | status = jbd2_journal_flush(journal); |
1634 | jbd2_journal_unlock_updates(journal); | 1577 | jbd2_journal_unlock_updates(journal); |
@@ -1665,7 +1608,6 @@ done: | |||
1665 | 1608 | ||
1666 | brelse(bh); | 1609 | brelse(bh); |
1667 | 1610 | ||
1668 | mlog_exit(status); | ||
1669 | return status; | 1611 | return status; |
1670 | } | 1612 | } |
1671 | 1613 | ||
@@ -1688,8 +1630,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb, | |||
1688 | struct ocfs2_dinode *la_copy = NULL; | 1630 | struct ocfs2_dinode *la_copy = NULL; |
1689 | struct ocfs2_dinode *tl_copy = NULL; | 1631 | struct ocfs2_dinode *tl_copy = NULL; |
1690 | 1632 | ||
1691 | mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n", | 1633 | trace_ocfs2_recover_node(node_num, slot_num, osb->node_num); |
1692 | node_num, slot_num, osb->node_num); | ||
1693 | 1634 | ||
1694 | /* Should not ever be called to recover ourselves -- in that | 1635 | /* Should not ever be called to recover ourselves -- in that |
1695 | * case we should've called ocfs2_journal_load instead. */ | 1636 | * case we should've called ocfs2_journal_load instead. */ |
@@ -1698,9 +1639,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb, | |||
1698 | status = ocfs2_replay_journal(osb, node_num, slot_num); | 1639 | status = ocfs2_replay_journal(osb, node_num, slot_num); |
1699 | if (status < 0) { | 1640 | if (status < 0) { |
1700 | if (status == -EBUSY) { | 1641 | if (status == -EBUSY) { |
1701 | mlog(0, "Skipping recovery for slot %u (node %u) " | 1642 | trace_ocfs2_recover_node_skip(slot_num, node_num); |
1702 | "as another node has recovered it\n", slot_num, | ||
1703 | node_num); | ||
1704 | status = 0; | 1643 | status = 0; |
1705 | goto done; | 1644 | goto done; |
1706 | } | 1645 | } |
@@ -1735,7 +1674,6 @@ static int ocfs2_recover_node(struct ocfs2_super *osb, | |||
1735 | status = 0; | 1674 | status = 0; |
1736 | done: | 1675 | done: |
1737 | 1676 | ||
1738 | mlog_exit(status); | ||
1739 | return status; | 1677 | return status; |
1740 | } | 1678 | } |
1741 | 1679 | ||
@@ -1808,8 +1746,8 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb) | |||
1808 | spin_lock(&osb->osb_lock); | 1746 | spin_lock(&osb->osb_lock); |
1809 | osb->slot_recovery_generations[i] = gen; | 1747 | osb->slot_recovery_generations[i] = gen; |
1810 | 1748 | ||
1811 | mlog(0, "Slot %u recovery generation is %u\n", i, | 1749 | trace_ocfs2_mark_dead_nodes(i, |
1812 | osb->slot_recovery_generations[i]); | 1750 | osb->slot_recovery_generations[i]); |
1813 | 1751 | ||
1814 | if (i == osb->slot_num) { | 1752 | if (i == osb->slot_num) { |
1815 | spin_unlock(&osb->osb_lock); | 1753 | spin_unlock(&osb->osb_lock); |
@@ -1845,7 +1783,6 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb) | |||
1845 | 1783 | ||
1846 | status = 0; | 1784 | status = 0; |
1847 | bail: | 1785 | bail: |
1848 | mlog_exit(status); | ||
1849 | return status; | 1786 | return status; |
1850 | } | 1787 | } |
1851 | 1788 | ||
@@ -1884,11 +1821,12 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb) | |||
1884 | 1821 | ||
1885 | os = &osb->osb_orphan_scan; | 1822 | os = &osb->osb_orphan_scan; |
1886 | 1823 | ||
1887 | mlog(0, "Begin orphan scan\n"); | ||
1888 | |||
1889 | if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) | 1824 | if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) |
1890 | goto out; | 1825 | goto out; |
1891 | 1826 | ||
1827 | trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno, | ||
1828 | atomic_read(&os->os_state)); | ||
1829 | |||
1892 | status = ocfs2_orphan_scan_lock(osb, &seqno); | 1830 | status = ocfs2_orphan_scan_lock(osb, &seqno); |
1893 | if (status < 0) { | 1831 | if (status < 0) { |
1894 | if (status != -EAGAIN) | 1832 | if (status != -EAGAIN) |
@@ -1918,7 +1856,8 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb) | |||
1918 | unlock: | 1856 | unlock: |
1919 | ocfs2_orphan_scan_unlock(osb, seqno); | 1857 | ocfs2_orphan_scan_unlock(osb, seqno); |
1920 | out: | 1858 | out: |
1921 | mlog(0, "Orphan scan completed\n"); | 1859 | trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno, |
1860 | atomic_read(&os->os_state)); | ||
1922 | return; | 1861 | return; |
1923 | } | 1862 | } |
1924 | 1863 | ||
@@ -2002,8 +1941,7 @@ static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len, | |||
2002 | if (IS_ERR(iter)) | 1941 | if (IS_ERR(iter)) |
2003 | return 0; | 1942 | return 0; |
2004 | 1943 | ||
2005 | mlog(0, "queue orphan %llu\n", | 1944 | trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno); |
2006 | (unsigned long long)OCFS2_I(iter)->ip_blkno); | ||
2007 | /* No locking is required for the next_orphan queue as there | 1945 | /* No locking is required for the next_orphan queue as there |
2008 | * is only ever a single process doing orphan recovery. */ | 1946 | * is only ever a single process doing orphan recovery. */ |
2009 | OCFS2_I(iter)->ip_next_orphan = p->head; | 1947 | OCFS2_I(iter)->ip_next_orphan = p->head; |
@@ -2119,7 +2057,7 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb, | |||
2119 | struct inode *iter; | 2057 | struct inode *iter; |
2120 | struct ocfs2_inode_info *oi; | 2058 | struct ocfs2_inode_info *oi; |
2121 | 2059 | ||
2122 | mlog(0, "Recover inodes from orphan dir in slot %d\n", slot); | 2060 | trace_ocfs2_recover_orphans(slot); |
2123 | 2061 | ||
2124 | ocfs2_mark_recovering_orphan_dir(osb, slot); | 2062 | ocfs2_mark_recovering_orphan_dir(osb, slot); |
2125 | ret = ocfs2_queue_orphans(osb, slot, &inode); | 2063 | ret = ocfs2_queue_orphans(osb, slot, &inode); |
@@ -2132,7 +2070,8 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb, | |||
2132 | 2070 | ||
2133 | while (inode) { | 2071 | while (inode) { |
2134 | oi = OCFS2_I(inode); | 2072 | oi = OCFS2_I(inode); |
2135 | mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno); | 2073 | trace_ocfs2_recover_orphans_iput( |
2074 | (unsigned long long)oi->ip_blkno); | ||
2136 | 2075 | ||
2137 | iter = oi->ip_next_orphan; | 2076 | iter = oi->ip_next_orphan; |
2138 | 2077 | ||
@@ -2170,6 +2109,7 @@ static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota) | |||
2170 | * MOUNTED flag, but this is set right before | 2109 | * MOUNTED flag, but this is set right before |
2171 | * dismount_volume() so we can trust it. */ | 2110 | * dismount_volume() so we can trust it. */ |
2172 | if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) { | 2111 | if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) { |
2112 | trace_ocfs2_wait_on_mount(VOLUME_DISABLED); | ||
2173 | mlog(0, "mount error, exiting!\n"); | 2113 | mlog(0, "mount error, exiting!\n"); |
2174 | return -EBUSY; | 2114 | return -EBUSY; |
2175 | } | 2115 | } |
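Throughout fs/ocfs2/journal.c the mlog_entry()/mlog_exit() markers and mlog(0, ...) debug prints are replaced by trace_ocfs2_*() tracepoints, with "ocfs2_trace.h" pulled in next to the other local headers. The fragment below sketches the general shape such a tracepoint definition takes with the kernel's TRACE_EVENT() machinery, using trace_ocfs2_commit_cache_begin() (a single unsigned int argument, per the hunk above) as the example. The actual fs/ocfs2/ocfs2_trace.h added by this series is not shown in this part of the diff and most likely shares boilerplate via DECLARE_EVENT_CLASS(), so treat this as an illustration of the mechanism rather than the file's contents.

/* Illustrative trace header in the usual kernel layout; not the real ocfs2_trace.h. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_commit_cache_begin,
        TP_PROTO(unsigned int num_trans),
        TP_ARGS(num_trans),
        TP_STRUCT__entry(
                __field(unsigned int, num_trans)
        ),
        TP_fast_assign(
                __entry->num_trans = num_trans;
        ),
        TP_printk("num_trans %u", __entry->num_trans)
);

#endif /* _TRACE_OCFS2_H */

/* This part must stay outside the multi-read protection. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE ocfs2_trace
#include <trace/define_trace.h>

Events declared this way appear under the "ocfs2" group in the tracing events directory and can be enabled individually at run time, which takes over the role of the per-file MLOG_MASK_PREFIX masks being deleted in these hunks.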
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index ec6adbf8f551..210c35237548 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/highmem.h> | 29 | #include <linux/highmem.h> |
30 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
31 | 31 | ||
32 | #define MLOG_MASK_PREFIX ML_DISK_ALLOC | ||
33 | #include <cluster/masklog.h> | 32 | #include <cluster/masklog.h> |
34 | 33 | ||
35 | #include "ocfs2.h" | 34 | #include "ocfs2.h" |
@@ -43,6 +42,7 @@ | |||
43 | #include "suballoc.h" | 42 | #include "suballoc.h" |
44 | #include "super.h" | 43 | #include "super.h" |
45 | #include "sysfile.h" | 44 | #include "sysfile.h" |
45 | #include "ocfs2_trace.h" | ||
46 | 46 | ||
47 | #include "buffer_head_io.h" | 47 | #include "buffer_head_io.h" |
48 | 48 | ||
@@ -201,8 +201,7 @@ void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb) | |||
201 | la_max_mb = ocfs2_clusters_to_megabytes(sb, | 201 | la_max_mb = ocfs2_clusters_to_megabytes(sb, |
202 | ocfs2_local_alloc_size(sb) * 8); | 202 | ocfs2_local_alloc_size(sb) * 8); |
203 | 203 | ||
204 | mlog(0, "requested: %dM, max: %uM, default: %uM\n", | 204 | trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb); |
205 | requested_mb, la_max_mb, la_default_mb); | ||
206 | 205 | ||
207 | if (requested_mb == -1) { | 206 | if (requested_mb == -1) { |
208 | /* No user request - use defaults */ | 207 | /* No user request - use defaults */ |
@@ -276,8 +275,8 @@ int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits) | |||
276 | 275 | ||
277 | ret = 1; | 276 | ret = 1; |
278 | bail: | 277 | bail: |
279 | mlog(0, "state=%d, bits=%llu, la_bits=%d, ret=%d\n", | 278 | trace_ocfs2_alloc_should_use_local( |
280 | osb->local_alloc_state, (unsigned long long)bits, la_bits, ret); | 279 | (unsigned long long)bits, osb->local_alloc_state, la_bits, ret); |
281 | spin_unlock(&osb->osb_lock); | 280 | spin_unlock(&osb->osb_lock); |
282 | return ret; | 281 | return ret; |
283 | } | 282 | } |
@@ -291,8 +290,6 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb) | |||
291 | struct inode *inode = NULL; | 290 | struct inode *inode = NULL; |
292 | struct ocfs2_local_alloc *la; | 291 | struct ocfs2_local_alloc *la; |
293 | 292 | ||
294 | mlog_entry_void(); | ||
295 | |||
296 | if (osb->local_alloc_bits == 0) | 293 | if (osb->local_alloc_bits == 0) |
297 | goto bail; | 294 | goto bail; |
298 | 295 | ||
@@ -364,9 +361,10 @@ bail: | |||
364 | if (inode) | 361 | if (inode) |
365 | iput(inode); | 362 | iput(inode); |
366 | 363 | ||
367 | mlog(0, "Local alloc window bits = %d\n", osb->local_alloc_bits); | 364 | trace_ocfs2_load_local_alloc(osb->local_alloc_bits); |
368 | 365 | ||
369 | mlog_exit(status); | 366 | if (status) |
367 | mlog_errno(status); | ||
370 | return status; | 368 | return status; |
371 | } | 369 | } |
372 | 370 | ||
@@ -388,8 +386,6 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) | |||
388 | struct ocfs2_dinode *alloc_copy = NULL; | 386 | struct ocfs2_dinode *alloc_copy = NULL; |
389 | struct ocfs2_dinode *alloc = NULL; | 387 | struct ocfs2_dinode *alloc = NULL; |
390 | 388 | ||
391 | mlog_entry_void(); | ||
392 | |||
393 | cancel_delayed_work(&osb->la_enable_wq); | 389 | cancel_delayed_work(&osb->la_enable_wq); |
394 | flush_workqueue(ocfs2_wq); | 390 | flush_workqueue(ocfs2_wq); |
395 | 391 | ||
@@ -482,8 +478,6 @@ out: | |||
482 | 478 | ||
483 | if (alloc_copy) | 479 | if (alloc_copy) |
484 | kfree(alloc_copy); | 480 | kfree(alloc_copy); |
485 | |||
486 | mlog_exit_void(); | ||
487 | } | 481 | } |
488 | 482 | ||
489 | /* | 483 | /* |
@@ -502,7 +496,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb, | |||
502 | struct inode *inode = NULL; | 496 | struct inode *inode = NULL; |
503 | struct ocfs2_dinode *alloc; | 497 | struct ocfs2_dinode *alloc; |
504 | 498 | ||
505 | mlog_entry("(slot_num = %d)\n", slot_num); | 499 | trace_ocfs2_begin_local_alloc_recovery(slot_num); |
506 | 500 | ||
507 | *alloc_copy = NULL; | 501 | *alloc_copy = NULL; |
508 | 502 | ||
@@ -552,7 +546,8 @@ bail: | |||
552 | iput(inode); | 546 | iput(inode); |
553 | } | 547 | } |
554 | 548 | ||
555 | mlog_exit(status); | 549 | if (status) |
550 | mlog_errno(status); | ||
556 | return status; | 551 | return status; |
557 | } | 552 | } |
558 | 553 | ||
@@ -570,8 +565,6 @@ int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb, | |||
570 | struct buffer_head *main_bm_bh = NULL; | 565 | struct buffer_head *main_bm_bh = NULL; |
571 | struct inode *main_bm_inode; | 566 | struct inode *main_bm_inode; |
572 | 567 | ||
573 | mlog_entry_void(); | ||
574 | |||
575 | main_bm_inode = ocfs2_get_system_file_inode(osb, | 568 | main_bm_inode = ocfs2_get_system_file_inode(osb, |
576 | GLOBAL_BITMAP_SYSTEM_INODE, | 569 | GLOBAL_BITMAP_SYSTEM_INODE, |
577 | OCFS2_INVALID_SLOT); | 570 | OCFS2_INVALID_SLOT); |
@@ -620,7 +613,8 @@ out_mutex: | |||
620 | out: | 613 | out: |
621 | if (!status) | 614 | if (!status) |
622 | ocfs2_init_steal_slots(osb); | 615 | ocfs2_init_steal_slots(osb); |
623 | mlog_exit(status); | 616 | if (status) |
617 | mlog_errno(status); | ||
624 | return status; | 618 | return status; |
625 | } | 619 | } |
626 | 620 | ||
@@ -640,8 +634,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, | |||
640 | struct inode *local_alloc_inode; | 634 | struct inode *local_alloc_inode; |
641 | unsigned int free_bits; | 635 | unsigned int free_bits; |
642 | 636 | ||
643 | mlog_entry_void(); | ||
644 | |||
645 | BUG_ON(!ac); | 637 | BUG_ON(!ac); |
646 | 638 | ||
647 | local_alloc_inode = | 639 | local_alloc_inode = |
@@ -712,10 +704,6 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, | |||
712 | goto bail; | 704 | goto bail; |
713 | } | 705 | } |
714 | 706 | ||
715 | if (ac->ac_max_block) | ||
716 | mlog(0, "Calling in_range for max block %llu\n", | ||
717 | (unsigned long long)ac->ac_max_block); | ||
718 | |||
719 | ac->ac_inode = local_alloc_inode; | 707 | ac->ac_inode = local_alloc_inode; |
720 | /* We should never use localalloc from another slot */ | 708 | /* We should never use localalloc from another slot */ |
721 | ac->ac_alloc_slot = osb->slot_num; | 709 | ac->ac_alloc_slot = osb->slot_num; |
@@ -729,10 +717,12 @@ bail: | |||
729 | iput(local_alloc_inode); | 717 | iput(local_alloc_inode); |
730 | } | 718 | } |
731 | 719 | ||
732 | mlog(0, "bits=%d, slot=%d, ret=%d\n", bits_wanted, osb->slot_num, | 720 | trace_ocfs2_reserve_local_alloc_bits( |
733 | status); | 721 | (unsigned long long)ac->ac_max_block, |
722 | bits_wanted, osb->slot_num, status); | ||
734 | 723 | ||
735 | mlog_exit(status); | 724 | if (status) |
725 | mlog_errno(status); | ||
736 | return status; | 726 | return status; |
737 | } | 727 | } |
738 | 728 | ||
@@ -749,7 +739,6 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, | |||
749 | struct ocfs2_dinode *alloc; | 739 | struct ocfs2_dinode *alloc; |
750 | struct ocfs2_local_alloc *la; | 740 | struct ocfs2_local_alloc *la; |
751 | 741 | ||
752 | mlog_entry_void(); | ||
753 | BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); | 742 | BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); |
754 | 743 | ||
755 | local_alloc_inode = ac->ac_inode; | 744 | local_alloc_inode = ac->ac_inode; |
@@ -788,7 +777,8 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, | |||
788 | ocfs2_journal_dirty(handle, osb->local_alloc_bh); | 777 | ocfs2_journal_dirty(handle, osb->local_alloc_bh); |
789 | 778 | ||
790 | bail: | 779 | bail: |
791 | mlog_exit(status); | 780 | if (status) |
781 | mlog_errno(status); | ||
792 | return status; | 782 | return status; |
793 | } | 783 | } |
794 | 784 | ||
@@ -799,13 +789,11 @@ static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc) | |||
799 | u32 count = 0; | 789 | u32 count = 0; |
800 | struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); | 790 | struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); |
801 | 791 | ||
802 | mlog_entry_void(); | ||
803 | |||
804 | buffer = la->la_bitmap; | 792 | buffer = la->la_bitmap; |
805 | for (i = 0; i < le16_to_cpu(la->la_size); i++) | 793 | for (i = 0; i < le16_to_cpu(la->la_size); i++) |
806 | count += hweight8(buffer[i]); | 794 | count += hweight8(buffer[i]); |
807 | 795 | ||
808 | mlog_exit(count); | 796 | trace_ocfs2_local_alloc_count_bits(count); |
809 | return count; | 797 | return count; |
810 | } | 798 | } |
811 | 799 | ||
@@ -820,10 +808,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, | |||
820 | void *bitmap = NULL; | 808 | void *bitmap = NULL; |
821 | struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap; | 809 | struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap; |
822 | 810 | ||
823 | mlog_entry("(numbits wanted = %u)\n", *numbits); | ||
824 | |||
825 | if (!alloc->id1.bitmap1.i_total) { | 811 | if (!alloc->id1.bitmap1.i_total) { |
826 | mlog(0, "No bits in my window!\n"); | ||
827 | bitoff = -1; | 812 | bitoff = -1; |
828 | goto bail; | 813 | goto bail; |
829 | } | 814 | } |
@@ -883,8 +868,7 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, | |||
883 | } | 868 | } |
884 | } | 869 | } |
885 | 870 | ||
886 | mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff, | 871 | trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound); |
887 | numfound); | ||
888 | 872 | ||
889 | if (numfound == *numbits) | 873 | if (numfound == *numbits) |
890 | bitoff = startoff - numfound; | 874 | bitoff = startoff - numfound; |
@@ -895,7 +879,10 @@ bail: | |||
895 | if (local_resv) | 879 | if (local_resv) |
896 | ocfs2_resv_discard(resmap, resv); | 880 | ocfs2_resv_discard(resmap, resv); |
897 | 881 | ||
898 | mlog_exit(bitoff); | 882 | trace_ocfs2_local_alloc_find_clear_bits(*numbits, |
883 | le32_to_cpu(alloc->id1.bitmap1.i_total), | ||
884 | bitoff, numfound); | ||
885 | |||
899 | return bitoff; | 886 | return bitoff; |
900 | } | 887 | } |
901 | 888 | ||
@@ -903,15 +890,12 @@ static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc) | |||
903 | { | 890 | { |
904 | struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); | 891 | struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); |
905 | int i; | 892 | int i; |
906 | mlog_entry_void(); | ||
907 | 893 | ||
908 | alloc->id1.bitmap1.i_total = 0; | 894 | alloc->id1.bitmap1.i_total = 0; |
909 | alloc->id1.bitmap1.i_used = 0; | 895 | alloc->id1.bitmap1.i_used = 0; |
910 | la->la_bm_off = 0; | 896 | la->la_bm_off = 0; |
911 | for(i = 0; i < le16_to_cpu(la->la_size); i++) | 897 | for(i = 0; i < le16_to_cpu(la->la_size); i++) |
912 | la->la_bitmap[i] = 0; | 898 | la->la_bitmap[i] = 0; |
913 | |||
914 | mlog_exit_void(); | ||
915 | } | 899 | } |
916 | 900 | ||
917 | #if 0 | 901 | #if 0 |
@@ -952,18 +936,16 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, | |||
952 | void *bitmap; | 936 | void *bitmap; |
953 | struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); | 937 | struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); |
954 | 938 | ||
955 | mlog_entry("total = %u, used = %u\n", | 939 | trace_ocfs2_sync_local_to_main( |
956 | le32_to_cpu(alloc->id1.bitmap1.i_total), | 940 | le32_to_cpu(alloc->id1.bitmap1.i_total), |
957 | le32_to_cpu(alloc->id1.bitmap1.i_used)); | 941 | le32_to_cpu(alloc->id1.bitmap1.i_used)); |
958 | 942 | ||
959 | if (!alloc->id1.bitmap1.i_total) { | 943 | if (!alloc->id1.bitmap1.i_total) { |
960 | mlog(0, "nothing to sync!\n"); | ||
961 | goto bail; | 944 | goto bail; |
962 | } | 945 | } |
963 | 946 | ||
964 | if (le32_to_cpu(alloc->id1.bitmap1.i_used) == | 947 | if (le32_to_cpu(alloc->id1.bitmap1.i_used) == |
965 | le32_to_cpu(alloc->id1.bitmap1.i_total)) { | 948 | le32_to_cpu(alloc->id1.bitmap1.i_total)) { |
966 | mlog(0, "all bits were taken!\n"); | ||
967 | goto bail; | 949 | goto bail; |
968 | } | 950 | } |
969 | 951 | ||
@@ -985,8 +967,7 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, | |||
985 | ocfs2_clusters_to_blocks(osb->sb, | 967 | ocfs2_clusters_to_blocks(osb->sb, |
986 | start - count); | 968 | start - count); |
987 | 969 | ||
988 | mlog(0, "freeing %u bits starting at local alloc bit " | 970 | trace_ocfs2_sync_local_to_main_free( |
989 | "%u (la_start_blk = %llu, blkno = %llu)\n", | ||
990 | count, start - count, | 971 | count, start - count, |
991 | (unsigned long long)la_start_blk, | 972 | (unsigned long long)la_start_blk, |
992 | (unsigned long long)blkno); | 973 | (unsigned long long)blkno); |
@@ -1007,7 +988,8 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, | |||
1007 | } | 988 | } |
1008 | 989 | ||
1009 | bail: | 990 | bail: |
1010 | mlog_exit(status); | 991 | if (status) |
992 | mlog_errno(status); | ||
1011 | return status; | 993 | return status; |
1012 | } | 994 | } |
1013 | 995 | ||
@@ -1132,7 +1114,8 @@ bail: | |||
1132 | *ac = NULL; | 1114 | *ac = NULL; |
1133 | } | 1115 | } |
1134 | 1116 | ||
1135 | mlog_exit(status); | 1117 | if (status) |
1118 | mlog_errno(status); | ||
1136 | return status; | 1119 | return status; |
1137 | } | 1120 | } |
1138 | 1121 | ||
@@ -1148,17 +1131,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, | |||
1148 | struct ocfs2_dinode *alloc = NULL; | 1131 | struct ocfs2_dinode *alloc = NULL; |
1149 | struct ocfs2_local_alloc *la; | 1132 | struct ocfs2_local_alloc *la; |
1150 | 1133 | ||
1151 | mlog_entry_void(); | ||
1152 | |||
1153 | alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; | 1134 | alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; |
1154 | la = OCFS2_LOCAL_ALLOC(alloc); | 1135 | la = OCFS2_LOCAL_ALLOC(alloc); |
1155 | 1136 | ||
1156 | if (alloc->id1.bitmap1.i_total) | 1137 | trace_ocfs2_local_alloc_new_window( |
1157 | mlog(0, "asking me to alloc a new window over a non-empty " | 1138 | le32_to_cpu(alloc->id1.bitmap1.i_total), |
1158 | "one\n"); | 1139 | osb->local_alloc_bits); |
1159 | |||
1160 | mlog(0, "Allocating %u clusters for a new window.\n", | ||
1161 | osb->local_alloc_bits); | ||
1162 | 1140 | ||
1163 | /* Instruct the allocation code to try the most recently used | 1141 | /* Instruct the allocation code to try the most recently used |
1164 | * cluster group. We'll re-record the group used this pass | 1142 | * cluster group. We'll re-record the group used this pass |
@@ -1220,13 +1198,13 @@ retry_enospc: | |||
1220 | ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count, | 1198 | ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count, |
1221 | OCFS2_LOCAL_ALLOC(alloc)->la_bitmap); | 1199 | OCFS2_LOCAL_ALLOC(alloc)->la_bitmap); |
1222 | 1200 | ||
1223 | mlog(0, "New window allocated:\n"); | 1201 | trace_ocfs2_local_alloc_new_window_result( |
1224 | mlog(0, "window la_bm_off = %u\n", | 1202 | OCFS2_LOCAL_ALLOC(alloc)->la_bm_off, |
1225 | OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); | 1203 | le32_to_cpu(alloc->id1.bitmap1.i_total)); |
1226 | mlog(0, "window bits = %u\n", le32_to_cpu(alloc->id1.bitmap1.i_total)); | ||
1227 | 1204 | ||
1228 | bail: | 1205 | bail: |
1229 | mlog_exit(status); | 1206 | if (status) |
1207 | mlog_errno(status); | ||
1230 | return status; | 1208 | return status; |
1231 | } | 1209 | } |
1232 | 1210 | ||
@@ -1243,8 +1221,6 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, | |||
1243 | struct ocfs2_dinode *alloc_copy = NULL; | 1221 | struct ocfs2_dinode *alloc_copy = NULL; |
1244 | struct ocfs2_alloc_context *ac = NULL; | 1222 | struct ocfs2_alloc_context *ac = NULL; |
1245 | 1223 | ||
1246 | mlog_entry_void(); | ||
1247 | |||
1248 | ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE); | 1224 | ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE); |
1249 | 1225 | ||
1250 | /* This will lock the main bitmap for us. */ | 1226 | /* This will lock the main bitmap for us. */ |
@@ -1324,7 +1300,8 @@ bail: | |||
1324 | if (ac) | 1300 | if (ac) |
1325 | ocfs2_free_alloc_context(ac); | 1301 | ocfs2_free_alloc_context(ac); |
1326 | 1302 | ||
1327 | mlog_exit(status); | 1303 | if (status) |
1304 | mlog_errno(status); | ||
1328 | return status; | 1305 | return status; |
1329 | } | 1306 | } |
1330 | 1307 | ||
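Across fs/ocfs2/localalloc.c the conversion follows one uniform pattern: the mlog_entry*/mlog_exit* bracketing is dropped, informational mlog(0, ...) messages become trace_ocfs2_* tracepoints, and the exit path now only reports failures through mlog_errno(). A minimal, hedged userspace sketch of that exit-path convention follows; the mlog_errno() here is a stand-in written for this example, not the kernel macro.

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-in for the kernel's mlog_errno(): report an
 * errno-style status together with the call site. */
#define mlog_errno(st) \
        fprintf(stderr, "(%s:%d) status = %d\n", __func__, __LINE__, (st))

static int reserve_bits(unsigned int bits_wanted, unsigned int bits_free)
{
        int status = 0;

        if (bits_wanted > bits_free)
                status = -ENOSPC;       /* not enough space in the window */

        /* Exit-path convention used by the patch: log only on error. */
        if (status)
                mlog_errno(status);
        return status;
}

int main(void)
{
        reserve_bits(4, 16);    /* silent success */
        reserve_bits(32, 16);   /* reports -ENOSPC */
        return 0;
}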
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c index b5cb3ede9408..e57c804069ea 100644 --- a/fs/ocfs2/locks.c +++ b/fs/ocfs2/locks.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/fcntl.h> | 27 | #include <linux/fcntl.h> |
28 | 28 | ||
29 | #define MLOG_MASK_PREFIX ML_INODE | ||
30 | #include <cluster/masklog.h> | 29 | #include <cluster/masklog.h> |
31 | 30 | ||
32 | #include "ocfs2.h" | 31 | #include "ocfs2.h" |
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 7e32db9c2c99..3e9393ca39eb 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/signal.h> | 31 | #include <linux/signal.h> |
32 | #include <linux/rbtree.h> | 32 | #include <linux/rbtree.h> |
33 | 33 | ||
34 | #define MLOG_MASK_PREFIX ML_FILE_IO | ||
35 | #include <cluster/masklog.h> | 34 | #include <cluster/masklog.h> |
36 | 35 | ||
37 | #include "ocfs2.h" | 36 | #include "ocfs2.h" |
@@ -42,6 +41,7 @@ | |||
42 | #include "inode.h" | 41 | #include "inode.h" |
43 | #include "mmap.h" | 42 | #include "mmap.h" |
44 | #include "super.h" | 43 | #include "super.h" |
44 | #include "ocfs2_trace.h" | ||
45 | 45 | ||
46 | 46 | ||
47 | static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) | 47 | static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) |
@@ -49,13 +49,12 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) | |||
49 | sigset_t oldset; | 49 | sigset_t oldset; |
50 | int ret; | 50 | int ret; |
51 | 51 | ||
52 | mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff); | ||
53 | |||
54 | ocfs2_block_signals(&oldset); | 52 | ocfs2_block_signals(&oldset); |
55 | ret = filemap_fault(area, vmf); | 53 | ret = filemap_fault(area, vmf); |
56 | ocfs2_unblock_signals(&oldset); | 54 | ocfs2_unblock_signals(&oldset); |
57 | 55 | ||
58 | mlog_exit_ptr(vmf->page); | 56 | trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno, |
57 | area, vmf->page, vmf->pgoff); | ||
59 | return ret; | 58 | return ret; |
60 | } | 59 | } |
61 | 60 | ||
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index d6c25d76b537..28f2cc1080d8 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/highmem.h> | 42 | #include <linux/highmem.h> |
43 | #include <linux/quotaops.h> | 43 | #include <linux/quotaops.h> |
44 | 44 | ||
45 | #define MLOG_MASK_PREFIX ML_NAMEI | ||
46 | #include <cluster/masklog.h> | 45 | #include <cluster/masklog.h> |
47 | 46 | ||
48 | #include "ocfs2.h" | 47 | #include "ocfs2.h" |
@@ -63,6 +62,7 @@ | |||
63 | #include "uptodate.h" | 62 | #include "uptodate.h" |
64 | #include "xattr.h" | 63 | #include "xattr.h" |
65 | #include "acl.h" | 64 | #include "acl.h" |
65 | #include "ocfs2_trace.h" | ||
66 | 66 | ||
67 | #include "buffer_head_io.h" | 67 | #include "buffer_head_io.h" |
68 | 68 | ||
@@ -106,17 +106,15 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, | |||
106 | struct dentry *ret; | 106 | struct dentry *ret; |
107 | struct ocfs2_inode_info *oi; | 107 | struct ocfs2_inode_info *oi; |
108 | 108 | ||
109 | mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, | 109 | trace_ocfs2_lookup(dir, dentry, dentry->d_name.len, |
110 | dentry->d_name.len, dentry->d_name.name); | 110 | dentry->d_name.name, |
111 | (unsigned long long)OCFS2_I(dir)->ip_blkno, 0); | ||
111 | 112 | ||
112 | if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) { | 113 | if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) { |
113 | ret = ERR_PTR(-ENAMETOOLONG); | 114 | ret = ERR_PTR(-ENAMETOOLONG); |
114 | goto bail; | 115 | goto bail; |
115 | } | 116 | } |
116 | 117 | ||
117 | mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len, | ||
118 | dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); | ||
119 | |||
120 | status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT); | 118 | status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT); |
121 | if (status < 0) { | 119 | if (status < 0) { |
122 | if (status != -ENOENT) | 120 | if (status != -ENOENT) |
@@ -182,7 +180,7 @@ bail_unlock: | |||
182 | 180 | ||
183 | bail: | 181 | bail: |
184 | 182 | ||
185 | mlog_exit_ptr(ret); | 183 | trace_ocfs2_lookup_ret(ret); |
186 | 184 | ||
187 | return ret; | 185 | return ret; |
188 | } | 186 | } |
@@ -235,9 +233,9 @@ static int ocfs2_mknod(struct inode *dir, | |||
235 | sigset_t oldset; | 233 | sigset_t oldset; |
236 | int did_block_signals = 0; | 234 | int did_block_signals = 0; |
237 | 235 | ||
238 | mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, | 236 | trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name, |
239 | (unsigned long)dev, dentry->d_name.len, | 237 | (unsigned long long)OCFS2_I(dir)->ip_blkno, |
240 | dentry->d_name.name); | 238 | (unsigned long)dev, mode); |
241 | 239 | ||
242 | dquot_initialize(dir); | 240 | dquot_initialize(dir); |
243 | 241 | ||
@@ -354,10 +352,6 @@ static int ocfs2_mknod(struct inode *dir, | |||
354 | goto leave; | 352 | goto leave; |
355 | did_quota_inode = 1; | 353 | did_quota_inode = 1; |
356 | 354 | ||
357 | mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, | ||
358 | inode->i_mode, (unsigned long)dev, dentry->d_name.len, | ||
359 | dentry->d_name.name); | ||
360 | |||
361 | /* do the real work now. */ | 355 | /* do the real work now. */ |
362 | status = ocfs2_mknod_locked(osb, dir, inode, dev, | 356 | status = ocfs2_mknod_locked(osb, dir, inode, dev, |
363 | &new_fe_bh, parent_fe_bh, handle, | 357 | &new_fe_bh, parent_fe_bh, handle, |
@@ -436,9 +430,6 @@ leave: | |||
436 | if (did_block_signals) | 430 | if (did_block_signals) |
437 | ocfs2_unblock_signals(&oldset); | 431 | ocfs2_unblock_signals(&oldset); |
438 | 432 | ||
439 | if (status == -ENOSPC) | ||
440 | mlog(0, "Disk is full\n"); | ||
441 | |||
442 | brelse(new_fe_bh); | 433 | brelse(new_fe_bh); |
443 | brelse(parent_fe_bh); | 434 | brelse(parent_fe_bh); |
444 | kfree(si.name); | 435 | kfree(si.name); |
@@ -466,7 +457,8 @@ leave: | |||
466 | iput(inode); | 457 | iput(inode); |
467 | } | 458 | } |
468 | 459 | ||
469 | mlog_exit(status); | 460 | if (status) |
461 | mlog_errno(status); | ||
470 | 462 | ||
471 | return status; | 463 | return status; |
472 | } | 464 | } |
@@ -577,7 +569,8 @@ leave: | |||
577 | } | 569 | } |
578 | } | 570 | } |
579 | 571 | ||
580 | mlog_exit(status); | 572 | if (status) |
573 | mlog_errno(status); | ||
581 | return status; | 574 | return status; |
582 | } | 575 | } |
583 | 576 | ||
@@ -615,10 +608,11 @@ static int ocfs2_mkdir(struct inode *dir, | |||
615 | { | 608 | { |
616 | int ret; | 609 | int ret; |
617 | 610 | ||
618 | mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode, | 611 | trace_ocfs2_mkdir(dir, dentry, dentry->d_name.len, dentry->d_name.name, |
619 | dentry->d_name.len, dentry->d_name.name); | 612 | OCFS2_I(dir)->ip_blkno, mode); |
620 | ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0); | 613 | ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0); |
621 | mlog_exit(ret); | 614 | if (ret) |
615 | mlog_errno(ret); | ||
622 | 616 | ||
623 | return ret; | 617 | return ret; |
624 | } | 618 | } |
@@ -630,10 +624,11 @@ static int ocfs2_create(struct inode *dir, | |||
630 | { | 624 | { |
631 | int ret; | 625 | int ret; |
632 | 626 | ||
633 | mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode, | 627 | trace_ocfs2_create(dir, dentry, dentry->d_name.len, dentry->d_name.name, |
634 | dentry->d_name.len, dentry->d_name.name); | 628 | (unsigned long long)OCFS2_I(dir)->ip_blkno, mode); |
635 | ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0); | 629 | ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0); |
636 | mlog_exit(ret); | 630 | if (ret) |
631 | mlog_errno(ret); | ||
637 | 632 | ||
638 | return ret; | 633 | return ret; |
639 | } | 634 | } |
@@ -652,9 +647,9 @@ static int ocfs2_link(struct dentry *old_dentry, | |||
652 | struct ocfs2_dir_lookup_result lookup = { NULL, }; | 647 | struct ocfs2_dir_lookup_result lookup = { NULL, }; |
653 | sigset_t oldset; | 648 | sigset_t oldset; |
654 | 649 | ||
655 | mlog_entry("(inode=%lu, old='%.*s' new='%.*s')\n", inode->i_ino, | 650 | trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno, |
656 | old_dentry->d_name.len, old_dentry->d_name.name, | 651 | old_dentry->d_name.len, old_dentry->d_name.name, |
657 | dentry->d_name.len, dentry->d_name.name); | 652 | dentry->d_name.len, dentry->d_name.name); |
658 | 653 | ||
659 | if (S_ISDIR(inode->i_mode)) | 654 | if (S_ISDIR(inode->i_mode)) |
660 | return -EPERM; | 655 | return -EPERM; |
@@ -757,7 +752,8 @@ out: | |||
757 | 752 | ||
758 | ocfs2_free_dir_lookup_result(&lookup); | 753 | ocfs2_free_dir_lookup_result(&lookup); |
759 | 754 | ||
760 | mlog_exit(err); | 755 | if (err) |
756 | mlog_errno(err); | ||
761 | 757 | ||
762 | return err; | 758 | return err; |
763 | } | 759 | } |
@@ -809,19 +805,17 @@ static int ocfs2_unlink(struct inode *dir, | |||
809 | struct ocfs2_dir_lookup_result lookup = { NULL, }; | 805 | struct ocfs2_dir_lookup_result lookup = { NULL, }; |
810 | struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; | 806 | struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; |
811 | 807 | ||
812 | mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, | 808 | trace_ocfs2_unlink(dir, dentry, dentry->d_name.len, |
813 | dentry->d_name.len, dentry->d_name.name); | 809 | dentry->d_name.name, |
810 | (unsigned long long)OCFS2_I(dir)->ip_blkno, | ||
811 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
814 | 812 | ||
815 | dquot_initialize(dir); | 813 | dquot_initialize(dir); |
816 | 814 | ||
817 | BUG_ON(dentry->d_parent->d_inode != dir); | 815 | BUG_ON(dentry->d_parent->d_inode != dir); |
818 | 816 | ||
819 | mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); | 817 | if (inode == osb->root_inode) |
820 | |||
821 | if (inode == osb->root_inode) { | ||
822 | mlog(0, "Cannot delete the root directory\n"); | ||
823 | return -EPERM; | 818 | return -EPERM; |
824 | } | ||
825 | 819 | ||
826 | status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1, | 820 | status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1, |
827 | OI_LS_PARENT); | 821 | OI_LS_PARENT); |
@@ -843,9 +837,10 @@ static int ocfs2_unlink(struct inode *dir, | |||
843 | if (OCFS2_I(inode)->ip_blkno != blkno) { | 837 | if (OCFS2_I(inode)->ip_blkno != blkno) { |
844 | status = -ENOENT; | 838 | status = -ENOENT; |
845 | 839 | ||
846 | mlog(0, "ip_blkno %llu != dirent blkno %llu ip_flags = %x\n", | 840 | trace_ocfs2_unlink_noent( |
847 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 841 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
848 | (unsigned long long)blkno, OCFS2_I(inode)->ip_flags); | 842 | (unsigned long long)blkno, |
843 | OCFS2_I(inode)->ip_flags); | ||
849 | goto leave; | 844 | goto leave; |
850 | } | 845 | } |
851 | 846 | ||
@@ -954,7 +949,8 @@ leave: | |||
954 | ocfs2_free_dir_lookup_result(&orphan_insert); | 949 | ocfs2_free_dir_lookup_result(&orphan_insert); |
955 | ocfs2_free_dir_lookup_result(&lookup); | 950 | ocfs2_free_dir_lookup_result(&lookup); |
956 | 951 | ||
957 | mlog_exit(status); | 952 | if (status) |
953 | mlog_errno(status); | ||
958 | 954 | ||
959 | return status; | 955 | return status; |
960 | } | 956 | } |
@@ -975,9 +971,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, | |||
975 | struct buffer_head **tmpbh; | 971 | struct buffer_head **tmpbh; |
976 | struct inode *tmpinode; | 972 | struct inode *tmpinode; |
977 | 973 | ||
978 | mlog_entry("(inode1 = %llu, inode2 = %llu)\n", | 974 | trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno, |
979 | (unsigned long long)oi1->ip_blkno, | 975 | (unsigned long long)oi2->ip_blkno); |
980 | (unsigned long long)oi2->ip_blkno); | ||
981 | 976 | ||
982 | if (*bh1) | 977 | if (*bh1) |
983 | *bh1 = NULL; | 978 | *bh1 = NULL; |
@@ -988,7 +983,6 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, | |||
988 | if (oi1->ip_blkno != oi2->ip_blkno) { | 983 | if (oi1->ip_blkno != oi2->ip_blkno) { |
989 | if (oi1->ip_blkno < oi2->ip_blkno) { | 984 | if (oi1->ip_blkno < oi2->ip_blkno) { |
990 | /* switch id1 and id2 around */ | 985 | /* switch id1 and id2 around */ |
991 | mlog(0, "switching them around...\n"); | ||
992 | tmpbh = bh2; | 986 | tmpbh = bh2; |
993 | bh2 = bh1; | 987 | bh2 = bh1; |
994 | bh1 = tmpbh; | 988 | bh1 = tmpbh; |
@@ -1024,8 +1018,13 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, | |||
1024 | mlog_errno(status); | 1018 | mlog_errno(status); |
1025 | } | 1019 | } |
1026 | 1020 | ||
1021 | trace_ocfs2_double_lock_end( | ||
1022 | (unsigned long long)OCFS2_I(inode1)->ip_blkno, | ||
1023 | (unsigned long long)OCFS2_I(inode2)->ip_blkno); | ||
1024 | |||
1027 | bail: | 1025 | bail: |
1028 | mlog_exit(status); | 1026 | if (status) |
1027 | mlog_errno(status); | ||
1029 | return status; | 1028 | return status; |
1030 | } | 1029 | } |
1031 | 1030 | ||
@@ -1067,10 +1066,9 @@ static int ocfs2_rename(struct inode *old_dir, | |||
1067 | /* At some point it might be nice to break this function up a | 1066 | /* At some point it might be nice to break this function up a |
1068 | * bit. */ | 1067 | * bit. */ |
1069 | 1068 | ||
1070 | mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p, from='%.*s' to='%.*s')\n", | 1069 | trace_ocfs2_rename(old_dir, old_dentry, new_dir, new_dentry, |
1071 | old_dir, old_dentry, new_dir, new_dentry, | 1070 | old_dentry->d_name.len, old_dentry->d_name.name, |
1072 | old_dentry->d_name.len, old_dentry->d_name.name, | 1071 | new_dentry->d_name.len, new_dentry->d_name.name); |
1073 | new_dentry->d_name.len, new_dentry->d_name.name); | ||
1074 | 1072 | ||
1075 | dquot_initialize(old_dir); | 1073 | dquot_initialize(old_dir); |
1076 | dquot_initialize(new_dir); | 1074 | dquot_initialize(new_dir); |
@@ -1227,16 +1225,15 @@ static int ocfs2_rename(struct inode *old_dir, | |||
1227 | if (!new_inode) { | 1225 | if (!new_inode) { |
1228 | status = -EACCES; | 1226 | status = -EACCES; |
1229 | 1227 | ||
1230 | mlog(0, "We found an inode for name %.*s but VFS " | 1228 | trace_ocfs2_rename_target_exists(new_dentry->d_name.len, |
1231 | "didn't give us one.\n", new_dentry->d_name.len, | 1229 | new_dentry->d_name.name); |
1232 | new_dentry->d_name.name); | ||
1233 | goto bail; | 1230 | goto bail; |
1234 | } | 1231 | } |
1235 | 1232 | ||
1236 | if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) { | 1233 | if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) { |
1237 | status = -EACCES; | 1234 | status = -EACCES; |
1238 | 1235 | ||
1239 | mlog(0, "Inode %llu and dir %llu disagree. flags = %x\n", | 1236 | trace_ocfs2_rename_disagree( |
1240 | (unsigned long long)OCFS2_I(new_inode)->ip_blkno, | 1237 | (unsigned long long)OCFS2_I(new_inode)->ip_blkno, |
1241 | (unsigned long long)newfe_blkno, | 1238 | (unsigned long long)newfe_blkno, |
1242 | OCFS2_I(new_inode)->ip_flags); | 1239 | OCFS2_I(new_inode)->ip_flags); |
@@ -1259,8 +1256,7 @@ static int ocfs2_rename(struct inode *old_dir, | |||
1259 | 1256 | ||
1260 | newfe = (struct ocfs2_dinode *) newfe_bh->b_data; | 1257 | newfe = (struct ocfs2_dinode *) newfe_bh->b_data; |
1261 | 1258 | ||
1262 | mlog(0, "aha rename over existing... new_blkno=%llu " | 1259 | trace_ocfs2_rename_over_existing( |
1263 | "newfebh=%p bhblocknr=%llu\n", | ||
1264 | (unsigned long long)newfe_blkno, newfe_bh, newfe_bh ? | 1260 | (unsigned long long)newfe_blkno, newfe_bh, newfe_bh ? |
1265 | (unsigned long long)newfe_bh->b_blocknr : 0ULL); | 1261 | (unsigned long long)newfe_bh->b_blocknr : 0ULL); |
1266 | 1262 | ||
@@ -1476,7 +1472,8 @@ bail: | |||
1476 | brelse(old_dir_bh); | 1472 | brelse(old_dir_bh); |
1477 | brelse(new_dir_bh); | 1473 | brelse(new_dir_bh); |
1478 | 1474 | ||
1479 | mlog_exit(status); | 1475 | if (status) |
1476 | mlog_errno(status); | ||
1480 | 1477 | ||
1481 | return status; | 1478 | return status; |
1482 | } | 1479 | } |
@@ -1501,9 +1498,8 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, | |||
1501 | * write i_size + 1 bytes. */ | 1498 | * write i_size + 1 bytes. */ |
1502 | blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; | 1499 | blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; |
1503 | 1500 | ||
1504 | mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n", | 1501 | trace_ocfs2_create_symlink_data((unsigned long long)inode->i_blocks, |
1505 | (unsigned long long)inode->i_blocks, | 1502 | i_size_read(inode), blocks); |
1506 | i_size_read(inode), blocks); | ||
1507 | 1503 | ||
1508 | /* Sanity check -- make sure we're going to fit. */ | 1504 | /* Sanity check -- make sure we're going to fit. */ |
1509 | if (bytes_left > | 1505 | if (bytes_left > |
@@ -1579,7 +1575,8 @@ bail: | |||
1579 | kfree(bhs); | 1575 | kfree(bhs); |
1580 | } | 1576 | } |
1581 | 1577 | ||
1582 | mlog_exit(status); | 1578 | if (status) |
1579 | mlog_errno(status); | ||
1583 | return status; | 1580 | return status; |
1584 | } | 1581 | } |
1585 | 1582 | ||
@@ -1610,8 +1607,8 @@ static int ocfs2_symlink(struct inode *dir, | |||
1610 | sigset_t oldset; | 1607 | sigset_t oldset; |
1611 | int did_block_signals = 0; | 1608 | int did_block_signals = 0; |
1612 | 1609 | ||
1613 | mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, | 1610 | trace_ocfs2_symlink_begin(dir, dentry, symname, |
1614 | dentry, symname, dentry->d_name.len, dentry->d_name.name); | 1611 | dentry->d_name.len, dentry->d_name.name); |
1615 | 1612 | ||
1616 | dquot_initialize(dir); | 1613 | dquot_initialize(dir); |
1617 | 1614 | ||
@@ -1713,9 +1710,10 @@ static int ocfs2_symlink(struct inode *dir, | |||
1713 | goto bail; | 1710 | goto bail; |
1714 | did_quota_inode = 1; | 1711 | did_quota_inode = 1; |
1715 | 1712 | ||
1716 | mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, | 1713 | trace_ocfs2_symlink_create(dir, dentry, dentry->d_name.len, |
1717 | inode->i_mode, dentry->d_name.len, | 1714 | dentry->d_name.name, |
1718 | dentry->d_name.name); | 1715 | (unsigned long long)OCFS2_I(dir)->ip_blkno, |
1716 | inode->i_mode); | ||
1719 | 1717 | ||
1720 | status = ocfs2_mknod_locked(osb, dir, inode, | 1718 | status = ocfs2_mknod_locked(osb, dir, inode, |
1721 | 0, &new_fe_bh, parent_fe_bh, handle, | 1719 | 0, &new_fe_bh, parent_fe_bh, handle, |
@@ -1835,7 +1833,8 @@ bail: | |||
1835 | iput(inode); | 1833 | iput(inode); |
1836 | } | 1834 | } |
1837 | 1835 | ||
1838 | mlog_exit(status); | 1836 | if (status) |
1837 | mlog_errno(status); | ||
1839 | 1838 | ||
1840 | return status; | 1839 | return status; |
1841 | } | 1840 | } |
@@ -1844,8 +1843,6 @@ static int ocfs2_blkno_stringify(u64 blkno, char *name) | |||
1844 | { | 1843 | { |
1845 | int status, namelen; | 1844 | int status, namelen; |
1846 | 1845 | ||
1847 | mlog_entry_void(); | ||
1848 | |||
1849 | namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx", | 1846 | namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx", |
1850 | (long long)blkno); | 1847 | (long long)blkno); |
1851 | if (namelen <= 0) { | 1848 | if (namelen <= 0) { |
@@ -1862,12 +1859,12 @@ static int ocfs2_blkno_stringify(u64 blkno, char *name) | |||
1862 | goto bail; | 1859 | goto bail; |
1863 | } | 1860 | } |
1864 | 1861 | ||
1865 | mlog(0, "built filename '%s' for orphan dir (len=%d)\n", name, | 1862 | trace_ocfs2_blkno_stringify(blkno, name, namelen); |
1866 | namelen); | ||
1867 | 1863 | ||
1868 | status = 0; | 1864 | status = 0; |
1869 | bail: | 1865 | bail: |
1870 | mlog_exit(status); | 1866 | if (status < 0) |
1867 | mlog_errno(status); | ||
1871 | return status; | 1868 | return status; |
1872 | } | 1869 | } |
1873 | 1870 | ||
@@ -1980,7 +1977,8 @@ out: | |||
1980 | iput(orphan_dir_inode); | 1977 | iput(orphan_dir_inode); |
1981 | } | 1978 | } |
1982 | 1979 | ||
1983 | mlog_exit(ret); | 1980 | if (ret) |
1981 | mlog_errno(ret); | ||
1984 | return ret; | 1982 | return ret; |
1985 | } | 1983 | } |
1986 | 1984 | ||
@@ -1997,7 +1995,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, | |||
1997 | struct ocfs2_dinode *orphan_fe; | 1995 | struct ocfs2_dinode *orphan_fe; |
1998 | struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; | 1996 | struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; |
1999 | 1997 | ||
2000 | mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); | 1998 | trace_ocfs2_orphan_add_begin( |
1999 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
2001 | 2000 | ||
2002 | status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh); | 2001 | status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh); |
2003 | if (status < 0) { | 2002 | if (status < 0) { |
@@ -2056,13 +2055,14 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, | |||
2056 | 2055 | ||
2057 | ocfs2_journal_dirty(handle, fe_bh); | 2056 | ocfs2_journal_dirty(handle, fe_bh); |
2058 | 2057 | ||
2059 | mlog(0, "Inode %llu orphaned in slot %d\n", | 2058 | trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno, |
2060 | (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); | 2059 | osb->slot_num); |
2061 | 2060 | ||
2062 | leave: | 2061 | leave: |
2063 | brelse(orphan_dir_bh); | 2062 | brelse(orphan_dir_bh); |
2064 | 2063 | ||
2065 | mlog_exit(status); | 2064 | if (status) |
2065 | mlog_errno(status); | ||
2066 | return status; | 2066 | return status; |
2067 | } | 2067 | } |
2068 | 2068 | ||
@@ -2078,17 +2078,15 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, | |||
2078 | int status = 0; | 2078 | int status = 0; |
2079 | struct ocfs2_dir_lookup_result lookup = { NULL, }; | 2079 | struct ocfs2_dir_lookup_result lookup = { NULL, }; |
2080 | 2080 | ||
2081 | mlog_entry_void(); | ||
2082 | |||
2083 | status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); | 2081 | status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); |
2084 | if (status < 0) { | 2082 | if (status < 0) { |
2085 | mlog_errno(status); | 2083 | mlog_errno(status); |
2086 | goto leave; | 2084 | goto leave; |
2087 | } | 2085 | } |
2088 | 2086 | ||
2089 | mlog(0, "removing '%s' from orphan dir %llu (namelen=%d)\n", | 2087 | trace_ocfs2_orphan_del( |
2090 | name, (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno, | 2088 | (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno, |
2091 | OCFS2_ORPHAN_NAMELEN); | 2089 | name, OCFS2_ORPHAN_NAMELEN); |
2092 | 2090 | ||
2093 | /* find it's spot in the orphan directory */ | 2091 | /* find it's spot in the orphan directory */ |
2094 | status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode, | 2092 | status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode, |
@@ -2124,7 +2122,8 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, | |||
2124 | leave: | 2122 | leave: |
2125 | ocfs2_free_dir_lookup_result(&lookup); | 2123 | ocfs2_free_dir_lookup_result(&lookup); |
2126 | 2124 | ||
2127 | mlog_exit(status); | 2125 | if (status) |
2126 | mlog_errno(status); | ||
2128 | return status; | 2127 | return status; |
2129 | } | 2128 | } |
2130 | 2129 | ||
@@ -2321,9 +2320,6 @@ leave: | |||
2321 | iput(orphan_dir); | 2320 | iput(orphan_dir); |
2322 | } | 2321 | } |
2323 | 2322 | ||
2324 | if (status == -ENOSPC) | ||
2325 | mlog(0, "Disk is full\n"); | ||
2326 | |||
2327 | if ((status < 0) && inode) { | 2323 | if ((status < 0) && inode) { |
2328 | clear_nlink(inode); | 2324 | clear_nlink(inode); |
2329 | iput(inode); | 2325 | iput(inode); |
@@ -2358,8 +2354,10 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, | |||
2358 | struct buffer_head *di_bh = NULL; | 2354 | struct buffer_head *di_bh = NULL; |
2359 | struct ocfs2_dir_lookup_result lookup = { NULL, }; | 2355 | struct ocfs2_dir_lookup_result lookup = { NULL, }; |
2360 | 2356 | ||
2361 | mlog_entry("(0x%p, 0x%p, %.*s')\n", dir, dentry, | 2357 | trace_ocfs2_mv_orphaned_inode_to_new(dir, dentry, |
2362 | dentry->d_name.len, dentry->d_name.name); | 2358 | dentry->d_name.len, dentry->d_name.name, |
2359 | (unsigned long long)OCFS2_I(dir)->ip_blkno, | ||
2360 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
2363 | 2361 | ||
2364 | status = ocfs2_inode_lock(dir, &parent_di_bh, 1); | 2362 | status = ocfs2_inode_lock(dir, &parent_di_bh, 1); |
2365 | if (status < 0) { | 2363 | if (status < 0) { |
@@ -2476,7 +2474,8 @@ leave: | |||
2476 | 2474 | ||
2477 | ocfs2_free_dir_lookup_result(&lookup); | 2475 | ocfs2_free_dir_lookup_result(&lookup); |
2478 | 2476 | ||
2479 | mlog_exit(status); | 2477 | if (status) |
2478 | mlog_errno(status); | ||
2480 | 2479 | ||
2481 | return status; | 2480 | return status; |
2482 | } | 2481 | } |
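The trace_ocfs2_*() calls introduced in fs/ocfs2/namei.c above are generated from event definitions in the new fs/ocfs2/ocfs2_trace.h shown further down: DECLARE_EVENT_CLASS describes a record layout and format string once, and each DEFINE_EVENT stamps out a named event reusing that class. As a rough analogy only (plain C, not the real tracepoint machinery), the class/event split behaves like one shared emitter with thin named wrappers; the names below are invented for illustration.

#include <stdio.h>

/* Illustrative stand-in for DECLARE_EVENT_CLASS: one shared emitter
 * for every event that records "an unsigned long long and an int". */
static void emit_ull_int(const char *event, unsigned long long v1, int v2)
{
        printf("%s: %llu %d\n", event, v1, v2);
}

/* Illustrative stand-in for DEFINE_EVENT: a named wrapper per event. */
#define DEFINE_ULL_INT_EVENT(name)                                \
        static void trace_##name(unsigned long long v1, int v2)   \
        {                                                         \
                emit_ull_int(#name, v1, v2);                      \
        }

DEFINE_ULL_INT_EVENT(demo_lookup_ret)

int main(void)
{
        trace_demo_lookup_ret(8193ULL, 0);      /* e.g. a blkno and a status */
        return 0;
}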
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 1a97ba1ec3fc..409285854f64 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -147,6 +147,17 @@ struct ocfs2_lock_res_ops; | |||
147 | 147 | ||
148 | typedef void (*ocfs2_lock_callback)(int status, unsigned long data); | 148 | typedef void (*ocfs2_lock_callback)(int status, unsigned long data); |
149 | 149 | ||
150 | #ifdef CONFIG_OCFS2_FS_STATS | ||
151 | struct ocfs2_lock_stats { | ||
152 | u64 ls_total; /* Total wait in NSEC */ | ||
153 | u32 ls_gets; /* Num acquires */ | ||
154 | u32 ls_fail; /* Num failed acquires */ | ||
155 | |||
156 | /* Storing max wait in usecs saves 24 bytes per inode */ | ||
157 | u32 ls_max; /* Max wait in USEC */ | ||
158 | }; | ||
159 | #endif | ||
160 | |||
150 | struct ocfs2_lock_res { | 161 | struct ocfs2_lock_res { |
151 | void *l_priv; | 162 | void *l_priv; |
152 | struct ocfs2_lock_res_ops *l_ops; | 163 | struct ocfs2_lock_res_ops *l_ops; |
@@ -182,15 +193,9 @@ struct ocfs2_lock_res { | |||
182 | struct list_head l_debug_list; | 193 | struct list_head l_debug_list; |
183 | 194 | ||
184 | #ifdef CONFIG_OCFS2_FS_STATS | 195 | #ifdef CONFIG_OCFS2_FS_STATS |
185 | unsigned long long l_lock_num_prmode; /* PR acquires */ | 196 | struct ocfs2_lock_stats l_lock_prmode; /* PR mode stats */ |
186 | unsigned long long l_lock_num_exmode; /* EX acquires */ | 197 | u32 l_lock_refresh; /* Disk refreshes */ |
187 | unsigned int l_lock_num_prmode_failed; /* Failed PR gets */ | 198 | struct ocfs2_lock_stats l_lock_exmode; /* EX mode stats */ |
188 | unsigned int l_lock_num_exmode_failed; /* Failed EX gets */ | ||
189 | unsigned long long l_lock_total_prmode; /* Tot wait for PR */ | ||
190 | unsigned long long l_lock_total_exmode; /* Tot wait for EX */ | ||
191 | unsigned int l_lock_max_prmode; /* Max wait for PR */ | ||
192 | unsigned int l_lock_max_exmode; /* Max wait for EX */ | ||
193 | unsigned int l_lock_refresh; /* Disk refreshes */ | ||
194 | #endif | 199 | #endif |
195 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 200 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
196 | struct lockdep_map l_lockdep_map; | 201 | struct lockdep_map l_lockdep_map; |
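The ocfs2.h hunk folds the eight per-mode lock counters into two instances of the new struct ocfs2_lock_stats (one for PR, one for EX), keeping the accumulated wait in nanoseconds while storing only the maximum in microseconds to keep the structure small. A hedged sketch of how such a structure could be folded per acquire is below; the field names follow the header above, but the update helper itself is illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

struct ocfs2_lock_stats {
        uint64_t ls_total;      /* total wait, nanoseconds */
        uint32_t ls_gets;       /* number of acquires */
        uint32_t ls_fail;       /* number of failed acquires */
        uint32_t ls_max;        /* max wait, microseconds */
};

/* Illustrative update: fold one acquire's wait time into the stats. */
static void lock_stats_update(struct ocfs2_lock_stats *s,
                              uint64_t wait_ns, int failed)
{
        uint32_t wait_us = (uint32_t)(wait_ns / 1000);

        s->ls_gets++;
        if (failed)
                s->ls_fail++;
        s->ls_total += wait_ns;
        if (wait_us > s->ls_max)
                s->ls_max = wait_us;
}

int main(void)
{
        struct ocfs2_lock_stats ex = { 0 };

        lock_stats_update(&ex, 1200000, 0);     /* 1.2 ms wait, success */
        lock_stats_update(&ex, 300000, 1);      /* 0.3 ms wait, failed */
        printf("gets=%u fail=%u total=%llu ns max=%u us\n",
               ex.ls_gets, ex.ls_fail,
               (unsigned long long)ex.ls_total, ex.ls_max);
        return 0;
}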
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h new file mode 100644 index 000000000000..a1dae5bb54ac --- /dev/null +++ b/fs/ocfs2/ocfs2_trace.h | |||
@@ -0,0 +1,2739 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM ocfs2 | ||
3 | |||
4 | #if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_OCFS2_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | DECLARE_EVENT_CLASS(ocfs2__int, | ||
10 | TP_PROTO(int num), | ||
11 | TP_ARGS(num), | ||
12 | TP_STRUCT__entry( | ||
13 | __field(int, num) | ||
14 | ), | ||
15 | TP_fast_assign( | ||
16 | __entry->num = num; | ||
17 | ), | ||
18 | TP_printk("%d", __entry->num) | ||
19 | ); | ||
20 | |||
21 | #define DEFINE_OCFS2_INT_EVENT(name) \ | ||
22 | DEFINE_EVENT(ocfs2__int, name, \ | ||
23 | TP_PROTO(int num), \ | ||
24 | TP_ARGS(num)) | ||
25 | |||
26 | DECLARE_EVENT_CLASS(ocfs2__uint, | ||
27 | TP_PROTO(unsigned int num), | ||
28 | TP_ARGS(num), | ||
29 | TP_STRUCT__entry( | ||
30 | __field( unsigned int, num ) | ||
31 | ), | ||
32 | TP_fast_assign( | ||
33 | __entry->num = num; | ||
34 | ), | ||
35 | TP_printk("%u", __entry->num) | ||
36 | ); | ||
37 | |||
38 | #define DEFINE_OCFS2_UINT_EVENT(name) \ | ||
39 | DEFINE_EVENT(ocfs2__uint, name, \ | ||
40 | TP_PROTO(unsigned int num), \ | ||
41 | TP_ARGS(num)) | ||
42 | |||
43 | DECLARE_EVENT_CLASS(ocfs2__ull, | ||
44 | TP_PROTO(unsigned long long blkno), | ||
45 | TP_ARGS(blkno), | ||
46 | TP_STRUCT__entry( | ||
47 | __field(unsigned long long, blkno) | ||
48 | ), | ||
49 | TP_fast_assign( | ||
50 | __entry->blkno = blkno; | ||
51 | ), | ||
52 | TP_printk("%llu", __entry->blkno) | ||
53 | ); | ||
54 | |||
55 | #define DEFINE_OCFS2_ULL_EVENT(name) \ | ||
56 | DEFINE_EVENT(ocfs2__ull, name, \ | ||
57 | TP_PROTO(unsigned long long num), \ | ||
58 | TP_ARGS(num)) | ||
59 | |||
60 | DECLARE_EVENT_CLASS(ocfs2__pointer, | ||
61 | TP_PROTO(void *pointer), | ||
62 | TP_ARGS(pointer), | ||
63 | TP_STRUCT__entry( | ||
64 | __field(void *, pointer) | ||
65 | ), | ||
66 | TP_fast_assign( | ||
67 | __entry->pointer = pointer; | ||
68 | ), | ||
69 | TP_printk("%p", __entry->pointer) | ||
70 | ); | ||
71 | |||
72 | #define DEFINE_OCFS2_POINTER_EVENT(name) \ | ||
73 | DEFINE_EVENT(ocfs2__pointer, name, \ | ||
74 | TP_PROTO(void *pointer), \ | ||
75 | TP_ARGS(pointer)) | ||
76 | |||
77 | DECLARE_EVENT_CLASS(ocfs2__string, | ||
78 | TP_PROTO(const char *name), | ||
79 | TP_ARGS(name), | ||
80 | TP_STRUCT__entry( | ||
81 | __string(name,name) | ||
82 | ), | ||
83 | TP_fast_assign( | ||
84 | __assign_str(name, name); | ||
85 | ), | ||
86 | TP_printk("%s", __get_str(name)) | ||
87 | ); | ||
88 | |||
89 | #define DEFINE_OCFS2_STRING_EVENT(name) \ | ||
90 | DEFINE_EVENT(ocfs2__string, name, \ | ||
91 | TP_PROTO(const char *name), \ | ||
92 | TP_ARGS(name)) | ||
93 | |||
94 | DECLARE_EVENT_CLASS(ocfs2__int_int, | ||
95 | TP_PROTO(int value1, int value2), | ||
96 | TP_ARGS(value1, value2), | ||
97 | TP_STRUCT__entry( | ||
98 | __field(int, value1) | ||
99 | __field(int, value2) | ||
100 | ), | ||
101 | TP_fast_assign( | ||
102 | __entry->value1 = value1; | ||
103 | __entry->value2 = value2; | ||
104 | ), | ||
105 | TP_printk("%d %d", __entry->value1, __entry->value2) | ||
106 | ); | ||
107 | |||
108 | #define DEFINE_OCFS2_INT_INT_EVENT(name) \ | ||
109 | DEFINE_EVENT(ocfs2__int_int, name, \ | ||
110 | TP_PROTO(int val1, int val2), \ | ||
111 | TP_ARGS(val1, val2)) | ||
112 | |||
113 | DECLARE_EVENT_CLASS(ocfs2__uint_int, | ||
114 | TP_PROTO(unsigned int value1, int value2), | ||
115 | TP_ARGS(value1, value2), | ||
116 | TP_STRUCT__entry( | ||
117 | __field(unsigned int, value1) | ||
118 | __field(int, value2) | ||
119 | ), | ||
120 | TP_fast_assign( | ||
121 | __entry->value1 = value1; | ||
122 | __entry->value2 = value2; | ||
123 | ), | ||
124 | TP_printk("%u %d", __entry->value1, __entry->value2) | ||
125 | ); | ||
126 | |||
127 | #define DEFINE_OCFS2_UINT_INT_EVENT(name) \ | ||
128 | DEFINE_EVENT(ocfs2__uint_int, name, \ | ||
129 | TP_PROTO(unsigned int val1, int val2), \ | ||
130 | TP_ARGS(val1, val2)) | ||
131 | |||
132 | DECLARE_EVENT_CLASS(ocfs2__uint_uint, | ||
133 | TP_PROTO(unsigned int value1, unsigned int value2), | ||
134 | TP_ARGS(value1, value2), | ||
135 | TP_STRUCT__entry( | ||
136 | __field(unsigned int, value1) | ||
137 | __field(unsigned int, value2) | ||
138 | ), | ||
139 | TP_fast_assign( | ||
140 | __entry->value1 = value1; | ||
141 | __entry->value2 = value2; | ||
142 | ), | ||
143 | TP_printk("%u %u", __entry->value1, __entry->value2) | ||
144 | ); | ||
145 | |||
146 | #define DEFINE_OCFS2_UINT_UINT_EVENT(name) \ | ||
147 | DEFINE_EVENT(ocfs2__uint_uint, name, \ | ||
148 | TP_PROTO(unsigned int val1, unsigned int val2), \ | ||
149 | TP_ARGS(val1, val2)) | ||
150 | |||
151 | DECLARE_EVENT_CLASS(ocfs2__ull_uint, | ||
152 | TP_PROTO(unsigned long long value1, unsigned int value2), | ||
153 | TP_ARGS(value1, value2), | ||
154 | TP_STRUCT__entry( | ||
155 | __field(unsigned long long, value1) | ||
156 | __field(unsigned int, value2) | ||
157 | ), | ||
158 | TP_fast_assign( | ||
159 | __entry->value1 = value1; | ||
160 | __entry->value2 = value2; | ||
161 | ), | ||
162 | TP_printk("%llu %u", __entry->value1, __entry->value2) | ||
163 | ); | ||
164 | |||
165 | #define DEFINE_OCFS2_ULL_UINT_EVENT(name) \ | ||
166 | DEFINE_EVENT(ocfs2__ull_uint, name, \ | ||
167 | TP_PROTO(unsigned long long val1, unsigned int val2), \ | ||
168 | TP_ARGS(val1, val2)) | ||
169 | |||
170 | DECLARE_EVENT_CLASS(ocfs2__ull_int, | ||
171 | TP_PROTO(unsigned long long value1, int value2), | ||
172 | TP_ARGS(value1, value2), | ||
173 | TP_STRUCT__entry( | ||
174 | __field(unsigned long long, value1) | ||
175 | __field(int, value2) | ||
176 | ), | ||
177 | TP_fast_assign( | ||
178 | __entry->value1 = value1; | ||
179 | __entry->value2 = value2; | ||
180 | ), | ||
181 | TP_printk("%llu %d", __entry->value1, __entry->value2) | ||
182 | ); | ||
183 | |||
184 | #define DEFINE_OCFS2_ULL_INT_EVENT(name) \ | ||
185 | DEFINE_EVENT(ocfs2__ull_int, name, \ | ||
186 | TP_PROTO(unsigned long long val1, int val2), \ | ||
187 | TP_ARGS(val1, val2)) | ||
188 | |||
189 | DECLARE_EVENT_CLASS(ocfs2__ull_ull, | ||
190 | TP_PROTO(unsigned long long value1, unsigned long long value2), | ||
191 | TP_ARGS(value1, value2), | ||
192 | TP_STRUCT__entry( | ||
193 | __field(unsigned long long, value1) | ||
194 | __field(unsigned long long, value2) | ||
195 | ), | ||
196 | TP_fast_assign( | ||
197 | __entry->value1 = value1; | ||
198 | __entry->value2 = value2; | ||
199 | ), | ||
200 | TP_printk("%llu %llu", __entry->value1, __entry->value2) | ||
201 | ); | ||
202 | |||
203 | #define DEFINE_OCFS2_ULL_ULL_EVENT(name) \ | ||
204 | DEFINE_EVENT(ocfs2__ull_ull, name, \ | ||
205 | TP_PROTO(unsigned long long val1, unsigned long long val2), \ | ||
206 | TP_ARGS(val1, val2)) | ||
207 | |||
208 | DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint, | ||
209 | TP_PROTO(unsigned long long value1, | ||
210 | unsigned long long value2, unsigned int value3), | ||
211 | TP_ARGS(value1, value2, value3), | ||
212 | TP_STRUCT__entry( | ||
213 | __field(unsigned long long, value1) | ||
214 | __field(unsigned long long, value2) | ||
215 | __field(unsigned int, value3) | ||
216 | ), | ||
217 | TP_fast_assign( | ||
218 | __entry->value1 = value1; | ||
219 | __entry->value2 = value2; | ||
220 | __entry->value3 = value3; | ||
221 | ), | ||
222 | TP_printk("%llu %llu %u", | ||
223 | __entry->value1, __entry->value2, __entry->value3) | ||
224 | ); | ||
225 | |||
226 | #define DEFINE_OCFS2_ULL_ULL_UINT_EVENT(name) \ | ||
227 | DEFINE_EVENT(ocfs2__ull_ull_uint, name, \ | ||
228 | TP_PROTO(unsigned long long val1, \ | ||
229 | unsigned long long val2, unsigned int val3), \ | ||
230 | TP_ARGS(val1, val2, val3)) | ||
231 | |||
232 | DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint, | ||
233 | TP_PROTO(unsigned long long value1, | ||
234 | unsigned int value2, unsigned int value3), | ||
235 | TP_ARGS(value1, value2, value3), | ||
236 | TP_STRUCT__entry( | ||
237 | __field(unsigned long long, value1) | ||
238 | __field(unsigned int, value2) | ||
239 | __field(unsigned int, value3) | ||
240 | ), | ||
241 | TP_fast_assign( | ||
242 | __entry->value1 = value1; | ||
243 | __entry->value2 = value2; | ||
244 | __entry->value3 = value3; | ||
245 | ), | ||
246 | TP_printk("%llu %u %u", __entry->value1, | ||
247 | __entry->value2, __entry->value3) | ||
248 | ); | ||
249 | |||
250 | #define DEFINE_OCFS2_ULL_UINT_UINT_EVENT(name) \ | ||
251 | DEFINE_EVENT(ocfs2__ull_uint_uint, name, \ | ||
252 | TP_PROTO(unsigned long long val1, \ | ||
253 | unsigned int val2, unsigned int val3), \ | ||
254 | TP_ARGS(val1, val2, val3)) | ||
255 | |||
256 | DECLARE_EVENT_CLASS(ocfs2__uint_uint_uint, | ||
257 | TP_PROTO(unsigned int value1, unsigned int value2, | ||
258 | unsigned int value3), | ||
259 | TP_ARGS(value1, value2, value3), | ||
260 | TP_STRUCT__entry( | ||
261 | __field( unsigned int, value1 ) | ||
262 | __field( unsigned int, value2 ) | ||
263 | __field( unsigned int, value3 ) | ||
264 | ), | ||
265 | TP_fast_assign( | ||
266 | __entry->value1 = value1; | ||
267 | __entry->value2 = value2; | ||
268 | __entry->value3 = value3; | ||
269 | ), | ||
270 | TP_printk("%u %u %u", __entry->value1, __entry->value2, __entry->value3) | ||
271 | ); | ||
272 | |||
273 | #define DEFINE_OCFS2_UINT_UINT_UINT_EVENT(name) \ | ||
274 | DEFINE_EVENT(ocfs2__uint_uint_uint, name, \ | ||
275 | TP_PROTO(unsigned int value1, unsigned int value2, \ | ||
276 | unsigned int value3), \ | ||
277 | TP_ARGS(value1, value2, value3)) | ||
278 | |||
279 | DECLARE_EVENT_CLASS(ocfs2__ull_ull_ull, | ||
280 | TP_PROTO(unsigned long long value1, | ||
281 | unsigned long long value2, unsigned long long value3), | ||
282 | TP_ARGS(value1, value2, value3), | ||
283 | TP_STRUCT__entry( | ||
284 | __field(unsigned long long, value1) | ||
285 | __field(unsigned long long, value2) | ||
286 | __field(unsigned long long, value3) | ||
287 | ), | ||
288 | TP_fast_assign( | ||
289 | __entry->value1 = value1; | ||
290 | __entry->value2 = value2; | ||
291 | __entry->value3 = value3; | ||
292 | ), | ||
293 | TP_printk("%llu %llu %llu", | ||
294 | __entry->value1, __entry->value2, __entry->value3) | ||
295 | ); | ||
296 | |||
297 | #define DEFINE_OCFS2_ULL_ULL_ULL_EVENT(name) \ | ||
298 | DEFINE_EVENT(ocfs2__ull_ull_ull, name, \ | ||
299 | TP_PROTO(unsigned long long value1, unsigned long long value2, \ | ||
300 | unsigned long long value3), \ | ||
301 | TP_ARGS(value1, value2, value3)) | ||
302 | |||
303 | DECLARE_EVENT_CLASS(ocfs2__ull_int_int_int, | ||
304 | TP_PROTO(unsigned long long ull, int value1, int value2, int value3), | ||
305 | TP_ARGS(ull, value1, value2, value3), | ||
306 | TP_STRUCT__entry( | ||
307 | __field( unsigned long long, ull ) | ||
308 | __field( int, value1 ) | ||
309 | __field( int, value2 ) | ||
310 | __field( int, value3 ) | ||
311 | ), | ||
312 | TP_fast_assign( | ||
313 | __entry->ull = ull; | ||
314 | __entry->value1 = value1; | ||
315 | __entry->value2 = value2; | ||
316 | __entry->value3 = value3; | ||
317 | ), | ||
318 | TP_printk("%llu %d %d %d", | ||
319 | __entry->ull, __entry->value1, | ||
320 | __entry->value2, __entry->value3) | ||
321 | ); | ||
322 | |||
323 | #define DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(name) \ | ||
324 | DEFINE_EVENT(ocfs2__ull_int_int_int, name, \ | ||
325 | TP_PROTO(unsigned long long ull, int value1, \ | ||
326 | int value2, int value3), \ | ||
327 | TP_ARGS(ull, value1, value2, value3)) | ||
328 | |||
329 | DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint_uint, | ||
330 | TP_PROTO(unsigned long long ull, unsigned int value1, | ||
331 | unsigned int value2, unsigned int value3), | ||
332 | TP_ARGS(ull, value1, value2, value3), | ||
333 | TP_STRUCT__entry( | ||
334 | __field(unsigned long long, ull) | ||
335 | __field(unsigned int, value1) | ||
336 | __field(unsigned int, value2) | ||
337 | __field(unsigned int, value3) | ||
338 | ), | ||
339 | TP_fast_assign( | ||
340 | __entry->ull = ull; | ||
341 | __entry->value1 = value1; | ||
342 | __entry->value2 = value2; | ||
343 | __entry->value3 = value3; | ||
344 | ), | ||
345 | TP_printk("%llu %u %u %u", | ||
346 | __entry->ull, __entry->value1, | ||
347 | __entry->value2, __entry->value3) | ||
348 | ); | ||
349 | |||
350 | #define DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(name) \ | ||
351 | DEFINE_EVENT(ocfs2__ull_uint_uint_uint, name, \ | ||
352 | TP_PROTO(unsigned long long ull, unsigned int value1, \ | ||
353 | unsigned int value2, unsigned int value3), \ | ||
354 | TP_ARGS(ull, value1, value2, value3)) | ||
355 | |||
356 | DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint_uint, | ||
357 | TP_PROTO(unsigned long long value1, unsigned long long value2, | ||
358 | unsigned int value3, unsigned int value4), | ||
359 | TP_ARGS(value1, value2, value3, value4), | ||
360 | TP_STRUCT__entry( | ||
361 | __field(unsigned long long, value1) | ||
362 | __field(unsigned long long, value2) | ||
363 | __field(unsigned int, value3) | ||
364 | __field(unsigned int, value4) | ||
365 | ), | ||
366 | TP_fast_assign( | ||
367 | __entry->value1 = value1; | ||
368 | __entry->value2 = value2; | ||
369 | __entry->value3 = value3; | ||
370 | __entry->value4 = value4; | ||
371 | ), | ||
372 | TP_printk("%llu %llu %u %u", | ||
373 | __entry->value1, __entry->value2, | ||
374 | __entry->value3, __entry->value4) | ||
375 | ); | ||
376 | |||
377 | #define DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(name) \ | ||
378 | DEFINE_EVENT(ocfs2__ull_ull_uint_uint, name, \ | ||
379 | TP_PROTO(unsigned long long ull, unsigned long long ull1, \ | ||
380 | unsigned int value2, unsigned int value3), \ | ||
381 | TP_ARGS(ull, ull1, value2, value3)) | ||
382 | |||
383 | /* Trace events for fs/ocfs2/alloc.c. */ | ||
384 | DECLARE_EVENT_CLASS(ocfs2__btree_ops, | ||
385 | TP_PROTO(unsigned long long owner,\ | ||
386 | unsigned int value1, unsigned int value2), | ||
387 | TP_ARGS(owner, value1, value2), | ||
388 | TP_STRUCT__entry( | ||
389 | __field(unsigned long long, owner) | ||
390 | __field(unsigned int, value1) | ||
391 | __field(unsigned int, value2) | ||
392 | ), | ||
393 | TP_fast_assign( | ||
394 | __entry->owner = owner; | ||
395 | __entry->value1 = value1; | ||
396 | __entry->value2 = value2; | ||
397 | ), | ||
398 | TP_printk("%llu %u %u", | ||
399 | __entry->owner, __entry->value1, __entry->value2) | ||
400 | ); | ||
401 | |||
402 | #define DEFINE_OCFS2_BTREE_EVENT(name) \ | ||
403 | DEFINE_EVENT(ocfs2__btree_ops, name, \ | ||
404 | TP_PROTO(unsigned long long owner, \ | ||
405 | unsigned int value1, unsigned int value2), \ | ||
406 | TP_ARGS(owner, value1, value2)) | ||
407 | |||
408 | DEFINE_OCFS2_BTREE_EVENT(ocfs2_adjust_rightmost_branch); | ||
409 | |||
410 | DEFINE_OCFS2_BTREE_EVENT(ocfs2_rotate_tree_right); | ||
411 | |||
412 | DEFINE_OCFS2_BTREE_EVENT(ocfs2_append_rec_to_path); | ||
413 | |||
414 | DEFINE_OCFS2_BTREE_EVENT(ocfs2_insert_extent_start); | ||
415 | |||
416 | DEFINE_OCFS2_BTREE_EVENT(ocfs2_add_clusters_in_btree); | ||
417 | |||
418 | DEFINE_OCFS2_INT_EVENT(ocfs2_num_free_extents); | ||
419 | |||
420 | DEFINE_OCFS2_INT_EVENT(ocfs2_complete_edge_insert); | ||
421 | |||
422 | TRACE_EVENT(ocfs2_grow_tree, | ||
423 | TP_PROTO(unsigned long long owner, int depth), | ||
424 | TP_ARGS(owner, depth), | ||
425 | TP_STRUCT__entry( | ||
426 | __field(unsigned long long, owner) | ||
427 | __field(int, depth) | ||
428 | ), | ||
429 | TP_fast_assign( | ||
430 | __entry->owner = owner; | ||
431 | __entry->depth = depth; | ||
432 | ), | ||
433 | TP_printk("%llu %d", __entry->owner, __entry->depth) | ||
434 | ); | ||
435 | |||
436 | TRACE_EVENT(ocfs2_rotate_subtree, | ||
437 | TP_PROTO(int subtree_root, unsigned long long blkno, | ||
438 | int depth), | ||
439 | TP_ARGS(subtree_root, blkno, depth), | ||
440 | TP_STRUCT__entry( | ||
441 | __field(int, subtree_root) | ||
442 | __field(unsigned long long, blkno) | ||
443 | __field(int, depth) | ||
444 | ), | ||
445 | TP_fast_assign( | ||
446 | __entry->subtree_root = subtree_root; | ||
447 | __entry->blkno = blkno; | ||
448 | __entry->depth = depth; | ||
449 | ), | ||
450 | TP_printk("%d %llu %d", __entry->subtree_root, | ||
451 | __entry->blkno, __entry->depth) | ||
452 | ); | ||
453 | |||
454 | TRACE_EVENT(ocfs2_insert_extent, | ||
455 | TP_PROTO(unsigned int ins_appending, unsigned int ins_contig, | ||
456 | int ins_contig_index, int free_records, int ins_tree_depth), | ||
457 | TP_ARGS(ins_appending, ins_contig, ins_contig_index, free_records, | ||
458 | ins_tree_depth), | ||
459 | TP_STRUCT__entry( | ||
460 | __field(unsigned int, ins_appending) | ||
461 | __field(unsigned int, ins_contig) | ||
462 | __field(int, ins_contig_index) | ||
463 | __field(int, free_records) | ||
464 | __field(int, ins_tree_depth) | ||
465 | ), | ||
466 | TP_fast_assign( | ||
467 | __entry->ins_appending = ins_appending; | ||
468 | __entry->ins_contig = ins_contig; | ||
469 | __entry->ins_contig_index = ins_contig_index; | ||
470 | __entry->free_records = free_records; | ||
471 | __entry->ins_tree_depth = ins_tree_depth; | ||
472 | ), | ||
473 | TP_printk("%u %u %d %d %d", | ||
474 | __entry->ins_appending, __entry->ins_contig, | ||
475 | __entry->ins_contig_index, __entry->free_records, | ||
476 | __entry->ins_tree_depth) | ||
477 | ); | ||
478 | |||
479 | TRACE_EVENT(ocfs2_split_extent, | ||
480 | TP_PROTO(int split_index, unsigned int c_contig_type, | ||
481 | unsigned int c_has_empty_extent, | ||
482 | unsigned int c_split_covers_rec), | ||
483 | TP_ARGS(split_index, c_contig_type, | ||
484 | c_has_empty_extent, c_split_covers_rec), | ||
485 | TP_STRUCT__entry( | ||
486 | __field(int, split_index) | ||
487 | __field(unsigned int, c_contig_type) | ||
488 | __field(unsigned int, c_has_empty_extent) | ||
489 | __field(unsigned int, c_split_covers_rec) | ||
490 | ), | ||
491 | TP_fast_assign( | ||
492 | __entry->split_index = split_index; | ||
493 | __entry->c_contig_type = c_contig_type; | ||
494 | __entry->c_has_empty_extent = c_has_empty_extent; | ||
495 | __entry->c_split_covers_rec = c_split_covers_rec; | ||
496 | ), | ||
497 | TP_printk("%d %u %u %u", __entry->split_index, __entry->c_contig_type, | ||
498 | __entry->c_has_empty_extent, __entry->c_split_covers_rec) | ||
499 | ); | ||
500 | |||
501 | TRACE_EVENT(ocfs2_remove_extent, | ||
502 | TP_PROTO(unsigned long long owner, unsigned int cpos, | ||
503 | unsigned int len, int index, | ||
504 | unsigned int e_cpos, unsigned int clusters), | ||
505 | TP_ARGS(owner, cpos, len, index, e_cpos, clusters), | ||
506 | TP_STRUCT__entry( | ||
507 | __field(unsigned long long, owner) | ||
508 | __field(unsigned int, cpos) | ||
509 | __field(unsigned int, len) | ||
510 | __field(int, index) | ||
511 | __field(unsigned int, e_cpos) | ||
512 | __field(unsigned int, clusters) | ||
513 | ), | ||
514 | TP_fast_assign( | ||
515 | __entry->owner = owner; | ||
516 | __entry->cpos = cpos; | ||
517 | __entry->len = len; | ||
518 | __entry->index = index; | ||
519 | __entry->e_cpos = e_cpos; | ||
520 | __entry->clusters = clusters; | ||
521 | ), | ||
522 | TP_printk("%llu %u %u %d %u %u", | ||
523 | __entry->owner, __entry->cpos, __entry->len, __entry->index, | ||
524 | __entry->e_cpos, __entry->clusters) | ||
525 | ); | ||
526 | |||
527 | TRACE_EVENT(ocfs2_commit_truncate, | ||
528 | TP_PROTO(unsigned long long ino, unsigned int new_cpos, | ||
529 | unsigned int clusters, unsigned int depth), | ||
530 | TP_ARGS(ino, new_cpos, clusters, depth), | ||
531 | TP_STRUCT__entry( | ||
532 | __field(unsigned long long, ino) | ||
533 | __field(unsigned int, new_cpos) | ||
534 | __field(unsigned int, clusters) | ||
535 | __field(unsigned int, depth) | ||
536 | ), | ||
537 | TP_fast_assign( | ||
538 | __entry->ino = ino; | ||
539 | __entry->new_cpos = new_cpos; | ||
540 | __entry->clusters = clusters; | ||
541 | __entry->depth = depth; | ||
542 | ), | ||
543 | TP_printk("%llu %u %u %u", | ||
544 | __entry->ino, __entry->new_cpos, | ||
545 | __entry->clusters, __entry->depth) | ||
546 | ); | ||
547 | |||
548 | TRACE_EVENT(ocfs2_validate_extent_block, | ||
549 | TP_PROTO(unsigned long long blkno), | ||
550 | TP_ARGS(blkno), | ||
551 | TP_STRUCT__entry( | ||
552 | __field(unsigned long long, blkno) | ||
553 | ), | ||
554 | TP_fast_assign( | ||
555 | __entry->blkno = blkno; | ||
556 | ), | ||
557 | TP_printk("%llu ", __entry->blkno) | ||
558 | ); | ||
559 | |||
560 | TRACE_EVENT(ocfs2_rotate_leaf, | ||
561 | TP_PROTO(unsigned int insert_cpos, int insert_index, | ||
562 | int has_empty, int next_free, | ||
563 | unsigned int l_count), | ||
564 | TP_ARGS(insert_cpos, insert_index, has_empty, | ||
565 | next_free, l_count), | ||
566 | TP_STRUCT__entry( | ||
567 | __field(unsigned int, insert_cpos) | ||
568 | __field(int, insert_index) | ||
569 | __field(int, has_empty) | ||
570 | __field(int, next_free) | ||
571 | __field(unsigned int, l_count) | ||
572 | ), | ||
573 | TP_fast_assign( | ||
574 | __entry->insert_cpos = insert_cpos; | ||
575 | __entry->insert_index = insert_index; | ||
576 | __entry->has_empty = has_empty; | ||
577 | __entry->next_free = next_free; | ||
578 | __entry->l_count = l_count; | ||
579 | ), | ||
580 | TP_printk("%u %d %d %d %u", __entry->insert_cpos, | ||
581 | __entry->insert_index, __entry->has_empty, | ||
582 | __entry->next_free, __entry->l_count) | ||
583 | ); | ||
584 | |||
585 | TRACE_EVENT(ocfs2_add_clusters_in_btree_ret, | ||
586 | TP_PROTO(int status, int reason, int err), | ||
587 | TP_ARGS(status, reason, err), | ||
588 | TP_STRUCT__entry( | ||
589 | __field(int, status) | ||
590 | __field(int, reason) | ||
591 | __field(int, err) | ||
592 | ), | ||
593 | TP_fast_assign( | ||
594 | __entry->status = status; | ||
595 | __entry->reason = reason; | ||
596 | __entry->err = err; | ||
597 | ), | ||
598 | TP_printk("%d %d %d", __entry->status, | ||
599 | __entry->reason, __entry->err) | ||
600 | ); | ||
601 | |||
602 | TRACE_EVENT(ocfs2_mark_extent_written, | ||
603 | TP_PROTO(unsigned long long owner, unsigned int cpos, | ||
604 | unsigned int len, unsigned int phys), | ||
605 | TP_ARGS(owner, cpos, len, phys), | ||
606 | TP_STRUCT__entry( | ||
607 | __field(unsigned long long, owner) | ||
608 | __field(unsigned int, cpos) | ||
609 | __field(unsigned int, len) | ||
610 | __field(unsigned int, phys) | ||
611 | ), | ||
612 | TP_fast_assign( | ||
613 | __entry->owner = owner; | ||
614 | __entry->cpos = cpos; | ||
615 | __entry->len = len; | ||
616 | __entry->phys = phys; | ||
617 | ), | ||
618 | TP_printk("%llu %u %u %u", | ||
619 | __entry->owner, __entry->cpos, | ||
620 | __entry->len, __entry->phys) | ||
621 | ); | ||
622 | |||
623 | DECLARE_EVENT_CLASS(ocfs2__truncate_log_ops, | ||
624 | TP_PROTO(unsigned long long blkno, int index, | ||
625 | unsigned int start, unsigned int num), | ||
626 | TP_ARGS(blkno, index, start, num), | ||
627 | TP_STRUCT__entry( | ||
628 | __field(unsigned long long, blkno) | ||
629 | __field(int, index) | ||
630 | __field(unsigned int, start) | ||
631 | __field(unsigned int, num) | ||
632 | ), | ||
633 | TP_fast_assign( | ||
634 | __entry->blkno = blkno; | ||
635 | __entry->index = index; | ||
636 | __entry->start = start; | ||
637 | __entry->num = num; | ||
638 | ), | ||
639 | TP_printk("%llu %d %u %u", | ||
640 | __entry->blkno, __entry->index, | ||
641 | __entry->start, __entry->num) | ||
642 | ); | ||
643 | |||
644 | #define DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(name) \ | ||
645 | DEFINE_EVENT(ocfs2__truncate_log_ops, name, \ | ||
646 | TP_PROTO(unsigned long long blkno, int index, \ | ||
647 | unsigned int start, unsigned int num), \ | ||
648 | TP_ARGS(blkno, index, start, num)) | ||
649 | |||
650 | DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_truncate_log_append); | ||
651 | |||
652 | DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_replay_truncate_records); | ||
653 | |||
654 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_flush_truncate_log); | ||
655 | |||
656 | DEFINE_OCFS2_INT_EVENT(ocfs2_begin_truncate_log_recovery); | ||
657 | |||
658 | DEFINE_OCFS2_INT_EVENT(ocfs2_truncate_log_recovery_num); | ||
659 | |||
660 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_complete_truncate_log_recovery); | ||
661 | |||
662 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_free_cached_blocks); | ||
663 | |||
664 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_cache_cluster_dealloc); | ||
665 | |||
666 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_run_deallocs); | ||
667 | |||
668 | TRACE_EVENT(ocfs2_cache_block_dealloc, | ||
669 | TP_PROTO(int type, int slot, unsigned long long suballoc, | ||
670 | unsigned long long blkno, unsigned int bit), | ||
671 | TP_ARGS(type, slot, suballoc, blkno, bit), | ||
672 | TP_STRUCT__entry( | ||
673 | __field(int, type) | ||
674 | __field(int, slot) | ||
675 | __field(unsigned long long, suballoc) | ||
676 | __field(unsigned long long, blkno) | ||
677 | __field(unsigned int, bit) | ||
678 | ), | ||
679 | TP_fast_assign( | ||
680 | __entry->type = type; | ||
681 | __entry->slot = slot; | ||
682 | __entry->suballoc = suballoc; | ||
683 | __entry->blkno = blkno; | ||
684 | __entry->bit = bit; | ||
685 | ), | ||
686 | TP_printk("%d %d %llu %llu %u", | ||
687 | __entry->type, __entry->slot, __entry->suballoc, | ||
688 | __entry->blkno, __entry->bit) | ||
689 | ); | ||
690 | |||
691 | /* End of trace events for fs/ocfs2/alloc.c. */ | ||
692 | |||
693 | /* Trace events for fs/ocfs2/localalloc.c. */ | ||
694 | |||
695 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_la_set_sizes); | ||
696 | |||
697 | DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_alloc_should_use_local); | ||
698 | |||
699 | DEFINE_OCFS2_INT_EVENT(ocfs2_load_local_alloc); | ||
700 | |||
701 | DEFINE_OCFS2_INT_EVENT(ocfs2_begin_local_alloc_recovery); | ||
702 | |||
703 | DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_reserve_local_alloc_bits); | ||
704 | |||
705 | DEFINE_OCFS2_UINT_EVENT(ocfs2_local_alloc_count_bits); | ||
706 | |||
707 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits_search_bitmap); | ||
708 | |||
709 | DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits); | ||
710 | |||
711 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_sync_local_to_main); | ||
712 | |||
713 | TRACE_EVENT(ocfs2_sync_local_to_main_free, | ||
714 | TP_PROTO(int count, int bit, unsigned long long start_blk, | ||
715 | unsigned long long blkno), | ||
716 | TP_ARGS(count, bit, start_blk, blkno), | ||
717 | TP_STRUCT__entry( | ||
718 | __field(int, count) | ||
719 | __field(int, bit) | ||
720 | __field(unsigned long long, start_blk) | ||
721 | __field(unsigned long long, blkno) | ||
722 | ), | ||
723 | TP_fast_assign( | ||
724 | __entry->count = count; | ||
725 | __entry->bit = bit; | ||
726 | __entry->start_blk = start_blk; | ||
727 | __entry->blkno = blkno; | ||
728 | ), | ||
729 | TP_printk("%d %d %llu %llu", | ||
730 | __entry->count, __entry->bit, __entry->start_blk, | ||
731 | __entry->blkno) | ||
732 | ); | ||
733 | |||
734 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_new_window); | ||
735 | |||
736 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_local_alloc_new_window_result); | ||
737 | |||
738 | /* End of trace events for fs/ocfs2/localalloc.c. */ | ||
739 | |||
740 | /* Trace events for fs/ocfs2/resize.c. */ | ||
741 | |||
742 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_update_last_group_and_inode); | ||
743 | |||
744 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_group_extend); | ||
745 | |||
746 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_group_add); | ||
747 | |||
748 | /* End of trace events for fs/ocfs2/resize.c. */ | ||
749 | |||
750 | /* Trace events for fs/ocfs2/suballoc.c. */ | ||
751 | |||
752 | DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_group_descriptor); | ||
753 | |||
754 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_contig); | ||
755 | |||
756 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_discontig); | ||
757 | |||
758 | DEFINE_OCFS2_ULL_EVENT(ocfs2_block_group_alloc); | ||
759 | |||
760 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_nospc); | ||
761 | |||
762 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_no_new_group); | ||
763 | |||
764 | DEFINE_OCFS2_ULL_EVENT(ocfs2_reserve_new_inode_new_group); | ||
765 | |||
766 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_set_bits); | ||
767 | |||
768 | TRACE_EVENT(ocfs2_relink_block_group, | ||
769 | TP_PROTO(unsigned long long i_blkno, unsigned int chain, | ||
770 | unsigned long long bg_blkno, | ||
771 | unsigned long long prev_blkno), | ||
772 | TP_ARGS(i_blkno, chain, bg_blkno, prev_blkno), | ||
773 | TP_STRUCT__entry( | ||
774 | __field(unsigned long long, i_blkno) | ||
775 | __field(unsigned int, chain) | ||
776 | __field(unsigned long long, bg_blkno) | ||
777 | __field(unsigned long long, prev_blkno) | ||
778 | ), | ||
779 | TP_fast_assign( | ||
780 | __entry->i_blkno = i_blkno; | ||
781 | __entry->chain = chain; | ||
782 | __entry->bg_blkno = bg_blkno; | ||
783 | __entry->prev_blkno = prev_blkno; | ||
784 | ), | ||
785 | TP_printk("%llu %u %llu %llu", | ||
786 | __entry->i_blkno, __entry->chain, __entry->bg_blkno, | ||
787 | __entry->prev_blkno) | ||
788 | ); | ||
789 | |||
790 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_cluster_group_search_wrong_max_bits); | ||
791 | |||
792 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cluster_group_search_max_block); | ||
793 | |||
794 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_block_group_search_max_block); | ||
795 | |||
796 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_search_chain_begin); | ||
797 | |||
798 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_succ); | ||
799 | |||
800 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_end); | ||
801 | |||
802 | DEFINE_OCFS2_UINT_EVENT(ocfs2_claim_suballoc_bits); | ||
803 | |||
804 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_claim_new_inode_at_loc); | ||
805 | |||
806 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_clear_bits); | ||
807 | |||
808 | TRACE_EVENT(ocfs2_free_suballoc_bits, | ||
809 | TP_PROTO(unsigned long long inode, unsigned long long group, | ||
810 | unsigned int start_bit, unsigned int count), | ||
811 | TP_ARGS(inode, group, start_bit, count), | ||
812 | TP_STRUCT__entry( | ||
813 | __field(unsigned long long, inode) | ||
814 | __field(unsigned long long, group) | ||
815 | __field(unsigned int, start_bit) | ||
816 | __field(unsigned int, count) | ||
817 | ), | ||
818 | TP_fast_assign( | ||
819 | __entry->inode = inode; | ||
820 | __entry->group = group; | ||
821 | __entry->start_bit = start_bit; | ||
822 | __entry->count = count; | ||
823 | ), | ||
824 | TP_printk("%llu %llu %u %u", __entry->inode, __entry->group, | ||
825 | __entry->start_bit, __entry->count) | ||
826 | ); | ||
827 | |||
828 | TRACE_EVENT(ocfs2_free_clusters, | ||
829 | TP_PROTO(unsigned long long bg_blkno, unsigned long long start_blk, | ||
830 | unsigned int start_bit, unsigned int count), | ||
831 | TP_ARGS(bg_blkno, start_blk, start_bit, count), | ||
832 | TP_STRUCT__entry( | ||
833 | __field(unsigned long long, bg_blkno) | ||
834 | __field(unsigned long long, start_blk) | ||
835 | __field(unsigned int, start_bit) | ||
836 | __field(unsigned int, count) | ||
837 | ), | ||
838 | TP_fast_assign( | ||
839 | __entry->bg_blkno = bg_blkno; | ||
840 | __entry->start_blk = start_blk; | ||
841 | __entry->start_bit = start_bit; | ||
842 | __entry->count = count; | ||
843 | ), | ||
844 | TP_printk("%llu %llu %u %u", __entry->bg_blkno, __entry->start_blk, | ||
845 | __entry->start_bit, __entry->count) | ||
846 | ); | ||
847 | |||
848 | DEFINE_OCFS2_ULL_EVENT(ocfs2_get_suballoc_slot_bit); | ||
849 | |||
850 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_test_suballoc_bit); | ||
851 | |||
852 | DEFINE_OCFS2_ULL_EVENT(ocfs2_test_inode_bit); | ||
853 | |||
854 | /* End of trace events for fs/ocfs2/suballoc.c. */ | ||
855 | |||
856 | /* Trace events for fs/ocfs2/refcounttree.c. */ | ||
857 | |||
858 | DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_refcount_block); | ||
859 | |||
860 | DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_refcount_trees); | ||
861 | |||
862 | DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree); | ||
863 | |||
864 | DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree_blkno); | ||
865 | |||
866 | DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_change_refcount_rec); | ||
867 | |||
868 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_expand_inline_ref_root); | ||
869 | |||
870 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_divide_leaf_refcount_block); | ||
871 | |||
872 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_new_leaf_refcount_block); | ||
873 | |||
874 | DECLARE_EVENT_CLASS(ocfs2__refcount_tree_ops, | ||
875 | TP_PROTO(unsigned long long blkno, int index, | ||
876 | unsigned long long cpos, | ||
877 | unsigned int clusters, unsigned int refcount), | ||
878 | TP_ARGS(blkno, index, cpos, clusters, refcount), | ||
879 | TP_STRUCT__entry( | ||
880 | __field(unsigned long long, blkno) | ||
881 | __field(int, index) | ||
882 | __field(unsigned long long, cpos) | ||
883 | __field(unsigned int, clusters) | ||
884 | __field(unsigned int, refcount) | ||
885 | ), | ||
886 | TP_fast_assign( | ||
887 | __entry->blkno = blkno; | ||
888 | __entry->index = index; | ||
889 | __entry->cpos = cpos; | ||
890 | __entry->clusters = clusters; | ||
891 | __entry->refcount = refcount; | ||
892 | ), | ||
893 | TP_printk("%llu %d %llu %u %u", __entry->blkno, __entry->index, | ||
894 | __entry->cpos, __entry->clusters, __entry->refcount) | ||
895 | ); | ||
896 | |||
897 | #define DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(name) \ | ||
898 | DEFINE_EVENT(ocfs2__refcount_tree_ops, name, \ | ||
899 | TP_PROTO(unsigned long long blkno, int index, \ | ||
900 | unsigned long long cpos, \ | ||
901 | unsigned int count, unsigned int refcount), \ | ||
902 | TP_ARGS(blkno, index, cpos, count, refcount)) | ||
903 | |||
904 | DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_insert_refcount_rec); | ||
905 | |||
906 | TRACE_EVENT(ocfs2_split_refcount_rec, | ||
907 | TP_PROTO(unsigned long long cpos, | ||
908 | unsigned int clusters, unsigned int refcount, | ||
909 | unsigned long long split_cpos, | ||
910 | unsigned int split_clusters, unsigned int split_refcount), | ||
911 | TP_ARGS(cpos, clusters, refcount, | ||
912 | split_cpos, split_clusters, split_refcount), | ||
913 | TP_STRUCT__entry( | ||
914 | __field(unsigned long long, cpos) | ||
915 | __field(unsigned int, clusters) | ||
916 | __field(unsigned int, refcount) | ||
917 | __field(unsigned long long, split_cpos) | ||
918 | __field(unsigned int, split_clusters) | ||
919 | __field(unsigned int, split_refcount) | ||
920 | ), | ||
921 | TP_fast_assign( | ||
922 | __entry->cpos = cpos; | ||
923 | __entry->clusters = clusters; | ||
924 | __entry->refcount = refcount; | ||
925 | __entry->split_cpos = split_cpos; | ||
926 | __entry->split_clusters = split_clusters; | ||
927 | __entry->split_refcount = split_refcount; | ||
928 | ), | ||
929 | TP_printk("%llu %u %u %llu %u %u", | ||
930 | __entry->cpos, __entry->clusters, __entry->refcount, | ||
931 | __entry->split_cpos, __entry->split_clusters, | ||
932 | __entry->split_refcount) | ||
933 | ); | ||
934 | |||
935 | DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_split_refcount_rec_insert); | ||
936 | |||
937 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_increase_refcount_begin); | ||
938 | |||
939 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_change); | ||
940 | |||
941 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_increase_refcount_insert); | ||
942 | |||
943 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_split); | ||
944 | |||
945 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_remove_refcount_extent); | ||
946 | |||
947 | DEFINE_OCFS2_ULL_EVENT(ocfs2_restore_refcount_block); | ||
948 | |||
949 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_decrease_refcount_rec); | ||
950 | |||
951 | TRACE_EVENT(ocfs2_decrease_refcount, | ||
952 | TP_PROTO(unsigned long long owner, | ||
953 | unsigned long long cpos, | ||
954 | unsigned int len, int delete), | ||
955 | TP_ARGS(owner, cpos, len, delete), | ||
956 | TP_STRUCT__entry( | ||
957 | __field(unsigned long long, owner) | ||
958 | __field(unsigned long long, cpos) | ||
959 | __field(unsigned int, len) | ||
960 | __field(int, delete) | ||
961 | ), | ||
962 | TP_fast_assign( | ||
963 | __entry->owner = owner; | ||
964 | __entry->cpos = cpos; | ||
965 | __entry->len = len; | ||
966 | __entry->delete = delete; | ||
967 | ), | ||
968 | TP_printk("%llu %llu %u %d", | ||
969 | __entry->owner, __entry->cpos, __entry->len, __entry->delete) | ||
970 | ); | ||
971 | |||
972 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_mark_extent_refcounted); | ||
973 | |||
974 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_calc_refcount_meta_credits); | ||
975 | |||
976 | TRACE_EVENT(ocfs2_calc_refcount_meta_credits_iterate, | ||
977 | TP_PROTO(int recs_add, unsigned long long cpos, | ||
978 | unsigned int clusters, unsigned long long r_cpos, | ||
979 | unsigned int r_clusters, unsigned int refcount, int index), | ||
980 | TP_ARGS(recs_add, cpos, clusters, r_cpos, r_clusters, refcount, index), | ||
981 | TP_STRUCT__entry( | ||
982 | __field(int, recs_add) | ||
983 | __field(unsigned long long, cpos) | ||
984 | __field(unsigned int, clusters) | ||
985 | __field(unsigned long long, r_cpos) | ||
986 | __field(unsigned int, r_clusters) | ||
987 | __field(unsigned int, refcount) | ||
988 | __field(int, index) | ||
989 | ), | ||
990 | TP_fast_assign( | ||
991 | __entry->recs_add = recs_add; | ||
992 | __entry->cpos = cpos; | ||
993 | __entry->clusters = clusters; | ||
994 | __entry->r_cpos = r_cpos; | ||
995 | __entry->r_clusters = r_clusters; | ||
996 | __entry->refcount = refcount; | ||
997 | __entry->index = index; | ||
998 | ), | ||
999 | TP_printk("%d %llu %u %llu %u %u %d", | ||
1000 | __entry->recs_add, __entry->cpos, __entry->clusters, | ||
1001 | __entry->r_cpos, __entry->r_clusters, | ||
1002 | __entry->refcount, __entry->index) | ||
1003 | ); | ||
1004 | |||
1005 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_add_refcount_flag); | ||
1006 | |||
1007 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_prepare_refcount_change_for_del); | ||
1008 | |||
1009 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_lock_refcount_allocators); | ||
1010 | |||
1011 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_page); | ||
1012 | |||
1013 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_jbd); | ||
1014 | |||
1015 | TRACE_EVENT(ocfs2_clear_ext_refcount, | ||
1016 | TP_PROTO(unsigned long long ino, unsigned int cpos, | ||
1017 | unsigned int len, unsigned int p_cluster, | ||
1018 | unsigned int ext_flags), | ||
1019 | TP_ARGS(ino, cpos, len, p_cluster, ext_flags), | ||
1020 | TP_STRUCT__entry( | ||
1021 | __field(unsigned long long, ino) | ||
1022 | __field(unsigned int, cpos) | ||
1023 | __field(unsigned int, len) | ||
1024 | __field(unsigned int, p_cluster) | ||
1025 | __field(unsigned int, ext_flags) | ||
1026 | ), | ||
1027 | TP_fast_assign( | ||
1028 | __entry->ino = ino; | ||
1029 | __entry->cpos = cpos; | ||
1030 | __entry->len = len; | ||
1031 | __entry->p_cluster = p_cluster; | ||
1032 | __entry->ext_flags = ext_flags; | ||
1033 | ), | ||
1034 | TP_printk("%llu %u %u %u %u", | ||
1035 | __entry->ino, __entry->cpos, __entry->len, | ||
1036 | __entry->p_cluster, __entry->ext_flags) | ||
1037 | ); | ||
1038 | |||
1039 | TRACE_EVENT(ocfs2_replace_clusters, | ||
1040 | TP_PROTO(unsigned long long ino, unsigned int cpos, | ||
1041 | unsigned int old, unsigned int new, unsigned int len, | ||
1042 | unsigned int ext_flags), | ||
1043 | TP_ARGS(ino, cpos, old, new, len, ext_flags), | ||
1044 | TP_STRUCT__entry( | ||
1045 | __field(unsigned long long, ino) | ||
1046 | __field(unsigned int, cpos) | ||
1047 | __field(unsigned int, old) | ||
1048 | __field(unsigned int, new) | ||
1049 | __field(unsigned int, len) | ||
1050 | __field(unsigned int, ext_flags) | ||
1051 | ), | ||
1052 | TP_fast_assign( | ||
1053 | __entry->ino = ino; | ||
1054 | __entry->cpos = cpos; | ||
1055 | __entry->old = old; | ||
1056 | __entry->new = new; | ||
1057 | __entry->len = len; | ||
1058 | __entry->ext_flags = ext_flags; | ||
1059 | ), | ||
1060 | TP_printk("%llu %u %u %u %u %u", | ||
1061 | __entry->ino, __entry->cpos, __entry->old, __entry->new, | ||
1062 | __entry->len, __entry->ext_flags) | ||
1063 | ); | ||
1064 | |||
1065 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_make_clusters_writable); | ||
1066 | |||
1067 | TRACE_EVENT(ocfs2_refcount_cow_hunk, | ||
1068 | TP_PROTO(unsigned long long ino, unsigned int cpos, | ||
1069 | unsigned int write_len, unsigned int max_cpos, | ||
1070 | unsigned int cow_start, unsigned int cow_len), | ||
1071 | TP_ARGS(ino, cpos, write_len, max_cpos, cow_start, cow_len), | ||
1072 | TP_STRUCT__entry( | ||
1073 | __field(unsigned long long, ino) | ||
1074 | __field(unsigned int, cpos) | ||
1075 | __field(unsigned int, write_len) | ||
1076 | __field(unsigned int, max_cpos) | ||
1077 | __field(unsigned int, cow_start) | ||
1078 | __field(unsigned int, cow_len) | ||
1079 | ), | ||
1080 | TP_fast_assign( | ||
1081 | __entry->ino = ino; | ||
1082 | __entry->cpos = cpos; | ||
1083 | __entry->write_len = write_len; | ||
1084 | __entry->max_cpos = max_cpos; | ||
1085 | __entry->cow_start = cow_start; | ||
1086 | __entry->cow_len = cow_len; | ||
1087 | ), | ||
1088 | TP_printk("%llu %u %u %u %u %u", | ||
1089 | __entry->ino, __entry->cpos, __entry->write_len, | ||
1090 | __entry->max_cpos, __entry->cow_start, __entry->cow_len) | ||
1091 | ); | ||
1092 | |||
1093 | /* End of trace events for fs/ocfs2/refcounttree.c. */ | ||
1094 | |||
1095 | /* Trace events for fs/ocfs2/aops.c. */ | ||
1096 | |||
1097 | DECLARE_EVENT_CLASS(ocfs2__get_block, | ||
1098 | TP_PROTO(unsigned long long ino, unsigned long long iblock, | ||
1099 | void *bh_result, int create), | ||
1100 | TP_ARGS(ino, iblock, bh_result, create), | ||
1101 | TP_STRUCT__entry( | ||
1102 | __field(unsigned long long, ino) | ||
1103 | __field(unsigned long long, iblock) | ||
1104 | __field(void *, bh_result) | ||
1105 | __field(int, create) | ||
1106 | ), | ||
1107 | TP_fast_assign( | ||
1108 | __entry->ino = ino; | ||
1109 | __entry->iblock = iblock; | ||
1110 | __entry->bh_result = bh_result; | ||
1111 | __entry->create = create; | ||
1112 | ), | ||
1113 | TP_printk("%llu %llu %p %d", | ||
1114 | __entry->ino, __entry->iblock, | ||
1115 | __entry->bh_result, __entry->create) | ||
1116 | ); | ||
1117 | |||
1118 | #define DEFINE_OCFS2_GET_BLOCK_EVENT(name) \ | ||
1119 | DEFINE_EVENT(ocfs2__get_block, name, \ | ||
1120 | TP_PROTO(unsigned long long ino, unsigned long long iblock, \ | ||
1121 | void *bh_result, int create), \ | ||
1122 | TP_ARGS(ino, iblock, bh_result, create)) | ||
1123 | |||
1124 | DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_symlink_get_block); | ||
1125 | |||
1126 | DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_get_block); | ||
1127 | |||
1128 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_get_block_end); | ||
1129 | |||
1130 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_readpage); | ||
1131 | |||
1132 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_writepage); | ||
1133 | |||
1134 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_bmap); | ||
1135 | |||
1136 | TRACE_EVENT(ocfs2_try_to_write_inline_data, | ||
1137 | TP_PROTO(unsigned long long ino, unsigned int len, | ||
1138 | unsigned long long pos, unsigned int flags), | ||
1139 | TP_ARGS(ino, len, pos, flags), | ||
1140 | TP_STRUCT__entry( | ||
1141 | __field(unsigned long long, ino) | ||
1142 | __field(unsigned int, len) | ||
1143 | __field(unsigned long long, pos) | ||
1144 | __field(unsigned int, flags) | ||
1145 | ), | ||
1146 | TP_fast_assign( | ||
1147 | __entry->ino = ino; | ||
1148 | __entry->len = len; | ||
1149 | __entry->pos = pos; | ||
1150 | __entry->flags = flags; | ||
1151 | ), | ||
1152 | TP_printk("%llu %u %llu 0x%x", | ||
1153 | __entry->ino, __entry->len, __entry->pos, __entry->flags) | ||
1154 | ); | ||
1155 | |||
1156 | TRACE_EVENT(ocfs2_write_begin_nolock, | ||
1157 | TP_PROTO(unsigned long long ino, | ||
1158 | long long i_size, unsigned int i_clusters, | ||
1159 | unsigned long long pos, unsigned int len, | ||
1160 | unsigned int flags, void *page, | ||
1161 | unsigned int clusters, unsigned int extents_to_split), | ||
1162 | TP_ARGS(ino, i_size, i_clusters, pos, len, flags, | ||
1163 | page, clusters, extents_to_split), | ||
1164 | TP_STRUCT__entry( | ||
1165 | __field(unsigned long long, ino) | ||
1166 | __field(long long, i_size) | ||
1167 | __field(unsigned int, i_clusters) | ||
1168 | __field(unsigned long long, pos) | ||
1169 | __field(unsigned int, len) | ||
1170 | __field(unsigned int, flags) | ||
1171 | __field(void *, page) | ||
1172 | __field(unsigned int, clusters) | ||
1173 | __field(unsigned int, extents_to_split) | ||
1174 | ), | ||
1175 | TP_fast_assign( | ||
1176 | __entry->ino = ino; | ||
1177 | __entry->i_size = i_size; | ||
1178 | __entry->i_clusters = i_clusters; | ||
1179 | __entry->pos = pos; | ||
1180 | __entry->len = len; | ||
1181 | __entry->flags = flags; | ||
1182 | __entry->page = page; | ||
1183 | __entry->clusters = clusters; | ||
1184 | __entry->extents_to_split = extents_to_split; | ||
1185 | ), | ||
1186 | TP_printk("%llu %lld %u %llu %u %u %p %u %u", | ||
1187 | __entry->ino, __entry->i_size, __entry->i_clusters, | ||
1188 | __entry->pos, __entry->len, | ||
1189 | __entry->flags, __entry->page, __entry->clusters, | ||
1190 | __entry->extents_to_split) | ||
1191 | ); | ||
1192 | |||
1193 | TRACE_EVENT(ocfs2_write_end_inline, | ||
1194 | TP_PROTO(unsigned long long ino, | ||
1195 | unsigned long long pos, unsigned int copied, | ||
1196 | unsigned int id_count, unsigned int features), | ||
1197 | TP_ARGS(ino, pos, copied, id_count, features), | ||
1198 | TP_STRUCT__entry( | ||
1199 | __field(unsigned long long, ino) | ||
1200 | __field(unsigned long long, pos) | ||
1201 | __field(unsigned int, copied) | ||
1202 | __field(unsigned int, id_count) | ||
1203 | __field(unsigned int, features) | ||
1204 | ), | ||
1205 | TP_fast_assign( | ||
1206 | __entry->ino = ino; | ||
1207 | __entry->pos = pos; | ||
1208 | __entry->copied = copied; | ||
1209 | __entry->id_count = id_count; | ||
1210 | __entry->features = features; | ||
1211 | ), | ||
1212 | TP_printk("%llu %llu %u %u %u", | ||
1213 | __entry->ino, __entry->pos, __entry->copied, | ||
1214 | __entry->id_count, __entry->features) | ||
1215 | ); | ||
1216 | |||
1217 | /* End of trace events for fs/ocfs2/aops.c. */ | ||
1218 | |||
1219 | /* Trace events for fs/ocfs2/mmap.c. */ | ||
1220 | |||
1221 | TRACE_EVENT(ocfs2_fault, | ||
1222 | TP_PROTO(unsigned long long ino, | ||
1223 | void *area, void *page, unsigned long pgoff), | ||
1224 | TP_ARGS(ino, area, page, pgoff), | ||
1225 | TP_STRUCT__entry( | ||
1226 | __field(unsigned long long, ino) | ||
1227 | __field(void *, area) | ||
1228 | __field(void *, page) | ||
1229 | __field(unsigned long, pgoff) | ||
1230 | ), | ||
1231 | TP_fast_assign( | ||
1232 | __entry->ino = ino; | ||
1233 | __entry->area = area; | ||
1234 | __entry->page = page; | ||
1235 | __entry->pgoff = pgoff; | ||
1236 | ), | ||
1237 | TP_printk("%llu %p %p %lu", | ||
1238 | __entry->ino, __entry->area, __entry->page, __entry->pgoff) | ||
1239 | ); | ||
1240 | |||
1241 | /* End of trace events for fs/ocfs2/mmap.c. */ | ||
1242 | |||
1243 | /* Trace events for fs/ocfs2/file.c. */ | ||
1244 | |||
1245 | DECLARE_EVENT_CLASS(ocfs2__file_ops, | ||
1246 | TP_PROTO(void *inode, void *file, void *dentry, | ||
1247 | unsigned long long ino, | ||
1248 | unsigned int d_len, const unsigned char *d_name, | ||
1249 | unsigned long long para), | ||
1250 | TP_ARGS(inode, file, dentry, ino, d_len, d_name, para), | ||
1251 | TP_STRUCT__entry( | ||
1252 | __field(void *, inode) | ||
1253 | __field(void *, file) | ||
1254 | __field(void *, dentry) | ||
1255 | __field(unsigned long long, ino) | ||
1256 | __field(unsigned int, d_len) | ||
1257 | __string(d_name, d_name) | ||
1258 | __field(unsigned long long, para) | ||
1259 | ), | ||
1260 | TP_fast_assign( | ||
1261 | __entry->inode = inode; | ||
1262 | __entry->file = file; | ||
1263 | __entry->dentry = dentry; | ||
1264 | __entry->ino = ino; | ||
1265 | __entry->d_len = d_len; | ||
1266 | __assign_str(d_name, d_name); | ||
1267 | __entry->para = para; | ||
1268 | ), | ||
1269 | TP_printk("%p %p %p %llu %llu %.*s", __entry->inode, __entry->file, | ||
1270 | __entry->dentry, __entry->ino, __entry->para, | ||
1271 | __entry->d_len, __get_str(d_name)) | ||
1272 | ); | ||
1273 | |||
1274 | #define DEFINE_OCFS2_FILE_OPS(name) \ | ||
1275 | DEFINE_EVENT(ocfs2__file_ops, name, \ | ||
1276 | TP_PROTO(void *inode, void *file, void *dentry, \ | ||
1277 | unsigned long long ino, \ | ||
1278 | unsigned int d_len, const unsigned char *d_name, \ | ||
1279 | unsigned long long mode), \ | ||
1280 | TP_ARGS(inode, file, dentry, ino, d_len, d_name, mode)) | ||
1281 | |||
1282 | DEFINE_OCFS2_FILE_OPS(ocfs2_file_open); | ||
1283 | |||
1284 | DEFINE_OCFS2_FILE_OPS(ocfs2_file_release); | ||
1285 | |||
1286 | DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file); | ||
1287 | |||
1288 | DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write); | ||
1289 | |||
1290 | DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write); | ||
1291 | |||
1292 | DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read); | ||
1293 | |||
1294 | DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read); | ||
1295 | |||
1296 | DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file); | ||
1297 | |||
1298 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_truncate_file_error); | ||
1299 | |||
1300 | TRACE_EVENT(ocfs2_extend_allocation, | ||
1301 | TP_PROTO(unsigned long long ip_blkno, unsigned long long size, | ||
1302 | unsigned int clusters, unsigned int clusters_to_add, | ||
1303 | int why, int restart_func), | ||
1304 | TP_ARGS(ip_blkno, size, clusters, clusters_to_add, why, restart_func), | ||
1305 | TP_STRUCT__entry( | ||
1306 | __field(unsigned long long, ip_blkno) | ||
1307 | __field(unsigned long long, size) | ||
1308 | __field(unsigned int, clusters) | ||
1309 | __field(unsigned int, clusters_to_add) | ||
1310 | __field(int, why) | ||
1311 | __field(int, restart_func) | ||
1312 | ), | ||
1313 | TP_fast_assign( | ||
1314 | __entry->ip_blkno = ip_blkno; | ||
1315 | __entry->size = size; | ||
1316 | __entry->clusters = clusters; | ||
1317 | __entry->clusters_to_add = clusters_to_add; | ||
1318 | __entry->why = why; | ||
1319 | __entry->restart_func = restart_func; | ||
1320 | ), | ||
1321 | TP_printk("%llu %llu %u %u %d %d", | ||
1322 | __entry->ip_blkno, __entry->size, __entry->clusters, | ||
1323 | __entry->clusters_to_add, __entry->why, __entry->restart_func) | ||
1324 | ); | ||
1325 | |||
1326 | TRACE_EVENT(ocfs2_extend_allocation_end, | ||
1327 | TP_PROTO(unsigned long long ino, | ||
1328 | unsigned int di_clusters, unsigned long long di_size, | ||
1329 | unsigned int ip_clusters, unsigned long long i_size), | ||
1330 | TP_ARGS(ino, di_clusters, di_size, ip_clusters, i_size), | ||
1331 | TP_STRUCT__entry( | ||
1332 | __field(unsigned long long, ino) | ||
1333 | __field(unsigned int, di_clusters) | ||
1334 | __field(unsigned long long, di_size) | ||
1335 | __field(unsigned int, ip_clusters) | ||
1336 | __field(unsigned long long, i_size) | ||
1337 | ), | ||
1338 | TP_fast_assign( | ||
1339 | __entry->ino = ino; | ||
1340 | __entry->di_clusters = di_clusters; | ||
1341 | __entry->di_size = di_size; | ||
1342 | __entry->ip_clusters = ip_clusters; | ||
1343 | __entry->i_size = i_size; | ||
1344 | ), | ||
1345 | TP_printk("%llu %u %llu %u %llu", __entry->ino, __entry->di_clusters, | ||
1346 | __entry->di_size, __entry->ip_clusters, __entry->i_size) | ||
1347 | ); | ||
1348 | |||
1349 | TRACE_EVENT(ocfs2_write_zero_page, | ||
1350 | TP_PROTO(unsigned long long ino, | ||
1351 | unsigned long long abs_from, unsigned long long abs_to, | ||
1352 | unsigned long index, unsigned int zero_from, | ||
1353 | unsigned int zero_to), | ||
1354 | TP_ARGS(ino, abs_from, abs_to, index, zero_from, zero_to), | ||
1355 | TP_STRUCT__entry( | ||
1356 | __field(unsigned long long, ino) | ||
1357 | __field(unsigned long long, abs_from) | ||
1358 | __field(unsigned long long, abs_to) | ||
1359 | __field(unsigned long, index) | ||
1360 | __field(unsigned int, zero_from) | ||
1361 | __field(unsigned int, zero_to) | ||
1362 | ), | ||
1363 | TP_fast_assign( | ||
1364 | __entry->ino = ino; | ||
1365 | __entry->abs_from = abs_from; | ||
1366 | __entry->abs_to = abs_to; | ||
1367 | __entry->index = index; | ||
1368 | __entry->zero_from = zero_from; | ||
1369 | __entry->zero_to = zero_to; | ||
1370 | ), | ||
1371 | TP_printk("%llu %llu %llu %lu %u %u", __entry->ino, | ||
1372 | __entry->abs_from, __entry->abs_to, | ||
1373 | __entry->index, __entry->zero_from, __entry->zero_to) | ||
1374 | ); | ||
1375 | |||
1376 | DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend_range); | ||
1377 | |||
1378 | DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend); | ||
1379 | |||
1380 | TRACE_EVENT(ocfs2_setattr, | ||
1381 | TP_PROTO(void *inode, void *dentry, | ||
1382 | unsigned long long ino, | ||
1383 | unsigned int d_len, const unsigned char *d_name, | ||
1384 | unsigned int ia_valid, unsigned int ia_mode, | ||
1385 | unsigned int ia_uid, unsigned int ia_gid), | ||
1386 | TP_ARGS(inode, dentry, ino, d_len, d_name, | ||
1387 | ia_valid, ia_mode, ia_uid, ia_gid), | ||
1388 | TP_STRUCT__entry( | ||
1389 | __field(void *, inode) | ||
1390 | __field(void *, dentry) | ||
1391 | __field(unsigned long long, ino) | ||
1392 | __field(unsigned int, d_len) | ||
1393 | __string(d_name, d_name) | ||
1394 | __field(unsigned int, ia_valid) | ||
1395 | __field(unsigned int, ia_mode) | ||
1396 | __field(unsigned int, ia_uid) | ||
1397 | __field(unsigned int, ia_gid) | ||
1398 | ), | ||
1399 | TP_fast_assign( | ||
1400 | __entry->inode = inode; | ||
1401 | __entry->dentry = dentry; | ||
1402 | __entry->ino = ino; | ||
1403 | __entry->d_len = d_len; | ||
1404 | __assign_str(d_name, d_name); | ||
1405 | __entry->ia_valid = ia_valid; | ||
1406 | __entry->ia_mode = ia_mode; | ||
1407 | __entry->ia_uid = ia_uid; | ||
1408 | __entry->ia_gid = ia_gid; | ||
1409 | ), | ||
1410 | TP_printk("%p %p %llu %.*s %u %u %u %u", __entry->inode, | ||
1411 | __entry->dentry, __entry->ino, __entry->d_len, | ||
1412 | __get_str(d_name), __entry->ia_valid, __entry->ia_mode, | ||
1413 | __entry->ia_uid, __entry->ia_gid) | ||
1414 | ); | ||
1415 | |||
1416 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_write_remove_suid); | ||
1417 | |||
1418 | DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_partial_clusters); | ||
1419 | |||
1420 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range1); | ||
1421 | |||
1422 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range2); | ||
1423 | |||
1424 | DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_remove_inode_range); | ||
1425 | |||
1426 | TRACE_EVENT(ocfs2_prepare_inode_for_write, | ||
1427 | TP_PROTO(unsigned long long ino, unsigned long long saved_pos, | ||
1428 | int appending, unsigned long count, | ||
1429 | int *direct_io, int *has_refcount), | ||
1430 | TP_ARGS(ino, saved_pos, appending, count, direct_io, has_refcount), | ||
1431 | TP_STRUCT__entry( | ||
1432 | __field(unsigned long long, ino) | ||
1433 | __field(unsigned long long, saved_pos) | ||
1434 | __field(int, appending) | ||
1435 | __field(unsigned long, count) | ||
1436 | __field(int, direct_io) | ||
1437 | __field(int, has_refcount) | ||
1438 | ), | ||
1439 | TP_fast_assign( | ||
1440 | __entry->ino = ino; | ||
1441 | __entry->saved_pos = saved_pos; | ||
1442 | __entry->appending = appending; | ||
1443 | __entry->count = count; | ||
1444 | __entry->direct_io = direct_io ? *direct_io : -1; | ||
1445 | __entry->has_refcount = has_refcount ? *has_refcount : -1; | ||
1446 | ), | ||
1447 | TP_printk("%llu %llu %d %lu %d %d", __entry->ino, | ||
1448 | __entry->saved_pos, __entry->appending, __entry->count, | ||
1449 | __entry->direct_io, __entry->has_refcount) | ||
1450 | ); | ||
1451 | |||
1452 | DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret); | ||
1453 | |||
1454 | /* End of trace events for fs/ocfs2/file.c. */ | ||
1455 | |||
1456 | /* Trace events for fs/ocfs2/inode.c. */ | ||
1457 | |||
1458 | TRACE_EVENT(ocfs2_iget_begin, | ||
1459 | TP_PROTO(unsigned long long ino, unsigned int flags, int sysfile_type), | ||
1460 | TP_ARGS(ino, flags, sysfile_type), | ||
1461 | TP_STRUCT__entry( | ||
1462 | __field(unsigned long long, ino) | ||
1463 | __field(unsigned int, flags) | ||
1464 | __field(int, sysfile_type) | ||
1465 | ), | ||
1466 | TP_fast_assign( | ||
1467 | __entry->ino = ino; | ||
1468 | __entry->flags = flags; | ||
1469 | __entry->sysfile_type = sysfile_type; | ||
1470 | ), | ||
1471 | TP_printk("%llu %u %d", __entry->ino, | ||
1472 | __entry->flags, __entry->sysfile_type) | ||
1473 | ); | ||
1474 | |||
1475 | DEFINE_OCFS2_ULL_EVENT(ocfs2_iget5_locked); | ||
1476 | |||
1477 | TRACE_EVENT(ocfs2_iget_end, | ||
1478 | TP_PROTO(void *inode, unsigned long long ino), | ||
1479 | TP_ARGS(inode, ino), | ||
1480 | TP_STRUCT__entry( | ||
1481 | __field(void *, inode) | ||
1482 | __field(unsigned long long, ino) | ||
1483 | ), | ||
1484 | TP_fast_assign( | ||
1485 | __entry->inode = inode; | ||
1486 | __entry->ino = ino; | ||
1487 | ), | ||
1488 | TP_printk("%p %llu", __entry->inode, __entry->ino) | ||
1489 | ); | ||
1490 | |||
1491 | TRACE_EVENT(ocfs2_find_actor, | ||
1492 | TP_PROTO(void *inode, unsigned long long ino, | ||
1493 | void *args, unsigned long long fi_blkno), | ||
1494 | TP_ARGS(inode, ino, args, fi_blkno), | ||
1495 | TP_STRUCT__entry( | ||
1496 | __field(void *, inode) | ||
1497 | __field(unsigned long long, ino) | ||
1498 | __field(void *, args) | ||
1499 | __field(unsigned long long, fi_blkno) | ||
1500 | ), | ||
1501 | TP_fast_assign( | ||
1502 | __entry->inode = inode; | ||
1503 | __entry->ino = ino; | ||
1504 | __entry->args = args; | ||
1505 | __entry->fi_blkno = fi_blkno; | ||
1506 | ), | ||
1507 | TP_printk("%p %llu %p %llu", __entry->inode, __entry->ino, | ||
1508 | __entry->args, __entry->fi_blkno) | ||
1509 | ); | ||
1510 | |||
1511 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_populate_inode); | ||
1512 | |||
1513 | DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_read_locked_inode); | ||
1514 | |||
1515 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_check_orphan_recovery_state); | ||
1516 | |||
1517 | DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_inode_block); | ||
1518 | |||
1519 | TRACE_EVENT(ocfs2_inode_is_valid_to_delete, | ||
1520 | TP_PROTO(void *task, void *dc_task, unsigned long long ino, | ||
1521 | unsigned int flags), | ||
1522 | TP_ARGS(task, dc_task, ino, flags), | ||
1523 | TP_STRUCT__entry( | ||
1524 | __field(void *, task) | ||
1525 | __field(void *, dc_task) | ||
1526 | __field(unsigned long long, ino) | ||
1527 | __field(unsigned int, flags) | ||
1528 | ), | ||
1529 | TP_fast_assign( | ||
1530 | __entry->task = task; | ||
1531 | __entry->dc_task = dc_task; | ||
1532 | __entry->ino = ino; | ||
1533 | __entry->flags = flags; | ||
1534 | ), | ||
1535 | TP_printk("%p %p %llu %u", __entry->task, __entry->dc_task, | ||
1536 | __entry->ino, __entry->flags) | ||
1537 | ); | ||
1538 | |||
1539 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_query_inode_wipe_begin); | ||
1540 | |||
1541 | DEFINE_OCFS2_UINT_EVENT(ocfs2_query_inode_wipe_succ); | ||
1542 | |||
1543 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_query_inode_wipe_end); | ||
1544 | |||
1545 | DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_cleanup_delete_inode); | ||
1546 | |||
1547 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_delete_inode); | ||
1548 | |||
1549 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_clear_inode); | ||
1550 | |||
1551 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_drop_inode); | ||
1552 | |||
1553 | TRACE_EVENT(ocfs2_inode_revalidate, | ||
1554 | TP_PROTO(void *inode, unsigned long long ino, | ||
1555 | unsigned int flags), | ||
1556 | TP_ARGS(inode, ino, flags), | ||
1557 | TP_STRUCT__entry( | ||
1558 | __field(void *, inode) | ||
1559 | __field(unsigned long long, ino) | ||
1560 | __field(unsigned int, flags) | ||
1561 | ), | ||
1562 | TP_fast_assign( | ||
1563 | __entry->inode = inode; | ||
1564 | __entry->ino = ino; | ||
1565 | __entry->flags = flags; | ||
1566 | ), | ||
1567 | TP_printk("%p %llu %u", __entry->inode, __entry->ino, __entry->flags) | ||
1568 | ); | ||
1569 | |||
1570 | DEFINE_OCFS2_ULL_EVENT(ocfs2_mark_inode_dirty); | ||
1571 | |||
1572 | /* End of trace events for fs/ocfs2/inode.c. */ | ||
1573 | |||
1574 | /* Trace events for fs/ocfs2/extent_map.c. */ | ||
1575 | |||
1576 | TRACE_EVENT(ocfs2_read_virt_blocks, | ||
1577 | TP_PROTO(void *inode, unsigned long long vblock, int nr, | ||
1578 | void *bhs, unsigned int flags, void *validate), | ||
1579 | TP_ARGS(inode, vblock, nr, bhs, flags, validate), | ||
1580 | TP_STRUCT__entry( | ||
1581 | __field(void *, inode) | ||
1582 | __field(unsigned long long, vblock) | ||
1583 | __field(int, nr) | ||
1584 | __field(void *, bhs) | ||
1585 | __field(unsigned int, flags) | ||
1586 | __field(void *, validate) | ||
1587 | ), | ||
1588 | TP_fast_assign( | ||
1589 | __entry->inode = inode; | ||
1590 | __entry->vblock = vblock; | ||
1591 | __entry->nr = nr; | ||
1592 | __entry->bhs = bhs; | ||
1593 | __entry->flags = flags; | ||
1594 | __entry->validate = validate; | ||
1595 | ), | ||
1596 | TP_printk("%p %llu %d %p %x %p", __entry->inode, __entry->vblock, | ||
1597 | __entry->nr, __entry->bhs, __entry->flags, __entry->validate) | ||
1598 | ); | ||
1599 | |||
1600 | /* End of trace events for fs/ocfs2/extent_map.c. */ | ||
1601 | |||
1602 | /* Trace events for fs/ocfs2/slot_map.c. */ | ||
1603 | |||
1604 | DEFINE_OCFS2_UINT_EVENT(ocfs2_refresh_slot_info); | ||
1605 | |||
1606 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers); | ||
1607 | |||
1608 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers_block); | ||
1609 | |||
1610 | DEFINE_OCFS2_INT_EVENT(ocfs2_find_slot); | ||
1611 | |||
1612 | /* End of trace events for fs/ocfs2/slot_map.c. */ | ||
1613 | |||
1614 | /* Trace events for fs/ocfs2/heartbeat.c. */ | ||
1615 | |||
1616 | DEFINE_OCFS2_INT_EVENT(ocfs2_do_node_down); | ||
1617 | |||
1618 | /* End of trace events for fs/ocfs2/heartbeat.c. */ | ||
1619 | |||
1620 | /* Trace events for fs/ocfs2/super.c. */ | ||
1621 | |||
1622 | TRACE_EVENT(ocfs2_remount, | ||
1623 | TP_PROTO(unsigned long s_flags, unsigned long osb_flags, int flags), | ||
1624 | TP_ARGS(s_flags, osb_flags, flags), | ||
1625 | TP_STRUCT__entry( | ||
1626 | __field(unsigned long, s_flags) | ||
1627 | __field(unsigned long, osb_flags) | ||
1628 | __field(int, flags) | ||
1629 | ), | ||
1630 | TP_fast_assign( | ||
1631 | __entry->s_flags = s_flags; | ||
1632 | __entry->osb_flags = osb_flags; | ||
1633 | __entry->flags = flags; | ||
1634 | ), | ||
1635 | TP_printk("%lu %lu %d", __entry->s_flags, | ||
1636 | __entry->osb_flags, __entry->flags) | ||
1637 | ); | ||
1638 | |||
1639 | TRACE_EVENT(ocfs2_fill_super, | ||
1640 | TP_PROTO(void *sb, void *data, int silent), | ||
1641 | TP_ARGS(sb, data, silent), | ||
1642 | TP_STRUCT__entry( | ||
1643 | __field(void *, sb) | ||
1644 | __field(void *, data) | ||
1645 | __field(int, silent) | ||
1646 | ), | ||
1647 | TP_fast_assign( | ||
1648 | __entry->sb = sb; | ||
1649 | __entry->data = data; | ||
1650 | __entry->silent = silent; | ||
1651 | ), | ||
1652 | TP_printk("%p %p %d", __entry->sb, | ||
1653 | __entry->data, __entry->silent) | ||
1654 | ); | ||
1655 | |||
1656 | TRACE_EVENT(ocfs2_parse_options, | ||
1657 | TP_PROTO(int is_remount, char *options), | ||
1658 | TP_ARGS(is_remount, options), | ||
1659 | TP_STRUCT__entry( | ||
1660 | __field(int, is_remount) | ||
1661 | __string(options, options) | ||
1662 | ), | ||
1663 | TP_fast_assign( | ||
1664 | __entry->is_remount = is_remount; | ||
1665 | __assign_str(options, options); | ||
1666 | ), | ||
1667 | TP_printk("%d %s", __entry->is_remount, __get_str(options)) | ||
1668 | ); | ||
1669 | |||
1670 | DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super); | ||
1671 | |||
1672 | TRACE_EVENT(ocfs2_statfs, | ||
1673 | TP_PROTO(void *sb, void *buf), | ||
1674 | TP_ARGS(sb, buf), | ||
1675 | TP_STRUCT__entry( | ||
1676 | __field(void *, sb) | ||
1677 | __field(void *, buf) | ||
1678 | ), | ||
1679 | TP_fast_assign( | ||
1680 | __entry->sb = sb; | ||
1681 | __entry->buf = buf; | ||
1682 | ), | ||
1683 | TP_printk("%p %p", __entry->sb, __entry->buf) | ||
1684 | ); | ||
1685 | |||
1686 | DEFINE_OCFS2_POINTER_EVENT(ocfs2_dismount_volume); | ||
1687 | |||
1688 | TRACE_EVENT(ocfs2_initialize_super, | ||
1689 | TP_PROTO(char *label, char *uuid_str, unsigned long long root_dir, | ||
1690 | unsigned long long system_dir, int cluster_bits), | ||
1691 | TP_ARGS(label, uuid_str, root_dir, system_dir, cluster_bits), | ||
1692 | TP_STRUCT__entry( | ||
1693 | __string(label, label) | ||
1694 | __string(uuid_str, uuid_str) | ||
1695 | __field(unsigned long long, root_dir) | ||
1696 | __field(unsigned long long, system_dir) | ||
1697 | __field(int, cluster_bits) | ||
1698 | ), | ||
1699 | TP_fast_assign( | ||
1700 | __assign_str(label, label); | ||
1701 | __assign_str(uuid_str, uuid_str); | ||
1702 | __entry->root_dir = root_dir; | ||
1703 | __entry->system_dir = system_dir; | ||
1704 | __entry->cluster_bits = cluster_bits; | ||
1705 | ), | ||
1706 | TP_printk("%s %s %llu %llu %d", __get_str(label), __get_str(uuid_str), | ||
1707 | __entry->root_dir, __entry->system_dir, __entry->cluster_bits) | ||
1708 | ); | ||
1709 | |||
1710 | /* End of trace events for fs/ocfs2/super.c. */ | ||
1711 | |||
1712 | /* Trace events for fs/ocfs2/xattr.c. */ | ||
1713 | |||
1714 | DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_xattr_block); | ||
1715 | |||
1716 | DEFINE_OCFS2_UINT_EVENT(ocfs2_xattr_extend_allocation); | ||
1717 | |||
1718 | TRACE_EVENT(ocfs2_init_xattr_set_ctxt, | ||
1719 | TP_PROTO(const char *name, int meta, int clusters, int credits), | ||
1720 | TP_ARGS(name, meta, clusters, credits), | ||
1721 | TP_STRUCT__entry( | ||
1722 | __string(name, name) | ||
1723 | __field(int, meta) | ||
1724 | __field(int, clusters) | ||
1725 | __field(int, credits) | ||
1726 | ), | ||
1727 | TP_fast_assign( | ||
1728 | __assign_str(name, name); | ||
1729 | __entry->meta = meta; | ||
1730 | __entry->clusters = clusters; | ||
1731 | __entry->credits = credits; | ||
1732 | ), | ||
1733 | TP_printk("%s %d %d %d", __get_str(name), __entry->meta, | ||
1734 | __entry->clusters, __entry->credits) | ||
1735 | ); | ||
1736 | |||
1737 | DECLARE_EVENT_CLASS(ocfs2__xattr_find, | ||
1738 | TP_PROTO(unsigned long long ino, const char *name, int name_index, | ||
1739 | unsigned int hash, unsigned long long location, | ||
1740 | int xe_index), | ||
1741 | TP_ARGS(ino, name, name_index, hash, location, xe_index), | ||
1742 | TP_STRUCT__entry( | ||
1743 | __field(unsigned long long, ino) | ||
1744 | __string(name, name) | ||
1745 | __field(int, name_index) | ||
1746 | __field(unsigned int, hash) | ||
1747 | __field(unsigned long long, location) | ||
1748 | __field(int, xe_index) | ||
1749 | ), | ||
1750 | TP_fast_assign( | ||
1751 | __entry->ino = ino; | ||
1752 | __assign_str(name, name); | ||
1753 | __entry->name_index = name_index; | ||
1754 | __entry->hash = hash; | ||
1755 | __entry->location = location; | ||
1756 | __entry->xe_index = xe_index; | ||
1757 | ), | ||
1758 | TP_printk("%llu %s %d %u %llu %d", __entry->ino, __get_str(name), | ||
1759 | __entry->name_index, __entry->hash, __entry->location, | ||
1760 | __entry->xe_index) | ||
1761 | ); | ||
1762 | |||
1763 | #define DEFINE_OCFS2_XATTR_FIND_EVENT(name) \ | ||
1764 | DEFINE_EVENT(ocfs2__xattr_find, name, \ | ||
1765 | TP_PROTO(unsigned long long ino, const char *name, int name_index, \ | ||
1766 | unsigned int hash, unsigned long long bucket, \ | ||
1767 | int xe_index), \ | ||
1768 | TP_ARGS(ino, name, name_index, hash, bucket, xe_index)) | ||
1769 | |||
1770 | DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_bucket_find); | ||
1771 | |||
1772 | DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find); | ||
1773 | |||
1774 | DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find_rec); | ||
1775 | |||
1776 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_iterate_xattr_buckets); | ||
1777 | |||
1778 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_iterate_xattr_bucket); | ||
1779 | |||
1780 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cp_xattr_block_to_bucket_begin); | ||
1781 | |||
1782 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cp_xattr_block_to_bucket_end); | ||
1783 | |||
1784 | DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block_begin); | ||
1785 | |||
1786 | DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block); | ||
1787 | |||
1788 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_defrag_xattr_bucket); | ||
1789 | |||
1790 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_bucket_cross_cluster); | ||
1791 | |||
1792 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_divide_xattr_bucket_begin); | ||
1793 | |||
1794 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_divide_xattr_bucket_move); | ||
1795 | |||
1796 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_cp_xattr_bucket); | ||
1797 | |||
1798 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_buckets); | ||
1799 | |||
1800 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_adjust_xattr_cross_cluster); | ||
1801 | |||
1802 | DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_begin); | ||
1803 | |||
1804 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_add_new_xattr_cluster); | ||
1805 | |||
1806 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_insert); | ||
1807 | |||
1808 | DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_extend_xattr_bucket); | ||
1809 | |||
1810 | DEFINE_OCFS2_ULL_EVENT(ocfs2_add_new_xattr_bucket); | ||
1811 | |||
1812 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_xattr_bucket_value_truncate); | ||
1813 | |||
1814 | DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_rm_xattr_cluster); | ||
1815 | |||
1816 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_header); | ||
1817 | |||
1818 | DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_create_empty_xattr_block); | ||
1819 | |||
1820 | DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_bucket); | ||
1821 | |||
1822 | DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_index_block); | ||
1823 | |||
1824 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_xattr_bucket_value_refcount); | ||
1825 | |||
1826 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_reflink_xattr_buckets); | ||
1827 | |||
1828 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_rec); | ||
1829 | |||
1830 | /* End of trace events for fs/ocfs2/xattr.c. */ | ||
1831 | |||
1832 | /* Trace events for fs/ocfs2/reservations.c. */ | ||
1833 | |||
1834 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_insert); | ||
1835 | |||
1836 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_begin); | ||
1837 | |||
1838 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_end); | ||
1839 | |||
1840 | TRACE_EVENT(ocfs2_resv_find_window_begin, | ||
1841 | TP_PROTO(unsigned int r_start, unsigned int r_end, unsigned int goal, | ||
1842 | unsigned int wanted, int empty_root), | ||
1843 | TP_ARGS(r_start, r_end, goal, wanted, empty_root), | ||
1844 | TP_STRUCT__entry( | ||
1845 | __field(unsigned int, r_start) | ||
1846 | __field(unsigned int, r_end) | ||
1847 | __field(unsigned int, goal) | ||
1848 | __field(unsigned int, wanted) | ||
1849 | __field(int, empty_root) | ||
1850 | ), | ||
1851 | TP_fast_assign( | ||
1852 | __entry->r_start = r_start; | ||
1853 | __entry->r_end = r_end; | ||
1854 | __entry->goal = goal; | ||
1855 | __entry->wanted = wanted; | ||
1856 | __entry->empty_root = empty_root; | ||
1857 | ), | ||
1858 | TP_printk("%u %u %u %u %d", __entry->r_start, __entry->r_end, | ||
1859 | __entry->goal, __entry->wanted, __entry->empty_root) | ||
1860 | ); | ||
1861 | |||
1862 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_find_window_prev); | ||
1863 | |||
1864 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_resv_find_window_next); | ||
1865 | |||
1866 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cannibalize_resv_begin); | ||
1867 | |||
1868 | TRACE_EVENT(ocfs2_cannibalize_resv_end, | ||
1869 | TP_PROTO(unsigned int start, unsigned int end, unsigned int len, | ||
1870 | unsigned int last_start, unsigned int last_len), | ||
1871 | TP_ARGS(start, end, len, last_start, last_len), | ||
1872 | TP_STRUCT__entry( | ||
1873 | __field(unsigned int, start) | ||
1874 | __field(unsigned int, end) | ||
1875 | __field(unsigned int, len) | ||
1876 | __field(unsigned int, last_start) | ||
1877 | __field(unsigned int, last_len) | ||
1878 | ), | ||
1879 | TP_fast_assign( | ||
1880 | __entry->start = start; | ||
1881 | __entry->end = end; | ||
1882 | __entry->len = len; | ||
1883 | __entry->last_start = last_start; | ||
1884 | __entry->last_len = last_len; | ||
1885 | ), | ||
1886 | TP_printk("%u %u %u %u %u", __entry->start, __entry->end, | ||
1887 | __entry->len, __entry->last_start, __entry->last_len) | ||
1888 | ); | ||
1889 | |||
1890 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_resv_bits); | ||
1891 | |||
1892 | TRACE_EVENT(ocfs2_resmap_claimed_bits_begin, | ||
1893 | TP_PROTO(unsigned int cstart, unsigned int cend, unsigned int clen, | ||
1894 | unsigned int r_start, unsigned int r_end, unsigned int r_len, | ||
1895 | unsigned int last_start, unsigned int last_len), | ||
1896 | TP_ARGS(cstart, cend, clen, r_start, r_end, | ||
1897 | r_len, last_start, last_len), | ||
1898 | TP_STRUCT__entry( | ||
1899 | __field(unsigned int, cstart) | ||
1900 | __field(unsigned int, cend) | ||
1901 | __field(unsigned int, clen) | ||
1902 | __field(unsigned int, r_start) | ||
1903 | __field(unsigned int, r_end) | ||
1904 | __field(unsigned int, r_len) | ||
1905 | __field(unsigned int, last_start) | ||
1906 | __field(unsigned int, last_len) | ||
1907 | ), | ||
1908 | TP_fast_assign( | ||
1909 | __entry->cstart = cstart; | ||
1910 | __entry->cend = cend; | ||
1911 | __entry->clen = clen; | ||
1912 | __entry->r_start = r_start; | ||
1913 | __entry->r_end = r_end; | ||
1914 | __entry->r_len = r_len; | ||
1915 | __entry->last_start = last_start; | ||
1916 | __entry->last_len = last_len; | ||
1917 | ), | ||
1918 | TP_printk("%u %u %u %u %u %u %u %u", | ||
1919 | __entry->cstart, __entry->cend, __entry->clen, | ||
1920 | __entry->r_start, __entry->r_end, __entry->r_len, | ||
1921 | __entry->last_start, __entry->last_len) | ||
1922 | ); | ||
1923 | |||
1924 | TRACE_EVENT(ocfs2_resmap_claimed_bits_end, | ||
1925 | TP_PROTO(unsigned int start, unsigned int end, unsigned int len, | ||
1926 | unsigned int last_start, unsigned int last_len), | ||
1927 | TP_ARGS(start, end, len, last_start, last_len), | ||
1928 | TP_STRUCT__entry( | ||
1929 | __field(unsigned int, start) | ||
1930 | __field(unsigned int, end) | ||
1931 | __field(unsigned int, len) | ||
1932 | __field(unsigned int, last_start) | ||
1933 | __field(unsigned int, last_len) | ||
1934 | ), | ||
1935 | TP_fast_assign( | ||
1936 | __entry->start = start; | ||
1937 | __entry->end = end; | ||
1938 | __entry->len = len; | ||
1939 | __entry->last_start = last_start; | ||
1940 | __entry->last_len = last_len; | ||
1941 | ), | ||
1942 | TP_printk("%u %u %u %u %u", __entry->start, __entry->end, | ||
1943 | __entry->len, __entry->last_start, __entry->last_len) | ||
1944 | ); | ||
1945 | |||
1946 | /* End of trace events for fs/ocfs2/reservations.c. */ | ||
1947 | |||
1948 | /* Trace events for fs/ocfs2/quota_local.c. */ | ||
1949 | |||
1950 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_recover_local_quota_file); | ||
1951 | |||
1952 | DEFINE_OCFS2_INT_EVENT(ocfs2_finish_quota_recovery); | ||
1953 | |||
1954 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(olq_set_dquot); | ||
1955 | |||
1956 | /* End of trace events for fs/ocfs2/quota_local.c. */ | ||
1957 | |||
1958 | /* Trace events for fs/ocfs2/quota_global.c. */ | ||
1959 | |||
1960 | DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_quota_block); | ||
1961 | |||
1962 | TRACE_EVENT(ocfs2_sync_dquot, | ||
1963 | TP_PROTO(unsigned int dq_id, long long dqb_curspace, | ||
1964 | long long spacechange, long long curinodes, | ||
1965 | long long inodechange), | ||
1966 | TP_ARGS(dq_id, dqb_curspace, spacechange, curinodes, inodechange), | ||
1967 | TP_STRUCT__entry( | ||
1968 | __field(unsigned int, dq_id) | ||
1969 | __field(long long, dqb_curspace) | ||
1970 | __field(long long, spacechange) | ||
1971 | __field(long long, curinodes) | ||
1972 | __field(long long, inodechange) | ||
1973 | ), | ||
1974 | TP_fast_assign( | ||
1975 | __entry->dq_id = dq_id; | ||
1976 | __entry->dqb_curspace = dqb_curspace; | ||
1977 | __entry->spacechange = spacechange; | ||
1978 | __entry->curinodes = curinodes; | ||
1979 | __entry->inodechange = inodechange; | ||
1980 | ), | ||
1981 | TP_printk("%u %lld %lld %lld %lld", __entry->dq_id, | ||
1982 | __entry->dqb_curspace, __entry->spacechange, | ||
1983 | __entry->curinodes, __entry->inodechange) | ||
1984 | ); | ||
1985 | |||
1986 | TRACE_EVENT(ocfs2_sync_dquot_helper, | ||
1987 | TP_PROTO(unsigned int dq_id, unsigned int dq_type, unsigned long type, | ||
1988 | const char *s_id), | ||
1989 | TP_ARGS(dq_id, dq_type, type, s_id), | ||
1990 | |||
1991 | TP_STRUCT__entry( | ||
1992 | __field(unsigned int, dq_id) | ||
1993 | __field(unsigned int, dq_type) | ||
1994 | __field(unsigned long, type) | ||
1995 | __string(s_id, s_id) | ||
1996 | ), | ||
1997 | TP_fast_assign( | ||
1998 | __entry->dq_id = dq_id; | ||
1999 | __entry->dq_type = dq_type; | ||
2000 | __entry->type = type; | ||
2001 | __assign_str(s_id, s_id); | ||
2002 | ), | ||
2003 | TP_printk("%u %u %lu %s", __entry->dq_id, __entry->dq_type, | ||
2004 | __entry->type, __get_str(s_id)) | ||
2005 | ); | ||
2006 | |||
2007 | DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_write_dquot); | ||
2008 | |||
2009 | DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_release_dquot); | ||
2010 | |||
2011 | DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_acquire_dquot); | ||
2012 | |||
2013 | DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_mark_dquot_dirty); | ||
2014 | |||
2015 | /* End of trace events for fs/ocfs2/quota_global.c. */ | ||
2016 | |||
2017 | /* Trace events for fs/ocfs2/dir.c. */ | ||
2018 | DEFINE_OCFS2_INT_EVENT(ocfs2_search_dirblock); | ||
2019 | |||
2020 | DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_dir_block); | ||
2021 | |||
2022 | DEFINE_OCFS2_POINTER_EVENT(ocfs2_find_entry_el); | ||
2023 | |||
2024 | TRACE_EVENT(ocfs2_dx_dir_search, | ||
2025 | TP_PROTO(unsigned long long ino, int namelen, const char *name, | ||
2026 | unsigned int major_hash, unsigned int minor_hash, | ||
2027 | unsigned long long blkno), | ||
2028 | TP_ARGS(ino, namelen, name, major_hash, minor_hash, blkno), | ||
2029 | TP_STRUCT__entry( | ||
2030 | __field(unsigned long long, ino) | ||
2031 | __field(int, namelen) | ||
2032 | __string(name, name) | ||
2033 | __field(unsigned int, major_hash) | ||
2034 | __field(unsigned int, minor_hash) | ||
2035 | __field(unsigned long long, blkno) | ||
2036 | ), | ||
2037 | TP_fast_assign( | ||
2038 | __entry->ino = ino; | ||
2039 | __entry->namelen = namelen; | ||
2040 | __assign_str(name, name); | ||
2041 | __entry->major_hash = major_hash; | ||
2042 | __entry->minor_hash = minor_hash; | ||
2043 | __entry->blkno = blkno; | ||
2044 | ), | ||
2045 | TP_printk("%llu %.*s %u %u %llu", __entry->ino, | ||
2046 | __entry->namelen, __get_str(name), | ||
2047 | __entry->major_hash, __entry->minor_hash, __entry->blkno) | ||
2048 | ); | ||
2049 | |||
2050 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_dx_dir_search_leaf_info); | ||
2051 | |||
2052 | DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_delete_entry_dx); | ||
2053 | |||
2054 | DEFINE_OCFS2_ULL_EVENT(ocfs2_readdir); | ||
2055 | |||
2056 | TRACE_EVENT(ocfs2_find_files_on_disk, | ||
2057 | TP_PROTO(int namelen, const char *name, void *blkno, | ||
2058 | unsigned long long dir), | ||
2059 | TP_ARGS(namelen, name, blkno, dir), | ||
2060 | TP_STRUCT__entry( | ||
2061 | __field(int, namelen) | ||
2062 | __string(name, name) | ||
2063 | __field(void *, blkno) | ||
2064 | __field(unsigned long long, dir) | ||
2065 | ), | ||
2066 | TP_fast_assign( | ||
2067 | __entry->namelen = namelen; | ||
2068 | __assign_str(name, name); | ||
2069 | __entry->blkno = blkno; | ||
2070 | __entry->dir = dir; | ||
2071 | ), | ||
2072 | TP_printk("%.*s %p %llu", __entry->namelen, __get_str(name), | ||
2073 | __entry->blkno, __entry->dir) | ||
2074 | ); | ||
2075 | |||
2076 | TRACE_EVENT(ocfs2_check_dir_for_entry, | ||
2077 | TP_PROTO(unsigned long long dir, int namelen, const char *name), | ||
2078 | TP_ARGS(dir, namelen, name), | ||
2079 | TP_STRUCT__entry( | ||
2080 | __field(unsigned long long, dir) | ||
2081 | __field(int, namelen) | ||
2082 | __string(name, name) | ||
2083 | ), | ||
2084 | TP_fast_assign( | ||
2085 | __entry->dir = dir; | ||
2086 | __entry->namelen = namelen; | ||
2087 | __assign_str(name, name); | ||
2088 | ), | ||
2089 | TP_printk("%llu %.*s", __entry->dir, | ||
2090 | __entry->namelen, __get_str(name)) | ||
2091 | ); | ||
2092 | |||
2093 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_dx_dir_attach_index); | ||
2094 | |||
2095 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_format_cluster); | ||
2096 | |||
2097 | TRACE_EVENT(ocfs2_dx_dir_index_root_block, | ||
2098 | TP_PROTO(unsigned long long dir, | ||
2099 | unsigned int major_hash, unsigned int minor_hash, | ||
2100 | int namelen, const char *name, unsigned int num_used), | ||
2101 | TP_ARGS(dir, major_hash, minor_hash, namelen, name, num_used), | ||
2102 | TP_STRUCT__entry( | ||
2103 | __field(unsigned long long, dir) | ||
2104 | __field(unsigned int, major_hash) | ||
2105 | __field(unsigned int, minor_hash) | ||
2106 | __field(int, namelen) | ||
2107 | __string(name, name) | ||
2108 | __field(unsigned int, num_used) | ||
2109 | ), | ||
2110 | TP_fast_assign( | ||
2111 | __entry->dir = dir; | ||
2112 | __entry->major_hash = major_hash; | ||
2113 | __entry->minor_hash = minor_hash; | ||
2114 | __entry->namelen = namelen; | ||
2115 | __assign_str(name, name); | ||
2116 | __entry->num_used = num_used; | ||
2117 | ), | ||
2118 | TP_printk("%llu %x %x %.*s %u", __entry->dir, | ||
2119 | __entry->major_hash, __entry->minor_hash, | ||
2120 | __entry->namelen, __get_str(name), __entry->num_used) | ||
2121 | ); | ||
2122 | |||
2123 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_extend_dir); | ||
2124 | |||
2125 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_rebalance); | ||
2126 | |||
2127 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_dx_dir_rebalance_split); | ||
2128 | |||
2129 | DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_prepare_dir_for_insert); | ||
2130 | |||
2131 | /* End of trace events for fs/ocfs2/dir.c. */ | ||
2132 | |||
2133 | /* Trace events for fs/ocfs2/namei.c. */ | ||
2134 | |||
2135 | DECLARE_EVENT_CLASS(ocfs2__dentry_ops, | ||
2136 | TP_PROTO(void *dir, void *dentry, int name_len, const char *name, | ||
2137 | unsigned long long dir_blkno, unsigned long long extra), | ||
2138 | TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra), | ||
2139 | TP_STRUCT__entry( | ||
2140 | __field(void *, dir) | ||
2141 | __field(void *, dentry) | ||
2142 | __field(int, name_len) | ||
2143 | __string(name, name) | ||
2144 | __field(unsigned long long, dir_blkno) | ||
2145 | __field(unsigned long long, extra) | ||
2146 | ), | ||
2147 | TP_fast_assign( | ||
2148 | __entry->dir = dir; | ||
2149 | __entry->dentry = dentry; | ||
2150 | __entry->name_len = name_len; | ||
2151 | __assign_str(name, name); | ||
2152 | __entry->dir_blkno = dir_blkno; | ||
2153 | __entry->extra = extra; | ||
2154 | ), | ||
2155 | TP_printk("%p %p %.*s %llu %llu", __entry->dir, __entry->dentry, | ||
2156 | __entry->name_len, __get_str(name), | ||
2157 | __entry->dir_blkno, __entry->extra) | ||
2158 | ); | ||
2159 | |||
2160 | #define DEFINE_OCFS2_DENTRY_OPS(name) \ | ||
2161 | DEFINE_EVENT(ocfs2__dentry_ops, name, \ | ||
2162 | TP_PROTO(void *dir, void *dentry, int name_len, const char *name, \ | ||
2163 | unsigned long long dir_blkno, unsigned long long extra), \ | ||
2164 | TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra)) | ||
2165 | |||
2166 | DEFINE_OCFS2_DENTRY_OPS(ocfs2_lookup); | ||
2167 | |||
2168 | DEFINE_OCFS2_DENTRY_OPS(ocfs2_mkdir); | ||
2169 | |||
2170 | DEFINE_OCFS2_DENTRY_OPS(ocfs2_create); | ||
2171 | |||
2172 | DEFINE_OCFS2_DENTRY_OPS(ocfs2_unlink); | ||
2173 | |||
2174 | DEFINE_OCFS2_DENTRY_OPS(ocfs2_symlink_create); | ||
2175 | |||
2176 | DEFINE_OCFS2_DENTRY_OPS(ocfs2_mv_orphaned_inode_to_new); | ||
2177 | |||
2178 | DEFINE_OCFS2_POINTER_EVENT(ocfs2_lookup_ret); | ||
2179 | |||
2180 | TRACE_EVENT(ocfs2_mknod, | ||
2181 | TP_PROTO(void *dir, void *dentry, int name_len, const char *name, | ||
2182 | unsigned long long dir_blkno, unsigned long dev, int mode), | ||
2183 | TP_ARGS(dir, dentry, name_len, name, dir_blkno, dev, mode), | ||
2184 | TP_STRUCT__entry( | ||
2185 | __field(void *, dir) | ||
2186 | __field(void *, dentry) | ||
2187 | __field(int, name_len) | ||
2188 | __string(name, name) | ||
2189 | __field(unsigned long long, dir_blkno) | ||
2190 | __field(unsigned long, dev) | ||
2191 | __field(int, mode) | ||
2192 | ), | ||
2193 | TP_fast_assign( | ||
2194 | __entry->dir = dir; | ||
2195 | __entry->dentry = dentry; | ||
2196 | __entry->name_len = name_len; | ||
2197 | __assign_str(name, name); | ||
2198 | __entry->dir_blkno = dir_blkno; | ||
2199 | __entry->dev = dev; | ||
2200 | __entry->mode = mode; | ||
2201 | ), | ||
2202 | TP_printk("%p %p %.*s %llu %lu %d", __entry->dir, __entry->dentry, | ||
2203 | __entry->name_len, __get_str(name), | ||
2204 | __entry->dir_blkno, __entry->dev, __entry->mode) | ||
2205 | ); | ||
2206 | |||
2207 | TRACE_EVENT(ocfs2_link, | ||
2208 | TP_PROTO(unsigned long long ino, int old_len, const char *old_name, | ||
2209 | int name_len, const char *name), | ||
2210 | TP_ARGS(ino, old_len, old_name, name_len, name), | ||
2211 | TP_STRUCT__entry( | ||
2212 | __field(unsigned long long, ino) | ||
2213 | __field(int, old_len) | ||
2214 | __string(old_name, old_name) | ||
2215 | __field(int, name_len) | ||
2216 | __string(name, name) | ||
2217 | ), | ||
2218 | TP_fast_assign( | ||
2219 | __entry->ino = ino; | ||
2220 | __entry->old_len = old_len; | ||
2221 | __assign_str(old_name, old_name); | ||
2222 | __entry->name_len = name_len; | ||
2223 | __assign_str(name, name); | ||
2224 | ), | ||
2225 | TP_printk("%llu %.*s %.*s", __entry->ino, | ||
2226 | __entry->old_len, __get_str(old_name), | ||
2227 | __entry->name_len, __get_str(name)) | ||
2228 | ); | ||
2229 | |||
2230 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_unlink_noent); | ||
2231 | |||
2232 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock); | ||
2233 | |||
2234 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock_end); | ||
2235 | |||
2236 | TRACE_EVENT(ocfs2_rename, | ||
2237 | TP_PROTO(void *old_dir, void *old_dentry, | ||
2238 | void *new_dir, void *new_dentry, | ||
2239 | int old_len, const char *old_name, | ||
2240 | int new_len, const char *new_name), | ||
2241 | TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, | ||
2242 | old_len, old_name, new_len, new_name), | ||
2243 | TP_STRUCT__entry( | ||
2244 | __field(void *, old_dir) | ||
2245 | __field(void *, old_dentry) | ||
2246 | __field(void *, new_dir) | ||
2247 | __field(void *, new_dentry) | ||
2248 | __field(int, old_len) | ||
2249 | __string(old_name, old_name) | ||
2250 | __field(int, new_len) | ||
2251 | __string(new_name, new_name) | ||
2252 | ), | ||
2253 | TP_fast_assign( | ||
2254 | __entry->old_dir = old_dir; | ||
2255 | __entry->old_dentry = old_dentry; | ||
2256 | __entry->new_dir = new_dir; | ||
2257 | __entry->new_dentry = new_dentry; | ||
2258 | __entry->old_len = old_len; | ||
2259 | __assign_str(old_name, old_name); | ||
2260 | __entry->new_len = new_len; | ||
2261 | __assign_str(new_name, new_name); | ||
2262 | ), | ||
2263 | TP_printk("%p %p %p %p %.*s %.*s", | ||
2264 | __entry->old_dir, __entry->old_dentry, | ||
2265 | __entry->new_dir, __entry->new_dentry, | ||
2266 | __entry->old_len, __get_str(old_name), | ||
2267 | __entry->new_len, __get_str(new_name)) | ||
2268 | ); | ||
2269 | |||
2270 | TRACE_EVENT(ocfs2_rename_target_exists, | ||
2271 | TP_PROTO(int new_len, const char *new_name), | ||
2272 | TP_ARGS(new_len, new_name), | ||
2273 | TP_STRUCT__entry( | ||
2274 | __field(int, new_len) | ||
2275 | __string(new_name, new_name) | ||
2276 | ), | ||
2277 | TP_fast_assign( | ||
2278 | __entry->new_len = new_len; | ||
2279 | __assign_str(new_name, new_name); | ||
2280 | ), | ||
2281 | TP_printk("%.*s", __entry->new_len, __get_str(new_name)) | ||
2282 | ); | ||
2283 | |||
2284 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_rename_disagree); | ||
2285 | |||
2286 | TRACE_EVENT(ocfs2_rename_over_existing, | ||
2287 | TP_PROTO(unsigned long long new_blkno, void *new_bh, | ||
2288 | unsigned long long newdi_blkno), | ||
2289 | TP_ARGS(new_blkno, new_bh, newdi_blkno), | ||
2290 | TP_STRUCT__entry( | ||
2291 | __field(unsigned long long, new_blkno) | ||
2292 | __field(void *, new_bh) | ||
2293 | __field(unsigned long long, newdi_blkno) | ||
2294 | ), | ||
2295 | TP_fast_assign( | ||
2296 | __entry->new_blkno = new_blkno; | ||
2297 | __entry->new_bh = new_bh; | ||
2298 | __entry->newdi_blkno = newdi_blkno; | ||
2299 | ), | ||
2300 | TP_printk("%llu %p %llu", __entry->new_blkno, __entry->new_bh, | ||
2301 | __entry->newdi_blkno) | ||
2302 | ); | ||
2303 | |||
2304 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_create_symlink_data); | ||
2305 | |||
2306 | TRACE_EVENT(ocfs2_symlink_begin, | ||
2307 | TP_PROTO(void *dir, void *dentry, const char *symname, | ||
2308 | int len, const char *name), | ||
2309 | TP_ARGS(dir, dentry, symname, len, name), | ||
2310 | TP_STRUCT__entry( | ||
2311 | __field(void *, dir) | ||
2312 | __field(void *, dentry) | ||
2313 | __field(const char *, symname) | ||
2314 | __field(int, len) | ||
2315 | __string(name, name) | ||
2316 | ), | ||
2317 | TP_fast_assign( | ||
2318 | __entry->dir = dir; | ||
2319 | __entry->dentry = dentry; | ||
2320 | __entry->symname = symname; | ||
2321 | __entry->len = len; | ||
2322 | __assign_str(name, name); | ||
2323 | ), | ||
2324 | TP_printk("%p %p %s %.*s", __entry->dir, __entry->dentry, | ||
2325 | __entry->symname, __entry->len, __get_str(name)) | ||
2326 | ); | ||
2327 | |||
2328 | TRACE_EVENT(ocfs2_blkno_stringify, | ||
2329 | TP_PROTO(unsigned long long blkno, const char *name, int namelen), | ||
2330 | TP_ARGS(blkno, name, namelen), | ||
2331 | TP_STRUCT__entry( | ||
2332 | __field(unsigned long long, blkno) | ||
2333 | __string(name, name) | ||
2334 | __field(int, namelen) | ||
2335 | ), | ||
2336 | TP_fast_assign( | ||
2337 | __entry->blkno = blkno; | ||
2338 | __assign_str(name, name); | ||
2339 | __entry->namelen = namelen; | ||
2340 | ), | ||
2341 | TP_printk("%llu %s %d", __entry->blkno, __get_str(name), | ||
2342 | __entry->namelen) | ||
2343 | ); | ||
2344 | |||
2345 | DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_add_begin); | ||
2346 | |||
2347 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_orphan_add_end); | ||
2348 | |||
2349 | TRACE_EVENT(ocfs2_orphan_del, | ||
2350 | TP_PROTO(unsigned long long dir, const char *name, int namelen), | ||
2351 | TP_ARGS(dir, name, namelen), | ||
2352 | TP_STRUCT__entry( | ||
2353 | __field(unsigned long long, dir) | ||
2354 | __string(name, name) | ||
2355 | __field(int, namelen) | ||
2356 | ), | ||
2357 | TP_fast_assign( | ||
2358 | __entry->dir = dir; | ||
2359 | __assign_str(name, name); | ||
2360 | __entry->namelen = namelen; | ||
2361 | ), | ||
2362 | TP_printk("%llu %s %d", __entry->dir, __get_str(name), | ||
2363 | __entry->namelen) | ||
2364 | ); | ||
2365 | |||
2366 | /* End of trace events for fs/ocfs2/namei.c. */ | ||
2367 | |||
2368 | /* Trace events for fs/ocfs2/dcache.c. */ | ||
2369 | |||
2370 | TRACE_EVENT(ocfs2_dentry_revalidate, | ||
2371 | TP_PROTO(void *dentry, int len, const char *name), | ||
2372 | TP_ARGS(dentry, len, name), | ||
2373 | TP_STRUCT__entry( | ||
2374 | __field(void *, dentry) | ||
2375 | __field(int, len) | ||
2376 | __string(name, name) | ||
2377 | ), | ||
2378 | TP_fast_assign( | ||
2379 | __entry->dentry = dentry; | ||
2380 | __entry->len = len; | ||
2381 | __assign_str(name, name); | ||
2382 | ), | ||
2383 | TP_printk("%p %.*s", __entry->dentry, __entry->len, __get_str(name)) | ||
2384 | ); | ||
2385 | |||
2386 | TRACE_EVENT(ocfs2_dentry_revalidate_negative, | ||
2387 | TP_PROTO(int len, const char *name, unsigned long pgen, | ||
2388 | unsigned long gen), | ||
2389 | TP_ARGS(len, name, pgen, gen), | ||
2390 | TP_STRUCT__entry( | ||
2391 | __field(int, len) | ||
2392 | __string(name, name) | ||
2393 | __field(unsigned long, pgen) | ||
2394 | __field(unsigned long, gen) | ||
2395 | ), | ||
2396 | TP_fast_assign( | ||
2397 | __entry->len = len; | ||
2398 | __assign_str(name, name); | ||
2399 | __entry->pgen = pgen; | ||
2400 | __entry->gen = gen; | ||
2401 | ), | ||
2402 | TP_printk("%.*s %lu %lu", __entry->len, __get_str(name), | ||
2403 | __entry->pgen, __entry->gen) | ||
2404 | ); | ||
2405 | |||
2406 | DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_delete); | ||
2407 | |||
2408 | DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_dentry_revalidate_orphaned); | ||
2409 | |||
2410 | DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_nofsdata); | ||
2411 | |||
2412 | DEFINE_OCFS2_INT_EVENT(ocfs2_dentry_revalidate_ret); | ||
2413 | |||
2414 | TRACE_EVENT(ocfs2_find_local_alias, | ||
2415 | TP_PROTO(int len, const char *name), | ||
2416 | TP_ARGS(len, name), | ||
2417 | TP_STRUCT__entry( | ||
2418 | __field(int, len) | ||
2419 | __string(name, name) | ||
2420 | ), | ||
2421 | TP_fast_assign( | ||
2422 | __entry->len = len; | ||
2423 | __assign_str(name, name); | ||
2424 | ), | ||
2425 | TP_printk("%.*s", __entry->len, __get_str(name)) | ||
2426 | ); | ||
2427 | |||
2428 | TRACE_EVENT(ocfs2_dentry_attach_lock, | ||
2429 | TP_PROTO(int len, const char *name, | ||
2430 | unsigned long long parent, void *fsdata), | ||
2431 | TP_ARGS(len, name, parent, fsdata), | ||
2432 | TP_STRUCT__entry( | ||
2433 | __field(int, len) | ||
2434 | __string(name, name) | ||
2435 | __field(unsigned long long, parent) | ||
2436 | __field(void *, fsdata) | ||
2437 | ), | ||
2438 | TP_fast_assign( | ||
2439 | __entry->len = len; | ||
2440 | __assign_str(name, name); | ||
2441 | __entry->parent = parent; | ||
2442 | __entry->fsdata = fsdata; | ||
2443 | ), | ||
2444 | TP_printk("%.*s %llu %p", __entry->len, __get_str(name), | ||
2445 | __entry->parent, __entry->fsdata) | ||
2446 | ); | ||
2447 | |||
2448 | TRACE_EVENT(ocfs2_dentry_attach_lock_found, | ||
2449 | TP_PROTO(const char *name, unsigned long long parent, | ||
2450 | unsigned long long ino), | ||
2451 | TP_ARGS(name, parent, ino), | ||
2452 | TP_STRUCT__entry( | ||
2453 | __string(name, name) | ||
2454 | __field(unsigned long long, parent) | ||
2455 | __field(unsigned long long, ino) | ||
2456 | ), | ||
2457 | TP_fast_assign( | ||
2458 | __assign_str(name, name); | ||
2459 | __entry->parent = parent; | ||
2460 | __entry->ino = ino; | ||
2461 | ), | ||
2462 | TP_printk("%s %llu %llu", __get_str(name), __entry->parent, __entry->ino) | ||
2463 | ); | ||
2464 | /* End of trace events for fs/ocfs2/dcache.c. */ | ||
2465 | |||
2466 | /* Trace events for fs/ocfs2/export.c. */ | ||
2467 | |||
2468 | TRACE_EVENT(ocfs2_get_dentry_begin, | ||
2469 | TP_PROTO(void *sb, void *handle, unsigned long long blkno), | ||
2470 | TP_ARGS(sb, handle, blkno), | ||
2471 | TP_STRUCT__entry( | ||
2472 | __field(void *, sb) | ||
2473 | __field(void *, handle) | ||
2474 | __field(unsigned long long, blkno) | ||
2475 | ), | ||
2476 | TP_fast_assign( | ||
2477 | __entry->sb = sb; | ||
2478 | __entry->handle = handle; | ||
2479 | __entry->blkno = blkno; | ||
2480 | ), | ||
2481 | TP_printk("%p %p %llu", __entry->sb, __entry->handle, __entry->blkno) | ||
2482 | ); | ||
2483 | |||
2484 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_get_dentry_test_bit); | ||
2485 | |||
2486 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_get_dentry_stale); | ||
2487 | |||
2488 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_get_dentry_generation); | ||
2489 | |||
2490 | DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_dentry_end); | ||
2491 | |||
2492 | TRACE_EVENT(ocfs2_get_parent, | ||
2493 | TP_PROTO(void *child, int len, const char *name, | ||
2494 | unsigned long long ino), | ||
2495 | TP_ARGS(child, len, name, ino), | ||
2496 | TP_STRUCT__entry( | ||
2497 | __field(void *, child) | ||
2498 | __field(int, len) | ||
2499 | __string(name, name) | ||
2500 | __field(unsigned long long, ino) | ||
2501 | ), | ||
2502 | TP_fast_assign( | ||
2503 | __entry->child = child; | ||
2504 | __entry->len = len; | ||
2505 | __assign_str(name, name); | ||
2506 | __entry->ino = ino; | ||
2507 | ), | ||
2508 | TP_printk("%p %.*s %llu", __entry->child, __entry->len, | ||
2509 | __get_str(name), __entry->ino) | ||
2510 | ); | ||
2511 | |||
2512 | DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_parent_end); | ||
2513 | |||
2514 | TRACE_EVENT(ocfs2_encode_fh_begin, | ||
2515 | TP_PROTO(void *dentry, int name_len, const char *name, | ||
2516 | void *fh, int len, int connectable), | ||
2517 | TP_ARGS(dentry, name_len, name, fh, len, connectable), | ||
2518 | TP_STRUCT__entry( | ||
2519 | __field(void *, dentry) | ||
2520 | __field(int, name_len) | ||
2521 | __string(name, name) | ||
2522 | __field(void *, fh) | ||
2523 | __field(int, len) | ||
2524 | __field(int, connectable) | ||
2525 | ), | ||
2526 | TP_fast_assign( | ||
2527 | __entry->dentry = dentry; | ||
2528 | __entry->name_len = name_len; | ||
2529 | __assign_str(name, name); | ||
2530 | __entry->fh = fh; | ||
2531 | __entry->len = len; | ||
2532 | __entry->connectable = connectable; | ||
2533 | ), | ||
2534 | TP_printk("%p %.*s %p %d %d", __entry->dentry, __entry->name_len, | ||
2535 | __get_str(name), __entry->fh, __entry->len, | ||
2536 | __entry->connectable) | ||
2537 | ); | ||
2538 | |||
2539 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_self); | ||
2540 | |||
2541 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_parent); | ||
2542 | |||
2543 | DEFINE_OCFS2_INT_EVENT(ocfs2_encode_fh_type); | ||
2544 | |||
2545 | /* End of trace events for fs/ocfs2/export.c. */ | ||
2546 | |||
2547 | /* Trace events for fs/ocfs2/journal.c. */ | ||
2548 | |||
2549 | DEFINE_OCFS2_UINT_EVENT(ocfs2_commit_cache_begin); | ||
2550 | |||
2551 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end); | ||
2552 | |||
2553 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans); | ||
2554 | |||
2555 | DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart); | ||
2556 | |||
2557 | DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access); | ||
2558 | |||
2559 | DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty); | ||
2560 | |||
2561 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_journal_init); | ||
2562 | |||
2563 | DEFINE_OCFS2_UINT_EVENT(ocfs2_journal_init_maxlen); | ||
2564 | |||
2565 | DEFINE_OCFS2_INT_EVENT(ocfs2_journal_shutdown); | ||
2566 | |||
2567 | DEFINE_OCFS2_POINTER_EVENT(ocfs2_journal_shutdown_wait); | ||
2568 | |||
2569 | DEFINE_OCFS2_ULL_EVENT(ocfs2_complete_recovery); | ||
2570 | |||
2571 | DEFINE_OCFS2_INT_EVENT(ocfs2_complete_recovery_end); | ||
2572 | |||
2573 | TRACE_EVENT(ocfs2_complete_recovery_slot, | ||
2574 | TP_PROTO(int slot, unsigned long long la_ino, | ||
2575 | unsigned long long tl_ino, void *qrec), | ||
2576 | TP_ARGS(slot, la_ino, tl_ino, qrec), | ||
2577 | TP_STRUCT__entry( | ||
2578 | __field(int, slot) | ||
2579 | __field(unsigned long long, la_ino) | ||
2580 | __field(unsigned long long, tl_ino) | ||
2581 | __field(void *, qrec) | ||
2582 | ), | ||
2583 | TP_fast_assign( | ||
2584 | __entry->slot = slot; | ||
2585 | __entry->la_ino = la_ino; | ||
2586 | __entry->tl_ino = tl_ino; | ||
2587 | __entry->qrec = qrec; | ||
2588 | ), | ||
2589 | TP_printk("%d %llu %llu %p", __entry->slot, __entry->la_ino, | ||
2590 | __entry->tl_ino, __entry->qrec) | ||
2591 | ); | ||
2592 | |||
2593 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_recovery_thread_node); | ||
2594 | |||
2595 | DEFINE_OCFS2_INT_EVENT(ocfs2_recovery_thread_end); | ||
2596 | |||
2597 | TRACE_EVENT(ocfs2_recovery_thread, | ||
2598 | TP_PROTO(int node_num, int osb_node_num, int disable, | ||
2599 | void *recovery_thread, int map_set), | ||
2600 | TP_ARGS(node_num, osb_node_num, disable, recovery_thread, map_set), | ||
2601 | TP_STRUCT__entry( | ||
2602 | __field(int, node_num) | ||
2603 | __field(int, osb_node_num) | ||
2604 | __field(int, disable) | ||
2605 | __field(void *, recovery_thread) | ||
2606 | __field(int, map_set) | ||
2607 | ), | ||
2608 | TP_fast_assign( | ||
2609 | __entry->node_num = node_num; | ||
2610 | __entry->osb_node_num = osb_node_num; | ||
2611 | __entry->disable = disable; | ||
2612 | __entry->recovery_thread = recovery_thread; | ||
2613 | __entry->map_set = map_set; | ||
2614 | ), | ||
2615 | TP_printk("%d %d %d %p %d", __entry->node_num, | ||
2616 | __entry->osb_node_num, __entry->disable, | ||
2617 | __entry->recovery_thread, __entry->map_set) | ||
2618 | ); | ||
2619 | |||
2620 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_replay_journal_recovered); | ||
2621 | |||
2622 | DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_lock_err); | ||
2623 | |||
2624 | DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_skip); | ||
2625 | |||
2626 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_recover_node); | ||
2627 | |||
2628 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_recover_node_skip); | ||
2629 | |||
2630 | DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_mark_dead_nodes); | ||
2631 | |||
2632 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_begin); | ||
2633 | |||
2634 | DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_end); | ||
2635 | |||
2636 | DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_filldir); | ||
2637 | |||
2638 | DEFINE_OCFS2_INT_EVENT(ocfs2_recover_orphans); | ||
2639 | |||
2640 | DEFINE_OCFS2_ULL_EVENT(ocfs2_recover_orphans_iput); | ||
2641 | |||
2642 | DEFINE_OCFS2_INT_EVENT(ocfs2_wait_on_mount); | ||
2643 | |||
2644 | /* End of trace events for fs/ocfs2/journal.c. */ | ||
2645 | |||
2646 | /* Trace events for fs/ocfs2/buffer_head_io.c. */ | ||
2647 | |||
2648 | DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_read_blocks_sync); | ||
2649 | |||
2650 | DEFINE_OCFS2_ULL_EVENT(ocfs2_read_blocks_sync_jbd); | ||
2651 | |||
2652 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_read_blocks_from_disk); | ||
2653 | |||
2654 | DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_bh); | ||
2655 | |||
2656 | DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_end); | ||
2657 | |||
2658 | TRACE_EVENT(ocfs2_write_block, | ||
2659 | TP_PROTO(unsigned long long block, void *ci), | ||
2660 | TP_ARGS(block, ci), | ||
2661 | TP_STRUCT__entry( | ||
2662 | __field(unsigned long long, block) | ||
2663 | __field(void *, ci) | ||
2664 | ), | ||
2665 | TP_fast_assign( | ||
2666 | __entry->block = block; | ||
2667 | __entry->ci = ci; | ||
2668 | ), | ||
2669 | TP_printk("%llu %p", __entry->block, __entry->ci) | ||
2670 | ); | ||
2671 | |||
2672 | TRACE_EVENT(ocfs2_read_blocks_begin, | ||
2673 | TP_PROTO(void *ci, unsigned long long block, | ||
2674 | unsigned int nr, int flags), | ||
2675 | TP_ARGS(ci, block, nr, flags), | ||
2676 | TP_STRUCT__entry( | ||
2677 | __field(void *, ci) | ||
2678 | __field(unsigned long long, block) | ||
2679 | __field(unsigned int, nr) | ||
2680 | __field(int, flags) | ||
2681 | ), | ||
2682 | TP_fast_assign( | ||
2683 | __entry->ci = ci; | ||
2684 | __entry->block = block; | ||
2685 | __entry->nr = nr; | ||
2686 | __entry->flags = flags; | ||
2687 | ), | ||
2688 | TP_printk("%p %llu %u %d", __entry->ci, __entry->block, | ||
2689 | __entry->nr, __entry->flags) | ||
2690 | ); | ||
2691 | |||
2692 | /* End of trace events for fs/ocfs2/buffer_head_io.c. */ | ||
2693 | |||
2694 | /* Trace events for fs/ocfs2/uptodate.c. */ | ||
2695 | |||
2696 | DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_copied_metadata_tree); | ||
2697 | |||
2698 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_metadata_cache_purge); | ||
2699 | |||
2700 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_buffer_cached_begin); | ||
2701 | |||
2702 | TRACE_EVENT(ocfs2_buffer_cached_end, | ||
2703 | TP_PROTO(int index, void *item), | ||
2704 | TP_ARGS(index, item), | ||
2705 | TP_STRUCT__entry( | ||
2706 | __field(int, index) | ||
2707 | __field(void *, item) | ||
2708 | ), | ||
2709 | TP_fast_assign( | ||
2710 | __entry->index = index; | ||
2711 | __entry->item = item; | ||
2712 | ), | ||
2713 | TP_printk("%d %p", __entry->index, __entry->item) | ||
2714 | ); | ||
2715 | |||
2716 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_append_cache_array); | ||
2717 | |||
2718 | DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_insert_cache_tree); | ||
2719 | |||
2720 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_expand_cache); | ||
2721 | |||
2722 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_set_buffer_uptodate); | ||
2723 | |||
2724 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_set_buffer_uptodate_begin); | ||
2725 | |||
2726 | DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_remove_metadata_array); | ||
2727 | |||
2728 | DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_remove_metadata_tree); | ||
2729 | |||
2730 | DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_remove_block_from_cache); | ||
2731 | |||
2732 | /* End of trace events for fs/ocfs2/uptodate.c. */ | ||
2733 | #endif /* _TRACE_OCFS2_H */ | ||
2734 | |||
2735 | /* This part must be outside protection */ | ||
2736 | #undef TRACE_INCLUDE_PATH | ||
2737 | #define TRACE_INCLUDE_PATH . | ||
2738 | #define TRACE_INCLUDE_FILE ocfs2_trace | ||
2739 | #include <trace/define_trace.h> | ||
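The header above is built entirely from the standard kernel tracepoint macros: TRACE_EVENT() declares one complete event (prototype, stored fields, assignment logic and output format), while DECLARE_EVENT_CLASS() plus DEFINE_EVENT() let many events share a single field layout, which is why most ocfs2 events appear above as single-line DEFINE_OCFS2_*_EVENT() wrappers (the wrapper macros themselves are defined in the earlier, not-shown part of this header). A minimal sketch of the pattern follows; the event name ocfs2_sample is hypothetical and not part of this patch, but the __field()/__string()/__assign_str()/__get_str() helpers are the real ones used throughout the header.

TRACE_EVENT(ocfs2_sample,
	TP_PROTO(unsigned long long blkno, const char *name, int status),
	TP_ARGS(blkno, name, status),
	TP_STRUCT__entry(
		/* each __field() reserves fixed-size space in the ring buffer */
		__field(unsigned long long, blkno)
		__field(int, status)
		/* __string() reserves strlen(name) + 1 bytes per recorded event */
		__string(name, name)
	),
	TP_fast_assign(
		/* runs at the call site, only when the event is enabled */
		__entry->blkno = blkno;
		__entry->status = status;
		__assign_str(name, name);
	),
	/* format string is applied when the buffer is read, not when written */
	TP_printk("%llu %s %d", __entry->blkno, __get_str(name), __entry->status)
);

/* The matching call site in a .c file that includes ocfs2_trace.h would be:
 *	trace_ocfs2_sample(blkno, name, status);
 */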
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index a73f64166481..279aef68025b 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/writeback.h> | 11 | #include <linux/writeback.h> |
12 | #include <linux/workqueue.h> | 12 | #include <linux/workqueue.h> |
13 | 13 | ||
14 | #define MLOG_MASK_PREFIX ML_QUOTA | ||
15 | #include <cluster/masklog.h> | 14 | #include <cluster/masklog.h> |
16 | 15 | ||
17 | #include "ocfs2_fs.h" | 16 | #include "ocfs2_fs.h" |
@@ -27,6 +26,7 @@ | |||
27 | #include "super.h" | 26 | #include "super.h" |
28 | #include "buffer_head_io.h" | 27 | #include "buffer_head_io.h" |
29 | #include "quota.h" | 28 | #include "quota.h" |
29 | #include "ocfs2_trace.h" | ||
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Locking of quotas with OCFS2 is rather complex. Here are rules that | 32 | * Locking of quotas with OCFS2 is rather complex. Here are rules that |
@@ -130,8 +130,7 @@ int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh) | |||
130 | struct ocfs2_disk_dqtrailer *dqt = | 130 | struct ocfs2_disk_dqtrailer *dqt = |
131 | ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data); | 131 | ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data); |
132 | 132 | ||
133 | mlog(0, "Validating quota block %llu\n", | 133 | trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr); |
134 | (unsigned long long)bh->b_blocknr); | ||
135 | 134 | ||
136 | BUG_ON(!buffer_uptodate(bh)); | 135 | BUG_ON(!buffer_uptodate(bh)); |
137 | 136 | ||
@@ -341,8 +340,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type) | |||
341 | u64 pcount; | 340 | u64 pcount; |
342 | int status; | 341 | int status; |
343 | 342 | ||
344 | mlog_entry_void(); | ||
345 | |||
346 | /* Read global header */ | 343 | /* Read global header */ |
347 | gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type], | 344 | gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type], |
348 | OCFS2_INVALID_SLOT); | 345 | OCFS2_INVALID_SLOT); |
@@ -402,7 +399,8 @@ int ocfs2_global_read_info(struct super_block *sb, int type) | |||
402 | msecs_to_jiffies(oinfo->dqi_syncms)); | 399 | msecs_to_jiffies(oinfo->dqi_syncms)); |
403 | 400 | ||
404 | out_err: | 401 | out_err: |
405 | mlog_exit(status); | 402 | if (status) |
403 | mlog_errno(status); | ||
406 | return status; | 404 | return status; |
407 | out_unlock: | 405 | out_unlock: |
408 | ocfs2_unlock_global_qf(oinfo, 0); | 406 | ocfs2_unlock_global_qf(oinfo, 0); |
@@ -508,9 +506,10 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing) | |||
508 | olditime = dquot->dq_dqb.dqb_itime; | 506 | olditime = dquot->dq_dqb.dqb_itime; |
509 | oldbtime = dquot->dq_dqb.dqb_btime; | 507 | oldbtime = dquot->dq_dqb.dqb_btime; |
510 | ocfs2_global_disk2memdqb(dquot, &dqblk); | 508 | ocfs2_global_disk2memdqb(dquot, &dqblk); |
511 | mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n", | 509 | trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace, |
512 | dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange, | 510 | (long long)spacechange, |
513 | dquot->dq_dqb.dqb_curinodes, (long long)inodechange); | 511 | dquot->dq_dqb.dqb_curinodes, |
512 | (long long)inodechange); | ||
514 | if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags)) | 513 | if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags)) |
515 | dquot->dq_dqb.dqb_curspace += spacechange; | 514 | dquot->dq_dqb.dqb_curspace += spacechange; |
516 | if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags)) | 515 | if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags)) |
@@ -594,8 +593,8 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type) | |||
594 | struct ocfs2_super *osb = OCFS2_SB(sb); | 593 | struct ocfs2_super *osb = OCFS2_SB(sb); |
595 | int status = 0; | 594 | int status = 0; |
596 | 595 | ||
597 | mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id, | 596 | trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type, |
598 | dquot->dq_type, type, sb->s_id); | 597 | type, sb->s_id); |
599 | if (type != dquot->dq_type) | 598 | if (type != dquot->dq_type) |
600 | goto out; | 599 | goto out; |
601 | status = ocfs2_lock_global_qf(oinfo, 1); | 600 | status = ocfs2_lock_global_qf(oinfo, 1); |
@@ -621,7 +620,6 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type) | |||
621 | out_ilock: | 620 | out_ilock: |
622 | ocfs2_unlock_global_qf(oinfo, 1); | 621 | ocfs2_unlock_global_qf(oinfo, 1); |
623 | out: | 622 | out: |
624 | mlog_exit(status); | ||
625 | return status; | 623 | return status; |
626 | } | 624 | } |
627 | 625 | ||
@@ -647,7 +645,7 @@ static int ocfs2_write_dquot(struct dquot *dquot) | |||
647 | struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); | 645 | struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); |
648 | int status = 0; | 646 | int status = 0; |
649 | 647 | ||
650 | mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); | 648 | trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type); |
651 | 649 | ||
652 | handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS); | 650 | handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS); |
653 | if (IS_ERR(handle)) { | 651 | if (IS_ERR(handle)) { |
@@ -660,7 +658,6 @@ static int ocfs2_write_dquot(struct dquot *dquot) | |||
660 | mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex); | 658 | mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex); |
661 | ocfs2_commit_trans(osb, handle); | 659 | ocfs2_commit_trans(osb, handle); |
662 | out: | 660 | out: |
663 | mlog_exit(status); | ||
664 | return status; | 661 | return status; |
665 | } | 662 | } |
666 | 663 | ||
@@ -686,7 +683,7 @@ static int ocfs2_release_dquot(struct dquot *dquot) | |||
686 | struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); | 683 | struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); |
687 | int status = 0; | 684 | int status = 0; |
688 | 685 | ||
689 | mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); | 686 | trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type); |
690 | 687 | ||
691 | mutex_lock(&dquot->dq_lock); | 688 | mutex_lock(&dquot->dq_lock); |
692 | /* Check whether we are not racing with some other dqget() */ | 689 | /* Check whether we are not racing with some other dqget() */ |
@@ -722,7 +719,8 @@ out_ilock: | |||
722 | ocfs2_unlock_global_qf(oinfo, 1); | 719 | ocfs2_unlock_global_qf(oinfo, 1); |
723 | out: | 720 | out: |
724 | mutex_unlock(&dquot->dq_lock); | 721 | mutex_unlock(&dquot->dq_lock); |
725 | mlog_exit(status); | 722 | if (status) |
723 | mlog_errno(status); | ||
726 | return status; | 724 | return status; |
727 | } | 725 | } |
728 | 726 | ||
@@ -743,7 +741,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot) | |||
743 | int need_alloc = ocfs2_global_qinit_alloc(sb, type); | 741 | int need_alloc = ocfs2_global_qinit_alloc(sb, type); |
744 | handle_t *handle; | 742 | handle_t *handle; |
745 | 743 | ||
746 | mlog_entry("id=%u, type=%d", dquot->dq_id, type); | 744 | trace_ocfs2_acquire_dquot(dquot->dq_id, type); |
747 | mutex_lock(&dquot->dq_lock); | 745 | mutex_lock(&dquot->dq_lock); |
748 | /* | 746 | /* |
749 | * We need an exclusive lock, because we're going to update use count | 747 | * We need an exclusive lock, because we're going to update use count |
@@ -809,7 +807,8 @@ out_dq: | |||
809 | set_bit(DQ_ACTIVE_B, &dquot->dq_flags); | 807 | set_bit(DQ_ACTIVE_B, &dquot->dq_flags); |
810 | out: | 808 | out: |
811 | mutex_unlock(&dquot->dq_lock); | 809 | mutex_unlock(&dquot->dq_lock); |
812 | mlog_exit(status); | 810 | if (status) |
811 | mlog_errno(status); | ||
813 | return status; | 812 | return status; |
814 | } | 813 | } |
815 | 814 | ||
@@ -829,7 +828,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot) | |||
829 | handle_t *handle; | 828 | handle_t *handle; |
830 | struct ocfs2_super *osb = OCFS2_SB(sb); | 829 | struct ocfs2_super *osb = OCFS2_SB(sb); |
831 | 830 | ||
832 | mlog_entry("id=%u, type=%d", dquot->dq_id, type); | 831 | trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type); |
833 | 832 | ||
834 | /* In case user set some limits, sync dquot immediately to global | 833 | /* In case user set some limits, sync dquot immediately to global |
835 | * quota file so that information propagates quicker */ | 834 | * quota file so that information propagates quicker */ |
@@ -866,7 +865,8 @@ out_dlock: | |||
866 | out_ilock: | 865 | out_ilock: |
867 | ocfs2_unlock_global_qf(oinfo, 1); | 866 | ocfs2_unlock_global_qf(oinfo, 1); |
868 | out: | 867 | out: |
869 | mlog_exit(status); | 868 | if (status) |
869 | mlog_errno(status); | ||
870 | return status; | 870 | return status; |
871 | } | 871 | } |
872 | 872 | ||
@@ -877,8 +877,6 @@ static int ocfs2_write_info(struct super_block *sb, int type) | |||
877 | int status = 0; | 877 | int status = 0; |
878 | struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; | 878 | struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; |
879 | 879 | ||
880 | mlog_entry_void(); | ||
881 | |||
882 | status = ocfs2_lock_global_qf(oinfo, 1); | 880 | status = ocfs2_lock_global_qf(oinfo, 1); |
883 | if (status < 0) | 881 | if (status < 0) |
884 | goto out; | 882 | goto out; |
@@ -893,7 +891,8 @@ static int ocfs2_write_info(struct super_block *sb, int type) | |||
893 | out_ilock: | 891 | out_ilock: |
894 | ocfs2_unlock_global_qf(oinfo, 1); | 892 | ocfs2_unlock_global_qf(oinfo, 1); |
895 | out: | 893 | out: |
896 | mlog_exit(status); | 894 | if (status) |
895 | mlog_errno(status); | ||
897 | return status; | 896 | return status; |
898 | } | 897 | } |
899 | 898 | ||
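The quota_global.c hunks above follow one pattern throughout: the function-entry mlog_entry()/mlog(0, ...) calls become typed tracepoints, and the unconditional mlog_exit(status) on the return path becomes an error-only mlog_errno(status), so successful paths no longer generate masklog traffic. A minimal sketch of the resulting shape is below; ocfs2_quota_sample_op() is a hypothetical function used only for illustration, while trace_ocfs2_write_dquot() and mlog_errno() are the real calls introduced or kept by this patch.

static int ocfs2_quota_sample_op(struct dquot *dquot)
{
	int status = 0;

	/* old: mlog_entry("id=%u, type=%d", ...);  new: typed tracepoint */
	trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type);

	/* ... quota work that may set a negative status ... */

	/* old: mlog_exit(status) on every return;  new: failures only */
	if (status)
		mlog_errno(status);
	return status;
}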
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index dc78764ccc4c..dc8007fc9247 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/quotaops.h> | 8 | #include <linux/quotaops.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | 10 | ||
11 | #define MLOG_MASK_PREFIX ML_QUOTA | ||
12 | #include <cluster/masklog.h> | 11 | #include <cluster/masklog.h> |
13 | 12 | ||
14 | #include "ocfs2_fs.h" | 13 | #include "ocfs2_fs.h" |
@@ -23,6 +22,7 @@ | |||
23 | #include "quota.h" | 22 | #include "quota.h" |
24 | #include "uptodate.h" | 23 | #include "uptodate.h" |
25 | #include "super.h" | 24 | #include "super.h" |
25 | #include "ocfs2_trace.h" | ||
26 | 26 | ||
27 | /* Number of local quota structures per block */ | 27 | /* Number of local quota structures per block */ |
28 | static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) | 28 | static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) |
@@ -475,7 +475,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, | |||
475 | struct ocfs2_recovery_chunk *rchunk, *next; | 475 | struct ocfs2_recovery_chunk *rchunk, *next; |
476 | qsize_t spacechange, inodechange; | 476 | qsize_t spacechange, inodechange; |
477 | 477 | ||
478 | mlog_entry("ino=%lu type=%u", (unsigned long)lqinode->i_ino, type); | 478 | trace_ocfs2_recover_local_quota_file((unsigned long)lqinode->i_ino, type); |
479 | 479 | ||
480 | list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) { | 480 | list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) { |
481 | chunk = rchunk->rc_chunk; | 481 | chunk = rchunk->rc_chunk; |
@@ -575,7 +575,8 @@ out_put_bh: | |||
575 | } | 575 | } |
576 | if (status < 0) | 576 | if (status < 0) |
577 | free_recovery_list(&(rec->r_list[type])); | 577 | free_recovery_list(&(rec->r_list[type])); |
578 | mlog_exit(status); | 578 | if (status) |
579 | mlog_errno(status); | ||
579 | return status; | 580 | return status; |
580 | } | 581 | } |
581 | 582 | ||
@@ -600,7 +601,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, | |||
600 | for (type = 0; type < MAXQUOTAS; type++) { | 601 | for (type = 0; type < MAXQUOTAS; type++) { |
601 | if (list_empty(&(rec->r_list[type]))) | 602 | if (list_empty(&(rec->r_list[type]))) |
602 | continue; | 603 | continue; |
603 | mlog(0, "Recovering quota in slot %d\n", slot_num); | 604 | trace_ocfs2_finish_quota_recovery(slot_num); |
604 | lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num); | 605 | lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num); |
605 | if (!lqinode) { | 606 | if (!lqinode) { |
606 | status = -ENOENT; | 607 | status = -ENOENT; |
@@ -882,9 +883,10 @@ static void olq_set_dquot(struct buffer_head *bh, void *private) | |||
882 | dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes - | 883 | dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes - |
883 | od->dq_originodes); | 884 | od->dq_originodes); |
884 | spin_unlock(&dq_data_lock); | 885 | spin_unlock(&dq_data_lock); |
885 | mlog(0, "Writing local dquot %u space %lld inodes %lld\n", | 886 | trace_olq_set_dquot( |
886 | od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod), | 887 | (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod), |
887 | (long long)le64_to_cpu(dqblk->dqb_inodemod)); | 888 | (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod), |
889 | od->dq_dquot.dq_id); | ||
888 | } | 890 | } |
889 | 891 | ||
890 | /* Write dquot to local quota file */ | 892 | /* Write dquot to local quota file */ |
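The fs/ocfs2/refcounttree.c conversion that follows repeatedly passes the refcount tree owner, obtained from ocfs2_metadata_cache_owner(), as the first tracepoint argument, so each trace record identifies which metadata cache (and hence which inode's tree) it belongs to, something the replaced mlog text often omitted. A hedged sketch of the resulting call-site shape, assuming it sits inside fs/ocfs2 with ocfs2_trace.h included: the wrapper function ocfs2_sample_trace_owner() is illustrative only, while trace_ocfs2_change_refcount_rec() and ocfs2_metadata_cache_owner() are taken from the patch itself.

static void ocfs2_sample_trace_owner(struct ocfs2_caching_info *ci,
				     struct ocfs2_refcount_rec *rec,
				     int index, int change)
{
	/* the owner block number ties the event to a specific refcount tree */
	trace_ocfs2_change_refcount_rec(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		index, le32_to_cpu(rec->r_refcount), change);
}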
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index c384d634872a..5d32749c896d 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -16,7 +16,6 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/sort.h> | 18 | #include <linux/sort.h> |
19 | #define MLOG_MASK_PREFIX ML_REFCOUNT | ||
20 | #include <cluster/masklog.h> | 19 | #include <cluster/masklog.h> |
21 | #include "ocfs2.h" | 20 | #include "ocfs2.h" |
22 | #include "inode.h" | 21 | #include "inode.h" |
@@ -34,6 +33,7 @@ | |||
34 | #include "aops.h" | 33 | #include "aops.h" |
35 | #include "xattr.h" | 34 | #include "xattr.h" |
36 | #include "namei.h" | 35 | #include "namei.h" |
36 | #include "ocfs2_trace.h" | ||
37 | 37 | ||
38 | #include <linux/bio.h> | 38 | #include <linux/bio.h> |
39 | #include <linux/blkdev.h> | 39 | #include <linux/blkdev.h> |
@@ -84,8 +84,7 @@ static int ocfs2_validate_refcount_block(struct super_block *sb, | |||
84 | struct ocfs2_refcount_block *rb = | 84 | struct ocfs2_refcount_block *rb = |
85 | (struct ocfs2_refcount_block *)bh->b_data; | 85 | (struct ocfs2_refcount_block *)bh->b_data; |
86 | 86 | ||
87 | mlog(0, "Validating refcount block %llu\n", | 87 | trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr); |
88 | (unsigned long long)bh->b_blocknr); | ||
89 | 88 | ||
90 | BUG_ON(!buffer_uptodate(bh)); | 89 | BUG_ON(!buffer_uptodate(bh)); |
91 | 90 | ||
@@ -545,8 +544,8 @@ void ocfs2_purge_refcount_trees(struct ocfs2_super *osb) | |||
545 | while ((node = rb_last(root)) != NULL) { | 544 | while ((node = rb_last(root)) != NULL) { |
546 | tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); | 545 | tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); |
547 | 546 | ||
548 | mlog(0, "Purge tree %llu\n", | 547 | trace_ocfs2_purge_refcount_trees( |
549 | (unsigned long long) tree->rf_blkno); | 548 | (unsigned long long) tree->rf_blkno); |
550 | 549 | ||
551 | rb_erase(&tree->rf_node, root); | 550 | rb_erase(&tree->rf_node, root); |
552 | ocfs2_free_refcount_tree(tree); | 551 | ocfs2_free_refcount_tree(tree); |
@@ -575,7 +574,8 @@ static int ocfs2_create_refcount_tree(struct inode *inode, | |||
575 | 574 | ||
576 | BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL); | 575 | BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL); |
577 | 576 | ||
578 | mlog(0, "create tree for inode %lu\n", inode->i_ino); | 577 | trace_ocfs2_create_refcount_tree( |
578 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
579 | 579 | ||
580 | ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); | 580 | ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); |
581 | if (ret) { | 581 | if (ret) { |
@@ -646,8 +646,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode, | |||
646 | di->i_refcount_loc = cpu_to_le64(first_blkno); | 646 | di->i_refcount_loc = cpu_to_le64(first_blkno); |
647 | spin_unlock(&oi->ip_lock); | 647 | spin_unlock(&oi->ip_lock); |
648 | 648 | ||
649 | mlog(0, "created tree for inode %lu, refblock %llu\n", | 649 | trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno); |
650 | inode->i_ino, (unsigned long long)first_blkno); | ||
651 | 650 | ||
652 | ocfs2_journal_dirty(handle, di_bh); | 651 | ocfs2_journal_dirty(handle, di_bh); |
653 | 652 | ||
@@ -1256,8 +1255,9 @@ static int ocfs2_change_refcount_rec(handle_t *handle, | |||
1256 | goto out; | 1255 | goto out; |
1257 | } | 1256 | } |
1258 | 1257 | ||
1259 | mlog(0, "change index %d, old count %u, change %d\n", index, | 1258 | trace_ocfs2_change_refcount_rec( |
1260 | le32_to_cpu(rec->r_refcount), change); | 1259 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
1260 | index, le32_to_cpu(rec->r_refcount), change); | ||
1261 | le32_add_cpu(&rec->r_refcount, change); | 1261 | le32_add_cpu(&rec->r_refcount, change); |
1262 | 1262 | ||
1263 | if (!rec->r_refcount) { | 1263 | if (!rec->r_refcount) { |
@@ -1353,8 +1353,8 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle, | |||
1353 | 1353 | ||
1354 | ocfs2_journal_dirty(handle, ref_root_bh); | 1354 | ocfs2_journal_dirty(handle, ref_root_bh); |
1355 | 1355 | ||
1356 | mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno, | 1356 | trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno, |
1357 | le16_to_cpu(new_rb->rf_records.rl_used)); | 1357 | le16_to_cpu(new_rb->rf_records.rl_used)); |
1358 | 1358 | ||
1359 | *ref_leaf_bh = new_bh; | 1359 | *ref_leaf_bh = new_bh; |
1360 | new_bh = NULL; | 1360 | new_bh = NULL; |
@@ -1466,9 +1466,9 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh, | |||
1466 | (struct ocfs2_refcount_block *)new_bh->b_data; | 1466 | (struct ocfs2_refcount_block *)new_bh->b_data; |
1467 | struct ocfs2_refcount_list *new_rl = &new_rb->rf_records; | 1467 | struct ocfs2_refcount_list *new_rl = &new_rb->rf_records; |
1468 | 1468 | ||
1469 | mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n", | 1469 | trace_ocfs2_divide_leaf_refcount_block( |
1470 | (unsigned long long)ref_leaf_bh->b_blocknr, | 1470 | (unsigned long long)ref_leaf_bh->b_blocknr, |
1471 | le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used)); | 1471 | le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used)); |
1472 | 1472 | ||
1473 | /* | 1473 | /* |
1474 | * XXX: Improvement later. | 1474 | * XXX: Improvement later. |
@@ -1601,8 +1601,8 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle, | |||
1601 | 1601 | ||
1602 | ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh); | 1602 | ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh); |
1603 | 1603 | ||
1604 | mlog(0, "insert new leaf block %llu at %u\n", | 1604 | trace_ocfs2_new_leaf_refcount_block( |
1605 | (unsigned long long)new_bh->b_blocknr, new_cpos); | 1605 | (unsigned long long)new_bh->b_blocknr, new_cpos); |
1606 | 1606 | ||
1607 | /* Insert the new leaf block with the specific offset cpos. */ | 1607 | /* Insert the new leaf block with the specific offset cpos. */ |
1608 | ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr, | 1608 | ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr, |
@@ -1794,11 +1794,10 @@ static int ocfs2_insert_refcount_rec(handle_t *handle, | |||
1794 | (le16_to_cpu(rf_list->rl_used) - index) * | 1794 | (le16_to_cpu(rf_list->rl_used) - index) * |
1795 | sizeof(struct ocfs2_refcount_rec)); | 1795 | sizeof(struct ocfs2_refcount_rec)); |
1796 | 1796 | ||
1797 | mlog(0, "insert refcount record start %llu, len %u, count %u " | 1797 | trace_ocfs2_insert_refcount_rec( |
1798 | "to leaf block %llu at index %d\n", | 1798 | (unsigned long long)ref_leaf_bh->b_blocknr, index, |
1799 | (unsigned long long)le64_to_cpu(rec->r_cpos), | 1799 | (unsigned long long)le64_to_cpu(rec->r_cpos), |
1800 | le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount), | 1800 | le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount)); |
1801 | (unsigned long long)ref_leaf_bh->b_blocknr, index); | ||
1802 | 1801 | ||
1803 | rf_list->rl_recs[index] = *rec; | 1802 | rf_list->rl_recs[index] = *rec; |
1804 | 1803 | ||
@@ -1850,10 +1849,12 @@ static int ocfs2_split_refcount_rec(handle_t *handle, | |||
1850 | 1849 | ||
1851 | BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); | 1850 | BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); |
1852 | 1851 | ||
1853 | mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n", | 1852 | trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos), |
1854 | le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters), | 1853 | le32_to_cpu(orig_rec->r_clusters), |
1855 | le64_to_cpu(split_rec->r_cpos), | 1854 | le32_to_cpu(orig_rec->r_refcount), |
1856 | le32_to_cpu(split_rec->r_clusters)); | 1855 | le64_to_cpu(split_rec->r_cpos), |
1856 | le32_to_cpu(split_rec->r_clusters), | ||
1857 | le32_to_cpu(split_rec->r_refcount)); | ||
1857 | 1858 | ||
1858 | /* | 1859 | /* |
1859 | * If we just need to split the header or tail clusters, | 1860 | * If we just need to split the header or tail clusters, |
@@ -1967,12 +1968,11 @@ static int ocfs2_split_refcount_rec(handle_t *handle, | |||
1967 | 1968 | ||
1968 | if (split_rec->r_refcount) { | 1969 | if (split_rec->r_refcount) { |
1969 | rf_list->rl_recs[index] = *split_rec; | 1970 | rf_list->rl_recs[index] = *split_rec; |
1970 | mlog(0, "insert refcount record start %llu, len %u, count %u " | 1971 | trace_ocfs2_split_refcount_rec_insert( |
1971 | "to leaf block %llu at index %d\n", | 1972 | (unsigned long long)ref_leaf_bh->b_blocknr, index, |
1972 | (unsigned long long)le64_to_cpu(split_rec->r_cpos), | 1973 | (unsigned long long)le64_to_cpu(split_rec->r_cpos), |
1973 | le32_to_cpu(split_rec->r_clusters), | 1974 | le32_to_cpu(split_rec->r_clusters), |
1974 | le32_to_cpu(split_rec->r_refcount), | 1975 | le32_to_cpu(split_rec->r_refcount)); |
1975 | (unsigned long long)ref_leaf_bh->b_blocknr, index); | ||
1976 | 1976 | ||
1977 | if (merge) | 1977 | if (merge) |
1978 | ocfs2_refcount_rec_merge(rb, index); | 1978 | ocfs2_refcount_rec_merge(rb, index); |
@@ -1997,7 +1997,7 @@ static int __ocfs2_increase_refcount(handle_t *handle, | |||
1997 | struct ocfs2_refcount_rec rec; | 1997 | struct ocfs2_refcount_rec rec; |
1998 | unsigned int set_len = 0; | 1998 | unsigned int set_len = 0; |
1999 | 1999 | ||
2000 | mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n", | 2000 | trace_ocfs2_increase_refcount_begin( |
2001 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | 2001 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
2002 | (unsigned long long)cpos, len); | 2002 | (unsigned long long)cpos, len); |
2003 | 2003 | ||
@@ -2024,9 +2024,9 @@ static int __ocfs2_increase_refcount(handle_t *handle, | |||
2024 | */ | 2024 | */ |
2025 | if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos && | 2025 | if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos && |
2026 | set_len <= len) { | 2026 | set_len <= len) { |
2027 | mlog(0, "increase refcount rec, start %llu, len %u, " | 2027 | trace_ocfs2_increase_refcount_change( |
2028 | "count %u\n", (unsigned long long)cpos, set_len, | 2028 | (unsigned long long)cpos, set_len, |
2029 | le32_to_cpu(rec.r_refcount)); | 2029 | le32_to_cpu(rec.r_refcount)); |
2030 | ret = ocfs2_change_refcount_rec(handle, ci, | 2030 | ret = ocfs2_change_refcount_rec(handle, ci, |
2031 | ref_leaf_bh, index, | 2031 | ref_leaf_bh, index, |
2032 | merge, 1); | 2032 | merge, 1); |
@@ -2037,7 +2037,7 @@ static int __ocfs2_increase_refcount(handle_t *handle, | |||
2037 | } else if (!rec.r_refcount) { | 2037 | } else if (!rec.r_refcount) { |
2038 | rec.r_refcount = cpu_to_le32(1); | 2038 | rec.r_refcount = cpu_to_le32(1); |
2039 | 2039 | ||
2040 | mlog(0, "insert refcount rec, start %llu, len %u\n", | 2040 | trace_ocfs2_increase_refcount_insert( |
2041 | (unsigned long long)le64_to_cpu(rec.r_cpos), | 2041 | (unsigned long long)le64_to_cpu(rec.r_cpos), |
2042 | set_len); | 2042 | set_len); |
2043 | ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh, | 2043 | ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh, |
@@ -2055,8 +2055,7 @@ static int __ocfs2_increase_refcount(handle_t *handle, | |||
2055 | rec.r_clusters = cpu_to_le32(set_len); | 2055 | rec.r_clusters = cpu_to_le32(set_len); |
2056 | le32_add_cpu(&rec.r_refcount, 1); | 2056 | le32_add_cpu(&rec.r_refcount, 1); |
2057 | 2057 | ||
2058 | mlog(0, "split refcount rec, start %llu, " | 2058 | trace_ocfs2_increase_refcount_split( |
2059 | "len %u, count %u\n", | ||
2060 | (unsigned long long)le64_to_cpu(rec.r_cpos), | 2059 | (unsigned long long)le64_to_cpu(rec.r_cpos), |
2061 | set_len, le32_to_cpu(rec.r_refcount)); | 2060 | set_len, le32_to_cpu(rec.r_refcount)); |
2062 | ret = ocfs2_split_refcount_rec(handle, ci, | 2061 | ret = ocfs2_split_refcount_rec(handle, ci, |
@@ -2095,6 +2094,11 @@ static int ocfs2_remove_refcount_extent(handle_t *handle, | |||
2095 | 2094 | ||
2096 | BUG_ON(rb->rf_records.rl_used); | 2095 | BUG_ON(rb->rf_records.rl_used); |
2097 | 2096 | ||
2097 | trace_ocfs2_remove_refcount_extent( | ||
2098 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | ||
2099 | (unsigned long long)ref_leaf_bh->b_blocknr, | ||
2100 | le32_to_cpu(rb->rf_cpos)); | ||
2101 | |||
2098 | ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); | 2102 | ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); |
2099 | ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), | 2103 | ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), |
2100 | 1, meta_ac, dealloc); | 2104 | 1, meta_ac, dealloc); |
@@ -2137,7 +2141,7 @@ static int ocfs2_remove_refcount_extent(handle_t *handle, | |||
2137 | if (!rb->rf_list.l_next_free_rec) { | 2141 | if (!rb->rf_list.l_next_free_rec) { |
2138 | BUG_ON(rb->rf_clusters); | 2142 | BUG_ON(rb->rf_clusters); |
2139 | 2143 | ||
2140 | mlog(0, "reset refcount tree root %llu to be a record block.\n", | 2144 | trace_ocfs2_restore_refcount_block( |
2141 | (unsigned long long)ref_root_bh->b_blocknr); | 2145 | (unsigned long long)ref_root_bh->b_blocknr); |
2142 | 2146 | ||
2143 | rb->rf_flags = 0; | 2147 | rb->rf_flags = 0; |
@@ -2184,6 +2188,10 @@ static int ocfs2_decrease_refcount_rec(handle_t *handle, | |||
2184 | BUG_ON(cpos + len > | 2188 | BUG_ON(cpos + len > |
2185 | le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); | 2189 | le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); |
2186 | 2190 | ||
2191 | trace_ocfs2_decrease_refcount_rec( | ||
2192 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | ||
2193 | (unsigned long long)cpos, len); | ||
2194 | |||
2187 | if (cpos == le64_to_cpu(rec->r_cpos) && | 2195 | if (cpos == le64_to_cpu(rec->r_cpos) && |
2188 | len == le32_to_cpu(rec->r_clusters)) | 2196 | len == le32_to_cpu(rec->r_clusters)) |
2189 | ret = ocfs2_change_refcount_rec(handle, ci, | 2197 | ret = ocfs2_change_refcount_rec(handle, ci, |
@@ -2195,12 +2203,6 @@ static int ocfs2_decrease_refcount_rec(handle_t *handle, | |||
2195 | 2203 | ||
2196 | le32_add_cpu(&split.r_refcount, -1); | 2204 | le32_add_cpu(&split.r_refcount, -1); |
2197 | 2205 | ||
2198 | mlog(0, "split refcount rec, start %llu, " | ||
2199 | "len %u, count %u, original start %llu, len %u\n", | ||
2200 | (unsigned long long)le64_to_cpu(split.r_cpos), | ||
2201 | len, le32_to_cpu(split.r_refcount), | ||
2202 | (unsigned long long)le64_to_cpu(rec->r_cpos), | ||
2203 | le32_to_cpu(rec->r_clusters)); | ||
2204 | ret = ocfs2_split_refcount_rec(handle, ci, | 2206 | ret = ocfs2_split_refcount_rec(handle, ci, |
2205 | ref_root_bh, ref_leaf_bh, | 2207 | ref_root_bh, ref_leaf_bh, |
2206 | &split, index, 1, | 2208 | &split, index, 1, |
@@ -2239,10 +2241,9 @@ static int __ocfs2_decrease_refcount(handle_t *handle, | |||
2239 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | 2241 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); |
2240 | struct buffer_head *ref_leaf_bh = NULL; | 2242 | struct buffer_head *ref_leaf_bh = NULL; |
2241 | 2243 | ||
2242 | mlog(0, "Tree owner %llu, decrease refcount start %llu, " | 2244 | trace_ocfs2_decrease_refcount( |
2243 | "len %u, delete %u\n", | 2245 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
2244 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | 2246 | (unsigned long long)cpos, len, delete); |
2245 | (unsigned long long)cpos, len, delete); | ||
2246 | 2247 | ||
2247 | while (len) { | 2248 | while (len) { |
2248 | ret = ocfs2_get_refcount_rec(ci, ref_root_bh, | 2249 | ret = ocfs2_get_refcount_rec(ci, ref_root_bh, |
@@ -2352,8 +2353,8 @@ static int ocfs2_mark_extent_refcounted(struct inode *inode, | |||
2352 | { | 2353 | { |
2353 | int ret; | 2354 | int ret; |
2354 | 2355 | ||
2355 | mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n", | 2356 | trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno, |
2356 | inode->i_ino, cpos, len, phys); | 2357 | cpos, len, phys); |
2357 | 2358 | ||
2358 | if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { | 2359 | if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { |
2359 | ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " | 2360 | ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " |
@@ -2392,8 +2393,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, | |||
2392 | struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; | 2393 | struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; |
2393 | u32 len; | 2394 | u32 len; |
2394 | 2395 | ||
2395 | mlog(0, "start_cpos %llu, clusters %u\n", | ||
2396 | (unsigned long long)start_cpos, clusters); | ||
2397 | while (clusters) { | 2396 | while (clusters) { |
2398 | ret = ocfs2_get_refcount_rec(ci, ref_root_bh, | 2397 | ret = ocfs2_get_refcount_rec(ci, ref_root_bh, |
2399 | cpos, clusters, &rec, | 2398 | cpos, clusters, &rec, |
@@ -2427,12 +2426,11 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, | |||
2427 | 2426 | ||
2428 | rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; | 2427 | rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; |
2429 | 2428 | ||
2430 | mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu," | 2429 | trace_ocfs2_calc_refcount_meta_credits_iterate( |
2431 | "rec->r_clusters %u, rec->r_refcount %u, index %d\n", | 2430 | recs_add, (unsigned long long)cpos, clusters, |
2432 | recs_add, (unsigned long long)cpos, clusters, | 2431 | (unsigned long long)le64_to_cpu(rec.r_cpos), |
2433 | (unsigned long long)le64_to_cpu(rec.r_cpos), | 2432 | le32_to_cpu(rec.r_clusters), |
2434 | le32_to_cpu(rec.r_clusters), | 2433 | le32_to_cpu(rec.r_refcount), index); |
2435 | le32_to_cpu(rec.r_refcount), index); | ||
2436 | 2434 | ||
2437 | len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + | 2435 | len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + |
2438 | le32_to_cpu(rec.r_clusters)) - cpos; | 2436 | le32_to_cpu(rec.r_clusters)) - cpos; |
@@ -2488,7 +2486,6 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, | |||
2488 | if (!ref_blocks) | 2486 | if (!ref_blocks) |
2489 | goto out; | 2487 | goto out; |
2490 | 2488 | ||
2491 | mlog(0, "we need ref_blocks %d\n", ref_blocks); | ||
2492 | *meta_add += ref_blocks; | 2489 | *meta_add += ref_blocks; |
2493 | *credits += ref_blocks; | 2490 | *credits += ref_blocks; |
2494 | 2491 | ||
@@ -2514,6 +2511,10 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, | |||
2514 | } | 2511 | } |
2515 | 2512 | ||
2516 | out: | 2513 | out: |
2514 | |||
2515 | trace_ocfs2_calc_refcount_meta_credits( | ||
2516 | (unsigned long long)start_cpos, clusters, | ||
2517 | *meta_add, *credits); | ||
2517 | brelse(ref_leaf_bh); | 2518 | brelse(ref_leaf_bh); |
2518 | brelse(prev_bh); | 2519 | brelse(prev_bh); |
2519 | return ret; | 2520 | return ret; |
@@ -2578,8 +2579,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode, | |||
2578 | goto out; | 2579 | goto out; |
2579 | } | 2580 | } |
2580 | 2581 | ||
2581 | mlog(0, "reserve new metadata %d blocks, credits = %d\n", | 2582 | trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits); |
2582 | *ref_blocks, *credits); | ||
2583 | 2583 | ||
2584 | out: | 2584 | out: |
2585 | brelse(ref_root_bh); | 2585 | brelse(ref_root_bh); |
@@ -2886,8 +2886,7 @@ static int ocfs2_lock_refcount_allocators(struct super_block *sb, | |||
2886 | goto out; | 2886 | goto out; |
2887 | } | 2887 | } |
2888 | 2888 | ||
2889 | mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n", | 2889 | trace_ocfs2_lock_refcount_allocators(meta_add, *credits); |
2890 | meta_add, num_clusters, *credits); | ||
2891 | ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add, | 2890 | ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add, |
2892 | meta_ac); | 2891 | meta_ac); |
2893 | if (ret) { | 2892 | if (ret) { |
@@ -2937,8 +2936,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
2937 | loff_t offset, end, map_end; | 2936 | loff_t offset, end, map_end; |
2938 | struct address_space *mapping = context->inode->i_mapping; | 2937 | struct address_space *mapping = context->inode->i_mapping; |
2939 | 2938 | ||
2940 | mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster, | 2939 | trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, |
2941 | new_cluster, new_len, cpos); | 2940 | new_cluster, new_len); |
2942 | 2941 | ||
2943 | readahead_pages = | 2942 | readahead_pages = |
2944 | (ocfs2_cow_contig_clusters(sb) << | 2943 | (ocfs2_cow_contig_clusters(sb) << |
@@ -3031,8 +3030,8 @@ static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, | |||
3031 | struct buffer_head *old_bh = NULL; | 3030 | struct buffer_head *old_bh = NULL; |
3032 | struct buffer_head *new_bh = NULL; | 3031 | struct buffer_head *new_bh = NULL; |
3033 | 3032 | ||
3034 | mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster, | 3033 | trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, |
3035 | new_cluster, new_len); | 3034 | new_cluster, new_len); |
3036 | 3035 | ||
3037 | for (i = 0; i < blocks; i++, old_block++, new_block++) { | 3036 | for (i = 0; i < blocks; i++, old_block++, new_block++) { |
3038 | new_bh = sb_getblk(osb->sb, new_block); | 3037 | new_bh = sb_getblk(osb->sb, new_block); |
@@ -3085,8 +3084,8 @@ static int ocfs2_clear_ext_refcount(handle_t *handle, | |||
3085 | struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); | 3084 | struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); |
3086 | u64 ino = ocfs2_metadata_cache_owner(et->et_ci); | 3085 | u64 ino = ocfs2_metadata_cache_owner(et->et_ci); |
3087 | 3086 | ||
3088 | mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n", | 3087 | trace_ocfs2_clear_ext_refcount((unsigned long long)ino, |
3089 | (unsigned long long)ino, cpos, len, p_cluster, ext_flags); | 3088 | cpos, len, p_cluster, ext_flags); |
3090 | 3089 | ||
3091 | memset(&replace_rec, 0, sizeof(replace_rec)); | 3090 | memset(&replace_rec, 0, sizeof(replace_rec)); |
3092 | replace_rec.e_cpos = cpu_to_le32(cpos); | 3091 | replace_rec.e_cpos = cpu_to_le32(cpos); |
@@ -3141,8 +3140,8 @@ static int ocfs2_replace_clusters(handle_t *handle, | |||
3141 | struct ocfs2_caching_info *ci = context->data_et.et_ci; | 3140 | struct ocfs2_caching_info *ci = context->data_et.et_ci; |
3142 | u64 ino = ocfs2_metadata_cache_owner(ci); | 3141 | u64 ino = ocfs2_metadata_cache_owner(ci); |
3143 | 3142 | ||
3144 | mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n", | 3143 | trace_ocfs2_replace_clusters((unsigned long long)ino, |
3145 | (unsigned long long)ino, cpos, old, new, len, ext_flags); | 3144 | cpos, old, new, len, ext_flags); |
3146 | 3145 | ||
3147 | /*If the old clusters is unwritten, no need to duplicate. */ | 3146 | /*If the old clusters is unwritten, no need to duplicate. */ |
3148 | if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { | 3147 | if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { |
@@ -3236,8 +3235,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, | |||
3236 | struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; | 3235 | struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; |
3237 | struct ocfs2_refcount_rec rec; | 3236 | struct ocfs2_refcount_rec rec; |
3238 | 3237 | ||
3239 | mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n", | 3238 | trace_ocfs2_make_clusters_writable(cpos, p_cluster, |
3240 | cpos, p_cluster, num_clusters, e_flags); | 3239 | num_clusters, e_flags); |
3241 | 3240 | ||
3242 | ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters, | 3241 | ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters, |
3243 | &context->data_et, | 3242 | &context->data_et, |
@@ -3475,9 +3474,9 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, | |||
3475 | goto out; | 3474 | goto out; |
3476 | } | 3475 | } |
3477 | 3476 | ||
3478 | mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, " | 3477 | trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno, |
3479 | "cow_len %u\n", inode->i_ino, | 3478 | cpos, write_len, max_cpos, |
3480 | cpos, write_len, cow_start, cow_len); | 3479 | cow_start, cow_len); |
3481 | 3480 | ||
3482 | BUG_ON(cow_len == 0); | 3481 | BUG_ON(cow_len == 0); |
3483 | 3482 | ||
@@ -3756,8 +3755,7 @@ int ocfs2_add_refcount_flag(struct inode *inode, | |||
3756 | goto out; | 3755 | goto out; |
3757 | } | 3756 | } |
3758 | 3757 | ||
3759 | mlog(0, "reserve new metadata %d, credits = %d\n", | 3758 | trace_ocfs2_add_refcount_flag(ref_blocks, credits); |
3760 | ref_blocks, credits); | ||
3761 | 3759 | ||
3762 | if (ref_blocks) { | 3760 | if (ref_blocks) { |
3763 | ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb), | 3761 | ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb), |
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c index 3e78db361bc7..41ffd36c689c 100644 --- a/fs/ocfs2/reservations.c +++ b/fs/ocfs2/reservations.c | |||
@@ -30,10 +30,10 @@ | |||
30 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
31 | #include <linux/list.h> | 31 | #include <linux/list.h> |
32 | 32 | ||
33 | #define MLOG_MASK_PREFIX ML_RESERVATIONS | ||
34 | #include <cluster/masklog.h> | 33 | #include <cluster/masklog.h> |
35 | 34 | ||
36 | #include "ocfs2.h" | 35 | #include "ocfs2.h" |
36 | #include "ocfs2_trace.h" | ||
37 | 37 | ||
38 | #ifdef CONFIG_OCFS2_DEBUG_FS | 38 | #ifdef CONFIG_OCFS2_DEBUG_FS |
39 | #define OCFS2_CHECK_RESERVATIONS | 39 | #define OCFS2_CHECK_RESERVATIONS |
@@ -321,8 +321,7 @@ static void ocfs2_resv_insert(struct ocfs2_reservation_map *resmap, | |||
321 | 321 | ||
322 | assert_spin_locked(&resv_lock); | 322 | assert_spin_locked(&resv_lock); |
323 | 323 | ||
324 | mlog(0, "Insert reservation start: %u len: %u\n", new->r_start, | 324 | trace_ocfs2_resv_insert(new->r_start, new->r_len); |
325 | new->r_len); | ||
326 | 325 | ||
327 | while (*p) { | 326 | while (*p) { |
328 | parent = *p; | 327 | parent = *p; |
@@ -423,8 +422,8 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap, | |||
423 | unsigned int best_start, best_len = 0; | 422 | unsigned int best_start, best_len = 0; |
424 | int offset, start, found; | 423 | int offset, start, found; |
425 | 424 | ||
426 | mlog(0, "Find %u bits within range (%u, len %u) resmap len: %u\n", | 425 | trace_ocfs2_resmap_find_free_bits_begin(search_start, search_len, |
427 | wanted, search_start, search_len, resmap->m_bitmap_len); | 426 | wanted, resmap->m_bitmap_len); |
428 | 427 | ||
429 | found = best_start = best_len = 0; | 428 | found = best_start = best_len = 0; |
430 | 429 | ||
@@ -463,7 +462,7 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap, | |||
463 | *rlen = best_len; | 462 | *rlen = best_len; |
464 | *rstart = best_start; | 463 | *rstart = best_start; |
465 | 464 | ||
466 | mlog(0, "Found start: %u len: %u\n", best_start, best_len); | 465 | trace_ocfs2_resmap_find_free_bits_end(best_start, best_len); |
467 | 466 | ||
468 | return *rlen; | 467 | return *rlen; |
469 | } | 468 | } |
@@ -487,9 +486,8 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap, | |||
487 | * - our window should be last in all reservations | 486 | * - our window should be last in all reservations |
488 | * - need to make sure we don't go past end of bitmap | 487 | * - need to make sure we don't go past end of bitmap |
489 | */ | 488 | */ |
490 | 489 | trace_ocfs2_resv_find_window_begin(resv->r_start, ocfs2_resv_end(resv), | |
491 | mlog(0, "resv start: %u resv end: %u goal: %u wanted: %u\n", | 490 | goal, wanted, RB_EMPTY_ROOT(root)); |
492 | resv->r_start, ocfs2_resv_end(resv), goal, wanted); | ||
493 | 491 | ||
494 | assert_spin_locked(&resv_lock); | 492 | assert_spin_locked(&resv_lock); |
495 | 493 | ||
@@ -498,9 +496,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap, | |||
498 | * Easiest case - empty tree. We can just take | 496 | * Easiest case - empty tree. We can just take |
499 | * whatever window of free bits we want. | 497 | * whatever window of free bits we want. |
500 | */ | 498 | */ |
501 | |||
502 | mlog(0, "Empty root\n"); | ||
503 | |||
504 | clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal, | 499 | clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal, |
505 | resmap->m_bitmap_len - goal, | 500 | resmap->m_bitmap_len - goal, |
506 | &cstart, &clen); | 501 | &cstart, &clen); |
@@ -524,8 +519,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap, | |||
524 | prev_resv = ocfs2_find_resv_lhs(resmap, goal); | 519 | prev_resv = ocfs2_find_resv_lhs(resmap, goal); |
525 | 520 | ||
526 | if (prev_resv == NULL) { | 521 | if (prev_resv == NULL) { |
527 | mlog(0, "Goal on LHS of leftmost window\n"); | ||
528 | |||
529 | /* | 522 | /* |
530 | * A NULL here means that the search code couldn't | 523 | * A NULL here means that the search code couldn't |
531 | * find a window that starts before goal. | 524 | * find a window that starts before goal. |
@@ -570,13 +563,15 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap, | |||
570 | next_resv = NULL; | 563 | next_resv = NULL; |
571 | } | 564 | } |
572 | 565 | ||
566 | trace_ocfs2_resv_find_window_prev(prev_resv->r_start, | ||
567 | ocfs2_resv_end(prev_resv)); | ||
568 | |||
573 | prev = &prev_resv->r_node; | 569 | prev = &prev_resv->r_node; |
574 | 570 | ||
575 | /* Now we do a linear search for a window, starting at 'prev_rsv' */ | 571 | /* Now we do a linear search for a window, starting at 'prev_rsv' */ |
576 | while (1) { | 572 | while (1) { |
577 | next = rb_next(prev); | 573 | next = rb_next(prev); |
578 | if (next) { | 574 | if (next) { |
579 | mlog(0, "One more resv found in linear search\n"); | ||
580 | next_resv = rb_entry(next, | 575 | next_resv = rb_entry(next, |
581 | struct ocfs2_alloc_reservation, | 576 | struct ocfs2_alloc_reservation, |
582 | r_node); | 577 | r_node); |
@@ -585,7 +580,6 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap, | |||
585 | gap_end = next_resv->r_start - 1; | 580 | gap_end = next_resv->r_start - 1; |
586 | gap_len = gap_end - gap_start + 1; | 581 | gap_len = gap_end - gap_start + 1; |
587 | } else { | 582 | } else { |
588 | mlog(0, "No next node\n"); | ||
589 | /* | 583 | /* |
590 | * We're at the rightmost edge of the | 584 | * We're at the rightmost edge of the |
591 | * tree. See if a reservation between this | 585 | * tree. See if a reservation between this |
@@ -596,6 +590,8 @@ static void __ocfs2_resv_find_window(struct ocfs2_reservation_map *resmap, | |||
596 | gap_end = resmap->m_bitmap_len - 1; | 590 | gap_end = resmap->m_bitmap_len - 1; |
597 | } | 591 | } |
598 | 592 | ||
593 | trace_ocfs2_resv_find_window_next(next ? next_resv->r_start: -1, | ||
594 | next ? ocfs2_resv_end(next_resv) : -1); | ||
599 | /* | 595 | /* |
600 | * No need to check this gap if we have already found | 596 | * No need to check this gap if we have already found |
601 | * a larger region of free bits. | 597 | * a larger region of free bits. |
@@ -654,8 +650,9 @@ static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap, | |||
654 | lru_resv = list_first_entry(&resmap->m_lru, | 650 | lru_resv = list_first_entry(&resmap->m_lru, |
655 | struct ocfs2_alloc_reservation, r_lru); | 651 | struct ocfs2_alloc_reservation, r_lru); |
656 | 652 | ||
657 | mlog(0, "lru resv: start: %u len: %u end: %u\n", lru_resv->r_start, | 653 | trace_ocfs2_cannibalize_resv_begin(lru_resv->r_start, |
658 | lru_resv->r_len, ocfs2_resv_end(lru_resv)); | 654 | lru_resv->r_len, |
655 | ocfs2_resv_end(lru_resv)); | ||
659 | 656 | ||
660 | /* | 657 | /* |
661 | * Cannibalize (some or all) of the target reservation and | 658 | * Cannibalize (some or all) of the target reservation and |
@@ -684,10 +681,9 @@ static void ocfs2_cannibalize_resv(struct ocfs2_reservation_map *resmap, | |||
684 | resv->r_len = shrink; | 681 | resv->r_len = shrink; |
685 | } | 682 | } |
686 | 683 | ||
687 | mlog(0, "Reservation now looks like: r_start: %u r_end: %u " | 684 | trace_ocfs2_cannibalize_resv_end(resv->r_start, ocfs2_resv_end(resv), |
688 | "r_len: %u r_last_start: %u r_last_len: %u\n", | 685 | resv->r_len, resv->r_last_start, |
689 | resv->r_start, ocfs2_resv_end(resv), resv->r_len, | 686 | resv->r_last_len); |
690 | resv->r_last_start, resv->r_last_len); | ||
691 | 687 | ||
692 | ocfs2_resv_insert(resmap, resv); | 688 | ocfs2_resv_insert(resmap, resv); |
693 | } | 689 | } |
@@ -748,7 +744,6 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, | |||
748 | if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) | 744 | if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) |
749 | wanted = *clen; | 745 | wanted = *clen; |
750 | 746 | ||
751 | mlog(0, "empty reservation, find new window\n"); | ||
752 | /* | 747 | /* |
753 | * Try to get a window here. If it works, we must fall | 748 | * Try to get a window here. If it works, we must fall |
754 | * through and test the bitmap . This avoids some | 749 | * through and test the bitmap . This avoids some |
@@ -757,6 +752,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, | |||
757 | * that inode. | 752 | * that inode. |
758 | */ | 753 | */ |
759 | ocfs2_resv_find_window(resmap, resv, wanted); | 754 | ocfs2_resv_find_window(resmap, resv, wanted); |
755 | trace_ocfs2_resmap_resv_bits(resv->r_start, resv->r_len); | ||
760 | } | 756 | } |
761 | 757 | ||
762 | BUG_ON(ocfs2_resv_empty(resv)); | 758 | BUG_ON(ocfs2_resv_empty(resv)); |
@@ -813,10 +809,10 @@ void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap, | |||
813 | 809 | ||
814 | spin_lock(&resv_lock); | 810 | spin_lock(&resv_lock); |
815 | 811 | ||
816 | mlog(0, "claim bits: cstart: %u cend: %u clen: %u r_start: %u " | 812 | trace_ocfs2_resmap_claimed_bits_begin(cstart, cend, clen, resv->r_start, |
817 | "r_end: %u r_len: %u, r_last_start: %u r_last_len: %u\n", | 813 | ocfs2_resv_end(resv), resv->r_len, |
818 | cstart, cend, clen, resv->r_start, ocfs2_resv_end(resv), | 814 | resv->r_last_start, |
819 | resv->r_len, resv->r_last_start, resv->r_last_len); | 815 | resv->r_last_len); |
820 | 816 | ||
821 | BUG_ON(cstart < resv->r_start); | 817 | BUG_ON(cstart < resv->r_start); |
822 | BUG_ON(cstart > ocfs2_resv_end(resv)); | 818 | BUG_ON(cstart > ocfs2_resv_end(resv)); |
@@ -833,10 +829,9 @@ void ocfs2_resmap_claimed_bits(struct ocfs2_reservation_map *resmap, | |||
833 | if (!ocfs2_resv_empty(resv)) | 829 | if (!ocfs2_resv_empty(resv)) |
834 | ocfs2_resv_mark_lru(resmap, resv); | 830 | ocfs2_resv_mark_lru(resmap, resv); |
835 | 831 | ||
836 | mlog(0, "Reservation now looks like: r_start: %u r_end: %u " | 832 | trace_ocfs2_resmap_claimed_bits_end(resv->r_start, ocfs2_resv_end(resv), |
837 | "r_len: %u r_last_start: %u r_last_len: %u\n", | 833 | resv->r_len, resv->r_last_start, |
838 | resv->r_start, ocfs2_resv_end(resv), resv->r_len, | 834 | resv->r_last_len); |
839 | resv->r_last_start, resv->r_last_len); | ||
840 | 835 | ||
841 | ocfs2_check_resmap(resmap); | 836 | ocfs2_check_resmap(resmap); |
842 | 837 | ||
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index dacd553d8617..ec55add7604a 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | 29 | ||
30 | #define MLOG_MASK_PREFIX ML_DISK_ALLOC | ||
31 | #include <cluster/masklog.h> | 30 | #include <cluster/masklog.h> |
32 | 31 | ||
33 | #include "ocfs2.h" | 32 | #include "ocfs2.h" |
@@ -39,6 +38,7 @@ | |||
39 | #include "super.h" | 38 | #include "super.h" |
40 | #include "sysfile.h" | 39 | #include "sysfile.h" |
41 | #include "uptodate.h" | 40 | #include "uptodate.h" |
41 | #include "ocfs2_trace.h" | ||
42 | 42 | ||
43 | #include "buffer_head_io.h" | 43 | #include "buffer_head_io.h" |
44 | #include "suballoc.h" | 44 | #include "suballoc.h" |
@@ -82,7 +82,6 @@ static u16 ocfs2_calc_new_backup_super(struct inode *inode, | |||
82 | backups++; | 82 | backups++; |
83 | } | 83 | } |
84 | 84 | ||
85 | mlog_exit_void(); | ||
86 | return backups; | 85 | return backups; |
87 | } | 86 | } |
88 | 87 | ||
@@ -103,8 +102,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle, | |||
103 | u16 cl_bpc = le16_to_cpu(cl->cl_bpc); | 102 | u16 cl_bpc = le16_to_cpu(cl->cl_bpc); |
104 | u16 cl_cpg = le16_to_cpu(cl->cl_cpg); | 103 | u16 cl_cpg = le16_to_cpu(cl->cl_cpg); |
105 | 104 | ||
106 | mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n", | 105 | trace_ocfs2_update_last_group_and_inode(new_clusters, |
107 | new_clusters, first_new_cluster); | 106 | first_new_cluster); |
108 | 107 | ||
109 | ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode), | 108 | ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode), |
110 | group_bh, OCFS2_JOURNAL_ACCESS_WRITE); | 109 | group_bh, OCFS2_JOURNAL_ACCESS_WRITE); |
@@ -176,7 +175,8 @@ out_rollback: | |||
176 | le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits); | 175 | le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits); |
177 | } | 176 | } |
178 | out: | 177 | out: |
179 | mlog_exit(ret); | 178 | if (ret) |
179 | mlog_errno(ret); | ||
180 | return ret; | 180 | return ret; |
181 | } | 181 | } |
182 | 182 | ||
@@ -281,8 +281,6 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters) | |||
281 | u32 first_new_cluster; | 281 | u32 first_new_cluster; |
282 | u64 lgd_blkno; | 282 | u64 lgd_blkno; |
283 | 283 | ||
284 | mlog_entry_void(); | ||
285 | |||
286 | if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) | 284 | if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) |
287 | return -EROFS; | 285 | return -EROFS; |
288 | 286 | ||
@@ -342,7 +340,8 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters) | |||
342 | goto out_unlock; | 340 | goto out_unlock; |
343 | } | 341 | } |
344 | 342 | ||
345 | mlog(0, "extend the last group at %llu, new clusters = %d\n", | 343 | |
344 | trace_ocfs2_group_extend( | ||
346 | (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters); | 345 | (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters); |
347 | 346 | ||
348 | handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS); | 347 | handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS); |
@@ -377,7 +376,6 @@ out_mutex: | |||
377 | iput(main_bm_inode); | 376 | iput(main_bm_inode); |
378 | 377 | ||
379 | out: | 378 | out: |
380 | mlog_exit_void(); | ||
381 | return ret; | 379 | return ret; |
382 | } | 380 | } |
383 | 381 | ||
@@ -472,8 +470,6 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) | |||
472 | struct ocfs2_chain_rec *cr; | 470 | struct ocfs2_chain_rec *cr; |
473 | u16 cl_bpc; | 471 | u16 cl_bpc; |
474 | 472 | ||
475 | mlog_entry_void(); | ||
476 | |||
477 | if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) | 473 | if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) |
478 | return -EROFS; | 474 | return -EROFS; |
479 | 475 | ||
@@ -520,8 +516,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) | |||
520 | goto out_unlock; | 516 | goto out_unlock; |
521 | } | 517 | } |
522 | 518 | ||
523 | mlog(0, "Add a new group %llu in chain = %u, length = %u\n", | 519 | trace_ocfs2_group_add((unsigned long long)input->group, |
524 | (unsigned long long)input->group, input->chain, input->clusters); | 520 | input->chain, input->clusters, input->frees); |
525 | 521 | ||
526 | handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS); | 522 | handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS); |
527 | if (IS_ERR(handle)) { | 523 | if (IS_ERR(handle)) { |
@@ -589,6 +585,5 @@ out_mutex: | |||
589 | iput(main_bm_inode); | 585 | iput(main_bm_inode); |
590 | 586 | ||
591 | out: | 587 | out: |
592 | mlog_exit_void(); | ||
593 | return ret; | 588 | return ret; |
594 | } | 589 | } |
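Every mlog(0, ...) dropped in the hunks above is replaced by a trace_ocfs2_*() call, and each of those calls is backed by a tracepoint declared in the new fs/ocfs2/ocfs2_trace.h header that these files now include. The header itself is not shown in this part of the diff, so the following is only a minimal sketch of what a definition behind a call like trace_ocfs2_group_extend() (used in the resize.c hunk above) could look like; the real header may group events into shared classes, and the exact names and print formats are assumptions.

/* Illustrative sketch only -- the real definitions live in the new
 * fs/ocfs2/ocfs2_trace.h and may be organised differently. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

TRACE_EVENT(ocfs2_group_extend,
	/* arguments mirror the call site: group block number and cluster count */
	TP_PROTO(unsigned long long blkno, int clusters),
	TP_ARGS(blkno, clusters),
	TP_STRUCT__entry(
		__field(unsigned long long, blkno)
		__field(int, clusters)
	),
	TP_fast_assign(
		__entry->blkno = blkno;
		__entry->clusters = clusters;
	),
	TP_printk("%llu %d", __entry->blkno, __entry->clusters)
);

#endif /* _TRACE_OCFS2_H */

/* This part must be outside the include guard */
#include <trace/define_trace.h>

With such a definition in place, the call sites added by the patch stay as cheap as a disabled static branch until the event is enabled through the tracing interface (e.g. tracefs), which is the point of converting the old always-compiled mlog(0, ...) debug strings.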
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index ab4e0172cc1d..26fc0014d509 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/highmem.h> | 28 | #include <linux/highmem.h> |
29 | 29 | ||
30 | #define MLOG_MASK_PREFIX ML_SUPER | ||
31 | #include <cluster/masklog.h> | 30 | #include <cluster/masklog.h> |
32 | 31 | ||
33 | #include "ocfs2.h" | 32 | #include "ocfs2.h" |
@@ -39,6 +38,7 @@ | |||
39 | #include "slot_map.h" | 38 | #include "slot_map.h" |
40 | #include "super.h" | 39 | #include "super.h" |
41 | #include "sysfile.h" | 40 | #include "sysfile.h" |
41 | #include "ocfs2_trace.h" | ||
42 | 42 | ||
43 | #include "buffer_head_io.h" | 43 | #include "buffer_head_io.h" |
44 | 44 | ||
@@ -142,8 +142,7 @@ int ocfs2_refresh_slot_info(struct ocfs2_super *osb) | |||
142 | BUG_ON(si->si_blocks == 0); | 142 | BUG_ON(si->si_blocks == 0); |
143 | BUG_ON(si->si_bh == NULL); | 143 | BUG_ON(si->si_bh == NULL); |
144 | 144 | ||
145 | mlog(0, "Refreshing slot map, reading %u block(s)\n", | 145 | trace_ocfs2_refresh_slot_info(si->si_blocks); |
146 | si->si_blocks); | ||
147 | 146 | ||
148 | /* | 147 | /* |
149 | * We pass -1 as blocknr because we expect all of si->si_bh to | 148 | * We pass -1 as blocknr because we expect all of si->si_bh to |
@@ -381,8 +380,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb, | |||
381 | /* The size checks above should ensure this */ | 380 | /* The size checks above should ensure this */ |
382 | BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks); | 381 | BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks); |
383 | 382 | ||
384 | mlog(0, "Slot map needs %u buffers for %llu bytes\n", | 383 | trace_ocfs2_map_slot_buffers(bytes, si->si_blocks); |
385 | si->si_blocks, bytes); | ||
386 | 384 | ||
387 | si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks, | 385 | si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks, |
388 | GFP_KERNEL); | 386 | GFP_KERNEL); |
@@ -400,8 +398,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb, | |||
400 | goto bail; | 398 | goto bail; |
401 | } | 399 | } |
402 | 400 | ||
403 | mlog(0, "Reading slot map block %u at %llu\n", i, | 401 | trace_ocfs2_map_slot_buffers_block((unsigned long long)blkno, i); |
404 | (unsigned long long)blkno); | ||
405 | 402 | ||
406 | bh = NULL; /* Acquire a fresh bh */ | 403 | bh = NULL; /* Acquire a fresh bh */ |
407 | status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno, | 404 | status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno, |
@@ -475,8 +472,6 @@ int ocfs2_find_slot(struct ocfs2_super *osb) | |||
475 | int slot; | 472 | int slot; |
476 | struct ocfs2_slot_info *si; | 473 | struct ocfs2_slot_info *si; |
477 | 474 | ||
478 | mlog_entry_void(); | ||
479 | |||
480 | si = osb->slot_info; | 475 | si = osb->slot_info; |
481 | 476 | ||
482 | spin_lock(&osb->osb_lock); | 477 | spin_lock(&osb->osb_lock); |
@@ -505,14 +500,13 @@ int ocfs2_find_slot(struct ocfs2_super *osb) | |||
505 | osb->slot_num = slot; | 500 | osb->slot_num = slot; |
506 | spin_unlock(&osb->osb_lock); | 501 | spin_unlock(&osb->osb_lock); |
507 | 502 | ||
508 | mlog(0, "taking node slot %d\n", osb->slot_num); | 503 | trace_ocfs2_find_slot(osb->slot_num); |
509 | 504 | ||
510 | status = ocfs2_update_disk_slot(osb, si, osb->slot_num); | 505 | status = ocfs2_update_disk_slot(osb, si, osb->slot_num); |
511 | if (status < 0) | 506 | if (status < 0) |
512 | mlog_errno(status); | 507 | mlog_errno(status); |
513 | 508 | ||
514 | bail: | 509 | bail: |
515 | mlog_exit(status); | ||
516 | return status; | 510 | return status; |
517 | } | 511 | } |
518 | 512 | ||
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 71998d4d61d5..ab6e2061074f 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/highmem.h> | 30 | #include <linux/highmem.h> |
31 | 31 | ||
32 | #define MLOG_MASK_PREFIX ML_DISK_ALLOC | ||
33 | #include <cluster/masklog.h> | 32 | #include <cluster/masklog.h> |
34 | 33 | ||
35 | #include "ocfs2.h" | 34 | #include "ocfs2.h" |
@@ -44,6 +43,7 @@ | |||
44 | #include "super.h" | 43 | #include "super.h" |
45 | #include "sysfile.h" | 44 | #include "sysfile.h" |
46 | #include "uptodate.h" | 45 | #include "uptodate.h" |
46 | #include "ocfs2_trace.h" | ||
47 | 47 | ||
48 | #include "buffer_head_io.h" | 48 | #include "buffer_head_io.h" |
49 | 49 | ||
@@ -308,8 +308,8 @@ static int ocfs2_validate_group_descriptor(struct super_block *sb, | |||
308 | int rc; | 308 | int rc; |
309 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; | 309 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; |
310 | 310 | ||
311 | mlog(0, "Validating group descriptor %llu\n", | 311 | trace_ocfs2_validate_group_descriptor( |
312 | (unsigned long long)bh->b_blocknr); | 312 | (unsigned long long)bh->b_blocknr); |
313 | 313 | ||
314 | BUG_ON(!buffer_uptodate(bh)); | 314 | BUG_ON(!buffer_uptodate(bh)); |
315 | 315 | ||
@@ -389,8 +389,6 @@ static int ocfs2_block_group_fill(handle_t *handle, | |||
389 | struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; | 389 | struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; |
390 | struct super_block * sb = alloc_inode->i_sb; | 390 | struct super_block * sb = alloc_inode->i_sb; |
391 | 391 | ||
392 | mlog_entry_void(); | ||
393 | |||
394 | if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) { | 392 | if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) { |
395 | ocfs2_error(alloc_inode->i_sb, "group block (%llu) != " | 393 | ocfs2_error(alloc_inode->i_sb, "group block (%llu) != " |
396 | "b_blocknr (%llu)", | 394 | "b_blocknr (%llu)", |
@@ -436,7 +434,8 @@ static int ocfs2_block_group_fill(handle_t *handle, | |||
436 | * allocation time. */ | 434 | * allocation time. */ |
437 | 435 | ||
438 | bail: | 436 | bail: |
439 | mlog_exit(status); | 437 | if (status) |
438 | mlog_errno(status); | ||
440 | return status; | 439 | return status; |
441 | } | 440 | } |
442 | 441 | ||
@@ -477,8 +476,8 @@ ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle, | |||
477 | 476 | ||
478 | /* setup the group */ | 477 | /* setup the group */ |
479 | bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); | 478 | bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); |
480 | mlog(0, "new descriptor, record %u, at block %llu\n", | 479 | trace_ocfs2_block_group_alloc_contig( |
481 | alloc_rec, (unsigned long long)bg_blkno); | 480 | (unsigned long long)bg_blkno, alloc_rec); |
482 | 481 | ||
483 | bg_bh = sb_getblk(osb->sb, bg_blkno); | 482 | bg_bh = sb_getblk(osb->sb, bg_blkno); |
484 | if (!bg_bh) { | 483 | if (!bg_bh) { |
@@ -657,8 +656,8 @@ ocfs2_block_group_alloc_discontig(handle_t *handle, | |||
657 | 656 | ||
658 | /* setup the group */ | 657 | /* setup the group */ |
659 | bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); | 658 | bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); |
660 | mlog(0, "new descriptor, record %u, at block %llu\n", | 659 | trace_ocfs2_block_group_alloc_discontig( |
661 | alloc_rec, (unsigned long long)bg_blkno); | 660 | (unsigned long long)bg_blkno, alloc_rec); |
662 | 661 | ||
663 | bg_bh = sb_getblk(osb->sb, bg_blkno); | 662 | bg_bh = sb_getblk(osb->sb, bg_blkno); |
664 | if (!bg_bh) { | 663 | if (!bg_bh) { |
@@ -707,8 +706,6 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, | |||
707 | 706 | ||
708 | BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode)); | 707 | BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode)); |
709 | 708 | ||
710 | mlog_entry_void(); | ||
711 | |||
712 | cl = &fe->id2.i_chain; | 709 | cl = &fe->id2.i_chain; |
713 | status = ocfs2_reserve_clusters_with_limit(osb, | 710 | status = ocfs2_reserve_clusters_with_limit(osb, |
714 | le16_to_cpu(cl->cl_cpg), | 711 | le16_to_cpu(cl->cl_cpg), |
@@ -730,8 +727,8 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, | |||
730 | } | 727 | } |
731 | 728 | ||
732 | if (last_alloc_group && *last_alloc_group != 0) { | 729 | if (last_alloc_group && *last_alloc_group != 0) { |
733 | mlog(0, "use old allocation group %llu for block group alloc\n", | 730 | trace_ocfs2_block_group_alloc( |
734 | (unsigned long long)*last_alloc_group); | 731 | (unsigned long long)*last_alloc_group); |
735 | ac->ac_last_group = *last_alloc_group; | 732 | ac->ac_last_group = *last_alloc_group; |
736 | } | 733 | } |
737 | 734 | ||
@@ -796,7 +793,8 @@ bail: | |||
796 | 793 | ||
797 | brelse(bg_bh); | 794 | brelse(bg_bh); |
798 | 795 | ||
799 | mlog_exit(status); | 796 | if (status) |
797 | mlog_errno(status); | ||
800 | return status; | 798 | return status; |
801 | } | 799 | } |
802 | 800 | ||
@@ -814,8 +812,6 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, | |||
814 | struct ocfs2_dinode *fe; | 812 | struct ocfs2_dinode *fe; |
815 | u32 free_bits; | 813 | u32 free_bits; |
816 | 814 | ||
817 | mlog_entry_void(); | ||
818 | |||
819 | alloc_inode = ocfs2_get_system_file_inode(osb, type, slot); | 815 | alloc_inode = ocfs2_get_system_file_inode(osb, type, slot); |
820 | if (!alloc_inode) { | 816 | if (!alloc_inode) { |
821 | mlog_errno(-EINVAL); | 817 | mlog_errno(-EINVAL); |
@@ -855,16 +851,15 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, | |||
855 | if (bits_wanted > free_bits) { | 851 | if (bits_wanted > free_bits) { |
856 | /* cluster bitmap never grows */ | 852 | /* cluster bitmap never grows */ |
857 | if (ocfs2_is_cluster_bitmap(alloc_inode)) { | 853 | if (ocfs2_is_cluster_bitmap(alloc_inode)) { |
858 | mlog(0, "Disk Full: wanted=%u, free_bits=%u\n", | 854 | trace_ocfs2_reserve_suballoc_bits_nospc(bits_wanted, |
859 | bits_wanted, free_bits); | 855 | free_bits); |
860 | status = -ENOSPC; | 856 | status = -ENOSPC; |
861 | goto bail; | 857 | goto bail; |
862 | } | 858 | } |
863 | 859 | ||
864 | if (!(flags & ALLOC_NEW_GROUP)) { | 860 | if (!(flags & ALLOC_NEW_GROUP)) { |
865 | mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, " | 861 | trace_ocfs2_reserve_suballoc_bits_no_new_group( |
866 | "and we don't alloc a new group for it.\n", | 862 | slot, bits_wanted, free_bits); |
867 | slot, bits_wanted, free_bits); | ||
868 | status = -ENOSPC; | 863 | status = -ENOSPC; |
869 | goto bail; | 864 | goto bail; |
870 | } | 865 | } |
@@ -890,7 +885,8 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, | |||
890 | bail: | 885 | bail: |
891 | brelse(bh); | 886 | brelse(bh); |
892 | 887 | ||
893 | mlog_exit(status); | 888 | if (status) |
889 | mlog_errno(status); | ||
894 | return status; | 890 | return status; |
895 | } | 891 | } |
896 | 892 | ||
@@ -1052,7 +1048,8 @@ bail: | |||
1052 | *ac = NULL; | 1048 | *ac = NULL; |
1053 | } | 1049 | } |
1054 | 1050 | ||
1055 | mlog_exit(status); | 1051 | if (status) |
1052 | mlog_errno(status); | ||
1056 | return status; | 1053 | return status; |
1057 | } | 1054 | } |
1058 | 1055 | ||
@@ -1119,8 +1116,8 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb, | |||
1119 | spin_lock(&osb->osb_lock); | 1116 | spin_lock(&osb->osb_lock); |
1120 | osb->osb_inode_alloc_group = alloc_group; | 1117 | osb->osb_inode_alloc_group = alloc_group; |
1121 | spin_unlock(&osb->osb_lock); | 1118 | spin_unlock(&osb->osb_lock); |
1122 | mlog(0, "after reservation, new allocation group is " | 1119 | trace_ocfs2_reserve_new_inode_new_group( |
1123 | "%llu\n", (unsigned long long)alloc_group); | 1120 | (unsigned long long)alloc_group); |
1124 | 1121 | ||
1125 | /* | 1122 | /* |
1126 | * Some inodes must be freed by us, so try to allocate | 1123 | * Some inodes must be freed by us, so try to allocate |
@@ -1152,7 +1149,8 @@ bail: | |||
1152 | *ac = NULL; | 1149 | *ac = NULL; |
1153 | } | 1150 | } |
1154 | 1151 | ||
1155 | mlog_exit(status); | 1152 | if (status) |
1153 | mlog_errno(status); | ||
1156 | return status; | 1154 | return status; |
1157 | } | 1155 | } |
1158 | 1156 | ||
@@ -1189,8 +1187,6 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb, | |||
1189 | { | 1187 | { |
1190 | int status; | 1188 | int status; |
1191 | 1189 | ||
1192 | mlog_entry_void(); | ||
1193 | |||
1194 | *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); | 1190 | *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); |
1195 | if (!(*ac)) { | 1191 | if (!(*ac)) { |
1196 | status = -ENOMEM; | 1192 | status = -ENOMEM; |
@@ -1229,7 +1225,8 @@ bail: | |||
1229 | *ac = NULL; | 1225 | *ac = NULL; |
1230 | } | 1226 | } |
1231 | 1227 | ||
1232 | mlog_exit(status); | 1228 | if (status) |
1229 | mlog_errno(status); | ||
1233 | return status; | 1230 | return status; |
1234 | } | 1231 | } |
1235 | 1232 | ||
@@ -1357,15 +1354,12 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle, | |||
1357 | void *bitmap = bg->bg_bitmap; | 1354 | void *bitmap = bg->bg_bitmap; |
1358 | int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; | 1355 | int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; |
1359 | 1356 | ||
1360 | mlog_entry_void(); | ||
1361 | |||
1362 | /* All callers get the descriptor via | 1357 | /* All callers get the descriptor via |
1363 | * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ | 1358 | * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ |
1364 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); | 1359 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); |
1365 | BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); | 1360 | BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); |
1366 | 1361 | ||
1367 | mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off, | 1362 | trace_ocfs2_block_group_set_bits(bit_off, num_bits); |
1368 | num_bits); | ||
1369 | 1363 | ||
1370 | if (ocfs2_is_cluster_bitmap(alloc_inode)) | 1364 | if (ocfs2_is_cluster_bitmap(alloc_inode)) |
1371 | journal_type = OCFS2_JOURNAL_ACCESS_UNDO; | 1365 | journal_type = OCFS2_JOURNAL_ACCESS_UNDO; |
@@ -1394,7 +1388,8 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle, | |||
1394 | ocfs2_journal_dirty(handle, group_bh); | 1388 | ocfs2_journal_dirty(handle, group_bh); |
1395 | 1389 | ||
1396 | bail: | 1390 | bail: |
1397 | mlog_exit(status); | 1391 | if (status) |
1392 | mlog_errno(status); | ||
1398 | return status; | 1393 | return status; |
1399 | } | 1394 | } |
1400 | 1395 | ||
@@ -1437,10 +1432,10 @@ static int ocfs2_relink_block_group(handle_t *handle, | |||
1437 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); | 1432 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); |
1438 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg)); | 1433 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg)); |
1439 | 1434 | ||
1440 | mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n", | 1435 | trace_ocfs2_relink_block_group( |
1441 | (unsigned long long)le64_to_cpu(fe->i_blkno), chain, | 1436 | (unsigned long long)le64_to_cpu(fe->i_blkno), chain, |
1442 | (unsigned long long)le64_to_cpu(bg->bg_blkno), | 1437 | (unsigned long long)le64_to_cpu(bg->bg_blkno), |
1443 | (unsigned long long)le64_to_cpu(prev_bg->bg_blkno)); | 1438 | (unsigned long long)le64_to_cpu(prev_bg->bg_blkno)); |
1444 | 1439 | ||
1445 | fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno); | 1440 | fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno); |
1446 | bg_ptr = le64_to_cpu(bg->bg_next_group); | 1441 | bg_ptr = le64_to_cpu(bg->bg_next_group); |
@@ -1484,7 +1479,8 @@ out_rollback: | |||
1484 | prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); | 1479 | prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); |
1485 | } | 1480 | } |
1486 | 1481 | ||
1487 | mlog_exit(status); | 1482 | if (status) |
1483 | mlog_errno(status); | ||
1488 | return status; | 1484 | return status; |
1489 | } | 1485 | } |
1490 | 1486 | ||
@@ -1525,10 +1521,10 @@ static int ocfs2_cluster_group_search(struct inode *inode, | |||
1525 | if ((gd_cluster_off + max_bits) > | 1521 | if ((gd_cluster_off + max_bits) > |
1526 | OCFS2_I(inode)->ip_clusters) { | 1522 | OCFS2_I(inode)->ip_clusters) { |
1527 | max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; | 1523 | max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; |
1528 | mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n", | 1524 | trace_ocfs2_cluster_group_search_wrong_max_bits( |
1529 | (unsigned long long)le64_to_cpu(gd->bg_blkno), | 1525 | (unsigned long long)le64_to_cpu(gd->bg_blkno), |
1530 | le16_to_cpu(gd->bg_bits), | 1526 | le16_to_cpu(gd->bg_bits), |
1531 | OCFS2_I(inode)->ip_clusters, max_bits); | 1527 | OCFS2_I(inode)->ip_clusters, max_bits); |
1532 | } | 1528 | } |
1533 | 1529 | ||
1534 | ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), | 1530 | ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), |
@@ -1542,9 +1538,9 @@ static int ocfs2_cluster_group_search(struct inode *inode, | |||
1542 | gd_cluster_off + | 1538 | gd_cluster_off + |
1543 | res->sr_bit_offset + | 1539 | res->sr_bit_offset + |
1544 | res->sr_bits); | 1540 | res->sr_bits); |
1545 | mlog(0, "Checking %llu against %llu\n", | 1541 | trace_ocfs2_cluster_group_search_max_block( |
1546 | (unsigned long long)blkoff, | 1542 | (unsigned long long)blkoff, |
1547 | (unsigned long long)max_block); | 1543 | (unsigned long long)max_block); |
1548 | if (blkoff > max_block) | 1544 | if (blkoff > max_block) |
1549 | return -ENOSPC; | 1545 | return -ENOSPC; |
1550 | } | 1546 | } |
@@ -1588,9 +1584,9 @@ static int ocfs2_block_group_search(struct inode *inode, | |||
1588 | if (!ret && max_block) { | 1584 | if (!ret && max_block) { |
1589 | blkoff = le64_to_cpu(bg->bg_blkno) + | 1585 | blkoff = le64_to_cpu(bg->bg_blkno) + |
1590 | res->sr_bit_offset + res->sr_bits; | 1586 | res->sr_bit_offset + res->sr_bits; |
1591 | mlog(0, "Checking %llu against %llu\n", | 1587 | trace_ocfs2_block_group_search_max_block( |
1592 | (unsigned long long)blkoff, | 1588 | (unsigned long long)blkoff, |
1593 | (unsigned long long)max_block); | 1589 | (unsigned long long)max_block); |
1594 | if (blkoff > max_block) | 1590 | if (blkoff > max_block) |
1595 | ret = -ENOSPC; | 1591 | ret = -ENOSPC; |
1596 | } | 1592 | } |
@@ -1756,9 +1752,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1756 | struct ocfs2_group_desc *bg; | 1752 | struct ocfs2_group_desc *bg; |
1757 | 1753 | ||
1758 | chain = ac->ac_chain; | 1754 | chain = ac->ac_chain; |
1759 | mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n", | 1755 | trace_ocfs2_search_chain_begin( |
1760 | bits_wanted, chain, | 1756 | (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, |
1761 | (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno); | 1757 | bits_wanted, chain); |
1762 | 1758 | ||
1763 | status = ocfs2_read_group_descriptor(alloc_inode, fe, | 1759 | status = ocfs2_read_group_descriptor(alloc_inode, fe, |
1764 | le64_to_cpu(cl->cl_recs[chain].c_blkno), | 1760 | le64_to_cpu(cl->cl_recs[chain].c_blkno), |
@@ -1799,8 +1795,8 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1799 | goto bail; | 1795 | goto bail; |
1800 | } | 1796 | } |
1801 | 1797 | ||
1802 | mlog(0, "alloc succeeds: we give %u bits from block group %llu\n", | 1798 | trace_ocfs2_search_chain_succ( |
1803 | res->sr_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno)); | 1799 | (unsigned long long)le64_to_cpu(bg->bg_blkno), res->sr_bits); |
1804 | 1800 | ||
1805 | res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno); | 1801 | res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno); |
1806 | 1802 | ||
@@ -1861,8 +1857,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, | |||
1861 | goto bail; | 1857 | goto bail; |
1862 | } | 1858 | } |
1863 | 1859 | ||
1864 | mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, | 1860 | trace_ocfs2_search_chain_end( |
1865 | (unsigned long long)le64_to_cpu(fe->i_blkno)); | 1861 | (unsigned long long)le64_to_cpu(fe->i_blkno), |
1862 | res->sr_bits); | ||
1866 | 1863 | ||
1867 | out_loc_only: | 1864 | out_loc_only: |
1868 | *bits_left = le16_to_cpu(bg->bg_free_bits_count); | 1865 | *bits_left = le16_to_cpu(bg->bg_free_bits_count); |
@@ -1870,7 +1867,8 @@ bail: | |||
1870 | brelse(group_bh); | 1867 | brelse(group_bh); |
1871 | brelse(prev_group_bh); | 1868 | brelse(prev_group_bh); |
1872 | 1869 | ||
1873 | mlog_exit(status); | 1870 | if (status) |
1871 | mlog_errno(status); | ||
1874 | return status; | 1872 | return status; |
1875 | } | 1873 | } |
1876 | 1874 | ||
@@ -1888,8 +1886,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
1888 | struct ocfs2_chain_list *cl; | 1886 | struct ocfs2_chain_list *cl; |
1889 | struct ocfs2_dinode *fe; | 1887 | struct ocfs2_dinode *fe; |
1890 | 1888 | ||
1891 | mlog_entry_void(); | ||
1892 | |||
1893 | BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); | 1889 | BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); |
1894 | BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given)); | 1890 | BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given)); |
1895 | BUG_ON(!ac->ac_bh); | 1891 | BUG_ON(!ac->ac_bh); |
@@ -1945,8 +1941,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, | |||
1945 | goto bail; | 1941 | goto bail; |
1946 | } | 1942 | } |
1947 | 1943 | ||
1948 | mlog(0, "Search of victim chain %u came up with nothing, " | 1944 | trace_ocfs2_claim_suballoc_bits(victim); |
1949 | "trying all chains now.\n", victim); | ||
1950 | 1945 | ||
1951 | /* If we didn't pick a good victim, then just default to | 1946 | /* If we didn't pick a good victim, then just default to |
1952 | * searching each chain in order. Don't allow chain relinking | 1947 | * searching each chain in order. Don't allow chain relinking |
@@ -1984,7 +1979,8 @@ set_hint: | |||
1984 | } | 1979 | } |
1985 | 1980 | ||
1986 | bail: | 1981 | bail: |
1987 | mlog_exit(status); | 1982 | if (status) |
1983 | mlog_errno(status); | ||
1988 | return status; | 1984 | return status; |
1989 | } | 1985 | } |
1990 | 1986 | ||
@@ -2021,7 +2017,8 @@ int ocfs2_claim_metadata(handle_t *handle, | |||
2021 | *num_bits = res.sr_bits; | 2017 | *num_bits = res.sr_bits; |
2022 | status = 0; | 2018 | status = 0; |
2023 | bail: | 2019 | bail: |
2024 | mlog_exit(status); | 2020 | if (status) |
2021 | mlog_errno(status); | ||
2025 | return status; | 2022 | return status; |
2026 | } | 2023 | } |
2027 | 2024 | ||
@@ -2172,8 +2169,8 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle, | |||
2172 | goto out; | 2169 | goto out; |
2173 | } | 2170 | } |
2174 | 2171 | ||
2175 | mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, | 2172 | trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno, |
2176 | (unsigned long long)di_blkno); | 2173 | res->sr_bits); |
2177 | 2174 | ||
2178 | atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); | 2175 | atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); |
2179 | 2176 | ||
@@ -2201,8 +2198,6 @@ int ocfs2_claim_new_inode(handle_t *handle, | |||
2201 | int status; | 2198 | int status; |
2202 | struct ocfs2_suballoc_result res; | 2199 | struct ocfs2_suballoc_result res; |
2203 | 2200 | ||
2204 | mlog_entry_void(); | ||
2205 | |||
2206 | BUG_ON(!ac); | 2201 | BUG_ON(!ac); |
2207 | BUG_ON(ac->ac_bits_given != 0); | 2202 | BUG_ON(ac->ac_bits_given != 0); |
2208 | BUG_ON(ac->ac_bits_wanted != 1); | 2203 | BUG_ON(ac->ac_bits_wanted != 1); |
@@ -2230,7 +2225,8 @@ int ocfs2_claim_new_inode(handle_t *handle, | |||
2230 | ocfs2_save_inode_ac_group(dir, ac); | 2225 | ocfs2_save_inode_ac_group(dir, ac); |
2231 | status = 0; | 2226 | status = 0; |
2232 | bail: | 2227 | bail: |
2233 | mlog_exit(status); | 2228 | if (status) |
2229 | mlog_errno(status); | ||
2234 | return status; | 2230 | return status; |
2235 | } | 2231 | } |
2236 | 2232 | ||
@@ -2307,8 +2303,6 @@ int __ocfs2_claim_clusters(handle_t *handle, | |||
2307 | struct ocfs2_suballoc_result res = { .sr_blkno = 0, }; | 2303 | struct ocfs2_suballoc_result res = { .sr_blkno = 0, }; |
2308 | struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb); | 2304 | struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb); |
2309 | 2305 | ||
2310 | mlog_entry_void(); | ||
2311 | |||
2312 | BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); | 2306 | BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); |
2313 | 2307 | ||
2314 | BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL | 2308 | BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL |
@@ -2363,7 +2357,8 @@ int __ocfs2_claim_clusters(handle_t *handle, | |||
2363 | ac->ac_bits_given += *num_clusters; | 2357 | ac->ac_bits_given += *num_clusters; |
2364 | 2358 | ||
2365 | bail: | 2359 | bail: |
2366 | mlog_exit(status); | 2360 | if (status) |
2361 | mlog_errno(status); | ||
2367 | return status; | 2362 | return status; |
2368 | } | 2363 | } |
2369 | 2364 | ||
@@ -2392,13 +2387,11 @@ static int ocfs2_block_group_clear_bits(handle_t *handle, | |||
2392 | unsigned int tmp; | 2387 | unsigned int tmp; |
2393 | struct ocfs2_group_desc *undo_bg = NULL; | 2388 | struct ocfs2_group_desc *undo_bg = NULL; |
2394 | 2389 | ||
2395 | mlog_entry_void(); | ||
2396 | |||
2397 | /* The caller got this descriptor from | 2390 | /* The caller got this descriptor from |
2398 | * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ | 2391 | * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ |
2399 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); | 2392 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); |
2400 | 2393 | ||
2401 | mlog(0, "off = %u, num = %u\n", bit_off, num_bits); | 2394 | trace_ocfs2_block_group_clear_bits(bit_off, num_bits); |
2402 | 2395 | ||
2403 | BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode)); | 2396 | BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode)); |
2404 | status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), | 2397 | status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), |
@@ -2463,8 +2456,6 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle, | |||
2463 | struct buffer_head *group_bh = NULL; | 2456 | struct buffer_head *group_bh = NULL; |
2464 | struct ocfs2_group_desc *group; | 2457 | struct ocfs2_group_desc *group; |
2465 | 2458 | ||
2466 | mlog_entry_void(); | ||
2467 | |||
2468 | /* The alloc_bh comes from ocfs2_free_dinode() or | 2459 | /* The alloc_bh comes from ocfs2_free_dinode() or |
2469 | * ocfs2_free_clusters(). The callers have all locked the | 2460 | * ocfs2_free_clusters(). The callers have all locked the |
2470 | * allocator and gotten alloc_bh from the lock call. This | 2461 | * allocator and gotten alloc_bh from the lock call. This |
@@ -2473,9 +2464,10 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle, | |||
2473 | BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); | 2464 | BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); |
2474 | BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl)); | 2465 | BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl)); |
2475 | 2466 | ||
2476 | mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n", | 2467 | trace_ocfs2_free_suballoc_bits( |
2477 | (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count, | 2468 | (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, |
2478 | (unsigned long long)bg_blkno, start_bit); | 2469 | (unsigned long long)bg_blkno, |
2470 | start_bit, count); | ||
2479 | 2471 | ||
2480 | status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno, | 2472 | status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno, |
2481 | &group_bh); | 2473 | &group_bh); |
@@ -2511,7 +2503,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle, | |||
2511 | bail: | 2503 | bail: |
2512 | brelse(group_bh); | 2504 | brelse(group_bh); |
2513 | 2505 | ||
2514 | mlog_exit(status); | 2506 | if (status) |
2507 | mlog_errno(status); | ||
2515 | return status; | 2508 | return status; |
2516 | } | 2509 | } |
2517 | 2510 | ||
@@ -2556,11 +2549,8 @@ static int _ocfs2_free_clusters(handle_t *handle, | |||
2556 | 2549 | ||
2557 | /* You can't ever have a contiguous set of clusters | 2550 | /* You can't ever have a contiguous set of clusters |
2558 | * bigger than a block group bitmap so we never have to worry | 2551 | * bigger than a block group bitmap so we never have to worry |
2559 | * about looping on them. */ | 2552 | * about looping on them. |
2560 | 2553 | * This is expensive. We can safely remove once this stuff has | |
2561 | mlog_entry_void(); | ||
2562 | |||
2563 | /* This is expensive. We can safely remove once this stuff has | ||
2564 | * gotten tested really well. */ | 2554 | * gotten tested really well. */ |
2565 | BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk))); | 2555 | BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk))); |
2566 | 2556 | ||
@@ -2569,10 +2559,9 @@ static int _ocfs2_free_clusters(handle_t *handle, | |||
2569 | ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno, | 2559 | ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno, |
2570 | &bg_start_bit); | 2560 | &bg_start_bit); |
2571 | 2561 | ||
2572 | mlog(0, "want to free %u clusters starting at block %llu\n", | 2562 | trace_ocfs2_free_clusters((unsigned long long)bg_blkno, |
2573 | num_clusters, (unsigned long long)start_blk); | 2563 | (unsigned long long)start_blk, |
2574 | mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n", | 2564 | bg_start_bit, num_clusters); |
2575 | (unsigned long long)bg_blkno, bg_start_bit); | ||
2576 | 2565 | ||
2577 | status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh, | 2566 | status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh, |
2578 | bg_start_bit, bg_blkno, | 2567 | bg_start_bit, bg_blkno, |
@@ -2586,7 +2575,8 @@ static int _ocfs2_free_clusters(handle_t *handle, | |||
2586 | num_clusters); | 2575 | num_clusters); |
2587 | 2576 | ||
2588 | out: | 2577 | out: |
2589 | mlog_exit(status); | 2578 | if (status) |
2579 | mlog_errno(status); | ||
2590 | return status; | 2580 | return status; |
2591 | } | 2581 | } |
2592 | 2582 | ||
@@ -2756,7 +2746,7 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, | |||
2756 | struct buffer_head *inode_bh = NULL; | 2746 | struct buffer_head *inode_bh = NULL; |
2757 | struct ocfs2_dinode *inode_fe; | 2747 | struct ocfs2_dinode *inode_fe; |
2758 | 2748 | ||
2759 | mlog_entry("blkno: %llu\n", (unsigned long long)blkno); | 2749 | trace_ocfs2_get_suballoc_slot_bit((unsigned long long)blkno); |
2760 | 2750 | ||
2761 | /* dirty read disk */ | 2751 | /* dirty read disk */ |
2762 | status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh); | 2752 | status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh); |
@@ -2793,7 +2783,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, | |||
2793 | bail: | 2783 | bail: |
2794 | brelse(inode_bh); | 2784 | brelse(inode_bh); |
2795 | 2785 | ||
2796 | mlog_exit(status); | 2786 | if (status) |
2787 | mlog_errno(status); | ||
2797 | return status; | 2788 | return status; |
2798 | } | 2789 | } |
2799 | 2790 | ||
@@ -2816,8 +2807,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, | |||
2816 | u64 bg_blkno; | 2807 | u64 bg_blkno; |
2817 | int status; | 2808 | int status; |
2818 | 2809 | ||
2819 | mlog_entry("blkno: %llu bit: %u\n", (unsigned long long)blkno, | 2810 | trace_ocfs2_test_suballoc_bit((unsigned long long)blkno, |
2820 | (unsigned int)bit); | 2811 | (unsigned int)bit); |
2821 | 2812 | ||
2822 | alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data; | 2813 | alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data; |
2823 | if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) { | 2814 | if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) { |
@@ -2844,7 +2835,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, | |||
2844 | bail: | 2835 | bail: |
2845 | brelse(group_bh); | 2836 | brelse(group_bh); |
2846 | 2837 | ||
2847 | mlog_exit(status); | 2838 | if (status) |
2839 | mlog_errno(status); | ||
2848 | return status; | 2840 | return status; |
2849 | } | 2841 | } |
2850 | 2842 | ||
@@ -2869,7 +2861,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | |||
2869 | struct inode *inode_alloc_inode; | 2861 | struct inode *inode_alloc_inode; |
2870 | struct buffer_head *alloc_bh = NULL; | 2862 | struct buffer_head *alloc_bh = NULL; |
2871 | 2863 | ||
2872 | mlog_entry("blkno: %llu", (unsigned long long)blkno); | 2864 | trace_ocfs2_test_inode_bit((unsigned long long)blkno); |
2873 | 2865 | ||
2874 | status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, | 2866 | status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, |
2875 | &group_blkno, &suballoc_bit); | 2867 | &group_blkno, &suballoc_bit); |
@@ -2910,6 +2902,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) | |||
2910 | iput(inode_alloc_inode); | 2902 | iput(inode_alloc_inode); |
2911 | brelse(alloc_bh); | 2903 | brelse(alloc_bh); |
2912 | bail: | 2904 | bail: |
2913 | mlog_exit(status); | 2905 | if (status) |
2906 | mlog_errno(status); | ||
2914 | return status; | 2907 | return status; |
2915 | } | 2908 | } |
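The super.c hunk that follows is where the tracepoint bodies get instantiated: it defines CREATE_TRACE_POINTS before including ocfs2_trace.h, and exactly one compilation unit in the filesystem may do that, while every other file (refcounttree.c, reservations.c, resize.c, slot_map.c, suballoc.c above) includes the header bare. Many of the new events in the suballoc.c hunks also share one argument shape -- two unsigned ints, as in trace_ocfs2_block_group_set_bits() and trace_ocfs2_block_group_clear_bits() -- which a trace header would normally express once as an event class and then stamp out per event. The sketch below shows that pattern inside the same guarded header block sketched earlier; the class and event names are illustrative assumptions, not necessarily what ocfs2_trace.h uses.

/* Illustrative sketch only: one class for the common (bit_off, num_bits)
 * shape, reused by several of the events called in the hunks above. */
DECLARE_EVENT_CLASS(ocfs2_uint_uint_class,
	TP_PROTO(unsigned int value1, unsigned int value2),
	TP_ARGS(value1, value2),
	TP_STRUCT__entry(
		__field(unsigned int, value1)
		__field(unsigned int, value2)
	),
	TP_fast_assign(
		__entry->value1 = value1;
		__entry->value2 = value2;
	),
	TP_printk("%u %u", __entry->value1, __entry->value2)
);

/* Each DEFINE_EVENT adds a new trace_ocfs2_*() symbol without repeating
 * the entry layout or the print format. */
DEFINE_EVENT(ocfs2_uint_uint_class, ocfs2_block_group_set_bits,
	TP_PROTO(unsigned int value1, unsigned int value2),
	TP_ARGS(value1, value2)
);

DEFINE_EVENT(ocfs2_uint_uint_class, ocfs2_block_group_clear_bits,
	TP_PROTO(unsigned int value1, unsigned int value2),
	TP_ARGS(value1, value2)
);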
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 236ed1bdca2c..69fa11b35aa4 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -42,7 +42,9 @@ | |||
42 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
43 | #include <linux/quotaops.h> | 43 | #include <linux/quotaops.h> |
44 | 44 | ||
45 | #define MLOG_MASK_PREFIX ML_SUPER | 45 | #define CREATE_TRACE_POINTS |
46 | #include "ocfs2_trace.h" | ||
47 | |||
46 | #include <cluster/masklog.h> | 48 | #include <cluster/masklog.h> |
47 | 49 | ||
48 | #include "ocfs2.h" | 50 | #include "ocfs2.h" |
@@ -441,8 +443,6 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) | |||
441 | int status = 0; | 443 | int status = 0; |
442 | int i; | 444 | int i; |
443 | 445 | ||
444 | mlog_entry_void(); | ||
445 | |||
446 | new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0); | 446 | new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0); |
447 | if (IS_ERR(new)) { | 447 | if (IS_ERR(new)) { |
448 | status = PTR_ERR(new); | 448 | status = PTR_ERR(new); |
@@ -478,7 +478,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) | |||
478 | } | 478 | } |
479 | 479 | ||
480 | bail: | 480 | bail: |
481 | mlog_exit(status); | 481 | if (status) |
482 | mlog_errno(status); | ||
482 | return status; | 483 | return status; |
483 | } | 484 | } |
484 | 485 | ||
@@ -488,8 +489,6 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb) | |||
488 | int status = 0; | 489 | int status = 0; |
489 | int i; | 490 | int i; |
490 | 491 | ||
491 | mlog_entry_void(); | ||
492 | |||
493 | for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1; | 492 | for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1; |
494 | i < NUM_SYSTEM_INODES; | 493 | i < NUM_SYSTEM_INODES; |
495 | i++) { | 494 | i++) { |
@@ -508,7 +507,8 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb) | |||
508 | } | 507 | } |
509 | 508 | ||
510 | bail: | 509 | bail: |
511 | mlog_exit(status); | 510 | if (status) |
511 | mlog_errno(status); | ||
512 | return status; | 512 | return status; |
513 | } | 513 | } |
514 | 514 | ||
@@ -517,8 +517,6 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb) | |||
517 | int i; | 517 | int i; |
518 | struct inode *inode; | 518 | struct inode *inode; |
519 | 519 | ||
520 | mlog_entry_void(); | ||
521 | |||
522 | for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) { | 520 | for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) { |
523 | inode = osb->global_system_inodes[i]; | 521 | inode = osb->global_system_inodes[i]; |
524 | if (inode) { | 522 | if (inode) { |
@@ -540,7 +538,7 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb) | |||
540 | } | 538 | } |
541 | 539 | ||
542 | if (!osb->local_system_inodes) | 540 | if (!osb->local_system_inodes) |
543 | goto out; | 541 | return; |
544 | 542 | ||
545 | for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) { | 543 | for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) { |
546 | if (osb->local_system_inodes[i]) { | 544 | if (osb->local_system_inodes[i]) { |
@@ -551,9 +549,6 @@ static void ocfs2_release_system_inodes(struct ocfs2_super *osb) | |||
551 | 549 | ||
552 | kfree(osb->local_system_inodes); | 550 | kfree(osb->local_system_inodes); |
553 | osb->local_system_inodes = NULL; | 551 | osb->local_system_inodes = NULL; |
554 | |||
555 | out: | ||
556 | mlog_exit(0); | ||
557 | } | 552 | } |
558 | 553 | ||
559 | /* We're allocating fs objects, use GFP_NOFS */ | 554 | /* We're allocating fs objects, use GFP_NOFS */ |
@@ -684,12 +679,9 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) | |||
684 | } | 679 | } |
685 | 680 | ||
686 | if (*flags & MS_RDONLY) { | 681 | if (*flags & MS_RDONLY) { |
687 | mlog(0, "Going to ro mode.\n"); | ||
688 | sb->s_flags |= MS_RDONLY; | 682 | sb->s_flags |= MS_RDONLY; |
689 | osb->osb_flags |= OCFS2_OSB_SOFT_RO; | 683 | osb->osb_flags |= OCFS2_OSB_SOFT_RO; |
690 | } else { | 684 | } else { |
691 | mlog(0, "Making ro filesystem writeable.\n"); | ||
692 | |||
693 | if (osb->osb_flags & OCFS2_OSB_ERROR_FS) { | 685 | if (osb->osb_flags & OCFS2_OSB_ERROR_FS) { |
694 | mlog(ML_ERROR, "Cannot remount RDWR " | 686 | mlog(ML_ERROR, "Cannot remount RDWR " |
695 | "filesystem due to previous errors.\n"); | 687 | "filesystem due to previous errors.\n"); |
@@ -707,6 +699,7 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) | |||
707 | sb->s_flags &= ~MS_RDONLY; | 699 | sb->s_flags &= ~MS_RDONLY; |
708 | osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; | 700 | osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; |
709 | } | 701 | } |
702 | trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags); | ||
710 | unlock_osb: | 703 | unlock_osb: |
711 | spin_unlock(&osb->osb_lock); | 704 | spin_unlock(&osb->osb_lock); |
712 | /* Enable quota accounting after remounting RW */ | 705 | /* Enable quota accounting after remounting RW */ |
@@ -1032,7 +1025,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) | |||
1032 | char nodestr[8]; | 1025 | char nodestr[8]; |
1033 | struct ocfs2_blockcheck_stats stats; | 1026 | struct ocfs2_blockcheck_stats stats; |
1034 | 1027 | ||
1035 | mlog_entry("%p, %p, %i", sb, data, silent); | 1028 | trace_ocfs2_fill_super(sb, data, silent); |
1036 | 1029 | ||
1037 | if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { | 1030 | if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { |
1038 | status = -EINVAL; | 1031 | status = -EINVAL; |
@@ -1208,7 +1201,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) | |||
1208 | mlog_errno(status); | 1201 | mlog_errno(status); |
1209 | atomic_set(&osb->vol_state, VOLUME_DISABLED); | 1202 | atomic_set(&osb->vol_state, VOLUME_DISABLED); |
1210 | wake_up(&osb->osb_mount_event); | 1203 | wake_up(&osb->osb_mount_event); |
1211 | mlog_exit(status); | ||
1212 | return status; | 1204 | return status; |
1213 | } | 1205 | } |
1214 | } | 1206 | } |
@@ -1222,7 +1214,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) | |||
1222 | /* Start this when the mount is almost sure of being successful */ | 1214 | /* Start this when the mount is almost sure of being successful */ |
1223 | ocfs2_orphan_scan_start(osb); | 1215 | ocfs2_orphan_scan_start(osb); |
1224 | 1216 | ||
1225 | mlog_exit(status); | ||
1226 | return status; | 1217 | return status; |
1227 | 1218 | ||
1228 | read_super_error: | 1219 | read_super_error: |
@@ -1237,7 +1228,8 @@ read_super_error: | |||
1237 | ocfs2_dismount_volume(sb, 1); | 1228 | ocfs2_dismount_volume(sb, 1); |
1238 | } | 1229 | } |
1239 | 1230 | ||
1240 | mlog_exit(status); | 1231 | if (status) |
1232 | mlog_errno(status); | ||
1241 | return status; | 1233 | return status; |
1242 | } | 1234 | } |
1243 | 1235 | ||
@@ -1320,8 +1312,7 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
1320 | char *p; | 1312 | char *p; |
1321 | u32 tmp; | 1313 | u32 tmp; |
1322 | 1314 | ||
1323 | mlog_entry("remount: %d, options: \"%s\"\n", is_remount, | 1315 | trace_ocfs2_parse_options(is_remount, options ? options : "(none)"); |
1324 | options ? options : "(none)"); | ||
1325 | 1316 | ||
1326 | mopt->commit_interval = 0; | 1317 | mopt->commit_interval = 0; |
1327 | mopt->mount_opt = OCFS2_MOUNT_NOINTR; | 1318 | mopt->mount_opt = OCFS2_MOUNT_NOINTR; |
@@ -1538,7 +1529,6 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
1538 | status = 1; | 1529 | status = 1; |
1539 | 1530 | ||
1540 | bail: | 1531 | bail: |
1541 | mlog_exit(status); | ||
1542 | return status; | 1532 | return status; |
1543 | } | 1533 | } |
1544 | 1534 | ||
@@ -1629,8 +1619,6 @@ static int __init ocfs2_init(void) | |||
1629 | { | 1619 | { |
1630 | int status; | 1620 | int status; |
1631 | 1621 | ||
1632 | mlog_entry_void(); | ||
1633 | |||
1634 | ocfs2_print_version(); | 1622 | ocfs2_print_version(); |
1635 | 1623 | ||
1636 | status = init_ocfs2_uptodate_cache(); | 1624 | status = init_ocfs2_uptodate_cache(); |
@@ -1664,10 +1652,9 @@ leave: | |||
1664 | if (status < 0) { | 1652 | if (status < 0) { |
1665 | ocfs2_free_mem_caches(); | 1653 | ocfs2_free_mem_caches(); |
1666 | exit_ocfs2_uptodate_cache(); | 1654 | exit_ocfs2_uptodate_cache(); |
1655 | mlog_errno(status); | ||
1667 | } | 1656 | } |
1668 | 1657 | ||
1669 | mlog_exit(status); | ||
1670 | |||
1671 | if (status >= 0) { | 1658 | if (status >= 0) { |
1672 | return register_filesystem(&ocfs2_fs_type); | 1659 | return register_filesystem(&ocfs2_fs_type); |
1673 | } else | 1660 | } else |
@@ -1676,8 +1663,6 @@ leave: | |||
1676 | 1663 | ||
1677 | static void __exit ocfs2_exit(void) | 1664 | static void __exit ocfs2_exit(void) |
1678 | { | 1665 | { |
1679 | mlog_entry_void(); | ||
1680 | |||
1681 | if (ocfs2_wq) { | 1666 | if (ocfs2_wq) { |
1682 | flush_workqueue(ocfs2_wq); | 1667 | flush_workqueue(ocfs2_wq); |
1683 | destroy_workqueue(ocfs2_wq); | 1668 | destroy_workqueue(ocfs2_wq); |
@@ -1692,18 +1677,14 @@ static void __exit ocfs2_exit(void) | |||
1692 | unregister_filesystem(&ocfs2_fs_type); | 1677 | unregister_filesystem(&ocfs2_fs_type); |
1693 | 1678 | ||
1694 | exit_ocfs2_uptodate_cache(); | 1679 | exit_ocfs2_uptodate_cache(); |
1695 | |||
1696 | mlog_exit_void(); | ||
1697 | } | 1680 | } |
1698 | 1681 | ||
1699 | static void ocfs2_put_super(struct super_block *sb) | 1682 | static void ocfs2_put_super(struct super_block *sb) |
1700 | { | 1683 | { |
1701 | mlog_entry("(0x%p)\n", sb); | 1684 | trace_ocfs2_put_super(sb); |
1702 | 1685 | ||
1703 | ocfs2_sync_blockdev(sb); | 1686 | ocfs2_sync_blockdev(sb); |
1704 | ocfs2_dismount_volume(sb, 0); | 1687 | ocfs2_dismount_volume(sb, 0); |
1705 | |||
1706 | mlog_exit_void(); | ||
1707 | } | 1688 | } |
1708 | 1689 | ||
1709 | static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) | 1690 | static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) |
@@ -1715,7 +1696,7 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
1715 | struct buffer_head *bh = NULL; | 1696 | struct buffer_head *bh = NULL; |
1716 | struct inode *inode = NULL; | 1697 | struct inode *inode = NULL; |
1717 | 1698 | ||
1718 | mlog_entry("(%p, %p)\n", dentry->d_sb, buf); | 1699 | trace_ocfs2_statfs(dentry->d_sb, buf); |
1719 | 1700 | ||
1720 | osb = OCFS2_SB(dentry->d_sb); | 1701 | osb = OCFS2_SB(dentry->d_sb); |
1721 | 1702 | ||
@@ -1762,7 +1743,8 @@ bail: | |||
1762 | if (inode) | 1743 | if (inode) |
1763 | iput(inode); | 1744 | iput(inode); |
1764 | 1745 | ||
1765 | mlog_exit(status); | 1746 | if (status) |
1747 | mlog_errno(status); | ||
1766 | 1748 | ||
1767 | return status; | 1749 | return status; |
1768 | } | 1750 | } |
@@ -1882,8 +1864,6 @@ static int ocfs2_mount_volume(struct super_block *sb) | |||
1882 | int unlock_super = 0; | 1864 | int unlock_super = 0; |
1883 | struct ocfs2_super *osb = OCFS2_SB(sb); | 1865 | struct ocfs2_super *osb = OCFS2_SB(sb); |
1884 | 1866 | ||
1885 | mlog_entry_void(); | ||
1886 | |||
1887 | if (ocfs2_is_hard_readonly(osb)) | 1867 | if (ocfs2_is_hard_readonly(osb)) |
1888 | goto leave; | 1868 | goto leave; |
1889 | 1869 | ||
@@ -1928,7 +1908,6 @@ leave: | |||
1928 | if (unlock_super) | 1908 | if (unlock_super) |
1929 | ocfs2_super_unlock(osb, 1); | 1909 | ocfs2_super_unlock(osb, 1); |
1930 | 1910 | ||
1931 | mlog_exit(status); | ||
1932 | return status; | 1911 | return status; |
1933 | } | 1912 | } |
1934 | 1913 | ||
@@ -1938,7 +1917,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) | |||
1938 | struct ocfs2_super *osb = NULL; | 1917 | struct ocfs2_super *osb = NULL; |
1939 | char nodestr[8]; | 1918 | char nodestr[8]; |
1940 | 1919 | ||
1941 | mlog_entry("(0x%p)\n", sb); | 1920 | trace_ocfs2_dismount_volume(sb); |
1942 | 1921 | ||
1943 | BUG_ON(!sb); | 1922 | BUG_ON(!sb); |
1944 | osb = OCFS2_SB(sb); | 1923 | osb = OCFS2_SB(sb); |
@@ -2090,8 +2069,6 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2090 | struct ocfs2_super *osb; | 2069 | struct ocfs2_super *osb; |
2091 | u64 total_blocks; | 2070 | u64 total_blocks; |
2092 | 2071 | ||
2093 | mlog_entry_void(); | ||
2094 | |||
2095 | osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL); | 2072 | osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL); |
2096 | if (!osb) { | 2073 | if (!osb) { |
2097 | status = -ENOMEM; | 2074 | status = -ENOMEM; |
@@ -2155,7 +2132,6 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2155 | status = -EINVAL; | 2132 | status = -EINVAL; |
2156 | goto bail; | 2133 | goto bail; |
2157 | } | 2134 | } |
2158 | mlog(0, "max_slots for this device: %u\n", osb->max_slots); | ||
2159 | 2135 | ||
2160 | ocfs2_orphan_scan_init(osb); | 2136 | ocfs2_orphan_scan_init(osb); |
2161 | 2137 | ||
@@ -2294,7 +2270,6 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2294 | osb->s_clustersize_bits = | 2270 | osb->s_clustersize_bits = |
2295 | le32_to_cpu(di->id2.i_super.s_clustersize_bits); | 2271 | le32_to_cpu(di->id2.i_super.s_clustersize_bits); |
2296 | osb->s_clustersize = 1 << osb->s_clustersize_bits; | 2272 | osb->s_clustersize = 1 << osb->s_clustersize_bits; |
2297 | mlog(0, "clusterbits=%d\n", osb->s_clustersize_bits); | ||
2298 | 2273 | ||
2299 | if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE || | 2274 | if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE || |
2300 | osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) { | 2275 | osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) { |
@@ -2333,11 +2308,10 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2333 | le64_to_cpu(di->id2.i_super.s_first_cluster_group); | 2308 | le64_to_cpu(di->id2.i_super.s_first_cluster_group); |
2334 | osb->fs_generation = le32_to_cpu(di->i_fs_generation); | 2309 | osb->fs_generation = le32_to_cpu(di->i_fs_generation); |
2335 | osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash); | 2310 | osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash); |
2336 | mlog(0, "vol_label: %s\n", osb->vol_label); | 2311 | trace_ocfs2_initialize_super(osb->vol_label, osb->uuid_str, |
2337 | mlog(0, "uuid: %s\n", osb->uuid_str); | 2312 | (unsigned long long)osb->root_blkno, |
2338 | mlog(0, "root_blkno=%llu, system_dir_blkno=%llu\n", | 2313 | (unsigned long long)osb->system_dir_blkno, |
2339 | (unsigned long long)osb->root_blkno, | 2314 | osb->s_clustersize_bits); |
2340 | (unsigned long long)osb->system_dir_blkno); | ||
2341 | 2315 | ||
2342 | osb->osb_dlm_debug = ocfs2_new_dlm_debug(); | 2316 | osb->osb_dlm_debug = ocfs2_new_dlm_debug(); |
2343 | if (!osb->osb_dlm_debug) { | 2317 | if (!osb->osb_dlm_debug) { |
@@ -2380,7 +2354,6 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2380 | } | 2354 | } |
2381 | 2355 | ||
2382 | bail: | 2356 | bail: |
2383 | mlog_exit(status); | ||
2384 | return status; | 2357 | return status; |
2385 | } | 2358 | } |
2386 | 2359 | ||
@@ -2396,8 +2369,6 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, | |||
2396 | { | 2369 | { |
2397 | int status = -EAGAIN; | 2370 | int status = -EAGAIN; |
2398 | 2371 | ||
2399 | mlog_entry_void(); | ||
2400 | |||
2401 | if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, | 2372 | if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, |
2402 | strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { | 2373 | strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { |
2403 | /* We have to do a raw check of the feature here */ | 2374 | /* We have to do a raw check of the feature here */ |
@@ -2452,7 +2423,8 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, | |||
2452 | } | 2423 | } |
2453 | 2424 | ||
2454 | out: | 2425 | out: |
2455 | mlog_exit(status); | 2426 | if (status && status != -EAGAIN) |
2427 | mlog_errno(status); | ||
2456 | return status; | 2428 | return status; |
2457 | } | 2429 | } |
2458 | 2430 | ||
@@ -2465,8 +2437,6 @@ static int ocfs2_check_volume(struct ocfs2_super *osb) | |||
2465 | * recover | 2437 | * recover |
2466 | * ourselves. */ | 2438 | * ourselves. */ |
2467 | 2439 | ||
2468 | mlog_entry_void(); | ||
2469 | |||
2470 | /* Init our journal object. */ | 2440 | /* Init our journal object. */ |
2471 | status = ocfs2_journal_init(osb->journal, &dirty); | 2441 | status = ocfs2_journal_init(osb->journal, &dirty); |
2472 | if (status < 0) { | 2442 | if (status < 0) { |
@@ -2516,8 +2486,6 @@ static int ocfs2_check_volume(struct ocfs2_super *osb) | |||
2516 | * ourselves as mounted. */ | 2486 | * ourselves as mounted. */ |
2517 | } | 2487 | } |
2518 | 2488 | ||
2519 | mlog(0, "Journal loaded.\n"); | ||
2520 | |||
2521 | status = ocfs2_load_local_alloc(osb); | 2489 | status = ocfs2_load_local_alloc(osb); |
2522 | if (status < 0) { | 2490 | if (status < 0) { |
2523 | mlog_errno(status); | 2491 | mlog_errno(status); |
@@ -2549,7 +2517,8 @@ finally: | |||
2549 | if (local_alloc) | 2517 | if (local_alloc) |
2550 | kfree(local_alloc); | 2518 | kfree(local_alloc); |
2551 | 2519 | ||
2552 | mlog_exit(status); | 2520 | if (status) |
2521 | mlog_errno(status); | ||
2553 | return status; | 2522 | return status; |
2554 | } | 2523 | } |
2555 | 2524 | ||
@@ -2561,8 +2530,6 @@ finally: | |||
2561 | */ | 2530 | */ |
2562 | static void ocfs2_delete_osb(struct ocfs2_super *osb) | 2531 | static void ocfs2_delete_osb(struct ocfs2_super *osb) |
2563 | { | 2532 | { |
2564 | mlog_entry_void(); | ||
2565 | |||
2566 | /* This function assumes that the caller has the main osb resource */ | 2533 | /* This function assumes that the caller has the main osb resource */ |
2567 | 2534 | ||
2568 | ocfs2_free_slot_info(osb); | 2535 | ocfs2_free_slot_info(osb); |
@@ -2580,8 +2547,6 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb) | |||
2580 | kfree(osb->uuid_str); | 2547 | kfree(osb->uuid_str); |
2581 | ocfs2_put_dlm_debug(osb->osb_dlm_debug); | 2548 | ocfs2_put_dlm_debug(osb->osb_dlm_debug); |
2582 | memset(osb, 0, sizeof(struct ocfs2_super)); | 2549 | memset(osb, 0, sizeof(struct ocfs2_super)); |
2583 | |||
2584 | mlog_exit_void(); | ||
2585 | } | 2550 | } |
2586 | 2551 | ||
2587 | /* Put OCFS2 into a readonly state, or (if the user specifies it), | 2552 | /* Put OCFS2 into a readonly state, or (if the user specifies it), |
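A note on the CREATE_TRACE_POINTS hunk near the top of this file: the trace_ocfs2_*() calls that replace mlog_entry()/mlog_exit() throughout these hunks are ordinary Linux tracepoints. Their declarations live in fs/ocfs2/ocfs2_trace.h, which is not quoted in this diff, so the sketch below only illustrates the pattern; the field layout and format strings are assumptions, not taken from the real header.

/* Illustrative sketch only -- the real events are declared in fs/ocfs2/ocfs2_trace.h. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocfs2

#if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OCFS2_H

#include <linux/tracepoint.h>

/* One event per converted call site, e.g. trace_ocfs2_fill_super(sb, data, silent). */
TRACE_EVENT(ocfs2_fill_super,
	TP_PROTO(const void *sb, const void *data, int silent),
	TP_ARGS(sb, data, silent),
	TP_STRUCT__entry(
		__field(const void *, sb)
		__field(const void *, data)
		__field(int, silent)
	),
	TP_fast_assign(
		__entry->sb = sb;
		__entry->data = data;
		__entry->silent = silent;
	),
	TP_printk("sb = %p, data = %p, silent = %d",
		  __entry->sb, __entry->data, __entry->silent)
);

#endif /* _TRACE_OCFS2_H */

/* The header is re-read by define_trace.h, outside the include guard. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE ocfs2_trace
#include <trace/define_trace.h>

Including the header with CREATE_TRACE_POINTS defined, as super.c now does in the hunk above, is what emits the tracepoint definitions for the whole module; every other ocfs2 source file includes ocfs2_trace.h without that define and only sees the inline trace_*() stubs.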
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 9975457c981f..5d22872e2bb3 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include <linux/pagemap.h> | 40 | #include <linux/pagemap.h> |
41 | #include <linux/namei.h> | 41 | #include <linux/namei.h> |
42 | 42 | ||
43 | #define MLOG_MASK_PREFIX ML_NAMEI | ||
44 | #include <cluster/masklog.h> | 43 | #include <cluster/masklog.h> |
45 | 44 | ||
46 | #include "ocfs2.h" | 45 | #include "ocfs2.h" |
@@ -62,8 +61,6 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode, | |||
62 | char *link = NULL; | 61 | char *link = NULL; |
63 | struct ocfs2_dinode *fe; | 62 | struct ocfs2_dinode *fe; |
64 | 63 | ||
65 | mlog_entry_void(); | ||
66 | |||
67 | status = ocfs2_read_inode_block(inode, bh); | 64 | status = ocfs2_read_inode_block(inode, bh); |
68 | if (status < 0) { | 65 | if (status < 0) { |
69 | mlog_errno(status); | 66 | mlog_errno(status); |
@@ -74,7 +71,6 @@ static char *ocfs2_fast_symlink_getlink(struct inode *inode, | |||
74 | fe = (struct ocfs2_dinode *) (*bh)->b_data; | 71 | fe = (struct ocfs2_dinode *) (*bh)->b_data; |
75 | link = (char *) fe->id2.i_symlink; | 72 | link = (char *) fe->id2.i_symlink; |
76 | bail: | 73 | bail: |
77 | mlog_exit(status); | ||
78 | 74 | ||
79 | return link; | 75 | return link; |
80 | } | 76 | } |
@@ -88,8 +84,6 @@ static int ocfs2_readlink(struct dentry *dentry, | |||
88 | struct buffer_head *bh = NULL; | 84 | struct buffer_head *bh = NULL; |
89 | struct inode *inode = dentry->d_inode; | 85 | struct inode *inode = dentry->d_inode; |
90 | 86 | ||
91 | mlog_entry_void(); | ||
92 | |||
93 | link = ocfs2_fast_symlink_getlink(inode, &bh); | 87 | link = ocfs2_fast_symlink_getlink(inode, &bh); |
94 | if (IS_ERR(link)) { | 88 | if (IS_ERR(link)) { |
95 | ret = PTR_ERR(link); | 89 | ret = PTR_ERR(link); |
@@ -104,7 +98,8 @@ static int ocfs2_readlink(struct dentry *dentry, | |||
104 | 98 | ||
105 | brelse(bh); | 99 | brelse(bh); |
106 | out: | 100 | out: |
107 | mlog_exit(ret); | 101 | if (ret < 0) |
102 | mlog_errno(ret); | ||
108 | return ret; | 103 | return ret; |
109 | } | 104 | } |
110 | 105 | ||
@@ -117,8 +112,6 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry, | |||
117 | struct inode *inode = dentry->d_inode; | 112 | struct inode *inode = dentry->d_inode; |
118 | struct buffer_head *bh = NULL; | 113 | struct buffer_head *bh = NULL; |
119 | 114 | ||
120 | mlog_entry_void(); | ||
121 | |||
122 | BUG_ON(!ocfs2_inode_is_fast_symlink(inode)); | 115 | BUG_ON(!ocfs2_inode_is_fast_symlink(inode)); |
123 | target = ocfs2_fast_symlink_getlink(inode, &bh); | 116 | target = ocfs2_fast_symlink_getlink(inode, &bh); |
124 | if (IS_ERR(target)) { | 117 | if (IS_ERR(target)) { |
@@ -142,7 +135,8 @@ bail: | |||
142 | nd_set_link(nd, status ? ERR_PTR(status) : link); | 135 | nd_set_link(nd, status ? ERR_PTR(status) : link); |
143 | brelse(bh); | 136 | brelse(bh); |
144 | 137 | ||
145 | mlog_exit(status); | 138 | if (status) |
139 | mlog_errno(status); | ||
146 | return NULL; | 140 | return NULL; |
147 | } | 141 | } |
148 | 142 | ||
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c index 902efb23b6a6..3d635f4bbb20 100644 --- a/fs/ocfs2/sysfile.c +++ b/fs/ocfs2/sysfile.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | #include <linux/highmem.h> | 28 | #include <linux/highmem.h> |
29 | 29 | ||
30 | #define MLOG_MASK_PREFIX ML_INODE | ||
31 | #include <cluster/masklog.h> | 30 | #include <cluster/masklog.h> |
32 | 31 | ||
33 | #include "ocfs2.h" | 32 | #include "ocfs2.h" |
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index a0a120e82b97..52eaf33d346f 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c | |||
@@ -54,14 +54,13 @@ | |||
54 | #include <linux/buffer_head.h> | 54 | #include <linux/buffer_head.h> |
55 | #include <linux/rbtree.h> | 55 | #include <linux/rbtree.h> |
56 | 56 | ||
57 | #define MLOG_MASK_PREFIX ML_UPTODATE | ||
58 | |||
59 | #include <cluster/masklog.h> | 57 | #include <cluster/masklog.h> |
60 | 58 | ||
61 | #include "ocfs2.h" | 59 | #include "ocfs2.h" |
62 | 60 | ||
63 | #include "inode.h" | 61 | #include "inode.h" |
64 | #include "uptodate.h" | 62 | #include "uptodate.h" |
63 | #include "ocfs2_trace.h" | ||
65 | 64 | ||
66 | struct ocfs2_meta_cache_item { | 65 | struct ocfs2_meta_cache_item { |
67 | struct rb_node c_node; | 66 | struct rb_node c_node; |
@@ -152,8 +151,8 @@ static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root) | |||
152 | while ((node = rb_last(root)) != NULL) { | 151 | while ((node = rb_last(root)) != NULL) { |
153 | item = rb_entry(node, struct ocfs2_meta_cache_item, c_node); | 152 | item = rb_entry(node, struct ocfs2_meta_cache_item, c_node); |
154 | 153 | ||
155 | mlog(0, "Purge item %llu\n", | 154 | trace_ocfs2_purge_copied_metadata_tree( |
156 | (unsigned long long) item->c_block); | 155 | (unsigned long long) item->c_block); |
157 | 156 | ||
158 | rb_erase(&item->c_node, root); | 157 | rb_erase(&item->c_node, root); |
159 | kmem_cache_free(ocfs2_uptodate_cachep, item); | 158 | kmem_cache_free(ocfs2_uptodate_cachep, item); |
@@ -180,9 +179,9 @@ void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci) | |||
180 | tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE); | 179 | tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE); |
181 | to_purge = ci->ci_num_cached; | 180 | to_purge = ci->ci_num_cached; |
182 | 181 | ||
183 | mlog(0, "Purge %u %s items from Owner %llu\n", to_purge, | 182 | trace_ocfs2_metadata_cache_purge( |
184 | tree ? "array" : "tree", | 183 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
185 | (unsigned long long)ocfs2_metadata_cache_owner(ci)); | 184 | to_purge, tree); |
186 | 185 | ||
187 | /* If we're a tree, save off the root so that we can safely | 186 | /* If we're a tree, save off the root so that we can safely |
188 | * initialize the cache. We do the work to free tree members | 187 | * initialize the cache. We do the work to free tree members |
@@ -249,10 +248,10 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci, | |||
249 | 248 | ||
250 | ocfs2_metadata_cache_lock(ci); | 249 | ocfs2_metadata_cache_lock(ci); |
251 | 250 | ||
252 | mlog(0, "Owner %llu, query block %llu (inline = %u)\n", | 251 | trace_ocfs2_buffer_cached_begin( |
253 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | 252 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
254 | (unsigned long long) bh->b_blocknr, | 253 | (unsigned long long) bh->b_blocknr, |
255 | !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE)); | 254 | !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE)); |
256 | 255 | ||
257 | if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) | 256 | if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) |
258 | index = ocfs2_search_cache_array(ci, bh->b_blocknr); | 257 | index = ocfs2_search_cache_array(ci, bh->b_blocknr); |
@@ -261,7 +260,7 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci, | |||
261 | 260 | ||
262 | ocfs2_metadata_cache_unlock(ci); | 261 | ocfs2_metadata_cache_unlock(ci); |
263 | 262 | ||
264 | mlog(0, "index = %d, item = %p\n", index, item); | 263 | trace_ocfs2_buffer_cached_end(index, item); |
265 | 264 | ||
266 | return (index != -1) || (item != NULL); | 265 | return (index != -1) || (item != NULL); |
267 | } | 266 | } |
@@ -306,8 +305,9 @@ static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci, | |||
306 | { | 305 | { |
307 | BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY); | 306 | BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY); |
308 | 307 | ||
309 | mlog(0, "block %llu takes position %u\n", (unsigned long long) block, | 308 | trace_ocfs2_append_cache_array( |
310 | ci->ci_num_cached); | 309 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
310 | (unsigned long long)block, ci->ci_num_cached); | ||
311 | 311 | ||
312 | ci->ci_cache.ci_array[ci->ci_num_cached] = block; | 312 | ci->ci_cache.ci_array[ci->ci_num_cached] = block; |
313 | ci->ci_num_cached++; | 313 | ci->ci_num_cached++; |
@@ -324,8 +324,9 @@ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci, | |||
324 | struct rb_node **p = &ci->ci_cache.ci_tree.rb_node; | 324 | struct rb_node **p = &ci->ci_cache.ci_tree.rb_node; |
325 | struct ocfs2_meta_cache_item *tmp; | 325 | struct ocfs2_meta_cache_item *tmp; |
326 | 326 | ||
327 | mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block, | 327 | trace_ocfs2_insert_cache_tree( |
328 | ci->ci_num_cached); | 328 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
329 | (unsigned long long)block, ci->ci_num_cached); | ||
329 | 330 | ||
330 | while(*p) { | 331 | while(*p) { |
331 | parent = *p; | 332 | parent = *p; |
@@ -389,9 +390,9 @@ static void ocfs2_expand_cache(struct ocfs2_caching_info *ci, | |||
389 | tree[i] = NULL; | 390 | tree[i] = NULL; |
390 | } | 391 | } |
391 | 392 | ||
392 | mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n", | 393 | trace_ocfs2_expand_cache( |
393 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | 394 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
394 | ci->ci_flags, ci->ci_num_cached); | 395 | ci->ci_flags, ci->ci_num_cached); |
395 | } | 396 | } |
396 | 397 | ||
397 | /* Slow path function - memory allocation is necessary. See the | 398 | /* Slow path function - memory allocation is necessary. See the |
@@ -405,9 +406,9 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci, | |||
405 | struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] = | 406 | struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] = |
406 | { NULL, }; | 407 | { NULL, }; |
407 | 408 | ||
408 | mlog(0, "Owner %llu, block %llu, expand = %d\n", | 409 | trace_ocfs2_set_buffer_uptodate( |
409 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | 410 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
410 | (unsigned long long)block, expand_tree); | 411 | (unsigned long long)block, expand_tree); |
411 | 412 | ||
412 | new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); | 413 | new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); |
413 | if (!new) { | 414 | if (!new) { |
@@ -433,7 +434,6 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci, | |||
433 | 434 | ||
434 | ocfs2_metadata_cache_lock(ci); | 435 | ocfs2_metadata_cache_lock(ci); |
435 | if (ocfs2_insert_can_use_array(ci)) { | 436 | if (ocfs2_insert_can_use_array(ci)) { |
436 | mlog(0, "Someone cleared the tree underneath us\n"); | ||
437 | /* Ok, items were removed from the cache in between | 437 | /* Ok, items were removed from the cache in between |
438 | * locks. Detect this and revert back to the fast path */ | 438 | * locks. Detect this and revert back to the fast path */ |
439 | ocfs2_append_cache_array(ci, block); | 439 | ocfs2_append_cache_array(ci, block); |
@@ -490,9 +490,9 @@ void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci, | |||
490 | if (ocfs2_buffer_cached(ci, bh)) | 490 | if (ocfs2_buffer_cached(ci, bh)) |
491 | return; | 491 | return; |
492 | 492 | ||
493 | mlog(0, "Owner %llu, inserting block %llu\n", | 493 | trace_ocfs2_set_buffer_uptodate_begin( |
494 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | 494 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
495 | (unsigned long long)bh->b_blocknr); | 495 | (unsigned long long)bh->b_blocknr); |
496 | 496 | ||
497 | /* No need to recheck under spinlock - insertion is guarded by | 497 | /* No need to recheck under spinlock - insertion is guarded by |
498 | * co_io_lock() */ | 498 | * co_io_lock() */ |
@@ -542,8 +542,9 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci, | |||
542 | BUG_ON(index >= ci->ci_num_cached); | 542 | BUG_ON(index >= ci->ci_num_cached); |
543 | BUG_ON(!ci->ci_num_cached); | 543 | BUG_ON(!ci->ci_num_cached); |
544 | 544 | ||
545 | mlog(0, "remove index %d (num_cached = %u\n", index, | 545 | trace_ocfs2_remove_metadata_array( |
546 | ci->ci_num_cached); | 546 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
547 | index, ci->ci_num_cached); | ||
547 | 548 | ||
548 | ci->ci_num_cached--; | 549 | ci->ci_num_cached--; |
549 | 550 | ||
@@ -559,8 +560,9 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci, | |||
559 | static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci, | 560 | static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci, |
560 | struct ocfs2_meta_cache_item *item) | 561 | struct ocfs2_meta_cache_item *item) |
561 | { | 562 | { |
562 | mlog(0, "remove block %llu from tree\n", | 563 | trace_ocfs2_remove_metadata_tree( |
563 | (unsigned long long) item->c_block); | 564 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
565 | (unsigned long long)item->c_block); | ||
564 | 566 | ||
565 | rb_erase(&item->c_node, &ci->ci_cache.ci_tree); | 567 | rb_erase(&item->c_node, &ci->ci_cache.ci_tree); |
566 | ci->ci_num_cached--; | 568 | ci->ci_num_cached--; |
@@ -573,10 +575,10 @@ static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci, | |||
573 | struct ocfs2_meta_cache_item *item = NULL; | 575 | struct ocfs2_meta_cache_item *item = NULL; |
574 | 576 | ||
575 | ocfs2_metadata_cache_lock(ci); | 577 | ocfs2_metadata_cache_lock(ci); |
576 | mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n", | 578 | trace_ocfs2_remove_block_from_cache( |
577 | (unsigned long long)ocfs2_metadata_cache_owner(ci), | 579 | (unsigned long long)ocfs2_metadata_cache_owner(ci), |
578 | (unsigned long long) block, ci->ci_num_cached, | 580 | (unsigned long long) block, ci->ci_num_cached, |
579 | ci->ci_flags & OCFS2_CACHE_FL_INLINE); | 581 | ci->ci_flags); |
580 | 582 | ||
581 | if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { | 583 | if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { |
582 | index = ocfs2_search_cache_array(ci, block); | 584 | index = ocfs2_search_cache_array(ci, block); |
@@ -626,9 +628,6 @@ int __init init_ocfs2_uptodate_cache(void) | |||
626 | if (!ocfs2_uptodate_cachep) | 628 | if (!ocfs2_uptodate_cachep) |
627 | return -ENOMEM; | 629 | return -ENOMEM; |
628 | 630 | ||
629 | mlog(0, "%u inlined cache items per inode.\n", | ||
630 | OCFS2_CACHE_INFO_MAX_ARRAY); | ||
631 | |||
632 | return 0; | 631 | return 0; |
633 | } | 632 | } |
634 | 633 | ||
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 6bb602486c6b..57a215dc2d9b 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/string.h> | 37 | #include <linux/string.h> |
38 | #include <linux/security.h> | 38 | #include <linux/security.h> |
39 | 39 | ||
40 | #define MLOG_MASK_PREFIX ML_XATTR | ||
41 | #include <cluster/masklog.h> | 40 | #include <cluster/masklog.h> |
42 | 41 | ||
43 | #include "ocfs2.h" | 42 | #include "ocfs2.h" |
@@ -57,6 +56,7 @@ | |||
57 | #include "xattr.h" | 56 | #include "xattr.h" |
58 | #include "refcounttree.h" | 57 | #include "refcounttree.h" |
59 | #include "acl.h" | 58 | #include "acl.h" |
59 | #include "ocfs2_trace.h" | ||
60 | 60 | ||
61 | struct ocfs2_xattr_def_value_root { | 61 | struct ocfs2_xattr_def_value_root { |
62 | struct ocfs2_xattr_value_root xv; | 62 | struct ocfs2_xattr_value_root xv; |
@@ -474,8 +474,7 @@ static int ocfs2_validate_xattr_block(struct super_block *sb, | |||
474 | struct ocfs2_xattr_block *xb = | 474 | struct ocfs2_xattr_block *xb = |
475 | (struct ocfs2_xattr_block *)bh->b_data; | 475 | (struct ocfs2_xattr_block *)bh->b_data; |
476 | 476 | ||
477 | mlog(0, "Validating xattr block %llu\n", | 477 | trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr); |
478 | (unsigned long long)bh->b_blocknr); | ||
479 | 478 | ||
480 | BUG_ON(!buffer_uptodate(bh)); | 479 | BUG_ON(!buffer_uptodate(bh)); |
481 | 480 | ||
@@ -715,11 +714,11 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, | |||
715 | u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); | 714 | u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); |
716 | struct ocfs2_extent_tree et; | 715 | struct ocfs2_extent_tree et; |
717 | 716 | ||
718 | mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add); | ||
719 | |||
720 | ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); | 717 | ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); |
721 | 718 | ||
722 | while (clusters_to_add) { | 719 | while (clusters_to_add) { |
720 | trace_ocfs2_xattr_extend_allocation(clusters_to_add); | ||
721 | |||
723 | status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, | 722 | status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, |
724 | OCFS2_JOURNAL_ACCESS_WRITE); | 723 | OCFS2_JOURNAL_ACCESS_WRITE); |
725 | if (status < 0) { | 724 | if (status < 0) { |
@@ -754,8 +753,6 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, | |||
754 | */ | 753 | */ |
755 | BUG_ON(why == RESTART_META); | 754 | BUG_ON(why == RESTART_META); |
756 | 755 | ||
757 | mlog(0, "restarting xattr value extension for %u" | ||
758 | " clusters,.\n", clusters_to_add); | ||
759 | credits = ocfs2_calc_extend_credits(inode->i_sb, | 756 | credits = ocfs2_calc_extend_credits(inode->i_sb, |
760 | &vb->vb_xv->xr_list, | 757 | &vb->vb_xv->xr_list, |
761 | clusters_to_add); | 758 | clusters_to_add); |
@@ -3246,8 +3243,8 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode, | |||
3246 | } | 3243 | } |
3247 | 3244 | ||
3248 | meta_add += extra_meta; | 3245 | meta_add += extra_meta; |
3249 | mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, " | 3246 | trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add, |
3250 | "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits); | 3247 | clusters_add, *credits); |
3251 | 3248 | ||
3252 | if (meta_add) { | 3249 | if (meta_add) { |
3253 | ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, | 3250 | ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, |
@@ -3887,8 +3884,10 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, | |||
3887 | 3884 | ||
3888 | if (found) { | 3885 | if (found) { |
3889 | xs->here = &xs->header->xh_entries[index]; | 3886 | xs->here = &xs->header->xh_entries[index]; |
3890 | mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name, | 3887 | trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno, |
3891 | (unsigned long long)bucket_blkno(xs->bucket), index); | 3888 | name, name_index, name_hash, |
3889 | (unsigned long long)bucket_blkno(xs->bucket), | ||
3890 | index); | ||
3892 | } else | 3891 | } else |
3893 | ret = -ENODATA; | 3892 | ret = -ENODATA; |
3894 | 3893 | ||
@@ -3915,8 +3914,10 @@ static int ocfs2_xattr_index_block_find(struct inode *inode, | |||
3915 | if (le16_to_cpu(el->l_next_free_rec) == 0) | 3914 | if (le16_to_cpu(el->l_next_free_rec) == 0) |
3916 | return -ENODATA; | 3915 | return -ENODATA; |
3917 | 3916 | ||
3918 | mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n", | 3917 | trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno, |
3919 | name, name_hash, name_index); | 3918 | name, name_index, name_hash, |
3919 | (unsigned long long)root_bh->b_blocknr, | ||
3920 | -1); | ||
3920 | 3921 | ||
3921 | ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash, | 3922 | ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash, |
3922 | &num_clusters, el); | 3923 | &num_clusters, el); |
@@ -3927,9 +3928,10 @@ static int ocfs2_xattr_index_block_find(struct inode *inode, | |||
3927 | 3928 | ||
3928 | BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash); | 3929 | BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash); |
3929 | 3930 | ||
3930 | mlog(0, "find xattr extent rec %u clusters from %llu, the first hash " | 3931 | trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno, |
3931 | "in the rec is %u\n", num_clusters, (unsigned long long)p_blkno, | 3932 | name, name_index, first_hash, |
3932 | first_hash); | 3933 | (unsigned long long)p_blkno, |
3934 | num_clusters); | ||
3933 | 3935 | ||
3934 | ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash, | 3936 | ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash, |
3935 | p_blkno, first_hash, num_clusters, xs); | 3937 | p_blkno, first_hash, num_clusters, xs); |
@@ -3955,8 +3957,9 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, | |||
3955 | return -ENOMEM; | 3957 | return -ENOMEM; |
3956 | } | 3958 | } |
3957 | 3959 | ||
3958 | mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n", | 3960 | trace_ocfs2_iterate_xattr_buckets( |
3959 | clusters, (unsigned long long)blkno); | 3961 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
3962 | (unsigned long long)blkno, clusters); | ||
3960 | 3963 | ||
3961 | for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) { | 3964 | for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) { |
3962 | ret = ocfs2_read_xattr_bucket(bucket, blkno); | 3965 | ret = ocfs2_read_xattr_bucket(bucket, blkno); |
@@ -3972,8 +3975,7 @@ static int ocfs2_iterate_xattr_buckets(struct inode *inode, | |||
3972 | if (i == 0) | 3975 | if (i == 0) |
3973 | num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets); | 3976 | num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets); |
3974 | 3977 | ||
3975 | mlog(0, "iterating xattr bucket %llu, first hash %u\n", | 3978 | trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno, |
3976 | (unsigned long long)blkno, | ||
3977 | le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash)); | 3979 | le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash)); |
3978 | if (func) { | 3980 | if (func) { |
3979 | ret = func(inode, bucket, para); | 3981 | ret = func(inode, bucket, para); |
@@ -4173,9 +4175,9 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode, | |||
4173 | char *src = xb_bh->b_data; | 4175 | char *src = xb_bh->b_data; |
4174 | char *target = bucket_block(bucket, blks - 1); | 4176 | char *target = bucket_block(bucket, blks - 1); |
4175 | 4177 | ||
4176 | mlog(0, "cp xattr from block %llu to bucket %llu\n", | 4178 | trace_ocfs2_cp_xattr_block_to_bucket_begin( |
4177 | (unsigned long long)xb_bh->b_blocknr, | 4179 | (unsigned long long)xb_bh->b_blocknr, |
4178 | (unsigned long long)bucket_blkno(bucket)); | 4180 | (unsigned long long)bucket_blkno(bucket)); |
4179 | 4181 | ||
4180 | for (i = 0; i < blks; i++) | 4182 | for (i = 0; i < blks; i++) |
4181 | memset(bucket_block(bucket, i), 0, blocksize); | 4183 | memset(bucket_block(bucket, i), 0, blocksize); |
@@ -4211,8 +4213,7 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode, | |||
4211 | for (i = 0; i < count; i++) | 4213 | for (i = 0; i < count; i++) |
4212 | le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change); | 4214 | le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change); |
4213 | 4215 | ||
4214 | mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n", | 4216 | trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change); |
4215 | offset, size, off_change); | ||
4216 | 4217 | ||
4217 | sort(target + offset, count, sizeof(struct ocfs2_xattr_entry), | 4218 | sort(target + offset, count, sizeof(struct ocfs2_xattr_entry), |
4218 | cmp_xe, swap_xe); | 4219 | cmp_xe, swap_xe); |
@@ -4261,8 +4262,8 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, | |||
4261 | struct ocfs2_xattr_tree_root *xr; | 4262 | struct ocfs2_xattr_tree_root *xr; |
4262 | u16 xb_flags = le16_to_cpu(xb->xb_flags); | 4263 | u16 xb_flags = le16_to_cpu(xb->xb_flags); |
4263 | 4264 | ||
4264 | mlog(0, "create xattr index block for %llu\n", | 4265 | trace_ocfs2_xattr_create_index_block_begin( |
4265 | (unsigned long long)xb_bh->b_blocknr); | 4266 | (unsigned long long)xb_bh->b_blocknr); |
4266 | 4267 | ||
4267 | BUG_ON(xb_flags & OCFS2_XATTR_INDEXED); | 4268 | BUG_ON(xb_flags & OCFS2_XATTR_INDEXED); |
4268 | BUG_ON(!xs->bucket); | 4269 | BUG_ON(!xs->bucket); |
@@ -4295,8 +4296,7 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, | |||
4295 | */ | 4296 | */ |
4296 | blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); | 4297 | blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); |
4297 | 4298 | ||
4298 | mlog(0, "allocate 1 cluster from %llu to xattr block\n", | 4299 | trace_ocfs2_xattr_create_index_block((unsigned long long)blkno); |
4299 | (unsigned long long)blkno); | ||
4300 | 4300 | ||
4301 | ret = ocfs2_init_xattr_bucket(xs->bucket, blkno); | 4301 | ret = ocfs2_init_xattr_bucket(xs->bucket, blkno); |
4302 | if (ret) { | 4302 | if (ret) { |
@@ -4400,8 +4400,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, | |||
4400 | entries = (char *)xh->xh_entries; | 4400 | entries = (char *)xh->xh_entries; |
4401 | xh_free_start = le16_to_cpu(xh->xh_free_start); | 4401 | xh_free_start = le16_to_cpu(xh->xh_free_start); |
4402 | 4402 | ||
4403 | mlog(0, "adjust xattr bucket in %llu, count = %u, " | 4403 | trace_ocfs2_defrag_xattr_bucket( |
4404 | "xh_free_start = %u, xh_name_value_len = %u.\n", | ||
4405 | (unsigned long long)blkno, le16_to_cpu(xh->xh_count), | 4404 | (unsigned long long)blkno, le16_to_cpu(xh->xh_count), |
4406 | xh_free_start, le16_to_cpu(xh->xh_name_value_len)); | 4405 | xh_free_start, le16_to_cpu(xh->xh_name_value_len)); |
4407 | 4406 | ||
@@ -4503,8 +4502,9 @@ static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, | |||
4503 | BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets); | 4502 | BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets); |
4504 | BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize); | 4503 | BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize); |
4505 | 4504 | ||
4506 | mlog(0, "move half of xattrs in cluster %llu to %llu\n", | 4505 | trace_ocfs2_mv_xattr_bucket_cross_cluster( |
4507 | (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno); | 4506 | (unsigned long long)last_cluster_blkno, |
4507 | (unsigned long long)new_blkno); | ||
4508 | 4508 | ||
4509 | ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first), | 4509 | ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first), |
4510 | last_cluster_blkno, new_blkno, | 4510 | last_cluster_blkno, new_blkno, |
@@ -4614,8 +4614,8 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, | |||
4614 | struct ocfs2_xattr_entry *xe; | 4614 | struct ocfs2_xattr_entry *xe; |
4615 | int blocksize = inode->i_sb->s_blocksize; | 4615 | int blocksize = inode->i_sb->s_blocksize; |
4616 | 4616 | ||
4617 | mlog(0, "move some of xattrs from bucket %llu to %llu\n", | 4617 | trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk, |
4618 | (unsigned long long)blk, (unsigned long long)new_blk); | 4618 | (unsigned long long)new_blk); |
4619 | 4619 | ||
4620 | s_bucket = ocfs2_xattr_bucket_new(inode); | 4620 | s_bucket = ocfs2_xattr_bucket_new(inode); |
4621 | t_bucket = ocfs2_xattr_bucket_new(inode); | 4621 | t_bucket = ocfs2_xattr_bucket_new(inode); |
@@ -4714,9 +4714,9 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, | |||
4714 | */ | 4714 | */ |
4715 | xe = &xh->xh_entries[start]; | 4715 | xe = &xh->xh_entries[start]; |
4716 | len = sizeof(struct ocfs2_xattr_entry) * (count - start); | 4716 | len = sizeof(struct ocfs2_xattr_entry) * (count - start); |
4717 | mlog(0, "mv xattr entry len %d from %d to %d\n", len, | 4717 | trace_ocfs2_divide_xattr_bucket_move(len, |
4718 | (int)((char *)xe - (char *)xh), | 4718 | (int)((char *)xe - (char *)xh), |
4719 | (int)((char *)xh->xh_entries - (char *)xh)); | 4719 | (int)((char *)xh->xh_entries - (char *)xh)); |
4720 | memmove((char *)xh->xh_entries, (char *)xe, len); | 4720 | memmove((char *)xh->xh_entries, (char *)xe, len); |
4721 | xe = &xh->xh_entries[count - start]; | 4721 | xe = &xh->xh_entries[count - start]; |
4722 | len = sizeof(struct ocfs2_xattr_entry) * start; | 4722 | len = sizeof(struct ocfs2_xattr_entry) * start; |
@@ -4788,9 +4788,9 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode, | |||
4788 | 4788 | ||
4789 | BUG_ON(s_blkno == t_blkno); | 4789 | BUG_ON(s_blkno == t_blkno); |
4790 | 4790 | ||
4791 | mlog(0, "cp bucket %llu to %llu, target is %d\n", | 4791 | trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno, |
4792 | (unsigned long long)s_blkno, (unsigned long long)t_blkno, | 4792 | (unsigned long long)t_blkno, |
4793 | t_is_new); | 4793 | t_is_new); |
4794 | 4794 | ||
4795 | s_bucket = ocfs2_xattr_bucket_new(inode); | 4795 | s_bucket = ocfs2_xattr_bucket_new(inode); |
4796 | t_bucket = ocfs2_xattr_bucket_new(inode); | 4796 | t_bucket = ocfs2_xattr_bucket_new(inode); |
@@ -4862,8 +4862,8 @@ static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle, | |||
4862 | int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); | 4862 | int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); |
4863 | struct ocfs2_xattr_bucket *old_first, *new_first; | 4863 | struct ocfs2_xattr_bucket *old_first, *new_first; |
4864 | 4864 | ||
4865 | mlog(0, "mv xattrs from cluster %llu to %llu\n", | 4865 | trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk, |
4866 | (unsigned long long)last_blk, (unsigned long long)to_blk); | 4866 | (unsigned long long)to_blk); |
4867 | 4867 | ||
4868 | BUG_ON(start_bucket >= num_buckets); | 4868 | BUG_ON(start_bucket >= num_buckets); |
4869 | if (start_bucket) { | 4869 | if (start_bucket) { |
@@ -5013,9 +5013,9 @@ static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, | |||
5013 | { | 5013 | { |
5014 | int ret; | 5014 | int ret; |
5015 | 5015 | ||
5016 | mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n", | 5016 | trace_ocfs2_adjust_xattr_cross_cluster( |
5017 | (unsigned long long)bucket_blkno(first), prev_clusters, | 5017 | (unsigned long long)bucket_blkno(first), |
5018 | (unsigned long long)new_blk); | 5018 | (unsigned long long)new_blk, prev_clusters); |
5019 | 5019 | ||
5020 | if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) { | 5020 | if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) { |
5021 | ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, | 5021 | ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, |
@@ -5088,10 +5088,10 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, | |||
5088 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 5088 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
5089 | struct ocfs2_extent_tree et; | 5089 | struct ocfs2_extent_tree et; |
5090 | 5090 | ||
5091 | mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, " | 5091 | trace_ocfs2_add_new_xattr_cluster_begin( |
5092 | "previous xattr blkno = %llu\n", | 5092 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
5093 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 5093 | (unsigned long long)bucket_blkno(first), |
5094 | prev_cpos, (unsigned long long)bucket_blkno(first)); | 5094 | prev_cpos, prev_clusters); |
5095 | 5095 | ||
5096 | ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh); | 5096 | ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh); |
5097 | 5097 | ||
@@ -5113,8 +5113,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, | |||
5113 | BUG_ON(num_bits > clusters_to_add); | 5113 | BUG_ON(num_bits > clusters_to_add); |
5114 | 5114 | ||
5115 | block = ocfs2_clusters_to_blocks(osb->sb, bit_off); | 5115 | block = ocfs2_clusters_to_blocks(osb->sb, bit_off); |
5116 | mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n", | 5116 | trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits); |
5117 | num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
5118 | 5117 | ||
5119 | if (bucket_blkno(first) + (prev_clusters * bpc) == block && | 5118 | if (bucket_blkno(first) + (prev_clusters * bpc) == block && |
5120 | (prev_clusters + num_bits) << osb->s_clustersize_bits <= | 5119 | (prev_clusters + num_bits) << osb->s_clustersize_bits <= |
@@ -5130,8 +5129,6 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, | |||
5130 | */ | 5129 | */ |
5131 | v_start = prev_cpos + prev_clusters; | 5130 | v_start = prev_cpos + prev_clusters; |
5132 | *num_clusters = prev_clusters + num_bits; | 5131 | *num_clusters = prev_clusters + num_bits; |
5133 | mlog(0, "Add contiguous %u clusters to previous extent rec.\n", | ||
5134 | num_bits); | ||
5135 | } else { | 5132 | } else { |
5136 | ret = ocfs2_adjust_xattr_cross_cluster(inode, | 5133 | ret = ocfs2_adjust_xattr_cross_cluster(inode, |
5137 | handle, | 5134 | handle, |
@@ -5147,8 +5144,8 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, | |||
5147 | } | 5144 | } |
5148 | } | 5145 | } |
5149 | 5146 | ||
5150 | mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", | 5147 | trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block, |
5151 | num_bits, (unsigned long long)block, v_start); | 5148 | v_start, num_bits); |
5152 | ret = ocfs2_insert_extent(handle, &et, v_start, block, | 5149 | ret = ocfs2_insert_extent(handle, &et, v_start, block, |
5153 | num_bits, 0, ctxt->meta_ac); | 5150 | num_bits, 0, ctxt->meta_ac); |
5154 | if (ret < 0) { | 5151 | if (ret < 0) { |
@@ -5183,9 +5180,9 @@ static int ocfs2_extend_xattr_bucket(struct inode *inode, | |||
5183 | u64 end_blk; | 5180 | u64 end_blk; |
5184 | u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets); | 5181 | u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets); |
5185 | 5182 | ||
5186 | mlog(0, "extend xattr bucket in %llu, xattr extend rec starting " | 5183 | trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk, |
5187 | "from %llu, len = %u\n", (unsigned long long)target_blk, | 5184 | (unsigned long long)bucket_blkno(first), |
5188 | (unsigned long long)bucket_blkno(first), num_clusters); | 5185 | num_clusters, new_bucket); |
5189 | 5186 | ||
5190 | /* The extent must have room for an additional bucket */ | 5187 | /* The extent must have room for an additional bucket */ |
5191 | BUG_ON(new_bucket >= | 5188 | BUG_ON(new_bucket >= |
@@ -5265,8 +5262,8 @@ static int ocfs2_add_new_xattr_bucket(struct inode *inode, | |||
5265 | /* The bucket at the front of the extent */ | 5262 | /* The bucket at the front of the extent */ |
5266 | struct ocfs2_xattr_bucket *first; | 5263 | struct ocfs2_xattr_bucket *first; |
5267 | 5264 | ||
5268 | mlog(0, "Add new xattr bucket starting from %llu\n", | 5265 | trace_ocfs2_add_new_xattr_bucket( |
5269 | (unsigned long long)bucket_blkno(target)); | 5266 | (unsigned long long)bucket_blkno(target)); |
5270 | 5267 | ||
5271 | /* The first bucket of the original extent */ | 5268 | /* The first bucket of the original extent */ |
5272 | first = ocfs2_xattr_bucket_new(inode); | 5269 | first = ocfs2_xattr_bucket_new(inode); |
@@ -5382,8 +5379,8 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, | |||
5382 | * modified something. We have to assume they did, and dirty | 5379 | * modified something. We have to assume they did, and dirty |
5383 | * the whole bucket. This leaves us in a consistent state. | 5380 | * the whole bucket. This leaves us in a consistent state. |
5384 | */ | 5381 | */ |
5385 | mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n", | 5382 | trace_ocfs2_xattr_bucket_value_truncate( |
5386 | xe_off, (unsigned long long)bucket_blkno(bucket), len); | 5383 | (unsigned long long)bucket_blkno(bucket), xe_off, len); |
5387 | ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); | 5384 | ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); |
5388 | if (ret) { | 5385 | if (ret) { |
5389 | mlog_errno(ret); | 5386 | mlog_errno(ret); |
@@ -5433,8 +5430,9 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode, | |||
5433 | 5430 | ||
5434 | ocfs2_init_dealloc_ctxt(&dealloc); | 5431 | ocfs2_init_dealloc_ctxt(&dealloc); |
5435 | 5432 | ||
5436 | mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n", | 5433 | trace_ocfs2_rm_xattr_cluster( |
5437 | cpos, len, (unsigned long long)blkno); | 5434 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
5435 | (unsigned long long)blkno, cpos, len); | ||
5438 | 5436 | ||
5439 | ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno, | 5437 | ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno, |
5440 | len); | 5438 | len); |
@@ -5538,7 +5536,7 @@ static int ocfs2_xattr_set_entry_bucket(struct inode *inode, | |||
5538 | int ret; | 5536 | int ret; |
5539 | struct ocfs2_xa_loc loc; | 5537 | struct ocfs2_xa_loc loc; |
5540 | 5538 | ||
5541 | mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name); | 5539 | trace_ocfs2_xattr_set_entry_bucket(xi->xi_name); |
5542 | 5540 | ||
5543 | ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket, | 5541 | ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket, |
5544 | xs->not_found ? NULL : xs->here); | 5542 | xs->not_found ? NULL : xs->here); |
@@ -5570,7 +5568,6 @@ static int ocfs2_xattr_set_entry_bucket(struct inode *inode, | |||
5570 | 5568 | ||
5571 | 5569 | ||
5572 | out: | 5570 | out: |
5573 | mlog_exit(ret); | ||
5574 | return ret; | 5571 | return ret; |
5575 | } | 5572 | } |
5576 | 5573 | ||
@@ -5581,7 +5578,7 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode, | |||
5581 | { | 5578 | { |
5582 | int ret; | 5579 | int ret; |
5583 | 5580 | ||
5584 | mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name); | 5581 | trace_ocfs2_xattr_set_entry_index_block(xi->xi_name); |
5585 | 5582 | ||
5586 | ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); | 5583 | ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); |
5587 | if (!ret) | 5584 | if (!ret) |
@@ -5637,7 +5634,6 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode, | |||
5637 | mlog_errno(ret); | 5634 | mlog_errno(ret); |
5638 | 5635 | ||
5639 | out: | 5636 | out: |
5640 | mlog_exit(ret); | ||
5641 | return ret; | 5637 | return ret; |
5642 | } | 5638 | } |
5643 | 5639 | ||
@@ -6041,9 +6037,9 @@ static int ocfs2_xattr_bucket_value_refcount(struct inode *inode, | |||
6041 | if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb))) | 6037 | if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb))) |
6042 | p = &refcount; | 6038 | p = &refcount; |
6043 | 6039 | ||
6044 | mlog(0, "refcount bucket %llu, count = %u\n", | 6040 | trace_ocfs2_xattr_bucket_value_refcount( |
6045 | (unsigned long long)bucket_blkno(bucket), | 6041 | (unsigned long long)bucket_blkno(bucket), |
6046 | le16_to_cpu(xh->xh_count)); | 6042 | le16_to_cpu(xh->xh_count)); |
6047 | for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { | 6043 | for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { |
6048 | xe = &xh->xh_entries[i]; | 6044 | xe = &xh->xh_entries[i]; |
6049 | 6045 | ||
@@ -6339,8 +6335,8 @@ static int ocfs2_reflink_xattr_header(handle_t *handle, | |||
6339 | u32 clusters, cpos, p_cluster, num_clusters; | 6335 | u32 clusters, cpos, p_cluster, num_clusters; |
6340 | unsigned int ext_flags = 0; | 6336 | unsigned int ext_flags = 0; |
6341 | 6337 | ||
6342 | mlog(0, "reflink xattr in container %llu, count = %u\n", | 6338 | trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr, |
6343 | (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count)); | 6339 | le16_to_cpu(xh->xh_count)); |
6344 | 6340 | ||
6345 | last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)]; | 6341 | last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)]; |
6346 | for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) { | 6342 | for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) { |
@@ -6540,8 +6536,8 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode, | |||
6540 | goto out; | 6536 | goto out; |
6541 | } | 6537 | } |
6542 | 6538 | ||
6543 | mlog(0, "create new xattr block for inode %llu, index = %d\n", | 6539 | trace_ocfs2_create_empty_xattr_block( |
6544 | (unsigned long long)fe_bh->b_blocknr, indexed); | 6540 | (unsigned long long)fe_bh->b_blocknr, indexed); |
6545 | ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed, | 6541 | ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed, |
6546 | ret_bh); | 6542 | ret_bh); |
6547 | if (ret) | 6543 | if (ret) |
@@ -6952,8 +6948,8 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle, | |||
6952 | if (ret) | 6948 | if (ret) |
6953 | mlog_errno(ret); | 6949 | mlog_errno(ret); |
6954 | 6950 | ||
6955 | mlog(0, "insert new xattr extent rec start %llu len %u to %u\n", | 6951 | trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno, |
6956 | (unsigned long long)new_blkno, num_clusters, reflink_cpos); | 6952 | num_clusters, reflink_cpos); |
6957 | 6953 | ||
6958 | len -= num_clusters; | 6954 | len -= num_clusters; |
6959 | blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); | 6955 | blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); |
@@ -6982,8 +6978,7 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode, | |||
6982 | struct ocfs2_alloc_context *data_ac = NULL; | 6978 | struct ocfs2_alloc_context *data_ac = NULL; |
6983 | struct ocfs2_extent_tree et; | 6979 | struct ocfs2_extent_tree et; |
6984 | 6980 | ||
6985 | mlog(0, "reflink xattr buckets %llu len %u\n", | 6981 | trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len); |
6986 | (unsigned long long)blkno, len); | ||
6987 | 6982 | ||
6988 | ocfs2_init_xattr_tree_extent_tree(&et, | 6983 | ocfs2_init_xattr_tree_extent_tree(&et, |
6989 | INODE_CACHE(args->reflink->new_inode), | 6984 | INODE_CACHE(args->reflink->new_inode), |
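The ocfs2 hunks above convert unconditional mlog(0, ...) debug prints into named trace_ocfs2_* tracepoints, so argument formatting only happens when a consumer is attached. A minimal standalone C sketch of that difference follows; it is not ocfs2 code, and trace_enabled and TRACE_XATTR_SET are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

/* hypothetical switch standing in for an attached tracepoint consumer */
static bool trace_enabled;

/* old mlog(0, ...) style: the message is always formatted and emitted */
#define DEBUG_LOG(fmt, ...)	fprintf(stderr, fmt, __VA_ARGS__)

/* tracepoint style: nothing is formatted unless somebody is listening */
#define TRACE_XATTR_SET(name)						\
	do {								\
		if (trace_enabled)					\
			fprintf(stderr, "xattr_set name=%s\n", (name));	\
	} while (0)

static void set_xattr(const char *name)
{
	TRACE_XATTR_SET(name);
	/* ... the real xattr update would happen here ... */
}

int main(void)
{
	set_xattr("user.comment");	/* silent: no consumer attached */
	trace_enabled = true;
	set_xattr("user.comment");	/* now emits one event */
	DEBUG_LOG("old style always prints: %s\n", "user.comment");
	return 0;
}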
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index c05324d3282c..596bb2c9de42 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -94,75 +94,6 @@ xfs_buf_vmap_len( | |||
94 | } | 94 | } |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Page Region interfaces. | ||
98 | * | ||
99 | * For pages in filesystems where the blocksize is smaller than the | ||
100 | * pagesize, we use the page->private field (long) to hold a bitmap | ||
101 | * of uptodate regions within the page. | ||
102 | * | ||
103 | * Each such region is "bytes per page / bits per long" bytes long. | ||
104 | * | ||
105 | * NBPPR == number-of-bytes-per-page-region | ||
106 | * BTOPR == bytes-to-page-region (rounded up) | ||
107 | * BTOPRT == bytes-to-page-region-truncated (rounded down) | ||
108 | */ | ||
109 | #if (BITS_PER_LONG == 32) | ||
110 | #define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */ | ||
111 | #elif (BITS_PER_LONG == 64) | ||
112 | #define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */ | ||
113 | #else | ||
114 | #error BITS_PER_LONG must be 32 or 64 | ||
115 | #endif | ||
116 | #define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG) | ||
117 | #define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT) | ||
118 | #define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT)) | ||
119 | |||
120 | STATIC unsigned long | ||
121 | page_region_mask( | ||
122 | size_t offset, | ||
123 | size_t length) | ||
124 | { | ||
125 | unsigned long mask; | ||
126 | int first, final; | ||
127 | |||
128 | first = BTOPR(offset); | ||
129 | final = BTOPRT(offset + length - 1); | ||
130 | first = min(first, final); | ||
131 | |||
132 | mask = ~0UL; | ||
133 | mask <<= BITS_PER_LONG - (final - first); | ||
134 | mask >>= BITS_PER_LONG - (final); | ||
135 | |||
136 | ASSERT(offset + length <= PAGE_CACHE_SIZE); | ||
137 | ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0); | ||
138 | |||
139 | return mask; | ||
140 | } | ||
141 | |||
142 | STATIC void | ||
143 | set_page_region( | ||
144 | struct page *page, | ||
145 | size_t offset, | ||
146 | size_t length) | ||
147 | { | ||
148 | set_page_private(page, | ||
149 | page_private(page) | page_region_mask(offset, length)); | ||
150 | if (page_private(page) == ~0UL) | ||
151 | SetPageUptodate(page); | ||
152 | } | ||
153 | |||
154 | STATIC int | ||
155 | test_page_region( | ||
156 | struct page *page, | ||
157 | size_t offset, | ||
158 | size_t length) | ||
159 | { | ||
160 | unsigned long mask = page_region_mask(offset, length); | ||
161 | |||
162 | return (mask && (page_private(page) & mask) == mask); | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * xfs_buf_lru_add - add a buffer to the LRU. | 97 | * xfs_buf_lru_add - add a buffer to the LRU. |
167 | * | 98 | * |
168 | * The LRU takes a new reference to the buffer so that it will only be freed | 99 | * The LRU takes a new reference to the buffer so that it will only be freed |
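The block removed above implemented per-page "valid region" tracking for filesystems whose block size is smaller than the page size: page->private held a bitmap with one bit per region of the page. A simplified standalone sketch of that bookkeeping idea follows; it is not the removed macros themselves, and the 4 KiB page and 64-byte region sizes are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE	4096UL
#define REGIONS_PER_PAGE	64UL	/* one bit per region in a 64-bit word */
#define REGION_SIZE		(SKETCH_PAGE_SIZE / REGIONS_PER_PAGE)	/* 64 bytes */

/* mark the regions covered by [offset, offset + length) as up to date */
static uint64_t set_region_bits(uint64_t map, size_t offset, size_t length)
{
	size_t first = offset / REGION_SIZE;
	size_t last = (offset + length - 1) / REGION_SIZE;

	for (size_t r = first; r <= last; r++)
		map |= 1ULL << r;
	return map;
}

/* the page is fully up to date once every region bit is set */
static int page_uptodate(uint64_t map)
{
	return map == ~0ULL;
}

int main(void)
{
	uint64_t map = 0;

	map = set_region_bits(map, 0, 512);	/* first 512-byte block */
	map = set_region_bits(map, 512, 3584);	/* the rest of the page */
	printf("map=%#llx uptodate=%d\n",
	       (unsigned long long)map, page_uptodate(map));
	return 0;
}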
@@ -332,7 +263,7 @@ xfs_buf_free( | |||
332 | 263 | ||
333 | ASSERT(list_empty(&bp->b_lru)); | 264 | ASSERT(list_empty(&bp->b_lru)); |
334 | 265 | ||
335 | if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { | 266 | if (bp->b_flags & _XBF_PAGES) { |
336 | uint i; | 267 | uint i; |
337 | 268 | ||
338 | if (xfs_buf_is_vmapped(bp)) | 269 | if (xfs_buf_is_vmapped(bp)) |
@@ -342,25 +273,22 @@ xfs_buf_free( | |||
342 | for (i = 0; i < bp->b_page_count; i++) { | 273 | for (i = 0; i < bp->b_page_count; i++) { |
343 | struct page *page = bp->b_pages[i]; | 274 | struct page *page = bp->b_pages[i]; |
344 | 275 | ||
345 | if (bp->b_flags & _XBF_PAGE_CACHE) | 276 | __free_page(page); |
346 | ASSERT(!PagePrivate(page)); | ||
347 | page_cache_release(page); | ||
348 | } | 277 | } |
349 | } | 278 | } else if (bp->b_flags & _XBF_KMEM) |
279 | kmem_free(bp->b_addr); | ||
350 | _xfs_buf_free_pages(bp); | 280 | _xfs_buf_free_pages(bp); |
351 | xfs_buf_deallocate(bp); | 281 | xfs_buf_deallocate(bp); |
352 | } | 282 | } |
353 | 283 | ||
354 | /* | 284 | /* |
355 | * Finds all pages for buffer in question and builds its page list. | 285 | * Allocates all the pages for buffer in question and builds its page list. |
356 | */ | 286 | */ |
357 | STATIC int | 287 | STATIC int |
358 | _xfs_buf_lookup_pages( | 288 | xfs_buf_allocate_memory( |
359 | xfs_buf_t *bp, | 289 | xfs_buf_t *bp, |
360 | uint flags) | 290 | uint flags) |
361 | { | 291 | { |
362 | struct address_space *mapping = bp->b_target->bt_mapping; | ||
363 | size_t blocksize = bp->b_target->bt_bsize; | ||
364 | size_t size = bp->b_count_desired; | 292 | size_t size = bp->b_count_desired; |
365 | size_t nbytes, offset; | 293 | size_t nbytes, offset; |
366 | gfp_t gfp_mask = xb_to_gfp(flags); | 294 | gfp_t gfp_mask = xb_to_gfp(flags); |
@@ -369,29 +297,55 @@ _xfs_buf_lookup_pages( | |||
369 | xfs_off_t end; | 297 | xfs_off_t end; |
370 | int error; | 298 | int error; |
371 | 299 | ||
300 | /* | ||
301 | * for buffers that are contained within a single page, just allocate | ||
302 | * the memory from the heap - there's no need for the complexity of | ||
303 | * page arrays to keep allocation down to order 0. | ||
304 | */ | ||
305 | if (bp->b_buffer_length < PAGE_SIZE) { | ||
306 | bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags)); | ||
307 | if (!bp->b_addr) { | ||
308 | /* low memory - use alloc_page loop instead */ | ||
309 | goto use_alloc_page; | ||
310 | } | ||
311 | |||
312 | if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) & | ||
313 | PAGE_MASK) != | ||
314 | ((unsigned long)bp->b_addr & PAGE_MASK)) { | ||
315 | /* b_addr spans two pages - use alloc_page instead */ | ||
316 | kmem_free(bp->b_addr); | ||
317 | bp->b_addr = NULL; | ||
318 | goto use_alloc_page; | ||
319 | } | ||
320 | bp->b_offset = offset_in_page(bp->b_addr); | ||
321 | bp->b_pages = bp->b_page_array; | ||
322 | bp->b_pages[0] = virt_to_page(bp->b_addr); | ||
323 | bp->b_page_count = 1; | ||
324 | bp->b_flags |= XBF_MAPPED | _XBF_KMEM; | ||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | use_alloc_page: | ||
372 | end = bp->b_file_offset + bp->b_buffer_length; | 329 | end = bp->b_file_offset + bp->b_buffer_length; |
373 | page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset); | 330 | page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset); |
374 | |||
375 | error = _xfs_buf_get_pages(bp, page_count, flags); | 331 | error = _xfs_buf_get_pages(bp, page_count, flags); |
376 | if (unlikely(error)) | 332 | if (unlikely(error)) |
377 | return error; | 333 | return error; |
378 | bp->b_flags |= _XBF_PAGE_CACHE; | ||
379 | 334 | ||
380 | offset = bp->b_offset; | 335 | offset = bp->b_offset; |
381 | first = bp->b_file_offset >> PAGE_CACHE_SHIFT; | 336 | first = bp->b_file_offset >> PAGE_SHIFT; |
337 | bp->b_flags |= _XBF_PAGES; | ||
382 | 338 | ||
383 | for (i = 0; i < bp->b_page_count; i++) { | 339 | for (i = 0; i < bp->b_page_count; i++) { |
384 | struct page *page; | 340 | struct page *page; |
385 | uint retries = 0; | 341 | uint retries = 0; |
386 | 342 | retry: | |
387 | retry: | 343 | page = alloc_page(gfp_mask); |
388 | page = find_or_create_page(mapping, first + i, gfp_mask); | ||
389 | if (unlikely(page == NULL)) { | 344 | if (unlikely(page == NULL)) { |
390 | if (flags & XBF_READ_AHEAD) { | 345 | if (flags & XBF_READ_AHEAD) { |
391 | bp->b_page_count = i; | 346 | bp->b_page_count = i; |
392 | for (i = 0; i < bp->b_page_count; i++) | 347 | error = ENOMEM; |
393 | unlock_page(bp->b_pages[i]); | 348 | goto out_free_pages; |
394 | return -ENOMEM; | ||
395 | } | 349 | } |
396 | 350 | ||
397 | /* | 351 | /* |
@@ -412,33 +366,16 @@ _xfs_buf_lookup_pages( | |||
412 | 366 | ||
413 | XFS_STATS_INC(xb_page_found); | 367 | XFS_STATS_INC(xb_page_found); |
414 | 368 | ||
415 | nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); | 369 | nbytes = min_t(size_t, size, PAGE_SIZE - offset); |
416 | size -= nbytes; | 370 | size -= nbytes; |
417 | |||
418 | ASSERT(!PagePrivate(page)); | ||
419 | if (!PageUptodate(page)) { | ||
420 | page_count--; | ||
421 | if (blocksize >= PAGE_CACHE_SIZE) { | ||
422 | if (flags & XBF_READ) | ||
423 | bp->b_flags |= _XBF_PAGE_LOCKED; | ||
424 | } else if (!PagePrivate(page)) { | ||
425 | if (test_page_region(page, offset, nbytes)) | ||
426 | page_count++; | ||
427 | } | ||
428 | } | ||
429 | |||
430 | bp->b_pages[i] = page; | 371 | bp->b_pages[i] = page; |
431 | offset = 0; | 372 | offset = 0; |
432 | } | 373 | } |
374 | return 0; | ||
433 | 375 | ||
434 | if (!(bp->b_flags & _XBF_PAGE_LOCKED)) { | 376 | out_free_pages: |
435 | for (i = 0; i < bp->b_page_count; i++) | 377 | for (i = 0; i < bp->b_page_count; i++) |
436 | unlock_page(bp->b_pages[i]); | 378 | __free_page(bp->b_pages[i]); |
437 | } | ||
438 | |||
439 | if (page_count == bp->b_page_count) | ||
440 | bp->b_flags |= XBF_DONE; | ||
441 | |||
442 | return error; | 379 | return error; |
443 | } | 380 | } |
444 | 381 | ||
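The new xfs_buf_allocate_memory() above serves sub-page buffers from the heap, but only when the returned address does not straddle a page boundary; otherwise it frees the heap copy and falls back to alloc_page(). A minimal standalone sketch of that boundary test, assuming a 4 KiB page size (the kernel performs the same comparison with PAGE_MASK):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE	4096UL
#define SKETCH_PAGE_MASK	(~((uintptr_t)SKETCH_PAGE_SIZE - 1))

/* true if [addr, addr + len) fits entirely inside one page */
static int fits_in_one_page(const void *addr, size_t len)
{
	uintptr_t start = (uintptr_t)addr;
	uintptr_t end = start + len - 1;

	return (start & SKETCH_PAGE_MASK) == (end & SKETCH_PAGE_MASK);
}

int main(void)
{
	size_t len = 1024;
	void *buf = malloc(len);

	if (!buf)
		return 1;
	if (fits_in_one_page(buf, len))
		printf("%p stays within one page, the heap copy can be used\n", buf);
	else
		printf("%p crosses a page boundary, fall back to page allocation\n", buf);
	free(buf);
	return 0;
}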
@@ -450,14 +387,23 @@ _xfs_buf_map_pages( | |||
450 | xfs_buf_t *bp, | 387 | xfs_buf_t *bp, |
451 | uint flags) | 388 | uint flags) |
452 | { | 389 | { |
453 | /* A single page buffer is always mappable */ | 390 | ASSERT(bp->b_flags & _XBF_PAGES); |
454 | if (bp->b_page_count == 1) { | 391 | if (bp->b_page_count == 1) { |
392 | /* A single page buffer is always mappable */ | ||
455 | bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; | 393 | bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; |
456 | bp->b_flags |= XBF_MAPPED; | 394 | bp->b_flags |= XBF_MAPPED; |
457 | } else if (flags & XBF_MAPPED) { | 395 | } else if (flags & XBF_MAPPED) { |
458 | bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, | 396 | int retried = 0; |
459 | -1, PAGE_KERNEL); | 397 | |
460 | if (unlikely(bp->b_addr == NULL)) | 398 | do { |
399 | bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, | ||
400 | -1, PAGE_KERNEL); | ||
401 | if (bp->b_addr) | ||
402 | break; | ||
403 | vm_unmap_aliases(); | ||
404 | } while (retried++ <= 1); | ||
405 | |||
406 | if (!bp->b_addr) | ||
461 | return -ENOMEM; | 407 | return -ENOMEM; |
462 | bp->b_addr += bp->b_offset; | 408 | bp->b_addr += bp->b_offset; |
463 | bp->b_flags |= XBF_MAPPED; | 409 | bp->b_flags |= XBF_MAPPED; |
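The reworked _xfs_buf_map_pages() above retries vm_map_ram() a couple of times, calling vm_unmap_aliases() between attempts on the theory that flushing lazily-freed vmap space may let a later attempt succeed. A generic sketch of that bounded retry pattern follows; try_map() and reclaim_mapping_space() are stand-ins invented here, not kernel APIs.

#include <stdio.h>
#include <stddef.h>

/* pretend the address space is too fragmented on the first attempt */
static void *try_map(int attempt)
{
	static char backing[64];

	return attempt == 0 ? NULL : (void *)backing;
}

/* in the kernel this role is played by vm_unmap_aliases() */
static void reclaim_mapping_space(void)
{
}

static void *map_with_retry(void)
{
	void *addr = NULL;
	int retried = 0;

	do {
		addr = try_map(retried);
		if (addr)
			break;
		reclaim_mapping_space();	/* free lazily-held space, then retry */
	} while (retried++ <= 1);

	return addr;
}

int main(void)
{
	printf("mapped at %p\n", map_with_retry());
	return 0;
}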
@@ -568,9 +514,14 @@ found: | |||
568 | } | 514 | } |
569 | } | 515 | } |
570 | 516 | ||
517 | /* | ||
518 | * if the buffer is stale, clear all the external state associated with | ||
519 | * it. We need to keep flags such as how we allocated the buffer memory | ||
520 | * intact here. | ||
521 | */ | ||
571 | if (bp->b_flags & XBF_STALE) { | 522 | if (bp->b_flags & XBF_STALE) { |
572 | ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); | 523 | ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); |
573 | bp->b_flags &= XBF_MAPPED; | 524 | bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES; |
574 | } | 525 | } |
575 | 526 | ||
576 | trace_xfs_buf_find(bp, flags, _RET_IP_); | 527 | trace_xfs_buf_find(bp, flags, _RET_IP_); |
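The XBF_STALE hunk above now keeps the flags that record how the buffer memory was allocated (_XBF_KMEM, _XBF_PAGES) while clearing the rest of the external state, so the eventual free path still knows how to release the memory. A tiny sketch of that masking idiom; the BUF_ values are invented stand-ins, loosely modelled on the XBF_ bits.

#include <stdio.h>

#define BUF_MAPPED	(1u << 0)
#define BUF_STALE	(1u << 1)
#define BUF_PAGES	(1u << 3)	/* backed by allocated pages */
#define BUF_KMEM	(1u << 4)	/* backed by heap memory */

int main(void)
{
	unsigned int flags = BUF_STALE | BUF_MAPPED | BUF_KMEM;

	/*
	 * On reuse of a stale buffer, drop everything except the bits that
	 * describe how its memory was allocated, so it can be freed the
	 * right way later.
	 */
	if (flags & BUF_STALE)
		flags &= BUF_MAPPED | BUF_KMEM | BUF_PAGES;

	printf("flags after reuse: %#x\n", flags);	/* MAPPED and KMEM survive */
	return 0;
}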
@@ -591,7 +542,7 @@ xfs_buf_get( | |||
591 | xfs_buf_flags_t flags) | 542 | xfs_buf_flags_t flags) |
592 | { | 543 | { |
593 | xfs_buf_t *bp, *new_bp; | 544 | xfs_buf_t *bp, *new_bp; |
594 | int error = 0, i; | 545 | int error = 0; |
595 | 546 | ||
596 | new_bp = xfs_buf_allocate(flags); | 547 | new_bp = xfs_buf_allocate(flags); |
597 | if (unlikely(!new_bp)) | 548 | if (unlikely(!new_bp)) |
@@ -599,7 +550,7 @@ xfs_buf_get( | |||
599 | 550 | ||
600 | bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); | 551 | bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); |
601 | if (bp == new_bp) { | 552 | if (bp == new_bp) { |
602 | error = _xfs_buf_lookup_pages(bp, flags); | 553 | error = xfs_buf_allocate_memory(bp, flags); |
603 | if (error) | 554 | if (error) |
604 | goto no_buffer; | 555 | goto no_buffer; |
605 | } else { | 556 | } else { |
@@ -608,9 +559,6 @@ xfs_buf_get( | |||
608 | return NULL; | 559 | return NULL; |
609 | } | 560 | } |
610 | 561 | ||
611 | for (i = 0; i < bp->b_page_count; i++) | ||
612 | mark_page_accessed(bp->b_pages[i]); | ||
613 | |||
614 | if (!(bp->b_flags & XBF_MAPPED)) { | 562 | if (!(bp->b_flags & XBF_MAPPED)) { |
615 | error = _xfs_buf_map_pages(bp, flags); | 563 | error = _xfs_buf_map_pages(bp, flags); |
616 | if (unlikely(error)) { | 564 | if (unlikely(error)) { |
@@ -711,8 +659,7 @@ xfs_buf_readahead( | |||
711 | { | 659 | { |
712 | struct backing_dev_info *bdi; | 660 | struct backing_dev_info *bdi; |
713 | 661 | ||
714 | bdi = target->bt_mapping->backing_dev_info; | 662 | if (bdi_read_congested(target->bt_bdi)) |
715 | if (bdi_read_congested(bdi)) | ||
716 | return; | 663 | return; |
717 | 664 | ||
718 | xfs_buf_read(target, ioff, isize, | 665 | xfs_buf_read(target, ioff, isize, |
@@ -790,10 +737,10 @@ xfs_buf_associate_memory( | |||
790 | size_t buflen; | 737 | size_t buflen; |
791 | int page_count; | 738 | int page_count; |
792 | 739 | ||
793 | pageaddr = (unsigned long)mem & PAGE_CACHE_MASK; | 740 | pageaddr = (unsigned long)mem & PAGE_MASK; |
794 | offset = (unsigned long)mem - pageaddr; | 741 | offset = (unsigned long)mem - pageaddr; |
795 | buflen = PAGE_CACHE_ALIGN(len + offset); | 742 | buflen = PAGE_ALIGN(len + offset); |
796 | page_count = buflen >> PAGE_CACHE_SHIFT; | 743 | page_count = buflen >> PAGE_SHIFT; |
797 | 744 | ||
798 | /* Free any previous set of page pointers */ | 745 | /* Free any previous set of page pointers */ |
799 | if (bp->b_pages) | 746 | if (bp->b_pages) |
@@ -810,13 +757,12 @@ xfs_buf_associate_memory( | |||
810 | 757 | ||
811 | for (i = 0; i < bp->b_page_count; i++) { | 758 | for (i = 0; i < bp->b_page_count; i++) { |
812 | bp->b_pages[i] = mem_to_page((void *)pageaddr); | 759 | bp->b_pages[i] = mem_to_page((void *)pageaddr); |
813 | pageaddr += PAGE_CACHE_SIZE; | 760 | pageaddr += PAGE_SIZE; |
814 | } | 761 | } |
815 | 762 | ||
816 | bp->b_count_desired = len; | 763 | bp->b_count_desired = len; |
817 | bp->b_buffer_length = buflen; | 764 | bp->b_buffer_length = buflen; |
818 | bp->b_flags |= XBF_MAPPED; | 765 | bp->b_flags |= XBF_MAPPED; |
819 | bp->b_flags &= ~_XBF_PAGE_LOCKED; | ||
820 | 766 | ||
821 | return 0; | 767 | return 0; |
822 | } | 768 | } |
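xfs_buf_associate_memory() above works out how many pages an arbitrary (address, length) range touches by rounding the start down and the end up to page boundaries. A standalone sketch of the same arithmetic, with a 4 KiB page size assumed for illustration:

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PAGE_SIZE	(1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK	(~(SKETCH_PAGE_SIZE - 1))
#define SKETCH_PAGE_ALIGN(x)	(((x) + SKETCH_PAGE_SIZE - 1) & SKETCH_PAGE_MASK)

/* how many pages does the byte range [mem, mem + len) touch? */
static size_t pages_spanned(uintptr_t mem, size_t len)
{
	uintptr_t pageaddr = mem & SKETCH_PAGE_MASK;	/* start of the first page */
	size_t offset = mem - pageaddr;			/* offset into that page */
	size_t buflen = SKETCH_PAGE_ALIGN(len + offset);

	return buflen >> SKETCH_PAGE_SHIFT;
}

int main(void)
{
	/* 100 bytes starting 50 bytes before a page boundary touch 2 pages */
	printf("%zu\n", pages_spanned(0x1000 - 50, 100));
	/* a page-aligned 8 KiB range touches exactly 2 pages */
	printf("%zu\n", pages_spanned(0x2000, 8192));
	return 0;
}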
@@ -923,20 +869,7 @@ xfs_buf_rele( | |||
923 | 869 | ||
924 | 870 | ||
925 | /* | 871 | /* |
926 | * Mutual exclusion on buffers. Locking model: | 872 | * Lock a buffer object, if it is not already locked. |
927 | * | ||
928 | * Buffers associated with inodes for which buffer locking | ||
929 | * is not enabled are not protected by semaphores, and are | ||
930 | * assumed to be exclusively owned by the caller. There is a | ||
931 | * spinlock in the buffer, used by the caller when concurrent | ||
932 | * access is possible. | ||
933 | */ | ||
934 | |||
935 | /* | ||
936 | * Locks a buffer object, if it is not already locked. Note that this in | ||
937 | * no way locks the underlying pages, so it is only useful for | ||
938 | * synchronizing concurrent use of buffer objects, not for synchronizing | ||
939 | * independent access to the underlying pages. | ||
940 | * | 873 | * |
941 | * If we come across a stale, pinned, locked buffer, we know that we are | 874 | * If we come across a stale, pinned, locked buffer, we know that we are |
942 | * being asked to lock a buffer that has been reallocated. Because it is | 875 | * being asked to lock a buffer that has been reallocated. Because it is |
@@ -970,10 +903,7 @@ xfs_buf_lock_value( | |||
970 | } | 903 | } |
971 | 904 | ||
972 | /* | 905 | /* |
973 | * Locks a buffer object. | 906 | * Lock a buffer object. |
974 | * Note that this in no way locks the underlying pages, so it is only | ||
975 | * useful for synchronizing concurrent use of buffer objects, not for | ||
976 | * synchronizing independent access to the underlying pages. | ||
977 | * | 907 | * |
978 | * If we come across a stale, pinned, locked buffer, we know that we | 908 | * If we come across a stale, pinned, locked buffer, we know that we |
979 | * are being asked to lock a buffer that has been reallocated. Because | 909 | * are being asked to lock a buffer that has been reallocated. Because |
@@ -1246,10 +1176,8 @@ _xfs_buf_ioend( | |||
1246 | xfs_buf_t *bp, | 1176 | xfs_buf_t *bp, |
1247 | int schedule) | 1177 | int schedule) |
1248 | { | 1178 | { |
1249 | if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { | 1179 | if (atomic_dec_and_test(&bp->b_io_remaining) == 1) |
1250 | bp->b_flags &= ~_XBF_PAGE_LOCKED; | ||
1251 | xfs_buf_ioend(bp, schedule); | 1180 | xfs_buf_ioend(bp, schedule); |
1252 | } | ||
1253 | } | 1181 | } |
1254 | 1182 | ||
1255 | STATIC void | 1183 | STATIC void |
@@ -1258,35 +1186,12 @@ xfs_buf_bio_end_io( | |||
1258 | int error) | 1186 | int error) |
1259 | { | 1187 | { |
1260 | xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; | 1188 | xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; |
1261 | unsigned int blocksize = bp->b_target->bt_bsize; | ||
1262 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | ||
1263 | 1189 | ||
1264 | xfs_buf_ioerror(bp, -error); | 1190 | xfs_buf_ioerror(bp, -error); |
1265 | 1191 | ||
1266 | if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) | 1192 | if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) |
1267 | invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); | 1193 | invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); |
1268 | 1194 | ||
1269 | do { | ||
1270 | struct page *page = bvec->bv_page; | ||
1271 | |||
1272 | ASSERT(!PagePrivate(page)); | ||
1273 | if (unlikely(bp->b_error)) { | ||
1274 | if (bp->b_flags & XBF_READ) | ||
1275 | ClearPageUptodate(page); | ||
1276 | } else if (blocksize >= PAGE_CACHE_SIZE) { | ||
1277 | SetPageUptodate(page); | ||
1278 | } else if (!PagePrivate(page) && | ||
1279 | (bp->b_flags & _XBF_PAGE_CACHE)) { | ||
1280 | set_page_region(page, bvec->bv_offset, bvec->bv_len); | ||
1281 | } | ||
1282 | |||
1283 | if (--bvec >= bio->bi_io_vec) | ||
1284 | prefetchw(&bvec->bv_page->flags); | ||
1285 | |||
1286 | if (bp->b_flags & _XBF_PAGE_LOCKED) | ||
1287 | unlock_page(page); | ||
1288 | } while (bvec >= bio->bi_io_vec); | ||
1289 | |||
1290 | _xfs_buf_ioend(bp, 1); | 1195 | _xfs_buf_ioend(bp, 1); |
1291 | bio_put(bio); | 1196 | bio_put(bio); |
1292 | } | 1197 | } |
@@ -1300,7 +1205,6 @@ _xfs_buf_ioapply( | |||
1300 | int offset = bp->b_offset; | 1205 | int offset = bp->b_offset; |
1301 | int size = bp->b_count_desired; | 1206 | int size = bp->b_count_desired; |
1302 | sector_t sector = bp->b_bn; | 1207 | sector_t sector = bp->b_bn; |
1303 | unsigned int blocksize = bp->b_target->bt_bsize; | ||
1304 | 1208 | ||
1305 | total_nr_pages = bp->b_page_count; | 1209 | total_nr_pages = bp->b_page_count; |
1306 | map_i = 0; | 1210 | map_i = 0; |
@@ -1321,29 +1225,6 @@ _xfs_buf_ioapply( | |||
1321 | (bp->b_flags & XBF_READ_AHEAD) ? READA : READ; | 1225 | (bp->b_flags & XBF_READ_AHEAD) ? READA : READ; |
1322 | } | 1226 | } |
1323 | 1227 | ||
1324 | /* Special code path for reading a sub page size buffer in -- | ||
1325 | * we populate up the whole page, and hence the other metadata | ||
1326 | * in the same page. This optimization is only valid when the | ||
1327 | * filesystem block size is not smaller than the page size. | ||
1328 | */ | ||
1329 | if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && | ||
1330 | ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) == | ||
1331 | (XBF_READ|_XBF_PAGE_LOCKED)) && | ||
1332 | (blocksize >= PAGE_CACHE_SIZE)) { | ||
1333 | bio = bio_alloc(GFP_NOIO, 1); | ||
1334 | |||
1335 | bio->bi_bdev = bp->b_target->bt_bdev; | ||
1336 | bio->bi_sector = sector - (offset >> BBSHIFT); | ||
1337 | bio->bi_end_io = xfs_buf_bio_end_io; | ||
1338 | bio->bi_private = bp; | ||
1339 | |||
1340 | bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0); | ||
1341 | size = 0; | ||
1342 | |||
1343 | atomic_inc(&bp->b_io_remaining); | ||
1344 | |||
1345 | goto submit_io; | ||
1346 | } | ||
1347 | 1228 | ||
1348 | next_chunk: | 1229 | next_chunk: |
1349 | atomic_inc(&bp->b_io_remaining); | 1230 | atomic_inc(&bp->b_io_remaining); |
@@ -1357,8 +1238,9 @@ next_chunk: | |||
1357 | bio->bi_end_io = xfs_buf_bio_end_io; | 1238 | bio->bi_end_io = xfs_buf_bio_end_io; |
1358 | bio->bi_private = bp; | 1239 | bio->bi_private = bp; |
1359 | 1240 | ||
1241 | |||
1360 | for (; size && nr_pages; nr_pages--, map_i++) { | 1242 | for (; size && nr_pages; nr_pages--, map_i++) { |
1361 | int rbytes, nbytes = PAGE_CACHE_SIZE - offset; | 1243 | int rbytes, nbytes = PAGE_SIZE - offset; |
1362 | 1244 | ||
1363 | if (nbytes > size) | 1245 | if (nbytes > size) |
1364 | nbytes = size; | 1246 | nbytes = size; |
@@ -1373,7 +1255,6 @@ next_chunk: | |||
1373 | total_nr_pages--; | 1255 | total_nr_pages--; |
1374 | } | 1256 | } |
1375 | 1257 | ||
1376 | submit_io: | ||
1377 | if (likely(bio->bi_size)) { | 1258 | if (likely(bio->bi_size)) { |
1378 | if (xfs_buf_is_vmapped(bp)) { | 1259 | if (xfs_buf_is_vmapped(bp)) { |
1379 | flush_kernel_vmap_range(bp->b_addr, | 1260 | flush_kernel_vmap_range(bp->b_addr, |
@@ -1383,18 +1264,7 @@ submit_io: | |||
1383 | if (size) | 1264 | if (size) |
1384 | goto next_chunk; | 1265 | goto next_chunk; |
1385 | } else { | 1266 | } else { |
1386 | /* | ||
1387 | * if we get here, no pages were added to the bio. However, | ||
1388 | * we can't just error out here - if the pages are locked then | ||
1389 | * we have to unlock them otherwise we can hang on a later | ||
1390 | * access to the page. | ||
1391 | */ | ||
1392 | xfs_buf_ioerror(bp, EIO); | 1267 | xfs_buf_ioerror(bp, EIO); |
1393 | if (bp->b_flags & _XBF_PAGE_LOCKED) { | ||
1394 | int i; | ||
1395 | for (i = 0; i < bp->b_page_count; i++) | ||
1396 | unlock_page(bp->b_pages[i]); | ||
1397 | } | ||
1398 | bio_put(bio); | 1268 | bio_put(bio); |
1399 | } | 1269 | } |
1400 | } | 1270 | } |
@@ -1458,8 +1328,8 @@ xfs_buf_offset( | |||
1458 | return XFS_BUF_PTR(bp) + offset; | 1328 | return XFS_BUF_PTR(bp) + offset; |
1459 | 1329 | ||
1460 | offset += bp->b_offset; | 1330 | offset += bp->b_offset; |
1461 | page = bp->b_pages[offset >> PAGE_CACHE_SHIFT]; | 1331 | page = bp->b_pages[offset >> PAGE_SHIFT]; |
1462 | return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1)); | 1332 | return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1)); |
1463 | } | 1333 | } |
1464 | 1334 | ||
1465 | /* | 1335 | /* |
@@ -1481,9 +1351,9 @@ xfs_buf_iomove( | |||
1481 | page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)]; | 1351 | page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)]; |
1482 | cpoff = xfs_buf_poff(boff + bp->b_offset); | 1352 | cpoff = xfs_buf_poff(boff + bp->b_offset); |
1483 | csize = min_t(size_t, | 1353 | csize = min_t(size_t, |
1484 | PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff); | 1354 | PAGE_SIZE-cpoff, bp->b_count_desired-boff); |
1485 | 1355 | ||
1486 | ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); | 1356 | ASSERT(((csize + cpoff) <= PAGE_SIZE)); |
1487 | 1357 | ||
1488 | switch (mode) { | 1358 | switch (mode) { |
1489 | case XBRW_ZERO: | 1359 | case XBRW_ZERO: |
@@ -1596,7 +1466,6 @@ xfs_free_buftarg( | |||
1596 | xfs_flush_buftarg(btp, 1); | 1466 | xfs_flush_buftarg(btp, 1); |
1597 | if (mp->m_flags & XFS_MOUNT_BARRIER) | 1467 | if (mp->m_flags & XFS_MOUNT_BARRIER) |
1598 | xfs_blkdev_issue_flush(btp); | 1468 | xfs_blkdev_issue_flush(btp); |
1599 | iput(btp->bt_mapping->host); | ||
1600 | 1469 | ||
1601 | kthread_stop(btp->bt_task); | 1470 | kthread_stop(btp->bt_task); |
1602 | kmem_free(btp); | 1471 | kmem_free(btp); |
@@ -1620,15 +1489,6 @@ xfs_setsize_buftarg_flags( | |||
1620 | return EINVAL; | 1489 | return EINVAL; |
1621 | } | 1490 | } |
1622 | 1491 | ||
1623 | if (verbose && | ||
1624 | (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) { | ||
1625 | printk(KERN_WARNING | ||
1626 | "XFS: %u byte sectors in use on device %s. " | ||
1627 | "This is suboptimal; %u or greater is ideal.\n", | ||
1628 | sectorsize, XFS_BUFTARG_NAME(btp), | ||
1629 | (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG); | ||
1630 | } | ||
1631 | |||
1632 | return 0; | 1492 | return 0; |
1633 | } | 1493 | } |
1634 | 1494 | ||
@@ -1643,7 +1503,7 @@ xfs_setsize_buftarg_early( | |||
1643 | struct block_device *bdev) | 1503 | struct block_device *bdev) |
1644 | { | 1504 | { |
1645 | return xfs_setsize_buftarg_flags(btp, | 1505 | return xfs_setsize_buftarg_flags(btp, |
1646 | PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0); | 1506 | PAGE_SIZE, bdev_logical_block_size(bdev), 0); |
1647 | } | 1507 | } |
1648 | 1508 | ||
1649 | int | 1509 | int |
@@ -1656,40 +1516,6 @@ xfs_setsize_buftarg( | |||
1656 | } | 1516 | } |
1657 | 1517 | ||
1658 | STATIC int | 1518 | STATIC int |
1659 | xfs_mapping_buftarg( | ||
1660 | xfs_buftarg_t *btp, | ||
1661 | struct block_device *bdev) | ||
1662 | { | ||
1663 | struct backing_dev_info *bdi; | ||
1664 | struct inode *inode; | ||
1665 | struct address_space *mapping; | ||
1666 | static const struct address_space_operations mapping_aops = { | ||
1667 | .migratepage = fail_migrate_page, | ||
1668 | }; | ||
1669 | |||
1670 | inode = new_inode(bdev->bd_inode->i_sb); | ||
1671 | if (!inode) { | ||
1672 | printk(KERN_WARNING | ||
1673 | "XFS: Cannot allocate mapping inode for device %s\n", | ||
1674 | XFS_BUFTARG_NAME(btp)); | ||
1675 | return ENOMEM; | ||
1676 | } | ||
1677 | inode->i_ino = get_next_ino(); | ||
1678 | inode->i_mode = S_IFBLK; | ||
1679 | inode->i_bdev = bdev; | ||
1680 | inode->i_rdev = bdev->bd_dev; | ||
1681 | bdi = blk_get_backing_dev_info(bdev); | ||
1682 | if (!bdi) | ||
1683 | bdi = &default_backing_dev_info; | ||
1684 | mapping = &inode->i_data; | ||
1685 | mapping->a_ops = &mapping_aops; | ||
1686 | mapping->backing_dev_info = bdi; | ||
1687 | mapping_set_gfp_mask(mapping, GFP_NOFS); | ||
1688 | btp->bt_mapping = mapping; | ||
1689 | return 0; | ||
1690 | } | ||
1691 | |||
1692 | STATIC int | ||
1693 | xfs_alloc_delwrite_queue( | 1519 | xfs_alloc_delwrite_queue( |
1694 | xfs_buftarg_t *btp, | 1520 | xfs_buftarg_t *btp, |
1695 | const char *fsname) | 1521 | const char *fsname) |
@@ -1717,12 +1543,14 @@ xfs_alloc_buftarg( | |||
1717 | btp->bt_mount = mp; | 1543 | btp->bt_mount = mp; |
1718 | btp->bt_dev = bdev->bd_dev; | 1544 | btp->bt_dev = bdev->bd_dev; |
1719 | btp->bt_bdev = bdev; | 1545 | btp->bt_bdev = bdev; |
1546 | btp->bt_bdi = blk_get_backing_dev_info(bdev); | ||
1547 | if (!btp->bt_bdi) | ||
1548 | goto error; | ||
1549 | |||
1720 | INIT_LIST_HEAD(&btp->bt_lru); | 1550 | INIT_LIST_HEAD(&btp->bt_lru); |
1721 | spin_lock_init(&btp->bt_lru_lock); | 1551 | spin_lock_init(&btp->bt_lru_lock); |
1722 | if (xfs_setsize_buftarg_early(btp, bdev)) | 1552 | if (xfs_setsize_buftarg_early(btp, bdev)) |
1723 | goto error; | 1553 | goto error; |
1724 | if (xfs_mapping_buftarg(btp, bdev)) | ||
1725 | goto error; | ||
1726 | if (xfs_alloc_delwrite_queue(btp, fsname)) | 1554 | if (xfs_alloc_delwrite_queue(btp, fsname)) |
1727 | goto error; | 1555 | goto error; |
1728 | btp->bt_shrinker.shrink = xfs_buftarg_shrink; | 1556 | btp->bt_shrinker.shrink = xfs_buftarg_shrink; |
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index cbe65950e524..a9a1c4512645 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h | |||
@@ -61,30 +61,11 @@ typedef enum { | |||
61 | #define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */ | 61 | #define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */ |
62 | 62 | ||
63 | /* flags used only internally */ | 63 | /* flags used only internally */ |
64 | #define _XBF_PAGE_CACHE (1 << 17)/* backed by pagecache */ | ||
65 | #define _XBF_PAGES (1 << 18)/* backed by refcounted pages */ | 64 | #define _XBF_PAGES (1 << 18)/* backed by refcounted pages */ |
66 | #define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */ | 65 | #define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */ |
66 | #define _XBF_KMEM (1 << 20)/* backed by heap memory */ | ||
67 | #define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */ | 67 | #define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */ |
68 | 68 | ||
69 | /* | ||
70 | * Special flag for supporting metadata blocks smaller than a FSB. | ||
71 | * | ||
72 | * In this case we can have multiple xfs_buf_t on a single page and | ||
73 | * need to lock out concurrent xfs_buf_t readers as they only | ||
74 | * serialise access to the buffer. | ||
75 | * | ||
76 | * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation | ||
77 | * between reads of the page. Hence we can have one thread read the | ||
78 | * page and modify it, but then race with another thread that thinks | ||
79 | * the page is not up-to-date and hence reads it again. | ||
80 | * | ||
81 | * The result is that the first modification to the page is lost. | ||
82 | * This sort of AGF/AGI reading race can happen when unlinking inodes | ||
83 | * that require truncation and results in the AGI unlinked list | ||
84 | * modifications being lost. | ||
85 | */ | ||
86 | #define _XBF_PAGE_LOCKED (1 << 22) | ||
87 | |||
88 | typedef unsigned int xfs_buf_flags_t; | 69 | typedef unsigned int xfs_buf_flags_t; |
89 | 70 | ||
90 | #define XFS_BUF_FLAGS \ | 71 | #define XFS_BUF_FLAGS \ |
@@ -100,12 +81,10 @@ typedef unsigned int xfs_buf_flags_t; | |||
100 | { XBF_LOCK, "LOCK" }, /* should never be set */\ | 81 | { XBF_LOCK, "LOCK" }, /* should never be set */\ |
101 | { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\ | 82 | { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\ |
102 | { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\ | 83 | { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\ |
103 | { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \ | ||
104 | { _XBF_PAGES, "PAGES" }, \ | 84 | { _XBF_PAGES, "PAGES" }, \ |
105 | { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \ | 85 | { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \ |
106 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ | 86 | { _XBF_KMEM, "KMEM" }, \ |
107 | { _XBF_PAGE_LOCKED, "PAGE_LOCKED" } | 87 | { _XBF_DELWRI_Q, "DELWRI_Q" } |
108 | |||
109 | 88 | ||
110 | typedef enum { | 89 | typedef enum { |
111 | XBT_FORCE_SLEEP = 0, | 90 | XBT_FORCE_SLEEP = 0, |
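The XFS_BUF_FLAGS table above pairs each flag bit with a printable name so that trace output can decode b_flags; the hunk drops the PAGE_CACHE and PAGE_LOCKED entries and adds KMEM. A minimal sketch of that decode-table idiom with invented F_ flags:

#include <stdio.h>

#define F_READ	(1u << 0)
#define F_WRITE	(1u << 1)
#define F_PAGES	(1u << 18)
#define F_KMEM	(1u << 20)

static const struct {
	unsigned int flag;
	const char *name;
} flag_names[] = {
	{ F_READ,  "READ"  },
	{ F_WRITE, "WRITE" },
	{ F_PAGES, "PAGES" },
	{ F_KMEM,  "KMEM"  },
};

/* print the symbolic names of every bit set in flags */
static void print_flags(unsigned int flags)
{
	for (size_t i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++)
		if (flags & flag_names[i].flag)
			printf("%s|", flag_names[i].name);
	printf("\n");
}

int main(void)
{
	print_flags(F_READ | F_KMEM);	/* prints READ|KMEM| */
	return 0;
}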
@@ -120,7 +99,7 @@ typedef struct xfs_bufhash { | |||
120 | typedef struct xfs_buftarg { | 99 | typedef struct xfs_buftarg { |
121 | dev_t bt_dev; | 100 | dev_t bt_dev; |
122 | struct block_device *bt_bdev; | 101 | struct block_device *bt_bdev; |
123 | struct address_space *bt_mapping; | 102 | struct backing_dev_info *bt_bdi; |
124 | struct xfs_mount *bt_mount; | 103 | struct xfs_mount *bt_mount; |
125 | unsigned int bt_bsize; | 104 | unsigned int bt_bsize; |
126 | unsigned int bt_sshift; | 105 | unsigned int bt_sshift; |
@@ -139,17 +118,6 @@ typedef struct xfs_buftarg { | |||
139 | unsigned int bt_lru_nr; | 118 | unsigned int bt_lru_nr; |
140 | } xfs_buftarg_t; | 119 | } xfs_buftarg_t; |
141 | 120 | ||
142 | /* | ||
143 | * xfs_buf_t: Buffer structure for pagecache-based buffers | ||
144 | * | ||
145 | * This buffer structure is used by the pagecache buffer management routines | ||
146 | * to refer to an assembly of pages forming a logical buffer. | ||
147 | * | ||
148 | * The buffer structure is used on a temporary basis only, and discarded when | ||
149 | * released. The real data storage is recorded in the pagecache. Buffers are | ||
150 | * hashed to the block device on which the file system resides. | ||
151 | */ | ||
152 | |||
153 | struct xfs_buf; | 121 | struct xfs_buf; |
154 | typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); | 122 | typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); |
155 | 123 | ||
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index a55c1b46b219..52aadfbed132 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c | |||
@@ -896,6 +896,7 @@ xfs_file_fallocate( | |||
896 | xfs_flock64_t bf; | 896 | xfs_flock64_t bf; |
897 | xfs_inode_t *ip = XFS_I(inode); | 897 | xfs_inode_t *ip = XFS_I(inode); |
898 | int cmd = XFS_IOC_RESVSP; | 898 | int cmd = XFS_IOC_RESVSP; |
899 | int attr_flags = XFS_ATTR_NOLOCK; | ||
899 | 900 | ||
900 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) | 901 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) |
901 | return -EOPNOTSUPP; | 902 | return -EOPNOTSUPP; |
@@ -918,7 +919,10 @@ xfs_file_fallocate( | |||
918 | goto out_unlock; | 919 | goto out_unlock; |
919 | } | 920 | } |
920 | 921 | ||
921 | error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK); | 922 | if (file->f_flags & O_DSYNC) |
923 | attr_flags |= XFS_ATTR_SYNC; | ||
924 | |||
925 | error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags); | ||
922 | if (error) | 926 | if (error) |
923 | goto out_unlock; | 927 | goto out_unlock; |
924 | 928 | ||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 0ca0e3c024d7..acca2c5ca3fa 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
@@ -624,6 +624,10 @@ xfs_ioc_space( | |||
624 | 624 | ||
625 | if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) | 625 | if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) |
626 | attr_flags |= XFS_ATTR_NONBLOCK; | 626 | attr_flags |= XFS_ATTR_NONBLOCK; |
627 | |||
628 | if (filp->f_flags & O_DSYNC) | ||
629 | attr_flags |= XFS_ATTR_SYNC; | ||
630 | |||
627 | if (ioflags & IO_INVIS) | 631 | if (ioflags & IO_INVIS) |
628 | attr_flags |= XFS_ATTR_DMI; | 632 | attr_flags |= XFS_ATTR_DMI; |
629 | 633 | ||
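Both hunks above translate the O_DSYNC open flag into a new XFS_ATTR_SYNC attribute flag, so a preallocation is only committed as a synchronous transaction when the caller asked for data-sync semantics. A minimal sketch of that flag translation; the ATTR_ values are illustrative stand-ins whose numbers simply match the XFS_ATTR_ definitions in the xfs_vnodeops.h hunk later in this diff.

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>

#define ATTR_NOLOCK	0x04	/* mirrors XFS_ATTR_NOLOCK */
#define ATTR_SYNC	0x10	/* mirrors XFS_ATTR_SYNC */

/* decide how a preallocation should be committed, based on the open flags */
static unsigned int attr_flags_for(int open_flags)
{
	unsigned int attr_flags = ATTR_NOLOCK;

	if (open_flags & O_DSYNC)
		attr_flags |= ATTR_SYNC;	/* force a synchronous transaction */
	return attr_flags;
}

int main(void)
{
	printf("plain open  : %#x\n", attr_flags_for(O_RDWR));
	printf("O_DSYNC open: %#x\n", attr_flags_for(O_RDWR | O_DSYNC));
	return 0;
}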
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 818c4cf2de86..1ba5c451da36 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -1078,7 +1078,7 @@ xfs_fs_write_inode( | |||
1078 | error = 0; | 1078 | error = 0; |
1079 | goto out_unlock; | 1079 | goto out_unlock; |
1080 | } | 1080 | } |
1081 | error = xfs_iflush(ip, 0); | 1081 | error = xfs_iflush(ip, SYNC_TRYLOCK); |
1082 | } | 1082 | } |
1083 | 1083 | ||
1084 | out_unlock: | 1084 | out_unlock: |
@@ -1539,10 +1539,14 @@ xfs_fs_fill_super( | |||
1539 | if (error) | 1539 | if (error) |
1540 | goto out_free_sb; | 1540 | goto out_free_sb; |
1541 | 1541 | ||
1542 | error = xfs_mountfs(mp); | 1542 | /* |
1543 | if (error) | 1543 | * we must configure the block size in the superblock before we run the |
1544 | goto out_filestream_unmount; | 1544 | * full mount process as the mount process can lookup and cache inodes. |
1545 | 1545 | * For the same reason we must also initialise the syncd and register | |
1546 | * the inode cache shrinker so that inodes can be reclaimed during | ||
1547 | * operations like a quotacheck that iterate all inodes in the | ||
1548 | * filesystem. | ||
1549 | */ | ||
1546 | sb->s_magic = XFS_SB_MAGIC; | 1550 | sb->s_magic = XFS_SB_MAGIC; |
1547 | sb->s_blocksize = mp->m_sb.sb_blocksize; | 1551 | sb->s_blocksize = mp->m_sb.sb_blocksize; |
1548 | sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; | 1552 | sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; |
@@ -1550,6 +1554,16 @@ xfs_fs_fill_super( | |||
1550 | sb->s_time_gran = 1; | 1554 | sb->s_time_gran = 1; |
1551 | set_posix_acl_flag(sb); | 1555 | set_posix_acl_flag(sb); |
1552 | 1556 | ||
1557 | error = xfs_syncd_init(mp); | ||
1558 | if (error) | ||
1559 | goto out_filestream_unmount; | ||
1560 | |||
1561 | xfs_inode_shrinker_register(mp); | ||
1562 | |||
1563 | error = xfs_mountfs(mp); | ||
1564 | if (error) | ||
1565 | goto out_syncd_stop; | ||
1566 | |||
1553 | root = igrab(VFS_I(mp->m_rootip)); | 1567 | root = igrab(VFS_I(mp->m_rootip)); |
1554 | if (!root) { | 1568 | if (!root) { |
1555 | error = ENOENT; | 1569 | error = ENOENT; |
@@ -1565,14 +1579,11 @@ xfs_fs_fill_super( | |||
1565 | goto fail_vnrele; | 1579 | goto fail_vnrele; |
1566 | } | 1580 | } |
1567 | 1581 | ||
1568 | error = xfs_syncd_init(mp); | ||
1569 | if (error) | ||
1570 | goto fail_vnrele; | ||
1571 | |||
1572 | xfs_inode_shrinker_register(mp); | ||
1573 | |||
1574 | return 0; | 1582 | return 0; |
1575 | 1583 | ||
1584 | out_syncd_stop: | ||
1585 | xfs_inode_shrinker_unregister(mp); | ||
1586 | xfs_syncd_stop(mp); | ||
1576 | out_filestream_unmount: | 1587 | out_filestream_unmount: |
1577 | xfs_filestream_unmount(mp); | 1588 | xfs_filestream_unmount(mp); |
1578 | out_free_sb: | 1589 | out_free_sb: |
@@ -1596,6 +1607,9 @@ xfs_fs_fill_super( | |||
1596 | } | 1607 | } |
1597 | 1608 | ||
1598 | fail_unmount: | 1609 | fail_unmount: |
1610 | xfs_inode_shrinker_unregister(mp); | ||
1611 | xfs_syncd_stop(mp); | ||
1612 | |||
1599 | /* | 1613 | /* |
1600 | * Blow away any referenced inode in the filestreams cache. | 1614 | * Blow away any referenced inode in the filestreams cache. |
1601 | * This can and will cause log traffic as inodes go inactive | 1615 | * This can and will cause log traffic as inodes go inactive |
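The xfs_fs_fill_super() hunks above pull xfs_syncd_init() and the inode shrinker registration ahead of xfs_mountfs(), and add an out_syncd_stop unwind label so a later failure tears those earlier steps back down. A generic sketch of that goto-based error-unwinding idiom; init_a/init_b/init_c and undo_a/undo_b are invented names.

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }	/* pretend the last step fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int fill_super(void)
{
	int error;

	error = init_a();
	if (error)
		goto out;
	error = init_b();	/* must be ready before init_c() runs */
	if (error)
		goto out_undo_a;
	error = init_c();
	if (error)
		goto out_undo_b;
	return 0;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
out:
	return error;
}

int main(void)
{
	printf("fill_super() = %d\n", fill_super());
	return 0;
}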
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 6c10f1d2e3d3..594cd822d84d 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -761,8 +761,10 @@ xfs_reclaim_inode( | |||
761 | struct xfs_perag *pag, | 761 | struct xfs_perag *pag, |
762 | int sync_mode) | 762 | int sync_mode) |
763 | { | 763 | { |
764 | int error = 0; | 764 | int error; |
765 | 765 | ||
766 | restart: | ||
767 | error = 0; | ||
766 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 768 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
767 | if (!xfs_iflock_nowait(ip)) { | 769 | if (!xfs_iflock_nowait(ip)) { |
768 | if (!(sync_mode & SYNC_WAIT)) | 770 | if (!(sync_mode & SYNC_WAIT)) |
@@ -788,9 +790,31 @@ xfs_reclaim_inode( | |||
788 | if (xfs_inode_clean(ip)) | 790 | if (xfs_inode_clean(ip)) |
789 | goto reclaim; | 791 | goto reclaim; |
790 | 792 | ||
791 | /* Now we have an inode that needs flushing */ | 793 | /* |
792 | error = xfs_iflush(ip, sync_mode); | 794 | * Now we have an inode that needs flushing. |
795 | * | ||
796 | * We do a nonblocking flush here even if we are doing a SYNC_WAIT | ||
797 | * reclaim as we can deadlock with inode cluster removal. | ||
798 | * xfs_ifree_cluster() can lock the inode buffer before it locks the | ||
799 | * ip->i_lock, and we are doing the exact opposite here. As a result, | ||
800 | * doing a blocking xfs_itobp() to get the cluster buffer will result | ||
801 | * in an ABBA deadlock with xfs_ifree_cluster(). | ||
802 | * | ||
803 | * As xfs_ifree_cluster() must gather all inodes that are active in the | ||
804 | * cache to mark them stale, if we hit this case we don't actually want | ||
805 | * to do IO here - we want the inode marked stale so we can simply | ||
806 | * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush, | ||
807 | * just unlock the inode, back off and try again. Hopefully the next | ||
808 | * pass through will see the stale flag set on the inode. | ||
809 | */ | ||
810 | error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode); | ||
793 | if (sync_mode & SYNC_WAIT) { | 811 | if (sync_mode & SYNC_WAIT) { |
812 | if (error == EAGAIN) { | ||
813 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
814 | /* backoff longer than in xfs_ifree_cluster */ | ||
815 | delay(2); | ||
816 | goto restart; | ||
817 | } | ||
794 | xfs_iflock(ip); | 818 | xfs_iflock(ip); |
795 | goto reclaim; | 819 | goto reclaim; |
796 | } | 820 | } |
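The xfs_reclaim_inode() change above always issues a non-blocking flush and, when a SYNC_WAIT reclaim gets EAGAIN back, unlocks, backs off briefly and restarts from the top instead of blocking into the ABBA deadlock described in the comment. A simplified sketch of that restart pattern; try_flush() is a stand-in that fails twice before succeeding, and the locking is only indicated in comments.

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

/* stand-in for a trylock-style flush */
static int try_flush(int attempt)
{
	return attempt < 2 ? -EAGAIN : 0;
}

static int reclaim(void)
{
	int attempt = 0;
	int error;

restart:
	/* lock the object here */
	error = try_flush(attempt++);
	if (error == -EAGAIN) {
		/* unlock, back off briefly, then start over rather than block */
		usleep(2000);
		goto restart;
	}
	/* unlock and finish the reclaim */
	return error;
}

int main(void)
{
	printf("reclaim() = %d\n", reclaim());
	return 0;
}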
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index da871f532236..742c8330994a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -2835,7 +2835,7 @@ xfs_iflush( | |||
2835 | * Get the buffer containing the on-disk inode. | 2835 | * Get the buffer containing the on-disk inode. |
2836 | */ | 2836 | */ |
2837 | error = xfs_itobp(mp, NULL, ip, &dip, &bp, | 2837 | error = xfs_itobp(mp, NULL, ip, &dip, &bp, |
2838 | (flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK); | 2838 | (flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK); |
2839 | if (error || !bp) { | 2839 | if (error || !bp) { |
2840 | xfs_ifunlock(ip); | 2840 | xfs_ifunlock(ip); |
2841 | return error; | 2841 | return error; |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index fd4f398bd6f1..46cc40131d4a 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -760,11 +760,11 @@ xfs_inode_item_push( | |||
760 | * Push the inode to it's backing buffer. This will not remove the | 760 | * Push the inode to it's backing buffer. This will not remove the |
761 | * inode from the AIL - a further push will be required to trigger a | 761 | * inode from the AIL - a further push will be required to trigger a |
762 | * buffer push. However, this allows all the dirty inodes to be pushed | 762 | * buffer push. However, this allows all the dirty inodes to be pushed |
763 | * to the buffer before it is pushed to disk. THe buffer IO completion | 763 | * to the buffer before it is pushed to disk. The buffer IO completion |
764 | * will pull th einode from the AIL, mark it clean and unlock the flush | 764 | * will pull the inode from the AIL, mark it clean and unlock the flush |
765 | * lock. | 765 | * lock. |
766 | */ | 766 | */ |
767 | (void) xfs_iflush(ip, 0); | 767 | (void) xfs_iflush(ip, SYNC_TRYLOCK); |
768 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 768 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
769 | } | 769 | } |
770 | 770 | ||
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 3bea66132334..03b3b7f85a3b 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -383,7 +383,8 @@ xfs_trans_read_buf( | |||
383 | bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK); | 383 | bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK); |
384 | if (bp == NULL) { | 384 | if (bp == NULL) { |
385 | *bpp = NULL; | 385 | *bpp = NULL; |
386 | return 0; | 386 | return (flags & XBF_TRYLOCK) ? |
387 | 0 : XFS_ERROR(ENOMEM); | ||
387 | } | 388 | } |
388 | if (XFS_BUF_GETERROR(bp) != 0) { | 389 | if (XFS_BUF_GETERROR(bp) != 0) { |
389 | XFS_BUF_SUPER_STALE(bp); | 390 | XFS_BUF_SUPER_STALE(bp); |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 37d8146ee15b..c48b4217ec47 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -2831,7 +2831,8 @@ xfs_change_file_space( | |||
2831 | ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC; | 2831 | ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC; |
2832 | 2832 | ||
2833 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 2833 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
2834 | xfs_trans_set_sync(tp); | 2834 | if (attr_flags & XFS_ATTR_SYNC) |
2835 | xfs_trans_set_sync(tp); | ||
2835 | 2836 | ||
2836 | error = xfs_trans_commit(tp, 0); | 2837 | error = xfs_trans_commit(tp, 0); |
2837 | 2838 | ||
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index f6702927eee4..3bcd23353d6c 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h | |||
@@ -18,6 +18,7 @@ int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags); | |||
18 | #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ | 18 | #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ |
19 | #define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ | 19 | #define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ |
20 | #define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */ | 20 | #define XFS_ATTR_NOACL 0x08 /* Don't call xfs_acl_chmod */ |
21 | #define XFS_ATTR_SYNC 0x10 /* synchronous operation required */ | ||
21 | 22 | ||
22 | int xfs_readlink(struct xfs_inode *ip, char *link); | 23 | int xfs_readlink(struct xfs_inode *ip, char *link); |
23 | int xfs_release(struct xfs_inode *ip); | 24 | int xfs_release(struct xfs_inode *ip); |
diff --git a/include/linux/can/core.h b/include/linux/can/core.h index 6c507bea275f..6f70a6d3a16e 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h | |||
@@ -36,10 +36,10 @@ | |||
36 | * @prot: pointer to struct proto structure. | 36 | * @prot: pointer to struct proto structure. |
37 | */ | 37 | */ |
38 | struct can_proto { | 38 | struct can_proto { |
39 | int type; | 39 | int type; |
40 | int protocol; | 40 | int protocol; |
41 | struct proto_ops *ops; | 41 | const struct proto_ops *ops; |
42 | struct proto *prot; | 42 | struct proto *prot; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | /* function prototypes for the CAN networklayer core (af_can.c) */ | 45 | /* function prototypes for the CAN networklayer core (af_can.c) */ |
@@ -58,5 +58,6 @@ extern void can_rx_unregister(struct net_device *dev, canid_t can_id, | |||
58 | void *data); | 58 | void *data); |
59 | 59 | ||
60 | extern int can_send(struct sk_buff *skb, int loop); | 60 | extern int can_send(struct sk_buff *skb, int loop); |
61 | extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); | ||
61 | 62 | ||
62 | #endif /* CAN_CORE_H */ | 63 | #endif /* CAN_CORE_H */ |
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index ae757bcf1280..c8fcbdd2b0e7 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
@@ -680,6 +680,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data); | |||
680 | u32 ethtool_op_get_flags(struct net_device *dev); | 680 | u32 ethtool_op_get_flags(struct net_device *dev); |
681 | int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); | 681 | int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); |
682 | void ethtool_ntuple_flush(struct net_device *dev); | 682 | void ethtool_ntuple_flush(struct net_device *dev); |
683 | bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); | ||
683 | 684 | ||
684 | /** | 685 | /** |
685 | * &ethtool_ops - Alter and report network device settings | 686 | * &ethtool_ops - Alter and report network device settings |
diff --git a/include/linux/fs.h b/include/linux/fs.h index b677bd77f2d6..52f283c1edb2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -357,6 +357,8 @@ struct inodes_stat_t { | |||
357 | #define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ | 357 | #define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ |
358 | #define FS_EXTENT_FL 0x00080000 /* Extents */ | 358 | #define FS_EXTENT_FL 0x00080000 /* Extents */ |
359 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ | 359 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ |
360 | #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ | ||
361 | #define FS_COW_FL 0x02000000 /* Cow file */ | ||
360 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ | 362 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ |
361 | 363 | ||
362 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ | 364 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ |
diff --git a/include/linux/input.h b/include/linux/input.h index 056ae8a5bd9b..f3a7794a18c4 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -664,6 +664,13 @@ struct input_keymap_entry { | |||
664 | #define KEY_TOUCHPAD_ON 0x213 | 664 | #define KEY_TOUCHPAD_ON 0x213 |
665 | #define KEY_TOUCHPAD_OFF 0x214 | 665 | #define KEY_TOUCHPAD_OFF 0x214 |
666 | 666 | ||
667 | #define KEY_CAMERA_ZOOMIN 0x215 | ||
668 | #define KEY_CAMERA_ZOOMOUT 0x216 | ||
669 | #define KEY_CAMERA_UP 0x217 | ||
670 | #define KEY_CAMERA_DOWN 0x218 | ||
671 | #define KEY_CAMERA_LEFT 0x219 | ||
672 | #define KEY_CAMERA_RIGHT 0x21a | ||
673 | |||
667 | #define BTN_TRIGGER_HAPPY 0x2c0 | 674 | #define BTN_TRIGGER_HAPPY 0x2c0 |
668 | #define BTN_TRIGGER_HAPPY1 0x2c0 | 675 | #define BTN_TRIGGER_HAPPY1 0x2c0 |
669 | #define BTN_TRIGGER_HAPPY2 0x2c1 | 676 | #define BTN_TRIGGER_HAPPY2 0x2c1 |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 5d876c9b3a3d..2a375a72ce3c 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -92,18 +92,6 @@ enum { | |||
92 | IRQ_NO_BALANCING = (1 << 13), | 92 | IRQ_NO_BALANCING = (1 << 13), |
93 | IRQ_MOVE_PCNTXT = (1 << 14), | 93 | IRQ_MOVE_PCNTXT = (1 << 14), |
94 | IRQ_NESTED_THREAD = (1 << 15), | 94 | IRQ_NESTED_THREAD = (1 << 15), |
95 | |||
96 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
97 | IRQ_INPROGRESS = (1 << 16), | ||
98 | IRQ_REPLAY = (1 << 17), | ||
99 | IRQ_WAITING = (1 << 18), | ||
100 | IRQ_DISABLED = (1 << 19), | ||
101 | IRQ_PENDING = (1 << 20), | ||
102 | IRQ_MASKED = (1 << 21), | ||
103 | IRQ_MOVE_PENDING = (1 << 22), | ||
104 | IRQ_AFFINITY_SET = (1 << 23), | ||
105 | IRQ_WAKEUP = (1 << 24), | ||
106 | #endif | ||
107 | }; | 95 | }; |
108 | 96 | ||
109 | #define IRQF_MODIFY_MASK \ | 97 | #define IRQF_MODIFY_MASK \ |
@@ -135,7 +123,7 @@ struct msi_desc; | |||
135 | * struct irq_data - per irq and irq chip data passed down to chip functions | 123 | * struct irq_data - per irq and irq chip data passed down to chip functions |
136 | * @irq: interrupt number | 124 | * @irq: interrupt number |
137 | * @node: node index useful for balancing | 125 | * @node: node index useful for balancing |
138 | * @state_use_accessor: status information for irq chip functions. | 126 | * @state_use_accessors: status information for irq chip functions. |
139 | * Use accessor functions to deal with it | 127 | * Use accessor functions to deal with it |
140 | * @chip: low level interrupt hardware access | 128 | * @chip: low level interrupt hardware access |
141 | * @handler_data: per-IRQ data for the irq_chip methods | 129 | * @handler_data: per-IRQ data for the irq_chip methods |
@@ -174,6 +162,9 @@ struct irq_data { | |||
174 | * from suspend | 162 | * from suspend |
175 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process | 163 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process |
176 | * context | 164 | * context |
165 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt | ||
166 | * IRQD_IRQ_MASKED - Masked state of the interrupt | ||
167 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt | ||
177 | */ | 168 | */ |
178 | enum { | 169 | enum { |
179 | IRQD_TRIGGER_MASK = 0xf, | 170 | IRQD_TRIGGER_MASK = 0xf, |
@@ -184,6 +175,9 @@ enum { | |||
184 | IRQD_LEVEL = (1 << 13), | 175 | IRQD_LEVEL = (1 << 13), |
185 | IRQD_WAKEUP_STATE = (1 << 14), | 176 | IRQD_WAKEUP_STATE = (1 << 14), |
186 | IRQD_MOVE_PCNTXT = (1 << 15), | 177 | IRQD_MOVE_PCNTXT = (1 << 15), |
178 | IRQD_IRQ_DISABLED = (1 << 16), | ||
179 | IRQD_IRQ_MASKED = (1 << 17), | ||
180 | IRQD_IRQ_INPROGRESS = (1 << 18), | ||
187 | }; | 181 | }; |
188 | 182 | ||
189 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) | 183 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) |
@@ -206,6 +200,11 @@ static inline bool irqd_affinity_was_set(struct irq_data *d) | |||
206 | return d->state_use_accessors & IRQD_AFFINITY_SET; | 200 | return d->state_use_accessors & IRQD_AFFINITY_SET; |
207 | } | 201 | } |
208 | 202 | ||
203 | static inline void irqd_mark_affinity_was_set(struct irq_data *d) | ||
204 | { | ||
205 | d->state_use_accessors |= IRQD_AFFINITY_SET; | ||
206 | } | ||
207 | |||
209 | static inline u32 irqd_get_trigger_type(struct irq_data *d) | 208 | static inline u32 irqd_get_trigger_type(struct irq_data *d) |
210 | { | 209 | { |
211 | return d->state_use_accessors & IRQD_TRIGGER_MASK; | 210 | return d->state_use_accessors & IRQD_TRIGGER_MASK; |
@@ -235,6 +234,36 @@ static inline bool irqd_can_move_in_process_context(struct irq_data *d) | |||
235 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; | 234 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; |
236 | } | 235 | } |
237 | 236 | ||
237 | static inline bool irqd_irq_disabled(struct irq_data *d) | ||
238 | { | ||
239 | return d->state_use_accessors & IRQD_IRQ_DISABLED; | ||
240 | } | ||
241 | |||
242 | static inline bool irqd_irq_masked(struct irq_data *d) | ||
243 | { | ||
244 | return d->state_use_accessors & IRQD_IRQ_MASKED; | ||
245 | } | ||
246 | |||
247 | static inline bool irqd_irq_inprogress(struct irq_data *d) | ||
248 | { | ||
249 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Functions for chained handlers which can be enabled/disabled by the | ||
254 | * standard disable_irq/enable_irq calls. Must be called with | ||
255 | * irq_desc->lock held. | ||
256 | */ | ||
257 | static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) | ||
258 | { | ||
259 | d->state_use_accessors |= IRQD_IRQ_INPROGRESS; | ||
260 | } | ||
261 | |||
262 | static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | ||
263 | { | ||
264 | d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; | ||
265 | } | ||
266 | |||
238 | /** | 267 | /** |
239 | * struct irq_chip - hardware interrupt chip descriptor | 268 | * struct irq_chip - hardware interrupt chip descriptor |
240 | * | 269 | * |
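The new irqd_irq_disabled(), irqd_irq_masked() and irqd_irq_inprogress() helpers above are thin accessors over bits in irq_data::state_use_accessors, replacing the old IRQ_DISABLED-style status flags removed earlier in this hunk series. A small standalone sketch of the same accessors-over-a-flags-word pattern; struct irq_state and the STATE_ values are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

#define STATE_DISABLED		(1u << 16)
#define STATE_MASKED		(1u << 17)
#define STATE_INPROGRESS	(1u << 18)

struct irq_state {
	unsigned int bits;	/* stands in for irq_data::state_use_accessors */
};

static inline bool state_disabled(const struct irq_state *s)
{
	return s->bits & STATE_DISABLED;
}

static inline bool state_inprogress(const struct irq_state *s)
{
	return s->bits & STATE_INPROGRESS;
}

/* callers manipulate the word only through helpers like these */
static inline void state_set_inprogress(struct irq_state *s)
{
	s->bits |= STATE_INPROGRESS;
}

static inline void state_clr_inprogress(struct irq_state *s)
{
	s->bits &= ~STATE_INPROGRESS;
}

int main(void)
{
	struct irq_state s = { .bits = STATE_DISABLED };

	state_set_inprogress(&s);
	printf("disabled=%d inprogress=%d\n", state_disabled(&s), state_inprogress(&s));
	state_clr_inprogress(&s);
	printf("inprogress after clear=%d\n", state_inprogress(&s));
	return 0;
}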
@@ -271,6 +300,8 @@ static inline bool irqd_can_move_in_process_context(struct irq_data *d) | |||
271 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | 300 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ |
272 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | 301 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips |
273 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | 302 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
303 | * @irq_cpu_online: configure an interrupt source for a secondary CPU | ||
304 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU | ||
274 | * @irq_print_chip: optional to print special chip info in show_interrupts | 305 | * @irq_print_chip: optional to print special chip info in show_interrupts |
275 | * @flags: chip specific flags | 306 | * @flags: chip specific flags |
276 | * | 307 | * |
@@ -278,28 +309,6 @@ static inline bool irqd_can_move_in_process_context(struct irq_data *d) | |||
278 | */ | 309 | */ |
279 | struct irq_chip { | 310 | struct irq_chip { |
280 | const char *name; | 311 | const char *name; |
281 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
282 | unsigned int (*startup)(unsigned int irq); | ||
283 | void (*shutdown)(unsigned int irq); | ||
284 | void (*enable)(unsigned int irq); | ||
285 | void (*disable)(unsigned int irq); | ||
286 | |||
287 | void (*ack)(unsigned int irq); | ||
288 | void (*mask)(unsigned int irq); | ||
289 | void (*mask_ack)(unsigned int irq); | ||
290 | void (*unmask)(unsigned int irq); | ||
291 | void (*eoi)(unsigned int irq); | ||
292 | |||
293 | void (*end)(unsigned int irq); | ||
294 | int (*set_affinity)(unsigned int irq, | ||
295 | const struct cpumask *dest); | ||
296 | int (*retrigger)(unsigned int irq); | ||
297 | int (*set_type)(unsigned int irq, unsigned int flow_type); | ||
298 | int (*set_wake)(unsigned int irq, unsigned int on); | ||
299 | |||
300 | void (*bus_lock)(unsigned int irq); | ||
301 | void (*bus_sync_unlock)(unsigned int irq); | ||
302 | #endif | ||
303 | unsigned int (*irq_startup)(struct irq_data *data); | 312 | unsigned int (*irq_startup)(struct irq_data *data); |
304 | void (*irq_shutdown)(struct irq_data *data); | 313 | void (*irq_shutdown)(struct irq_data *data); |
305 | void (*irq_enable)(struct irq_data *data); | 314 | void (*irq_enable)(struct irq_data *data); |
@@ -319,6 +328,9 @@ struct irq_chip { | |||
319 | void (*irq_bus_lock)(struct irq_data *data); | 328 | void (*irq_bus_lock)(struct irq_data *data); |
320 | void (*irq_bus_sync_unlock)(struct irq_data *data); | 329 | void (*irq_bus_sync_unlock)(struct irq_data *data); |
321 | 330 | ||
331 | void (*irq_cpu_online)(struct irq_data *data); | ||
332 | void (*irq_cpu_offline)(struct irq_data *data); | ||
333 | |||
322 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); | 334 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); |
323 | 335 | ||
324 | unsigned long flags; | 336 | unsigned long flags; |
@@ -335,11 +347,14 @@ struct irq_chip { | |||
335 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() | 347 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() |
336 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled | 348 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled |
337 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path | 349 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path |
350 | * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks | ||
351 | * when irq enabled | ||
338 | */ | 352 | */ |
339 | enum { | 353 | enum { |
340 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), | 354 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), |
341 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), | 355 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), |
342 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), | 356 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), |
357 | IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), | ||
343 | }; | 358 | }; |
344 | 359 | ||
345 | /* This include will go away once we isolated irq_desc usage to core code */ | 360 | /* This include will go away once we isolated irq_desc usage to core code */ |
@@ -364,25 +379,22 @@ struct irqaction; | |||
364 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 379 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
365 | extern void remove_irq(unsigned int irq, struct irqaction *act); | 380 | extern void remove_irq(unsigned int irq, struct irqaction *act); |
366 | 381 | ||
382 | extern void irq_cpu_online(void); | ||
383 | extern void irq_cpu_offline(void); | ||
384 | extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask); | ||
385 | |||
367 | #ifdef CONFIG_GENERIC_HARDIRQS | 386 | #ifdef CONFIG_GENERIC_HARDIRQS |
368 | 387 | ||
369 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) | 388 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
370 | void move_native_irq(int irq); | ||
371 | void move_masked_irq(int irq); | ||
372 | void irq_move_irq(struct irq_data *data); | 389 | void irq_move_irq(struct irq_data *data); |
373 | void irq_move_masked_irq(struct irq_data *data); | 390 | void irq_move_masked_irq(struct irq_data *data); |
374 | #else | 391 | #else |
375 | static inline void move_native_irq(int irq) { } | ||
376 | static inline void move_masked_irq(int irq) { } | ||
377 | static inline void irq_move_irq(struct irq_data *data) { } | 392 | static inline void irq_move_irq(struct irq_data *data) { } |
378 | static inline void irq_move_masked_irq(struct irq_data *data) { } | 393 | static inline void irq_move_masked_irq(struct irq_data *data) { } |
379 | #endif | 394 | #endif |
380 | 395 | ||
381 | extern int no_irq_affinity; | 396 | extern int no_irq_affinity; |
382 | 397 | ||
383 | /* Handle irq action chains: */ | ||
384 | extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); | ||
385 | |||
386 | /* | 398 | /* |
387 | * Built-in IRQ handlers for various IRQ types, | 399 | * Built-in IRQ handlers for various IRQ types, |
388 | * callable via desc->handle_irq() | 400 | * callable via desc->handle_irq() |
@@ -390,6 +402,7 @@ extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); | |||
390 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); | 402 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); |
391 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); | 403 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); |
392 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); | 404 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); |
405 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); | ||
393 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); | 406 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); |
394 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | 407 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); |
395 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 408 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); |
@@ -538,89 +551,6 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) | |||
538 | return d->msi_desc; | 551 | return d->msi_desc; |
539 | } | 552 | } |
540 | 553 | ||
541 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
542 | /* Please do not use: Use the replacement functions instead */ | ||
543 | static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip) | ||
544 | { | ||
545 | return irq_set_chip(irq, chip); | ||
546 | } | ||
547 | static inline int set_irq_data(unsigned int irq, void *data) | ||
548 | { | ||
549 | return irq_set_handler_data(irq, data); | ||
550 | } | ||
551 | static inline int set_irq_chip_data(unsigned int irq, void *data) | ||
552 | { | ||
553 | return irq_set_chip_data(irq, data); | ||
554 | } | ||
555 | static inline int set_irq_type(unsigned int irq, unsigned int type) | ||
556 | { | ||
557 | return irq_set_irq_type(irq, type); | ||
558 | } | ||
559 | static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry) | ||
560 | { | ||
561 | return irq_set_msi_desc(irq, entry); | ||
562 | } | ||
563 | static inline struct irq_chip *get_irq_chip(unsigned int irq) | ||
564 | { | ||
565 | return irq_get_chip(irq); | ||
566 | } | ||
567 | static inline void *get_irq_chip_data(unsigned int irq) | ||
568 | { | ||
569 | return irq_get_chip_data(irq); | ||
570 | } | ||
571 | static inline void *get_irq_data(unsigned int irq) | ||
572 | { | ||
573 | return irq_get_handler_data(irq); | ||
574 | } | ||
575 | static inline void *irq_data_get_irq_data(struct irq_data *d) | ||
576 | { | ||
577 | return irq_data_get_irq_handler_data(d); | ||
578 | } | ||
579 | static inline struct msi_desc *get_irq_msi(unsigned int irq) | ||
580 | { | ||
581 | return irq_get_msi_desc(irq); | ||
582 | } | ||
583 | static inline void set_irq_noprobe(unsigned int irq) | ||
584 | { | ||
585 | irq_set_noprobe(irq); | ||
586 | } | ||
587 | static inline void set_irq_probe(unsigned int irq) | ||
588 | { | ||
589 | irq_set_probe(irq); | ||
590 | } | ||
591 | static inline void set_irq_nested_thread(unsigned int irq, int nest) | ||
592 | { | ||
593 | irq_set_nested_thread(irq, nest); | ||
594 | } | ||
595 | static inline void | ||
596 | set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | ||
597 | irq_flow_handler_t handle, const char *name) | ||
598 | { | ||
599 | irq_set_chip_and_handler_name(irq, chip, handle, name); | ||
600 | } | ||
601 | static inline void | ||
602 | set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, | ||
603 | irq_flow_handler_t handle) | ||
604 | { | ||
605 | irq_set_chip_and_handler(irq, chip, handle); | ||
606 | } | ||
607 | static inline void | ||
608 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | ||
609 | const char *name) | ||
610 | { | ||
611 | __irq_set_handler(irq, handle, is_chained, name); | ||
612 | } | ||
613 | static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle) | ||
614 | { | ||
615 | irq_set_handler(irq, handle); | ||
616 | } | ||
617 | static inline void | ||
618 | set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle) | ||
619 | { | ||
620 | irq_set_chained_handler(irq, handle); | ||
621 | } | ||
622 | #endif | ||
623 | |||
624 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); | 554 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); |
625 | void irq_free_descs(unsigned int irq, unsigned int cnt); | 555 | void irq_free_descs(unsigned int irq, unsigned int cnt); |
626 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | 556 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); |
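
The hunks above finish moving struct irq_chip from callbacks keyed on a bare irq number to callbacks that receive a struct irq_data, and (under GENERIC_HARDIRQS_NO_COMPAT) drop the old set_irq_*/get_irq_* wrappers in favour of the irq_set_*/irq_get_* names. Below is a minimal, hedged sketch of what a driver looks like against the API that remains; struct foo_chip, its mask_cache and the foo_* names are invented for illustration, while irq_set_chip_data(), irq_set_chip_and_handler(), irq_data_get_irq_chip_data() and handle_level_irq() come from this header.

#include <linux/irq.h>

/* Hypothetical controller state; not part of the kernel API. */
struct foo_chip {
	unsigned int irq_base;
	u32 mask_cache;
};

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_chip *chip = irq_data_get_irq_chip_data(d);

	/* d->irq replaces the old "unsigned int irq" argument */
	chip->mask_cache |= 1u << (d->irq - chip->irq_base);
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct foo_chip *chip = irq_data_get_irq_chip_data(d);

	chip->mask_cache &= ~(1u << (d->irq - chip->irq_base));
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

static void foo_setup_irqs(struct foo_chip *chip, unsigned int nr_irqs)
{
	unsigned int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_chip_data(chip->irq_base + i, chip);
		irq_set_chip_and_handler(chip->irq_base + i, &foo_irq_chip,
					 handle_level_irq);
	}
}
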
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 15e6c3905f41..a082905b5ebe 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -35,32 +35,7 @@ struct timer_rand_state; | |||
35 | * @name: flow handler name for /proc/interrupts output | 35 | * @name: flow handler name for /proc/interrupts output |
36 | */ | 36 | */ |
37 | struct irq_desc { | 37 | struct irq_desc { |
38 | |||
39 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
40 | struct irq_data irq_data; | 38 | struct irq_data irq_data; |
41 | #else | ||
42 | /* | ||
43 | * This union will go away, once we fixed the direct access to | ||
44 | * irq_desc all over the place. The direct fields are a 1:1 | ||
45 | * overlay of irq_data. | ||
46 | */ | ||
47 | union { | ||
48 | struct irq_data irq_data; | ||
49 | struct { | ||
50 | unsigned int irq; | ||
51 | unsigned int node; | ||
52 | unsigned int pad_do_not_even_think_about_it; | ||
53 | struct irq_chip *chip; | ||
54 | void *handler_data; | ||
55 | void *chip_data; | ||
56 | struct msi_desc *msi_desc; | ||
57 | #ifdef CONFIG_SMP | ||
58 | cpumask_var_t affinity; | ||
59 | #endif | ||
60 | }; | ||
61 | }; | ||
62 | #endif | ||
63 | |||
64 | struct timer_rand_state *timer_rand_state; | 39 | struct timer_rand_state *timer_rand_state; |
65 | unsigned int __percpu *kstat_irqs; | 40 | unsigned int __percpu *kstat_irqs; |
66 | irq_flow_handler_t handle_irq; | 41 | irq_flow_handler_t handle_irq; |
@@ -68,11 +43,7 @@ struct irq_desc { | |||
68 | irq_preflow_handler_t preflow_handler; | 43 | irq_preflow_handler_t preflow_handler; |
69 | #endif | 44 | #endif |
70 | struct irqaction *action; /* IRQ action list */ | 45 | struct irqaction *action; /* IRQ action list */ |
71 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
72 | unsigned int status_use_accessors; | 46 | unsigned int status_use_accessors; |
73 | #else | ||
74 | unsigned int status; /* IRQ status */ | ||
75 | #endif | ||
76 | unsigned int core_internal_state__do_not_mess_with_it; | 47 | unsigned int core_internal_state__do_not_mess_with_it; |
77 | unsigned int depth; /* nested irq disables */ | 48 | unsigned int depth; /* nested irq disables */ |
78 | unsigned int wake_depth; /* nested wake enables */ | 49 | unsigned int wake_depth; /* nested wake enables */ |
@@ -127,27 +98,6 @@ static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | |||
127 | return desc->irq_data.msi_desc; | 98 | return desc->irq_data.msi_desc; |
128 | } | 99 | } |
129 | 100 | ||
130 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
131 | static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc) | ||
132 | { | ||
133 | return irq_desc_get_chip(desc); | ||
134 | } | ||
135 | static inline void *get_irq_desc_data(struct irq_desc *desc) | ||
136 | { | ||
137 | return irq_desc_get_handler_data(desc); | ||
138 | } | ||
139 | |||
140 | static inline void *get_irq_desc_chip_data(struct irq_desc *desc) | ||
141 | { | ||
142 | return irq_desc_get_chip_data(desc); | ||
143 | } | ||
144 | |||
145 | static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc) | ||
146 | { | ||
147 | return irq_desc_get_msi_desc(desc); | ||
148 | } | ||
149 | #endif | ||
150 | |||
151 | /* | 101 | /* |
152 | * Architectures call this to let the generic IRQ layer | 102 | * Architectures call this to let the generic IRQ layer |
153 | * handle an interrupt. If the descriptor is attached to an | 103 | * handle an interrupt. If the descriptor is attached to an |
@@ -194,21 +144,13 @@ __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, | |||
194 | desc->name = name; | 144 | desc->name = name; |
195 | } | 145 | } |
196 | 146 | ||
197 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
198 | static inline void __set_irq_handler_unlocked(int irq, | ||
199 | irq_flow_handler_t handler) | ||
200 | { | ||
201 | __irq_set_handler_locked(irq, handler); | ||
202 | } | ||
203 | |||
204 | static inline int irq_balancing_disabled(unsigned int irq) | 147 | static inline int irq_balancing_disabled(unsigned int irq) |
205 | { | 148 | { |
206 | struct irq_desc *desc; | 149 | struct irq_desc *desc; |
207 | 150 | ||
208 | desc = irq_to_desc(irq); | 151 | desc = irq_to_desc(irq); |
209 | return desc->status & IRQ_NO_BALANCING_MASK; | 152 | return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; |
210 | } | 153 | } |
211 | #endif | ||
212 | 154 | ||
213 | static inline void | 155 | static inline void |
214 | irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) | 156 | irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) |
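
With the compat union and the raw desc->status field gone, code outside kernel/irq/ is expected to reach descriptor state through the accessor helpers that remain above (irq_balancing_disabled() itself now reads status_use_accessors). A small, hedged sketch of driver-side code written that way follows; example_report_irq and the printed text are invented, the helpers are the real ones from this header.

#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

static void example_report_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip;

	if (!desc)
		return;

	/* reach chip and chip_data through helpers, not raw desc fields */
	chip = irq_desc_get_chip(desc);

	pr_info("irq %u: chip %s, chip_data %p, balancing %s\n",
		irq, chip ? chip->name : "none",
		irq_desc_get_chip_data(desc),
		irq_balancing_disabled(irq) ? "disabled" : "enabled");
}
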
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h index 6e4f77ef4d20..b31843075198 100644 --- a/include/linux/mfd/ab8500.h +++ b/include/linux/mfd/ab8500.h | |||
@@ -74,6 +74,45 @@ | |||
74 | #define AB8500_INT_ACC_DETECT_21DB_F 37 | 74 | #define AB8500_INT_ACC_DETECT_21DB_F 37 |
75 | #define AB8500_INT_ACC_DETECT_21DB_R 38 | 75 | #define AB8500_INT_ACC_DETECT_21DB_R 38 |
76 | #define AB8500_INT_GP_SW_ADC_CONV_END 39 | 76 | #define AB8500_INT_GP_SW_ADC_CONV_END 39 |
77 | #define AB8500_INT_ACC_DETECT_1DB_F 33 | ||
78 | #define AB8500_INT_ACC_DETECT_1DB_R 34 | ||
79 | #define AB8500_INT_ACC_DETECT_22DB_F 35 | ||
80 | #define AB8500_INT_ACC_DETECT_22DB_R 36 | ||
81 | #define AB8500_INT_ACC_DETECT_21DB_F 37 | ||
82 | #define AB8500_INT_ACC_DETECT_21DB_R 38 | ||
83 | #define AB8500_INT_GP_SW_ADC_CONV_END 39 | ||
84 | #define AB8500_INT_GPIO6R 40 | ||
85 | #define AB8500_INT_GPIO7R 41 | ||
86 | #define AB8500_INT_GPIO8R 42 | ||
87 | #define AB8500_INT_GPIO9R 43 | ||
88 | #define AB8500_INT_GPIO10R 44 | ||
89 | #define AB8500_INT_GPIO11R 45 | ||
90 | #define AB8500_INT_GPIO12R 46 | ||
91 | #define AB8500_INT_GPIO13R 47 | ||
92 | #define AB8500_INT_GPIO24R 48 | ||
93 | #define AB8500_INT_GPIO25R 49 | ||
94 | #define AB8500_INT_GPIO36R 50 | ||
95 | #define AB8500_INT_GPIO37R 51 | ||
96 | #define AB8500_INT_GPIO38R 52 | ||
97 | #define AB8500_INT_GPIO39R 53 | ||
98 | #define AB8500_INT_GPIO40R 54 | ||
99 | #define AB8500_INT_GPIO41R 55 | ||
100 | #define AB8500_INT_GPIO6F 56 | ||
101 | #define AB8500_INT_GPIO7F 57 | ||
102 | #define AB8500_INT_GPIO8F 58 | ||
103 | #define AB8500_INT_GPIO9F 59 | ||
104 | #define AB8500_INT_GPIO10F 60 | ||
105 | #define AB8500_INT_GPIO11F 61 | ||
106 | #define AB8500_INT_GPIO12F 62 | ||
107 | #define AB8500_INT_GPIO13F 63 | ||
108 | #define AB8500_INT_GPIO24F 64 | ||
109 | #define AB8500_INT_GPIO25F 65 | ||
110 | #define AB8500_INT_GPIO36F 66 | ||
111 | #define AB8500_INT_GPIO37F 67 | ||
112 | #define AB8500_INT_GPIO38F 68 | ||
113 | #define AB8500_INT_GPIO39F 69 | ||
114 | #define AB8500_INT_GPIO40F 70 | ||
115 | #define AB8500_INT_GPIO41F 71 | ||
77 | #define AB8500_INT_ADP_SOURCE_ERROR 72 | 116 | #define AB8500_INT_ADP_SOURCE_ERROR 72 |
78 | #define AB8500_INT_ADP_SINK_ERROR 73 | 117 | #define AB8500_INT_ADP_SINK_ERROR 73 |
79 | #define AB8500_INT_ADP_PROBE_PLUG 74 | 118 | #define AB8500_INT_ADP_PROBE_PLUG 74 |
@@ -141,6 +180,7 @@ struct ab8500 { | |||
141 | 180 | ||
142 | struct regulator_reg_init; | 181 | struct regulator_reg_init; |
143 | struct regulator_init_data; | 182 | struct regulator_init_data; |
183 | struct ab8500_gpio_platform_data; | ||
144 | 184 | ||
145 | /** | 185 | /** |
146 | * struct ab8500_platform_data - AB8500 platform data | 186 | * struct ab8500_platform_data - AB8500 platform data |
@@ -158,6 +198,7 @@ struct ab8500_platform_data { | |||
158 | struct ab8500_regulator_reg_init *regulator_reg_init; | 198 | struct ab8500_regulator_reg_init *regulator_reg_init; |
159 | int num_regulator; | 199 | int num_regulator; |
160 | struct regulator_init_data *regulator; | 200 | struct regulator_init_data *regulator; |
201 | struct ab8500_gpio_platform_data *gpio; | ||
161 | }; | 202 | }; |
162 | 203 | ||
163 | extern int __devinit ab8500_init(struct ab8500 *ab8500); | 204 | extern int __devinit ab8500_init(struct ab8500 *ab8500); |
diff --git a/include/linux/mfd/ab8500/gpio.h b/include/linux/mfd/ab8500/gpio.h new file mode 100644 index 000000000000..488a8c920a29 --- /dev/null +++ b/include/linux/mfd/ab8500/gpio.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright ST-Ericsson 2010. | ||
3 | * | ||
4 | * Author: Bibek Basu <bibek.basu@stericsson.com> | ||
5 | * Licensed under GPLv2. | ||
6 | */ | ||
7 | |||
8 | #ifndef _AB8500_GPIO_H | ||
9 | #define _AB8500_GPIO_H | ||
10 | |||
11 | /* | ||
12 | * Platform data to register a block: only the initial gpio/irq number. | ||
13 | */ | ||
14 | |||
15 | struct ab8500_gpio_platform_data { | ||
16 | int gpio_base; | ||
17 | u32 irq_base; | ||
18 | u8 config_reg[7]; | ||
19 | }; | ||
20 | |||
21 | #endif /* _AB8500_GPIO_H */ | ||
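
This header pairs with the new ->gpio member added to ab8500_platform_data earlier in the diff. A hedged board-code sketch of wiring the two together is below; the base numbers and config_reg contents are made-up example values, and the idea that the seven bytes seed the GPIO configuration registers is an assumption of this sketch, not something the header states.

#include <linux/mfd/ab8500.h>
#include <linux/mfd/ab8500/gpio.h>

static struct ab8500_gpio_platform_data board_ab8500_gpio = {
	.gpio_base	= 268,	/* example: first AB8500 GPIO number */
	.irq_base	= 180,	/* example: first AB8500 GPIO interrupt */
	/* example initial values for the seven GPIO config registers */
	.config_reg	= { 0x00, 0x1e, 0x80, 0x01, 0x78, 0x00, 0x00 },
};

static struct ab8500_platform_data board_ab8500 = {
	/* regulator and regulator_reg_init setup omitted for brevity */
	.gpio		= &board_ab8500_gpio,
};
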
diff --git a/include/linux/mfd/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h index c981b959760f..c981b959760f 100644 --- a/include/linux/mfd/sh_mobile_sdhi.h +++ b/include/linux/mmc/sh_mobile_sdhi.h | |||
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h new file mode 100644 index 000000000000..19490b942db0 --- /dev/null +++ b/include/linux/mmc/tmio.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * include/linux/mmc/tmio.h | ||
3 | * | ||
4 | * Copyright (C) 2007 Ian Molton | ||
5 | * Copyright (C) 2004 Ian Molton | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Driver for the MMC / SD / SDIO cell found in: | ||
12 | * | ||
13 | * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 | ||
14 | */ | ||
15 | #ifndef _LINUX_MMC_TMIO_H_ | ||
16 | #define _LINUX_MMC_TMIO_H_ | ||
17 | |||
18 | #define CTL_SD_CMD 0x00 | ||
19 | #define CTL_ARG_REG 0x04 | ||
20 | #define CTL_STOP_INTERNAL_ACTION 0x08 | ||
21 | #define CTL_XFER_BLK_COUNT 0xa | ||
22 | #define CTL_RESPONSE 0x0c | ||
23 | #define CTL_STATUS 0x1c | ||
24 | #define CTL_IRQ_MASK 0x20 | ||
25 | #define CTL_SD_CARD_CLK_CTL 0x24 | ||
26 | #define CTL_SD_XFER_LEN 0x26 | ||
27 | #define CTL_SD_MEM_CARD_OPT 0x28 | ||
28 | #define CTL_SD_ERROR_DETAIL_STATUS 0x2c | ||
29 | #define CTL_SD_DATA_PORT 0x30 | ||
30 | #define CTL_TRANSACTION_CTL 0x34 | ||
31 | #define CTL_SDIO_STATUS 0x36 | ||
32 | #define CTL_SDIO_IRQ_MASK 0x38 | ||
33 | #define CTL_RESET_SD 0xe0 | ||
34 | #define CTL_SDIO_REGS 0x100 | ||
35 | #define CTL_CLK_AND_WAIT_CTL 0x138 | ||
36 | #define CTL_RESET_SDIO 0x1e0 | ||
37 | |||
38 | /* Definitions for values the CTRL_STATUS register can take. */ | ||
39 | #define TMIO_STAT_CMDRESPEND 0x00000001 | ||
40 | #define TMIO_STAT_DATAEND 0x00000004 | ||
41 | #define TMIO_STAT_CARD_REMOVE 0x00000008 | ||
42 | #define TMIO_STAT_CARD_INSERT 0x00000010 | ||
43 | #define TMIO_STAT_SIGSTATE 0x00000020 | ||
44 | #define TMIO_STAT_WRPROTECT 0x00000080 | ||
45 | #define TMIO_STAT_CARD_REMOVE_A 0x00000100 | ||
46 | #define TMIO_STAT_CARD_INSERT_A 0x00000200 | ||
47 | #define TMIO_STAT_SIGSTATE_A 0x00000400 | ||
48 | #define TMIO_STAT_CMD_IDX_ERR 0x00010000 | ||
49 | #define TMIO_STAT_CRCFAIL 0x00020000 | ||
50 | #define TMIO_STAT_STOPBIT_ERR 0x00040000 | ||
51 | #define TMIO_STAT_DATATIMEOUT 0x00080000 | ||
52 | #define TMIO_STAT_RXOVERFLOW 0x00100000 | ||
53 | #define TMIO_STAT_TXUNDERRUN 0x00200000 | ||
54 | #define TMIO_STAT_CMDTIMEOUT 0x00400000 | ||
55 | #define TMIO_STAT_RXRDY 0x01000000 | ||
56 | #define TMIO_STAT_TXRQ 0x02000000 | ||
57 | #define TMIO_STAT_ILL_FUNC 0x20000000 | ||
58 | #define TMIO_STAT_CMD_BUSY 0x40000000 | ||
59 | #define TMIO_STAT_ILL_ACCESS 0x80000000 | ||
60 | |||
61 | #define TMIO_BBS 512 /* Boot block size */ | ||
62 | |||
63 | #endif /* _LINUX_MMC_TMIO_H_ */ | ||
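
A hedged sketch of how a host driver might consult the status register using the offsets and bit masks defined above. Combining two 16-bit accesses into a 32-bit value mirrors the usual TMIO register width, but the ioremap'ed base pointer and the zero bus shift are assumptions of this example rather than something the header mandates.

#include <linux/io.h>
#include <linux/types.h>
#include <linux/mmc/tmio.h>

static u32 example_read_status(void __iomem *ctl)
{
	/* the 32-bit status is read as two adjacent 16-bit registers */
	return readw(ctl + CTL_STATUS) |
	       (u32)readw(ctl + CTL_STATUS + 2) << 16;
}

static bool example_card_present(void __iomem *ctl)
{
	return example_read_status(ctl) & TMIO_STAT_SIGSTATE;
}

static bool example_cmd_finished(void __iomem *ctl)
{
	u32 status = example_read_status(ctl);

	return status & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT);
}
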
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 8023e4e25133..91af2e49fa3a 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
@@ -78,7 +78,6 @@ extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, | |||
78 | struct page *page, | 78 | struct page *page, |
79 | unsigned int offset, | 79 | unsigned int offset, |
80 | unsigned int count); | 80 | unsigned int count); |
81 | extern void nfs_clear_request(struct nfs_page *req); | ||
82 | extern void nfs_release_request(struct nfs_page *req); | 81 | extern void nfs_release_request(struct nfs_page *req); |
83 | 82 | ||
84 | 83 | ||
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 24cfa626931e..239083bfea13 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -122,8 +122,14 @@ struct sk_buff_head { | |||
122 | 122 | ||
123 | struct sk_buff; | 123 | struct sk_buff; |
124 | 124 | ||
125 | /* To allow 64K frame to be packed as single skb without frag_list */ | 125 | /* To allow a 64K frame to be packed as a single skb without a frag_list. |
126 | * Since GRO uses frags, we allocate at least 16 regardless of page size. | ||
127 | */ | ||
128 | #if (65536/PAGE_SIZE + 2) < 16 | ||
129 | #define MAX_SKB_FRAGS 16 | ||
130 | #else | ||
126 | #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) | 131 | #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) |
132 | #endif | ||
127 | 133 | ||
128 | typedef struct skb_frag_struct skb_frag_t; | 134 | typedef struct skb_frag_struct skb_frag_t; |
129 | 135 | ||
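
The new clamp only changes anything on large-page configurations. A small user-space sketch of the same arithmetic (not kernel code, just the macro written out as a function) shows the effect: with 4 KiB pages the old value of 18 survives, while with 64 KiB pages the raw formula would give 3, which is now raised to 16 so GRO still has enough frag slots.

#include <stdio.h>

/* same computation as the #if/#else above, written as a plain function */
static unsigned int max_skb_frags(unsigned int page_size)
{
	unsigned int frags = 65536 / page_size + 2;

	return frags < 16 ? 16 : frags;
}

int main(void)
{
	printf("PAGE_SIZE  4096 -> MAX_SKB_FRAGS = %u\n", max_skb_frags(4096));	/* 18 */
	printf("PAGE_SIZE 65536 -> MAX_SKB_FRAGS = %u\n", max_skb_frags(65536));	/* 16 */
	return 0;
}
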
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h index 0e6dc3891942..c0f87da78f8a 100644 --- a/include/linux/sonypi.h +++ b/include/linux/sonypi.h | |||
@@ -40,6 +40,7 @@ | |||
40 | 40 | ||
41 | /* events the user application reading /dev/sonypi can use */ | 41 | /* events the user application reading /dev/sonypi can use */ |
42 | 42 | ||
43 | #define SONYPI_EVENT_IGNORE 0 | ||
43 | #define SONYPI_EVENT_JOGDIAL_DOWN 1 | 44 | #define SONYPI_EVENT_JOGDIAL_DOWN 1 |
44 | #define SONYPI_EVENT_JOGDIAL_UP 2 | 45 | #define SONYPI_EVENT_JOGDIAL_UP 2 |
45 | #define SONYPI_EVENT_JOGDIAL_DOWN_PRESSED 3 | 46 | #define SONYPI_EVENT_JOGDIAL_DOWN_PRESSED 3 |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 4ed6fcd6b726..9332e52ea8c2 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -95,10 +95,27 @@ extern struct vm_struct *remove_vm_area(const void *addr); | |||
95 | 95 | ||
96 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, | 96 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, |
97 | struct page ***pages); | 97 | struct page ***pages); |
98 | #ifdef CONFIG_MMU | ||
98 | extern int map_kernel_range_noflush(unsigned long start, unsigned long size, | 99 | extern int map_kernel_range_noflush(unsigned long start, unsigned long size, |
99 | pgprot_t prot, struct page **pages); | 100 | pgprot_t prot, struct page **pages); |
100 | extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); | 101 | extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); |
101 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); | 102 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); |
103 | #else | ||
104 | static inline int | ||
105 | map_kernel_range_noflush(unsigned long start, unsigned long size, | ||
106 | pgprot_t prot, struct page **pages) | ||
107 | { | ||
108 | return size >> PAGE_SHIFT; | ||
109 | } | ||
110 | static inline void | ||
111 | unmap_kernel_range_noflush(unsigned long addr, unsigned long size) | ||
112 | { | ||
113 | } | ||
114 | static inline void | ||
115 | unmap_kernel_range(unsigned long addr, unsigned long size) | ||
116 | { | ||
117 | } | ||
118 | #endif | ||
102 | 119 | ||
103 | /* Allocate/destroy a 'vmalloc' VM area. */ | 120 | /* Allocate/destroy a 'vmalloc' VM area. */ |
104 | extern struct vm_struct *alloc_vm_area(size_t size); | 121 | extern struct vm_struct *alloc_vm_area(size_t size); |
@@ -116,11 +133,26 @@ extern struct vm_struct *vmlist; | |||
116 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); | 133 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); |
117 | 134 | ||
118 | #ifdef CONFIG_SMP | 135 | #ifdef CONFIG_SMP |
136 | # ifdef CONFIG_MMU | ||
119 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, | 137 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, |
120 | const size_t *sizes, int nr_vms, | 138 | const size_t *sizes, int nr_vms, |
121 | size_t align); | 139 | size_t align); |
122 | 140 | ||
123 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); | 141 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); |
142 | # else | ||
143 | static inline struct vm_struct ** | ||
144 | pcpu_get_vm_areas(const unsigned long *offsets, | ||
145 | const size_t *sizes, int nr_vms, | ||
146 | size_t align) | ||
147 | { | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | static inline void | ||
152 | pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) | ||
153 | { | ||
154 | } | ||
155 | # endif | ||
124 | #endif | 156 | #endif |
125 | 157 | ||
126 | #endif /* _LINUX_VMALLOC_H */ | 158 | #endif /* _LINUX_VMALLOC_H */ |
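
The point of the new stubs is that generic callers keep building on !CONFIG_MMU: the mapping stub reports every page as mapped (size >> PAGE_SHIFT) and the unmap stubs are no-ops, so the failure path is simply never taken there. A hedged caller sketch under that assumption follows; example_map_pages and its error policy are inventions of this sketch.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int example_map_pages(unsigned long addr, struct page **pages,
			     unsigned int nr_pages)
{
	unsigned long size = (unsigned long)nr_pages << PAGE_SHIFT;
	int mapped = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);

	if (mapped != nr_pages) {
		/* partial or failed mapping: tear down and report */
		unmap_kernel_range_noflush(addr, size);
		return mapped < 0 ? mapped : -ENOMEM;
	}
	return 0;
}
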
diff --git a/include/net/dst.h b/include/net/dst.h index 2a46cbaef92d..75b95df4afe7 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -345,7 +345,7 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) | |||
345 | 345 | ||
346 | static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb) | 346 | static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb) |
347 | { | 347 | { |
348 | struct dst_entry *child = skb_dst(skb)->child; | 348 | struct dst_entry *child = dst_clone(skb_dst(skb)->child); |
349 | 349 | ||
350 | skb_dst_drop(skb); | 350 | skb_dst_drop(skb); |
351 | return child; | 351 | return child; |
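
The dst_clone() makes the reference counting explicit: the skb_dst_drop() just below it releases the skb's own reference, so without the clone the returned child could be freed under the caller. A hedged sketch of the resulting calling convention; the "keep" flag and the wrapper are invented, while skb_dst_pop(), skb_dst_set() and dst_release() are the real helpers.

#include <linux/skbuff.h>
#include <net/dst.h>

static void example_consume_child(struct sk_buff *skb, bool keep)
{
	/* the returned child now carries its own reference */
	struct dst_entry *child = skb_dst_pop(skb);

	if (!child)
		return;

	if (keep)
		skb_dst_set(skb, child);	/* skb takes over the reference */
	else
		dst_release(child);		/* otherwise we must drop it */
}
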
diff --git a/include/net/rose.h b/include/net/rose.h index 5ba9f02731eb..555dd198aab7 100644 --- a/include/net/rose.h +++ b/include/net/rose.h | |||
@@ -14,6 +14,12 @@ | |||
14 | 14 | ||
15 | #define ROSE_MIN_LEN 3 | 15 | #define ROSE_MIN_LEN 3 |
16 | 16 | ||
17 | #define ROSE_CALL_REQ_ADDR_LEN_OFF 3 | ||
18 | #define ROSE_CALL_REQ_ADDR_LEN_VAL 0xAA /* each address is 10 digits */ | ||
19 | #define ROSE_CALL_REQ_DEST_ADDR_OFF 4 | ||
20 | #define ROSE_CALL_REQ_SRC_ADDR_OFF 9 | ||
21 | #define ROSE_CALL_REQ_FACILITIES_OFF 14 | ||
22 | |||
17 | #define ROSE_GFI 0x10 | 23 | #define ROSE_GFI 0x10 |
18 | #define ROSE_Q_BIT 0x80 | 24 | #define ROSE_Q_BIT 0x80 |
19 | #define ROSE_D_BIT 0x40 | 25 | #define ROSE_D_BIT 0x40 |
@@ -214,7 +220,7 @@ extern void rose_requeue_frames(struct sock *); | |||
214 | extern int rose_validate_nr(struct sock *, unsigned short); | 220 | extern int rose_validate_nr(struct sock *, unsigned short); |
215 | extern void rose_write_internal(struct sock *, int); | 221 | extern void rose_write_internal(struct sock *, int); |
216 | extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *); | 222 | extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *); |
217 | extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *); | 223 | extern int rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *); |
218 | extern void rose_disconnect(struct sock *, int, int, int); | 224 | extern void rose_disconnect(struct sock *, int, int, int); |
219 | 225 | ||
220 | /* rose_timer.c */ | 226 | /* rose_timer.c */ |
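
The extra length argument lets rose_parse_facilities() bounds-check the facilities block instead of trusting the on-wire lengths. A hedged sketch of a caller using the new offsets together with that argument; the early length check and the wrapper are this example's own, the offset arithmetic mirrors how the constants above are meant to be used.

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/rose.h>

static int example_parse_call_request(struct sk_buff *skb,
				      struct rose_facilities_struct *facilities)
{
	/* nothing to parse if the frame stops before the facilities block */
	if (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF)
		return 0;

	memset(facilities, 0, sizeof(*facilities));

	/* pass the remaining length so the parser cannot walk off the skb */
	return rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
				     skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
				     facilities);
}
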
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index cffa5dc66449..6ae4bc5ce8a7 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -1601,6 +1601,28 @@ static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay | |||
1601 | } | 1601 | } |
1602 | 1602 | ||
1603 | #ifdef CONFIG_XFRM_MIGRATE | 1603 | #ifdef CONFIG_XFRM_MIGRATE |
1604 | static inline int xfrm_replay_clone(struct xfrm_state *x, | ||
1605 | struct xfrm_state *orig) | ||
1606 | { | ||
1607 | x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn), | ||
1608 | GFP_KERNEL); | ||
1609 | if (!x->replay_esn) | ||
1610 | return -ENOMEM; | ||
1611 | |||
1612 | x->replay_esn->bmp_len = orig->replay_esn->bmp_len; | ||
1613 | x->replay_esn->replay_window = orig->replay_esn->replay_window; | ||
1614 | |||
1615 | x->preplay_esn = kmemdup(x->replay_esn, | ||
1616 | xfrm_replay_state_esn_len(x->replay_esn), | ||
1617 | GFP_KERNEL); | ||
1618 | if (!x->preplay_esn) { | ||
1619 | kfree(x->replay_esn); | ||
1620 | return -ENOMEM; | ||
1621 | } | ||
1622 | |||
1623 | return 0; | ||
1624 | } | ||
1625 | |||
1604 | static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) | 1626 | static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) |
1605 | { | 1627 | { |
1606 | return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); | 1628 | return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); |
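
A hedged sketch of where the new helper slots in (CONFIG_XFRM_MIGRATE assumed, since the inline lives inside that #ifdef): while cloning a state for migration, the ESN replay context is copied only when the original actually has one, and the -ENOMEM from either allocation is propagated. The wrapper name is invented.

#include <net/xfrm.h>

static int example_clone_replay(struct xfrm_state *x, struct xfrm_state *orig)
{
	if (!orig->replay_esn)
		return 0;	/* nothing to clone for non-ESN states */

	/* allocates both replay_esn and preplay_esn, or fails with -ENOMEM */
	return xfrm_replay_clone(x, orig);
}
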
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h new file mode 100644 index 000000000000..f445cff66ab7 --- /dev/null +++ b/include/trace/events/btrfs.h | |||
@@ -0,0 +1,667 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM btrfs | ||
3 | |||
4 | #if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_BTRFS_H | ||
6 | |||
7 | #include <linux/writeback.h> | ||
8 | #include <linux/tracepoint.h> | ||
9 | |||
10 | struct btrfs_root; | ||
11 | struct btrfs_fs_info; | ||
12 | struct btrfs_inode; | ||
13 | struct extent_map; | ||
14 | struct btrfs_ordered_extent; | ||
15 | struct btrfs_delayed_ref_node; | ||
16 | struct btrfs_delayed_tree_ref; | ||
17 | struct btrfs_delayed_data_ref; | ||
18 | struct btrfs_delayed_ref_head; | ||
19 | struct map_lookup; | ||
20 | struct extent_buffer; | ||
21 | |||
22 | #define show_ref_type(type) \ | ||
23 | __print_symbolic(type, \ | ||
24 | { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \ | ||
25 | { BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \ | ||
26 | { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \ | ||
27 | { BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \ | ||
28 | { BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" }) | ||
29 | |||
30 | #define __show_root_type(obj) \ | ||
31 | __print_symbolic(obj, \ | ||
32 | { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \ | ||
33 | { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \ | ||
34 | { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \ | ||
35 | { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \ | ||
36 | { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \ | ||
37 | { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \ | ||
38 | { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \ | ||
39 | { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \ | ||
40 | { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \ | ||
41 | { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }) | ||
42 | |||
43 | #define show_root_type(obj) \ | ||
44 | obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \ | ||
45 | (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-" | ||
46 | |||
47 | TRACE_EVENT(btrfs_transaction_commit, | ||
48 | |||
49 | TP_PROTO(struct btrfs_root *root), | ||
50 | |||
51 | TP_ARGS(root), | ||
52 | |||
53 | TP_STRUCT__entry( | ||
54 | __field( u64, generation ) | ||
55 | __field( u64, root_objectid ) | ||
56 | ), | ||
57 | |||
58 | TP_fast_assign( | ||
59 | __entry->generation = root->fs_info->generation; | ||
60 | __entry->root_objectid = root->root_key.objectid; | ||
61 | ), | ||
62 | |||
63 | TP_printk("root = %llu(%s), gen = %llu", | ||
64 | show_root_type(__entry->root_objectid), | ||
65 | (unsigned long long)__entry->generation) | ||
66 | ); | ||
67 | |||
68 | DECLARE_EVENT_CLASS(btrfs__inode, | ||
69 | |||
70 | TP_PROTO(struct inode *inode), | ||
71 | |||
72 | TP_ARGS(inode), | ||
73 | |||
74 | TP_STRUCT__entry( | ||
75 | __field( ino_t, ino ) | ||
76 | __field( blkcnt_t, blocks ) | ||
77 | __field( u64, disk_i_size ) | ||
78 | __field( u64, generation ) | ||
79 | __field( u64, last_trans ) | ||
80 | __field( u64, logged_trans ) | ||
81 | __field( u64, root_objectid ) | ||
82 | ), | ||
83 | |||
84 | TP_fast_assign( | ||
85 | __entry->ino = inode->i_ino; | ||
86 | __entry->blocks = inode->i_blocks; | ||
87 | __entry->disk_i_size = BTRFS_I(inode)->disk_i_size; | ||
88 | __entry->generation = BTRFS_I(inode)->generation; | ||
89 | __entry->last_trans = BTRFS_I(inode)->last_trans; | ||
90 | __entry->logged_trans = BTRFS_I(inode)->logged_trans; | ||
91 | __entry->root_objectid = | ||
92 | BTRFS_I(inode)->root->root_key.objectid; | ||
93 | ), | ||
94 | |||
95 | TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, " | ||
96 | "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu", | ||
97 | show_root_type(__entry->root_objectid), | ||
98 | (unsigned long long)__entry->generation, | ||
99 | (unsigned long)__entry->ino, | ||
100 | (unsigned long long)__entry->blocks, | ||
101 | (unsigned long long)__entry->disk_i_size, | ||
102 | (unsigned long long)__entry->last_trans, | ||
103 | (unsigned long long)__entry->logged_trans) | ||
104 | ); | ||
105 | |||
106 | DEFINE_EVENT(btrfs__inode, btrfs_inode_new, | ||
107 | |||
108 | TP_PROTO(struct inode *inode), | ||
109 | |||
110 | TP_ARGS(inode) | ||
111 | ); | ||
112 | |||
113 | DEFINE_EVENT(btrfs__inode, btrfs_inode_request, | ||
114 | |||
115 | TP_PROTO(struct inode *inode), | ||
116 | |||
117 | TP_ARGS(inode) | ||
118 | ); | ||
119 | |||
120 | DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, | ||
121 | |||
122 | TP_PROTO(struct inode *inode), | ||
123 | |||
124 | TP_ARGS(inode) | ||
125 | ); | ||
126 | |||
127 | #define __show_map_type(type) \ | ||
128 | __print_symbolic(type, \ | ||
129 | { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \ | ||
130 | { EXTENT_MAP_HOLE, "HOLE" }, \ | ||
131 | { EXTENT_MAP_INLINE, "INLINE" }, \ | ||
132 | { EXTENT_MAP_DELALLOC, "DELALLOC" }) | ||
133 | |||
134 | #define show_map_type(type) \ | ||
135 | type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type) | ||
136 | |||
137 | #define show_map_flags(flag) \ | ||
138 | __print_flags(flag, "|", \ | ||
139 | { EXTENT_FLAG_PINNED, "PINNED" }, \ | ||
140 | { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \ | ||
141 | { EXTENT_FLAG_VACANCY, "VACANCY" }, \ | ||
142 | { EXTENT_FLAG_PREALLOC, "PREALLOC" }) | ||
143 | |||
144 | TRACE_EVENT(btrfs_get_extent, | ||
145 | |||
146 | TP_PROTO(struct btrfs_root *root, struct extent_map *map), | ||
147 | |||
148 | TP_ARGS(root, map), | ||
149 | |||
150 | TP_STRUCT__entry( | ||
151 | __field( u64, root_objectid ) | ||
152 | __field( u64, start ) | ||
153 | __field( u64, len ) | ||
154 | __field( u64, orig_start ) | ||
155 | __field( u64, block_start ) | ||
156 | __field( u64, block_len ) | ||
157 | __field( unsigned long, flags ) | ||
158 | __field( int, refs ) | ||
159 | __field( unsigned int, compress_type ) | ||
160 | ), | ||
161 | |||
162 | TP_fast_assign( | ||
163 | __entry->root_objectid = root->root_key.objectid; | ||
164 | __entry->start = map->start; | ||
165 | __entry->len = map->len; | ||
166 | __entry->orig_start = map->orig_start; | ||
167 | __entry->block_start = map->block_start; | ||
168 | __entry->block_len = map->block_len; | ||
169 | __entry->flags = map->flags; | ||
170 | __entry->refs = atomic_read(&map->refs); | ||
171 | __entry->compress_type = map->compress_type; | ||
172 | ), | ||
173 | |||
174 | TP_printk("root = %llu(%s), start = %llu, len = %llu, " | ||
175 | "orig_start = %llu, block_start = %llu(%s), " | ||
176 | "block_len = %llu, flags = %s, refs = %u, " | ||
177 | "compress_type = %u", | ||
178 | show_root_type(__entry->root_objectid), | ||
179 | (unsigned long long)__entry->start, | ||
180 | (unsigned long long)__entry->len, | ||
181 | (unsigned long long)__entry->orig_start, | ||
182 | show_map_type(__entry->block_start), | ||
183 | (unsigned long long)__entry->block_len, | ||
184 | show_map_flags(__entry->flags), | ||
185 | __entry->refs, __entry->compress_type) | ||
186 | ); | ||
187 | |||
188 | #define show_ordered_flags(flags) \ | ||
189 | __print_symbolic(flags, \ | ||
190 | { BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \ | ||
191 | { BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \ | ||
192 | { BTRFS_ORDERED_NOCOW, "NOCOW" }, \ | ||
193 | { BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \ | ||
194 | { BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \ | ||
195 | { BTRFS_ORDERED_DIRECT, "DIRECT" }) | ||
196 | |||
197 | DECLARE_EVENT_CLASS(btrfs__ordered_extent, | ||
198 | |||
199 | TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), | ||
200 | |||
201 | TP_ARGS(inode, ordered), | ||
202 | |||
203 | TP_STRUCT__entry( | ||
204 | __field( ino_t, ino ) | ||
205 | __field( u64, file_offset ) | ||
206 | __field( u64, start ) | ||
207 | __field( u64, len ) | ||
208 | __field( u64, disk_len ) | ||
209 | __field( u64, bytes_left ) | ||
210 | __field( unsigned long, flags ) | ||
211 | __field( int, compress_type ) | ||
212 | __field( int, refs ) | ||
213 | __field( u64, root_objectid ) | ||
214 | ), | ||
215 | |||
216 | TP_fast_assign( | ||
217 | __entry->ino = inode->i_ino; | ||
218 | __entry->file_offset = ordered->file_offset; | ||
219 | __entry->start = ordered->start; | ||
220 | __entry->len = ordered->len; | ||
221 | __entry->disk_len = ordered->disk_len; | ||
222 | __entry->bytes_left = ordered->bytes_left; | ||
223 | __entry->flags = ordered->flags; | ||
224 | __entry->compress_type = ordered->compress_type; | ||
225 | __entry->refs = atomic_read(&ordered->refs); | ||
226 | __entry->root_objectid = | ||
227 | BTRFS_I(inode)->root->root_key.objectid; | ||
228 | ), | ||
229 | |||
230 | TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, " | ||
231 | "start = %llu, len = %llu, disk_len = %llu, " | ||
232 | "bytes_left = %llu, flags = %s, compress_type = %d, " | ||
233 | "refs = %d", | ||
234 | show_root_type(__entry->root_objectid), | ||
235 | (unsigned long long)__entry->ino, | ||
236 | (unsigned long long)__entry->file_offset, | ||
237 | (unsigned long long)__entry->start, | ||
238 | (unsigned long long)__entry->len, | ||
239 | (unsigned long long)__entry->disk_len, | ||
240 | (unsigned long long)__entry->bytes_left, | ||
241 | show_ordered_flags(__entry->flags), | ||
242 | __entry->compress_type, __entry->refs) | ||
243 | ); | ||
244 | |||
245 | DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add, | ||
246 | |||
247 | TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), | ||
248 | |||
249 | TP_ARGS(inode, ordered) | ||
250 | ); | ||
251 | |||
252 | DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove, | ||
253 | |||
254 | TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), | ||
255 | |||
256 | TP_ARGS(inode, ordered) | ||
257 | ); | ||
258 | |||
259 | DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start, | ||
260 | |||
261 | TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), | ||
262 | |||
263 | TP_ARGS(inode, ordered) | ||
264 | ); | ||
265 | |||
266 | DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put, | ||
267 | |||
268 | TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered), | ||
269 | |||
270 | TP_ARGS(inode, ordered) | ||
271 | ); | ||
272 | |||
273 | DECLARE_EVENT_CLASS(btrfs__writepage, | ||
274 | |||
275 | TP_PROTO(struct page *page, struct inode *inode, | ||
276 | struct writeback_control *wbc), | ||
277 | |||
278 | TP_ARGS(page, inode, wbc), | ||
279 | |||
280 | TP_STRUCT__entry( | ||
281 | __field( ino_t, ino ) | ||
282 | __field( pgoff_t, index ) | ||
283 | __field( long, nr_to_write ) | ||
284 | __field( long, pages_skipped ) | ||
285 | __field( loff_t, range_start ) | ||
286 | __field( loff_t, range_end ) | ||
287 | __field( char, nonblocking ) | ||
288 | __field( char, for_kupdate ) | ||
289 | __field( char, for_reclaim ) | ||
290 | __field( char, range_cyclic ) | ||
291 | __field( pgoff_t, writeback_index ) | ||
292 | __field( u64, root_objectid ) | ||
293 | ), | ||
294 | |||
295 | TP_fast_assign( | ||
296 | __entry->ino = inode->i_ino; | ||
297 | __entry->index = page->index; | ||
298 | __entry->nr_to_write = wbc->nr_to_write; | ||
299 | __entry->pages_skipped = wbc->pages_skipped; | ||
300 | __entry->range_start = wbc->range_start; | ||
301 | __entry->range_end = wbc->range_end; | ||
302 | __entry->nonblocking = wbc->nonblocking; | ||
303 | __entry->for_kupdate = wbc->for_kupdate; | ||
304 | __entry->for_reclaim = wbc->for_reclaim; | ||
305 | __entry->range_cyclic = wbc->range_cyclic; | ||
306 | __entry->writeback_index = inode->i_mapping->writeback_index; | ||
307 | __entry->root_objectid = | ||
308 | BTRFS_I(inode)->root->root_key.objectid; | ||
309 | ), | ||
310 | |||
311 | TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, " | ||
312 | "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, " | ||
313 | "range_end = %llu, nonblocking = %d, for_kupdate = %d, " | ||
314 | "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu", | ||
315 | show_root_type(__entry->root_objectid), | ||
316 | (unsigned long)__entry->ino, __entry->index, | ||
317 | __entry->nr_to_write, __entry->pages_skipped, | ||
318 | __entry->range_start, __entry->range_end, | ||
319 | __entry->nonblocking, __entry->for_kupdate, | ||
320 | __entry->for_reclaim, __entry->range_cyclic, | ||
321 | (unsigned long)__entry->writeback_index) | ||
322 | ); | ||
323 | |||
324 | DEFINE_EVENT(btrfs__writepage, __extent_writepage, | ||
325 | |||
326 | TP_PROTO(struct page *page, struct inode *inode, | ||
327 | struct writeback_control *wbc), | ||
328 | |||
329 | TP_ARGS(page, inode, wbc) | ||
330 | ); | ||
331 | |||
332 | TRACE_EVENT(btrfs_writepage_end_io_hook, | ||
333 | |||
334 | TP_PROTO(struct page *page, u64 start, u64 end, int uptodate), | ||
335 | |||
336 | TP_ARGS(page, start, end, uptodate), | ||
337 | |||
338 | TP_STRUCT__entry( | ||
339 | __field( ino_t, ino ) | ||
340 | __field( pgoff_t, index ) | ||
341 | __field( u64, start ) | ||
342 | __field( u64, end ) | ||
343 | __field( int, uptodate ) | ||
344 | __field( u64, root_objectid ) | ||
345 | ), | ||
346 | |||
347 | TP_fast_assign( | ||
348 | __entry->ino = page->mapping->host->i_ino; | ||
349 | __entry->index = page->index; | ||
350 | __entry->start = start; | ||
351 | __entry->end = end; | ||
352 | __entry->uptodate = uptodate; | ||
353 | __entry->root_objectid = | ||
354 | BTRFS_I(page->mapping->host)->root->root_key.objectid; | ||
355 | ), | ||
356 | |||
357 | TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, " | ||
358 | "end = %llu, uptodate = %d", | ||
359 | show_root_type(__entry->root_objectid), | ||
360 | (unsigned long)__entry->ino, (unsigned long)__entry->index, | ||
361 | (unsigned long long)__entry->start, | ||
362 | (unsigned long long)__entry->end, __entry->uptodate) | ||
363 | ); | ||
364 | |||
365 | TRACE_EVENT(btrfs_sync_file, | ||
366 | |||
367 | TP_PROTO(struct file *file, int datasync), | ||
368 | |||
369 | TP_ARGS(file, datasync), | ||
370 | |||
371 | TP_STRUCT__entry( | ||
372 | __field( ino_t, ino ) | ||
373 | __field( ino_t, parent ) | ||
374 | __field( int, datasync ) | ||
375 | __field( u64, root_objectid ) | ||
376 | ), | ||
377 | |||
378 | TP_fast_assign( | ||
379 | struct dentry *dentry = file->f_path.dentry; | ||
380 | struct inode *inode = dentry->d_inode; | ||
381 | |||
382 | __entry->ino = inode->i_ino; | ||
383 | __entry->parent = dentry->d_parent->d_inode->i_ino; | ||
384 | __entry->datasync = datasync; | ||
385 | __entry->root_objectid = | ||
386 | BTRFS_I(inode)->root->root_key.objectid; | ||
387 | ), | ||
388 | |||
389 | TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d", | ||
390 | show_root_type(__entry->root_objectid), | ||
391 | (unsigned long)__entry->ino, (unsigned long)__entry->parent, | ||
392 | __entry->datasync) | ||
393 | ); | ||
394 | |||
395 | TRACE_EVENT(btrfs_sync_fs, | ||
396 | |||
397 | TP_PROTO(int wait), | ||
398 | |||
399 | TP_ARGS(wait), | ||
400 | |||
401 | TP_STRUCT__entry( | ||
402 | __field( int, wait ) | ||
403 | ), | ||
404 | |||
405 | TP_fast_assign( | ||
406 | __entry->wait = wait; | ||
407 | ), | ||
408 | |||
409 | TP_printk("wait = %d", __entry->wait) | ||
410 | ); | ||
411 | |||
412 | #define show_ref_action(action) \ | ||
413 | __print_symbolic(action, \ | ||
414 | { BTRFS_ADD_DELAYED_REF, "ADD_DELAYED_REF" }, \ | ||
415 | { BTRFS_DROP_DELAYED_REF, "DROP_DELAYED_REF" }, \ | ||
416 | { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" }, \ | ||
417 | { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" }) | ||
418 | |||
419 | |||
420 | TRACE_EVENT(btrfs_delayed_tree_ref, | ||
421 | |||
422 | TP_PROTO(struct btrfs_delayed_ref_node *ref, | ||
423 | struct btrfs_delayed_tree_ref *full_ref, | ||
424 | int action), | ||
425 | |||
426 | TP_ARGS(ref, full_ref, action), | ||
427 | |||
428 | TP_STRUCT__entry( | ||
429 | __field( u64, bytenr ) | ||
430 | __field( u64, num_bytes ) | ||
431 | __field( int, action ) | ||
432 | __field( u64, parent ) | ||
433 | __field( u64, ref_root ) | ||
434 | __field( int, level ) | ||
435 | __field( int, type ) | ||
436 | ), | ||
437 | |||
438 | TP_fast_assign( | ||
439 | __entry->bytenr = ref->bytenr; | ||
440 | __entry->num_bytes = ref->num_bytes; | ||
441 | __entry->action = action; | ||
442 | __entry->parent = full_ref->parent; | ||
443 | __entry->ref_root = full_ref->root; | ||
444 | __entry->level = full_ref->level; | ||
445 | __entry->type = ref->type; | ||
446 | ), | ||
447 | |||
448 | TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " | ||
449 | "parent = %llu(%s), ref_root = %llu(%s), level = %d, " | ||
450 | "type = %s", | ||
451 | (unsigned long long)__entry->bytenr, | ||
452 | (unsigned long long)__entry->num_bytes, | ||
453 | show_ref_action(__entry->action), | ||
454 | show_root_type(__entry->parent), | ||
455 | show_root_type(__entry->ref_root), | ||
456 | __entry->level, show_ref_type(__entry->type)) | ||
457 | ); | ||
458 | |||
459 | TRACE_EVENT(btrfs_delayed_data_ref, | ||
460 | |||
461 | TP_PROTO(struct btrfs_delayed_ref_node *ref, | ||
462 | struct btrfs_delayed_data_ref *full_ref, | ||
463 | int action), | ||
464 | |||
465 | TP_ARGS(ref, full_ref, action), | ||
466 | |||
467 | TP_STRUCT__entry( | ||
468 | __field( u64, bytenr ) | ||
469 | __field( u64, num_bytes ) | ||
470 | __field( int, action ) | ||
471 | __field( u64, parent ) | ||
472 | __field( u64, ref_root ) | ||
473 | __field( u64, owner ) | ||
474 | __field( u64, offset ) | ||
475 | __field( int, type ) | ||
476 | ), | ||
477 | |||
478 | TP_fast_assign( | ||
479 | __entry->bytenr = ref->bytenr; | ||
480 | __entry->num_bytes = ref->num_bytes; | ||
481 | __entry->action = action; | ||
482 | __entry->parent = full_ref->parent; | ||
483 | __entry->ref_root = full_ref->root; | ||
484 | __entry->owner = full_ref->objectid; | ||
485 | __entry->offset = full_ref->offset; | ||
486 | __entry->type = ref->type; | ||
487 | ), | ||
488 | |||
489 | TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, " | ||
490 | "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, " | ||
491 | "offset = %llu, type = %s", | ||
492 | (unsigned long long)__entry->bytenr, | ||
493 | (unsigned long long)__entry->num_bytes, | ||
494 | show_ref_action(__entry->action), | ||
495 | show_root_type(__entry->parent), | ||
496 | show_root_type(__entry->ref_root), | ||
497 | (unsigned long long)__entry->owner, | ||
498 | (unsigned long long)__entry->offset, | ||
499 | show_ref_type(__entry->type)) | ||
500 | ); | ||
501 | |||
502 | TRACE_EVENT(btrfs_delayed_ref_head, | ||
503 | |||
504 | TP_PROTO(struct btrfs_delayed_ref_node *ref, | ||
505 | struct btrfs_delayed_ref_head *head_ref, | ||
506 | int action), | ||
507 | |||
508 | TP_ARGS(ref, head_ref, action), | ||
509 | |||
510 | TP_STRUCT__entry( | ||
511 | __field( u64, bytenr ) | ||
512 | __field( u64, num_bytes ) | ||
513 | __field( int, action ) | ||
514 | __field( int, is_data ) | ||
515 | ), | ||
516 | |||
517 | TP_fast_assign( | ||
518 | __entry->bytenr = ref->bytenr; | ||
519 | __entry->num_bytes = ref->num_bytes; | ||
520 | __entry->action = action; | ||
521 | __entry->is_data = head_ref->is_data; | ||
522 | ), | ||
523 | |||
524 | TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d", | ||
525 | (unsigned long long)__entry->bytenr, | ||
526 | (unsigned long long)__entry->num_bytes, | ||
527 | show_ref_action(__entry->action), | ||
528 | __entry->is_data) | ||
529 | ); | ||
530 | |||
531 | #define show_chunk_type(type) \ | ||
532 | __print_flags(type, "|", \ | ||
533 | { BTRFS_BLOCK_GROUP_DATA, "DATA" }, \ | ||
534 | { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \ | ||
535 | { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \ | ||
536 | { BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \ | ||
537 | { BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \ | ||
538 | { BTRFS_BLOCK_GROUP_DUP, "DUP" }, \ | ||
539 | { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}) | ||
540 | |||
541 | DECLARE_EVENT_CLASS(btrfs__chunk, | ||
542 | |||
543 | TP_PROTO(struct btrfs_root *root, struct map_lookup *map, | ||
544 | u64 offset, u64 size), | ||
545 | |||
546 | TP_ARGS(root, map, offset, size), | ||
547 | |||
548 | TP_STRUCT__entry( | ||
549 | __field( int, num_stripes ) | ||
550 | __field( u64, type ) | ||
551 | __field( int, sub_stripes ) | ||
552 | __field( u64, offset ) | ||
553 | __field( u64, size ) | ||
554 | __field( u64, root_objectid ) | ||
555 | ), | ||
556 | |||
557 | TP_fast_assign( | ||
558 | __entry->num_stripes = map->num_stripes; | ||
559 | __entry->type = map->type; | ||
560 | __entry->sub_stripes = map->sub_stripes; | ||
561 | __entry->offset = offset; | ||
562 | __entry->size = size; | ||
563 | __entry->root_objectid = root->root_key.objectid; | ||
564 | ), | ||
565 | |||
566 | TP_printk("root = %llu(%s), offset = %llu, size = %llu, " | ||
567 | "num_stripes = %d, sub_stripes = %d, type = %s", | ||
568 | show_root_type(__entry->root_objectid), | ||
569 | (unsigned long long)__entry->offset, | ||
570 | (unsigned long long)__entry->size, | ||
571 | __entry->num_stripes, __entry->sub_stripes, | ||
572 | show_chunk_type(__entry->type)) | ||
573 | ); | ||
574 | |||
575 | DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc, | ||
576 | |||
577 | TP_PROTO(struct btrfs_root *root, struct map_lookup *map, | ||
578 | u64 offset, u64 size), | ||
579 | |||
580 | TP_ARGS(root, map, offset, size) | ||
581 | ); | ||
582 | |||
583 | DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free, | ||
584 | |||
585 | TP_PROTO(struct btrfs_root *root, struct map_lookup *map, | ||
586 | u64 offset, u64 size), | ||
587 | |||
588 | TP_ARGS(root, map, offset, size) | ||
589 | ); | ||
590 | |||
591 | TRACE_EVENT(btrfs_cow_block, | ||
592 | |||
593 | TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf, | ||
594 | struct extent_buffer *cow), | ||
595 | |||
596 | TP_ARGS(root, buf, cow), | ||
597 | |||
598 | TP_STRUCT__entry( | ||
599 | __field( u64, root_objectid ) | ||
600 | __field( u64, buf_start ) | ||
601 | __field( int, refs ) | ||
602 | __field( u64, cow_start ) | ||
603 | __field( int, buf_level ) | ||
604 | __field( int, cow_level ) | ||
605 | ), | ||
606 | |||
607 | TP_fast_assign( | ||
608 | __entry->root_objectid = root->root_key.objectid; | ||
609 | __entry->buf_start = buf->start; | ||
610 | __entry->refs = atomic_read(&buf->refs); | ||
611 | __entry->cow_start = cow->start; | ||
612 | __entry->buf_level = btrfs_header_level(buf); | ||
613 | __entry->cow_level = btrfs_header_level(cow); | ||
614 | ), | ||
615 | |||
616 | TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu " | ||
617 | "(orig_level = %d), cow_buf = %llu (cow_level = %d)", | ||
618 | show_root_type(__entry->root_objectid), | ||
619 | __entry->refs, | ||
620 | (unsigned long long)__entry->buf_start, | ||
621 | __entry->buf_level, | ||
622 | (unsigned long long)__entry->cow_start, | ||
623 | __entry->cow_level) | ||
624 | ); | ||
625 | |||
626 | DECLARE_EVENT_CLASS(btrfs__reserved_extent, | ||
627 | |||
628 | TP_PROTO(struct btrfs_root *root, u64 start, u64 len), | ||
629 | |||
630 | TP_ARGS(root, start, len), | ||
631 | |||
632 | TP_STRUCT__entry( | ||
633 | __field( u64, root_objectid ) | ||
634 | __field( u64, start ) | ||
635 | __field( u64, len ) | ||
636 | ), | ||
637 | |||
638 | TP_fast_assign( | ||
639 | __entry->root_objectid = root->root_key.objectid; | ||
640 | __entry->start = start; | ||
641 | __entry->len = len; | ||
642 | ), | ||
643 | |||
644 | TP_printk("root = %llu(%s), start = %llu, len = %llu", | ||
645 | show_root_type(__entry->root_objectid), | ||
646 | (unsigned long long)__entry->start, | ||
647 | (unsigned long long)__entry->len) | ||
648 | ); | ||
649 | |||
650 | DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc, | ||
651 | |||
652 | TP_PROTO(struct btrfs_root *root, u64 start, u64 len), | ||
653 | |||
654 | TP_ARGS(root, start, len) | ||
655 | ); | ||
656 | |||
657 | DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free, | ||
658 | |||
659 | TP_PROTO(struct btrfs_root *root, u64 start, u64 len), | ||
660 | |||
661 | TP_ARGS(root, start, len) | ||
662 | ); | ||
663 | |||
664 | #endif /* _TRACE_BTRFS_H */ | ||
665 | |||
666 | /* This part must be outside protection */ | ||
667 | #include <trace/define_trace.h> | ||
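
Two hedged sketches of how this header is consumed. A call site simply invokes the generated trace_*() stub, which stays a near no-op unless the event is enabled, provided one .c file in fs/btrfs defines CREATE_TRACE_POINTS before including the header, per the usual tracepoint convention. A further event can also reuse an existing class via DEFINE_EVENT; example_commit_hook and btrfs_reserved_extent_example are made-up names for illustration.

#include <trace/events/btrfs.h>

/* call site, e.g. somewhere in the btrfs transaction code */
static void example_commit_hook(struct btrfs_root *root)
{
	/* no-op unless the tracepoint is enabled */
	trace_btrfs_transaction_commit(root);
}

/* one more event added to this header, sharing an existing class */
DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_example,

	TP_PROTO(struct btrfs_root *root, u64 start, u64 len),

	TP_ARGS(root, start, len)
);
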
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 00f2c037267a..a69c333f78e4 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
@@ -10,10 +10,6 @@ menu "IRQ subsystem" | |||
10 | config GENERIC_HARDIRQS | 10 | config GENERIC_HARDIRQS |
11 | def_bool y | 11 | def_bool y |
12 | 12 | ||
13 | # Select this to disable the deprecated stuff | ||
14 | config GENERIC_HARDIRQS_NO_DEPRECATED | ||
15 | bool | ||
16 | |||
17 | config GENERIC_HARDIRQS_NO_COMPAT | 13 | config GENERIC_HARDIRQS_NO_COMPAT |
18 | bool | 14 | bool |
19 | 15 | ||
@@ -51,6 +47,10 @@ config HARDIRQS_SW_RESEND | |||
51 | config IRQ_PREFLOW_FASTEOI | 47 | config IRQ_PREFLOW_FASTEOI |
52 | bool | 48 | bool |
53 | 49 | ||
50 | # Edge style eoi based handler (cell) | ||
51 | config IRQ_EDGE_EOI_HANDLER | ||
52 | bool | ||
53 | |||
54 | # Support forced irq threading | 54 | # Support forced irq threading |
55 | config IRQ_FORCED_THREADING | 55 | config IRQ_FORCED_THREADING |
56 | bool | 56 | bool |
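
IRQ_EDGE_EOI_HANDLER is a hidden option meant to be select-ed by the platform Kconfig that needs it (the comment above points at Cell); it builds the handle_edge_eoi_irq() flow handler declared earlier in this merge. A hedged sketch of platform code picking that handler follows; the chip and its EOI callback are invented, only irq_set_chip_and_handler() and handle_edge_eoi_irq() are real.

#include <linux/irq.h>

static void example_pic_eoi(struct irq_data *d)
{
	/* hypothetical: signal end-of-interrupt to the controller for d->irq */
}

static struct irq_chip example_pic = {
	.name		= "example-pic",
	.irq_eoi	= example_pic_eoi,
};

static void example_setup_edge_irq(unsigned int irq)
{
	/* edge-triggered flow that still ends with an EOI at the chip */
	irq_set_chip_and_handler(irq, &example_pic, handle_edge_eoi_irq);
}
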
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 394784c57060..342d8f44e401 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c | |||
@@ -70,10 +70,8 @@ unsigned long probe_irq_on(void) | |||
70 | raw_spin_lock_irq(&desc->lock); | 70 | raw_spin_lock_irq(&desc->lock); |
71 | if (!desc->action && irq_settings_can_probe(desc)) { | 71 | if (!desc->action && irq_settings_can_probe(desc)) { |
72 | desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; | 72 | desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; |
73 | if (irq_startup(desc)) { | 73 | if (irq_startup(desc)) |
74 | irq_compat_set_pending(desc); | ||
75 | desc->istate |= IRQS_PENDING; | 74 | desc->istate |= IRQS_PENDING; |
76 | } | ||
77 | } | 75 | } |
78 | raw_spin_unlock_irq(&desc->lock); | 76 | raw_spin_unlock_irq(&desc->lock); |
79 | } | 77 | } |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index c9c0601f0615..616ec1c6b06f 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -34,9 +34,14 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip) | |||
34 | if (!chip) | 34 | if (!chip) |
35 | chip = &no_irq_chip; | 35 | chip = &no_irq_chip; |
36 | 36 | ||
37 | irq_chip_set_defaults(chip); | ||
38 | desc->irq_data.chip = chip; | 37 | desc->irq_data.chip = chip; |
39 | irq_put_desc_unlock(desc, flags); | 38 | irq_put_desc_unlock(desc, flags); |
39 | /* | ||
40 | * For !CONFIG_SPARSE_IRQ make the irq show up in | ||
41 | * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is | ||
42 | * already marked, and this call is harmless. | ||
43 | */ | ||
44 | irq_reserve_irq(irq); | ||
40 | return 0; | 45 | return 0; |
41 | } | 46 | } |
42 | EXPORT_SYMBOL(irq_set_chip); | 47 | EXPORT_SYMBOL(irq_set_chip); |
@@ -134,26 +139,22 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data); | |||
134 | 139 | ||
135 | static void irq_state_clr_disabled(struct irq_desc *desc) | 140 | static void irq_state_clr_disabled(struct irq_desc *desc) |
136 | { | 141 | { |
137 | desc->istate &= ~IRQS_DISABLED; | 142 | irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); |
138 | irq_compat_clr_disabled(desc); | ||
139 | } | 143 | } |
140 | 144 | ||
141 | static void irq_state_set_disabled(struct irq_desc *desc) | 145 | static void irq_state_set_disabled(struct irq_desc *desc) |
142 | { | 146 | { |
143 | desc->istate |= IRQS_DISABLED; | 147 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
144 | irq_compat_set_disabled(desc); | ||
145 | } | 148 | } |
146 | 149 | ||
147 | static void irq_state_clr_masked(struct irq_desc *desc) | 150 | static void irq_state_clr_masked(struct irq_desc *desc) |
148 | { | 151 | { |
149 | desc->istate &= ~IRQS_MASKED; | 152 | irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); |
150 | irq_compat_clr_masked(desc); | ||
151 | } | 153 | } |
152 | 154 | ||
153 | static void irq_state_set_masked(struct irq_desc *desc) | 155 | static void irq_state_set_masked(struct irq_desc *desc) |
154 | { | 156 | { |
155 | desc->istate |= IRQS_MASKED; | 157 | irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); |
156 | irq_compat_set_masked(desc); | ||
157 | } | 158 | } |
158 | 159 | ||
159 | int irq_startup(struct irq_desc *desc) | 160 | int irq_startup(struct irq_desc *desc) |
@@ -203,126 +204,6 @@ void irq_disable(struct irq_desc *desc) | |||
203 | } | 204 | } |
204 | } | 205 | } |
205 | 206 | ||
206 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
207 | /* Temporary migration helpers */ | ||
208 | static void compat_irq_mask(struct irq_data *data) | ||
209 | { | ||
210 | data->chip->mask(data->irq); | ||
211 | } | ||
212 | |||
213 | static void compat_irq_unmask(struct irq_data *data) | ||
214 | { | ||
215 | data->chip->unmask(data->irq); | ||
216 | } | ||
217 | |||
218 | static void compat_irq_ack(struct irq_data *data) | ||
219 | { | ||
220 | data->chip->ack(data->irq); | ||
221 | } | ||
222 | |||
223 | static void compat_irq_mask_ack(struct irq_data *data) | ||
224 | { | ||
225 | data->chip->mask_ack(data->irq); | ||
226 | } | ||
227 | |||
228 | static void compat_irq_eoi(struct irq_data *data) | ||
229 | { | ||
230 | data->chip->eoi(data->irq); | ||
231 | } | ||
232 | |||
233 | static void compat_irq_enable(struct irq_data *data) | ||
234 | { | ||
235 | data->chip->enable(data->irq); | ||
236 | } | ||
237 | |||
238 | static void compat_irq_disable(struct irq_data *data) | ||
239 | { | ||
240 | data->chip->disable(data->irq); | ||
241 | } | ||
242 | |||
243 | static void compat_irq_shutdown(struct irq_data *data) | ||
244 | { | ||
245 | data->chip->shutdown(data->irq); | ||
246 | } | ||
247 | |||
248 | static unsigned int compat_irq_startup(struct irq_data *data) | ||
249 | { | ||
250 | return data->chip->startup(data->irq); | ||
251 | } | ||
252 | |||
253 | static int compat_irq_set_affinity(struct irq_data *data, | ||
254 | const struct cpumask *dest, bool force) | ||
255 | { | ||
256 | return data->chip->set_affinity(data->irq, dest); | ||
257 | } | ||
258 | |||
259 | static int compat_irq_set_type(struct irq_data *data, unsigned int type) | ||
260 | { | ||
261 | return data->chip->set_type(data->irq, type); | ||
262 | } | ||
263 | |||
264 | static int compat_irq_set_wake(struct irq_data *data, unsigned int on) | ||
265 | { | ||
266 | return data->chip->set_wake(data->irq, on); | ||
267 | } | ||
268 | |||
269 | static int compat_irq_retrigger(struct irq_data *data) | ||
270 | { | ||
271 | return data->chip->retrigger(data->irq); | ||
272 | } | ||
273 | |||
274 | static void compat_bus_lock(struct irq_data *data) | ||
275 | { | ||
276 | data->chip->bus_lock(data->irq); | ||
277 | } | ||
278 | |||
279 | static void compat_bus_sync_unlock(struct irq_data *data) | ||
280 | { | ||
281 | data->chip->bus_sync_unlock(data->irq); | ||
282 | } | ||
283 | #endif | ||
284 | |||
285 | /* | ||
286 | * Fixup enable/disable function pointers | ||
287 | */ | ||
288 | void irq_chip_set_defaults(struct irq_chip *chip) | ||
289 | { | ||
290 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
291 | if (chip->enable) | ||
292 | chip->irq_enable = compat_irq_enable; | ||
293 | if (chip->disable) | ||
294 | chip->irq_disable = compat_irq_disable; | ||
295 | if (chip->shutdown) | ||
296 | chip->irq_shutdown = compat_irq_shutdown; | ||
297 | if (chip->startup) | ||
298 | chip->irq_startup = compat_irq_startup; | ||
299 | if (!chip->end) | ||
300 | chip->end = dummy_irq_chip.end; | ||
301 | if (chip->bus_lock) | ||
302 | chip->irq_bus_lock = compat_bus_lock; | ||
303 | if (chip->bus_sync_unlock) | ||
304 | chip->irq_bus_sync_unlock = compat_bus_sync_unlock; | ||
305 | if (chip->mask) | ||
306 | chip->irq_mask = compat_irq_mask; | ||
307 | if (chip->unmask) | ||
308 | chip->irq_unmask = compat_irq_unmask; | ||
309 | if (chip->ack) | ||
310 | chip->irq_ack = compat_irq_ack; | ||
311 | if (chip->mask_ack) | ||
312 | chip->irq_mask_ack = compat_irq_mask_ack; | ||
313 | if (chip->eoi) | ||
314 | chip->irq_eoi = compat_irq_eoi; | ||
315 | if (chip->set_affinity) | ||
316 | chip->irq_set_affinity = compat_irq_set_affinity; | ||
317 | if (chip->set_type) | ||
318 | chip->irq_set_type = compat_irq_set_type; | ||
319 | if (chip->set_wake) | ||
320 | chip->irq_set_wake = compat_irq_set_wake; | ||
321 | if (chip->retrigger) | ||
322 | chip->irq_retrigger = compat_irq_retrigger; | ||
323 | #endif | ||
324 | } | ||
325 | |||
326 | static inline void mask_ack_irq(struct irq_desc *desc) | 207 | static inline void mask_ack_irq(struct irq_desc *desc) |
327 | { | 208 | { |
328 | if (desc->irq_data.chip->irq_mask_ack) | 209 | if (desc->irq_data.chip->irq_mask_ack) |
@@ -372,11 +253,10 @@ void handle_nested_irq(unsigned int irq) | |||
372 | kstat_incr_irqs_this_cpu(irq, desc); | 253 | kstat_incr_irqs_this_cpu(irq, desc); |
373 | 254 | ||
374 | action = desc->action; | 255 | action = desc->action; |
375 | if (unlikely(!action || (desc->istate & IRQS_DISABLED))) | 256 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) |
376 | goto out_unlock; | 257 | goto out_unlock; |
377 | 258 | ||
378 | irq_compat_set_progress(desc); | 259 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
379 | desc->istate |= IRQS_INPROGRESS; | ||
380 | raw_spin_unlock_irq(&desc->lock); | 260 | raw_spin_unlock_irq(&desc->lock); |
381 | 261 | ||
382 | action_ret = action->thread_fn(action->irq, action->dev_id); | 262 | action_ret = action->thread_fn(action->irq, action->dev_id); |
@@ -384,8 +264,7 @@ void handle_nested_irq(unsigned int irq) | |||
384 | note_interrupt(irq, desc, action_ret); | 264 | note_interrupt(irq, desc, action_ret); |
385 | 265 | ||
386 | raw_spin_lock_irq(&desc->lock); | 266 | raw_spin_lock_irq(&desc->lock); |
387 | desc->istate &= ~IRQS_INPROGRESS; | 267 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
388 | irq_compat_clr_progress(desc); | ||
389 | 268 | ||
390 | out_unlock: | 269 | out_unlock: |
391 | raw_spin_unlock_irq(&desc->lock); | 270 | raw_spin_unlock_irq(&desc->lock); |
@@ -416,14 +295,14 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
416 | { | 295 | { |
417 | raw_spin_lock(&desc->lock); | 296 | raw_spin_lock(&desc->lock); |
418 | 297 | ||
419 | if (unlikely(desc->istate & IRQS_INPROGRESS)) | 298 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
420 | if (!irq_check_poll(desc)) | 299 | if (!irq_check_poll(desc)) |
421 | goto out_unlock; | 300 | goto out_unlock; |
422 | 301 | ||
423 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 302 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
424 | kstat_incr_irqs_this_cpu(irq, desc); | 303 | kstat_incr_irqs_this_cpu(irq, desc); |
425 | 304 | ||
426 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) | 305 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) |
427 | goto out_unlock; | 306 | goto out_unlock; |
428 | 307 | ||
429 | handle_irq_event(desc); | 308 | handle_irq_event(desc); |
@@ -448,7 +327,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
448 | raw_spin_lock(&desc->lock); | 327 | raw_spin_lock(&desc->lock); |
449 | mask_ack_irq(desc); | 328 | mask_ack_irq(desc); |
450 | 329 | ||
451 | if (unlikely(desc->istate & IRQS_INPROGRESS)) | 330 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
452 | if (!irq_check_poll(desc)) | 331 | if (!irq_check_poll(desc)) |
453 | goto out_unlock; | 332 | goto out_unlock; |
454 | 333 | ||
@@ -459,12 +338,12 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
459 | * If its disabled or no action available | 338 | * If its disabled or no action available |
460 | * keep it masked and get out of here | 339 | * keep it masked and get out of here |
461 | */ | 340 | */ |
462 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) | 341 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) |
463 | goto out_unlock; | 342 | goto out_unlock; |
464 | 343 | ||
465 | handle_irq_event(desc); | 344 | handle_irq_event(desc); |
466 | 345 | ||
467 | if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT))) | 346 | if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT)) |
468 | unmask_irq(desc); | 347 | unmask_irq(desc); |
469 | out_unlock: | 348 | out_unlock: |
470 | raw_spin_unlock(&desc->lock); | 349 | raw_spin_unlock(&desc->lock); |
@@ -496,7 +375,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
496 | { | 375 | { |
497 | raw_spin_lock(&desc->lock); | 376 | raw_spin_lock(&desc->lock); |
498 | 377 | ||
499 | if (unlikely(desc->istate & IRQS_INPROGRESS)) | 378 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
500 | if (!irq_check_poll(desc)) | 379 | if (!irq_check_poll(desc)) |
501 | goto out; | 380 | goto out; |
502 | 381 | ||
@@ -507,8 +386,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
507 | * If its disabled or no action available | 386 | * If its disabled or no action available |
508 | * then mask it and get out of here: | 387 | * then mask it and get out of here: |
509 | */ | 388 | */ |
510 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) { | 389 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
511 | irq_compat_set_pending(desc); | ||
512 | desc->istate |= IRQS_PENDING; | 390 | desc->istate |= IRQS_PENDING; |
513 | mask_irq(desc); | 391 | mask_irq(desc); |
514 | goto out; | 392 | goto out; |
@@ -558,10 +436,9 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
558 | * we shouldn't process the IRQ. Mark it pending, handle | 436 | * we shouldn't process the IRQ. Mark it pending, handle |
559 | * the necessary masking and go out | 437 | * the necessary masking and go out |
560 | */ | 438 | */ |
561 | if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) || | 439 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || |
562 | !desc->action))) { | 440 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { |
563 | if (!irq_check_poll(desc)) { | 441 | if (!irq_check_poll(desc)) { |
564 | irq_compat_set_pending(desc); | ||
565 | desc->istate |= IRQS_PENDING; | 442 | desc->istate |= IRQS_PENDING; |
566 | mask_ack_irq(desc); | 443 | mask_ack_irq(desc); |
567 | goto out_unlock; | 444 | goto out_unlock; |
@@ -584,20 +461,65 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
584 | * Renable it, if it was not disabled in meantime. | 461 | * Renable it, if it was not disabled in meantime. |
585 | */ | 462 | */ |
586 | if (unlikely(desc->istate & IRQS_PENDING)) { | 463 | if (unlikely(desc->istate & IRQS_PENDING)) { |
587 | if (!(desc->istate & IRQS_DISABLED) && | 464 | if (!irqd_irq_disabled(&desc->irq_data) && |
588 | (desc->istate & IRQS_MASKED)) | 465 | irqd_irq_masked(&desc->irq_data)) |
589 | unmask_irq(desc); | 466 | unmask_irq(desc); |
590 | } | 467 | } |
591 | 468 | ||
592 | handle_irq_event(desc); | 469 | handle_irq_event(desc); |
593 | 470 | ||
594 | } while ((desc->istate & IRQS_PENDING) && | 471 | } while ((desc->istate & IRQS_PENDING) && |
595 | !(desc->istate & IRQS_DISABLED)); | 472 | !irqd_irq_disabled(&desc->irq_data)); |
596 | 473 | ||
597 | out_unlock: | 474 | out_unlock: |
598 | raw_spin_unlock(&desc->lock); | 475 | raw_spin_unlock(&desc->lock); |
599 | } | 476 | } |
600 | 477 | ||
478 | #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER | ||
479 | /** | ||
480 | * handle_edge_eoi_irq - edge eoi type IRQ handler | ||
481 | * @irq: the interrupt number | ||
482 | * @desc: the interrupt description structure for this irq | ||
483 | * | ||
484 | * Similar as the above handle_edge_irq, but using eoi and w/o the | ||
485 | * mask/unmask logic. | ||
486 | */ | ||
487 | void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) | ||
488 | { | ||
489 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
490 | |||
491 | raw_spin_lock(&desc->lock); | ||
492 | |||
493 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | ||
494 | /* | ||
495 | * If we're currently running this IRQ, or its disabled, | ||
496 | * we shouldn't process the IRQ. Mark it pending, handle | ||
497 | * the necessary masking and go out | ||
498 | */ | ||
499 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || | ||
500 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { | ||
501 | if (!irq_check_poll(desc)) { | ||
502 | desc->istate |= IRQS_PENDING; | ||
503 | goto out_eoi; | ||
504 | } | ||
505 | } | ||
506 | kstat_incr_irqs_this_cpu(irq, desc); | ||
507 | |||
508 | do { | ||
509 | if (unlikely(!desc->action)) | ||
510 | goto out_eoi; | ||
511 | |||
512 | handle_irq_event(desc); | ||
513 | |||
514 | } while ((desc->istate & IRQS_PENDING) && | ||
515 | !irqd_irq_disabled(&desc->irq_data)); | ||
516 | |||
517 | out_eoi: | ||
518 | chip->irq_eoi(&desc->irq_data); | ||
519 | raw_spin_unlock(&desc->lock); | ||
520 | } | ||
521 | #endif | ||
522 | |||
601 | /** | 523 | /** |
602 | * handle_percpu_irq - Per CPU local irq handler | 524 | * handle_percpu_irq - Per CPU local irq handler |
603 | * @irq: the interrupt number | 525 | * @irq: the interrupt number |
@@ -642,8 +564,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
642 | if (handle == handle_bad_irq) { | 564 | if (handle == handle_bad_irq) { |
643 | if (desc->irq_data.chip != &no_irq_chip) | 565 | if (desc->irq_data.chip != &no_irq_chip) |
644 | mask_ack_irq(desc); | 566 | mask_ack_irq(desc); |
645 | irq_compat_set_disabled(desc); | 567 | irq_state_set_disabled(desc); |
646 | desc->istate |= IRQS_DISABLED; | ||
647 | desc->depth = 1; | 568 | desc->depth = 1; |
648 | } | 569 | } |
649 | desc->handle_irq = handle; | 570 | desc->handle_irq = handle; |
@@ -684,8 +605,70 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) | |||
684 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | 605 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
685 | if (irq_settings_can_move_pcntxt(desc)) | 606 | if (irq_settings_can_move_pcntxt(desc)) |
686 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); | 607 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); |
608 | if (irq_settings_is_level(desc)) | ||
609 | irqd_set(&desc->irq_data, IRQD_LEVEL); | ||
687 | 610 | ||
688 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); | 611 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); |
689 | 612 | ||
690 | irq_put_desc_unlock(desc, flags); | 613 | irq_put_desc_unlock(desc, flags); |
691 | } | 614 | } |
615 | |||
616 | /** | ||
617 | * irq_cpu_online - Invoke all irq_cpu_online functions. | ||
618 | * | ||
619 | * Iterate through all irqs and invoke the chip.irq_cpu_online() | ||
620 | * for each. | ||
621 | */ | ||
622 | void irq_cpu_online(void) | ||
623 | { | ||
624 | struct irq_desc *desc; | ||
625 | struct irq_chip *chip; | ||
626 | unsigned long flags; | ||
627 | unsigned int irq; | ||
628 | |||
629 | for_each_active_irq(irq) { | ||
630 | desc = irq_to_desc(irq); | ||
631 | if (!desc) | ||
632 | continue; | ||
633 | |||
634 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
635 | |||
636 | chip = irq_data_get_irq_chip(&desc->irq_data); | ||
637 | if (chip && chip->irq_cpu_online && | ||
638 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | ||
639 | !irqd_irq_disabled(&desc->irq_data))) | ||
640 | chip->irq_cpu_online(&desc->irq_data); | ||
641 | |||
642 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
643 | } | ||
644 | } | ||
645 | |||
646 | /** | ||
647 | * irq_cpu_offline - Invoke all irq_cpu_offline functions. | ||
648 | * | ||
649 | * Iterate through all irqs and invoke the chip.irq_cpu_offline() | ||
650 | * for each. | ||
651 | */ | ||
652 | void irq_cpu_offline(void) | ||
653 | { | ||
654 | struct irq_desc *desc; | ||
655 | struct irq_chip *chip; | ||
656 | unsigned long flags; | ||
657 | unsigned int irq; | ||
658 | |||
659 | for_each_active_irq(irq) { | ||
660 | desc = irq_to_desc(irq); | ||
661 | if (!desc) | ||
662 | continue; | ||
663 | |||
664 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
665 | |||
666 | chip = irq_data_get_irq_chip(&desc->irq_data); | ||
667 | if (chip && chip->irq_cpu_offline && | ||
668 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | ||
669 | !irqd_irq_disabled(&desc->irq_data))) | ||
670 | chip->irq_cpu_offline(&desc->irq_data); | ||
671 | |||
672 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
673 | } | ||
674 | } | ||
diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h deleted file mode 100644 index 6bbaf66aca85..000000000000 --- a/kernel/irq/compat.h +++ /dev/null | |||
@@ -1,72 +0,0 @@ | |||
1 | /* | ||
2 | * Compat layer for transition period | ||
3 | */ | ||
4 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
5 | static inline void irq_compat_set_progress(struct irq_desc *desc) | ||
6 | { | ||
7 | desc->status |= IRQ_INPROGRESS; | ||
8 | } | ||
9 | |||
10 | static inline void irq_compat_clr_progress(struct irq_desc *desc) | ||
11 | { | ||
12 | desc->status &= ~IRQ_INPROGRESS; | ||
13 | } | ||
14 | static inline void irq_compat_set_disabled(struct irq_desc *desc) | ||
15 | { | ||
16 | desc->status |= IRQ_DISABLED; | ||
17 | } | ||
18 | static inline void irq_compat_clr_disabled(struct irq_desc *desc) | ||
19 | { | ||
20 | desc->status &= ~IRQ_DISABLED; | ||
21 | } | ||
22 | static inline void irq_compat_set_pending(struct irq_desc *desc) | ||
23 | { | ||
24 | desc->status |= IRQ_PENDING; | ||
25 | } | ||
26 | |||
27 | static inline void irq_compat_clr_pending(struct irq_desc *desc) | ||
28 | { | ||
29 | desc->status &= ~IRQ_PENDING; | ||
30 | } | ||
31 | static inline void irq_compat_set_masked(struct irq_desc *desc) | ||
32 | { | ||
33 | desc->status |= IRQ_MASKED; | ||
34 | } | ||
35 | |||
36 | static inline void irq_compat_clr_masked(struct irq_desc *desc) | ||
37 | { | ||
38 | desc->status &= ~IRQ_MASKED; | ||
39 | } | ||
40 | static inline void irq_compat_set_move_pending(struct irq_desc *desc) | ||
41 | { | ||
42 | desc->status |= IRQ_MOVE_PENDING; | ||
43 | } | ||
44 | |||
45 | static inline void irq_compat_clr_move_pending(struct irq_desc *desc) | ||
46 | { | ||
47 | desc->status &= ~IRQ_MOVE_PENDING; | ||
48 | } | ||
49 | static inline void irq_compat_set_affinity(struct irq_desc *desc) | ||
50 | { | ||
51 | desc->status |= IRQ_AFFINITY_SET; | ||
52 | } | ||
53 | |||
54 | static inline void irq_compat_clr_affinity(struct irq_desc *desc) | ||
55 | { | ||
56 | desc->status &= ~IRQ_AFFINITY_SET; | ||
57 | } | ||
58 | #else | ||
59 | static inline void irq_compat_set_progress(struct irq_desc *desc) { } | ||
60 | static inline void irq_compat_clr_progress(struct irq_desc *desc) { } | ||
61 | static inline void irq_compat_set_disabled(struct irq_desc *desc) { } | ||
62 | static inline void irq_compat_clr_disabled(struct irq_desc *desc) { } | ||
63 | static inline void irq_compat_set_pending(struct irq_desc *desc) { } | ||
64 | static inline void irq_compat_clr_pending(struct irq_desc *desc) { } | ||
65 | static inline void irq_compat_set_masked(struct irq_desc *desc) { } | ||
66 | static inline void irq_compat_clr_masked(struct irq_desc *desc) { } | ||
67 | static inline void irq_compat_set_move_pending(struct irq_desc *desc) { } | ||
68 | static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { } | ||
69 | static inline void irq_compat_set_affinity(struct irq_desc *desc) { } | ||
70 | static inline void irq_compat_clr_affinity(struct irq_desc *desc) { } | ||
71 | #endif | ||
72 | |||
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h index d1a33b7fa61d..306cba37e9a5 100644 --- a/kernel/irq/debug.h +++ b/kernel/irq/debug.h | |||
@@ -4,8 +4,10 @@ | |||
4 | 4 | ||
5 | #include <linux/kallsyms.h> | 5 | #include <linux/kallsyms.h> |
6 | 6 | ||
7 | #define P(f) if (desc->status & f) printk("%14s set\n", #f) | 7 | #define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f) |
8 | #define PS(f) if (desc->istate & f) printk("%14s set\n", #f) | 8 | #define PS(f) if (desc->istate & f) printk("%14s set\n", #f) |
9 | /* FIXME */ | ||
10 | #define PD(f) do { } while (0) | ||
9 | 11 | ||
10 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | 12 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) |
11 | { | 13 | { |
@@ -28,13 +30,15 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | |||
28 | P(IRQ_NOAUTOEN); | 30 | P(IRQ_NOAUTOEN); |
29 | 31 | ||
30 | PS(IRQS_AUTODETECT); | 32 | PS(IRQS_AUTODETECT); |
31 | PS(IRQS_INPROGRESS); | ||
32 | PS(IRQS_REPLAY); | 33 | PS(IRQS_REPLAY); |
33 | PS(IRQS_WAITING); | 34 | PS(IRQS_WAITING); |
34 | PS(IRQS_DISABLED); | ||
35 | PS(IRQS_PENDING); | 35 | PS(IRQS_PENDING); |
36 | PS(IRQS_MASKED); | 36 | |
37 | PD(IRQS_INPROGRESS); | ||
38 | PD(IRQS_DISABLED); | ||
39 | PD(IRQS_MASKED); | ||
37 | } | 40 | } |
38 | 41 | ||
39 | #undef P | 42 | #undef P |
40 | #undef PS | 43 | #undef PS |
44 | #undef PD | ||
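The PD() macro is left as a FIXME because IRQS_INPROGRESS, IRQS_DISABLED and IRQS_MASKED no longer live in desc->istate; that state moved into irq_data and is queried through the irqd_irq_*() accessors used throughout this patch. One possible, purely illustrative way to print those bits until the FIXME is resolved (print_irqd_state is an invented helper, not from this commit):

/* Illustrative only -- print_irqd_state() is not part of this commit. */
static inline void print_irqd_state(struct irq_desc *desc)
{
	if (irqd_irq_inprogress(&desc->irq_data))
		printk("%14s set\n", "IRQD_IRQ_INPROGRESS");
	if (irqd_irq_disabled(&desc->irq_data))
		printk("%14s set\n", "IRQD_IRQ_DISABLED");
	if (irqd_irq_masked(&desc->irq_data))
		printk("%14s set\n", "IRQD_IRQ_MASKED");
}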
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c index 20dc5474947e..b5fcd96c7102 100644 --- a/kernel/irq/dummychip.c +++ b/kernel/irq/dummychip.c | |||
@@ -31,13 +31,6 @@ static unsigned int noop_ret(struct irq_data *data) | |||
31 | return 0; | 31 | return 0; |
32 | } | 32 | } |
33 | 33 | ||
34 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
35 | static void compat_noop(unsigned int irq) { } | ||
36 | #define END_INIT .end = compat_noop | ||
37 | #else | ||
38 | #define END_INIT | ||
39 | #endif | ||
40 | |||
41 | /* | 34 | /* |
42 | * Generic no controller implementation | 35 | * Generic no controller implementation |
43 | */ | 36 | */ |
@@ -48,7 +41,6 @@ struct irq_chip no_irq_chip = { | |||
48 | .irq_enable = noop, | 41 | .irq_enable = noop, |
49 | .irq_disable = noop, | 42 | .irq_disable = noop, |
50 | .irq_ack = ack_bad, | 43 | .irq_ack = ack_bad, |
51 | END_INIT | ||
52 | }; | 44 | }; |
53 | 45 | ||
54 | /* | 46 | /* |
@@ -64,5 +56,4 @@ struct irq_chip dummy_irq_chip = { | |||
64 | .irq_ack = noop, | 56 | .irq_ack = noop, |
65 | .irq_mask = noop, | 57 | .irq_mask = noop, |
66 | .irq_unmask = noop, | 58 | .irq_unmask = noop, |
67 | END_INIT | ||
68 | }; | 59 | }; |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 517561fc7317..90cb55f6d7eb 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -175,28 +175,13 @@ irqreturn_t handle_irq_event(struct irq_desc *desc) | |||
175 | struct irqaction *action = desc->action; | 175 | struct irqaction *action = desc->action; |
176 | irqreturn_t ret; | 176 | irqreturn_t ret; |
177 | 177 | ||
178 | irq_compat_clr_pending(desc); | ||
179 | desc->istate &= ~IRQS_PENDING; | 178 | desc->istate &= ~IRQS_PENDING; |
180 | irq_compat_set_progress(desc); | 179 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
181 | desc->istate |= IRQS_INPROGRESS; | ||
182 | raw_spin_unlock(&desc->lock); | 180 | raw_spin_unlock(&desc->lock); |
183 | 181 | ||
184 | ret = handle_irq_event_percpu(desc, action); | 182 | ret = handle_irq_event_percpu(desc, action); |
185 | 183 | ||
186 | raw_spin_lock(&desc->lock); | 184 | raw_spin_lock(&desc->lock); |
187 | desc->istate &= ~IRQS_INPROGRESS; | 185 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
188 | irq_compat_clr_progress(desc); | ||
189 | return ret; | 186 | return ret; |
190 | } | 187 | } |
191 | |||
192 | /** | ||
193 | * handle_IRQ_event - irq action chain handler | ||
194 | * @irq: the interrupt number | ||
195 | * @action: the interrupt action chain for this irq | ||
196 | * | ||
197 | * Handles the action chain of an irq event | ||
198 | */ | ||
199 | irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | ||
200 | { | ||
201 | return handle_irq_event_percpu(irq_to_desc(irq), action); | ||
202 | } | ||
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 6c6ec9a49027..6546431447d7 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -15,10 +15,6 @@ | |||
15 | 15 | ||
16 | #define istate core_internal_state__do_not_mess_with_it | 16 | #define istate core_internal_state__do_not_mess_with_it |
17 | 17 | ||
18 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
19 | # define status status_use_accessors | ||
20 | #endif | ||
21 | |||
22 | extern int noirqdebug; | 18 | extern int noirqdebug; |
23 | 19 | ||
24 | /* | 20 | /* |
@@ -44,38 +40,28 @@ enum { | |||
44 | * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt | 40 | * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt |
45 | * detection | 41 | * detection |
46 | * IRQS_POLL_INPROGRESS - polling in progress | 42 | * IRQS_POLL_INPROGRESS - polling in progress |
47 | * IRQS_INPROGRESS - Interrupt in progress | ||
48 | * IRQS_ONESHOT - irq is not unmasked in primary handler | 43 | * IRQS_ONESHOT - irq is not unmasked in primary handler |
49 | * IRQS_REPLAY - irq is replayed | 44 | * IRQS_REPLAY - irq is replayed |
50 | * IRQS_WAITING - irq is waiting | 45 | * IRQS_WAITING - irq is waiting |
51 | * IRQS_DISABLED - irq is disabled | ||
52 | * IRQS_PENDING - irq is pending and replayed later | 46 | * IRQS_PENDING - irq is pending and replayed later |
53 | * IRQS_MASKED - irq is masked | ||
54 | * IRQS_SUSPENDED - irq is suspended | 47 | * IRQS_SUSPENDED - irq is suspended |
55 | */ | 48 | */ |
56 | enum { | 49 | enum { |
57 | IRQS_AUTODETECT = 0x00000001, | 50 | IRQS_AUTODETECT = 0x00000001, |
58 | IRQS_SPURIOUS_DISABLED = 0x00000002, | 51 | IRQS_SPURIOUS_DISABLED = 0x00000002, |
59 | IRQS_POLL_INPROGRESS = 0x00000008, | 52 | IRQS_POLL_INPROGRESS = 0x00000008, |
60 | IRQS_INPROGRESS = 0x00000010, | ||
61 | IRQS_ONESHOT = 0x00000020, | 53 | IRQS_ONESHOT = 0x00000020, |
62 | IRQS_REPLAY = 0x00000040, | 54 | IRQS_REPLAY = 0x00000040, |
63 | IRQS_WAITING = 0x00000080, | 55 | IRQS_WAITING = 0x00000080, |
64 | IRQS_DISABLED = 0x00000100, | ||
65 | IRQS_PENDING = 0x00000200, | 56 | IRQS_PENDING = 0x00000200, |
66 | IRQS_MASKED = 0x00000400, | ||
67 | IRQS_SUSPENDED = 0x00000800, | 57 | IRQS_SUSPENDED = 0x00000800, |
68 | }; | 58 | }; |
69 | 59 | ||
70 | #include "compat.h" | ||
71 | #include "debug.h" | 60 | #include "debug.h" |
72 | #include "settings.h" | 61 | #include "settings.h" |
73 | 62 | ||
74 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) | 63 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) |
75 | 64 | ||
76 | /* Set default functions for irq_chip structures: */ | ||
77 | extern void irq_chip_set_defaults(struct irq_chip *chip); | ||
78 | |||
79 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 65 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
80 | unsigned long flags); | 66 | unsigned long flags); |
81 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); | 67 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); |
@@ -162,13 +148,11 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags) | |||
162 | static inline void irqd_set_move_pending(struct irq_data *d) | 148 | static inline void irqd_set_move_pending(struct irq_data *d) |
163 | { | 149 | { |
164 | d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; | 150 | d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; |
165 | irq_compat_set_move_pending(irq_data_to_desc(d)); | ||
166 | } | 151 | } |
167 | 152 | ||
168 | static inline void irqd_clr_move_pending(struct irq_data *d) | 153 | static inline void irqd_clr_move_pending(struct irq_data *d) |
169 | { | 154 | { |
170 | d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; | 155 | d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; |
171 | irq_compat_clr_move_pending(irq_data_to_desc(d)); | ||
172 | } | 156 | } |
173 | 157 | ||
174 | static inline void irqd_clear(struct irq_data *d, unsigned int mask) | 158 | static inline void irqd_clear(struct irq_data *d, unsigned int mask) |
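With IRQS_INPROGRESS, IRQS_DISABLED and IRQS_MASKED gone from the istate enum, the remaining IRQS_* bits are still manipulated only under desc->lock, while the disabled/masked/inprogress state is read through the irqd_*() accessors. A condensed, illustrative restatement of the flow-handler pattern used above (example_flow_handler is not real kernel code, just a sketch of the locking/accessor split):

/*
 * Illustrative sketch only, condensing the pattern of the flow handlers:
 * IRQS_* bits stay in desc->istate and are touched under desc->lock,
 * while disabled state is read through the irq_data accessors.
 */
static void example_flow_handler(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;	/* replayed later */
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(irq, desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}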
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 6fb014f172f7..2c039c9b9383 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -80,7 +80,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) | |||
80 | desc->irq_data.handler_data = NULL; | 80 | desc->irq_data.handler_data = NULL; |
81 | desc->irq_data.msi_desc = NULL; | 81 | desc->irq_data.msi_desc = NULL; |
82 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); | 82 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); |
83 | desc->istate = IRQS_DISABLED; | 83 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
84 | desc->handle_irq = handle_bad_irq; | 84 | desc->handle_irq = handle_bad_irq; |
85 | desc->depth = 1; | 85 | desc->depth = 1; |
86 | desc->irq_count = 0; | 86 | desc->irq_count = 0; |
@@ -238,7 +238,6 @@ int __init early_irq_init(void) | |||
238 | 238 | ||
239 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | 239 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { |
240 | [0 ... NR_IRQS-1] = { | 240 | [0 ... NR_IRQS-1] = { |
241 | .istate = IRQS_DISABLED, | ||
242 | .handle_irq = handle_bad_irq, | 241 | .handle_irq = handle_bad_irq, |
243 | .depth = 1, | 242 | .depth = 1, |
244 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), | 243 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0a2aa73e536c..12a80fdae11c 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -41,7 +41,7 @@ early_param("threadirqs", setup_forced_irqthreads); | |||
41 | void synchronize_irq(unsigned int irq) | 41 | void synchronize_irq(unsigned int irq) |
42 | { | 42 | { |
43 | struct irq_desc *desc = irq_to_desc(irq); | 43 | struct irq_desc *desc = irq_to_desc(irq); |
44 | unsigned int state; | 44 | bool inprogress; |
45 | 45 | ||
46 | if (!desc) | 46 | if (!desc) |
47 | return; | 47 | return; |
@@ -53,16 +53,16 @@ void synchronize_irq(unsigned int irq) | |||
53 | * Wait until we're out of the critical section. This might | 53 | * Wait until we're out of the critical section. This might |
54 | * give the wrong answer due to the lack of memory barriers. | 54 | * give the wrong answer due to the lack of memory barriers. |
55 | */ | 55 | */ |
56 | while (desc->istate & IRQS_INPROGRESS) | 56 | while (irqd_irq_inprogress(&desc->irq_data)) |
57 | cpu_relax(); | 57 | cpu_relax(); |
58 | 58 | ||
59 | /* Ok, that indicated we're done: double-check carefully. */ | 59 | /* Ok, that indicated we're done: double-check carefully. */ |
60 | raw_spin_lock_irqsave(&desc->lock, flags); | 60 | raw_spin_lock_irqsave(&desc->lock, flags); |
61 | state = desc->istate; | 61 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
62 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 62 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
63 | 63 | ||
64 | /* Oops, that failed? */ | 64 | /* Oops, that failed? */ |
65 | } while (state & IRQS_INPROGRESS); | 65 | } while (inprogress); |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * We made sure that no hardirq handler is running. Now verify | 68 | * We made sure that no hardirq handler is running. Now verify |
@@ -112,13 +112,13 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 114 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
115 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) | 115 | static inline bool irq_can_move_pcntxt(struct irq_data *data) |
116 | { | 116 | { |
117 | return irq_settings_can_move_pcntxt(desc); | 117 | return irqd_can_move_in_process_context(data); |
118 | } | 118 | } |
119 | static inline bool irq_move_pending(struct irq_desc *desc) | 119 | static inline bool irq_move_pending(struct irq_data *data) |
120 | { | 120 | { |
121 | return irqd_is_setaffinity_pending(&desc->irq_data); | 121 | return irqd_is_setaffinity_pending(data); |
122 | } | 122 | } |
123 | static inline void | 123 | static inline void |
124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | 124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) |
@@ -131,43 +131,34 @@ irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | |||
131 | cpumask_copy(mask, desc->pending_mask); | 131 | cpumask_copy(mask, desc->pending_mask); |
132 | } | 132 | } |
133 | #else | 133 | #else |
134 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; } | 134 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } |
135 | static inline bool irq_move_pending(struct irq_desc *desc) { return false; } | 135 | static inline bool irq_move_pending(struct irq_data *data) { return false; } |
136 | static inline void | 136 | static inline void |
137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | 137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } |
138 | static inline void | 138 | static inline void |
139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | 139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } |
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | /** | 142 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) |
143 | * irq_set_affinity - Set the irq affinity of a given irq | ||
144 | * @irq: Interrupt to set affinity | ||
145 | * @cpumask: cpumask | ||
146 | * | ||
147 | */ | ||
148 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | ||
149 | { | 143 | { |
150 | struct irq_desc *desc = irq_to_desc(irq); | 144 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
151 | struct irq_chip *chip = desc->irq_data.chip; | 145 | struct irq_desc *desc = irq_data_to_desc(data); |
152 | unsigned long flags; | ||
153 | int ret = 0; | 146 | int ret = 0; |
154 | 147 | ||
155 | if (!chip->irq_set_affinity) | 148 | if (!chip || !chip->irq_set_affinity) |
156 | return -EINVAL; | 149 | return -EINVAL; |
157 | 150 | ||
158 | raw_spin_lock_irqsave(&desc->lock, flags); | 151 | if (irq_can_move_pcntxt(data)) { |
159 | 152 | ret = chip->irq_set_affinity(data, mask, false); | |
160 | if (irq_can_move_pcntxt(desc)) { | ||
161 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); | ||
162 | switch (ret) { | 153 | switch (ret) { |
163 | case IRQ_SET_MASK_OK: | 154 | case IRQ_SET_MASK_OK: |
164 | cpumask_copy(desc->irq_data.affinity, mask); | 155 | cpumask_copy(data->affinity, mask); |
165 | case IRQ_SET_MASK_OK_NOCOPY: | 156 | case IRQ_SET_MASK_OK_NOCOPY: |
166 | irq_set_thread_affinity(desc); | 157 | irq_set_thread_affinity(desc); |
167 | ret = 0; | 158 | ret = 0; |
168 | } | 159 | } |
169 | } else { | 160 | } else { |
170 | irqd_set_move_pending(&desc->irq_data); | 161 | irqd_set_move_pending(data); |
171 | irq_copy_pending(desc, mask); | 162 | irq_copy_pending(desc, mask); |
172 | } | 163 | } |
173 | 164 | ||
@@ -175,8 +166,28 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
175 | kref_get(&desc->affinity_notify->kref); | 166 | kref_get(&desc->affinity_notify->kref); |
176 | schedule_work(&desc->affinity_notify->work); | 167 | schedule_work(&desc->affinity_notify->work); |
177 | } | 168 | } |
178 | irq_compat_set_affinity(desc); | 169 | irqd_set(data, IRQD_AFFINITY_SET); |
179 | irqd_set(&desc->irq_data, IRQD_AFFINITY_SET); | 170 | |
171 | return ret; | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * irq_set_affinity - Set the irq affinity of a given irq | ||
176 | * @irq: Interrupt to set affinity | ||
177 | * @mask: cpumask | ||
178 | * | ||
179 | */ | ||
180 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | ||
181 | { | ||
182 | struct irq_desc *desc = irq_to_desc(irq); | ||
183 | unsigned long flags; | ||
184 | int ret; | ||
185 | |||
186 | if (!desc) | ||
187 | return -EINVAL; | ||
188 | |||
189 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
190 | ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); | ||
180 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 191 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
181 | return ret; | 192 | return ret; |
182 | } | 193 | } |
@@ -206,7 +217,7 @@ static void irq_affinity_notify(struct work_struct *work) | |||
206 | goto out; | 217 | goto out; |
207 | 218 | ||
208 | raw_spin_lock_irqsave(&desc->lock, flags); | 219 | raw_spin_lock_irqsave(&desc->lock, flags); |
209 | if (irq_move_pending(desc)) | 220 | if (irq_move_pending(&desc->irq_data)) |
210 | irq_get_pending(cpumask, desc); | 221 | irq_get_pending(cpumask, desc); |
211 | else | 222 | else |
212 | cpumask_copy(cpumask, desc->irq_data.affinity); | 223 | cpumask_copy(cpumask, desc->irq_data.affinity); |
@@ -285,10 +296,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |||
285 | if (cpumask_intersects(desc->irq_data.affinity, | 296 | if (cpumask_intersects(desc->irq_data.affinity, |
286 | cpu_online_mask)) | 297 | cpu_online_mask)) |
287 | set = desc->irq_data.affinity; | 298 | set = desc->irq_data.affinity; |
288 | else { | 299 | else |
289 | irq_compat_clr_affinity(desc); | ||
290 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); | 300 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
291 | } | ||
292 | } | 301 | } |
293 | 302 | ||
294 | cpumask_and(mask, cpu_online_mask, set); | 303 | cpumask_and(mask, cpu_online_mask, set); |
@@ -551,9 +560,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
551 | flags &= IRQ_TYPE_SENSE_MASK; | 560 | flags &= IRQ_TYPE_SENSE_MASK; |
552 | 561 | ||
553 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | 562 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { |
554 | if (!(desc->istate & IRQS_MASKED)) | 563 | if (!irqd_irq_masked(&desc->irq_data)) |
555 | mask_irq(desc); | 564 | mask_irq(desc); |
556 | if (!(desc->istate & IRQS_DISABLED)) | 565 | if (!irqd_irq_disabled(&desc->irq_data)) |
557 | unmask = 1; | 566 | unmask = 1; |
558 | } | 567 | } |
559 | 568 | ||
@@ -575,8 +584,6 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
575 | irqd_set(&desc->irq_data, IRQD_LEVEL); | 584 | irqd_set(&desc->irq_data, IRQD_LEVEL); |
576 | } | 585 | } |
577 | 586 | ||
578 | if (chip != desc->irq_data.chip) | ||
579 | irq_chip_set_defaults(desc->irq_data.chip); | ||
580 | ret = 0; | 587 | ret = 0; |
581 | break; | 588 | break; |
582 | default: | 589 | default: |
@@ -651,7 +658,7 @@ again: | |||
651 | * irq_wake_thread(). See the comment there which explains the | 658 | * irq_wake_thread(). See the comment there which explains the |
652 | * serialization. | 659 | * serialization. |
653 | */ | 660 | */ |
654 | if (unlikely(desc->istate & IRQS_INPROGRESS)) { | 661 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
655 | raw_spin_unlock_irq(&desc->lock); | 662 | raw_spin_unlock_irq(&desc->lock); |
656 | chip_bus_sync_unlock(desc); | 663 | chip_bus_sync_unlock(desc); |
657 | cpu_relax(); | 664 | cpu_relax(); |
@@ -668,12 +675,10 @@ again: | |||
668 | 675 | ||
669 | desc->threads_oneshot &= ~action->thread_mask; | 676 | desc->threads_oneshot &= ~action->thread_mask; |
670 | 677 | ||
671 | if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) && | 678 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
672 | (desc->istate & IRQS_MASKED)) { | 679 | irqd_irq_masked(&desc->irq_data)) |
673 | irq_compat_clr_masked(desc); | 680 | unmask_irq(desc); |
674 | desc->istate &= ~IRQS_MASKED; | 681 | |
675 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | ||
676 | } | ||
677 | out_unlock: | 682 | out_unlock: |
678 | raw_spin_unlock_irq(&desc->lock); | 683 | raw_spin_unlock_irq(&desc->lock); |
679 | chip_bus_sync_unlock(desc); | 684 | chip_bus_sync_unlock(desc); |
@@ -767,7 +772,7 @@ static int irq_thread(void *data) | |||
767 | atomic_inc(&desc->threads_active); | 772 | atomic_inc(&desc->threads_active); |
768 | 773 | ||
769 | raw_spin_lock_irq(&desc->lock); | 774 | raw_spin_lock_irq(&desc->lock); |
770 | if (unlikely(desc->istate & IRQS_DISABLED)) { | 775 | if (unlikely(irqd_irq_disabled(&desc->irq_data))) { |
771 | /* | 776 | /* |
772 | * CHECKME: We might need a dedicated | 777 | * CHECKME: We might need a dedicated |
773 | * IRQ_THREAD_PENDING flag here, which | 778 | * IRQ_THREAD_PENDING flag here, which |
@@ -775,7 +780,6 @@ static int irq_thread(void *data) | |||
775 | * but AFAICT IRQS_PENDING should be fine as it | 780 | * but AFAICT IRQS_PENDING should be fine as it |
776 | * retriggers the interrupt itself --- tglx | 781 | * retriggers the interrupt itself --- tglx |
777 | */ | 782 | */ |
778 | irq_compat_set_pending(desc); | ||
779 | desc->istate |= IRQS_PENDING; | 783 | desc->istate |= IRQS_PENDING; |
780 | raw_spin_unlock_irq(&desc->lock); | 784 | raw_spin_unlock_irq(&desc->lock); |
781 | } else { | 785 | } else { |
@@ -971,8 +975,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
971 | new->thread_mask = 1 << ffz(thread_mask); | 975 | new->thread_mask = 1 << ffz(thread_mask); |
972 | 976 | ||
973 | if (!shared) { | 977 | if (!shared) { |
974 | irq_chip_set_defaults(desc->irq_data.chip); | ||
975 | |||
976 | init_waitqueue_head(&desc->wait_for_threads); | 978 | init_waitqueue_head(&desc->wait_for_threads); |
977 | 979 | ||
978 | /* Setup the type (level, edge polarity) if configured: */ | 980 | /* Setup the type (level, edge polarity) if configured: */ |
@@ -985,8 +987,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
985 | } | 987 | } |
986 | 988 | ||
987 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ | 989 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
988 | IRQS_INPROGRESS | IRQS_ONESHOT | \ | 990 | IRQS_ONESHOT | IRQS_WAITING); |
989 | IRQS_WAITING); | 991 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
990 | 992 | ||
991 | if (new->flags & IRQF_PERCPU) { | 993 | if (new->flags & IRQF_PERCPU) { |
992 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | 994 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
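irq_set_affinity() is now a thin wrapper that takes desc->lock and calls __irq_set_affinity_locked(), which callers already holding the descriptor lock can use directly. Driver-side usage is unchanged; an illustrative example (foo_pin_irq is invented):

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Illustrative only: pin a device interrupt to a single online CPU. */
static int foo_pin_irq(unsigned int irq, unsigned int cpu)
{
	if (!cpu_online(cpu))
		return -EINVAL;

	return irq_set_affinity(irq, cpumask_of(cpu));
}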
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index ec4806d4778b..bc6194698dfd 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
@@ -53,20 +53,14 @@ void irq_move_masked_irq(struct irq_data *idata) | |||
53 | cpumask_clear(desc->pending_mask); | 53 | cpumask_clear(desc->pending_mask); |
54 | } | 54 | } |
55 | 55 | ||
56 | void move_masked_irq(int irq) | ||
57 | { | ||
58 | irq_move_masked_irq(irq_get_irq_data(irq)); | ||
59 | } | ||
60 | |||
61 | void irq_move_irq(struct irq_data *idata) | 56 | void irq_move_irq(struct irq_data *idata) |
62 | { | 57 | { |
63 | struct irq_desc *desc = irq_data_to_desc(idata); | ||
64 | bool masked; | 58 | bool masked; |
65 | 59 | ||
66 | if (likely(!irqd_is_setaffinity_pending(idata))) | 60 | if (likely(!irqd_is_setaffinity_pending(idata))) |
67 | return; | 61 | return; |
68 | 62 | ||
69 | if (unlikely(desc->istate & IRQS_DISABLED)) | 63 | if (unlikely(irqd_irq_disabled(idata))) |
70 | return; | 64 | return; |
71 | 65 | ||
72 | /* | 66 | /* |
@@ -74,15 +68,10 @@ void irq_move_irq(struct irq_data *idata) | |||
74 | * threaded interrupt with ONESHOT set, we can end up with an | 68 | * threaded interrupt with ONESHOT set, we can end up with an |
75 | * interrupt storm. | 69 | * interrupt storm. |
76 | */ | 70 | */ |
77 | masked = desc->istate & IRQS_MASKED; | 71 | masked = irqd_irq_masked(idata); |
78 | if (!masked) | 72 | if (!masked) |
79 | idata->chip->irq_mask(idata); | 73 | idata->chip->irq_mask(idata); |
80 | irq_move_masked_irq(idata); | 74 | irq_move_masked_irq(idata); |
81 | if (!masked) | 75 | if (!masked) |
82 | idata->chip->irq_unmask(idata); | 76 | idata->chip->irq_unmask(idata); |
83 | } | 77 | } |
84 | |||
85 | void move_native_irq(int irq) | ||
86 | { | ||
87 | irq_move_irq(irq_get_irq_data(irq)); | ||
88 | } | ||
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 626d092eed9a..dd201bd35103 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -364,6 +364,10 @@ int __weak arch_show_interrupts(struct seq_file *p, int prec) | |||
364 | return 0; | 364 | return 0; |
365 | } | 365 | } |
366 | 366 | ||
367 | #ifndef ACTUAL_NR_IRQS | ||
368 | # define ACTUAL_NR_IRQS nr_irqs | ||
369 | #endif | ||
370 | |||
367 | int show_interrupts(struct seq_file *p, void *v) | 371 | int show_interrupts(struct seq_file *p, void *v) |
368 | { | 372 | { |
369 | static int prec; | 373 | static int prec; |
@@ -373,10 +377,10 @@ int show_interrupts(struct seq_file *p, void *v) | |||
373 | struct irqaction *action; | 377 | struct irqaction *action; |
374 | struct irq_desc *desc; | 378 | struct irq_desc *desc; |
375 | 379 | ||
376 | if (i > nr_irqs) | 380 | if (i > ACTUAL_NR_IRQS) |
377 | return 0; | 381 | return 0; |
378 | 382 | ||
379 | if (i == nr_irqs) | 383 | if (i == ACTUAL_NR_IRQS) |
380 | return arch_show_interrupts(p, prec); | 384 | return arch_show_interrupts(p, prec); |
381 | 385 | ||
382 | /* print header and calculate the width of the first column */ | 386 | /* print header and calculate the width of the first column */ |
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index ad683a99b1ec..14dd5761e8c9 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
@@ -65,7 +65,6 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) | |||
65 | if (desc->istate & IRQS_REPLAY) | 65 | if (desc->istate & IRQS_REPLAY) |
66 | return; | 66 | return; |
67 | if (desc->istate & IRQS_PENDING) { | 67 | if (desc->istate & IRQS_PENDING) { |
68 | irq_compat_clr_pending(desc); | ||
69 | desc->istate &= ~IRQS_PENDING; | 68 | desc->istate &= ~IRQS_PENDING; |
70 | desc->istate |= IRQS_REPLAY; | 69 | desc->istate |= IRQS_REPLAY; |
71 | 70 | ||
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 0227ad358272..0d91730b6330 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h | |||
@@ -15,17 +15,8 @@ enum { | |||
15 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, | 15 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, |
16 | }; | 16 | }; |
17 | 17 | ||
18 | #define IRQ_INPROGRESS GOT_YOU_MORON | ||
19 | #define IRQ_REPLAY GOT_YOU_MORON | ||
20 | #define IRQ_WAITING GOT_YOU_MORON | ||
21 | #define IRQ_DISABLED GOT_YOU_MORON | ||
22 | #define IRQ_PENDING GOT_YOU_MORON | ||
23 | #define IRQ_MASKED GOT_YOU_MORON | ||
24 | #define IRQ_WAKEUP GOT_YOU_MORON | ||
25 | #define IRQ_MOVE_PENDING GOT_YOU_MORON | ||
26 | #define IRQ_PER_CPU GOT_YOU_MORON | 18 | #define IRQ_PER_CPU GOT_YOU_MORON |
27 | #define IRQ_NO_BALANCING GOT_YOU_MORON | 19 | #define IRQ_NO_BALANCING GOT_YOU_MORON |
28 | #define IRQ_AFFINITY_SET GOT_YOU_MORON | ||
29 | #define IRQ_LEVEL GOT_YOU_MORON | 20 | #define IRQ_LEVEL GOT_YOU_MORON |
30 | #define IRQ_NOPROBE GOT_YOU_MORON | 21 | #define IRQ_NOPROBE GOT_YOU_MORON |
31 | #define IRQ_NOREQUEST GOT_YOU_MORON | 22 | #define IRQ_NOREQUEST GOT_YOU_MORON |
@@ -37,102 +28,98 @@ enum { | |||
37 | static inline void | 28 | static inline void |
38 | irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) | 29 | irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) |
39 | { | 30 | { |
40 | desc->status &= ~(clr & _IRQF_MODIFY_MASK); | 31 | desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK); |
41 | desc->status |= (set & _IRQF_MODIFY_MASK); | 32 | desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); |
42 | } | 33 | } |
43 | 34 | ||
44 | static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) | 35 | static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) |
45 | { | 36 | { |
46 | return desc->status & _IRQ_PER_CPU; | 37 | return desc->status_use_accessors & _IRQ_PER_CPU; |
47 | } | 38 | } |
48 | 39 | ||
49 | static inline void irq_settings_set_per_cpu(struct irq_desc *desc) | 40 | static inline void irq_settings_set_per_cpu(struct irq_desc *desc) |
50 | { | 41 | { |
51 | desc->status |= _IRQ_PER_CPU; | 42 | desc->status_use_accessors |= _IRQ_PER_CPU; |
52 | } | 43 | } |
53 | 44 | ||
54 | static inline void irq_settings_set_no_balancing(struct irq_desc *desc) | 45 | static inline void irq_settings_set_no_balancing(struct irq_desc *desc) |
55 | { | 46 | { |
56 | desc->status |= _IRQ_NO_BALANCING; | 47 | desc->status_use_accessors |= _IRQ_NO_BALANCING; |
57 | } | 48 | } |
58 | 49 | ||
59 | static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) | 50 | static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) |
60 | { | 51 | { |
61 | return desc->status & _IRQ_NO_BALANCING; | 52 | return desc->status_use_accessors & _IRQ_NO_BALANCING; |
62 | } | 53 | } |
63 | 54 | ||
64 | static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) | 55 | static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) |
65 | { | 56 | { |
66 | return desc->status & IRQ_TYPE_SENSE_MASK; | 57 | return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK; |
67 | } | 58 | } |
68 | 59 | ||
69 | static inline void | 60 | static inline void |
70 | irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) | 61 | irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) |
71 | { | 62 | { |
72 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | 63 | desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK; |
73 | desc->status |= mask & IRQ_TYPE_SENSE_MASK; | 64 | desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK; |
74 | } | 65 | } |
75 | 66 | ||
76 | static inline bool irq_settings_is_level(struct irq_desc *desc) | 67 | static inline bool irq_settings_is_level(struct irq_desc *desc) |
77 | { | 68 | { |
78 | return desc->status & _IRQ_LEVEL; | 69 | return desc->status_use_accessors & _IRQ_LEVEL; |
79 | } | 70 | } |
80 | 71 | ||
81 | static inline void irq_settings_clr_level(struct irq_desc *desc) | 72 | static inline void irq_settings_clr_level(struct irq_desc *desc) |
82 | { | 73 | { |
83 | desc->status &= ~_IRQ_LEVEL; | 74 | desc->status_use_accessors &= ~_IRQ_LEVEL; |
84 | } | 75 | } |
85 | 76 | ||
86 | static inline void irq_settings_set_level(struct irq_desc *desc) | 77 | static inline void irq_settings_set_level(struct irq_desc *desc) |
87 | { | 78 | { |
88 | desc->status |= _IRQ_LEVEL; | 79 | desc->status_use_accessors |= _IRQ_LEVEL; |
89 | } | 80 | } |
90 | 81 | ||
91 | static inline bool irq_settings_can_request(struct irq_desc *desc) | 82 | static inline bool irq_settings_can_request(struct irq_desc *desc) |
92 | { | 83 | { |
93 | return !(desc->status & _IRQ_NOREQUEST); | 84 | return !(desc->status_use_accessors & _IRQ_NOREQUEST); |
94 | } | 85 | } |
95 | 86 | ||
96 | static inline void irq_settings_clr_norequest(struct irq_desc *desc) | 87 | static inline void irq_settings_clr_norequest(struct irq_desc *desc) |
97 | { | 88 | { |
98 | desc->status &= ~_IRQ_NOREQUEST; | 89 | desc->status_use_accessors &= ~_IRQ_NOREQUEST; |
99 | } | 90 | } |
100 | 91 | ||
101 | static inline void irq_settings_set_norequest(struct irq_desc *desc) | 92 | static inline void irq_settings_set_norequest(struct irq_desc *desc) |
102 | { | 93 | { |
103 | desc->status |= _IRQ_NOREQUEST; | 94 | desc->status_use_accessors |= _IRQ_NOREQUEST; |
104 | } | 95 | } |
105 | 96 | ||
106 | static inline bool irq_settings_can_probe(struct irq_desc *desc) | 97 | static inline bool irq_settings_can_probe(struct irq_desc *desc) |
107 | { | 98 | { |
108 | return !(desc->status & _IRQ_NOPROBE); | 99 | return !(desc->status_use_accessors & _IRQ_NOPROBE); |
109 | } | 100 | } |
110 | 101 | ||
111 | static inline void irq_settings_clr_noprobe(struct irq_desc *desc) | 102 | static inline void irq_settings_clr_noprobe(struct irq_desc *desc) |
112 | { | 103 | { |
113 | desc->status &= ~_IRQ_NOPROBE; | 104 | desc->status_use_accessors &= ~_IRQ_NOPROBE; |
114 | } | 105 | } |
115 | 106 | ||
116 | static inline void irq_settings_set_noprobe(struct irq_desc *desc) | 107 | static inline void irq_settings_set_noprobe(struct irq_desc *desc) |
117 | { | 108 | { |
118 | desc->status |= _IRQ_NOPROBE; | 109 | desc->status_use_accessors |= _IRQ_NOPROBE; |
119 | } | 110 | } |
120 | 111 | ||
121 | static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) | 112 | static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) |
122 | { | 113 | { |
123 | return desc->status & _IRQ_MOVE_PCNTXT; | 114 | return desc->status_use_accessors & _IRQ_MOVE_PCNTXT; |
124 | } | 115 | } |
125 | 116 | ||
126 | static inline bool irq_settings_can_autoenable(struct irq_desc *desc) | 117 | static inline bool irq_settings_can_autoenable(struct irq_desc *desc) |
127 | { | 118 | { |
128 | return !(desc->status & _IRQ_NOAUTOEN); | 119 | return !(desc->status_use_accessors & _IRQ_NOAUTOEN); |
129 | } | 120 | } |
130 | 121 | ||
131 | static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) | 122 | static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) |
132 | { | 123 | { |
133 | return desc->status & _IRQ_NESTED_THREAD; | 124 | return desc->status_use_accessors & _IRQ_NESTED_THREAD; |
134 | } | 125 | } |
135 | |||
136 | /* Nothing should touch desc->status from now on */ | ||
137 | #undef status | ||
138 | #define status USE_THE_PROPER_WRAPPERS_YOU_MORON | ||
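All of these wrappers operate on desc->status_use_accessors, and the trailing poison define that remapped desc->status is gone now that nothing references the old field. Outside kernel/irq/ the bits are still set through irq_modify_status() and its helpers; a small illustrative example (foo_fixup_irq is invented, the IRQ_* flags are real):

#include <linux/irq.h>

/*
 * Illustrative only: a platform marking a line as not probeable and not
 * auto-enabled.  irq_set_status_flags() ends up in irq_modify_status(),
 * which translates the public IRQ_* bits into the wrappers above.
 */
static void foo_fixup_irq(unsigned int irq)
{
	irq_set_status_flags(irq, IRQ_NOPROBE | IRQ_NOAUTOEN);
}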
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index dd586ebf9c8c..dfbd550401b2 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -45,12 +45,12 @@ bool irq_wait_for_poll(struct irq_desc *desc) | |||
45 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
46 | do { | 46 | do { |
47 | raw_spin_unlock(&desc->lock); | 47 | raw_spin_unlock(&desc->lock); |
48 | while (desc->istate & IRQS_INPROGRESS) | 48 | while (irqd_irq_inprogress(&desc->irq_data)) |
49 | cpu_relax(); | 49 | cpu_relax(); |
50 | raw_spin_lock(&desc->lock); | 50 | raw_spin_lock(&desc->lock); |
51 | } while (desc->istate & IRQS_INPROGRESS); | 51 | } while (irqd_irq_inprogress(&desc->irq_data)); |
52 | /* Might have been disabled in meantime */ | 52 | /* Might have been disabled in meantime */ |
53 | return !(desc->istate & IRQS_DISABLED) && desc->action; | 53 | return !irqd_irq_disabled(&desc->irq_data) && desc->action; |
54 | #else | 54 | #else |
55 | return false; | 55 | return false; |
56 | #endif | 56 | #endif |
@@ -75,7 +75,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) | |||
75 | * Do not poll disabled interrupts unless the spurious | 75 | * Do not poll disabled interrupts unless the spurious |
76 | * disabled poller asks explicitely. | 76 | * disabled poller asks explicitely. |
77 | */ | 77 | */ |
78 | if ((desc->istate & IRQS_DISABLED) && !force) | 78 | if (irqd_irq_disabled(&desc->irq_data) && !force) |
79 | goto out; | 79 | goto out; |
80 | 80 | ||
81 | /* | 81 | /* |
@@ -88,12 +88,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) | |||
88 | goto out; | 88 | goto out; |
89 | 89 | ||
90 | /* Already running on another processor */ | 90 | /* Already running on another processor */ |
91 | if (desc->istate & IRQS_INPROGRESS) { | 91 | if (irqd_irq_inprogress(&desc->irq_data)) { |
92 | /* | 92 | /* |
93 | * Already running: If it is shared get the other | 93 | * Already running: If it is shared get the other |
94 | * CPU to go looking for our mystery interrupt too | 94 | * CPU to go looking for our mystery interrupt too |
95 | */ | 95 | */ |
96 | irq_compat_set_pending(desc); | ||
97 | desc->istate |= IRQS_PENDING; | 96 | desc->istate |= IRQS_PENDING; |
98 | goto out; | 97 | goto out; |
99 | } | 98 | } |
diff --git a/kernel/signal.c b/kernel/signal.c index 324eff5468ad..1186cf7fac77 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2437,7 +2437,7 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, | |||
2437 | /* Not even root can pretend to send signals from the kernel. | 2437 | /* Not even root can pretend to send signals from the kernel. |
2438 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2438 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2439 | */ | 2439 | */ |
2440 | if (info.si_code != SI_QUEUE) { | 2440 | if (info.si_code >= 0 || info.si_code == SI_TKILL) { |
2441 | /* We used to allow any < 0 si_code */ | 2441 | /* We used to allow any < 0 si_code */ |
2442 | WARN_ON_ONCE(info.si_code < 0); | 2442 | WARN_ON_ONCE(info.si_code < 0); |
2443 | return -EPERM; | 2443 | return -EPERM; |
@@ -2457,7 +2457,7 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) | |||
2457 | /* Not even root can pretend to send signals from the kernel. | 2457 | /* Not even root can pretend to send signals from the kernel. |
2458 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2458 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2459 | */ | 2459 | */ |
2460 | if (info->si_code != SI_QUEUE) { | 2460 | if (info->si_code >= 0 || info->si_code == SI_TKILL) { |
2461 | /* We used to allow any < 0 si_code */ | 2461 | /* We used to allow any < 0 si_code */ |
2462 | WARN_ON_ONCE(info->si_code < 0); | 2462 | WARN_ON_ONCE(info->si_code < 0); |
2463 | return -EPERM; | 2463 | return -EPERM; |
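The check now rejects any si_code that a kill()/tgkill() or the kernel itself would generate (si_code >= 0, or SI_TKILL), rather than only accepting SI_QUEUE. A rough user-space illustration of the effect, not part of this commit (the program and its expected output are only a sketch):

#define _GNU_SOURCE
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	siginfo_t info;
	sigset_t set;

	/* block SIGUSR1 so a (hypothetically) accepted signal is harmless */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGUSR1;
	info.si_code = SI_KERNEL;	/* >= 0: pretend it came from the kernel */

	/* expected to fail with EPERM after this change */
	if (syscall(SYS_rt_sigqueueinfo, getpid(), SIGUSR1, &info) < 0)
		printf("forged si_code rejected: %s\n", strerror(errno));

	return 0;
}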
diff --git a/mm/nommu.c b/mm/nommu.c index cb86e7d5e7f5..c4c542c736a9 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1971,21 +1971,10 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1971 | } | 1971 | } |
1972 | EXPORT_SYMBOL(filemap_fault); | 1972 | EXPORT_SYMBOL(filemap_fault); |
1973 | 1973 | ||
1974 | /* | 1974 | static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, |
1975 | * Access another process' address space. | 1975 | unsigned long addr, void *buf, int len, int write) |
1976 | * - source/target buffer must be kernel space | ||
1977 | */ | ||
1978 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) | ||
1979 | { | 1976 | { |
1980 | struct vm_area_struct *vma; | 1977 | struct vm_area_struct *vma; |
1981 | struct mm_struct *mm; | ||
1982 | |||
1983 | if (addr + len < addr) | ||
1984 | return 0; | ||
1985 | |||
1986 | mm = get_task_mm(tsk); | ||
1987 | if (!mm) | ||
1988 | return 0; | ||
1989 | 1978 | ||
1990 | down_read(&mm->mmap_sem); | 1979 | down_read(&mm->mmap_sem); |
1991 | 1980 | ||
@@ -2010,6 +1999,43 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in | |||
2010 | } | 1999 | } |
2011 | 2000 | ||
2012 | up_read(&mm->mmap_sem); | 2001 | up_read(&mm->mmap_sem); |
2002 | |||
2003 | return len; | ||
2004 | } | ||
2005 | |||
2006 | /** | ||
2007 | * @access_remote_vm - access another process' address space | ||
2008 | * @mm: the mm_struct of the target address space | ||
2009 | * @addr: start address to access | ||
2010 | * @buf: source or destination buffer | ||
2011 | * @len: number of bytes to transfer | ||
2012 | * @write: whether the access is a write | ||
2013 | * | ||
2014 | * The caller must hold a reference on @mm. | ||
2015 | */ | ||
2016 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, | ||
2017 | void *buf, int len, int write) | ||
2018 | { | ||
2019 | return __access_remote_vm(NULL, mm, addr, buf, len, write); | ||
2020 | } | ||
2021 | |||
2022 | /* | ||
2023 | * Access another process' address space. | ||
2024 | * - source/target buffer must be kernel space | ||
2025 | */ | ||
2026 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) | ||
2027 | { | ||
2028 | struct mm_struct *mm; | ||
2029 | |||
2030 | if (addr + len < addr) | ||
2031 | return 0; | ||
2032 | |||
2033 | mm = get_task_mm(tsk); | ||
2034 | if (!mm) | ||
2035 | return 0; | ||
2036 | |||
2037 | len = __access_remote_vm(tsk, mm, addr, buf, len, write); | ||
2038 | |||
2013 | mmput(mm); | 2039 | mmput(mm); |
2014 | return len; | 2040 | return len; |
2015 | } | 2041 | } |
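Note on the nommu.c hunks: access_process_vm() is split so the copy loop lives in __access_remote_vm(), and the new access_remote_vm() entry point serves callers that already hold an mm reference and have no task pointer handy (the /proc/<pid>/mem style of user). A minimal kernel-context sketch of such a caller (the function is hypothetical; only access_remote_vm()'s signature comes from the hunk above):

    /* Hypothetical caller: read 'len' bytes at 'addr' from a foreign mm.
     * The caller must already hold a reference on mm, e.g. via get_task_mm(). */
    static int peek_remote(struct mm_struct *mm, unsigned long addr,
                           void *buf, int len)
    {
            int copied = access_remote_vm(mm, addr, buf, len, 0 /* read */);

            return copied == len ? 0 : -EIO;
    }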
diff --git a/mm/percpu.c b/mm/percpu.c index 3f930018aa60..55d4d113fbd3 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -1008,8 +1008,7 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr) | |||
1008 | } | 1008 | } |
1009 | 1009 | ||
1010 | if (in_first_chunk) { | 1010 | if (in_first_chunk) { |
1011 | if ((unsigned long)addr < VMALLOC_START || | 1011 | if (!is_vmalloc_addr(addr)) |
1012 | (unsigned long)addr >= VMALLOC_END) | ||
1013 | return __pa(addr); | 1012 | return __pa(addr); |
1014 | else | 1013 | else |
1015 | return page_to_phys(vmalloc_to_page(addr)); | 1014 | return page_to_phys(vmalloc_to_page(addr)); |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index dce8f0009a12..718b60366dfe 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -389,6 +389,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
389 | { | 389 | { |
390 | struct net_bridge_port *p; | 390 | struct net_bridge_port *p; |
391 | int err = 0; | 391 | int err = 0; |
392 | bool changed_addr; | ||
392 | 393 | ||
393 | /* Don't allow bridging non-ethernet like devices */ | 394 | /* Don't allow bridging non-ethernet like devices */ |
394 | if ((dev->flags & IFF_LOOPBACK) || | 395 | if ((dev->flags & IFF_LOOPBACK) || |
@@ -446,7 +447,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
446 | list_add_rcu(&p->list, &br->port_list); | 447 | list_add_rcu(&p->list, &br->port_list); |
447 | 448 | ||
448 | spin_lock_bh(&br->lock); | 449 | spin_lock_bh(&br->lock); |
449 | br_stp_recalculate_bridge_id(br); | 450 | changed_addr = br_stp_recalculate_bridge_id(br); |
450 | br_features_recompute(br); | 451 | br_features_recompute(br); |
451 | 452 | ||
452 | if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && | 453 | if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && |
@@ -456,6 +457,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
456 | 457 | ||
457 | br_ifinfo_notify(RTM_NEWLINK, p); | 458 | br_ifinfo_notify(RTM_NEWLINK, p); |
458 | 459 | ||
460 | if (changed_addr) | ||
461 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
462 | |||
459 | dev_set_mtu(br->dev, br_min_mtu(br)); | 463 | dev_set_mtu(br->dev, br_min_mtu(br)); |
460 | 464 | ||
461 | kobject_uevent(&p->kobj, KOBJ_ADD); | 465 | kobject_uevent(&p->kobj, KOBJ_ADD); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 19e2f46ed086..387013d33745 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -497,7 +497,7 @@ extern void br_stp_disable_bridge(struct net_bridge *br); | |||
497 | extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val); | 497 | extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val); |
498 | extern void br_stp_enable_port(struct net_bridge_port *p); | 498 | extern void br_stp_enable_port(struct net_bridge_port *p); |
499 | extern void br_stp_disable_port(struct net_bridge_port *p); | 499 | extern void br_stp_disable_port(struct net_bridge_port *p); |
500 | extern void br_stp_recalculate_bridge_id(struct net_bridge *br); | 500 | extern bool br_stp_recalculate_bridge_id(struct net_bridge *br); |
501 | extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a); | 501 | extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a); |
502 | extern void br_stp_set_bridge_priority(struct net_bridge *br, | 502 | extern void br_stp_set_bridge_priority(struct net_bridge *br, |
503 | u16 newprio); | 503 | u16 newprio); |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 79372d4a4055..5593f5aec942 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
@@ -204,7 +204,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) | |||
204 | static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; | 204 | static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; |
205 | 205 | ||
206 | /* called under bridge lock */ | 206 | /* called under bridge lock */ |
207 | void br_stp_recalculate_bridge_id(struct net_bridge *br) | 207 | bool br_stp_recalculate_bridge_id(struct net_bridge *br) |
208 | { | 208 | { |
209 | const unsigned char *br_mac_zero = | 209 | const unsigned char *br_mac_zero = |
210 | (const unsigned char *)br_mac_zero_aligned; | 210 | (const unsigned char *)br_mac_zero_aligned; |
@@ -222,8 +222,11 @@ void br_stp_recalculate_bridge_id(struct net_bridge *br) | |||
222 | 222 | ||
223 | } | 223 | } |
224 | 224 | ||
225 | if (compare_ether_addr(br->bridge_id.addr, addr)) | 225 | if (compare_ether_addr(br->bridge_id.addr, addr) == 0) |
226 | br_stp_change_bridge_id(br, addr); | 226 | return false; /* no change */ |
227 | |||
228 | br_stp_change_bridge_id(br, addr); | ||
229 | return true; | ||
227 | } | 230 | } |
228 | 231 | ||
229 | /* called under bridge lock */ | 232 | /* called under bridge lock */ |
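Note on the bridge hunks: br_stp_recalculate_bridge_id() now reports whether the bridge id actually changed (the compare_ether_addr() == 0 case returns false early), and br_add_if() uses that result to raise NETDEV_CHANGEADDR so listeners learn of the new address. The caller-side shape, condensed (kernel context; names are the ones in the hunks above, intermediate statements omitted):

    bool changed_addr;

    spin_lock_bh(&br->lock);
    changed_addr = br_stp_recalculate_bridge_id(br);   /* true only on a real change */
    br_features_recompute(br);
    spin_unlock_bh(&br->lock);

    if (changed_addr)
            call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);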
diff --git a/net/can/af_can.c b/net/can/af_can.c index 702be5a2c956..733d66f1b05a 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -95,7 +95,7 @@ struct s_pstats can_pstats; /* receive list statistics */ | |||
95 | * af_can socket functions | 95 | * af_can socket functions |
96 | */ | 96 | */ |
97 | 97 | ||
98 | static int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 98 | int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
99 | { | 99 | { |
100 | struct sock *sk = sock->sk; | 100 | struct sock *sk = sock->sk; |
101 | 101 | ||
@@ -108,6 +108,7 @@ static int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
108 | return -ENOIOCTLCMD; | 108 | return -ENOIOCTLCMD; |
109 | } | 109 | } |
110 | } | 110 | } |
111 | EXPORT_SYMBOL(can_ioctl); | ||
111 | 112 | ||
112 | static void can_sock_destruct(struct sock *sk) | 113 | static void can_sock_destruct(struct sock *sk) |
113 | { | 114 | { |
@@ -698,13 +699,9 @@ int can_proto_register(struct can_proto *cp) | |||
698 | printk(KERN_ERR "can: protocol %d already registered\n", | 699 | printk(KERN_ERR "can: protocol %d already registered\n", |
699 | proto); | 700 | proto); |
700 | err = -EBUSY; | 701 | err = -EBUSY; |
701 | } else { | 702 | } else |
702 | proto_tab[proto] = cp; | 703 | proto_tab[proto] = cp; |
703 | 704 | ||
704 | /* use generic ioctl function if not defined by module */ | ||
705 | if (!cp->ops->ioctl) | ||
706 | cp->ops->ioctl = can_ioctl; | ||
707 | } | ||
708 | spin_unlock(&proto_tab_lock); | 705 | spin_unlock(&proto_tab_lock); |
709 | 706 | ||
710 | if (err < 0) | 707 | if (err < 0) |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 092dc88a7c64..871a0ad51025 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -1569,7 +1569,7 @@ static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1569 | return size; | 1569 | return size; |
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | static struct proto_ops bcm_ops __read_mostly = { | 1572 | static const struct proto_ops bcm_ops = { |
1573 | .family = PF_CAN, | 1573 | .family = PF_CAN, |
1574 | .release = bcm_release, | 1574 | .release = bcm_release, |
1575 | .bind = sock_no_bind, | 1575 | .bind = sock_no_bind, |
@@ -1578,7 +1578,7 @@ static struct proto_ops bcm_ops __read_mostly = { | |||
1578 | .accept = sock_no_accept, | 1578 | .accept = sock_no_accept, |
1579 | .getname = sock_no_getname, | 1579 | .getname = sock_no_getname, |
1580 | .poll = datagram_poll, | 1580 | .poll = datagram_poll, |
1581 | .ioctl = NULL, /* use can_ioctl() from af_can.c */ | 1581 | .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ |
1582 | .listen = sock_no_listen, | 1582 | .listen = sock_no_listen, |
1583 | .shutdown = sock_no_shutdown, | 1583 | .shutdown = sock_no_shutdown, |
1584 | .setsockopt = sock_no_setsockopt, | 1584 | .setsockopt = sock_no_setsockopt, |
diff --git a/net/can/raw.c b/net/can/raw.c index 883e9d74fddf..649acfa7c70a 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -742,7 +742,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
742 | return size; | 742 | return size; |
743 | } | 743 | } |
744 | 744 | ||
745 | static struct proto_ops raw_ops __read_mostly = { | 745 | static const struct proto_ops raw_ops = { |
746 | .family = PF_CAN, | 746 | .family = PF_CAN, |
747 | .release = raw_release, | 747 | .release = raw_release, |
748 | .bind = raw_bind, | 748 | .bind = raw_bind, |
@@ -751,7 +751,7 @@ static struct proto_ops raw_ops __read_mostly = { | |||
751 | .accept = sock_no_accept, | 751 | .accept = sock_no_accept, |
752 | .getname = raw_getname, | 752 | .getname = raw_getname, |
753 | .poll = datagram_poll, | 753 | .poll = datagram_poll, |
754 | .ioctl = NULL, /* use can_ioctl() from af_can.c */ | 754 | .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ |
755 | .listen = sock_no_listen, | 755 | .listen = sock_no_listen, |
756 | .shutdown = sock_no_shutdown, | 756 | .shutdown = sock_no_shutdown, |
757 | .setsockopt = raw_setsockopt, | 757 | .setsockopt = raw_setsockopt, |
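Note on the CAN hunks: can_proto_register() used to patch can_ioctl() into a protocol's ops table whenever .ioctl was left NULL, which is exactly the write that kept bcm_ops/raw_ops from being const. With can_ioctl() exported, each protocol names it directly and the tables become const. What a protocol's table looks like after this change (illustrative sketch; only the fields shown in the hunks are taken from the diff):

    /* Sketch of a CAN protocol's ops table referencing the exported helper. */
    static const struct proto_ops my_can_proto_ops = {
            .family = PF_CAN,
            .poll   = datagram_poll,
            .ioctl  = can_ioctl,        /* generic ioctl handling from af_can.c */
            /* ...remaining handlers as in bcm_ops / raw_ops above... */
    };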
diff --git a/net/core/dev.c b/net/core/dev.c index f453370131a0..563ddc28139d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1140,9 +1140,6 @@ static int __dev_open(struct net_device *dev) | |||
1140 | 1140 | ||
1141 | ASSERT_RTNL(); | 1141 | ASSERT_RTNL(); |
1142 | 1142 | ||
1143 | /* | ||
1144 | * Is it even present? | ||
1145 | */ | ||
1146 | if (!netif_device_present(dev)) | 1143 | if (!netif_device_present(dev)) |
1147 | return -ENODEV; | 1144 | return -ENODEV; |
1148 | 1145 | ||
@@ -1151,9 +1148,6 @@ static int __dev_open(struct net_device *dev) | |||
1151 | if (ret) | 1148 | if (ret) |
1152 | return ret; | 1149 | return ret; |
1153 | 1150 | ||
1154 | /* | ||
1155 | * Call device private open method | ||
1156 | */ | ||
1157 | set_bit(__LINK_STATE_START, &dev->state); | 1151 | set_bit(__LINK_STATE_START, &dev->state); |
1158 | 1152 | ||
1159 | if (ops->ndo_validate_addr) | 1153 | if (ops->ndo_validate_addr) |
@@ -1162,31 +1156,12 @@ static int __dev_open(struct net_device *dev) | |||
1162 | if (!ret && ops->ndo_open) | 1156 | if (!ret && ops->ndo_open) |
1163 | ret = ops->ndo_open(dev); | 1157 | ret = ops->ndo_open(dev); |
1164 | 1158 | ||
1165 | /* | ||
1166 | * If it went open OK then: | ||
1167 | */ | ||
1168 | |||
1169 | if (ret) | 1159 | if (ret) |
1170 | clear_bit(__LINK_STATE_START, &dev->state); | 1160 | clear_bit(__LINK_STATE_START, &dev->state); |
1171 | else { | 1161 | else { |
1172 | /* | ||
1173 | * Set the flags. | ||
1174 | */ | ||
1175 | dev->flags |= IFF_UP; | 1162 | dev->flags |= IFF_UP; |
1176 | |||
1177 | /* | ||
1178 | * Enable NET_DMA | ||
1179 | */ | ||
1180 | net_dmaengine_get(); | 1163 | net_dmaengine_get(); |
1181 | |||
1182 | /* | ||
1183 | * Initialize multicasting status | ||
1184 | */ | ||
1185 | dev_set_rx_mode(dev); | 1164 | dev_set_rx_mode(dev); |
1186 | |||
1187 | /* | ||
1188 | * Wakeup transmit queue engine | ||
1189 | */ | ||
1190 | dev_activate(dev); | 1165 | dev_activate(dev); |
1191 | } | 1166 | } |
1192 | 1167 | ||
@@ -1209,22 +1184,13 @@ int dev_open(struct net_device *dev) | |||
1209 | { | 1184 | { |
1210 | int ret; | 1185 | int ret; |
1211 | 1186 | ||
1212 | /* | ||
1213 | * Is it already up? | ||
1214 | */ | ||
1215 | if (dev->flags & IFF_UP) | 1187 | if (dev->flags & IFF_UP) |
1216 | return 0; | 1188 | return 0; |
1217 | 1189 | ||
1218 | /* | ||
1219 | * Open device | ||
1220 | */ | ||
1221 | ret = __dev_open(dev); | 1190 | ret = __dev_open(dev); |
1222 | if (ret < 0) | 1191 | if (ret < 0) |
1223 | return ret; | 1192 | return ret; |
1224 | 1193 | ||
1225 | /* | ||
1226 | * ... and announce new interface. | ||
1227 | */ | ||
1228 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); | 1194 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); |
1229 | call_netdevice_notifiers(NETDEV_UP, dev); | 1195 | call_netdevice_notifiers(NETDEV_UP, dev); |
1230 | 1196 | ||
@@ -1240,10 +1206,6 @@ static int __dev_close_many(struct list_head *head) | |||
1240 | might_sleep(); | 1206 | might_sleep(); |
1241 | 1207 | ||
1242 | list_for_each_entry(dev, head, unreg_list) { | 1208 | list_for_each_entry(dev, head, unreg_list) { |
1243 | /* | ||
1244 | * Tell people we are going down, so that they can | ||
1245 | * prepare to death, when device is still operating. | ||
1246 | */ | ||
1247 | call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); | 1209 | call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); |
1248 | 1210 | ||
1249 | clear_bit(__LINK_STATE_START, &dev->state); | 1211 | clear_bit(__LINK_STATE_START, &dev->state); |
@@ -1272,15 +1234,7 @@ static int __dev_close_many(struct list_head *head) | |||
1272 | if (ops->ndo_stop) | 1234 | if (ops->ndo_stop) |
1273 | ops->ndo_stop(dev); | 1235 | ops->ndo_stop(dev); |
1274 | 1236 | ||
1275 | /* | ||
1276 | * Device is now down. | ||
1277 | */ | ||
1278 | |||
1279 | dev->flags &= ~IFF_UP; | 1237 | dev->flags &= ~IFF_UP; |
1280 | |||
1281 | /* | ||
1282 | * Shutdown NET_DMA | ||
1283 | */ | ||
1284 | net_dmaengine_put(); | 1238 | net_dmaengine_put(); |
1285 | } | 1239 | } |
1286 | 1240 | ||
@@ -1309,9 +1263,6 @@ static int dev_close_many(struct list_head *head) | |||
1309 | 1263 | ||
1310 | __dev_close_many(head); | 1264 | __dev_close_many(head); |
1311 | 1265 | ||
1312 | /* | ||
1313 | * Tell people we are down | ||
1314 | */ | ||
1315 | list_for_each_entry(dev, head, unreg_list) { | 1266 | list_for_each_entry(dev, head, unreg_list) { |
1316 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); | 1267 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); |
1317 | call_netdevice_notifiers(NETDEV_DOWN, dev); | 1268 | call_netdevice_notifiers(NETDEV_DOWN, dev); |
@@ -1371,11 +1322,6 @@ EXPORT_SYMBOL(dev_disable_lro); | |||
1371 | 1322 | ||
1372 | static int dev_boot_phase = 1; | 1323 | static int dev_boot_phase = 1; |
1373 | 1324 | ||
1374 | /* | ||
1375 | * Device change register/unregister. These are not inline or static | ||
1376 | * as we export them to the world. | ||
1377 | */ | ||
1378 | |||
1379 | /** | 1325 | /** |
1380 | * register_netdevice_notifier - register a network notifier block | 1326 | * register_netdevice_notifier - register a network notifier block |
1381 | * @nb: notifier | 1327 | * @nb: notifier |
@@ -1477,6 +1423,7 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) | |||
1477 | ASSERT_RTNL(); | 1423 | ASSERT_RTNL(); |
1478 | return raw_notifier_call_chain(&netdev_chain, val, dev); | 1424 | return raw_notifier_call_chain(&netdev_chain, val, dev); |
1479 | } | 1425 | } |
1426 | EXPORT_SYMBOL(call_netdevice_notifiers); | ||
1480 | 1427 | ||
1481 | /* When > 0 there are consumers of rx skb time stamps */ | 1428 | /* When > 0 there are consumers of rx skb time stamps */ |
1482 | static atomic_t netstamp_needed = ATOMIC_INIT(0); | 1429 | static atomic_t netstamp_needed = ATOMIC_INIT(0); |
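Note on the net/core/dev.c hunks: most of them simply delete comments that restated the code; the functional piece is EXPORT_SYMBOL(call_netdevice_notifiers), which lets modular code such as the bridge (see br_add_if() above) raise events like NETDEV_CHANGEADDR itself. Minimal module-side sketch (kernel context; the helper and event come from the hunks, the wrapper function is hypothetical):

    /* Hypothetical module helper: announce that dev's address changed. */
    static void my_notify_addr_change(struct net_device *dev)
    {
            ASSERT_RTNL();                                  /* chain runs under rtnl */
            call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
    }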
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 24bd57493c0d..74ead9eca126 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -141,9 +141,24 @@ u32 ethtool_op_get_flags(struct net_device *dev) | |||
141 | } | 141 | } |
142 | EXPORT_SYMBOL(ethtool_op_get_flags); | 142 | EXPORT_SYMBOL(ethtool_op_get_flags); |
143 | 143 | ||
144 | /* Check if device can enable (or disable) particular feature coded in "data" | ||
145 | * argument. Flags "supported" describe features that can be toggled by device. | ||
146 | * If feature can not be toggled, it state (enabled or disabled) must match | ||
147 | * hardcoded device features state, otherwise flags are marked as invalid. | ||
148 | */ | ||
149 | bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported) | ||
150 | { | ||
151 | u32 features = dev->features & flags_dup_features; | ||
152 | /* "data" can contain only flags_dup_features bits, | ||
153 | * see __ethtool_set_flags */ | ||
154 | |||
155 | return (features & ~supported) != (data & ~supported); | ||
156 | } | ||
157 | EXPORT_SYMBOL(ethtool_invalid_flags); | ||
158 | |||
144 | int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) | 159 | int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) |
145 | { | 160 | { |
146 | if (data & ~supported) | 161 | if (ethtool_invalid_flags(dev, data, supported)) |
147 | return -EINVAL; | 162 | return -EINVAL; |
148 | 163 | ||
149 | dev->features = ((dev->features & ~flags_dup_features) | | 164 | dev->features = ((dev->features & ~flags_dup_features) | |
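Note on the ethtool hunk: ethtool_invalid_flags() factors out the sanity check used by ethtool_op_set_flags(): for flag bits the driver cannot toggle (those outside "supported"), the requested state must already match the device's current state, otherwise the request is invalid. The bit arithmetic, worked through in a small standalone program (plain C; the flag values are made up purely to show the masking):

    #include <stdint.h>
    #include <stdio.h>

    /* Same expression as ethtool_invalid_flags(), with the current features
     * passed in explicitly instead of read from dev->features. */
    static int invalid_flags(uint32_t features, uint32_t data, uint32_t supported)
    {
            return (features & ~supported) != (data & ~supported);
    }

    int main(void)
    {
            uint32_t LRO = 0x1, NTUPLE = 0x2;        /* hypothetical flag bits */
            uint32_t features  = LRO;                /* device currently has LRO on */
            uint32_t supported = NTUPLE;             /* driver can only toggle NTUPLE */

            /* Keep LRO on, turn NTUPLE on: non-togglable bits unchanged -> valid. */
            printf("%d\n", invalid_flags(features, LRO | NTUPLE, supported)); /* 0 */
            /* Ask to turn LRO off: not togglable and state differs -> invalid. */
            printf("%d\n", invalid_flags(features, NTUPLE, supported));       /* 1 */
            return 0;
    }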
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 90a3ff605591..b92c86f6e9b3 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1365,9 +1365,9 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l, | |||
1365 | err = fib_props[fa->fa_type].error; | 1365 | err = fib_props[fa->fa_type].error; |
1366 | if (err) { | 1366 | if (err) { |
1367 | #ifdef CONFIG_IP_FIB_TRIE_STATS | 1367 | #ifdef CONFIG_IP_FIB_TRIE_STATS |
1368 | t->stats.semantic_match_miss++; | 1368 | t->stats.semantic_match_passed++; |
1369 | #endif | 1369 | #endif |
1370 | return 1; | 1370 | return err; |
1371 | } | 1371 | } |
1372 | if (fi->fib_flags & RTNH_F_DEAD) | 1372 | if (fi->fib_flags & RTNH_F_DEAD) |
1373 | continue; | 1373 | continue; |
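Note on the fib_trie hunk: an alias whose route type carries an error (unreachable/prohibit style entries) now returns that error instead of a bare 1, and the statistic bumped is semantic_match_passed rather than semantic_match_miss, since the lookup did match semantically; the matched route simply says "fail with this error". Summarised (comment-only sketch; -EHOSTUNREACH is the conventional fib_props error for RTN_UNREACHABLE):

    /*
     * Before: return 1    -> caller treats it as "no usable route here".
     * After:  return err  -> e.g. -EHOSTUNREACH for an RTN_UNREACHABLE route,
     *                        propagated straight to the caller of the lookup.
     */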
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 1906fa35860c..28a736f3442f 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -140,11 +140,11 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | |||
140 | } else { | 140 | } else { |
141 | dopt->ts_needtime = 0; | 141 | dopt->ts_needtime = 0; |
142 | 142 | ||
143 | if (soffset + 8 <= optlen) { | 143 | if (soffset + 7 <= optlen) { |
144 | __be32 addr; | 144 | __be32 addr; |
145 | 145 | ||
146 | memcpy(&addr, sptr+soffset-1, 4); | 146 | memcpy(&addr, dptr+soffset-1, 4); |
147 | if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_LOCAL) { | 147 | if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) { |
148 | dopt->ts_needtime = 1; | 148 | dopt->ts_needtime = 1; |
149 | soffset += 8; | 149 | soffset += 8; |
150 | } | 150 | } |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index e837ffd3edc3..2d3c72e5bbbf 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -569,6 +569,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
569 | rt = ip_route_output_flow(sock_net(sk), &fl4, sk); | 569 | rt = ip_route_output_flow(sock_net(sk), &fl4, sk); |
570 | if (IS_ERR(rt)) { | 570 | if (IS_ERR(rt)) { |
571 | err = PTR_ERR(rt); | 571 | err = PTR_ERR(rt); |
572 | rt = NULL; | ||
572 | goto done; | 573 | goto done; |
573 | } | 574 | } |
574 | } | 575 | } |
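Note on the ipv4/raw.c hunk: ip_route_output_flow() hands back an ERR_PTR on failure, and raw_sendmsg()'s shared "done" cleanup releases rt unconditionally; clearing rt before jumping keeps that cleanup from operating on the error-encoded pointer. The idiom in isolation (kernel context; mirrors the hunk above):

    rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
    if (IS_ERR(rt)) {
            err = PTR_ERR(rt);
            rt  = NULL;     /* the 'done' path releases rt; NULL is safe, an ERR_PTR is not */
            goto done;
    }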
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 7ff0343e05c7..29e48593bf22 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -663,7 +663,7 @@ static int pim6_rcv(struct sk_buff *skb) | |||
663 | skb_pull(skb, (u8 *)encap - skb->data); | 663 | skb_pull(skb, (u8 *)encap - skb->data); |
664 | skb_reset_network_header(skb); | 664 | skb_reset_network_header(skb); |
665 | skb->protocol = htons(ETH_P_IPV6); | 665 | skb->protocol = htons(ETH_P_IPV6); |
666 | skb->ip_summed = 0; | 666 | skb->ip_summed = CHECKSUM_NONE; |
667 | skb->pkt_type = PACKET_HOST; | 667 | skb->pkt_type = PACKET_HOST; |
668 | 668 | ||
669 | skb_tunnel_rx(skb, reg_dev); | 669 | skb_tunnel_rx(skb, reg_dev); |
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 5b743bdd89ba..36477538cea8 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
@@ -656,10 +656,16 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, | |||
656 | n = 1; | 656 | n = 1; |
657 | 657 | ||
658 | name_len = fp[n++]; | 658 | name_len = fp[n++]; |
659 | |||
660 | IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;); | ||
661 | |||
659 | memcpy(name, fp+n, name_len); n+=name_len; | 662 | memcpy(name, fp+n, name_len); n+=name_len; |
660 | name[name_len] = '\0'; | 663 | name[name_len] = '\0'; |
661 | 664 | ||
662 | attr_len = fp[n++]; | 665 | attr_len = fp[n++]; |
666 | |||
667 | IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;); | ||
668 | |||
663 | memcpy(attr, fp+n, attr_len); n+=attr_len; | 669 | memcpy(attr, fp+n, attr_len); n+=attr_len; |
664 | attr[attr_len] = '\0'; | 670 | attr[attr_len] = '\0'; |
665 | 671 | ||
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 7c567b8aa89a..2bb2beb6a373 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c | |||
@@ -105,6 +105,9 @@ irnet_ctrl_write(irnet_socket * ap, | |||
105 | while(isspace(start[length - 1])) | 105 | while(isspace(start[length - 1])) |
106 | length--; | 106 | length--; |
107 | 107 | ||
108 | DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5, | ||
109 | -EINVAL, CTRL_ERROR, "Invalid nickname.\n"); | ||
110 | |||
108 | /* Copy the name for later reuse */ | 111 | /* Copy the name for later reuse */ |
109 | memcpy(ap->rname, start + 5, length - 5); | 112 | memcpy(ap->rname, start + 5, length - 5); |
110 | ap->rname[length - 5] = '\0'; | 113 | ap->rname[length - 5] = '\0'; |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 5ee0c62046a0..a80aef6e3d1f 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -978,7 +978,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros | |||
978 | struct sock *make; | 978 | struct sock *make; |
979 | struct rose_sock *make_rose; | 979 | struct rose_sock *make_rose; |
980 | struct rose_facilities_struct facilities; | 980 | struct rose_facilities_struct facilities; |
981 | int n, len; | 981 | int n; |
982 | 982 | ||
983 | skb->sk = NULL; /* Initially we don't know who it's for */ | 983 | skb->sk = NULL; /* Initially we don't know who it's for */ |
984 | 984 | ||
@@ -987,9 +987,9 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros | |||
987 | */ | 987 | */ |
988 | memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); | 988 | memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); |
989 | 989 | ||
990 | len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1; | 990 | if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, |
991 | len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1; | 991 | skb->len - ROSE_CALL_REQ_FACILITIES_OFF, |
992 | if (!rose_parse_facilities(skb->data + len + 4, &facilities)) { | 992 | &facilities)) { |
993 | rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); | 993 | rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); |
994 | return 0; | 994 | return 0; |
995 | } | 995 | } |
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index ae4a9d99aec7..344456206b70 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c | |||
@@ -73,9 +73,20 @@ static void rose_loopback_timer(unsigned long param) | |||
73 | unsigned int lci_i, lci_o; | 73 | unsigned int lci_i, lci_o; |
74 | 74 | ||
75 | while ((skb = skb_dequeue(&loopback_queue)) != NULL) { | 75 | while ((skb = skb_dequeue(&loopback_queue)) != NULL) { |
76 | if (skb->len < ROSE_MIN_LEN) { | ||
77 | kfree_skb(skb); | ||
78 | continue; | ||
79 | } | ||
76 | lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); | 80 | lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); |
77 | frametype = skb->data[2]; | 81 | frametype = skb->data[2]; |
78 | dest = (rose_address *)(skb->data + 4); | 82 | if (frametype == ROSE_CALL_REQUEST && |
83 | (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || | ||
84 | skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] != | ||
85 | ROSE_CALL_REQ_ADDR_LEN_VAL)) { | ||
86 | kfree_skb(skb); | ||
87 | continue; | ||
88 | } | ||
89 | dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF); | ||
79 | lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; | 90 | lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; |
80 | 91 | ||
81 | skb_reset_transport_header(skb); | 92 | skb_reset_transport_header(skb); |
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 88a77e90e7e8..08dcd2f29cdc 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -861,7 +861,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) | |||
861 | unsigned int lci, new_lci; | 861 | unsigned int lci, new_lci; |
862 | unsigned char cause, diagnostic; | 862 | unsigned char cause, diagnostic; |
863 | struct net_device *dev; | 863 | struct net_device *dev; |
864 | int len, res = 0; | 864 | int res = 0; |
865 | char buf[11]; | 865 | char buf[11]; |
866 | 866 | ||
867 | #if 0 | 867 | #if 0 |
@@ -869,10 +869,17 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) | |||
869 | return res; | 869 | return res; |
870 | #endif | 870 | #endif |
871 | 871 | ||
872 | if (skb->len < ROSE_MIN_LEN) | ||
873 | return res; | ||
872 | frametype = skb->data[2]; | 874 | frametype = skb->data[2]; |
873 | lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); | 875 | lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); |
874 | src_addr = (rose_address *)(skb->data + 9); | 876 | if (frametype == ROSE_CALL_REQUEST && |
875 | dest_addr = (rose_address *)(skb->data + 4); | 877 | (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || |
878 | skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] != | ||
879 | ROSE_CALL_REQ_ADDR_LEN_VAL)) | ||
880 | return res; | ||
881 | src_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF); | ||
882 | dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF); | ||
876 | 883 | ||
877 | spin_lock_bh(&rose_neigh_list_lock); | 884 | spin_lock_bh(&rose_neigh_list_lock); |
878 | spin_lock_bh(&rose_route_list_lock); | 885 | spin_lock_bh(&rose_route_list_lock); |
@@ -1010,12 +1017,11 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) | |||
1010 | goto out; | 1017 | goto out; |
1011 | } | 1018 | } |
1012 | 1019 | ||
1013 | len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1; | ||
1014 | len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1; | ||
1015 | |||
1016 | memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); | 1020 | memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); |
1017 | 1021 | ||
1018 | if (!rose_parse_facilities(skb->data + len + 4, &facilities)) { | 1022 | if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF, |
1023 | skb->len - ROSE_CALL_REQ_FACILITIES_OFF, | ||
1024 | &facilities)) { | ||
1019 | rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76); | 1025 | rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76); |
1020 | goto out; | 1026 | goto out; |
1021 | } | 1027 | } |
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c index 1734abba26a2..f6c71caa94b9 100644 --- a/net/rose/rose_subr.c +++ b/net/rose/rose_subr.c | |||
@@ -142,7 +142,7 @@ void rose_write_internal(struct sock *sk, int frametype) | |||
142 | *dptr++ = ROSE_GFI | lci1; | 142 | *dptr++ = ROSE_GFI | lci1; |
143 | *dptr++ = lci2; | 143 | *dptr++ = lci2; |
144 | *dptr++ = frametype; | 144 | *dptr++ = frametype; |
145 | *dptr++ = 0xAA; | 145 | *dptr++ = ROSE_CALL_REQ_ADDR_LEN_VAL; |
146 | memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN); | 146 | memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN); |
147 | dptr += ROSE_ADDR_LEN; | 147 | dptr += ROSE_ADDR_LEN; |
148 | memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); | 148 | memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); |
@@ -246,12 +246,16 @@ static int rose_parse_national(unsigned char *p, struct rose_facilities_struct * | |||
246 | do { | 246 | do { |
247 | switch (*p & 0xC0) { | 247 | switch (*p & 0xC0) { |
248 | case 0x00: | 248 | case 0x00: |
249 | if (len < 2) | ||
250 | return -1; | ||
249 | p += 2; | 251 | p += 2; |
250 | n += 2; | 252 | n += 2; |
251 | len -= 2; | 253 | len -= 2; |
252 | break; | 254 | break; |
253 | 255 | ||
254 | case 0x40: | 256 | case 0x40: |
257 | if (len < 3) | ||
258 | return -1; | ||
255 | if (*p == FAC_NATIONAL_RAND) | 259 | if (*p == FAC_NATIONAL_RAND) |
256 | facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF); | 260 | facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF); |
257 | p += 3; | 261 | p += 3; |
@@ -260,40 +264,61 @@ static int rose_parse_national(unsigned char *p, struct rose_facilities_struct * | |||
260 | break; | 264 | break; |
261 | 265 | ||
262 | case 0x80: | 266 | case 0x80: |
267 | if (len < 4) | ||
268 | return -1; | ||
263 | p += 4; | 269 | p += 4; |
264 | n += 4; | 270 | n += 4; |
265 | len -= 4; | 271 | len -= 4; |
266 | break; | 272 | break; |
267 | 273 | ||
268 | case 0xC0: | 274 | case 0xC0: |
275 | if (len < 2) | ||
276 | return -1; | ||
269 | l = p[1]; | 277 | l = p[1]; |
278 | if (len < 2 + l) | ||
279 | return -1; | ||
270 | if (*p == FAC_NATIONAL_DEST_DIGI) { | 280 | if (*p == FAC_NATIONAL_DEST_DIGI) { |
271 | if (!fac_national_digis_received) { | 281 | if (!fac_national_digis_received) { |
282 | if (l < AX25_ADDR_LEN) | ||
283 | return -1; | ||
272 | memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN); | 284 | memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN); |
273 | facilities->source_ndigis = 1; | 285 | facilities->source_ndigis = 1; |
274 | } | 286 | } |
275 | } | 287 | } |
276 | else if (*p == FAC_NATIONAL_SRC_DIGI) { | 288 | else if (*p == FAC_NATIONAL_SRC_DIGI) { |
277 | if (!fac_national_digis_received) { | 289 | if (!fac_national_digis_received) { |
290 | if (l < AX25_ADDR_LEN) | ||
291 | return -1; | ||
278 | memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN); | 292 | memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN); |
279 | facilities->dest_ndigis = 1; | 293 | facilities->dest_ndigis = 1; |
280 | } | 294 | } |
281 | } | 295 | } |
282 | else if (*p == FAC_NATIONAL_FAIL_CALL) { | 296 | else if (*p == FAC_NATIONAL_FAIL_CALL) { |
297 | if (l < AX25_ADDR_LEN) | ||
298 | return -1; | ||
283 | memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN); | 299 | memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN); |
284 | } | 300 | } |
285 | else if (*p == FAC_NATIONAL_FAIL_ADD) { | 301 | else if (*p == FAC_NATIONAL_FAIL_ADD) { |
302 | if (l < 1 + ROSE_ADDR_LEN) | ||
303 | return -1; | ||
286 | memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN); | 304 | memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN); |
287 | } | 305 | } |
288 | else if (*p == FAC_NATIONAL_DIGIS) { | 306 | else if (*p == FAC_NATIONAL_DIGIS) { |
307 | if (l % AX25_ADDR_LEN) | ||
308 | return -1; | ||
289 | fac_national_digis_received = 1; | 309 | fac_national_digis_received = 1; |
290 | facilities->source_ndigis = 0; | 310 | facilities->source_ndigis = 0; |
291 | facilities->dest_ndigis = 0; | 311 | facilities->dest_ndigis = 0; |
292 | for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { | 312 | for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { |
293 | if (pt[6] & AX25_HBIT) | 313 | if (pt[6] & AX25_HBIT) { |
314 | if (facilities->dest_ndigis >= ROSE_MAX_DIGIS) | ||
315 | return -1; | ||
294 | memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); | 316 | memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); |
295 | else | 317 | } else { |
318 | if (facilities->source_ndigis >= ROSE_MAX_DIGIS) | ||
319 | return -1; | ||
296 | memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); | 320 | memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); |
321 | } | ||
297 | } | 322 | } |
298 | } | 323 | } |
299 | p += l + 2; | 324 | p += l + 2; |
@@ -314,25 +339,38 @@ static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *fac | |||
314 | do { | 339 | do { |
315 | switch (*p & 0xC0) { | 340 | switch (*p & 0xC0) { |
316 | case 0x00: | 341 | case 0x00: |
342 | if (len < 2) | ||
343 | return -1; | ||
317 | p += 2; | 344 | p += 2; |
318 | n += 2; | 345 | n += 2; |
319 | len -= 2; | 346 | len -= 2; |
320 | break; | 347 | break; |
321 | 348 | ||
322 | case 0x40: | 349 | case 0x40: |
350 | if (len < 3) | ||
351 | return -1; | ||
323 | p += 3; | 352 | p += 3; |
324 | n += 3; | 353 | n += 3; |
325 | len -= 3; | 354 | len -= 3; |
326 | break; | 355 | break; |
327 | 356 | ||
328 | case 0x80: | 357 | case 0x80: |
358 | if (len < 4) | ||
359 | return -1; | ||
329 | p += 4; | 360 | p += 4; |
330 | n += 4; | 361 | n += 4; |
331 | len -= 4; | 362 | len -= 4; |
332 | break; | 363 | break; |
333 | 364 | ||
334 | case 0xC0: | 365 | case 0xC0: |
366 | if (len < 2) | ||
367 | return -1; | ||
335 | l = p[1]; | 368 | l = p[1]; |
369 | |||
370 | /* Prevent overflows*/ | ||
371 | if (l < 10 || l > 20) | ||
372 | return -1; | ||
373 | |||
336 | if (*p == FAC_CCITT_DEST_NSAP) { | 374 | if (*p == FAC_CCITT_DEST_NSAP) { |
337 | memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); | 375 | memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); |
338 | memcpy(callsign, p + 12, l - 10); | 376 | memcpy(callsign, p + 12, l - 10); |
@@ -355,45 +393,44 @@ static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *fac | |||
355 | return n; | 393 | return n; |
356 | } | 394 | } |
357 | 395 | ||
358 | int rose_parse_facilities(unsigned char *p, | 396 | int rose_parse_facilities(unsigned char *p, unsigned packet_len, |
359 | struct rose_facilities_struct *facilities) | 397 | struct rose_facilities_struct *facilities) |
360 | { | 398 | { |
361 | int facilities_len, len; | 399 | int facilities_len, len; |
362 | 400 | ||
363 | facilities_len = *p++; | 401 | facilities_len = *p++; |
364 | 402 | ||
365 | if (facilities_len == 0) | 403 | if (facilities_len == 0 || (unsigned)facilities_len > packet_len) |
366 | return 0; | 404 | return 0; |
367 | 405 | ||
368 | while (facilities_len > 0) { | 406 | while (facilities_len >= 3 && *p == 0x00) { |
369 | if (*p == 0x00) { | 407 | facilities_len--; |
370 | facilities_len--; | 408 | p++; |
371 | p++; | 409 | |
372 | 410 | switch (*p) { | |
373 | switch (*p) { | 411 | case FAC_NATIONAL: /* National */ |
374 | case FAC_NATIONAL: /* National */ | 412 | len = rose_parse_national(p + 1, facilities, facilities_len - 1); |
375 | len = rose_parse_national(p + 1, facilities, facilities_len - 1); | 413 | break; |
376 | facilities_len -= len + 1; | 414 | |
377 | p += len + 1; | 415 | case FAC_CCITT: /* CCITT */ |
378 | break; | 416 | len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); |
379 | 417 | break; | |
380 | case FAC_CCITT: /* CCITT */ | 418 | |
381 | len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); | 419 | default: |
382 | facilities_len -= len + 1; | 420 | printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p); |
383 | p += len + 1; | 421 | len = 1; |
384 | break; | 422 | break; |
385 | 423 | } | |
386 | default: | 424 | |
387 | printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p); | 425 | if (len < 0) |
388 | facilities_len--; | 426 | return 0; |
389 | p++; | 427 | if (WARN_ON(len >= facilities_len)) |
390 | break; | 428 | return 0; |
391 | } | 429 | facilities_len -= len + 1; |
392 | } else | 430 | p += len + 1; |
393 | break; /* Error in facilities format */ | ||
394 | } | 431 | } |
395 | 432 | ||
396 | return 1; | 433 | return facilities_len == 0; |
397 | } | 434 | } |
398 | 435 | ||
399 | static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose) | 436 | static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose) |
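Note on the ROSE hunks: they all attack the same class of bug, namely facilities parsing that trusted attacker-controlled length fields. rose_parse_facilities() now receives the number of bytes actually available, the national/CCITT sub-parsers bounds-check every element before consuming it, call-request frames are sanity-checked for a minimum length and a fixed address-length byte, and rose_route_frame()/rose_rx_call_request() use fixed header offsets instead of recomputing them from packet data. The underlying "check before you consume" walk, as a standalone sketch (plain C; the record codes and sizes are invented, only the structure mirrors the patch):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Walk a buffer of {code, len, value[len]} records without trusting 'len'.
     * Returns 0 on success, -1 if any record would run past 'avail'. */
    static int parse_tlv(const uint8_t *p, size_t avail)
    {
            while (avail >= 2) {                    /* need at least code + len */
                    uint8_t code = p[0];
                    uint8_t len  = p[1];

                    if ((size_t)len + 2 > avail)    /* value would overrun the buffer */
                            return -1;

                    printf("record %#x, %u byte(s) of value\n",
                           (unsigned)code, (unsigned)len);

                    p     += 2 + len;
                    avail -= 2 + len;
            }
            return avail == 0 ? 0 : -1;             /* trailing garbage is an error too */
    }

    int main(void)
    {
            const uint8_t ok[]  = { 0x01, 0x02, 0xAA, 0xBB, 0x02, 0x00 };
            const uint8_t bad[] = { 0x01, 0x7F };   /* claims 127 value bytes, has none */

            printf("ok:  %d\n", parse_tlv(ok, sizeof(ok)));
            printf("bad: %d\n", parse_tlv(bad, sizeof(bad)));
            return 0;
    }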
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index ffb687671da0..6b43ee7221d5 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -860,8 +860,10 @@ static void rpc_release_resources_task(struct rpc_task *task) | |||
860 | { | 860 | { |
861 | if (task->tk_rqstp) | 861 | if (task->tk_rqstp) |
862 | xprt_release(task); | 862 | xprt_release(task); |
863 | if (task->tk_msg.rpc_cred) | 863 | if (task->tk_msg.rpc_cred) { |
864 | put_rpccred(task->tk_msg.rpc_cred); | 864 | put_rpccred(task->tk_msg.rpc_cred); |
865 | task->tk_msg.rpc_cred = NULL; | ||
866 | } | ||
865 | rpc_task_release_client(task); | 867 | rpc_task_release_client(task); |
866 | } | 868 | } |
867 | 869 | ||
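Note on the sunrpc hunk: rpc_release_resources_task() drops the credential reference and now also clears the pointer, so a repeated pass over the same task sees NULL and cannot put the same rpc_cred twice. The put-and-clear idiom (kernel context; names from the hunk above):

    if (task->tk_msg.rpc_cred) {
            put_rpccred(task->tk_msg.rpc_cred);
            task->tk_msg.rpc_cred = NULL;   /* a repeated release becomes a no-op */
    }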
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 872065ca7f8c..a026b0ef2443 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -173,7 +173,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) | |||
173 | goto drop_unlock; | 173 | goto drop_unlock; |
174 | } | 174 | } |
175 | 175 | ||
176 | if (x->props.replay_window && x->repl->check(x, skb, seq)) { | 176 | if (x->repl->check(x, skb, seq)) { |
177 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); | 177 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); |
178 | goto drop_unlock; | 178 | goto drop_unlock; |
179 | } | 179 | } |
@@ -190,6 +190,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) | |||
190 | XFRM_SKB_CB(skb)->seq.input.low = seq; | 190 | XFRM_SKB_CB(skb)->seq.input.low = seq; |
191 | XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; | 191 | XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; |
192 | 192 | ||
193 | skb_dst_force(skb); | ||
194 | |||
193 | nexthdr = x->type->input(x, skb); | 195 | nexthdr = x->type->input(x, skb); |
194 | 196 | ||
195 | if (nexthdr == -EINPROGRESS) | 197 | if (nexthdr == -EINPROGRESS) |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 1aba03f449cc..47bacd8c0250 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -78,6 +78,8 @@ static int xfrm_output_one(struct sk_buff *skb, int err) | |||
78 | 78 | ||
79 | spin_unlock_bh(&x->lock); | 79 | spin_unlock_bh(&x->lock); |
80 | 80 | ||
81 | skb_dst_force(skb); | ||
82 | |||
81 | err = x->type->output(x, skb); | 83 | err = x->type->output(x, skb); |
82 | if (err == -EINPROGRESS) | 84 | if (err == -EINPROGRESS) |
83 | goto out_exit; | 85 | goto out_exit; |
@@ -94,7 +96,7 @@ resume: | |||
94 | err = -EHOSTUNREACH; | 96 | err = -EHOSTUNREACH; |
95 | goto error_nolock; | 97 | goto error_nolock; |
96 | } | 98 | } |
97 | skb_dst_set(skb, dst_clone(dst)); | 99 | skb_dst_set(skb, dst); |
98 | x = dst->xfrm; | 100 | x = dst->xfrm; |
99 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); | 101 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); |
100 | 102 | ||
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 2f5be5b15740..f218385950ca 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -118,6 +118,9 @@ static int xfrm_replay_check(struct xfrm_state *x, | |||
118 | u32 diff; | 118 | u32 diff; |
119 | u32 seq = ntohl(net_seq); | 119 | u32 seq = ntohl(net_seq); |
120 | 120 | ||
121 | if (!x->props.replay_window) | ||
122 | return 0; | ||
123 | |||
121 | if (unlikely(seq == 0)) | 124 | if (unlikely(seq == 0)) |
122 | goto err; | 125 | goto err; |
123 | 126 | ||
@@ -193,9 +196,14 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x, | |||
193 | { | 196 | { |
194 | unsigned int bitnr, nr; | 197 | unsigned int bitnr, nr; |
195 | struct xfrm_replay_state_esn *replay_esn = x->replay_esn; | 198 | struct xfrm_replay_state_esn *replay_esn = x->replay_esn; |
199 | u32 pos; | ||
196 | u32 seq = ntohl(net_seq); | 200 | u32 seq = ntohl(net_seq); |
197 | u32 diff = replay_esn->seq - seq; | 201 | u32 diff = replay_esn->seq - seq; |
198 | u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; | 202 | |
203 | if (!replay_esn->replay_window) | ||
204 | return 0; | ||
205 | |||
206 | pos = (replay_esn->seq - 1) % replay_esn->replay_window; | ||
199 | 207 | ||
200 | if (unlikely(seq == 0)) | 208 | if (unlikely(seq == 0)) |
201 | goto err; | 209 | goto err; |
@@ -373,12 +381,17 @@ static int xfrm_replay_check_esn(struct xfrm_state *x, | |||
373 | unsigned int bitnr, nr; | 381 | unsigned int bitnr, nr; |
374 | u32 diff; | 382 | u32 diff; |
375 | struct xfrm_replay_state_esn *replay_esn = x->replay_esn; | 383 | struct xfrm_replay_state_esn *replay_esn = x->replay_esn; |
384 | u32 pos; | ||
376 | u32 seq = ntohl(net_seq); | 385 | u32 seq = ntohl(net_seq); |
377 | u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; | ||
378 | u32 wsize = replay_esn->replay_window; | 386 | u32 wsize = replay_esn->replay_window; |
379 | u32 top = replay_esn->seq; | 387 | u32 top = replay_esn->seq; |
380 | u32 bottom = top - wsize + 1; | 388 | u32 bottom = top - wsize + 1; |
381 | 389 | ||
390 | if (!wsize) | ||
391 | return 0; | ||
392 | |||
393 | pos = (replay_esn->seq - 1) % replay_esn->replay_window; | ||
394 | |||
382 | if (unlikely(seq == 0 && replay_esn->seq_hi == 0 && | 395 | if (unlikely(seq == 0 && replay_esn->seq_hi == 0 && |
383 | (replay_esn->seq < replay_esn->replay_window - 1))) | 396 | (replay_esn->seq < replay_esn->replay_window - 1))) |
384 | goto err; | 397 | goto err; |
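Note on the xfrm input/output/replay hunks: the replay-window test moves out of xfrm_input() and into each replay backend, so x->repl->check() is always called and the legacy, bitmap, and ESN checkers accept immediately when the window is zero (replay protection disabled); the bitmap/ESN variants also defer computing "pos" until after that test, since the old code evaluated `(seq - 1) % replay_window` unconditionally. Separately, the skb_dst_force() calls added in xfrm_input()/xfrm_output_one() pin the skb's dst with a real reference before the type handlers run, so a noref dst cannot go away if processing continues asynchronously. Why the ordering of the window test matters, as a standalone illustration (plain C; numbers are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* With replay_window == 0, computing the slot first would divide by zero;
     * checking the window first makes the disabled case a clean accept. */
    static int check(uint32_t seq_top, uint32_t replay_window)
    {
            uint32_t pos;

            if (!replay_window)
                    return 0;                        /* disabled: nothing to check */

            pos = (seq_top - 1) % replay_window;     /* safe now */
            printf("window slot %u\n", (unsigned)pos);
            return 0;
    }

    int main(void)
    {
            check(100, 32);   /* prints "window slot 3" */
            check(100, 0);    /* accepted, no division performed */
            return 0;
    }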
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f83a3d1da81b..dd78536d40de 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1181,6 +1181,12 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp) | |||
1181 | goto error; | 1181 | goto error; |
1182 | } | 1182 | } |
1183 | 1183 | ||
1184 | if (orig->replay_esn) { | ||
1185 | err = xfrm_replay_clone(x, orig); | ||
1186 | if (err) | ||
1187 | goto error; | ||
1188 | } | ||
1189 | |||
1184 | memcpy(&x->mark, &orig->mark, sizeof(x->mark)); | 1190 | memcpy(&x->mark, &orig->mark, sizeof(x->mark)); |
1185 | 1191 | ||
1186 | err = xfrm_init_state(x); | 1192 | err = xfrm_init_state(x); |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index fc152d28753c..3d15d3e1b2c4 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -127,6 +127,9 @@ static inline int verify_replay(struct xfrm_usersa_info *p, | |||
127 | if (!rt) | 127 | if (!rt) |
128 | return 0; | 128 | return 0; |
129 | 129 | ||
130 | if (p->id.proto != IPPROTO_ESP) | ||
131 | return -EINVAL; | ||
132 | |||
130 | if (p->replay_window != 0) | 133 | if (p->replay_window != 0) |
131 | return -EINVAL; | 134 | return -EINVAL; |
132 | 135 | ||
@@ -360,6 +363,23 @@ static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props, | |||
360 | return 0; | 363 | return 0; |
361 | } | 364 | } |
362 | 365 | ||
366 | static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, | ||
367 | struct nlattr *rp) | ||
368 | { | ||
369 | struct xfrm_replay_state_esn *up; | ||
370 | |||
371 | if (!replay_esn || !rp) | ||
372 | return 0; | ||
373 | |||
374 | up = nla_data(rp); | ||
375 | |||
376 | if (xfrm_replay_state_esn_len(replay_esn) != | ||
377 | xfrm_replay_state_esn_len(up)) | ||
378 | return -EINVAL; | ||
379 | |||
380 | return 0; | ||
381 | } | ||
382 | |||
363 | static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, | 383 | static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, |
364 | struct xfrm_replay_state_esn **preplay_esn, | 384 | struct xfrm_replay_state_esn **preplay_esn, |
365 | struct nlattr *rta) | 385 | struct nlattr *rta) |
@@ -1766,6 +1786,10 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1766 | if (x->km.state != XFRM_STATE_VALID) | 1786 | if (x->km.state != XFRM_STATE_VALID) |
1767 | goto out; | 1787 | goto out; |
1768 | 1788 | ||
1789 | err = xfrm_replay_verify_len(x->replay_esn, rp); | ||
1790 | if (err) | ||
1791 | goto out; | ||
1792 | |||
1769 | spin_lock_bh(&x->lock); | 1793 | spin_lock_bh(&x->lock); |
1770 | xfrm_update_ae_params(x, attrs); | 1794 | xfrm_update_ae_params(x, attrs); |
1771 | spin_unlock_bh(&x->lock); | 1795 | spin_unlock_bh(&x->lock); |
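Note on the xfrm_user hunks: two netlink-side checks are added. verify_replay() refuses the ESN replay attribute unless the SA protocol is ESP, and xfrm_new_ae() calls the new xfrm_replay_verify_len() so a user-supplied xfrm_replay_state_esn whose size differs from the kernel's copy is rejected with -EINVAL before the state is updated. The core of the length check (kernel context; names from the hunk, the length helper accounts for the variable-sized bitmap):

    struct xfrm_replay_state_esn *up = nla_data(rp);

    if (xfrm_replay_state_esn_len(x->replay_esn) != xfrm_replay_state_esn_len(up))
            return -EINVAL;         /* mismatched bitmap size: don't copy it over */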
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 3e7544d2a07b..ea7c01f4a2bf 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -213,7 +213,7 @@ static u16 map_class(u16 pol_value) | |||
213 | return i; | 213 | return i; |
214 | } | 214 | } |
215 | 215 | ||
216 | return pol_value; | 216 | return SECCLASS_NULL; |
217 | } | 217 | } |
218 | 218 | ||
219 | static void map_decision(u16 tclass, struct av_decision *avd, | 219 | static void map_decision(u16 tclass, struct av_decision *avd, |
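Note on the selinux hunk: map_class() translates a policy class value into the kernel's class numbering; when no mapping exists it now returns SECCLASS_NULL instead of passing the raw policy value through, so an unknown class cannot be mistaken for a valid kernel class index. Conceptual shape of the lookup after the change (a sketch only; the table name and accessor below are hypothetical stand-ins for the function's internals):

    static u16 map_class_sketch(u16 pol_value)
    {
            u16 i;

            for (i = 1; i < nr_mapped_classes; i++)       /* hypothetical table size */
                    if (mapped_class_value(i) == pol_value)
                            return i;

            return SECCLASS_NULL;   /* unknown policy class: fail closed, not pass through */
    }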